Merge branch 'develop' into wip-isildur-g-cppcheck-47-48-58

This commit is contained in:
g. economou
2024-05-01 10:59:59 +03:00
committed by GitHub
89 changed files with 1608 additions and 588 deletions

View File

@@ -542,14 +542,13 @@ u32 crc32c_sb8_64_bit(u32 running_crc, const unsigned char* p_buf,
// Main aligned loop, processes eight bytes at a time.
u32 term1, term2;
for (size_t li = 0; li < running_length/8; li++) {
u32 block = *(const u32 *)p_buf;
crc ^= block;
p_buf += 4;
term1 = crc_tableil8_o88[crc & 0x000000FF] ^
u32 term1 = crc_tableil8_o88[crc & 0x000000FF] ^
crc_tableil8_o80[(crc >> 8) & 0x000000FF];
term2 = crc >> 16;
u32 term2 = crc >> 16;
crc = term1 ^
crc_tableil8_o72[term2 & 0x000000FF] ^
crc_tableil8_o64[(term2 >> 8) & 0x000000FF];
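
For orientation, a minimal bytewise CRC32C reference (a sketch, not the library's code): the slicing-by-8 loop above precomputes these per-bit steps into the crc_tableil8_* lookup tables so several input bytes fold in per iteration; pre/post inversion conventions may differ from the caller's.

#include <stddef.h>
#include <stdint.h>

/* Plain bytewise CRC32C; 0x82F63B78 is the reflected Castagnoli polynomial. */
static uint32_t crc32c_bytewise_ref(uint32_t crc, const unsigned char *p,
                                    size_t len) {
    crc = ~crc;                          /* conventional pre-inversion */
    for (size_t i = 0; i < len; i++) {
        crc ^= p[i];
        for (int k = 0; k < 8; k++) {    /* one polynomial step per bit */
            crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));
        }
    }
    return ~crc;                         /* conventional post-inversion */
}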

View File

@@ -79,21 +79,18 @@ static UNUSED
const platform_t hs_current_platform_no_avx2 = {
HS_PLATFORM_NOAVX2 |
HS_PLATFORM_NOAVX512 |
HS_PLATFORM_NOAVX512VBMI |
0,
HS_PLATFORM_NOAVX512VBMI
};
static UNUSED
const platform_t hs_current_platform_no_avx512 = {
HS_PLATFORM_NOAVX512 |
HS_PLATFORM_NOAVX512VBMI |
0,
HS_PLATFORM_NOAVX512VBMI
};
static UNUSED
const platform_t hs_current_platform_no_avx512vbmi = {
HS_PLATFORM_NOAVX512VBMI |
0,
HS_PLATFORM_NOAVX512VBMI
};
/*

View File

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2016-2020, Intel Corporation
* Copyright (c) 2024, VectorCamp PC
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -30,6 +31,39 @@
#include "hs_common.h"
#include "hs_runtime.h"
#include "ue2common.h"
/* Streamlining the dispatch to eliminate runtime checking/branching:
 * the first call to a function runs the resolve code and sets the static
 * dispatch pointer to point to the correct target; subsequent calls go
 * directly through the resolved pointer. The simplest way to accomplish
 * this is to initially point the pointer at the resolve function.
 * Doing this in a manner invisible to the user involves some rather
 * ugly/confusing macros. There are four macros that assemble the code for
 * each function we want to dispatch in this manner (a hand-expanded sketch
 * for a toy function follows the macro definitions below):
 * CREATE_DISPATCH
 * this generates the declarations for the candidate target functions,
 * for the fat_dispatch function pointer, and for the resolve_ function;
 * points the function pointer at the resolve function; and contains
 * most of the definition of the resolve function. The very end of the
 * resolve function is completed by the next macro, because although the
 * CREATE_DISPATCH macro has the argument list with the arg declarations,
 * which is needed to generate correct function signatures, a macro
 * cannot turn that list into a _call_ to one of those functions.
 * CONNECT_ARGS_1
 * this macro fills in the actual call at the end of the resolve function
 * with the correct arg list, hence the name.
 * CONNECT_DISPATCH_2
 * this macro likewise supplies the beginning of the definition of the
 * actual entry point function (the 'real name' that's called by the user),
 * but again the pass-through call cannot invoke the target without
 * getting the arg list, which is supplied by the final macro,
 * CONNECT_ARGS_3.
 */
#if defined(ARCH_IA32) || defined(ARCH_X86_64)
#include "util/arch/x86/cpuid_inline.h"
#include "util/join.h"
@@ -57,30 +91,38 @@
return (RTYPE)HS_ARCH_ERROR; \
} \
\
/* resolver */ \
static RTYPE (*JOIN(resolve_, NAME)(void))(__VA_ARGS__) { \
if (check_avx512vbmi()) { \
return JOIN(avx512vbmi_, NAME); \
} \
if (check_avx512()) { \
return JOIN(avx512_, NAME); \
} \
if (check_avx2()) { \
return JOIN(avx2_, NAME); \
} \
if (check_sse42() && check_popcnt()) { \
return JOIN(corei7_, NAME); \
} \
if (check_ssse3()) { \
return JOIN(core2_, NAME); \
} \
/* anything else is fail */ \
return JOIN(error_, NAME); \
} \
/* dispatch routing pointer for this function */ \
/* initially point it at the resolve function */ \
static RTYPE JOIN(resolve_, NAME)(__VA_ARGS__); \
static RTYPE (* JOIN(fat_dispatch_, NAME))(__VA_ARGS__) = \
&JOIN(resolve_, NAME); \
\
/* function */ \
HS_PUBLIC_API \
RTYPE NAME(__VA_ARGS__) __attribute__((ifunc("resolve_" #NAME)))
/* resolver */ \
static RTYPE JOIN(resolve_, NAME)(__VA_ARGS__) { \
if (check_avx512vbmi()) { \
fat_dispatch_ ## NAME = &JOIN(avx512vbmi_, NAME); \
} \
else if (check_avx512()) { \
fat_dispatch_ ## NAME = &JOIN(avx512_, NAME); \
} \
else if (check_avx2()) { \
fat_dispatch_ ## NAME = &JOIN(avx2_, NAME); \
} \
else if (check_sse42() && check_popcnt()) { \
fat_dispatch_ ## NAME = &JOIN(corei7_, NAME); \
} \
else if (check_ssse3()) { \
fat_dispatch_ ## NAME = &JOIN(core2_, NAME); \
} else { \
/* anything else is fail */ \
fat_dispatch_ ## NAME = &JOIN(error_, NAME); \
} \
/* the rest of the function is completed in the CONNECT_ARGS_1 macro. */
#elif defined(ARCH_AARCH64)
#include "util/arch/arm/cpuid_inline.h"
@@ -97,99 +139,226 @@
return (RTYPE)HS_ARCH_ERROR; \
} \
\
/* resolver */ \
static RTYPE (*JOIN(resolve_, NAME)(void))(__VA_ARGS__) { \
if (check_sve2()) { \
return JOIN(sve2_, NAME); \
} \
if (check_sve()) { \
return JOIN(sve_, NAME); \
} \
if (check_neon()) { \
return JOIN(neon_, NAME); \
} \
/* anything else is fail */ \
return JOIN(error_, NAME); \
} \
/* dispatch routing pointer for this function */ \
/* initially point it at the resolve function */ \
static RTYPE JOIN(resolve_, NAME)(__VA_ARGS__); \
static RTYPE (* JOIN(fat_dispatch_, NAME))(__VA_ARGS__) = \
&JOIN(resolve_, NAME); \
\
/* function */ \
HS_PUBLIC_API \
RTYPE NAME(__VA_ARGS__) __attribute__((ifunc("resolve_" #NAME)))
/* resolver */ \
static RTYPE JOIN(resolve_, NAME)(__VA_ARGS__) { \
if (check_sve2()) { \
fat_dispatch_ ## NAME = &JOIN(sve2_, NAME); \
} \
else if (check_sve()) { \
fat_dispatch_ ## NAME = &JOIN(sve_, NAME); \
} \
else if (check_neon()) { \
fat_dispatch_ ## NAME = &JOIN(neon_, NAME); \
} else { \
/* anything else is fail */ \
fat_dispatch_ ## NAME = &JOIN(error_, NAME); \
} \
/* the rest of the function is completed in the CONNECT_ARGS_1 macro. */
#endif
#define CONNECT_ARGS_1(RTYPE, NAME, ...) \
return (*fat_dispatch_ ## NAME)(__VA_ARGS__); \
} \
#define CONNECT_DISPATCH_2(RTYPE, NAME, ...) \
/* new function */ \
HS_PUBLIC_API \
RTYPE NAME(__VA_ARGS__) { \
#define CONNECT_ARGS_3(RTYPE, NAME, ...) \
return (*fat_dispatch_ ## NAME)(__VA_ARGS__); \
} \
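
To make the macro plumbing concrete, here is roughly what the four macros assemble for a hypothetical one-argument function hs_foo (a hand-expanded sketch, not literal preprocessor output; candidates trimmed to one real target plus the error fallback, with the surrounding headers assumed for hs_error_t, HS_PUBLIC_API and check_avx2):

/* CREATE_DISPATCH(hs_error_t, hs_foo, int x); */
static hs_error_t avx2_hs_foo(int x);      /* candidate target */
static hs_error_t error_hs_foo(int x);     /* returns HS_ARCH_ERROR */
static hs_error_t resolve_hs_foo(int x);   /* forward declaration */
/* dispatch routing pointer, initially aimed at the resolver */
static hs_error_t (*fat_dispatch_hs_foo)(int x) = &resolve_hs_foo;
static hs_error_t resolve_hs_foo(int x) {
    if (check_avx2()) {
        fat_dispatch_hs_foo = &avx2_hs_foo;
    } else {
        fat_dispatch_hs_foo = &error_hs_foo;
    }
    /* CONNECT_ARGS_1(hs_error_t, hs_foo, x): complete the first call
     * through the freshly resolved pointer */
    return (*fat_dispatch_hs_foo)(x);
}
/* CONNECT_DISPATCH_2 + CONNECT_ARGS_3: the public entry point */
HS_PUBLIC_API
hs_error_t hs_foo(int x) {
    return (*fat_dispatch_hs_foo)(x);
}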
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
/* composing the static redirect functions gets a bit ugly: we necessarily
 * need first the typed arg list and then just the arg names, twice in a
 * row, to define the redirect function and then the dispatched
 * function call */
CREATE_DISPATCH(hs_error_t, hs_scan, const hs_database_t *db, const char *data,
unsigned length, unsigned flags, hs_scratch_t *scratch,
match_event_handler onEvent, void *userCtx);
CONNECT_ARGS_1(hs_error_t, hs_scan, db, data, length, flags, scratch, onEvent, userCtx);
CONNECT_DISPATCH_2(hs_error_t, hs_scan, const hs_database_t *db, const char *data,
unsigned length, unsigned flags, hs_scratch_t *scratch,
match_event_handler onEvent, void *userCtx);
CONNECT_ARGS_3(hs_error_t, hs_scan, db, data, length, flags, scratch, onEvent, userCtx);
CREATE_DISPATCH(hs_error_t, hs_stream_size, const hs_database_t *database,
size_t *stream_size);
CONNECT_ARGS_1(hs_error_t, hs_stream_size, database, stream_size);
CONNECT_DISPATCH_2(hs_error_t, hs_stream_size, const hs_database_t *database,
size_t *stream_size);
CONNECT_ARGS_3(hs_error_t, hs_stream_size, database, stream_size);
CREATE_DISPATCH(hs_error_t, hs_database_size, const hs_database_t *db,
size_t *size);
CONNECT_ARGS_1(hs_error_t, hs_database_size, db, size);
CONNECT_DISPATCH_2(hs_error_t, hs_database_size, const hs_database_t *db,
size_t *size);
CONNECT_ARGS_3(hs_error_t, hs_database_size, db, size);
CREATE_DISPATCH(hs_error_t, dbIsValid, const hs_database_t *db);
CONNECT_ARGS_1(hs_error_t, dbIsValid, db);
CONNECT_DISPATCH_2(hs_error_t, dbIsValid, const hs_database_t *db);
CONNECT_ARGS_3(hs_error_t, dbIsValid, db);
CREATE_DISPATCH(hs_error_t, hs_free_database, hs_database_t *db);
CONNECT_ARGS_1(hs_error_t, hs_free_database, db);
CONNECT_DISPATCH_2(hs_error_t, hs_free_database, hs_database_t *db);
CONNECT_ARGS_3(hs_error_t, hs_free_database, db);
CREATE_DISPATCH(hs_error_t, hs_open_stream, const hs_database_t *db,
unsigned int flags, hs_stream_t **stream);
CONNECT_ARGS_1(hs_error_t, hs_open_stream, db, flags, stream);
CONNECT_DISPATCH_2(hs_error_t, hs_open_stream, const hs_database_t *db,
unsigned int flags, hs_stream_t **stream);
CONNECT_ARGS_3(hs_error_t, hs_open_stream, db, flags, stream);
CREATE_DISPATCH(hs_error_t, hs_scan_stream, hs_stream_t *id, const char *data,
unsigned int length, unsigned int flags, hs_scratch_t *scratch,
match_event_handler onEvent, void *ctxt);
CONNECT_ARGS_1(hs_error_t, hs_scan_stream, id, data, length, flags, scratch, onEvent, ctxt);
CONNECT_DISPATCH_2(hs_error_t, hs_scan_stream, hs_stream_t *id, const char *data,
unsigned int length, unsigned int flags, hs_scratch_t *scratch,
match_event_handler onEvent, void *ctxt);
CONNECT_ARGS_3(hs_error_t, hs_scan_stream, id, data, length, flags, scratch, onEvent, ctxt);
CREATE_DISPATCH(hs_error_t, hs_close_stream, hs_stream_t *id,
hs_scratch_t *scratch, match_event_handler onEvent, void *ctxt);
CONNECT_ARGS_1(hs_error_t, hs_close_stream, id, scratch, onEvent, ctxt);
CONNECT_DISPATCH_2(hs_error_t, hs_close_stream, hs_stream_t *id,
hs_scratch_t *scratch, match_event_handler onEvent, void *ctxt);
CONNECT_ARGS_3(hs_error_t, hs_close_stream, id, scratch, onEvent, ctxt);
CREATE_DISPATCH(hs_error_t, hs_scan_vector, const hs_database_t *db,
const char *const *data, const unsigned int *length,
unsigned int count, unsigned int flags, hs_scratch_t *scratch,
match_event_handler onevent, void *context);
CONNECT_ARGS_1(hs_error_t, hs_scan_vector, db, data, length, count, flags, scratch, onevent, context);
CONNECT_DISPATCH_2(hs_error_t, hs_scan_vector, const hs_database_t *db,
const char *const *data, const unsigned int *length,
unsigned int count, unsigned int flags, hs_scratch_t *scratch,
match_event_handler onevent, void *context);
CONNECT_ARGS_3(hs_error_t, hs_scan_vector, db, data, length, count, flags, scratch, onevent, context);
CREATE_DISPATCH(hs_error_t, hs_database_info, const hs_database_t *db, char **info);
CONNECT_ARGS_1(hs_error_t, hs_database_info, db, info);
CONNECT_DISPATCH_2(hs_error_t, hs_database_info, const hs_database_t *db, char **info);
CONNECT_ARGS_3(hs_error_t, hs_database_info, db, info);
CREATE_DISPATCH(hs_error_t, hs_copy_stream, hs_stream_t **to_id,
const hs_stream_t *from_id);
CONNECT_ARGS_1(hs_error_t, hs_copy_stream, to_id, from_id);
CONNECT_DISPATCH_2(hs_error_t, hs_copy_stream, hs_stream_t **to_id,
const hs_stream_t *from_id);
CONNECT_ARGS_3(hs_error_t, hs_copy_stream, to_id, from_id);
CREATE_DISPATCH(hs_error_t, hs_reset_stream, hs_stream_t *id,
unsigned int flags, hs_scratch_t *scratch,
match_event_handler onEvent, void *context);
CONNECT_ARGS_1(hs_error_t, hs_reset_stream, id, flags, scratch, onEvent, context);
CONNECT_DISPATCH_2(hs_error_t, hs_reset_stream, hs_stream_t *id,
unsigned int flags, hs_scratch_t *scratch,
match_event_handler onEvent, void *context);
CONNECT_ARGS_3(hs_error_t, hs_reset_stream, id, flags, scratch, onEvent, context);
CREATE_DISPATCH(hs_error_t, hs_reset_and_copy_stream, hs_stream_t *to_id,
const hs_stream_t *from_id, hs_scratch_t *scratch,
match_event_handler onEvent, void *context);
CONNECT_ARGS_1(hs_error_t, hs_reset_and_copy_stream, to_id, from_id, scratch, onEvent, context);
CONNECT_DISPATCH_2(hs_error_t, hs_reset_and_copy_stream, hs_stream_t *to_id,
const hs_stream_t *from_id, hs_scratch_t *scratch,
match_event_handler onEvent, void *context);
CONNECT_ARGS_3(hs_error_t, hs_reset_and_copy_stream, to_id, from_id, scratch, onEvent, context);
CREATE_DISPATCH(hs_error_t, hs_serialize_database, const hs_database_t *db,
char **bytes, size_t *length);
CONNECT_ARGS_1(hs_error_t, hs_serialize_database, db, bytes, length);
CONNECT_DISPATCH_2(hs_error_t, hs_serialize_database, const hs_database_t *db,
char **bytes, size_t *length);
CONNECT_ARGS_3(hs_error_t, hs_serialize_database, db, bytes, length);
CREATE_DISPATCH(hs_error_t, hs_deserialize_database, const char *bytes,
const size_t length, hs_database_t **db);
CONNECT_ARGS_1(hs_error_t, hs_deserialize_database, bytes, length, db);
CONNECT_DISPATCH_2(hs_error_t, hs_deserialize_database, const char *bytes,
const size_t length, hs_database_t **db);
CONNECT_ARGS_3(hs_error_t, hs_deserialize_database, bytes, length, db);
CREATE_DISPATCH(hs_error_t, hs_deserialize_database_at, const char *bytes,
const size_t length, hs_database_t *db);
CONNECT_ARGS_1(hs_error_t, hs_deserialize_database_at, bytes, length, db);
CONNECT_DISPATCH_2(hs_error_t, hs_deserialize_database_at, const char *bytes,
const size_t length, hs_database_t *db);
CONNECT_ARGS_3(hs_error_t, hs_deserialize_database_at, bytes, length, db);
CREATE_DISPATCH(hs_error_t, hs_serialized_database_info, const char *bytes,
size_t length, char **info);
CONNECT_ARGS_1(hs_error_t, hs_serialized_database_info, bytes, length, info);
CONNECT_DISPATCH_2(hs_error_t, hs_serialized_database_info, const char *bytes,
size_t length, char **info);
CONNECT_ARGS_3(hs_error_t, hs_serialized_database_info, bytes, length, info);
CREATE_DISPATCH(hs_error_t, hs_serialized_database_size, const char *bytes,
const size_t length, size_t *deserialized_size);
CONNECT_ARGS_1(hs_error_t, hs_serialized_database_size, bytes, length, deserialized_size);
CONNECT_DISPATCH_2(hs_error_t, hs_serialized_database_size, const char *bytes,
const size_t length, size_t *deserialized_size);
CONNECT_ARGS_3(hs_error_t, hs_serialized_database_size, bytes, length, deserialized_size);
CREATE_DISPATCH(hs_error_t, hs_compress_stream, const hs_stream_t *stream,
char *buf, size_t buf_space, size_t *used_space);
CONNECT_ARGS_1(hs_error_t, hs_compress_stream, stream,
buf, buf_space, used_space);
CONNECT_DISPATCH_2(hs_error_t, hs_compress_stream, const hs_stream_t *stream,
char *buf, size_t buf_space, size_t *used_space);
CONNECT_ARGS_3(hs_error_t, hs_compress_stream, stream,
buf, buf_space, used_space);
CREATE_DISPATCH(hs_error_t, hs_expand_stream, const hs_database_t *db,
hs_stream_t **stream, const char *buf,size_t buf_size);
CONNECT_ARGS_1(hs_error_t, hs_expand_stream, db, stream, buf,buf_size);
CONNECT_DISPATCH_2(hs_error_t, hs_expand_stream, const hs_database_t *db,
hs_stream_t **stream, const char *buf,size_t buf_size);
CONNECT_ARGS_3(hs_error_t, hs_expand_stream, db, stream, buf,buf_size);
CREATE_DISPATCH(hs_error_t, hs_reset_and_expand_stream, hs_stream_t *to_stream,
const char *buf, size_t buf_size, hs_scratch_t *scratch,
match_event_handler onEvent, void *context);
CONNECT_ARGS_1(hs_error_t, hs_reset_and_expand_stream, to_stream,
buf, buf_size, scratch, onEvent, context);
CONNECT_DISPATCH_2(hs_error_t, hs_reset_and_expand_stream, hs_stream_t *to_stream,
const char *buf, size_t buf_size, hs_scratch_t *scratch,
match_event_handler onEvent, void *context);
CONNECT_ARGS_3(hs_error_t, hs_reset_and_expand_stream, to_stream,
buf, buf_size, scratch, onEvent, context);
/** INTERNALS **/
CREATE_DISPATCH(u32, Crc32c_ComputeBuf, u32 inCrc32, const void *buf, size_t bufLen);
CONNECT_ARGS_1(u32, Crc32c_ComputeBuf, inCrc32, buf, bufLen);
CONNECT_DISPATCH_2(u32, Crc32c_ComputeBuf, u32 inCrc32, const void *buf, size_t bufLen);
CONNECT_ARGS_3(u32, Crc32c_ComputeBuf, inCrc32, buf, bufLen);
#pragma GCC diagnostic pop
#pragma GCC diagnostic pop

View File

@@ -298,7 +298,7 @@ void get_conf_stride_4(const u8 *itPtr, UNUSED const u8 *start_ptr,
static really_inline
void do_confirm_fdr(u64a *conf, u8 offset, hwlmcb_rv_t *control,
const u32 *confBase, const struct FDR_Runtime_Args *a,
const u8 *ptr, u32 *last_match_id, struct zone *z) {
const u8 *ptr, u32 *last_match_id, const struct zone *z) {
const u8 bucket = 8;
if (likely(!*conf)) {

View File

@@ -52,14 +52,14 @@ u32 TeddyEngineDescription::getDefaultFloodSuffixLength() const {
void getTeddyDescriptions(vector<TeddyEngineDescription> *out) {
static const TeddyEngineDef defns[] = {
{ 3, 0 | HS_CPU_FEATURES_AVX2, 1, 16, false },
{ 4, 0 | HS_CPU_FEATURES_AVX2, 1, 16, true },
{ 5, 0 | HS_CPU_FEATURES_AVX2, 2, 16, false },
{ 6, 0 | HS_CPU_FEATURES_AVX2, 2, 16, true },
{ 7, 0 | HS_CPU_FEATURES_AVX2, 3, 16, false },
{ 8, 0 | HS_CPU_FEATURES_AVX2, 3, 16, true },
{ 9, 0 | HS_CPU_FEATURES_AVX2, 4, 16, false },
{ 10, 0 | HS_CPU_FEATURES_AVX2, 4, 16, true },
{ 3, HS_CPU_FEATURES_AVX2, 1, 16, false },
{ 4, HS_CPU_FEATURES_AVX2, 1, 16, true },
{ 5, HS_CPU_FEATURES_AVX2, 2, 16, false },
{ 6, HS_CPU_FEATURES_AVX2, 2, 16, true },
{ 7, HS_CPU_FEATURES_AVX2, 3, 16, false },
{ 8, HS_CPU_FEATURES_AVX2, 3, 16, true },
{ 9, HS_CPU_FEATURES_AVX2, 4, 16, false },
{ 10, HS_CPU_FEATURES_AVX2, 4, 16, true },
{ 11, 0, 1, 8, false },
{ 12, 0, 1, 8, true },
{ 13, 0, 2, 8, false },

View File

@@ -400,7 +400,7 @@ char castleFindMatch(const struct Castle *c, const u64a begin, const u64a end,
}
static really_inline
u64a subCastleNextMatch(const struct Castle *c, void *full_state,
u64a subCastleNextMatch(const struct Castle *c, const void *full_state,
void *stream_state, const u64a loc,
const u32 subIdx) {
DEBUG_PRINTF("subcastle %u\n", subIdx);
@@ -489,7 +489,6 @@ char castleMatchLoop(const struct Castle *c, const u64a begin, const u64a end,
// full_state (scratch).
u64a offset = end; // min offset of next match
u32 activeIdx = 0;
mmbit_clear(matching, c->numRepeats);
if (c->exclusive) {
u8 *active = (u8 *)stream_state;
@@ -497,7 +496,7 @@ char castleMatchLoop(const struct Castle *c, const u64a begin, const u64a end,
for (u32 i = mmbit_iterate(groups, c->numGroups, MMB_INVALID);
i != MMB_INVALID; i = mmbit_iterate(groups, c->numGroups, i)) {
u8 *cur = active + i * c->activeIdxSize;
activeIdx = partial_load_u32(cur, c->activeIdxSize);
u32 activeIdx = partial_load_u32(cur, c->activeIdxSize);
u64a match = subCastleNextMatch(c, full_state, stream_state,
loc, activeIdx);
set_matching(c, match, groups, matching, c->numGroups, i,
@@ -907,7 +906,6 @@ s64a castleLastKillLoc(const struct Castle *c, struct mq *q) {
if (castleRevScan(c, q->history, sp + hlen, ep + hlen, &loc)) {
return (s64a)loc - hlen;
}
ep = 0;
}
return sp - 1; /* the repeats are never killed */

View File

@@ -655,7 +655,8 @@ buildCastle(const CastleProto &proto,
if (!stale_iter.empty()) {
c->staleIterOffset = verify_u32(ptr - base_ptr);
copy_bytes(ptr, stale_iter);
ptr += byte_length(stale_iter);
// Removed unused increment operation
// ptr += byte_length(stale_iter);
}
return nfa;

View File

@@ -332,7 +332,7 @@ void EXPIRE_ESTATE_FN(const IMPL_NFA_T *limex, struct CONTEXT_T *ctx,
// UE-1636) need to guard cyclic tug-accepts as well.
static really_inline
char LIMEX_INACCEPT_FN(const IMPL_NFA_T *limex, STATE_T state,
union RepeatControl *repeat_ctrl, char *repeat_state,
const union RepeatControl *repeat_ctrl, const char *repeat_state,
u64a offset, ReportID report) {
assert(limex);
@@ -382,7 +382,7 @@ char LIMEX_INACCEPT_FN(const IMPL_NFA_T *limex, STATE_T state,
static really_inline
char LIMEX_INANYACCEPT_FN(const IMPL_NFA_T *limex, STATE_T state,
union RepeatControl *repeat_ctrl, char *repeat_state,
const union RepeatControl *repeat_ctrl, const char *repeat_state,
u64a offset) {
assert(limex);

View File

@@ -1572,7 +1572,7 @@ u32 findMaxVarShift(const build_info &args, u32 nShifts) {
static
int getLimexScore(const build_info &args, u32 nShifts) {
const NGHolder &h = args.h;
u32 maxVarShift = nShifts;
u32 maxVarShift;
int score = 0;
score += SHIFT_COST * nShifts;

View File

@@ -512,7 +512,7 @@ size_t find_last_bad(const struct mpv_kilopuff *kp, const u8 *buf,
verm_restart:;
assert(buf[curr] == kp->u.verm.c);
size_t test = curr;
size_t test;
if (curr + min_rep < length) {
test = curr + min_rep;
} else {
@@ -534,7 +534,7 @@ size_t find_last_bad(const struct mpv_kilopuff *kp, const u8 *buf,
m128 hi = kp->u.shuf.mask_hi;
shuf_restart:
assert(do_single_shufti(lo, hi, buf[curr]));
size_t test = curr;
size_t test;
if (curr + min_rep < length) {
test = curr + min_rep;
} else {
@@ -556,7 +556,7 @@ size_t find_last_bad(const struct mpv_kilopuff *kp, const u8 *buf,
const m128 mask1 = kp->u.truffle.mask1;
const m128 mask2 = kp->u.truffle.mask2;
truffle_restart:;
size_t test = curr;
size_t test;
if (curr + min_rep < length) {
test = curr + min_rep;
} else {
@@ -582,7 +582,7 @@ size_t find_last_bad(const struct mpv_kilopuff *kp, const u8 *buf,
nverm_restart:;
assert(buf[curr] != kp->u.verm.c);
size_t test = curr;
size_t test;
if (curr + min_rep < length) {
test = curr + min_rep;
} else {
@@ -607,7 +607,7 @@ size_t find_last_bad(const struct mpv_kilopuff *kp, const u8 *buf,
}
static really_inline
void restartKilo(const struct mpv *m, UNUSED u8 *active, u8 *reporters,
void restartKilo(const struct mpv *m, UNUSED const u8 *active, u8 *reporters,
struct mpv_decomp_state *dstate, struct mpv_pq_item *pq,
const u8 *buf, u64a prev_limit, size_t buf_length, u32 i) {
const struct mpv_kilopuff *kp = (const void *)(m + 1);

View File

@@ -94,9 +94,6 @@ u32 repeatRecurTable(struct RepeatStateInfo *info, const depth &repeatMax,
static
u32 findOptimalPatchSize(struct RepeatStateInfo *info, const depth &repeatMax,
const u32 minPeriod, u32 rv) {
u32 cnt = 0;
u32 patch_bits = 0;
u32 total_size = 0;
u32 min = ~0U;
u32 patch_len = 0;
@@ -105,11 +102,11 @@ u32 findOptimalPatchSize(struct RepeatStateInfo *info, const depth &repeatMax,
}
for (u32 i = minPeriod; i <= rv; i++) {
cnt = ((u32)repeatMax + (i - 1)) / i + 1;
u32 cnt = ((u32)repeatMax + (i - 1)) / i + 1;
// no bit packing version
patch_bits = calcPackedBits(info->table[i]);
total_size = (patch_bits + 7U) / 8U * cnt;
u32 patch_bits = calcPackedBits(info->table[i]);
u32 total_size = (patch_bits + 7U) / 8U * cnt;
if (total_size < min) {
patch_len = i;

View File

@@ -154,7 +154,7 @@ char fireReports(const struct sheng *sh, NfaCallback cb, void *ctxt,
return MO_CONTINUE_MATCHING; /* continue execution */
}
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
// Sheng32
static really_inline
const struct sheng32 *get_sheng32(const struct NFA *n) {
@@ -351,7 +351,7 @@ char fireReports64(const struct sheng64 *sh, NfaCallback cb, void *ctxt,
}
return MO_CONTINUE_MATCHING; /* continue execution */
}
#endif // end of HAVE_AVX512VBMI
#endif // end of HAVE_AVX512VBMI || HAVE_SVE
/* include Sheng function definitions */
#include "sheng_defs.h"
@@ -871,7 +871,7 @@ char nfaExecSheng_expandState(UNUSED const struct NFA *nfa, void *dest,
return 0;
}
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
// Sheng32
static really_inline
char runSheng32Cb(const struct sheng32 *sh, NfaCallback cb, void *ctxt,
@@ -1874,4 +1874,4 @@ char nfaExecSheng64_expandState(UNUSED const struct NFA *nfa, void *dest,
*(u8 *)dest = *(const u8 *)src;
return 0;
}
#endif // end of HAVE_AVX512VBMI
#endif // end of HAVE_AVX512VBMI || HAVE_SVE

View File

@@ -58,7 +58,7 @@ char nfaExecSheng_reportCurrent(const struct NFA *n, struct mq *q);
char nfaExecSheng_B(const struct NFA *n, u64a offset, const u8 *buffer,
size_t length, NfaCallback cb, void *context);
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define nfaExecSheng32_B_Reverse NFA_API_NO_IMPL
#define nfaExecSheng32_zombie_status NFA_API_ZOMBIE_NO_IMPL
@@ -106,8 +106,7 @@ char nfaExecSheng64_reportCurrent(const struct NFA *n, struct mq *q);
char nfaExecSheng64_B(const struct NFA *n, u64a offset, const u8 *buffer,
size_t length, NfaCallback cb, void *context);
#else // !HAVE_AVX512VBMI
#else // !HAVE_AVX512VBMI && !HAVE_SVE
#define nfaExecSheng32_B_Reverse NFA_API_NO_IMPL
#define nfaExecSheng32_zombie_status NFA_API_ZOMBIE_NO_IMPL
@@ -138,6 +137,7 @@ char nfaExecSheng64_B(const struct NFA *n, u64a offset, const u8 *buffer,
#define nfaExecSheng64_testEOD NFA_API_NO_IMPL
#define nfaExecSheng64_reportCurrent NFA_API_NO_IMPL
#define nfaExecSheng64_B NFA_API_NO_IMPL
#endif // end of HAVE_AVX512VBMI
#endif // end of HAVE_AVX512VBMI || defined(HAVE_SVE)
#endif /* SHENG_H_ */

View File

@@ -52,7 +52,7 @@ u8 hasInterestingStates(const u8 a, const u8 b, const u8 c, const u8 d) {
return (a | b | c | d) & (SHENG_STATE_FLAG_MASK);
}
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
static really_inline
u8 isDeadState32(const u8 a) {
return a & SHENG32_STATE_DEAD;
@@ -108,7 +108,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define SHENG_IMPL sheng_cod
#define DEAD_FUNC isDeadState
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_cod
#define DEAD_FUNC32 isDeadState32
#define ACCEPT_FUNC32 isAcceptState32
@@ -121,7 +121,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef DEAD_FUNC32
#undef ACCEPT_FUNC32
@@ -135,7 +135,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define SHENG_IMPL sheng_co
#define DEAD_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_co
#define DEAD_FUNC32 dummyFunc
#define ACCEPT_FUNC32 isAcceptState32
@@ -148,7 +148,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef DEAD_FUNC32
#undef ACCEPT_FUNC32
@@ -162,7 +162,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define SHENG_IMPL sheng_samd
#define DEAD_FUNC isDeadState
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_samd
#define DEAD_FUNC32 isDeadState32
#define ACCEPT_FUNC32 isAcceptState32
@@ -175,7 +175,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef DEAD_FUNC32
#undef ACCEPT_FUNC32
@@ -189,7 +189,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define SHENG_IMPL sheng_sam
#define DEAD_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_sam
#define DEAD_FUNC32 dummyFunc
#define ACCEPT_FUNC32 isAcceptState32
@@ -202,7 +202,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef DEAD_FUNC32
#undef ACCEPT_FUNC32
@@ -216,7 +216,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define SHENG_IMPL sheng_nmd
#define DEAD_FUNC isDeadState
#define ACCEPT_FUNC dummyFunc
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_nmd
#define DEAD_FUNC32 isDeadState32
#define ACCEPT_FUNC32 dummyFunc
@@ -229,7 +229,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef DEAD_FUNC32
#undef ACCEPT_FUNC32
@@ -243,7 +243,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define SHENG_IMPL sheng_nm
#define DEAD_FUNC dummyFunc
#define ACCEPT_FUNC dummyFunc
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_nm
#define DEAD_FUNC32 dummyFunc
#define ACCEPT_FUNC32 dummyFunc
@@ -256,7 +256,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef DEAD_FUNC32
#undef ACCEPT_FUNC32
@@ -277,7 +277,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC isAccelState
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_coda
#define INTERESTING_FUNC32 hasInterestingStates32
#define INNER_DEAD_FUNC32 isDeadState32
@@ -296,7 +296,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32
@@ -316,7 +316,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_cod
#define INTERESTING_FUNC32 hasInterestingStates32
#define INNER_DEAD_FUNC32 isDeadState32
@@ -339,7 +339,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32
@@ -363,7 +363,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC isAccelState
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_coa
#define INTERESTING_FUNC32 hasInterestingStates32
#define INNER_DEAD_FUNC32 dummyFunc
@@ -382,7 +382,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32
@@ -402,7 +402,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_co
#define INTERESTING_FUNC32 hasInterestingStates32
#define INNER_DEAD_FUNC32 dummyFunc
@@ -425,7 +425,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32
@@ -449,7 +449,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC isAccelState
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_samda
#define INTERESTING_FUNC32 hasInterestingStates32
#define INNER_DEAD_FUNC32 isDeadState32
@@ -468,7 +468,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32
@@ -488,7 +488,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_samd
#define INTERESTING_FUNC32 hasInterestingStates32
#define INNER_DEAD_FUNC32 isDeadState32
@@ -511,7 +511,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32
@@ -535,7 +535,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC isAccelState
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_sama
#define INTERESTING_FUNC32 hasInterestingStates32
#define INNER_DEAD_FUNC32 dummyFunc
@@ -554,7 +554,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32
@@ -574,7 +574,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_sam
#define INTERESTING_FUNC32 hasInterestingStates32
#define INNER_DEAD_FUNC32 dummyFunc
@@ -597,7 +597,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32
@@ -623,7 +623,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC isAccelState
#define ACCEPT_FUNC dummyFunc
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_nmda
#define INTERESTING_FUNC32 dummyFunc4
#define INNER_DEAD_FUNC32 dummyFunc
@@ -642,7 +642,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32
@@ -662,7 +662,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC dummyFunc
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_nmd
#define INTERESTING_FUNC32 dummyFunc4
#define INNER_DEAD_FUNC32 dummyFunc
@@ -685,7 +685,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32
@@ -712,7 +712,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC dummyFunc
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#define SHENG32_IMPL sheng32_4_nm
#define INTERESTING_FUNC32 dummyFunc4
#define INNER_DEAD_FUNC32 dummyFunc
@@ -735,7 +735,7 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
#undef SHENG32_IMPL
#undef INTERESTING_FUNC32
#undef INNER_DEAD_FUNC32

View File

@@ -96,7 +96,7 @@ char SHENG_IMPL(u8 *state, NfaCallback cb, void *ctxt, const struct sheng *s,
return MO_CONTINUE_MATCHING;
}
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
static really_inline
char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
const struct sheng32 *s,
@@ -114,14 +114,28 @@ char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
}
DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start));
#if defined(HAVE_SVE)
const svbool_t lane_pred_32 = svwhilelt_b8(0, 32);
svuint8_t cur_state = svdup_u8(*state);
svuint8_t tbl_mask = svdup_u8((unsigned char)0x1F);
const m512 *masks = s->succ_masks;
#else
m512 cur_state = set1_64x8(*state);
const m512 *masks = s->succ_masks;
#endif
while (likely(cur_buf != end)) {
const u8 c = *cur_buf;
#if defined(HAVE_SVE)
svuint8_t succ_mask = svld1(lane_pred_32, (const u8*)(masks + c));
cur_state = svtbl(succ_mask, svand_x(svptrue_b8(), tbl_mask, cur_state));
const u8 tmp = svlastb(lane_pred_32, cur_state);
#else
const m512 succ_mask = masks[c];
cur_state = vpermb512(cur_state, succ_mask);
const u8 tmp = movd512(cur_state);
#endif
DEBUG_PRINTF("c: %02hhx '%c'\n", c, ourisprint(c) ? c : '?');
DEBUG_PRINTF("s: %u (flag: %u)\n", tmp & SHENG32_STATE_MASK,
@@ -153,7 +167,11 @@ char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
}
cur_buf++;
}
#if defined(HAVE_SVE)
*state = svlastb(lane_pred_32, cur_state);
#else
*state = movd512(cur_state);
#endif
*scan_end = cur_buf;
return MO_CONTINUE_MATCHING;
}
@@ -175,14 +193,28 @@ char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
}
DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start));
#if defined(HAVE_SVE)
const svbool_t lane_pred_64 = svwhilelt_b8(0, 64);
svuint8_t cur_state = svdup_u8(*state);
svuint8_t tbl_mask = svdup_u8((unsigned char)0x3F);
const m512 *masks = s->succ_masks;
#else
m512 cur_state = set1_64x8(*state);
const m512 *masks = s->succ_masks;
#endif
while (likely(cur_buf != end)) {
const u8 c = *cur_buf;
#if defined(HAVE_SVE)
svuint8_t succ_mask = svld1(lane_pred_64, (const u8*)(masks + c));
cur_state = svtbl(succ_mask, svand_x(svptrue_b8(), tbl_mask, cur_state));
const u8 tmp = svlastb(lane_pred_64, cur_state);
#else
const m512 succ_mask = masks[c];
cur_state = vpermb512(cur_state, succ_mask);
const u8 tmp = movd512(cur_state);
#endif
DEBUG_PRINTF("c: %02hhx '%c'\n", c, ourisprint(c) ? c : '?');
DEBUG_PRINTF("s: %u (flag: %u)\n", tmp & SHENG64_STATE_MASK,
@@ -214,7 +246,11 @@ char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
}
cur_buf++;
}
#if defined(HAVE_SVE)
*state = svlastb(lane_pred_64, cur_state);
#else
*state = movd512(cur_state);
#endif
*scan_end = cur_buf;
return MO_CONTINUE_MATCHING;
}
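
In scalar terms, each SVE block above performs one DFA transition: every lane of cur_state carries the current state id, svtbl gathers the succ_masks row for input byte c at index (state & 0x1F), and svlastb pulls the scalar state back out. A reference sketch under the simplifying assumption that succ_masks is a dense table of 256 rows of 32 next-state bytes (the real rows are m512-aligned, and flag bits ride in the high bits of each entry):

/* One sheng32 transition; scalar reference for the vector step above. */
static inline unsigned char sheng32_step_ref(
        const unsigned char succ_masks[256][32],
        unsigned char state, unsigned char c) {
    return succ_masks[c][state & 0x1F];    /* next = delta[c][state] */
}

The sheng64 variant is identical with a 0x3F index mask, matching the two tbl_mask constants loaded above.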

View File

@@ -283,7 +283,7 @@ char SHENG_IMPL(u8 *state, NfaCallback cb, void *ctxt, const struct sheng *s,
return MO_CONTINUE_MATCHING;
}
#if defined(HAVE_AVX512VBMI)
#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE)
static really_inline
char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
const struct sheng32 *s,
@@ -320,8 +320,15 @@ char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
return MO_CONTINUE_MATCHING;
}
#if defined(HAVE_SVE)
const svbool_t lane_pred_32 = svwhilelt_b8(0, 32);
svuint8_t cur_state = svdup_u8(*state);
svuint8_t tbl_mask = svdup_u8((unsigned char)0x1F);
const m512 *masks = s->succ_masks;
#else
m512 cur_state = set1_64x8(*state);
const m512 *masks = s->succ_masks;
#endif
while (likely(end - cur_buf >= 4)) {
const u8 *b1 = cur_buf;
@@ -333,6 +340,23 @@ char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
const u8 c3 = *b3;
const u8 c4 = *b4;
#if defined(HAVE_SVE)
svuint8_t succ_mask1 = svld1(lane_pred_32, (const u8*)(masks+c1));
cur_state = svtbl(succ_mask1, svand_x(svptrue_b8(), tbl_mask, cur_state));
const u8 a1 = svlastb(lane_pred_32, cur_state);
svuint8_t succ_mask2 = svld1(lane_pred_32, (const u8*)(masks+c2));
cur_state = svtbl(succ_mask2, svand_x(svptrue_b8(), tbl_mask, cur_state));
const u8 a2 = svlastb(lane_pred_32, cur_state);
svuint8_t succ_mask3 = svld1(lane_pred_32, (const u8*)(masks+c3));
cur_state = svtbl(succ_mask3, svand_x(svptrue_b8(), tbl_mask, cur_state));
const u8 a3 = svlastb(lane_pred_32, cur_state);
svuint8_t succ_mask4 = svld1(lane_pred_32, (const u8*)(masks+c4));
cur_state = svtbl(succ_mask4, svand_x(svptrue_b8(), tbl_mask, cur_state));
const u8 a4 = svlastb(lane_pred_32, cur_state);
#else
const m512 succ_mask1 = masks[c1];
cur_state = vpermb512(cur_state, succ_mask1);
const u8 a1 = movd512(cur_state);
@@ -348,6 +372,7 @@ char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
const m512 succ_mask4 = masks[c4];
cur_state = vpermb512(cur_state, succ_mask4);
const u8 a4 = movd512(cur_state);
#endif
DEBUG_PRINTF("c: %02hhx '%c'\n", c1, ourisprint(c1) ? c1 : '?');
DEBUG_PRINTF("s: %u (flag: %u)\n", a1 & SHENG32_STATE_MASK,
@@ -517,7 +542,11 @@ char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
};
cur_buf += 4;
}
#if defined(HAVE_SVE)
*state = svlastb(lane_pred_32, cur_state);
#else
*state = movd512(cur_state);
#endif
*scan_end = cur_buf;
return MO_CONTINUE_MATCHING;
}
@@ -541,9 +570,15 @@ char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
*scan_end = end;
return MO_CONTINUE_MATCHING;
}
#if defined(HAVE_SVE)
const svbool_t lane_pred_64 = svwhilelt_b8(0, 64);
svuint8_t cur_state = svdup_u8(*state);
svuint8_t tbl_mask = svdup_u8((unsigned char)0x3F);
const m512 *masks = s->succ_masks;
#else
m512 cur_state = set1_64x8(*state);
const m512 *masks = s->succ_masks;
#endif
while (likely(end - cur_buf >= 4)) {
const u8 *b1 = cur_buf;
@@ -555,6 +590,23 @@ char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
const u8 c3 = *b3;
const u8 c4 = *b4;
#if defined(HAVE_SVE)
svuint8_t succ_mask1 = svld1(lane_pred_64, (const u8*)(masks+c1));
cur_state = svtbl(succ_mask1, svand_x(svptrue_b8(), tbl_mask, cur_state));
const u8 a1 = svlastb(lane_pred_64, cur_state);
svuint8_t succ_mask2 = svld1(lane_pred_64, (const u8*)(masks+c2));
cur_state = svtbl(succ_mask2, svand_x(svptrue_b8(), tbl_mask, cur_state));
const u8 a2 = svlastb(lane_pred_64, cur_state);
svuint8_t succ_mask3 = svld1(lane_pred_64, (const u8*)(masks+c3));
cur_state = svtbl(succ_mask3, svand_x(svptrue_b8(), tbl_mask, cur_state));
const u8 a3 = svlastb(lane_pred_64, cur_state);
svuint8_t succ_mask4 = svld1(lane_pred_64, (const u8*)(masks+c4));
cur_state = svtbl(succ_mask4, svand_x(svptrue_b8(), tbl_mask, cur_state));
const u8 a4 = svlastb(lane_pred_64, cur_state);
#else
const m512 succ_mask1 = masks[c1];
cur_state = vpermb512(cur_state, succ_mask1);
const u8 a1 = movd512(cur_state);
@@ -570,6 +622,7 @@ char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
const m512 succ_mask4 = masks[c4];
cur_state = vpermb512(cur_state, succ_mask4);
const u8 a4 = movd512(cur_state);
#endif
DEBUG_PRINTF("c: %02hhx '%c'\n", c1, ourisprint(c1) ? c1 : '?');
DEBUG_PRINTF("s: %u (flag: %u)\n", a1 & SHENG64_STATE_MASK,
@@ -703,7 +756,11 @@ char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
}
cur_buf += 4;
}
#if defined(HAVE_SVE)
*state = svlastb(lane_pred_64, cur_state);
#else
*state = movd512(cur_state);
#endif
*scan_end = cur_buf;
return MO_CONTINUE_MATCHING;
}

View File

@@ -730,10 +730,17 @@ bytecode_ptr<NFA> sheng32Compile(raw_dfa &raw, const CompileContext &cc,
return nullptr;
}
#ifdef HAVE_SVE
if (svcntb()<32) {
DEBUG_PRINTF("Sheng32 failed, SVE width is too small!\n");
return nullptr;
}
#else
if (!cc.target_info.has_avx512vbmi()) {
DEBUG_PRINTF("Sheng32 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
return nullptr;
}
#endif
sheng_build_strat strat(raw, rm, only_accel_init);
dfa_info info(strat);
@@ -762,10 +769,17 @@ bytecode_ptr<NFA> sheng64Compile(raw_dfa &raw, const CompileContext &cc,
return nullptr;
}
#ifdef HAVE_SVE
if (svcntb()<64) {
DEBUG_PRINTF("Sheng64 failed, SVE width is too small!\n");
return nullptr;
}
#else
if (!cc.target_info.has_avx512vbmi()) {
DEBUG_PRINTF("Sheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
return nullptr;
}
#endif
sheng_build_strat strat(raw, rm, only_accel_init);
dfa_info info(strat);
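
Both compile-time gates above use the same runtime probe: svcntb() from arm_sve.h returns the SVE register width in bytes, and sheng32/sheng64 need 32 or 64 one-byte states to fit in a single register. A minimal sketch of the check, assuming an SVE-enabled build:

#include <arm_sve.h>

/* True iff this CPU's SVE vectors can hold n DFA states (one byte each). */
static int sheng_state_width_ok(unsigned n) {
    return svcntb() >= n;
}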

View File

@@ -193,9 +193,6 @@ void reduceGraph(NGHolder &g, som_type som, bool utf8,
if (!som) {
mergeCyclicDotStars(g);
}
if (!som) {
removeSiblingsOfStartDotStar(g);
}
}

View File

@@ -165,9 +165,9 @@ void reformAnchoredRepeatsComponent(NGHolder &g,
return;
}
NFAVertex dotV = NGHolder::null_vertex();
set<NFAVertex> otherV;
dotV = findReformable(g, compAnchoredStarts, otherV);
NFAVertex dotV = findReformable(g, compAnchoredStarts, otherV);
if (dotV == NGHolder::null_vertex()) {
DEBUG_PRINTF("no candidate reformable dot found.\n");
return;
@@ -268,9 +268,9 @@ void reformUnanchoredRepeatsComponent(NGHolder &g,
}
while (true) {
NFAVertex dotV = NGHolder::null_vertex();
set<NFAVertex> otherV;
dotV = findReformable(g, compUnanchoredStarts, otherV);
NFAVertex dotV = findReformable(g, compUnanchoredStarts, otherV);
if (dotV == NGHolder::null_vertex()) {
DEBUG_PRINTF("no candidate reformable dot found.\n");
return;

View File

@@ -513,12 +513,12 @@ static
bool doHaig(const NGHolder &g, som_type som,
const vector<vector<CharReach>> &triggers, bool unordered_som,
raw_som_dfa *rdfa) {
u32 state_limit = HAIG_FINAL_DFA_STATE_LIMIT; /* haig never backs down from
a fight */
using StateSet = typename Auto::StateSet;
vector<StateSet> nfa_state_map;
Auto n(g, som, triggers, unordered_som);
try {
u32 state_limit = HAIG_FINAL_DFA_STATE_LIMIT; /* haig never backs down from
a fight */
if (!determinise(n, rdfa->states, state_limit, &nfa_state_map)) {
DEBUG_PRINTF("state limit exceeded\n");
return false;

View File

@@ -321,7 +321,7 @@ struct DAccelScheme {
bool cd_a = buildDvermMask(a.double_byte);
bool cd_b = buildDvermMask(b.double_byte);
if (cd_a != cd_b) {
return cd_a > cd_b;
return cd_a;
}
}
@@ -811,11 +811,9 @@ depth_done:
return true;
}
}
}
// Second option: a two-byte shufti (i.e. less than eight 2-byte
// literals)
if (depth > 1) {
for (unsigned int i = 0; i < (depth - 1); i++) {
if (depthReach[i].count() * depthReach[i+1].count()
<= DOUBLE_SHUFTI_LIMIT) {

View File

@@ -636,12 +636,12 @@ bool reversePathReachSubset(const NFAEdge &e, const NFAVertex &dom,
NFAVertex start = source(e, g);
using RevGraph = boost::reverse_graph<NGHolder, const NGHolder &>;
map<RevGraph::vertex_descriptor, boost::default_color_type> vertexColor;
// Walk the graph backwards from v, examining each node. We fail (return
// false) if we encounter a node with reach NOT a subset of domReach, and
// we stop searching at dom.
try {
map<RevGraph::vertex_descriptor, boost::default_color_type> vertexColor;
depth_first_visit(RevGraph(g), start,
ReachSubsetVisitor(domReach),
make_assoc_property_map(vertexColor),
@@ -664,12 +664,12 @@ bool forwardPathReachSubset(const NFAEdge &e, const NFAVertex &dom,
}
NFAVertex start = target(e, g);
map<NFAVertex, boost::default_color_type> vertexColor;
// Walk the graph forward from v, examining each node. We fail (return
// false) if we encounter a node with reach NOT a subset of domReach, and
// we stop searching at dom.
try {
map<NFAVertex, boost::default_color_type> vertexColor;
depth_first_visit(g, start, ReachSubsetVisitor(domReach),
make_assoc_property_map(vertexColor),
VertexIs<NGHolder, NFAVertex>(dom));

View File

@@ -1292,8 +1292,8 @@ bool doTreePlanningIntl(const NGHolder &g,
DEBUG_PRINTF("add mapped reporters for region %u\n", it->first);
addMappedReporterVertices(it->second, g, copy_to_orig,
plan.back().reporters);
} while (it->second.optional && it != info.rend() &&
(++it)->first > furthest->first);
} while (it != info.rend() && it->second.optional &&
(++it)->first > furthest->first);
return true;
}
@@ -1551,7 +1551,7 @@ bool doSomPlanning(NGHolder &g, bool stuck_in,
DEBUG_PRINTF("region %u contributes reporters to last plan\n",
it->first);
addReporterVertices(it->second, g, plan.back().reporters);
} while (it->second.optional && it != info.rend() &&
} while (it != info.rend() && it->second.optional &&
(++it)->first > furthest->first);
DEBUG_PRINTF("done!\n");

View File

@@ -267,18 +267,6 @@ bool somMayGoBackwards(NFAVertex u, const NGHolder &g,
boost::depth_first_search(c_g, visitor(backEdgeVisitor)
.root_vertex(c_g.start));
for (const auto &e : be) {
NFAVertex s = source(e, c_g);
NFAVertex t = target(e, c_g);
DEBUG_PRINTF("back edge %zu %zu\n", c_g[s].index, c_g[t].index);
if (s != t) {
assert(0);
DEBUG_PRINTF("eek big cycle\n");
rv = true; /* big cycle -> eek */
goto exit;
}
}
DEBUG_PRINTF("checking acyclic+selfloop graph\n");
rv = !firstMatchIsFirst(c_g);

View File

@@ -589,7 +589,7 @@ void getHighlanderReporters(const NGHolder &g, const NFAVertex accept,
verts.insert(v);
next_vertex:
continue;
;
}
}

View File

@@ -314,7 +314,7 @@ void duplicateReport(NGHolder &g, ReportID r_old, ReportID r_new);
/** Construct a reversed copy of an arbitrary NGHolder, mapping starts to
* accepts. */
void reverseHolder(const NGHolder &g, NGHolder &out);
void reverseHolder(const NGHolder &g_in, NGHolder &g);
/** \brief Returns the delay or ~0U if the graph cannot match with
* the trailing literal. */

View File

@@ -348,10 +348,9 @@ void getSimpleRoseLiterals(const NGHolder &g, bool seeking_anchored,
map<NFAVertex, u64a> scores;
map<NFAVertex, unique_ptr<VertLitInfo>> lit_info;
set<ue2_literal> s;
for (auto v : a_dom) {
s = getLiteralSet(g, v, true); /* RHS will take responsibility for any
set<ue2_literal> s = getLiteralSet(g, v, true); /* RHS will take responsibility for any
revisits to the target vertex */
if (s.empty()) {
@@ -2868,7 +2867,6 @@ static
bool splitForImplementability(RoseInGraph &vg, NGHolder &h,
const vector<RoseInEdge> &edges,
const CompileContext &cc) {
vector<pair<ue2_literal, u32>> succ_lits;
DEBUG_PRINTF("trying to split %s with %zu vertices on %zu edges\n",
to_string(h.kind).c_str(), num_vertices(h), edges.size());
@@ -2877,6 +2875,7 @@ bool splitForImplementability(RoseInGraph &vg, NGHolder &h,
}
if (!generates_callbacks(h)) {
vector<pair<ue2_literal, u32>> succ_lits;
for (const auto &e : edges) {
const auto &lit = vg[target(e, vg)].s;
u32 delay = vg[e].graph_lag;
@@ -2889,8 +2888,8 @@ bool splitForImplementability(RoseInGraph &vg, NGHolder &h,
}
unique_ptr<VertLitInfo> split;
bool last_chance = true;
if (h.kind == NFA_PREFIX) {
bool last_chance = true;
auto depths = calcDepths(h);
split = findBestPrefixSplit(h, depths, vg, edges, last_chance, cc);

View File

@@ -109,20 +109,20 @@ void ComponentAlternation::append(unique_ptr<Component> component) {
vector<PositionInfo> ComponentAlternation::first() const {
// firsts come from all our subcomponents in position order. This will
// maintain left-to-right priority order.
vector<PositionInfo> firsts, subfirsts;
vector<PositionInfo> firsts;
for (const auto &c : children) {
subfirsts = c->first();
vector<PositionInfo> subfirsts = c->first();
firsts.insert(firsts.end(), subfirsts.begin(), subfirsts.end());
}
return firsts;
}
vector<PositionInfo> ComponentAlternation::last() const {
vector<PositionInfo> lasts, sublasts;
vector<PositionInfo> lasts;
for (const auto &c : children) {
sublasts = c->last();
vector<PositionInfo> sublasts = c->last();
lasts.insert(lasts.end(), sublasts.begin(), sublasts.end());
}
return lasts;

View File

@@ -320,7 +320,7 @@ void ComponentRepeat::wireRepeats(GlushkovBuildState &bs) {
}
}
DEBUG_PRINTF("wiring up %d optional repeats\n", copies - m_min);
DEBUG_PRINTF("wiring up %u optional repeats\n", copies - m_min);
for (u32 rep = MAX(m_min, 1); rep < copies; rep++) {
vector<PositionInfo> lasts = m_lasts[rep - 1];
if (rep != m_min) {

View File

@@ -157,10 +157,10 @@ void ComponentSequence::finalize() {
}
vector<PositionInfo> ComponentSequence::first() const {
vector<PositionInfo> firsts, subfirsts;
vector<PositionInfo> firsts;
for (const auto &c : children) {
subfirsts = c->first();
vector<PositionInfo> subfirsts = c->first();
replaceEpsilons(firsts, subfirsts);
if (!c->empty()) {
break;
@@ -229,12 +229,12 @@ void applyEpsilonVisits(vector<PositionInfo> &lasts,
}
vector<PositionInfo> ComponentSequence::last() const {
vector<PositionInfo> lasts, sublasts;
vector<PositionInfo> lasts;
vector<eps_info> visits(1);
auto i = children.rbegin(), e = children.rend();
for (; i != e; ++i) {
sublasts = (*i)->last();
vector<PositionInfo> sublasts = (*i)->last();
applyEpsilonVisits(sublasts, visits);
lasts.insert(lasts.end(), sublasts.begin(), sublasts.end());
if ((*i)->empty()) {

View File

@@ -260,14 +260,14 @@ void ParsedLogical::parseLogicalCombination(unsigned id, const char *logical,
u32 ekey, u64a min_offset,
u64a max_offset) {
u32 ckey = getCombKey(id);
vector<LogicalOperator> op_stack;
vector<u32> subid_stack;
u32 lkey_start = INVALID_LKEY; // logical operation's lkey
u32 paren = 0; // parentheses
u32 digit = (u32)-1; // digit start offset, invalid offset is -1
u32 subid = (u32)-1;
u32 i;
try {
vector<LogicalOperator> op_stack;
u32 paren = 0; // parentheses
for (i = 0; logical[i]; i++) {
if (isdigit(logical[i])) {
if (digit == (u32)-1) { // new digit start
@@ -284,7 +284,7 @@ void ParsedLogical::parseLogicalCombination(unsigned id, const char *logical,
if (logical[i] == '(') {
paren += 1;
} else if (logical[i] == ')') {
if (paren <= 0) {
if (paren == 0) {
throw LocatedParseError("Not enough left parentheses");
}
paren -= 1;
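
The paren change is a typical cppcheck unsigned-comparison fix: paren is a u32, so paren <= 0 can only ever mean paren == 0, and the explicit equality states the intent. A small illustration of the pitfall:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t paren = 0;
    printf("%d\n", paren <= 0);  /* 1: identical to paren == 0 */
    paren -= 1;                  /* no negative values: wraps around */
    printf("%u\n", paren);       /* 4294967295 */
    return 0;
}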

View File

@@ -192,7 +192,7 @@ int roseCountingMiracleOccurs(const struct RoseEngine *t,
u32 count = 0;
s64a m_loc = start;
s64a m_loc;
if (!cm->shufti) {
u8 c = cm->c;

View File

@@ -131,7 +131,6 @@ void findMaskLiteral(const vector<CharReach> &mask, bool streaming,
if (better) {
best_begin = begin;
best_end = end;
best_len = len;
}
for (size_t i = best_begin; i < best_end; i++) {
@@ -393,8 +392,9 @@ bool validateTransientMask(const vector<CharReach> &mask, bool anchored,
none_of(begin(lits), end(lits), mixed_sensitivity));
// Build the HWLM literal mask.
vector<u8> msk, cmp;
vector<u8> msk;
if (grey.roseHamsterMasks) {
vector<u8> cmp;
buildLiteralMask(mask, msk, cmp, delay);
}

View File

@@ -2251,10 +2251,9 @@ vector<u32> buildSuffixEkeyLists(const RoseBuildImpl &build, build_context &bc,
/* for each outfix also build elists */
for (const auto &outfix : build.outfixes) {
u32 qi = outfix.get_queue();
set<u32> ekeys = reportsToEkeys(all_reports(outfix), build.rm);
if (!ekeys.empty()) {
u32 qi = outfix.get_queue();
qi_to_ekeys[qi] = {ekeys.begin(), ekeys.end()};
}
}
@@ -2975,7 +2974,8 @@ void buildFragmentPrograms(const RoseBuildImpl &build,
!lit_prog.empty()) {
const auto &cfrag = fragments[pfrag.included_frag_id];
assert(pfrag.s.length() >= cfrag.s.length() &&
!pfrag.s.any_nocase() >= !cfrag.s.any_nocase());
!pfrag.s.any_nocase() == !cfrag.s.any_nocase());
/** !pfrag.s.any_nocase() >= !cfrag.s.any_nocase()); **/
u32 child_offset = cfrag.lit_program_offset;
DEBUG_PRINTF("child %u offset %u\n", cfrag.fragment_id,
child_offset);
@@ -2992,8 +2992,8 @@ void buildFragmentPrograms(const RoseBuildImpl &build,
pfrag.lit_ids);
if (pfrag.included_delay_frag_id != INVALID_FRAG_ID &&
!rebuild_prog.empty()) {
const auto &cfrag = fragments[pfrag.included_delay_frag_id];
assert(pfrag.s.length() >= cfrag.s.length() &&
/** assert(pfrag.s.length() >= cfrag.s.length() && **/
assert(pfrag.s.length() == cfrag.s.length() &&
!pfrag.s.any_nocase() >= !cfrag.s.any_nocase());
u32 child_offset = cfrag.delay_program_offset;
DEBUG_PRINTF("child %u offset %u\n", cfrag.fragment_id,

View File

@@ -170,7 +170,6 @@ void renovateCastle(RoseBuildImpl &tbi, CastleProto *castle,
return; /* bail - TODO: be less lazy */
}
vector<CharReach> rem_local_cr;
u32 ok_count = 0;
for (auto it = e.s.end() - g[v].left.lag; it != e.s.end(); ++it) {
if (!isSubsetOf(*it, cr)) {

View File

@@ -884,7 +884,7 @@ void buildAccel(const RoseBuildImpl &build,
}
bytecode_ptr<HWLM>
buildHWLMMatcher(const RoseBuildImpl &build, LitProto *litProto) {
buildHWLMMatcher(const RoseBuildImpl &build, const LitProto *litProto) {
if (!litProto) {
return nullptr;
}

View File

@@ -101,7 +101,7 @@ struct LitProto {
};
bytecode_ptr<HWLM>
buildHWLMMatcher(const RoseBuildImpl &build, LitProto *proto);
buildHWLMMatcher(const RoseBuildImpl &build, const LitProto *proto);
std::unique_ptr<LitProto>
buildFloatingMatcherProto(const RoseBuildImpl &build,

View File

@@ -1599,7 +1599,8 @@ void dedupeLeftfixesVariableLag(RoseBuildImpl &build) {
continue;
}
}
engine_groups[DedupeLeftKey(build, std::move(preds), left)].emplace_back(left);
auto preds_copy = std::move(preds);
engine_groups[DedupeLeftKey(build, preds_copy, left)].emplace_back(left);
}
/* We don't bother chunking as we expect deduping to be successful if the

View File

@@ -1004,9 +1004,9 @@ bool hasOrphanedTops(const RoseBuildImpl &build) {
for (auto v : vertices_range(g)) {
if (g[v].left) {
set<u32> &tops = leftfixes[g[v].left];
if (!build.isRootSuccessor(v)) {
// Tops for infixes come from the in-edges.
set<u32> &tops = leftfixes[g[v].left];
for (const auto &e : in_edges_range(v, g)) {
tops.insert(g[e].rose_top);
}

View File

@@ -104,7 +104,7 @@ void runAnchoredTableStream(const struct RoseEngine *t, const void *atable,
static really_inline
void saveStreamState(const struct NFA *nfa, struct mq *q, s64a loc) {
void saveStreamState(const struct NFA *nfa, const struct mq *q, s64a loc) {
DEBUG_PRINTF("offset=%llu, length=%zu, hlength=%zu, loc=%lld\n",
q->offset, q->length, q->hlength, loc);
nfaQueueCompressState(nfa, q, loc);

View File

@@ -215,12 +215,12 @@ struct ALIGN_CL_DIRECTIVE hs_scratch {
/* array of fatbit ptr; TODO: why not an array of fatbits? */
static really_inline
struct fatbit **getAnchoredLiteralLog(struct hs_scratch *scratch) {
struct fatbit **getAnchoredLiteralLog(const struct hs_scratch *scratch) {
return scratch->al_log;
}
static really_inline
struct fatbit **getDelaySlots(struct hs_scratch *scratch) {
struct fatbit **getDelaySlots(const struct hs_scratch *scratch) {
return scratch->delay_slots;
}

View File

@@ -69,8 +69,8 @@ void setSomLoc(struct fatbit *som_set_now, u64a *som_store, u32 som_store_count,
}
static really_inline
char ok_and_mark_if_write(u8 *som_store_valid, struct fatbit *som_set_now,
u8 *som_store_writable, u32 som_store_count,
char ok_and_mark_if_write(u8 *som_store_valid, const struct fatbit *som_set_now,
const u8 *som_store_writable, u32 som_store_count,
u32 loc) {
return !mmbit_set(som_store_valid, som_store_count, loc) /* unwritten */
|| fatbit_isset(som_set_now, som_store_count, loc) /* write here, need
@@ -79,7 +79,7 @@ char ok_and_mark_if_write(u8 *som_store_valid, struct fatbit *som_set_now,
}
static really_inline
char ok_and_mark_if_unset(u8 *som_store_valid, struct fatbit *som_set_now,
char ok_and_mark_if_unset(u8 *som_store_valid, const struct fatbit *som_set_now,
u32 som_store_count, u32 loc) {
return !mmbit_set(som_store_valid, som_store_count, loc) /* unwritten */
|| fatbit_isset(som_set_now, som_store_count, loc); /* write here, need

View File

@@ -68,7 +68,7 @@ namespace ue2 {
#endif
void *aligned_malloc_internal(size_t size, size_t align) {
void *mem;
void *mem = nullptr;
int rv = posix_memalign(&mem, align, size);
if (rv != 0) {
DEBUG_PRINTF("posix_memalign returned %d when asked for %zu bytes\n",

View File

@@ -155,13 +155,13 @@ u32 compress32_impl_c(u32 x, u32 m) {
return 0;
}
u32 mk, mp, mv, t;
u32 mk, mv;
x &= m; // clear irrelevant bits
mk = ~m << 1; // we will count 0's to right
for (u32 i = 0; i < 5; i++) {
mp = mk ^ (mk << 1);
u32 mp = mk ^ (mk << 1);
mp ^= mp << 2;
mp ^= mp << 4;
mp ^= mp << 8;
@@ -169,7 +169,7 @@ u32 compress32_impl_c(u32 x, u32 m) {
mv = mp & m; // bits to move
m = (m ^ mv) | (mv >> (1 << i)); // compress m
t = x & mv;
u32 t = x & mv;
x = (x ^ t) | (t >> (1 << i)); // compress x
mk = mk & ~mp;
}
@@ -239,14 +239,14 @@ u32 expand32_impl_c(u32 x, u32 m) {
return 0;
}
u32 m0, mk, mp, mv, t;
u32 m0, mk, mv;
u32 array[5];
m0 = m; // save original mask
mk = ~m << 1; // we will count 0's to right
for (int i = 0; i < 5; i++) {
mp = mk ^ (mk << 1); // parallel suffix
u32 mp = mk ^ (mk << 1); // parallel suffix
mp = mp ^ (mp << 2);
mp = mp ^ (mp << 4);
mp = mp ^ (mp << 8);
@@ -259,7 +259,7 @@ u32 expand32_impl_c(u32 x, u32 m) {
for (int i = 4; i >= 0; i--) {
mv = array[i];
t = x << (1 << i);
u32 t = x << (1 << i);
x = (x & ~mv) | (t & mv);
}
@@ -409,7 +409,7 @@ u64a pdep64_impl_c(u64a x, u64a _m) {
u64a result = 0x0UL;
const u64a mask = 0x8000000000000000UL;
u64a m = _m;
u64a c, t;
u64a p;
/* The pop-count of the mask gives the number of the bits from
@@ -421,8 +421,8 @@ u64a pdep64_impl_c(u64a x, u64a _m) {
each mask bit as it is processed. */
while (m != 0)
{
c = __builtin_clzl (m);
t = x << (p - c);
u64a c = __builtin_clzl (m);
u64a t = x << (p - c);
m ^= (mask >> c);
result |= (t & (mask >> c));
p++;
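
For reference, compress32_impl_c implements x86 PEXT semantics, gathering the bits of x selected by mask m into the low-order bits, while expand32_impl_c and pdep64_impl_c implement the inverse PDEP scatter. A naive reference for the compress direction (a sketch for clarity, not the shift-based algorithm above):

#include <stdint.h>

/* compress32_ref(x, m): for each set bit of m from LSB upward, copy the
 * corresponding bit of x into the next low-order result bit (PEXT). */
static uint32_t compress32_ref(uint32_t x, uint32_t m) {
    uint32_t out = 0;
    for (uint32_t bit = 0; m != 0; m &= m - 1, bit++) {
        if (x & m & -m) {        /* x tested at the lowest set bit of m */
            out |= 1u << bit;
        }
    }
    return out;
}

E.g. compress32_ref(0xb4, 0xd0) picks bits 4, 6 and 7 of x (values 1, 0, 1) and packs them into 0x5.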

View File

@@ -178,9 +178,9 @@ size_t describeClassInt(ostream &os, const CharReach &incr, size_t maxLength,
// Render charclass as a series of ranges
size_t c_start = cr.find_first();
size_t c = c_start, c_last = 0;
size_t c = c_start;
while (c != CharReach::npos) {
c_last = c;
size_t c_last = c;
c = cr.find_next(c);
if (c != c_last + 1 || c_last == 0xff) {
describeRange(os, c_start, c_last, out_type);

View File

@@ -102,10 +102,10 @@ public:
using category = boost::read_write_property_map_tag;
small_color_map(size_t n_in, const IndexMap &index_map_in)
: n(n_in), index_map(index_map_in) {
size_t num_bytes = (n + entries_per_byte - 1) / entries_per_byte;
data = std::make_shared<std::vector<unsigned char>>(num_bytes);
fill(small_color::white);
: n(n_in),
index_map(index_map_in),
data(std::make_shared<std::vector<unsigned char>>((n_in + entries_per_byte - 1) / entries_per_byte)) {
fill(small_color::white);
}
void fill(small_color color) {

View File

@@ -1145,7 +1145,7 @@ really_inline SuperVector<32> SuperVector<32>::loadu_maskz(void const *ptr, uint
template<>
really_inline SuperVector<32> SuperVector<32>::alignr(SuperVector<32> &other, int8_t offset)
{
#if defined(HAVE__BUILTIN_CONSTANT_P) && !(defined(__GNUC__) && (__GNUC__ == 13))
#if defined(HAVE__BUILTIN_CONSTANT_P) && !(defined(__GNUC__) && ((__GNUC__ == 13) || (__GNUC__ == 14)))
if (__builtin_constant_p(offset)) {
if (offset == 16) {
return *this;
@@ -1801,7 +1801,7 @@ really_inline SuperVector<64> SuperVector<64>::pshufb_maskz(SuperVector<64> b, u
template<>
really_inline SuperVector<64> SuperVector<64>::alignr(SuperVector<64> &l, int8_t offset)
{
#if defined(HAVE__BUILTIN_CONSTANT_P)
#if defined(HAVE__BUILTIN_CONSTANT_P) && !(defined(__GNUC__) && (__GNUC__ == 14))
if (__builtin_constant_p(offset)) {
if (offset == 16) {
return *this;