Fix unreadVariable warning
parent 0cf72ef474
commit 6d6d4e1013
@@ -294,9 +294,6 @@ setupFullConfs(const vector<hwlmLiteral> &lits,
                    const EngineDescription &eng,
                    const map<BucketIndex, vector<LiteralIndex>> &bucketToLits,
                    bool make_small) {
-    unique_ptr<TeddyEngineDescription> teddyDescr =
-        getTeddyDescription(eng.getID());
-
     BC2CONF bc2Conf;
     u32 totalConfirmSize = 0;
     for (BucketIndex b = 0; b < eng.getNumBuckets(); b++) {
@@ -304,6 +304,7 @@ void minimize_hopcroft(raw_dfa &rdfa, const Grey &grey) {
         DEBUG_PRINTF("dfa is empty\n");
     }
 
+    // cppcheck-suppress unreadVariable
     UNUSED const size_t states_before = rdfa.states.size();
 
     HopcroftInfo info(rdfa);
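Note: most of the hunks in this commit take the same approach as the one above: the flagged variable is only read by debug-time code, so a cppcheck-suppress unreadVariable comment is added either on the line before the flagged statement or appended to it. A minimal stand-alone sketch (not vectorscan code) of how such a suppression is consumed; cppcheck only honours inline suppressions when invoked with --inline-suppr, e.g. "cppcheck --enable=style --inline-suppr example.cpp":

    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> states(42);
        // cppcheck-suppress unreadVariable
        [[maybe_unused]] const auto states_before = states.size();
    #ifdef DEBUG
        // Only debug builds read the value; in release builds the unread
        // store is what triggers cppcheck's unreadVariable report.
        std::printf("states before: %zu\n", states_before);
    #endif
        return 0;
    }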
@@ -802,7 +802,7 @@ private:
 
 static
 void prep_joins_for_generation(const GoughGraph &g, GoughVertex v,
-                               map<GoughEdge, edge_join_info> *edge_info) {
+                               map<GoughEdge, edge_join_info> &edge_info) {
     DEBUG_PRINTF("writing out joins for %u\n", g[v].state_id);
     for (const auto &var : g[v].vars) {
         u32 dest_slot = var->slot;
@@ -813,7 +813,7 @@ void prep_joins_for_generation(const GoughGraph &g, GoughVertex v,
         }
 
         for (const GoughEdge &incoming_edge : var_edges.second) {
-            (*edge_info)[incoming_edge].insert(input, dest_slot);
+            edge_info[incoming_edge].insert(input, dest_slot);
             DEBUG_PRINTF("need %u<-%u\n", dest_slot, input);
         }
     }
@@ -911,7 +911,7 @@ void build_blocks(const GoughGraph &g,
     }
 
     map<GoughEdge, edge_join_info> eji;
-    prep_joins_for_generation(g, t, &eji);
+    prep_joins_for_generation(g, t, eji);
 
     for (auto &m : eji) {
         vector<gough_ins> &block = (*blocks)[gough_edge_id(g, m.first)];
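Note: the three Gough hunks above are a single refactor. prep_joins_for_generation used to fill its result map through a pointer, which forced the (*edge_info)[...] dereference in the body and the &eji at the call site; taking the map by reference removes both. A minimal sketch of the same out-parameter pattern, using hypothetical names rather than the real Gough types:

    #include <map>

    // Hypothetical stand-in for the prep_joins_for_generation refactor: the
    // callee fills the result map through a reference instead of a pointer.
    static void collect(int key, int value, std::map<int, int> &out) {
        out[key] = value;            // was (*out)[key] = value; with a pointer
    }

    int main() {
        std::map<int, int> results;
        collect(1, 10, results);     // was collect(1, 10, &results);
        return results.at(1) == 10 ? 0 : 1;
    }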
@@ -1018,12 +1018,16 @@ bytecode_ptr<NFA> mcshengCompile16(dfa_info &info, dstate_id_t sheng_end,
 
     // Sherman optimization
     if (info.impl_alpha_size > 16) {
+#ifdef DEBUG
         u16 total_daddy = 0;
+#endif // DEBUG
         for (u32 i = 0; i < info.size(); i++) {
             find_better_daddy(info, i,
                               is_cyclic_near(info.raw, info.raw.start_anchored),
                               grey);
+#ifdef DEBUG
             total_daddy += info.extra[i].daddytaken;
+#endif // DEBUG
         }
 
         DEBUG_PRINTF("daddy %hu/%zu states=%zu alpha=%hu\n", total_daddy,
@@ -1172,12 +1176,16 @@ bytecode_ptr<NFA> mcsheng64Compile16(dfa_info&info, dstate_id_t sheng_end,
 
     // Sherman optimization
     if (info.impl_alpha_size > 16) {
+#ifdef DEBUG
         u16 total_daddy = 0;
+#endif // DEBUG
         for (u32 i = 0; i < info.size(); i++) {
             find_better_daddy(info, i,
                               is_cyclic_near(info.raw, info.raw.start_anchored),
                               grey);
+#ifdef DEBUG
             total_daddy += info.extra[i].daddytaken;
+#endif // DEBUG
         }
 
         DEBUG_PRINTF("daddy %hu/%zu states=%zu alpha=%hu\n", total_daddy,
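Note: mcshengCompile16 and mcsheng64Compile16 get identical treatment. total_daddy is only ever read by DEBUG_PRINTF, so its declaration and accumulation are now compiled only when DEBUG is defined; this removes the unread stores from release builds instead of merely suppressing the warning. A stand-alone sketch of the pattern, assuming a DEBUG_PRINTF-style macro that expands to nothing when DEBUG is undefined:

    #include <cstdio>

    // Hypothetical TRACE macro in the style of DEBUG_PRINTF: it expands to
    // nothing unless DEBUG is defined, so a value fed only to it looks like
    // an unread variable to cppcheck in release builds.
    #ifdef DEBUG
    #define TRACE(...) std::printf(__VA_ARGS__)
    #else
    #define TRACE(...) do { } while (0)
    #endif

    int main() {
    #ifdef DEBUG
        unsigned total = 0;          // declared only when the trace reads it
    #endif
        for (unsigned i = 0; i < 8; i++) {
    #ifdef DEBUG
            total += i;              // accumulation is likewise debug-only
    #endif
        }
        // In release builds the macro discards its arguments before the
        // compiler ever sees 'total', so the name need not exist there.
        TRACE("total=%u\n", total);
        return 0;
    }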
@@ -1430,11 +1438,9 @@ bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
 
     map<dstate_id_t, AccelScheme> accel_escape_info
         = info.strat.getAccelInfo(cc.grey);
-    auto old_states = info.states;
     dstate_id_t sheng_end = find_sheng_states(info, accel_escape_info, MAX_SHENG_STATES);
 
     if (sheng_end <= DEAD_STATE + 1) {
-        info.states = old_states;
         return bytecode_ptr<NFA>(nullptr);
     }
 
@@ -1447,7 +1453,6 @@ bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
     }
 
     if (!nfa) {
-        info.states = old_states;
         return nfa;
     }
 
@@ -251,6 +251,10 @@ void q_skip_forward_to(struct mq *q, s64a min_loc) {
 // Dump the contents of the given queue.
 static never_inline UNUSED
 void debugQueue(const struct mq *q) {
+    if (q == nullptr) {
+        DEBUG_PRINTF("q=NULL!\n");
+        return;
+    }
     DEBUG_PRINTF("q=%p, nfa=%p\n", q, q->nfa);
     DEBUG_PRINTF("q offset=%llu, buf={%p, len=%zu}, history={%p, len=%zu}\n",
                  q->offset, q->buffer, q->length, q->history, q->hlength);
@@ -800,7 +800,7 @@ bytecode_ptr<NFA> sheng64Compile(raw_dfa &raw, const CompileContext &cc,
     old_states = info.states;
     auto nfa = shengCompile_int<sheng64>(raw, cc, accel_states, strat, info);
     if (!nfa) {
-        info.states = old_states;
+        info.states = old_states; // cppcheck-suppress unreadVariable
     }
     return nfa;
 }
@@ -854,9 +854,7 @@ void replaceSubgraphWithLazySpecial(NGHolder &g, ReachSubgraph &rsi,
     assert(rsi.repeatMax >= rsi.repeatMin);
 
     DEBUG_PRINTF("entry\n");
 
-    const unordered_set<NFAVertex> involved(rsi.vertices.begin(),
-                                            rsi.vertices.end());
     vector<NFAVertex> g_succs;
     getSuccessors(g, rsi, &g_succs);
 
@@ -3125,7 +3125,7 @@ sombe_rv doSomWithHaig(NG &ng, NGHolder &g, const ExpressionInfo &expr,
 
     // try a redundancy pass.
     if (addSomRedundancy(g, depths)) {
-        depths = getDistancesFromSOM(g);
+        depths = getDistancesFromSOM(g); // cppcheck-suppress unreadVariable
     }
 
     auto regions = assignRegions(g);
@@ -267,7 +267,8 @@ hwlmcb_rv_t playDelaySlot(const struct RoseEngine *t,
     const u32 *programs = getByOffset(t, t->delayProgramOffset);
 
     for (u32 it = fatbit_iterate(vicSlot, delay_count, MMB_INVALID);
          it != MMB_INVALID; it = fatbit_iterate(vicSlot, delay_count, it)) {
+        // cppcheck-suppress unreadVariable
         UNUSED rose_group old_groups = tctxt->groups;
 
         DEBUG_PRINTF("DELAYED MATCH id=%u offset=%llu\n", it, offset);
@@ -1039,9 +1039,9 @@ bool canImplementGraph(NGHolder &h, bool prefilter, const ReportManager &rm,
 
     if (prefilter && cc.grey.prefilterReductions) {
         // If we're prefiltering, we can have another go with a reduced graph.
-        UNUSED size_t numBefore = num_vertices(h);
+        UNUSED size_t numBefore = num_vertices(h); // cppcheck-suppress unreadVariable
         prefilterReductions(h, cc);
-        UNUSED size_t numAfter = num_vertices(h);
+        UNUSED size_t numAfter = num_vertices(h); // cppcheck-suppress unreadVariable
         DEBUG_PRINTF("reduced from %zu to %zu vertices\n", numBefore, numAfter);
 
         if (isImplementableNFA(h, &rm, cc)) {
@@ -98,8 +98,7 @@ void addToBloomFilter(vector<u8> &bloom, const u8 *substr, bool nocase) {
 
     const auto hash_functions = { bloomHash_1, bloomHash_2, bloomHash_3 };
     for (const auto &hash_func : hash_functions) {
-        u32 hash = hash_func(substr, nocase);
-        u32 key = hash & key_mask;
+        u32 key = hash_func(substr, nocase) & key_mask;
         DEBUG_PRINTF("set key %u (of %zu)\n", key, bloom.size() * 8);
         bloom[key / 8] |= 1U << (key % 8);
     }
@@ -193,11 +192,9 @@ vector<RoseLongLitHashEntry> buildHashTable(
     }
 
     for (const auto &m : hashToLitOffPairs) {
-        u32 hash = m.first;
+        u32 bucket = m.first % numEntries;
         const LitOffsetVector &d = m.second;
 
-        u32 bucket = hash % numEntries;
-
         // Placement via linear probing.
         for (const auto &lit_offset : d) {
             while (tab[bucket].str_offset != 0) {
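Note: the two hunks above fold a temporary that is consumed exactly once into the expression that uses it (hash into key in addToBloomFilter, hash into bucket in buildHashTable); the computed values are unchanged. A small stand-alone illustration with hypothetical names:

    #include <cassert>
    #include <cstdint>

    // Hypothetical hash, standing in for bloomHash_* / the long-lit hash.
    static uint32_t hash_byte(uint8_t b) {
        return b * 2654435761u;
    }

    // Old shape: the hash is parked in a named local and used exactly once.
    static uint32_t key_before(uint8_t b, uint32_t key_mask) {
        uint32_t hash = hash_byte(b);
        uint32_t key = hash & key_mask;
        return key;
    }

    // New shape: the single-use temporary is folded into the consumer.
    static uint32_t key_after(uint8_t b, uint32_t key_mask) {
        return hash_byte(b) & key_mask;
    }

    int main() {
        assert(key_before('x', 0xffu) == key_after('x', 0xffu));
        return 0;
    }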
@@ -861,7 +861,6 @@ u32 roseQuality(const RoseResources &res, const RoseEngine *t) {
     }
 
     if (eod_prefix) {
-        always_run++;
         DEBUG_PRINTF("eod prefixes are slow");
         return 0;
     }
@@ -142,6 +142,7 @@ size_t JOIN(sc_, FN_SUFFIX)(const struct RoseEngine *rose,
     COPY(stream_body + so->groups, so->groups_size);
 
     /* copy the real bits of history */
+    // cppcheck-suppress unreadVariable
     UNUSED u32 hend = so->history + rose->historyRequired;
     COPY(stream_body + hend - history, history);
 
@@ -180,7 +180,7 @@ TEST(state_compress, m128_2) {
             loadcompressed128(&val_out, &buf, &mask, 0);
             EXPECT_TRUE(!diff128(and128(val, mask), val_out));
 
-            mask_raw[j] = 0x7f;
+            mask_raw[j] = 0x7f; // cppcheck-suppress unreadVariable
         }
     }
 }
@@ -258,7 +258,7 @@ TEST(state_compress, m256_2) {
             loadcompressed256(&val_out, &buf, &mask, 0);
             EXPECT_TRUE(!diff256(and256(val, mask), val_out));
 
-            mask_raw[j] = 0x7f;
+            mask_raw[j] = 0x7f; // cppcheck-suppress unreadVariable
         }
     }
 }
@@ -338,7 +338,7 @@ TEST(state_compress, m384_2) {
            loadcompressed384(&val_out, &buf, &mask, 0);
            EXPECT_TRUE(!diff384(and384(val, mask), val_out));
 
-           mask_raw[j] = 0x7f;
+           mask_raw[j] = 0x7f; // cppcheck-suppress unreadVariable
        }
    }
 }
@@ -419,7 +419,7 @@ TEST(state_compress, m512_2) {
            loadcompressed512(&val_out, &buf, &mask, 0);
            EXPECT_TRUE(!diff512(and512(val, mask), val_out));
 
-           mask_raw[j] = 0x7f;
+           mask_raw[j] = 0x7f; // cppcheck-suppress unreadVariable
        }
    }
 }