addressing cppcheck shadowFunction warnings

Author: G.E
Date: 2024-05-02 18:00:03 +03:00
parent 6e306a508e
commit c7f7d17ebc
25 changed files with 313 additions and 313 deletions
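
For context: cppcheck's shadowFunction check (a style-level diagnostic, enabled with e.g. cppcheck --enable=style) reports a local variable or parameter that reuses the name of a function visible in the same scope. The renames in this commit (cmp -> lcmp, succ -> vsucc/gsucc/rsuccs, pred -> ppred/predk/fpreds, chunk -> cchunk/dchunk, ...) address those reports without changing behaviour. A minimal illustrative sketch follows; it is not taken from the Vectorscan sources and the function names are made up.

// Illustrative only. With style checks enabled, cppcheck flags the local
// 'cmp' below because it shadows the file-local function 'cmp'.

#include <vector>

static int cmp(int a, int b) {
    return a - b;
}

static int smallest(const std::vector<int> &vals) {
    int cmp = 0; // shadows the function cmp(); cmp(a, b) is not callable in this scope
    for (int v : vals) {
        if (v < cmp) {
            cmp = v;
        }
    }
    return cmp;
}

// The fix applied throughout this commit: rename the shadowing local.
static int smallest_fixed(const std::vector<int> &vals) {
    int lcmp = 0; // no longer hides the function name
    for (int v : vals) {
        if (cmp(v, lcmp) < 0) {
            lcmp = v;
        }
    }
    return lcmp;
}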

View File

@@ -518,9 +518,9 @@ u32 findRoseAnchorFloatingOverlap(const RoseInEdgeProps &ep,
static
void findRoseLiteralMask(const NGHolder &h, const u32 lag, vector<u8> &msk,
- vector<u8> &cmp) {
+ vector<u8> &lcmp) {
if (lag >= HWLM_MASKLEN) {
- msk.clear(); cmp.clear();
+ msk.clear(); lcmp.clear();
return;
}
@@ -532,7 +532,7 @@ void findRoseLiteralMask(const NGHolder &h, const u32 lag, vector<u8> &msk,
assert(!curr.empty());
msk.assign(HWLM_MASKLEN, 0);
- cmp.assign(HWLM_MASKLEN, 0);
+ lcmp.assign(HWLM_MASKLEN, 0);
size_t i = HWLM_MASKLEN - lag - 1;
do {
if (curr.empty() || contains(curr, h.start) ||
@@ -549,9 +549,9 @@ void findRoseLiteralMask(const NGHolder &h, const u32 lag, vector<u8> &msk,
cr |= h[v].char_reach;
insert(&next, inv_adjacent_vertices(v, h));
}
- make_and_cmp_mask(cr, &msk[i], &cmp[i]);
- DEBUG_PRINTF("%zu: reach=%s, msk=%u, cmp=%u\n", i,
- describeClass(cr).c_str(), msk.at(i), cmp.at(i));
+ make_and_cmp_mask(cr, &msk[i], &lcmp[i]);
+ DEBUG_PRINTF("%zu: reach=%s, msk=%u, lcmp=%u\n", i,
+ describeClass(cr).c_str(), msk.at(i), lcmp.at(i));
curr.swap(next);
} while (i-- > 0);
}
@@ -617,18 +617,18 @@ void doRoseLiteralVertex(RoseBuildImpl *tbi, bool use_eod_table,
}
floating:
- vector<u8> msk, cmp;
+ vector<u8> msk, lcmp;
if (tbi->cc.grey.roseHamsterMasks && in_degree(iv, ig) == 1) {
RoseInEdge e = *in_edges(iv, ig).first;
if (ig[e].graph) {
- findRoseLiteralMask(*ig[e].graph, ig[e].graph_lag, msk, cmp);
+ findRoseLiteralMask(*ig[e].graph, ig[e].graph_lag, msk, lcmp);
}
}
u32 delay = iv_info.delay;
rose_literal_table table = use_eod_table ? ROSE_EOD_ANCHORED : ROSE_FLOATING;
- u32 literalId = tbi->getLiteralId(iv_info.s, msk, cmp, delay, table);
+ u32 literalId = tbi->getLiteralId(iv_info.s, msk, lcmp, delay, table);
DEBUG_PRINTF("literal=%u (len=%zu, delay=%u, offsets=[%u,%u] '%s')\n",
literalId, iv_info.s.length(), delay, iv_info.min_offset,
@@ -1087,20 +1087,20 @@ bool predsAreDelaySensitive(const RoseInGraph &ig, RoseInVertex v) {
static
u32 maxAvailableDelay(const ue2_literal &pred_key, const ue2_literal &lit_key) {
/* overly conservative if only part of the string is nocase */
- string pred = pred_key.get_string();
+ string predk = pred_key.get_string();
string lit = lit_key.get_string();
if (pred_key.any_nocase() || lit_key.any_nocase()) {
- upperString(pred);
+ upperString(predk);
upperString(lit);
}
- string::size_type last = pred.rfind(lit);
+ string::size_type last = predk.rfind(lit);
if (last == string::npos) {
return MAX_DELAY;
}
- u32 raw = pred.size() - last - 1;
+ u32 raw = predk.size() - last - 1;
return MIN(raw, MAX_DELAY);
}

View File

@@ -303,31 +303,31 @@ unique_ptr<NGHolder> buildMaskLhs(bool anchored, u32 prefix_len,
assert(prefix_len);
assert(mask.size() >= prefix_len);
- NFAVertex pred = anchored ? lhs->start : lhs->startDs;
+ NFAVertex preds = anchored ? lhs->start : lhs->startDs;
u32 m_idx = 0;
while (prefix_len--) {
NFAVertex v = add_vertex(*lhs);
(*lhs)[v].char_reach = mask[m_idx++];
- add_edge(pred, v, *lhs);
- pred = v;
+ add_edge(preds, v, *lhs);
+ preds = v;
}
- add_edge(pred, lhs->accept, *lhs);
- (*lhs)[pred].reports.insert(0);
+ add_edge(preds, lhs->accept, *lhs);
+ (*lhs)[preds].reports.insert(0);
return lhs;
}
static
void buildLiteralMask(const vector<CharReach> &mask, vector<u8> &msk,
- vector<u8> &cmp, u32 delay) {
+ vector<u8> &lcmp, u32 delay) {
msk.clear();
- cmp.clear();
+ lcmp.clear();
if (mask.size() <= delay) {
return;
}
- // Construct an and/cmp mask from our mask ending at delay positions before
+ // Construct an and/lcmp mask from our mask ending at delay positions before
// the end of the literal, with max length HWLM_MASKLEN.
auto ite = mask.end() - delay;
@@ -335,11 +335,11 @@ void buildLiteralMask(const vector<CharReach> &mask, vector<u8> &msk,
for (; it != ite; ++it) {
msk.emplace_back(0);
- cmp.emplace_back(0);
- make_and_cmp_mask(*it, &msk.back(), &cmp.back());
+ lcmp.emplace_back(0);
+ make_and_cmp_mask(*it, &msk.back(), &lcmp.back());
}
- assert(msk.size() == cmp.size());
+ assert(msk.size() == lcmp.size());
assert(msk.size() <= HWLM_MASKLEN);
}
@@ -393,9 +393,9 @@ bool validateTransientMask(const vector<CharReach> &mask, bool anchored,
none_of(begin(lits), end(lits), mixed_sensitivity));
// Build the HWLM literal mask.
- vector<u8> msk, cmp;
+ vector<u8> msk, lcmp;
if (grey.roseHamsterMasks) {
- buildLiteralMask(mask, msk, cmp, delay);
+ buildLiteralMask(mask, msk, lcmp, delay);
}
// We consider the HWLM mask length to run from the first non-zero byte to
@@ -491,9 +491,9 @@ void addTransientMask(RoseBuildImpl &build, const vector<CharReach> &mask,
set_report(*mask_graph, mask_report);
// Build the HWLM literal mask.
- vector<u8> msk, cmp;
+ vector<u8> msk, lcmp;
if (build.cc.grey.roseHamsterMasks) {
- buildLiteralMask(mask, msk, cmp, delay);
+ buildLiteralMask(mask, msk, lcmp, delay);
}
/* adjust bounds to be relative to trigger rather than mask */
@@ -527,7 +527,7 @@ void addTransientMask(RoseBuildImpl &build, const vector<CharReach> &mask,
const flat_set<ReportID> no_reports;
for (const auto &lit : lits) {
- u32 lit_id = build.getLiteralId(lit, msk, cmp, delay, table);
+ u32 lit_id = build.getLiteralId(lit, msk, lcmp, delay, table);
const RoseVertex parent = anchored ? build.anchored_root : build.root;
bool use_mask = delay || maskIsNeeded(lit, *mask_graph);
@@ -570,19 +570,19 @@ unique_ptr<NGHolder> buildMaskRhs(const flat_set<ReportID> &reports,
unique_ptr<NGHolder> rhs = std::make_unique<NGHolder>(NFA_SUFFIX);
NGHolder &h = *rhs;
- NFAVertex succ = h.accept;
+ NFAVertex asucc = h.accept;
u32 m_idx = mask.size() - 1;
while (suffix_len--) {
NFAVertex u = add_vertex(h);
- if (succ == h.accept) {
+ if (asucc == h.accept) {
h[u].reports.insert(reports.begin(), reports.end());
}
h[u].char_reach = mask[m_idx--];
- add_edge(u, succ, h);
- succ = u;
+ add_edge(u, asucc, h);
+ asucc = u;
}
- NFAEdge e = add_edge(h.start, succ, h);
+ NFAEdge e = add_edge(h.start, asucc, h);
h[e].tops.insert(DEFAULT_TOP);
return rhs;

View File

@@ -348,11 +348,11 @@ public:
next[s].wdelay = wdelay;
}
- nfa_state_set succ;
+ nfa_state_set gsucc;
if (wdelay != in.wdelay) {
DEBUG_PRINTF("enabling start\n");
- succ.set(vertexToIndex[g.startDs]);
+ gsucc.set(vertexToIndex[g.startDs]);
}
for (size_t i = in.wrap_state.find_first(); i != nfa_state_set::npos;
@@ -368,12 +368,12 @@ public:
continue;
}
- succ.set(vertexToIndex[w]);
+ gsucc.set(vertexToIndex[w]);
}
}
- for (size_t j = succ.find_first(); j != nfa_state_set::npos;
- j = succ.find_next(j)) {
+ for (size_t j = gsucc.find_first(); j != nfa_state_set::npos;
+ j = gsucc.find_next(j)) {
const CharReach &cr = cr_by_index[j];
for (size_t s = cr.find_first(); s != CharReach::npos;
s = cr.find_next(s)) {

View File

@@ -476,9 +476,9 @@ rose_group RoseBuildImpl::getInitialGroups() const {
static
bool nfaStuckOn(const NGHolder &g) {
assert(!proper_out_degree(g.startDs, g));
- set<NFAVertex> succ;
- insert(&succ, adjacent_vertices(g.start, g));
- succ.erase(g.startDs);
+ set<NFAVertex> vsucc;
+ insert(&vsucc, adjacent_vertices(g.start, g));
+ vsucc.erase(g.startDs);
set<NFAVertex> asucc;
set<u32> tops;
@@ -493,7 +493,7 @@ bool nfaStuckOn(const NGHolder &g) {
asucc.clear();
insert(&asucc, adjacent_vertices(target(e, g), g));
- if (asucc == succ) {
+ if (asucc == vsucc) {
insert(&done_tops, g[e].tops);
}
}
@@ -531,12 +531,12 @@ void findFixedDepthTops(const RoseGraph &g, const set<PredTopPair> &triggers,
for (const auto &e : pred_by_top) {
u32 top = e.first;
- const set<RoseVertex> &preds = e.second;
- if (!g[*preds.begin()].fixedOffset()) {
+ const set<RoseVertex> &spreds = e.second;
+ if (!g[*spreds.begin()].fixedOffset()) {
continue;
}
- u32 depth = g[*preds.begin()].min_offset;
- for (RoseVertex u : preds) {
+ u32 depth = g[*spreds.begin()].min_offset;
+ for (RoseVertex u : spreds) {
if (g[u].min_offset != depth || g[u].max_offset != depth) {
goto next_top;
}
@@ -925,12 +925,12 @@ void appendTailToHolder(NGHolder &h, const vector<CharReach> &tail) {
static
u32 decreaseLag(const RoseBuildImpl &build, NGHolder &h,
- const vector<RoseVertex> &succs) {
+ const vector<RoseVertex> &vsuccs) {
const RoseGraph &rg = build.g;
static const size_t MAX_RESTORE_LEN = 5;
vector<CharReach> restored(MAX_RESTORE_LEN);
- for (RoseVertex v : succs) {
+ for (RoseVertex v : vsuccs) {
u32 lag = rg[v].left.lag;
for (u32 lit_id : rg[v].literals) {
u32 delay = build.literals.at(lit_id).delay;
@@ -969,7 +969,7 @@ struct eager_info {
static
bool checkSuitableForEager(bool is_prefix, const left_id &left,
const RoseBuildImpl &build,
- const vector<RoseVertex> &succs,
+ const vector<RoseVertex> &vsuccs,
rose_group squash_mask, rose_group initial_groups,
eager_info &ei, const CompileContext &cc) {
DEBUG_PRINTF("checking prefix --> %016llx...\n", squash_mask);
@@ -986,7 +986,7 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
return false;
}
- for (RoseVertex s : succs) {
+ for (RoseVertex s : vsuccs) {
if (build.isInETable(s)
|| contains(rg[s].literals, build.eod_event_literal_id)) {
return false; /* Ignore EOD related prefixes */
@@ -1005,7 +1005,7 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
if (!can_die_early(dfa, EAGER_DIE_BEFORE_LIMIT)) {
return false;
}
- ei.new_graph = rg[succs[0]].left.graph;
+ ei.new_graph = rg[vsuccs[0]].left.graph;
} else if (left.graph()) {
const NGHolder &g = *left.graph();
if (proper_out_degree(g.startDs, g)) {
@@ -1016,7 +1016,7 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
auto gg = ei.new_graph;
gg->kind = NFA_EAGER_PREFIX;
- ei.lag_adjust = decreaseLag(build, *gg, succs);
+ ei.lag_adjust = decreaseLag(build, *gg, vsuccs);
if (is_match_vertex(gg->start, *gg)) {
return false; /* should not still be vacuous as lag decreased */
@@ -1044,17 +1044,17 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
static
left_id updateLeftfixWithEager(RoseGraph &g, const eager_info &ei,
- const vector<RoseVertex> &succs) {
+ const vector<RoseVertex> &vsuccs) {
u32 lag_adjust = ei.lag_adjust;
auto gg = ei.new_graph;
- for (RoseVertex v : succs) {
+ for (RoseVertex v : vsuccs) {
g[v].left.graph = gg;
assert(g[v].left.lag >= lag_adjust);
g[v].left.lag -= lag_adjust;
DEBUG_PRINTF("added %u literal chars back, new lag %u\n", lag_adjust,
g[v].left.lag);
}
- left_id leftfix = g[succs[0]].left;
+ left_id leftfix = g[vsuccs[0]].left;
if (leftfix.graph()) {
assert(leftfix.graph()->kind == NFA_PREFIX
@@ -1099,7 +1099,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
const map<left_id, set<PredTopPair> > &infixTriggers,
set<u32> *no_retrigger_queues, set<u32> *eager_queues,
const map<left_id, eager_info> &eager,
- const vector<RoseVertex> &succs, left_id leftfix) {
+ const vector<RoseVertex> &vsuccs, left_id leftfix) {
RoseGraph &g = build.g;
const CompileContext &cc = build.cc;
const ReportManager &rm = build.rm;
@@ -1111,7 +1111,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
if (contains(eager, leftfix)) {
eager_queues->insert(qi);
- leftfix = updateLeftfixWithEager(g, eager.at(leftfix), succs);
+ leftfix = updateLeftfixWithEager(g, eager.at(leftfix), vsuccs);
}
bytecode_ptr<NFA> nfa;
@@ -1159,7 +1159,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
u32 max_queuelen = UINT32_MAX;
if (!prefix) {
set<ue2_literal> lits;
- for (RoseVertex v : succs) {
+ for (RoseVertex v : vsuccs) {
for (auto u : inv_adjacent_vertices_range(v, g)) {
for (u32 lit_id : g[u].literals) {
lits.insert(build.literals.at(lit_id).s);
@@ -1188,7 +1188,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
findCountingMiracleInfo(leftfix, stop, &cm_count, &cm_cr);
}
- for (RoseVertex v : succs) {
+ for (RoseVertex v : vsuccs) {
bc.leftfix_info.emplace(v, left_build_info(qi, g[v].left.lag, max_width,
squash_mask, stop,
max_queuelen, cm_count,
@@ -1504,7 +1504,7 @@ bool buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
map<left_id, set<PredTopPair>> infixTriggers;
findInfixTriggers(tbi, &infixTriggers);
- insertion_ordered_map<left_id, vector<RoseVertex>> succs;
+ insertion_ordered_map<left_id, vector<RoseVertex>> lsuccs;
if (cc.grey.allowTamarama && cc.streaming && !do_prefix) {
findExclusiveInfixes(tbi, bc, qif, infixTriggers, no_retrigger_queues);
@@ -1544,7 +1544,7 @@ bool buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
}
}
- succs[leftfix].emplace_back(v);
+ lsuccs[leftfix].emplace_back(v);
}
rose_group initial_groups = tbi.getInitialGroups();
@@ -1552,7 +1552,7 @@ bool buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
map<left_id, eager_info> eager;
- for (const auto &m : succs) {
+ for (const auto &m : lsuccs) {
const left_id &leftfix = m.first;
const auto &left_succs = m.second;
@@ -1573,7 +1573,7 @@ bool buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
eager.clear();
}
- for (const auto &m : succs) {
+ for (const auto &m : lsuccs) {
const left_id &leftfix = m.first;
const auto &left_succs = m.second;
buildLeftfix(tbi, bc, do_prefix, qif.get_queue(), infixTriggers,

View File

@@ -1144,10 +1144,10 @@ void findTopTriggerCancels(RoseBuildImpl &build) {
for (const auto &r : left_succ) {
const left_id &left = r.first;
- const vector<RoseVertex> &succs = r.second;
+ const vector<RoseVertex> &rsuccs = r.second;
- assert(!succs.empty());
- if (build.isRootSuccessor(*succs.begin())) {
+ assert(!rsuccs.empty());
+ if (build.isRootSuccessor(*rsuccs.begin())) {
/* a prefix is never an infix */
continue;
}
@@ -1156,7 +1156,7 @@ void findTopTriggerCancels(RoseBuildImpl &build) {
set<RoseEdge> rose_edges;
set<u32> pred_lit_ids;
- for (auto v : succs) {
+ for (auto v : rsuccs) {
for (const auto &e : in_edges_range(v, build.g)) {
RoseVertex u = source(e, build.g);
tops_seen.insert(build.g[e].rose_top);
@@ -1212,11 +1212,11 @@ void buildRoseSquashMasks(RoseBuildImpl &tbi) {
* successor of the nfa and all the literals */
for (const auto &e : roses) {
const left_id &left = e.first;
- const vector<RoseVertex> &succs = e.second;
+ const vector<RoseVertex> &rsuccs = e.second;
set<u32> lit_ids;
bool anchored_pred = false;
- for (auto v : succs) {
+ for (auto v : rsuccs) {
lit_ids.insert(tbi.g[v].literals.begin(), tbi.g[v].literals.end());
for (auto u : inv_adjacent_vertices_range(v, tbi.g)) {
anchored_pred |= tbi.isAnchored(u);
@@ -1230,7 +1230,7 @@ void buildRoseSquashMasks(RoseBuildImpl &tbi) {
if (anchored_pred) { /* infix with pred in anchored table */
u32 min_off = ~0U;
u32 max_off = 0U;
- for (auto v : succs) {
+ for (auto v : rsuccs) {
for (auto u : inv_adjacent_vertices_range(v, tbi.g)) {
min_off = min(min_off, tbi.g[u].min_offset);
max_off = max(max_off, tbi.g[u].max_offset);

View File

@@ -202,10 +202,10 @@ void getForwardReach(const raw_dfa &rdfa, map<s32, CharReach> &look) {
}
for (unsigned c = 0; c < N_CHARS; c++) {
- dstate_id_t succ = ds.next[rdfa.alpha_remap[c]];
- if (succ != DEAD_STATE) {
+ dstate_id_t dnsucc = ds.next[rdfa.alpha_remap[c]];
+ if (dnsucc != DEAD_STATE) {
cr.set(c);
- next.insert(succ);
+ next.insert(dnsucc);
}
}
}

View File

@@ -75,7 +75,7 @@ string dumpMask(const vector<u8> &v) {
static
bool maskFromLeftGraph(const LeftEngInfo &left, vector<u8> &msk,
- vector<u8> &cmp) {
+ vector<u8> &lcmp) {
const u32 lag = left.lag;
const ReportID report = left.leftfix_report;
@@ -111,9 +111,9 @@ bool maskFromLeftGraph(const LeftEngInfo &left, vector<u8> &msk,
cr |= v_cr;
insert(&next, inv_adjacent_vertices(v, h));
}
- make_and_cmp_mask(cr, &msk.at(i), &cmp.at(i));
- DEBUG_PRINTF("%zu: reach=%s, msk=%u, cmp=%u\n", i,
- describeClass(cr).c_str(), msk[i], cmp[i]);
+ make_and_cmp_mask(cr, &msk.at(i), &lcmp.at(i));
+ DEBUG_PRINTF("%zu: reach=%s, msk=%u, lcmp=%u\n", i,
+ describeClass(cr).c_str(), msk[i], lcmp[i]);
curr.swap(next);
} while (i-- > 0);
@@ -122,7 +122,7 @@ bool maskFromLeftGraph(const LeftEngInfo &left, vector<u8> &msk,
static
bool maskFromLeftCastle(const LeftEngInfo &left, vector<u8> &msk,
- vector<u8> &cmp) {
+ vector<u8> &lcmp) {
const u32 lag = left.lag;
const ReportID report = left.leftfix_report;
@@ -149,23 +149,23 @@ bool maskFromLeftCastle(const LeftEngInfo &left, vector<u8> &msk,
u32 len = min_width;
u32 end = HWLM_MASKLEN - lag;
for (u32 i = end; i > end - min(end, len); i--) {
- make_and_cmp_mask(c.reach(), &msk.at(i - 1), &cmp.at(i - 1));
+ make_and_cmp_mask(c.reach(), &msk.at(i - 1), &lcmp.at(i - 1));
}
return true;
}
static
- bool maskFromLeft(const LeftEngInfo &left, vector<u8> &msk, vector<u8> &cmp) {
+ bool maskFromLeft(const LeftEngInfo &left, vector<u8> &msk, vector<u8> &lcmp) {
if (left.lag >= HWLM_MASKLEN) {
DEBUG_PRINTF("too much lag\n");
return false;
}
if (left.graph) {
- return maskFromLeftGraph(left, msk, cmp);
+ return maskFromLeftGraph(left, msk, lcmp);
} else if (left.castle) {
- return maskFromLeftCastle(left, msk, cmp);
+ return maskFromLeftCastle(left, msk, lcmp);
}
return false;
@@ -173,7 +173,7 @@ bool maskFromLeft(const LeftEngInfo &left, vector<u8> &msk, vector<u8> &cmp) {
static
bool maskFromPreds(const RoseBuildImpl &build, const rose_literal_id &id,
- const RoseVertex v, vector<u8> &msk, vector<u8> &cmp) {
+ const RoseVertex v, vector<u8> &msk, vector<u8> &lcmp) {
const RoseGraph &g = build.g;
// For right now, wuss out and only handle cases with one pred.
@@ -222,7 +222,7 @@ bool maskFromPreds(const RoseBuildImpl &build, const rose_literal_id &id,
ue2_literal::const_iterator it, ite;
for (it = u_id.s.begin() + (u_len - u_sublen), ite = u_id.s.end();
it != ite; ++it) {
- make_and_cmp_mask(*it, &msk.at(i), &cmp.at(i));
+ make_and_cmp_mask(*it, &msk.at(i), &lcmp.at(i));
++i;
}
@@ -231,21 +231,21 @@ bool maskFromPreds(const RoseBuildImpl &build, const rose_literal_id &id,
static
bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
- const RoseVertex v, vector<u8> &msk, vector<u8> &cmp) {
+ const RoseVertex v, vector<u8> &msk, vector<u8> &lcmp) {
// Start with zero masks.
msk.assign(HWLM_MASKLEN, 0);
- cmp.assign(HWLM_MASKLEN, 0);
+ lcmp.assign(HWLM_MASKLEN, 0);
const LeftEngInfo &left = build.g[v].left;
if (left && left.lag < HWLM_MASKLEN) {
- if (maskFromLeft(left, msk, cmp)) {
+ if (maskFromLeft(left, msk, lcmp)) {
DEBUG_PRINTF("mask from a leftfix!\n");
return true;
}
}
if (id.s.length() < HWLM_MASKLEN) {
- if (maskFromPreds(build, id, v, msk, cmp)) {
+ if (maskFromPreds(build, id, v, msk, lcmp)) {
DEBUG_PRINTF("mask from preds!\n");
return true;
}
@@ -255,18 +255,18 @@ bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
}
static
- bool hamsterMaskCombine(vector<u8> &msk, vector<u8> &cmp,
+ bool hamsterMaskCombine(vector<u8> &msk, vector<u8> &lcmp,
const vector<u8> &v_msk, const vector<u8> &v_cmp) {
- assert(msk.size() == HWLM_MASKLEN && cmp.size() == HWLM_MASKLEN);
+ assert(msk.size() == HWLM_MASKLEN && lcmp.size() == HWLM_MASKLEN);
assert(v_msk.size() == HWLM_MASKLEN && v_cmp.size() == HWLM_MASKLEN);
u8 all_masks = 0;
for (size_t i = 0; i < HWLM_MASKLEN; i++) {
- u8 filter = ~(cmp[i] ^ v_cmp[i]);
+ u8 filter = ~(lcmp[i] ^ v_cmp[i]);
msk[i] &= v_msk[i];
msk[i] &= filter;
- cmp[i] &= filter;
+ lcmp[i] &= filter;
all_masks |= msk[i];
}
@@ -278,7 +278,7 @@ bool hamsterMaskCombine(vector<u8> &msk, vector<u8> &cmp,
static
bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
const rose_literal_info &info, vector<u8> &msk,
- vector<u8> &cmp) {
+ vector<u8> &lcmp) {
if (!build.cc.grey.roseHamsterMasks) {
return false;
}
@@ -289,7 +289,7 @@ bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
}
msk.assign(HWLM_MASKLEN, 0);
- cmp.assign(HWLM_MASKLEN, 0);
+ lcmp.assign(HWLM_MASKLEN, 0);
size_t num = 0;
vector<u8> v_msk, v_cmp;
@@ -301,28 +301,28 @@ bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
}
if (!num++) {
- // First (or only) vertex, this becomes the mask/cmp pair.
+ // First (or only) vertex, this becomes the mask/lcmp pair.
msk = v_msk;
- cmp = v_cmp;
+ lcmp = v_cmp;
} else {
// Multiple vertices with potentially different masks. We combine
// them into an 'advisory' mask.
- if (!hamsterMaskCombine(msk, cmp, v_msk, v_cmp)) {
+ if (!hamsterMaskCombine(msk, lcmp, v_msk, v_cmp)) {
DEBUG_PRINTF("mask went to zero\n");
return false;
}
}
}
- normaliseLiteralMask(id.s, msk, cmp);
+ normaliseLiteralMask(id.s, msk, lcmp);
if (msk.empty()) {
DEBUG_PRINTF("no mask\n");
return false;
}
- DEBUG_PRINTF("msk=%s, cmp=%s\n", dumpMask(msk).c_str(),
- dumpMask(cmp).c_str());
+ DEBUG_PRINTF("msk=%s, lcmp=%s\n", dumpMask(msk).c_str(),
+ dumpMask(lcmp).c_str());
return true;
}
@@ -357,13 +357,13 @@ void findMoreLiteralMasks(RoseBuildImpl &build) {
const auto &lit = build.literals.at(id);
auto &lit_info = build.literal_info.at(id);
- vector<u8> msk, cmp;
- if (!addSurroundingMask(build, lit, lit_info, msk, cmp)) {
+ vector<u8> msk, lcmp;
+ if (!addSurroundingMask(build, lit, lit_info, msk, lcmp)) {
continue;
}
DEBUG_PRINTF("found surrounding mask for lit_id=%u (%s)\n", id,
dumpString(lit.s).c_str());
- u32 new_id = build.getLiteralId(lit.s, msk, cmp, lit.delay, lit.table);
+ u32 new_id = build.getLiteralId(lit.s, msk, lcmp, lit.delay, lit.table);
if (new_id == id) {
continue;
}
@@ -392,7 +392,7 @@ void findMoreLiteralMasks(RoseBuildImpl &build) {
// mixed-case is mandatory.
static
void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
- vector<u8> &cmp) {
+ vector<u8> &lcmp) {
const size_t suffix_len = min(id.s.length(), size_t{HWLM_MASKLEN});
bool mixed_suffix = mixed_sensitivity_in(id.s.end() - suffix_len,
id.s.end());
@@ -403,7 +403,7 @@ void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
while (msk.size() < HWLM_MASKLEN) {
msk.insert(msk.begin(), 0);
- cmp.insert(cmp.begin(), 0);
+ lcmp.insert(lcmp.begin(), 0);
}
if (!id.msk.empty()) {
@@ -413,7 +413,7 @@ void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
size_t mand_offset = msk.size() - i - 1;
size_t lit_offset = id.msk.size() - i - 1;
msk[mand_offset] = id.msk[lit_offset];
- cmp[mand_offset] = id.cmp[lit_offset];
+ lcmp[mand_offset] = id.cmp[lit_offset];
}
}
@@ -425,12 +425,12 @@ void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
size_t offset = HWLM_MASKLEN - i - 1;
DEBUG_PRINTF("offset %zu must match 0x%02x exactly\n", offset,
c.c);
- make_and_cmp_mask(c, &msk[offset], &cmp[offset]);
+ make_and_cmp_mask(c, &msk[offset], &lcmp[offset]);
}
}
}
- normaliseLiteralMask(id.s, msk, cmp);
+ normaliseLiteralMask(id.s, msk, lcmp);
}
static
@@ -704,7 +704,7 @@ void addFragmentLiteral(const RoseBuildImpl &build, MatcherProto &mp,
lit.s.length());
vector<u8> msk = lit.msk; // copy
- vector<u8> cmp = lit.cmp; // copy
+ vector<u8> lcmp = lit.cmp; // copy
bool noruns = isNoRunsFragment(build, f, max_len);
DEBUG_PRINTF("fragment is %s\n", noruns ? "noruns" : "not noruns");
@@ -720,24 +720,24 @@ void addFragmentLiteral(const RoseBuildImpl &build, MatcherProto &mp,
assert(!noruns);
}
- addLiteralMask(lit, msk, cmp);
+ addLiteralMask(lit, msk, lcmp);
const auto &s_final = lit_final.get_string();
bool nocase = lit_final.any_nocase();
- DEBUG_PRINTF("id=%u, s='%s', nocase=%d, noruns=%d, msk=%s, cmp=%s\n",
+ DEBUG_PRINTF("id=%u, s='%s', nocase=%d, noruns=%d, msk=%s, lcmp=%s\n",
f.fragment_id, escapeString(s_final).c_str(), (int)nocase,
- noruns, dumpMask(msk).c_str(), dumpMask(cmp).c_str());
+ noruns, dumpMask(msk).c_str(), dumpMask(lcmp).c_str());
- if (!maskIsConsistent(s_final, nocase, msk, cmp)) {
- DEBUG_PRINTF("msk/cmp for literal can't match, skipping\n");
+ if (!maskIsConsistent(s_final, nocase, msk, lcmp)) {
+ DEBUG_PRINTF("msk/lcmp for literal can't match, skipping\n");
return;
}
const auto &groups = f.groups;
mp.lits.emplace_back(std::move(s_final), nocase, noruns, f.fragment_id,
- groups, msk, cmp);
+ groups, msk, lcmp);
}
static
@@ -748,11 +748,11 @@ void addAccelLiteral(MatcherProto &mp, const rose_literal_id &lit,
DEBUG_PRINTF("lit='%s' (len %zu)\n", dumpString(s).c_str(), s.length());
vector<u8> msk = lit.msk; // copy
- vector<u8> cmp = lit.cmp; // copy
- addLiteralMask(lit, msk, cmp);
+ vector<u8> lcmp = lit.cmp; // copy
+ addLiteralMask(lit, msk, lcmp);
- if (!maskIsConsistent(s.get_string(), s.any_nocase(), msk, cmp)) {
- DEBUG_PRINTF("msk/cmp for literal can't match, skipping\n");
+ if (!maskIsConsistent(s.get_string(), s.any_nocase(), msk, lcmp)) {
+ DEBUG_PRINTF("msk/lcmp for literal can't match, skipping\n");
return;
}
@@ -761,9 +761,9 @@ void addAccelLiteral(MatcherProto &mp, const rose_literal_id &lit,
string s_final = lit.s.get_string();
trim_to_suffix(s_final, max_len);
trim_to_suffix(msk, max_len);
- trim_to_suffix(cmp, max_len);
+ trim_to_suffix(lcmp, max_len);
- mp.accel_lits.emplace_back(s_final, lit.s.any_nocase(), msk, cmp,
+ mp.accel_lits.emplace_back(s_final, lit.s.any_nocase(), msk, lcmp,
info.group_mask);
}

View File

@@ -794,9 +794,9 @@ template<typename VertexCont>
static never_inline
bool checkPredDelays(const RoseBuildImpl &build, const VertexCont &v1,
const VertexCont &v2) {
- flat_set<RoseVertex> preds;
+ flat_set<RoseVertex> fpreds;
for (auto v : v1) {
- insert(&preds, inv_adjacent_vertices(v, build.g));
+ insert(&fpreds, inv_adjacent_vertices(v, build.g));
}
flat_set<u32> pred_lits;
@@ -811,7 +811,7 @@ bool checkPredDelays(const RoseBuildImpl &build, const VertexCont &v1,
insert(&known_good_preds, inv_adjacent_vertices(v, build.g));
}
- for (auto u : preds) {
+ for (auto u : fpreds) {
if (!contains(known_good_preds, u)) {
insert(&pred_lits, build.g[u].literals);
}
@@ -1536,11 +1536,11 @@ private:
static
flat_set<pair<size_t, u32>> get_pred_tops(RoseVertex v, const RoseGraph &g) {
- flat_set<pair<size_t, u32>> preds;
+ flat_set<pair<size_t, u32>> fpreds;
for (const auto &e : in_edges_range(v, g)) {
- preds.emplace(g[source(e, g)].index, g[e].rose_top);
+ fpreds.emplace(g[source(e, g)].index, g[e].rose_top);
}
- return preds;
+ return fpreds;
}
/**
@@ -1592,14 +1592,14 @@ void dedupeLeftfixesVariableLag(RoseBuildImpl &build) {
assert(!is_triggered(*left.graph()) || onlyOneTop(*left.graph()));
}
- auto preds = get_pred_tops(verts.front(), g);
+ auto vpreds = get_pred_tops(verts.front(), g);
for (RoseVertex v : verts) {
- if (preds != get_pred_tops(v, g)) {
+ if (vpreds != get_pred_tops(v, g)) {
DEBUG_PRINTF("distinct pred sets\n");
continue;
}
}
- engine_groups[DedupeLeftKey(build, std::move(preds), left)].emplace_back(left);
+ engine_groups[DedupeLeftKey(build, std::move(vpreds), left)].emplace_back(left);
}
/* We don't bother chunking as we expect deduping to be successful if the
@@ -2054,8 +2054,8 @@ void mergeCastleLeftfixes(RoseBuildImpl &build) {
DEBUG_PRINTF("chunked castles into %zu groups\n", chunks.size());
- for (auto &chunk : chunks) {
- mergeCastleChunk(build, chunk, eng_verts);
+ for (auto &cchunk : chunks) {
+ mergeCastleChunk(build, cchunk, eng_verts);
}
}
@@ -2449,13 +2449,13 @@ void chunkedDfaMerge(vector<RawDfa *> &dfas,
DEBUG_PRINTF("begin merge of %zu dfas\n", dfas.size());
vector<RawDfa *> out_dfas;
- vector<RawDfa *> chunk;
+ vector<RawDfa *> dchunk;
for (auto it = begin(dfas), ite = end(dfas); it != ite; ++it) {
- chunk.emplace_back(*it);
- if (chunk.size() >= DFA_CHUNK_SIZE_MAX || next(it) == ite) {
- pairwiseDfaMerge(chunk, dfa_mapping, outfixes, merge_func);
- out_dfas.insert(end(out_dfas), begin(chunk), end(chunk));
- chunk.clear();
+ dchunk.emplace_back(*it);
+ if (dchunk.size() >= DFA_CHUNK_SIZE_MAX || next(it) == ite) {
+ pairwiseDfaMerge(dchunk, dfa_mapping, outfixes, merge_func);
+ out_dfas.insert(end(out_dfas), begin(dchunk), end(dchunk));
+ dchunk.clear();
}
}
@@ -2798,8 +2798,8 @@ void mergeCastleSuffixes(RoseBuildImpl &build) {
eng_verts[c].emplace_back(v);
}
- for (auto &chunk : by_reach | map_values) {
- mergeCastleSuffixChunk(g, chunk, eng_verts);
+ for (auto &cchunk : by_reach | map_values) {
+ mergeCastleSuffixChunk(g, cchunk, eng_verts);
}
}

View File

@@ -1719,18 +1719,18 @@ void getLeftMergeSiblings(const RoseBuildImpl &build, RoseVertex a,
assert(!g[a].literals.empty());
u32 lit_id = *g[a].literals.begin();
const auto &verts = build.literal_info.at(lit_id).vertices;
- RoseVertex pred = pickPred(a, g, build);
+ RoseVertex ppred = pickPred(a, g, build);
siblings.clear();
- if (pred == RoseGraph::null_vertex() || build.isAnyStart(pred) ||
- out_degree(pred, g) > verts.size()) {
+ if (ppred == RoseGraph::null_vertex() || build.isAnyStart(ppred) ||
+ out_degree(ppred, g) > verts.size()) {
// Select sibling from amongst the vertices that share a literal.
insert(&siblings, siblings.end(), verts);
} else {
// Select sibling from amongst the vertices that share a
// predecessor.
- insert(&siblings, siblings.end(), adjacent_vertices(pred, g));
+ insert(&siblings, siblings.end(), adjacent_vertices(ppred, g));
}
}
@@ -1853,14 +1853,14 @@ void splitByRightProps(const RoseGraph &g,
vector<vector<RoseVertex>> &buckets) {
// Successor vector used in make_split_key. We declare it here so we can
// reuse storage.
- vector<RoseVertex> succ;
+ vector<RoseVertex> vsucc;
// Split by {successors, literals, reports}.
auto make_split_key = [&](RoseVertex v) {
- succ.clear();
- insert(&succ, succ.end(), adjacent_vertices(v, g));
- sort(succ.begin(), succ.end());
- return hash_all(g[v].literals, g[v].reports, succ);
+ vsucc.clear();
+ insert(&vsucc, vsucc.end(), adjacent_vertices(v, g));
+ sort(vsucc.begin(), vsucc.end());
+ return hash_all(g[v].literals, g[v].reports, vsucc);
};
splitAndFilterBuckets(buckets, make_split_key);
}