Merge pull request #265 from isildur-g/wip-isildur-g-cppcheck66

Addressing cppcheck shadowFunction warnings by renaming local variables and parameters that shadow function names.
Konstantinos Margaritis 2024-05-10 22:43:05 +03:00 committed by GitHub
commit cd1e13d4d2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
25 changed files with 314 additions and 315 deletions

View File

@ -294,7 +294,7 @@ vector<vector<u32>> checkExclusion(u32 &streamStateSize,
size_t lower = 0;
size_t total = 0;
while (lower < trigSize) {
vector<CliqueVertex> vertices;
vector<CliqueVertex> clvertices;
unique_ptr<CliqueGraph> cg = make_unique<CliqueGraph>();
vector<vector<size_t>> min_reset_dist;
@ -302,7 +302,7 @@ vector<vector<u32>> checkExclusion(u32 &streamStateSize,
// get min reset distance for each repeat
for (size_t i = lower; i < upper; i++) {
CliqueVertex v = add_vertex(CliqueVertexProps(i), *cg);
vertices.emplace_back(v);
clvertices.emplace_back(v);
const vector<size_t> &tmp_dist =
minResetDistToEnd(triggers[i], cr);
@ -311,11 +311,11 @@ vector<vector<u32>> checkExclusion(u32 &streamStateSize,
// find exclusive pair for each repeat
for (size_t i = lower; i < upper; i++) {
CliqueVertex s = vertices[i - lower];
CliqueVertex s = clvertices[i - lower];
for (size_t j = i + 1; j < upper; j++) {
if (findExclusivePair(i, j, lower, min_reset_dist,
triggers)) {
CliqueVertex d = vertices[j - lower];
CliqueVertex d = clvertices[j - lower];
add_edge(s, d, *cg);
}
}

View File

@ -485,15 +485,15 @@ void collapseVariableDotRepeat(NGHolder &g, NFAVertex start,
// Collect all the other optional dot vertices and the successor vertices
// by walking down the graph from initialDot
set<NFAVertex> dots, succ;
if (!gatherParticipants(g, start, initialDot, dots, succ)) {
set<NFAVertex> dots, succr;
if (!gatherParticipants(g, start, initialDot, dots, succr)) {
DEBUG_PRINTF("gatherParticipants failed\n");
return;
}
DEBUG_PRINTF("optional dot repeat with %zu participants, "
"terminating in %zu non-dot nodes\n",
dots.size(), succ.size());
dots.size(), succr.size());
// Remove all the participants and set the start offset
dead.insert(dots.begin(), dots.end());
@ -509,7 +509,7 @@ void collapseVariableDotRepeat(NGHolder &g, NFAVertex start,
assert(startEnd->is_reachable());
// Connect our successor vertices to both start and startDs.
for (auto v : succ) {
for (auto v : succr) {
add_edge_if_not_present(g.start, v, g);
add_edge_if_not_present(g.startDs, v, g);
}

View File

@ -506,14 +506,14 @@ bool transformMinLengthToRepeat(NGHolder &g, ReportManager &rm) {
while (v != cyclic) {
DEBUG_PRINTF("vertex %zu\n", g[v].index);
width++;
auto succ = succs(v, g);
if (contains(succ, cyclic)) {
if (succ.size() == 1) {
auto s = succs(v, g);
if (contains(s, cyclic)) {
if (s.size() == 1) {
v = cyclic;
} else if (succ.size() == 2) {
} else if (s.size() == 2) {
// Cyclic and jump edge.
succ.erase(cyclic);
NFAVertex v2 = *succ.begin();
s.erase(cyclic);
NFAVertex v2 = *s.begin();
if (!edge(cyclic, v2, g).second) {
DEBUG_PRINTF("bad form\n");
return false;
@ -524,11 +524,11 @@ bool transformMinLengthToRepeat(NGHolder &g, ReportManager &rm) {
return false;
}
} else {
if (succ.size() != 1) {
if (s.size() != 1) {
DEBUG_PRINTF("bad form\n");
return false;
}
v = *succ.begin();
v = *s.begin();
}
}
@ -544,12 +544,12 @@ bool transformMinLengthToRepeat(NGHolder &g, ReportManager &rm) {
while (!is_any_accept(v, g)) {
DEBUG_PRINTF("vertex %zu\n", g[v].index);
width++;
auto succ = succs(v, g);
if (succ.size() != 1) {
auto s = succs(v, g);
if (s.size() != 1) {
DEBUG_PRINTF("bad form\n");
return false;
}
v = *succ.begin();
v = *s.begin();
}
int offsetAdjust = 0;
@ -569,14 +569,14 @@ bool transformMinLengthToRepeat(NGHolder &g, ReportManager &rm) {
return true;
}
vector<NFAVertex> preds;
vector<NFAVertex> predcs;
vector<NFAEdge> dead;
for (auto u : inv_adjacent_vertices_range(cyclic, g)) {
DEBUG_PRINTF("pred %zu\n", g[u].index);
if (u == cyclic) {
continue;
}
preds.emplace_back(u);
predcs.emplace_back(u);
// We want to delete the out-edges of each predecessor, but need to
// make sure we don't delete the startDs self loop.
@ -589,7 +589,7 @@ bool transformMinLengthToRepeat(NGHolder &g, ReportManager &rm) {
remove_edges(dead, g);
assert(!preds.empty());
assert(!predcs.empty());
const CharReach &cr = g[cyclic].char_reach;
@ -597,14 +597,14 @@ bool transformMinLengthToRepeat(NGHolder &g, ReportManager &rm) {
v = add_vertex(g);
g[v].char_reach = cr;
for (auto u : preds) {
for (auto u : predcs) {
add_edge(u, v, g);
}
preds.clear();
preds.emplace_back(v);
predcs.clear();
predcs.emplace_back(v);
}
assert(!preds.empty());
for (auto u : preds) {
assert(!predcs.empty());
for (auto u : predcs) {
add_edge(u, cyclic, g);
}

View File

@ -66,15 +66,15 @@ bool findMask(const NGHolder &g, vector<CharReach> *mask, bool *anchored,
return false;
}
set<NFAVertex> &succs = *anchored ? s_succ : sds_succ;
succs.erase(g.startDs);
if (succs.size() != 1) {
set<NFAVertex> &succrs = *anchored ? s_succ : sds_succ;
succrs.erase(g.startDs);
if (succrs.size() != 1) {
DEBUG_PRINTF("branchy root\n");
return false;
}
NFAVertex u = *anchored ? g.start : g.startDs;
NFAVertex v = *succs.begin();
NFAVertex v = *succrs.begin();
while (true) {
DEBUG_PRINTF("validating vertex %zu\n", g[v].index);

View File

@ -71,13 +71,13 @@ vector<flat_set<NFAVertex>> gatherSuccessorsByDepth(const NGHolder &g,
continue;
}
for (auto succ : adjacent_vertices_range(v, g)) {
for (auto succr : adjacent_vertices_range(v, g)) {
// ignore self-loops
if (v == succ) {
if (v == succr) {
continue;
}
DEBUG_PRINTF("Node %zu depth %u\n", g[succ].index, d + 1);
next.insert(succ);
next.insert(succr);
}
}
result[d] = next;
@ -113,13 +113,13 @@ vector<flat_set<NFAVertex>> gatherPredecessorsByDepth(const NGHolder &g,
for (unsigned d = 1; d < depth; d++) {
// collect all successors for all current level vertices
for (auto v : cur) {
for (auto pred : inv_adjacent_vertices_range(v, g)) {
for (auto predc : inv_adjacent_vertices_range(v, g)) {
// ignore self-loops
if (v == pred) {
if (v == predc) {
continue;
}
DEBUG_PRINTF("Node %zu depth %u\n", g[pred].index, d + 1);
next.insert(pred);
next.insert(predc);
}
}
result[d] = next;
@ -584,9 +584,9 @@ private:
bool clone = false;
for (const auto &pair : reports_to_vertices) {
const auto &reports = pair.first;
const auto &vertices = pair.second;
const auto &svertices = pair.second;
for (auto src : vertices) {
for (auto src : svertices) {
// get all predecessors up to edit distance
auto src_vertices_by_depth =
gatherPredecessorsByDepth(g, src, edit_distance);
@ -602,8 +602,8 @@ private:
assert(targets.size());
for (unsigned d = 0; d < src_vertices_by_depth.size(); d++) {
const auto &preds = src_vertices_by_depth[d];
for (auto v : preds) {
const auto &predcs = src_vertices_by_depth[d];
for (auto v : predcs) {
// only clone a node if it already contains reports
if (clone && !g[v].reports.empty()) {
create_clone(v, reports, edit_distance - d,

View File

@ -342,7 +342,7 @@ void attemptToUseAsStart(const NGHolder &g, NFAVertex u,
map<NFAVertex, flat_set<u32>> &unhandled_succ_tops,
map<u32, set<NFAVertex>> &tops_out) {
flat_set<u32> top_inter = unhandled_succ_tops.at(u);
flat_set<NFAVertex> succs;
flat_set<NFAVertex> f_succs;
for (NFAVertex v : adjacent_vertices_range(u, g)) {
if (!contains(unhandled_succ_tops, v)) {
return;
@ -360,7 +360,7 @@ void attemptToUseAsStart(const NGHolder &g, NFAVertex u,
set_intersection(top_inter.begin(), top_inter.end(),
v_tops.begin(), v_tops.end(), ni_inserter);
top_inter = std::move(new_inter);
succs.insert(v);
f_succs.insert(v);
}
if (top_inter.empty()) {
@ -373,7 +373,7 @@ void attemptToUseAsStart(const NGHolder &g, NFAVertex u,
}
DEBUG_PRINTF("reusing %zu is a start vertex\n", g[u].index);
markTopSuccAsHandled(u, top_inter, succs, tops_out, unhandled_top_succs,
markTopSuccAsHandled(u, top_inter, f_succs, tops_out, unhandled_top_succs,
unhandled_succ_tops);
}

View File

@ -62,12 +62,12 @@ namespace ue2 {
static
void findAccelFriendGeneration(const NGHolder &g, const CharReach &cr,
const flat_set<NFAVertex> &cands,
const flat_set<NFAVertex> &preds,
const flat_set<NFAVertex> &f_preds,
flat_set<NFAVertex> *next_cands,
flat_set<NFAVertex> *next_preds,
flat_set<NFAVertex> *friends) {
for (auto v : cands) {
if (contains(preds, v)) {
if (contains(f_preds, v)) {
continue;
}
@ -80,7 +80,7 @@ void findAccelFriendGeneration(const NGHolder &g, const CharReach &cr,
}
for (auto u : inv_adjacent_vertices_range(v, g)) {
if (!contains(preds, u)) {
if (!contains(f_preds, u)) {
DEBUG_PRINTF("bad pred\n");
goto next_cand;
}
@ -116,8 +116,8 @@ void findAccelFriends(const NGHolder &g, NFAVertex v,
u32 friend_depth = offset + 1;
flat_set<NFAVertex> preds;
insert(&preds, inv_adjacent_vertices(v, g));
flat_set<NFAVertex> f_preds;
insert(&f_preds, inv_adjacent_vertices(v, g));
const CharReach &cr = g[v].char_reach;
flat_set<NFAVertex> cands;
@ -126,9 +126,9 @@ void findAccelFriends(const NGHolder &g, NFAVertex v,
flat_set<NFAVertex> next_preds;
flat_set<NFAVertex> next_cands;
for (u32 i = 0; i < friend_depth; i++) {
findAccelFriendGeneration(g, cr, cands, preds, &next_cands, &next_preds,
findAccelFriendGeneration(g, cr, cands, f_preds, &next_cands, &next_preds,
friends);
preds.insert(next_preds.begin(), next_preds.end());
f_preds.insert(next_preds.begin(), next_preds.end());
next_preds.clear();
cands.swap(next_cands);
next_cands.clear();

View File

@ -94,7 +94,7 @@ void transition_graph(autom &nfa, const std::vector<NFAVertex> &vByStateId,
/* generate top transitions, false -> top = selfloop */
bool top_allowed = is_triggered(graph);
StateSet succ = nfa.dead;
StateSet succr = nfa.dead;
for (size_t i = in.find_first(); i != in.npos; i = in.find_next(i)) {
NFAVertex u = vByStateId[i];
@ -102,7 +102,7 @@ void transition_graph(autom &nfa, const std::vector<NFAVertex> &vByStateId,
if (contains(unused, v)) {
continue;
}
succ.set(graph[v].index);
succr.set(graph[v].index);
}
if (top_allowed && !nfa.toppable.test(i)) {
@ -112,15 +112,15 @@ void transition_graph(autom &nfa, const std::vector<NFAVertex> &vByStateId,
}
}
StateSet active_squash = succ & squash;
StateSet active_squash = succr & squash;
if (active_squash.any()) {
for (size_t j = active_squash.find_first(); j != active_squash.npos;
j = active_squash.find_next(j)) {
succ &= squash_mask.find(j)->second;
succr &= squash_mask.find(j)->second;
}
}
for (size_t j = succ.find_first(); j != succ.npos; j = succ.find_next(j)) {
for (size_t j = succr.find_first(); j != succr.npos; j = succr.find_next(j)) {
const CharReach &cr = cr_by_index[j];
for (size_t s = cr.find_first(); s != cr.npos; s = cr.find_next(s)) {
next[s].set(j); /* already alpha'ed */

View File

@ -398,19 +398,19 @@ CharReach reduced_cr(NFAVertex v, const NGHolder &g,
return v_cr;
}
NFAVertex pred = getSoleSourceVertex(g, v);
assert(pred);
NFAVertex s_pred = getSoleSourceVertex(g, v);
assert(s_pred);
/* require pred to be fed by one vertex OR (start + startDS) */
/* require s_pred to be fed by one vertex OR (start + startDS) */
NFAVertex predpred;
size_t idp = in_degree(pred, g);
if (hasSelfLoop(pred, g)) {
size_t idp = in_degree(s_pred, g);
if (hasSelfLoop(s_pred, g)) {
return v_cr; /* not cliche */
} else if (idp == 1) {
predpred = getSoleSourceVertex(g, pred);
predpred = getSoleSourceVertex(g, s_pred);
} else if (idp == 2
&& edge(g.start, pred, g).second
&& edge(g.startDs, pred, g).second) {
&& edge(g.start, s_pred, g).second
&& edge(g.startDs, s_pred, g).second) {
predpred = g.startDs;
} else {
return v_cr; /* not cliche */
@ -419,7 +419,7 @@ CharReach reduced_cr(NFAVertex v, const NGHolder &g,
assert(predpred);
/* require predpred to be cyclic and its cr to be a superset of
pred and v */
s_pred and v */
if (!hasSelfLoop(predpred, g)) {
return v_cr; /* not cliche */
}
@ -429,7 +429,7 @@ CharReach reduced_cr(NFAVertex v, const NGHolder &g,
return v_cr; /* fake cyclic */
}
const CharReach &p_cr = g[pred].char_reach;
const CharReach &p_cr = g[s_pred].char_reach;
const CharReach &pp_cr = g[predpred].char_reach;
if (!v_cr.isSubsetOf(pp_cr) || !p_cr.isSubsetOf(pp_cr)) {
return v_cr; /* not cliche */
@ -440,7 +440,7 @@ CharReach reduced_cr(NFAVertex v, const NGHolder &g,
set<NFAVertex> v_succ;
insert(&v_succ, adjacent_vertices(v, g));
set<NFAVertex> p_succ;
insert(&p_succ, adjacent_vertices(pred, g));
insert(&p_succ, adjacent_vertices(s_pred, g));
if (!is_subset_of(v_succ, p_succ)) {
DEBUG_PRINTF("fail\n");
@ -450,7 +450,7 @@ CharReach reduced_cr(NFAVertex v, const NGHolder &g,
if (contains(v_succ, g.accept) || contains(v_succ, g.acceptEod)) {
/* need to check that reports of v are a subset of p's */
if (!is_subset_of(g[v].reports,
g[pred].reports)) {
g[s_pred].reports)) {
DEBUG_PRINTF("fail - reports not subset\n");
return v_cr; /* not cliche */
}

View File

@ -391,9 +391,9 @@ void checkReachSubgraphs(const NGHolder &g, vector<ReachSubgraph> &rs,
unordered_set<NFAVertex> involved(rsi.vertices.begin(),
rsi.vertices.end());
unordered_set<NFAVertex> tail(involved); // to look for back-edges.
unordered_set<NFAVertex> pred, succ;
proper_pred(g, rsi.vertices.front(), pred);
proper_succ(g, rsi.vertices.back(), succ);
unordered_set<NFAVertex> v_pred, v_succ;
proper_pred(g, rsi.vertices.front(), v_pred);
proper_succ(g, rsi.vertices.back(), v_succ);
flat_set<ReportID> reports;
findFirstReports(g, rsi, reports);
@ -404,7 +404,7 @@ void checkReachSubgraphs(const NGHolder &g, vector<ReachSubgraph> &rs,
for (auto v : rsi.vertices) {
tail.erase(v); // now contains all vertices _after_ this one.
if (vertexIsBad(g, v, involved, tail, pred, succ, reports)) {
if (vertexIsBad(g, v, involved, tail, v_pred, v_succ, reports)) {
recalc = true;
continue;
}
@ -788,10 +788,10 @@ void replaceSubgraphWithSpecial(NGHolder &g, ReachSubgraph &rsi,
const unordered_set<NFAVertex> involved(rsi.vertices.begin(),
rsi.vertices.end());
vector<NFAVertex> succs;
getSuccessors(g, rsi, &succs);
vector<NFAVertex> g_succs;
getSuccessors(g, rsi, &g_succs);
unpeelNearEnd(g, rsi, depths, &succs);
unpeelNearEnd(g, rsi, depths, &g_succs);
// Create our replacement cyclic state with the same reachability and
// report info as the last vertex in our topo-ordered list.
@ -819,7 +819,7 @@ void replaceSubgraphWithSpecial(NGHolder &g, ReachSubgraph &rsi,
// Wire cyclic state to tug trigger states built from successors.
vector<NFAVertex> tugs;
for (auto v : succs) {
for (auto v : g_succs) {
buildTugTrigger(g, cyclic, v, involved, depths, tugs);
}
created.insert(tugs.begin(), tugs.end());
@ -857,8 +857,8 @@ void replaceSubgraphWithLazySpecial(NGHolder &g, ReachSubgraph &rsi,
const unordered_set<NFAVertex> involved(rsi.vertices.begin(),
rsi.vertices.end());
vector<NFAVertex> succs;
getSuccessors(g, rsi, &succs);
vector<NFAVertex> g_succs;
getSuccessors(g, rsi, &g_succs);
// Create our replacement cyclic state with the same reachability and
// report info as the last vertex in our topo-ordered list.
@ -887,15 +887,15 @@ void replaceSubgraphWithLazySpecial(NGHolder &g, ReachSubgraph &rsi,
// In the rose case, our tug is our cyclic, and it's wired to our
// successors (which should be just the accept).
vector<NFAVertex> tugs;
assert(succs.size() == 1);
for (auto v : succs) {
assert(g_succs.size() == 1);
for (auto v : g_succs) {
add_edge(cyclic, v, g);
}
// Wire pos trigger to accept if min repeat is one -- this deals with cases
// where we can get a pos and tug trigger on the same byte.
if (rsi.repeatMin == depth(1)) {
for (auto v : succs) {
for (auto v : g_succs) {
add_edge(pos_trigger, v, g);
g[pos_trigger].reports = g[cyclic].reports;
}
@ -1456,9 +1456,9 @@ struct StrawWalker {
}
if (ai != ae) {
DEBUG_PRINTF("more than one succ\n");
set<NFAVertex> succs;
insert(&succs, adjacent_vertices(v, g));
succs.erase(v);
set<NFAVertex> a_succs;
insert(&a_succs, adjacent_vertices(v, g));
a_succs.erase(v);
for (tie(ai, ae) = adjacent_vertices(v, g); ai != ae; ++ai) {
next = *ai;
DEBUG_PRINTF("checking %zu\n", g[next].index);
@ -1468,7 +1468,7 @@ struct StrawWalker {
set<NFAVertex> lsuccs;
insert(&lsuccs, adjacent_vertices(next, g));
if (lsuccs != succs) {
if (lsuccs != a_succs) {
continue;
}
@ -1895,9 +1895,9 @@ bool improveLeadingRepeat(NGHolder &g, const BoundedRepeatData &rd,
}
vector<NFAVertex> straw;
NFAVertex pred =
NFAVertex w_pred =
walkStrawToCyclicRev(g, rd.pos_trigger, all_repeats, straw);
if (pred != g.startDs) {
if (w_pred != g.startDs) {
DEBUG_PRINTF("straw walk doesn't lead to startDs\n");
return false;
}
@ -1999,9 +1999,9 @@ bool improveLeadingRepeatOutfix(NGHolder &g, BoundedRepeatData &rd,
}
vector<NFAVertex> straw;
NFAVertex pred =
NFAVertex w_pred =
walkStrawToCyclicRev(g, rd.pos_trigger, all_repeats, straw);
if (pred != g.startDs) {
if (w_pred != g.startDs) {
DEBUG_PRINTF("straw walk doesn't lead to startDs\n");
return false;
}

View File

@ -876,18 +876,18 @@ bool beginsWithDotStar(const NGHolder &g) {
// We can ignore the successors of start, as matches that begin there will
// necessarily have a SOM of 0.
set<NFAVertex> succ;
insert(&succ, adjacent_vertices(g.startDs, g));
succ.erase(g.startDs);
set<NFAVertex> a_succ;
insert(&a_succ, adjacent_vertices(g.startDs, g));
a_succ.erase(g.startDs);
for (auto v : succ) {
for (auto v : a_succ) {
// We want 'dot' states that aren't virtual starts.
if (g[v].char_reach.all() &&
!g[v].assert_flags) {
hasDot = true;
set<NFAVertex> dotsucc;
insert(&dotsucc, adjacent_vertices(v, g));
if (dotsucc != succ) {
if (dotsucc != a_succ) {
DEBUG_PRINTF("failed dot-star succ check\n");
return false;
}

View File

@ -114,7 +114,7 @@ bool forkVertex(NFAVertex v, NGHolder &g, vector<DepthMinMax> &depths,
for (const auto &group : predGroups) {
const depth &predDepth = group.first;
const vector<NFAEdge> &preds = group.second;
const vector<NFAEdge> &gspreds = group.second;
// Clone v for this depth with all its associated out-edges.
u32 clone_idx = depths.size(); // next index to be used
@ -130,8 +130,8 @@ bool forkVertex(NFAVertex v, NGHolder &g, vector<DepthMinMax> &depths,
add_edge(clone, target(e, g), g[e], g);
}
// Add in-edges from preds in this group.
for (const auto &e : preds) {
// Add in-edges from gspreds in this group.
for (const auto &e : gspreds) {
add_edge(source(e, g), clone, g[e], g);
}
}

View File

@ -255,19 +255,19 @@ void buildSquashMask(NFAStateSet &mask, const NGHolder &g, NFAVertex v,
}
static
void buildSucc(NFAStateSet &succ, const NGHolder &g, NFAVertex v) {
void buildSucc(NFAStateSet &ssucc, const NGHolder &g, NFAVertex v) {
for (auto w : adjacent_vertices_range(v, g)) {
if (!is_special(w, g)) {
succ.set(g[w].index);
ssucc.set(g[w].index);
}
}
}
static
void buildPred(NFAStateSet &pred, const NGHolder &g, NFAVertex v) {
void buildPred(NFAStateSet &spred, const NGHolder &g, NFAVertex v) {
for (auto u : inv_adjacent_vertices_range(v, g)) {
if (!is_special(u, g)) {
pred.set(g[u].index);
spred.set(g[u].index);
}
}
}
@ -408,19 +408,19 @@ unordered_map<NFAVertex, NFAStateSet> findSquashers(const NGHolder &g,
DEBUG_PRINTF("state %u is cyclic\n", i);
NFAStateSet mask(numStates), succ(numStates), pred(numStates);
NFAStateSet mask(numStates), ssucc(numStates), spred(numStates);
buildSquashMask(mask, g, v, cr, initStates, vByIndex, pdom_tree, som,
som_depths, region_map, cache);
buildSucc(succ, g, v);
buildPred(pred, g, v);
buildSucc(ssucc, g, v);
buildPred(spred, g, v);
const auto &reports = g[v].reports;
for (size_t j = succ.find_first(); j != succ.npos;
j = succ.find_next(j)) {
for (size_t j = ssucc.find_first(); j != ssucc.npos;
j = ssucc.find_next(j)) {
NFAVertex vj = vByIndex[j];
NFAStateSet pred2(numStates);
buildPred(pred2, g, vj);
if (pred2 == pred) {
if (pred2 == spred) {
DEBUG_PRINTF("adding the sm from %zu to %u's sm\n", j, i);
NFAStateSet tmp(numStates);
buildSquashMask(tmp, g, vj, cr, initStates, vByIndex, pdom_tree,
@ -429,14 +429,14 @@ unordered_map<NFAVertex, NFAStateSet> findSquashers(const NGHolder &g,
}
}
for (size_t j = pred.find_first(); j != pred.npos;
j = pred.find_next(j)) {
for (size_t j = spred.find_first(); j != spred.npos;
j = spred.find_next(j)) {
NFAVertex vj = vByIndex[j];
NFAStateSet succ2(numStates);
buildSucc(succ2, g, vj);
/* we can use j as a basis for squashing if its succs are a subset
* of ours */
if ((succ2 & ~succ).any()) {
if ((succ2 & ~ssucc).any()) {
continue;
}

View File

@ -709,15 +709,15 @@ u32 removeTrailingLiteralStates(NGHolder &g, const ue2_literal &lit,
assert(delay <= lit.length());
DEBUG_PRINTF("managed delay %u (of max %u)\n", delay, max_delay);
set<NFAVertex> pred;
set<NFAVertex> predv;
for (auto v : curr) {
insert(&pred, inv_adjacent_vertices_range(v, g));
insert(&predv, inv_adjacent_vertices_range(v, g));
}
clear_in_edges(g.accept, g);
clearReports(g);
for (auto v : pred) {
for (auto v : predv) {
NFAEdge e = add_edge(v, g.accept, g);
g[v].reports.insert(0);
if (is_triggered(g) && v == g.start) {

View File

@ -703,13 +703,13 @@ unique_ptr<VertLitInfo> findBestSplit(const NGHolder &g,
}
}
auto cmp = LitComparator(g, seeking_anchored, seeking_transient,
auto lcmp = LitComparator(g, seeking_anchored, seeking_transient,
last_chance);
unique_ptr<VertLitInfo> best = std::move(lits.back());
lits.pop_back();
while (!lits.empty()) {
if (cmp(best, lits.back())) {
if (lcmp(best, lits.back())) {
best = std::move(lits.back());
}
lits.pop_back();
@ -811,7 +811,7 @@ flat_set<NFAEdge> poisonEdges(const NGHolder &h,
/* poison edges covered by successor literal */
set<pair<ue2_literal, bool> > succs;
set<pair<ue2_literal, bool> > lsuccs;
for (const RoseInEdge &ve : ee) {
if (vg[target(ve, vg)].type != RIV_LITERAL) {
/* nothing to poison in suffixes/outfixes */
@ -819,15 +819,15 @@ flat_set<NFAEdge> poisonEdges(const NGHolder &h,
assert(is_any_accept_type(vg[target(ve, vg)].type));
continue;
}
succs.insert({vg[target(ve, vg)].s,
lsuccs.insert({vg[target(ve, vg)].s,
vg[source(ve, vg)].type == RIV_LITERAL});
}
DEBUG_PRINTF("poisoning edges %zu successor literals\n", succs.size());
DEBUG_PRINTF("poisoning edges %zu successor literals\n", lsuccs.size());
flat_set<NFAEdge> bad;
for (const auto &p : succs) {
for (const auto &p : lsuccs) {
poisonFromSuccessor(h, p.first, p.second, bad);
}
@ -1433,11 +1433,11 @@ bool deanchorIfNeeded(NGHolder &g) {
if (succ_v == succ_g) {
DEBUG_PRINTF("found ^.*\n");
for (auto succ : adjacent_vertices_range(g.start, g)) {
if (succ == g.startDs) {
for (auto asucc : adjacent_vertices_range(g.start, g)) {
if (asucc == g.startDs) {
continue;
}
add_edge(g.startDs, succ, g);
add_edge(g.startDs, asucc, g);
}
clear_vertex(v, g);
remove_vertex(v, g);
@ -1684,18 +1684,18 @@ void removeRedundantLiteralsFromInfix(const NGHolder &h, RoseInGraph &ig,
* successor literal. This would require using distinct report ids and also
* taking into account overlap of successor literals. */
set<ue2_literal> preds;
set<ue2_literal> succs;
set<ue2_literal> lpreds;
set<ue2_literal> lsuccs;
for (const RoseInEdge &e : ee) {
RoseInVertex u = source(e, ig);
assert(ig[u].type == RIV_LITERAL);
assert(!ig[u].delay);
preds.insert(ig[u].s);
lpreds.insert(ig[u].s);
RoseInVertex v = target(e, ig);
assert(ig[v].type == RIV_LITERAL);
assert(!ig[v].delay);
succs.insert(ig[v].s);
lsuccs.insert(ig[v].s);
if (ig[e].graph_lag) {
/* already removed redundant parts of literals */
@ -1707,9 +1707,9 @@ void removeRedundantLiteralsFromInfix(const NGHolder &h, RoseInGraph &ig,
map<ue2_literal, pair<shared_ptr<NGHolder>, u32> > graphs; /* + delay */
for (const ue2_literal &right : succs) {
for (const ue2_literal &right : lsuccs) {
size_t max_overlap = 0;
for (const ue2_literal &left : preds) {
for (const ue2_literal &left : lpreds) {
size_t overlap = maxOverlap(left, right, 0);
ENSURE_AT_LEAST(&max_overlap, overlap);
}
@ -1746,13 +1746,13 @@ void removeRedundantLiteralsFromInfix(const NGHolder &h, RoseInGraph &ig,
for (const RoseInEdge &e : ee) {
RoseInVertex v = target(e, ig);
const ue2_literal &succ = ig[v].s;
if (!contains(graphs, succ)) {
const ue2_literal &igsucc = ig[v].s;
if (!contains(graphs, igsucc)) {
continue;
}
ig[e].graph = graphs[succ].first;
ig[e].graph_lag = graphs[succ].second;
ig[e].graph = graphs[igsucc].first;
ig[e].graph_lag = graphs[igsucc].second;
if (isStarCliche(*ig[e].graph)) {
DEBUG_PRINTF("is a X star!\n");
@ -1792,8 +1792,8 @@ void removeRedundantLiteralsFromInfixes(RoseInGraph &g,
for (const auto &m : infixes) {
const NGHolder *h = m.first;
const auto &edges = m.second;
removeRedundantLiteralsFromInfix(*h, g, edges, cc);
const auto &medges = m.second;
removeRedundantLiteralsFromInfix(*h, g, medges, cc);
}
}
@ -1952,7 +1952,7 @@ bool makeTransientFromLongLiteral(const NGHolder &h, RoseInGraph &vg,
static
void restoreTrailingLiteralStates(NGHolder &g, const ue2_literal &lit,
u32 delay, const vector<NFAVertex> &preds) {
u32 delay, const vector<NFAVertex> &lpreds) {
assert(delay <= lit.length());
assert(isCorrectlyTopped(g));
DEBUG_PRINTF("adding on '%s' %u\n", dumpString(lit).c_str(), delay);
@ -1968,7 +1968,7 @@ void restoreTrailingLiteralStates(NGHolder &g, const ue2_literal &lit,
prev = curr;
}
for (auto v : preds) {
for (auto v : lpreds) {
NFAEdge e = add_edge_if_not_present(v, prev, g);
if (v == g.start && is_triggered(g)) {
g[e].tops.insert(DEFAULT_TOP);
@ -1987,11 +1987,11 @@ void restoreTrailingLiteralStates(NGHolder &g, const ue2_literal &lit,
static
void restoreTrailingLiteralStates(NGHolder &g,
const vector<pair<ue2_literal, u32>> &lits) {
vector<NFAVertex> preds;
insert(&preds, preds.end(), inv_adjacent_vertices(g.accept, g));
vector<NFAVertex> vpreds;
insert(&vpreds, vpreds.end(), inv_adjacent_vertices(g.accept, g));
clear_in_edges(g.accept, g);
for (auto v : preds) {
for (auto v : vpreds) {
g[v].reports.clear(); /* clear report from old accepts */
}
@ -1999,7 +1999,7 @@ void restoreTrailingLiteralStates(NGHolder &g,
const ue2_literal &lit = p.first;
u32 delay = p.second;
restoreTrailingLiteralStates(g, lit, delay, preds);
restoreTrailingLiteralStates(g, lit, delay, vpreds);
}
}
@ -2133,14 +2133,14 @@ void findBetterPrefixes(RoseInGraph &vg, const CompileContext &cc) {
/* look for bad prefixes and try to split */
for (const auto &m : prefixes) {
NGHolder *h = m.first;
const auto &edges = m.second;
const auto &medges = m.second;
depth max_width = findMaxWidth(*h);
if (willBeTransient(max_width, cc)
|| willBeAnchoredTable(max_width, cc.grey)) {
continue;
}
changed = improvePrefix(*h, vg, edges, cc);
changed = improvePrefix(*h, vg, medges, cc);
}
} while (changed && gen++ < MAX_FIND_BETTER_PREFIX_GEN);
}
@ -2197,12 +2197,12 @@ void extractStrongLiterals(RoseInGraph &vg, const CompileContext &cc) {
for (const auto &m : edges_by_graph) {
NGHolder *g = m.first;
const auto &edges = m.second;
const auto &medges = m.second;
if (contains(stuck, g)) {
DEBUG_PRINTF("already known to be bad\n");
continue;
}
bool rv = extractStrongLiteral(*g, vg, edges, cc);
bool rv = extractStrongLiteral(*g, vg, medges, cc);
if (rv) {
changed = true;
} else {
@ -2280,8 +2280,8 @@ void improveWeakInfixes(RoseInGraph &vg, const CompileContext &cc) {
for (const auto &m : weak_edges) {
NGHolder *h = m.first;
const auto &edges = m.second;
improveInfix(*h, vg, edges, cc);
const auto &medges = m.second;
improveInfix(*h, vg, medges, cc);
}
}
@ -2406,14 +2406,14 @@ bool replaceSuffixWithInfix(const NGHolder &h, RoseInGraph &vg,
assert(!by_reports.empty());
/* TODO: how strong a min len do we want here ? */
u32 min_len = cc.grey.minRoseLiteralLength;
ENSURE_AT_LEAST(&min_len, MIN_SUFFIX_LEN);
u32 rose_min_len = cc.grey.minRoseLiteralLength;
ENSURE_AT_LEAST(&rose_min_len, MIN_SUFFIX_LEN);
for (auto &vli : by_reports | map_values) {
u64a score = sanitizeAndCompressAndScore(vli.lit);
if (vli.lit.empty()
|| !validateRoseLiteralSetQuality(vli.lit, score, false, min_len,
|| !validateRoseLiteralSetQuality(vli.lit, score, false, rose_min_len,
false, false)) {
return false;
}
@ -2457,8 +2457,8 @@ void avoidSuffixes(RoseInGraph &vg, const CompileContext &cc) {
/* look at suffixes and try to split */
for (const auto &m : suffixes) {
const NGHolder *h = m.first;
const auto &edges = m.second;
replaceSuffixWithInfix(*h, vg, edges, cc);
const auto &medges = m.second;
replaceSuffixWithInfix(*h, vg, medges, cc);
}
}
@ -2552,8 +2552,8 @@ void lookForDoubleCut(RoseInGraph &vg, const CompileContext &cc) {
for (const auto &m : right_edges) {
const NGHolder *h = m.first;
const auto &edges = m.second;
lookForDoubleCut(*h, edges, vg, cc.grey);
const auto &medges = m.second;
lookForDoubleCut(*h, medges, vg, cc.grey);
}
}
@ -2744,8 +2744,8 @@ void lookForCleanEarlySplits(RoseInGraph &vg, const CompileContext &cc) {
for (const auto &m : rightfixes) {
const NGHolder *h = m.first;
const auto &edges = m.second;
lookForCleanSplit(*h, edges, vg, cc);
const auto &medges = m.second;
lookForCleanSplit(*h, medges, vg, cc);
}
prev = std::move(curr);
@ -2941,10 +2941,10 @@ bool ensureImplementable(RoseBuild &rose, RoseInGraph &vg, bool allow_changes,
continue;
}
const auto &edges = m.second;
const auto &medges = m.second;
if (tryForEarlyDfa(*h, cc) &&
doEarlyDfa(rose, vg, *h, edges, final_chance, rm, cc)) {
doEarlyDfa(rose, vg, *h, medges, final_chance, rm, cc)) {
continue;
}
@ -2953,7 +2953,7 @@ bool ensureImplementable(RoseBuild &rose, RoseInGraph &vg, bool allow_changes,
return false;
}
if (splitForImplementability(vg, *h, edges, cc)) {
if (splitForImplementability(vg, *h, medges, cc)) {
added_count++;
if (added_count > MAX_IMPLEMENTABLE_SPLITS) {
DEBUG_PRINTF("added_count hit limit\n");

View File

@ -518,9 +518,9 @@ u32 findRoseAnchorFloatingOverlap(const RoseInEdgeProps &ep,
static
void findRoseLiteralMask(const NGHolder &h, const u32 lag, vector<u8> &msk,
vector<u8> &cmp) {
vector<u8> &lcmp) {
if (lag >= HWLM_MASKLEN) {
msk.clear(); cmp.clear();
msk.clear(); lcmp.clear();
return;
}
@ -532,7 +532,7 @@ void findRoseLiteralMask(const NGHolder &h, const u32 lag, vector<u8> &msk,
assert(!curr.empty());
msk.assign(HWLM_MASKLEN, 0);
cmp.assign(HWLM_MASKLEN, 0);
lcmp.assign(HWLM_MASKLEN, 0);
size_t i = HWLM_MASKLEN - lag - 1;
do {
if (curr.empty() || contains(curr, h.start) ||
@ -549,9 +549,9 @@ void findRoseLiteralMask(const NGHolder &h, const u32 lag, vector<u8> &msk,
cr |= h[v].char_reach;
insert(&next, inv_adjacent_vertices(v, h));
}
make_and_cmp_mask(cr, &msk[i], &cmp[i]);
DEBUG_PRINTF("%zu: reach=%s, msk=%u, cmp=%u\n", i,
describeClass(cr).c_str(), msk.at(i), cmp.at(i));
make_and_cmp_mask(cr, &msk[i], &lcmp[i]);
DEBUG_PRINTF("%zu: reach=%s, msk=%u, lcmp=%u\n", i,
describeClass(cr).c_str(), msk.at(i), lcmp.at(i));
curr.swap(next);
} while (i-- > 0);
}
@ -617,18 +617,18 @@ void doRoseLiteralVertex(RoseBuildImpl *tbi, bool use_eod_table,
}
floating:
vector<u8> msk, cmp;
vector<u8> msk, lcmp;
if (tbi->cc.grey.roseHamsterMasks && in_degree(iv, ig) == 1) {
RoseInEdge e = *in_edges(iv, ig).first;
if (ig[e].graph) {
findRoseLiteralMask(*ig[e].graph, ig[e].graph_lag, msk, cmp);
findRoseLiteralMask(*ig[e].graph, ig[e].graph_lag, msk, lcmp);
}
}
u32 delay = iv_info.delay;
rose_literal_table table = use_eod_table ? ROSE_EOD_ANCHORED : ROSE_FLOATING;
u32 literalId = tbi->getLiteralId(iv_info.s, msk, cmp, delay, table);
u32 literalId = tbi->getLiteralId(iv_info.s, msk, lcmp, delay, table);
DEBUG_PRINTF("literal=%u (len=%zu, delay=%u, offsets=[%u,%u] '%s')\n",
literalId, iv_info.s.length(), delay, iv_info.min_offset,
@ -1087,20 +1087,20 @@ bool predsAreDelaySensitive(const RoseInGraph &ig, RoseInVertex v) {
static
u32 maxAvailableDelay(const ue2_literal &pred_key, const ue2_literal &lit_key) {
/* overly conservative if only part of the string is nocase */
string pred = pred_key.get_string();
string predk = pred_key.get_string();
string lit = lit_key.get_string();
if (pred_key.any_nocase() || lit_key.any_nocase()) {
upperString(pred);
upperString(predk);
upperString(lit);
}
string::size_type last = pred.rfind(lit);
string::size_type last = predk.rfind(lit);
if (last == string::npos) {
return MAX_DELAY;
}
u32 raw = pred.size() - last - 1;
u32 raw = predk.size() - last - 1;
return MIN(raw, MAX_DELAY);
}

View File

@ -302,31 +302,31 @@ unique_ptr<NGHolder> buildMaskLhs(bool anchored, u32 prefix_len,
assert(prefix_len);
assert(mask.size() >= prefix_len);
NFAVertex pred = anchored ? lhs->start : lhs->startDs;
NFAVertex lpreds = anchored ? lhs->start : lhs->startDs;
u32 m_idx = 0;
while (prefix_len--) {
NFAVertex v = add_vertex(*lhs);
(*lhs)[v].char_reach = mask[m_idx++];
add_edge(pred, v, *lhs);
pred = v;
add_edge(lpreds, v, *lhs);
lpreds = v;
}
add_edge(pred, lhs->accept, *lhs);
(*lhs)[pred].reports.insert(0);
add_edge(lpreds, lhs->accept, *lhs);
(*lhs)[lpreds].reports.insert(0);
return lhs;
}
static
void buildLiteralMask(const vector<CharReach> &mask, vector<u8> &msk,
vector<u8> &cmp, u32 delay) {
vector<u8> &lcmp, u32 delay) {
msk.clear();
cmp.clear();
lcmp.clear();
if (mask.size() <= delay) {
return;
}
// Construct an and/cmp mask from our mask ending at delay positions before
// Construct an and/lcmp mask from our mask ending at delay positions before
// the end of the literal, with max length HWLM_MASKLEN.
auto ite = mask.end() - delay;
@ -334,11 +334,11 @@ void buildLiteralMask(const vector<CharReach> &mask, vector<u8> &msk,
for (; it != ite; ++it) {
msk.emplace_back(0);
cmp.emplace_back(0);
make_and_cmp_mask(*it, &msk.back(), &cmp.back());
lcmp.emplace_back(0);
make_and_cmp_mask(*it, &msk.back(), &lcmp.back());
}
assert(msk.size() == cmp.size());
assert(msk.size() == lcmp.size());
assert(msk.size() <= HWLM_MASKLEN);
}
@ -392,10 +392,9 @@ bool validateTransientMask(const vector<CharReach> &mask, bool anchored,
none_of(begin(lits), end(lits), mixed_sensitivity));
// Build the HWLM literal mask.
vector<u8> msk;
vector<u8> msk, lcmp;
if (grey.roseHamsterMasks) {
vector<u8> cmp;
buildLiteralMask(mask, msk, cmp, delay);
buildLiteralMask(mask, msk, lcmp, delay);
}
// We consider the HWLM mask length to run from the first non-zero byte to
@ -491,9 +490,9 @@ void addTransientMask(RoseBuildImpl &build, const vector<CharReach> &mask,
set_report(*mask_graph, mask_report);
// Build the HWLM literal mask.
vector<u8> msk, cmp;
vector<u8> msk, lcmp;
if (build.cc.grey.roseHamsterMasks) {
buildLiteralMask(mask, msk, cmp, delay);
buildLiteralMask(mask, msk, lcmp, delay);
}
/* adjust bounds to be relative to trigger rather than mask */
@ -527,7 +526,7 @@ void addTransientMask(RoseBuildImpl &build, const vector<CharReach> &mask,
const flat_set<ReportID> no_reports;
for (const auto &lit : lits) {
u32 lit_id = build.getLiteralId(lit, msk, cmp, delay, table);
u32 lit_id = build.getLiteralId(lit, msk, lcmp, delay, table);
const RoseVertex parent = anchored ? build.anchored_root : build.root;
bool use_mask = delay || maskIsNeeded(lit, *mask_graph);
@ -570,19 +569,19 @@ unique_ptr<NGHolder> buildMaskRhs(const flat_set<ReportID> &reports,
unique_ptr<NGHolder> rhs = std::make_unique<NGHolder>(NFA_SUFFIX);
NGHolder &h = *rhs;
NFAVertex succ = h.accept;
NFAVertex asucc = h.accept;
u32 m_idx = mask.size() - 1;
while (suffix_len--) {
NFAVertex u = add_vertex(h);
if (succ == h.accept) {
if (asucc == h.accept) {
h[u].reports.insert(reports.begin(), reports.end());
}
h[u].char_reach = mask[m_idx--];
add_edge(u, succ, h);
succ = u;
add_edge(u, asucc, h);
asucc = u;
}
NFAEdge e = add_edge(h.start, succ, h);
NFAEdge e = add_edge(h.start, asucc, h);
h[e].tops.insert(DEFAULT_TOP);
return rhs;

View File

@ -348,11 +348,11 @@ public:
next[s].wdelay = wdelay;
}
nfa_state_set succ;
nfa_state_set gsucc;
if (wdelay != in.wdelay) {
DEBUG_PRINTF("enabling start\n");
succ.set(vertexToIndex[g.startDs]);
gsucc.set(vertexToIndex[g.startDs]);
}
for (size_t i = in.wrap_state.find_first(); i != nfa_state_set::npos;
@ -368,12 +368,12 @@ public:
continue;
}
succ.set(vertexToIndex[w]);
gsucc.set(vertexToIndex[w]);
}
}
for (size_t j = succ.find_first(); j != nfa_state_set::npos;
j = succ.find_next(j)) {
for (size_t j = gsucc.find_first(); j != nfa_state_set::npos;
j = gsucc.find_next(j)) {
const CharReach &cr = cr_by_index[j];
for (size_t s = cr.find_first(); s != CharReach::npos;
s = cr.find_next(s)) {

View File

@ -476,9 +476,9 @@ rose_group RoseBuildImpl::getInitialGroups() const {
static
bool nfaStuckOn(const NGHolder &g) {
assert(!proper_out_degree(g.startDs, g));
set<NFAVertex> succ;
insert(&succ, adjacent_vertices(g.start, g));
succ.erase(g.startDs);
set<NFAVertex> vsucc;
insert(&vsucc, adjacent_vertices(g.start, g));
vsucc.erase(g.startDs);
set<NFAVertex> asucc;
set<u32> tops;
@ -493,7 +493,7 @@ bool nfaStuckOn(const NGHolder &g) {
asucc.clear();
insert(&asucc, adjacent_vertices(target(e, g), g));
if (asucc == succ) {
if (asucc == vsucc) {
insert(&done_tops, g[e].tops);
}
}
@ -531,12 +531,12 @@ void findFixedDepthTops(const RoseGraph &g, const set<PredTopPair> &triggers,
for (const auto &e : pred_by_top) {
u32 top = e.first;
const set<RoseVertex> &preds = e.second;
if (!g[*preds.begin()].fixedOffset()) {
const set<RoseVertex> &spreds = e.second;
if (!g[*spreds.begin()].fixedOffset()) {
continue;
}
u32 depth = g[*preds.begin()].min_offset;
for (RoseVertex u : preds) {
u32 depth = g[*spreds.begin()].min_offset;
for (RoseVertex u : spreds) {
if (g[u].min_offset != depth || g[u].max_offset != depth) {
goto next_top;
}
@ -925,12 +925,12 @@ void appendTailToHolder(NGHolder &h, const vector<CharReach> &tail) {
static
u32 decreaseLag(const RoseBuildImpl &build, NGHolder &h,
const vector<RoseVertex> &succs) {
const vector<RoseVertex> &vsuccs) {
const RoseGraph &rg = build.g;
static const size_t MAX_RESTORE_LEN = 5;
vector<CharReach> restored(MAX_RESTORE_LEN);
for (RoseVertex v : succs) {
for (RoseVertex v : vsuccs) {
u32 lag = rg[v].left.lag;
for (u32 lit_id : rg[v].literals) {
u32 delay = build.literals.at(lit_id).delay;
@ -969,7 +969,7 @@ struct eager_info {
static
bool checkSuitableForEager(bool is_prefix, const left_id &left,
const RoseBuildImpl &build,
const vector<RoseVertex> &succs,
const vector<RoseVertex> &vsuccs,
rose_group squash_mask, rose_group initial_groups,
eager_info &ei, const CompileContext &cc) {
DEBUG_PRINTF("checking prefix --> %016llx...\n", squash_mask);
@ -986,7 +986,7 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
return false;
}
for (RoseVertex s : succs) {
for (RoseVertex s : vsuccs) {
if (build.isInETable(s)
|| contains(rg[s].literals, build.eod_event_literal_id)) {
return false; /* Ignore EOD related prefixes */
@ -1005,7 +1005,7 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
if (!can_die_early(dfa, EAGER_DIE_BEFORE_LIMIT)) {
return false;
}
ei.new_graph = rg[succs[0]].left.graph;
ei.new_graph = rg[vsuccs[0]].left.graph;
} else if (left.graph()) {
const NGHolder &g = *left.graph();
if (proper_out_degree(g.startDs, g)) {
@ -1016,7 +1016,7 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
auto gg = ei.new_graph;
gg->kind = NFA_EAGER_PREFIX;
ei.lag_adjust = decreaseLag(build, *gg, succs);
ei.lag_adjust = decreaseLag(build, *gg, vsuccs);
if (is_match_vertex(gg->start, *gg)) {
return false; /* should not still be vacuous as lag decreased */
@ -1044,17 +1044,17 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
static
left_id updateLeftfixWithEager(RoseGraph &g, const eager_info &ei,
const vector<RoseVertex> &succs) {
const vector<RoseVertex> &vsuccs) {
u32 lag_adjust = ei.lag_adjust;
auto gg = ei.new_graph;
for (RoseVertex v : succs) {
for (RoseVertex v : vsuccs) {
g[v].left.graph = gg;
assert(g[v].left.lag >= lag_adjust);
g[v].left.lag -= lag_adjust;
DEBUG_PRINTF("added %u literal chars back, new lag %u\n", lag_adjust,
g[v].left.lag);
}
left_id leftfix = g[succs[0]].left;
left_id leftfix = g[vsuccs[0]].left;
if (leftfix.graph()) {
assert(leftfix.graph()->kind == NFA_PREFIX
@ -1099,7 +1099,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
const map<left_id, set<PredTopPair> > &infixTriggers,
set<u32> *no_retrigger_queues, set<u32> *eager_queues,
const map<left_id, eager_info> &eager,
const vector<RoseVertex> &succs, left_id leftfix) {
const vector<RoseVertex> &vsuccs, left_id leftfix) {
RoseGraph &g = build.g;
const CompileContext &cc = build.cc;
const ReportManager &rm = build.rm;
@ -1111,7 +1111,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
if (contains(eager, leftfix)) {
eager_queues->insert(qi);
leftfix = updateLeftfixWithEager(g, eager.at(leftfix), succs);
leftfix = updateLeftfixWithEager(g, eager.at(leftfix), vsuccs);
}
bytecode_ptr<NFA> nfa;
@ -1159,7 +1159,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
u32 max_queuelen = UINT32_MAX;
if (!prefix) {
set<ue2_literal> lits;
for (RoseVertex v : succs) {
for (RoseVertex v : vsuccs) {
for (auto u : inv_adjacent_vertices_range(v, g)) {
for (u32 lit_id : g[u].literals) {
lits.insert(build.literals.at(lit_id).s);
@ -1188,7 +1188,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
findCountingMiracleInfo(leftfix, stop, &cm_count, &cm_cr);
}
for (RoseVertex v : succs) {
for (RoseVertex v : vsuccs) {
bc.leftfix_info.emplace(v, left_build_info(qi, g[v].left.lag, max_width,
squash_mask, stop,
max_queuelen, cm_count,
@ -1504,7 +1504,7 @@ void buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
map<left_id, set<PredTopPair>> infixTriggers;
findInfixTriggers(tbi, &infixTriggers);
insertion_ordered_map<left_id, vector<RoseVertex>> succs;
insertion_ordered_map<left_id, vector<RoseVertex>> lsuccs;
if (cc.grey.allowTamarama && cc.streaming && !do_prefix) {
findExclusiveInfixes(tbi, bc, qif, infixTriggers, no_retrigger_queues);
@ -1544,7 +1544,7 @@ void buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
}
}
succs[leftfix].emplace_back(v);
lsuccs[leftfix].emplace_back(v);
}
rose_group initial_groups = tbi.getInitialGroups();
@ -1552,7 +1552,7 @@ void buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
map<left_id, eager_info> eager;
for (const auto &m : succs) {
for (const auto &m : lsuccs) {
const left_id &leftfix = m.first;
const auto &left_succs = m.second;
@ -1573,7 +1573,7 @@ void buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
eager.clear();
}
for (const auto &m : succs) {
for (const auto &m : lsuccs) {
const left_id &leftfix = m.first;
const auto &left_succs = m.second;
buildLeftfix(tbi, bc, do_prefix, qif.get_queue(), infixTriggers,

View File

@ -1144,10 +1144,10 @@ void findTopTriggerCancels(RoseBuildImpl &build) {
for (const auto &r : left_succ) {
const left_id &left = r.first;
const vector<RoseVertex> &succs = r.second;
const vector<RoseVertex> &rsuccs = r.second;
assert(!succs.empty());
if (build.isRootSuccessor(*succs.begin())) {
assert(!rsuccs.empty());
if (build.isRootSuccessor(*rsuccs.begin())) {
/* a prefix is never an infix */
continue;
}
@ -1156,7 +1156,7 @@ void findTopTriggerCancels(RoseBuildImpl &build) {
set<RoseEdge> rose_edges;
set<u32> pred_lit_ids;
for (auto v : succs) {
for (auto v : rsuccs) {
for (const auto &e : in_edges_range(v, build.g)) {
RoseVertex u = source(e, build.g);
tops_seen.insert(build.g[e].rose_top);
@ -1212,11 +1212,11 @@ void buildRoseSquashMasks(RoseBuildImpl &tbi) {
* successor of the nfa and all the literals */
for (const auto &e : roses) {
const left_id &left = e.first;
const vector<RoseVertex> &succs = e.second;
const vector<RoseVertex> &rsuccs = e.second;
set<u32> lit_ids;
bool anchored_pred = false;
for (auto v : succs) {
for (auto v : rsuccs) {
lit_ids.insert(tbi.g[v].literals.begin(), tbi.g[v].literals.end());
for (auto u : inv_adjacent_vertices_range(v, tbi.g)) {
anchored_pred |= tbi.isAnchored(u);
@ -1230,7 +1230,7 @@ void buildRoseSquashMasks(RoseBuildImpl &tbi) {
if (anchored_pred) { /* infix with pred in anchored table */
u32 min_off = ~0U;
u32 max_off = 0U;
for (auto v : succs) {
for (auto v : rsuccs) {
for (auto u : inv_adjacent_vertices_range(v, tbi.g)) {
min_off = min(min_off, tbi.g[u].min_offset);
max_off = max(max_off, tbi.g[u].max_offset);

View File

@ -202,10 +202,10 @@ void getForwardReach(const raw_dfa &rdfa, map<s32, CharReach> &look) {
}
for (unsigned c = 0; c < N_CHARS; c++) {
dstate_id_t succ = ds.next[rdfa.alpha_remap[c]];
if (succ != DEAD_STATE) {
dstate_id_t dnsucc = ds.next[rdfa.alpha_remap[c]];
if (dnsucc != DEAD_STATE) {
cr.set(c);
next.insert(succ);
next.insert(dnsucc);
}
}
}

View File

@ -75,7 +75,7 @@ string dumpMask(const vector<u8> &v) {
static
bool maskFromLeftGraph(const LeftEngInfo &left, vector<u8> &msk,
vector<u8> &cmp) {
vector<u8> &lcmp) {
const u32 lag = left.lag;
const ReportID report = left.leftfix_report;
@ -111,9 +111,9 @@ bool maskFromLeftGraph(const LeftEngInfo &left, vector<u8> &msk,
cr |= v_cr;
insert(&next, inv_adjacent_vertices(v, h));
}
make_and_cmp_mask(cr, &msk.at(i), &cmp.at(i));
DEBUG_PRINTF("%zu: reach=%s, msk=%u, cmp=%u\n", i,
describeClass(cr).c_str(), msk[i], cmp[i]);
make_and_cmp_mask(cr, &msk.at(i), &lcmp.at(i));
DEBUG_PRINTF("%zu: reach=%s, msk=%u, lcmp=%u\n", i,
describeClass(cr).c_str(), msk[i], lcmp[i]);
curr.swap(next);
} while (i-- > 0);
@ -122,7 +122,7 @@ bool maskFromLeftGraph(const LeftEngInfo &left, vector<u8> &msk,
static
bool maskFromLeftCastle(const LeftEngInfo &left, vector<u8> &msk,
vector<u8> &cmp) {
vector<u8> &lcmp) {
const u32 lag = left.lag;
const ReportID report = left.leftfix_report;
@ -149,23 +149,23 @@ bool maskFromLeftCastle(const LeftEngInfo &left, vector<u8> &msk,
u32 len = min_width;
u32 end = HWLM_MASKLEN - lag;
for (u32 i = end; i > end - min(end, len); i--) {
make_and_cmp_mask(c.reach(), &msk.at(i - 1), &cmp.at(i - 1));
make_and_cmp_mask(c.reach(), &msk.at(i - 1), &lcmp.at(i - 1));
}
return true;
}
static
bool maskFromLeft(const LeftEngInfo &left, vector<u8> &msk, vector<u8> &cmp) {
bool maskFromLeft(const LeftEngInfo &left, vector<u8> &msk, vector<u8> &lcmp) {
if (left.lag >= HWLM_MASKLEN) {
DEBUG_PRINTF("too much lag\n");
return false;
}
if (left.graph) {
return maskFromLeftGraph(left, msk, cmp);
return maskFromLeftGraph(left, msk, lcmp);
} else if (left.castle) {
return maskFromLeftCastle(left, msk, cmp);
return maskFromLeftCastle(left, msk, lcmp);
}
return false;
@ -173,7 +173,7 @@ bool maskFromLeft(const LeftEngInfo &left, vector<u8> &msk, vector<u8> &cmp) {
static
bool maskFromPreds(const RoseBuildImpl &build, const rose_literal_id &id,
const RoseVertex v, vector<u8> &msk, vector<u8> &cmp) {
const RoseVertex v, vector<u8> &msk, vector<u8> &lcmp) {
const RoseGraph &g = build.g;
// For right now, wuss out and only handle cases with one pred.
@ -222,7 +222,7 @@ bool maskFromPreds(const RoseBuildImpl &build, const rose_literal_id &id,
ue2_literal::const_iterator it, ite;
for (it = u_id.s.begin() + (u_len - u_sublen), ite = u_id.s.end();
it != ite; ++it) {
make_and_cmp_mask(*it, &msk.at(i), &cmp.at(i));
make_and_cmp_mask(*it, &msk.at(i), &lcmp.at(i));
++i;
}
@ -231,21 +231,21 @@ bool maskFromPreds(const RoseBuildImpl &build, const rose_literal_id &id,
static
bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
const RoseVertex v, vector<u8> &msk, vector<u8> &cmp) {
const RoseVertex v, vector<u8> &msk, vector<u8> &lcmp) {
// Start with zero masks.
msk.assign(HWLM_MASKLEN, 0);
cmp.assign(HWLM_MASKLEN, 0);
lcmp.assign(HWLM_MASKLEN, 0);
const LeftEngInfo &left = build.g[v].left;
if (left && left.lag < HWLM_MASKLEN) {
if (maskFromLeft(left, msk, cmp)) {
if (maskFromLeft(left, msk, lcmp)) {
DEBUG_PRINTF("mask from a leftfix!\n");
return true;
}
}
if (id.s.length() < HWLM_MASKLEN) {
if (maskFromPreds(build, id, v, msk, cmp)) {
if (maskFromPreds(build, id, v, msk, lcmp)) {
DEBUG_PRINTF("mask from preds!\n");
return true;
}
@ -255,18 +255,18 @@ bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
}
static
bool hamsterMaskCombine(vector<u8> &msk, vector<u8> &cmp,
bool hamsterMaskCombine(vector<u8> &msk, vector<u8> &lcmp,
const vector<u8> &v_msk, const vector<u8> &v_cmp) {
assert(msk.size() == HWLM_MASKLEN && cmp.size() == HWLM_MASKLEN);
assert(msk.size() == HWLM_MASKLEN && lcmp.size() == HWLM_MASKLEN);
assert(v_msk.size() == HWLM_MASKLEN && v_cmp.size() == HWLM_MASKLEN);
u8 all_masks = 0;
for (size_t i = 0; i < HWLM_MASKLEN; i++) {
u8 filter = ~(cmp[i] ^ v_cmp[i]);
u8 filter = ~(lcmp[i] ^ v_cmp[i]);
msk[i] &= v_msk[i];
msk[i] &= filter;
cmp[i] &= filter;
lcmp[i] &= filter;
all_masks |= msk[i];
}
@ -278,7 +278,7 @@ bool hamsterMaskCombine(vector<u8> &msk, vector<u8> &cmp,
static
bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
const rose_literal_info &info, vector<u8> &msk,
vector<u8> &cmp) {
vector<u8> &lcmp) {
if (!build.cc.grey.roseHamsterMasks) {
return false;
}
@ -289,7 +289,7 @@ bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
}
msk.assign(HWLM_MASKLEN, 0);
cmp.assign(HWLM_MASKLEN, 0);
lcmp.assign(HWLM_MASKLEN, 0);
size_t num = 0;
vector<u8> v_msk, v_cmp;
@ -301,28 +301,28 @@ bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
}
if (!num++) {
// First (or only) vertex, this becomes the mask/cmp pair.
// First (or only) vertex, this becomes the mask/lcmp pair.
msk = v_msk;
cmp = v_cmp;
lcmp = v_cmp;
} else {
// Multiple vertices with potentially different masks. We combine
// them into an 'advisory' mask.
if (!hamsterMaskCombine(msk, cmp, v_msk, v_cmp)) {
if (!hamsterMaskCombine(msk, lcmp, v_msk, v_cmp)) {
DEBUG_PRINTF("mask went to zero\n");
return false;
}
}
}
normaliseLiteralMask(id.s, msk, cmp);
normaliseLiteralMask(id.s, msk, lcmp);
if (msk.empty()) {
DEBUG_PRINTF("no mask\n");
return false;
}
DEBUG_PRINTF("msk=%s, cmp=%s\n", dumpMask(msk).c_str(),
dumpMask(cmp).c_str());
DEBUG_PRINTF("msk=%s, lcmp=%s\n", dumpMask(msk).c_str(),
dumpMask(lcmp).c_str());
return true;
}
@ -357,13 +357,13 @@ void findMoreLiteralMasks(RoseBuildImpl &build) {
const auto &lit = build.literals.at(id);
auto &lit_info = build.literal_info.at(id);
vector<u8> msk, cmp;
if (!addSurroundingMask(build, lit, lit_info, msk, cmp)) {
vector<u8> msk, lcmp;
if (!addSurroundingMask(build, lit, lit_info, msk, lcmp)) {
continue;
}
DEBUG_PRINTF("found surrounding mask for lit_id=%u (%s)\n", id,
dumpString(lit.s).c_str());
u32 new_id = build.getLiteralId(lit.s, msk, cmp, lit.delay, lit.table);
u32 new_id = build.getLiteralId(lit.s, msk, lcmp, lit.delay, lit.table);
if (new_id == id) {
continue;
}
@ -392,7 +392,7 @@ void findMoreLiteralMasks(RoseBuildImpl &build) {
// mixed-case is mandatory.
static
void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
vector<u8> &cmp) {
vector<u8> &lcmp) {
const size_t suffix_len = min(id.s.length(), size_t{HWLM_MASKLEN});
bool mixed_suffix = mixed_sensitivity_in(id.s.end() - suffix_len,
id.s.end());
@ -403,7 +403,7 @@ void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
while (msk.size() < HWLM_MASKLEN) {
msk.insert(msk.begin(), 0);
cmp.insert(cmp.begin(), 0);
lcmp.insert(lcmp.begin(), 0);
}
if (!id.msk.empty()) {
@ -413,7 +413,7 @@ void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
size_t mand_offset = msk.size() - i - 1;
size_t lit_offset = id.msk.size() - i - 1;
msk[mand_offset] = id.msk[lit_offset];
cmp[mand_offset] = id.cmp[lit_offset];
lcmp[mand_offset] = id.cmp[lit_offset];
}
}
@ -425,12 +425,12 @@ void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
size_t offset = HWLM_MASKLEN - i - 1;
DEBUG_PRINTF("offset %zu must match 0x%02x exactly\n", offset,
c.c);
make_and_cmp_mask(c, &msk[offset], &cmp[offset]);
make_and_cmp_mask(c, &msk[offset], &lcmp[offset]);
}
}
}
normaliseLiteralMask(id.s, msk, cmp);
normaliseLiteralMask(id.s, msk, lcmp);
}
static
@ -704,7 +704,7 @@ void addFragmentLiteral(const RoseBuildImpl &build, MatcherProto &mp,
lit.s.length());
vector<u8> msk = lit.msk; // copy
vector<u8> cmp = lit.cmp; // copy
vector<u8> lcmp = lit.cmp; // copy
bool noruns = isNoRunsFragment(build, f, max_len);
DEBUG_PRINTF("fragment is %s\n", noruns ? "noruns" : "not noruns");
@ -720,24 +720,24 @@ void addFragmentLiteral(const RoseBuildImpl &build, MatcherProto &mp,
assert(!noruns);
}
addLiteralMask(lit, msk, cmp);
addLiteralMask(lit, msk, lcmp);
const auto &s_final = lit_final.get_string();
bool nocase = lit_final.any_nocase();
DEBUG_PRINTF("id=%u, s='%s', nocase=%d, noruns=%d, msk=%s, cmp=%s\n",
DEBUG_PRINTF("id=%u, s='%s', nocase=%d, noruns=%d, msk=%s, lcmp=%s\n",
f.fragment_id, escapeString(s_final).c_str(), (int)nocase,
noruns, dumpMask(msk).c_str(), dumpMask(cmp).c_str());
noruns, dumpMask(msk).c_str(), dumpMask(lcmp).c_str());
if (!maskIsConsistent(s_final, nocase, msk, cmp)) {
DEBUG_PRINTF("msk/cmp for literal can't match, skipping\n");
if (!maskIsConsistent(s_final, nocase, msk, lcmp)) {
DEBUG_PRINTF("msk/lcmp for literal can't match, skipping\n");
return;
}
const auto &groups = f.groups;
mp.lits.emplace_back(std::move(s_final), nocase, noruns, f.fragment_id,
groups, msk, cmp);
groups, msk, lcmp);
}
static
@ -748,11 +748,11 @@ void addAccelLiteral(MatcherProto &mp, const rose_literal_id &lit,
DEBUG_PRINTF("lit='%s' (len %zu)\n", dumpString(s).c_str(), s.length());
vector<u8> msk = lit.msk; // copy
vector<u8> cmp = lit.cmp; // copy
addLiteralMask(lit, msk, cmp);
vector<u8> lcmp = lit.cmp; // copy
addLiteralMask(lit, msk, lcmp);
if (!maskIsConsistent(s.get_string(), s.any_nocase(), msk, cmp)) {
DEBUG_PRINTF("msk/cmp for literal can't match, skipping\n");
if (!maskIsConsistent(s.get_string(), s.any_nocase(), msk, lcmp)) {
DEBUG_PRINTF("msk/lcmp for literal can't match, skipping\n");
return;
}
@ -761,9 +761,9 @@ void addAccelLiteral(MatcherProto &mp, const rose_literal_id &lit,
string s_final = lit.s.get_string();
trim_to_suffix(s_final, max_len);
trim_to_suffix(msk, max_len);
trim_to_suffix(cmp, max_len);
trim_to_suffix(lcmp, max_len);
mp.accel_lits.emplace_back(s_final, lit.s.any_nocase(), msk, cmp,
mp.accel_lits.emplace_back(s_final, lit.s.any_nocase(), msk, lcmp,
info.group_mask);
}

View File

@ -794,9 +794,9 @@ template<typename VertexCont>
static never_inline
bool checkPredDelays(const RoseBuildImpl &build, const VertexCont &v1,
const VertexCont &v2) {
flat_set<RoseVertex> preds;
flat_set<RoseVertex> fpreds;
for (auto v : v1) {
insert(&preds, inv_adjacent_vertices(v, build.g));
insert(&fpreds, inv_adjacent_vertices(v, build.g));
}
flat_set<u32> pred_lits;
@ -811,7 +811,7 @@ bool checkPredDelays(const RoseBuildImpl &build, const VertexCont &v1,
insert(&known_good_preds, inv_adjacent_vertices(v, build.g));
}
for (auto u : preds) {
for (auto u : fpreds) {
if (!contains(known_good_preds, u)) {
insert(&pred_lits, build.g[u].literals);
}
@ -1536,11 +1536,11 @@ private:
static
flat_set<pair<size_t, u32>> get_pred_tops(RoseVertex v, const RoseGraph &g) {
flat_set<pair<size_t, u32>> preds;
flat_set<pair<size_t, u32>> fpreds;
for (const auto &e : in_edges_range(v, g)) {
preds.emplace(g[source(e, g)].index, g[e].rose_top);
fpreds.emplace(g[source(e, g)].index, g[e].rose_top);
}
return preds;
return fpreds;
}
/**
@ -1592,14 +1592,14 @@ void dedupeLeftfixesVariableLag(RoseBuildImpl &build) {
assert(!is_triggered(*left.graph()) || onlyOneTop(*left.graph()));
}
auto preds = get_pred_tops(verts.front(), g);
auto vpreds = get_pred_tops(verts.front(), g);
for (RoseVertex v : verts) {
if (preds != get_pred_tops(v, g)) {
if (vpreds != get_pred_tops(v, g)) {
DEBUG_PRINTF("distinct pred sets\n");
continue;
}
}
auto preds_copy = std::move(preds);
auto preds_copy = std::move(vpreds);
engine_groups[DedupeLeftKey(build, preds_copy , left)].emplace_back(left);
}
@ -2049,8 +2049,8 @@ void mergeCastleLeftfixes(RoseBuildImpl &build) {
DEBUG_PRINTF("chunked castles into %zu groups\n", chunks.size());
for (auto &chunk : chunks) {
mergeCastleChunk(build, chunk, eng_verts);
for (auto &cchunk : chunks) {
mergeCastleChunk(build, cchunk, eng_verts);
}
}
@ -2441,13 +2441,13 @@ void chunkedDfaMerge(vector<RawDfa *> &dfas,
DEBUG_PRINTF("begin merge of %zu dfas\n", dfas.size());
vector<RawDfa *> out_dfas;
vector<RawDfa *> chunk;
vector<RawDfa *> dchunk;
for (auto it = begin(dfas), ite = end(dfas); it != ite; ++it) {
chunk.emplace_back(*it);
if (chunk.size() >= DFA_CHUNK_SIZE_MAX || next(it) == ite) {
pairwiseDfaMerge(chunk, dfa_mapping, outfixes, merge_func);
out_dfas.insert(end(out_dfas), begin(chunk), end(chunk));
chunk.clear();
dchunk.emplace_back(*it);
if (dchunk.size() >= DFA_CHUNK_SIZE_MAX || next(it) == ite) {
pairwiseDfaMerge(dchunk, dfa_mapping, outfixes, merge_func);
out_dfas.insert(end(out_dfas), begin(dchunk), end(dchunk));
dchunk.clear();
}
}
@ -2790,8 +2790,8 @@ void mergeCastleSuffixes(RoseBuildImpl &build) {
eng_verts[c].emplace_back(v);
}
for (auto &chunk : by_reach | map_values) {
mergeCastleSuffixChunk(g, chunk, eng_verts);
for (auto &cchunk : by_reach | map_values) {
mergeCastleSuffixChunk(g, cchunk, eng_verts);
}
}

View File

@ -1714,18 +1714,18 @@ void getLeftMergeSiblings(const RoseBuildImpl &build, RoseVertex a,
assert(!g[a].literals.empty());
u32 lit_id = *g[a].literals.begin();
const auto &verts = build.literal_info.at(lit_id).vertices;
RoseVertex pred = pickPred(a, g, build);
RoseVertex ppred = pickPred(a, g, build);
siblings.clear();
if (pred == RoseGraph::null_vertex() || build.isAnyStart(pred) ||
out_degree(pred, g) > verts.size()) {
if (ppred == RoseGraph::null_vertex() || build.isAnyStart(ppred) ||
out_degree(ppred, g) > verts.size()) {
// Select sibling from amongst the vertices that share a literal.
insert(&siblings, siblings.end(), verts);
} else {
// Select sibling from amongst the vertices that share a
// predecessor.
insert(&siblings, siblings.end(), adjacent_vertices(pred, g));
insert(&siblings, siblings.end(), adjacent_vertices(ppred, g));
}
}
@ -1848,14 +1848,14 @@ void splitByRightProps(const RoseGraph &g,
vector<vector<RoseVertex>> &buckets) {
// Successor vector used in make_split_key. We declare it here so we can
// reuse storage.
vector<RoseVertex> succ;
vector<RoseVertex> vsucc;
// Split by {successors, literals, reports}.
auto make_split_key = [&](RoseVertex v) {
succ.clear();
insert(&succ, succ.end(), adjacent_vertices(v, g));
sort(succ.begin(), succ.end());
return hash_all(g[v].literals, g[v].reports, succ);
vsucc.clear();
insert(&vsucc, vsucc.end(), adjacent_vertices(v, g));
sort(vsucc.begin(), vsucc.end());
return hash_all(g[v].literals, g[v].reports, vsucc);
};
splitAndFilterBuckets(buckets, make_split_key);
}

View File

@ -102,7 +102,7 @@ bool determinise(Auto &n, std::vector<ds> &dstates, size_t state_limit,
dstates.emplace_back(ds(alphabet_size));
}
std::vector<StateSet> succs(alphabet_size, n.dead);
std::vector<StateSet> succrs(alphabet_size, n.dead);
while (!q.empty()) {
auto m = std::move(q.front());
@ -133,13 +133,13 @@ bool determinise(Auto &n, std::vector<ds> &dstates, size_t state_limit,
}
/* fill in successor states */
n.transition(curr, &succs[0]);
n.transition(curr, &succrs[0]);
for (symbol_t s = 0; s < n.alphasize; s++) {
dstate_id_t succ_id;
if (s && succs[s] == succs[s - 1]) {
if (s && succrs[s] == succrs[s - 1]) {
succ_id = dstates[curr_id].next[s - 1];
} else {
auto p = dstate_ids.find(succs[s]);
auto p = dstate_ids.find(succrs[s]);
if (p != dstate_ids.end()) { // succ[s] is already present
succ_id = p->second;
if (succ_id > curr_id && !dstates[succ_id].daddy
@ -148,10 +148,10 @@ bool determinise(Auto &n, std::vector<ds> &dstates, size_t state_limit,
}
} else {
succ_id = dstate_ids.size();
dstate_ids.emplace(succs[s], succ_id);
dstate_ids.emplace(succrs[s], succ_id);
dstates.emplace_back(ds(alphabet_size));
dstates.back().daddy = n.unalpha[s] < N_CHARS ? curr_id : 0;
q.emplace(succs[s], succ_id);
q.emplace(succrs[s], succ_id);
}
DEBUG_PRINTF("-->%hu on %02hx\n", succ_id, n.unalpha[s]);