Merge branch 'develop' into bugfix-rose-segfault

Author: g. economou
Date: 2024-05-16 09:57:58 +03:00
Committed by: GitHub
201 changed files with 3022 additions and 2001 deletions

View File

@@ -227,7 +227,7 @@ int roseBlockFloating(const struct RoseEngine *t, struct hs_scratch *scratch) {
const size_t length = scratch->core_info.len;
char *state = scratch->core_info.state;
struct RoseContext *tctxt = &scratch->tctxt;
const struct RoseContext *tctxt = &scratch->tctxt;
DEBUG_PRINTF("ftable fd=%u fmd %u\n", t->floatingDistance,
t->floatingMinDistance);
@@ -377,7 +377,7 @@ void roseBlockExec(const struct RoseEngine *t, struct hs_scratch *scratch) {
init_for_block(t, scratch, state, is_small_block);
struct RoseContext *tctxt = &scratch->tctxt;
const struct RoseContext *tctxt = &scratch->tctxt;
if (is_small_block) {
const void *sbtable = getSBLiteralMatcher(t);
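Note: the runtime hunks above (and several like them below) only tighten a local declaration to const, presumably because the pointer is read but never written through on these paths. A minimal, hypothetical sketch of the same pattern, not taken from the library:

// Marking a pointer const documents and enforces read-only access.
// Names below are illustrative only.
#include <cstdio>

struct Context { unsigned groups; };

static void report(const Context *ctx) {   // read-only view of the context
    std::printf("groups=%u\n", ctx->groups);
    // ctx->groups = 0;   // would not compile: ctx points to const
}

int main() {
    Context c{42};
    report(&c);
    return 0;
}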

View File

@@ -679,7 +679,7 @@ hwlmcb_rv_t buildSufPQ(const struct RoseEngine *t, char *state, s64a safe_loc,
s64a final_loc, struct hs_scratch *scratch) {
assert(scratch->catchup_pq.qm_size <= t->outfixEndQueue);
struct RoseContext *tctxt = &scratch->tctxt;
const struct RoseContext *tctxt = &scratch->tctxt;
assert(t->activeArrayCount);
assert(scratch->core_info.buf_offset + final_loc

View File

@@ -192,7 +192,7 @@ int roseCountingMiracleOccurs(const struct RoseEngine *t,
u32 count = 0;
s64a m_loc = start;
s64a m_loc;
if (!cm->shufti) {
u8 c = cm->c;

View File

@@ -68,7 +68,7 @@ void printMatch(const struct core_info *ci, u64a start, u64a end) {
hwlmcb_rv_t roseDelayRebuildCallback(size_t end, u32 id,
struct hs_scratch *scratch) {
struct RoseContext *tctx = &scratch->tctxt;
const struct RoseContext *tctx = &scratch->tctxt;
struct core_info *ci = &scratch->core_info;
const struct RoseEngine *t = ci->rose;
size_t rb_len = MIN(ci->hlen, t->delayRebuildLength);
@@ -109,7 +109,7 @@ hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t,
u64a top_squash_distance, u64a end,
char in_catchup) {
assert(event == MQE_TOP || event >= MQE_TOP_FIRST);
struct core_info *ci = &scratch->core_info;
const struct core_info *ci = &scratch->core_info;
u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
u32 aaCount = t->activeArrayCount;
@@ -267,7 +267,8 @@ hwlmcb_rv_t playDelaySlot(const struct RoseEngine *t,
const u32 *programs = getByOffset(t, t->delayProgramOffset);
for (u32 it = fatbit_iterate(vicSlot, delay_count, MMB_INVALID);
it != MMB_INVALID; it = fatbit_iterate(vicSlot, delay_count, it)) {
it != MMB_INVALID; it = fatbit_iterate(vicSlot, delay_count, it)) {
// cppcheck-suppress unreadVariable
UNUSED rose_group old_groups = tctxt->groups;
DEBUG_PRINTF("DELAYED MATCH id=%u offset=%llu\n", it, offset);
@@ -296,7 +297,7 @@ hwlmcb_rv_t flushAnchoredLiteralAtLoc(const struct RoseEngine *t,
struct hs_scratch *scratch,
u32 curr_loc) {
struct RoseContext *tctxt = &scratch->tctxt;
struct fatbit *curr_row = getAnchoredLiteralLog(scratch)[curr_loc - 1];
const struct fatbit *curr_row = getAnchoredLiteralLog(scratch)[curr_loc - 1];
u32 region_width = t->anchored_count;
const u32 *programs = getByOffset(t, t->anchoredProgramOffset);
@@ -334,7 +335,7 @@ hwlmcb_rv_t flushAnchoredLiteralAtLoc(const struct RoseEngine *t,
static really_inline
u32 anchored_it_begin(struct hs_scratch *scratch) {
struct RoseContext *tctxt = &scratch->tctxt;
const struct RoseContext *tctxt = &scratch->tctxt;
if (tctxt->lastEndOffset >= scratch->anchored_literal_region_len) {
return MMB_INVALID;
}
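Note: the delay-slot replay above walks the set bits of a victim bitmap with fatbit_iterate. A rough, generic sketch of that "visit every set bit" idiom on a plain 64-bit word (GCC/Clang builtin assumed; this is not the fatbit API):

#include <cstdint>
#include <cstdio>

static void for_each_set_bit(std::uint64_t bits) {
    while (bits) {
        unsigned idx = static_cast<unsigned>(__builtin_ctzll(bits)); // lowest set bit
        std::printf("delayed literal id %u\n", idx);
        bits &= bits - 1;                                            // clear it and continue
    }
}

int main() {
    for_each_set_bit(0x0000000000000115ULL);   // visits ids 0, 2, 4, 8
    return 0;
}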

View File

@@ -254,8 +254,8 @@ void roseFlushLastByteHistory(const struct RoseEngine *t,
return;
}
struct RoseContext *tctxt = &scratch->tctxt;
struct core_info *ci = &scratch->core_info;
const struct RoseContext *tctxt = &scratch->tctxt;
const struct core_info *ci = &scratch->core_info;
/* currEnd is last byte of string + 1 */
if (tctxt->lastEndOffset == ci->buf_offset + ci->len

View File

@@ -131,7 +131,7 @@ RoseVertex createVertex(RoseBuildImpl *build, const RoseVertex parent,
/* fill in report information */
g[v].reports.insert(reports.begin(), reports.end());
RoseEdge e = add_edge(parent, v, g);
RoseEdge e = add_edge(parent, v, g).first;
DEBUG_PRINTF("adding edge (%u, %u) to parent\n", minBound, maxBound);
g[e].minBound = minBound;
@@ -161,7 +161,7 @@ RoseVertex createAnchoredVertex(RoseBuildImpl *build, u32 literalId,
DEBUG_PRINTF("created anchored vertex %zu with lit id %u\n", g[v].index,
literalId);
RoseEdge e = add_edge(build->anchored_root, v, g);
RoseEdge e = add_edge(build->anchored_root, v, g).first;
g[e].minBound = min_offset;
g[e].maxBound = max_offset;
@@ -307,7 +307,7 @@ void createVertices(RoseBuildImpl *tbi,
RoseVertex p = pv.first;
RoseEdge e = add_edge(p, w, g);
RoseEdge e = add_edge(p, w, g).first;
DEBUG_PRINTF("adding edge (%u,%u) to parent\n", edge_props.minBound,
edge_props.maxBound);
g[e].minBound = edge_props.minBound;
@@ -345,7 +345,7 @@ void createVertices(RoseBuildImpl *tbi,
for (const auto &pv : parents) {
const RoseInEdgeProps &edge_props = bd.ig[pv.second];
RoseEdge e = add_edge(pv.first, g_v, tbi->g);
RoseEdge e = add_edge(pv.first, g_v, tbi->g).first;
g[e].minBound = edge_props.minBound;
g[e].maxBound = edge_props.maxBound;
g[e].history = selectHistory(*tbi, bd, pv.second, e);
@@ -353,7 +353,7 @@ void createVertices(RoseBuildImpl *tbi,
edge_props.minBound, edge_props.maxBound);
}
for (auto &m : created) {
for (const auto &m : created) {
tbi->ghost[m.second] = g_v;
}
}
@@ -518,9 +518,9 @@ u32 findRoseAnchorFloatingOverlap(const RoseInEdgeProps &ep,
static
void findRoseLiteralMask(const NGHolder &h, const u32 lag, vector<u8> &msk,
vector<u8> &cmp) {
vector<u8> &lcmp) {
if (lag >= HWLM_MASKLEN) {
msk.clear(); cmp.clear();
msk.clear(); lcmp.clear();
return;
}
@@ -532,7 +532,7 @@ void findRoseLiteralMask(const NGHolder &h, const u32 lag, vector<u8> &msk,
assert(!curr.empty());
msk.assign(HWLM_MASKLEN, 0);
cmp.assign(HWLM_MASKLEN, 0);
lcmp.assign(HWLM_MASKLEN, 0);
size_t i = HWLM_MASKLEN - lag - 1;
do {
if (curr.empty() || contains(curr, h.start) ||
@@ -549,9 +549,9 @@ void findRoseLiteralMask(const NGHolder &h, const u32 lag, vector<u8> &msk,
cr |= h[v].char_reach;
insert(&next, inv_adjacent_vertices(v, h));
}
make_and_cmp_mask(cr, &msk[i], &cmp[i]);
DEBUG_PRINTF("%zu: reach=%s, msk=%u, cmp=%u\n", i,
describeClass(cr).c_str(), msk.at(i), cmp.at(i));
make_and_cmp_mask(cr, &msk[i], &lcmp[i]);
DEBUG_PRINTF("%zu: reach=%s, msk=%u, lcmp=%u\n", i,
describeClass(cr).c_str(), msk.at(i), lcmp.at(i));
curr.swap(next);
} while (i-- > 0);
}
@@ -617,18 +617,18 @@ void doRoseLiteralVertex(RoseBuildImpl *tbi, bool use_eod_table,
}
floating:
vector<u8> msk, cmp;
vector<u8> msk, lcmp;
if (tbi->cc.grey.roseHamsterMasks && in_degree(iv, ig) == 1) {
RoseInEdge e = *in_edges(iv, ig).first;
if (ig[e].graph) {
findRoseLiteralMask(*ig[e].graph, ig[e].graph_lag, msk, cmp);
findRoseLiteralMask(*ig[e].graph, ig[e].graph_lag, msk, lcmp);
}
}
u32 delay = iv_info.delay;
rose_literal_table table = use_eod_table ? ROSE_EOD_ANCHORED : ROSE_FLOATING;
u32 literalId = tbi->getLiteralId(iv_info.s, msk, cmp, delay, table);
u32 literalId = tbi->getLiteralId(iv_info.s, msk, lcmp, delay, table);
DEBUG_PRINTF("literal=%u (len=%zu, delay=%u, offsets=[%u,%u] '%s')\n",
literalId, iv_info.s.length(), delay, iv_info.min_offset,
@@ -698,7 +698,7 @@ void makeEodEventLeftfix(RoseBuildImpl &build, RoseVertex u,
g[v].left.graph = eod_leftfix;
g[v].left.leftfix_report = report_mapping.second;
g[v].left.lag = 0;
RoseEdge e1 = add_edge(u, v, g);
RoseEdge e1 = add_edge(u, v, g).first;
g[e1].minBound = 0;
g[e1].maxBound = ROSE_BOUND_INF;
g[v].min_offset = add_rose_depth(g[u].min_offset,
@@ -718,7 +718,7 @@ void makeEodEventLeftfix(RoseBuildImpl &build, RoseVertex u,
g[w].reports = report_mapping.first;
g[w].min_offset = g[v].min_offset;
g[w].max_offset = g[v].max_offset;
RoseEdge e = add_edge(v, w, g);
RoseEdge e = add_edge(v, w, g).first;
g[e].minBound = 0;
g[e].maxBound = 0;
/* No need to set history as the event is only delivered at the last
@@ -794,7 +794,7 @@ void doRoseAcceptVertex(RoseBuildImpl *tbi,
g[w].reports = ig[iv].reports;
g[w].min_offset = g[u].min_offset;
g[w].max_offset = g[u].max_offset;
RoseEdge e = add_edge(u, w, g);
RoseEdge e = add_edge(u, w, g).first;
g[e].minBound = 0;
g[e].maxBound = 0;
g[e].history = ROSE_ROLE_HISTORY_LAST_BYTE;
@@ -938,7 +938,7 @@ void shift_accepts_to_end(const RoseInGraph &ig,
}
static
void populateRoseGraph(RoseBuildImpl *tbi, RoseBuildData &bd) {
void populateRoseGraph(RoseBuildImpl *tbi, const RoseBuildData &bd) {
const RoseInGraph &ig = bd.ig;
/* add the pattern in to the main rose graph */
@@ -1039,9 +1039,9 @@ bool canImplementGraph(NGHolder &h, bool prefilter, const ReportManager &rm,
if (prefilter && cc.grey.prefilterReductions) {
// If we're prefiltering, we can have another go with a reduced graph.
UNUSED size_t numBefore = num_vertices(h);
UNUSED size_t numBefore = num_vertices(h); // cppcheck-suppress unreadVariable
prefilterReductions(h, cc);
UNUSED size_t numAfter = num_vertices(h);
UNUSED size_t numAfter = num_vertices(h); // cppcheck-suppress unreadVariable
DEBUG_PRINTF("reduced from %zu to %zu vertices\n", numBefore, numAfter);
if (isImplementableNFA(h, &rm, cc)) {
@@ -1087,20 +1087,20 @@ bool predsAreDelaySensitive(const RoseInGraph &ig, RoseInVertex v) {
static
u32 maxAvailableDelay(const ue2_literal &pred_key, const ue2_literal &lit_key) {
/* overly conservative if only part of the string is nocase */
string pred = pred_key.get_string();
string predk = pred_key.get_string();
string lit = lit_key.get_string();
if (pred_key.any_nocase() || lit_key.any_nocase()) {
upperString(pred);
upperString(predk);
upperString(lit);
}
string::size_type last = pred.rfind(lit);
string::size_type last = predk.rfind(lit);
if (last == string::npos) {
return MAX_DELAY;
}
u32 raw = pred.size() - last - 1;
u32 raw = predk.size() - last - 1;
return MIN(raw, MAX_DELAY);
}
@@ -1719,7 +1719,7 @@ bool addEodOutfix(RoseBuildImpl &build, const NGHolder &h) {
g[v].left.graph = eod_leftfix;
g[v].left.leftfix_report = report_mapping.second;
g[v].left.lag = 0;
RoseEdge e1 = add_edge(build.anchored_root, v, g);
RoseEdge e1 = add_edge(build.anchored_root, v, g).first;
g[e1].minBound = 0;
g[e1].maxBound = ROSE_BOUND_INF;
g[v].min_offset = findMinWidth(*eod_leftfix);
@@ -1737,7 +1737,7 @@ bool addEodOutfix(RoseBuildImpl &build, const NGHolder &h) {
g[w].reports = report_mapping.first;
g[w].min_offset = g[v].min_offset;
g[w].max_offset = g[v].max_offset;
RoseEdge e = add_edge(v, w, g);
RoseEdge e = add_edge(v, w, g).first;
g[e].minBound = 0;
g[e].maxBound = 0;
g[e].history = ROSE_ROLE_HISTORY_NONE;
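Note: most edits in this file append .first to add_edge(...) calls. In the BGL convention those calls return a (descriptor, inserted) pair rather than a bare edge descriptor; a small Boost.Graph example of that convention, assuming the RoseGraph wrapper follows it:

#include <boost/graph/adjacency_list.hpp>
#include <iostream>

int main() {
    using Graph = boost::adjacency_list<boost::vecS, boost::vecS, boost::directedS>;
    Graph g(2);                                // two vertices: 0 and 1

    auto result = boost::add_edge(0, 1, g);    // std::pair<edge_descriptor, bool>
    auto e = result.first;                     // the descriptor kept by the caller
    std::cout << "inserted new edge: " << std::boolalpha << result.second << '\n';
    std::cout << "edge: " << boost::source(e, g) << " -> " << boost::target(e, g) << '\n';
    return 0;
}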

View File

@@ -131,7 +131,6 @@ void findMaskLiteral(const vector<CharReach> &mask, bool streaming,
if (better) {
best_begin = begin;
best_end = end;
best_len = len;
}
for (size_t i = best_begin; i < best_end; i++) {
@@ -303,31 +302,31 @@ unique_ptr<NGHolder> buildMaskLhs(bool anchored, u32 prefix_len,
assert(prefix_len);
assert(mask.size() >= prefix_len);
NFAVertex pred = anchored ? lhs->start : lhs->startDs;
NFAVertex lpreds = anchored ? lhs->start : lhs->startDs;
u32 m_idx = 0;
while (prefix_len--) {
NFAVertex v = add_vertex(*lhs);
(*lhs)[v].char_reach = mask[m_idx++];
add_edge(pred, v, *lhs);
pred = v;
add_edge(lpreds, v, *lhs);
lpreds = v;
}
add_edge(pred, lhs->accept, *lhs);
(*lhs)[pred].reports.insert(0);
add_edge(lpreds, lhs->accept, *lhs);
(*lhs)[lpreds].reports.insert(0);
return lhs;
}
static
void buildLiteralMask(const vector<CharReach> &mask, vector<u8> &msk,
vector<u8> &cmp, u32 delay) {
vector<u8> &lcmp, u32 delay) {
msk.clear();
cmp.clear();
lcmp.clear();
if (mask.size() <= delay) {
return;
}
// Construct an and/cmp mask from our mask ending at delay positions before
// Construct an and/lcmp mask from our mask ending at delay positions before
// the end of the literal, with max length HWLM_MASKLEN.
auto ite = mask.end() - delay;
@@ -335,11 +334,11 @@ void buildLiteralMask(const vector<CharReach> &mask, vector<u8> &msk,
for (; it != ite; ++it) {
msk.emplace_back(0);
cmp.emplace_back(0);
make_and_cmp_mask(*it, &msk.back(), &cmp.back());
lcmp.emplace_back(0);
make_and_cmp_mask(*it, &msk.back(), &lcmp.back());
}
assert(msk.size() == cmp.size());
assert(msk.size() == lcmp.size());
assert(msk.size() <= HWLM_MASKLEN);
}
@@ -393,9 +392,10 @@ bool validateTransientMask(const vector<CharReach> &mask, bool anchored,
none_of(begin(lits), end(lits), mixed_sensitivity));
// Build the HWLM literal mask.
vector<u8> msk, cmp;
vector<u8> msk;
if (grey.roseHamsterMasks) {
buildLiteralMask(mask, msk, cmp, delay);
vector<u8> lcmp;
buildLiteralMask(mask, msk, lcmp, delay);
}
// We consider the HWLM mask length to run from the first non-zero byte to
@@ -491,9 +491,9 @@ void addTransientMask(RoseBuildImpl &build, const vector<CharReach> &mask,
set_report(*mask_graph, mask_report);
// Build the HWLM literal mask.
vector<u8> msk, cmp;
vector<u8> msk, lcmp;
if (build.cc.grey.roseHamsterMasks) {
buildLiteralMask(mask, msk, cmp, delay);
buildLiteralMask(mask, msk, lcmp, delay);
}
/* adjust bounds to be relative to trigger rather than mask */
@@ -527,7 +527,7 @@ void addTransientMask(RoseBuildImpl &build, const vector<CharReach> &mask,
const flat_set<ReportID> no_reports;
for (const auto &lit : lits) {
u32 lit_id = build.getLiteralId(lit, msk, cmp, delay, table);
u32 lit_id = build.getLiteralId(lit, msk, lcmp, delay, table);
const RoseVertex parent = anchored ? build.anchored_root : build.root;
bool use_mask = delay || maskIsNeeded(lit, *mask_graph);
@@ -540,7 +540,7 @@ void addTransientMask(RoseBuildImpl &build, const vector<CharReach> &mask,
g[v].left.leftfix_report = mask_report;
} else {
// Make sure our edge bounds are correct.
RoseEdge e = edge(parent, v, g);
RoseEdge e = edge(parent, v, g).first;
g[e].minBound = 0;
g[e].maxBound = anchored ? 0 : ROSE_BOUND_INF;
g[e].history = anchored ? ROSE_ROLE_HISTORY_ANCH
@@ -552,7 +552,7 @@ void addTransientMask(RoseBuildImpl &build, const vector<CharReach> &mask,
g[v].max_offset = v_max_offset;
if (eod) {
RoseEdge e = add_edge(v, eod_v, g);
RoseEdge e = add_edge(v, eod_v, g).first;
g[e].minBound = 0;
g[e].maxBound = 0;
g[e].history = ROSE_ROLE_HISTORY_LAST_BYTE;
@@ -570,19 +570,19 @@ unique_ptr<NGHolder> buildMaskRhs(const flat_set<ReportID> &reports,
unique_ptr<NGHolder> rhs = std::make_unique<NGHolder>(NFA_SUFFIX);
NGHolder &h = *rhs;
NFAVertex succ = h.accept;
NFAVertex asucc = h.accept;
u32 m_idx = mask.size() - 1;
while (suffix_len--) {
NFAVertex u = add_vertex(h);
if (succ == h.accept) {
if (asucc == h.accept) {
h[u].reports.insert(reports.begin(), reports.end());
}
h[u].char_reach = mask[m_idx--];
add_edge(u, succ, h);
succ = u;
add_edge(u, asucc, h);
asucc = u;
}
NFAEdge e = add_edge(h.start, succ, h);
NFAEdge e = add_edge(h.start, asucc, h).first;
h[e].tops.insert(DEFAULT_TOP);
return rhs;
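Note: the msk/cmp vectors built here via make_and_cmp_mask() encode a per-byte test of the form (byte & msk) == cmp. A self-contained sketch of that encoding for a case-insensitive letter; the values below are worked out by hand rather than taken from the library:

#include <cstdint>
#include <cstdio>

int main() {
    // 'a' = 0x61 and 'A' = 0x41 differ only in bit 0x20, so that bit is
    // dropped from the AND mask and the compare value keeps the common bits.
    const std::uint8_t msk = static_cast<std::uint8_t>(~0x20);   // 0xdf
    const std::uint8_t cmp = 'A' & msk;                          // 0x41

    for (std::uint8_t b : {'a', 'A', 'b', 'Q'}) {
        bool match = (b & msk) == cmp;
        std::printf("%c -> %s\n", b, match ? "match" : "no match");
    }
    return 0;
}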

View File

@@ -348,11 +348,11 @@ public:
next[s].wdelay = wdelay;
}
nfa_state_set succ;
nfa_state_set gsucc;
if (wdelay != in.wdelay) {
DEBUG_PRINTF("enabling start\n");
succ.set(vertexToIndex[g.startDs]);
gsucc.set(vertexToIndex[g.startDs]);
}
for (size_t i = in.wrap_state.find_first(); i != nfa_state_set::npos;
@@ -368,12 +368,12 @@ public:
continue;
}
succ.set(vertexToIndex[w]);
gsucc.set(vertexToIndex[w]);
}
}
for (size_t j = succ.find_first(); j != nfa_state_set::npos;
j = succ.find_next(j)) {
for (size_t j = gsucc.find_first(); j != nfa_state_set::npos;
j = gsucc.find_next(j)) {
const CharReach &cr = cr_by_index[j];
for (size_t s = cr.find_first(); s != CharReach::npos;
s = cr.find_next(s)) {
@@ -867,13 +867,13 @@ vector<raw_dfa> buildAnchoredDfas(RoseBuildImpl &build,
}
bytecode_ptr<anchored_matcher_info>
buildAnchoredMatcher(RoseBuildImpl &build, const vector<LitFragment> &fragments,
buildAnchoredMatcher(const RoseBuildImpl &build, const vector<LitFragment> &fragments,
vector<raw_dfa> &dfas) {
const CompileContext &cc = build.cc;
if (dfas.empty()) {
DEBUG_PRINTF("empty\n");
return nullptr;
return bytecode_ptr<anchored_matcher_info>(nullptr);
}
for (auto &rdfa : dfas) {
@@ -896,7 +896,7 @@ buildAnchoredMatcher(RoseBuildImpl &build, const vector<LitFragment> &fragments,
for (size_t i = 0; i < nfas.size(); i++) {
const NFA *nfa = nfas[i].get();
anchored_matcher_info *ami = (anchored_matcher_info *)curr;
char *prev_curr = curr;
const char *prev_curr = curr;
curr += sizeof(anchored_matcher_info);
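Note: buildAnchoredMatcher (and several spots later in this commit) now returns an explicitly constructed empty bytecode_ptr instead of a bare nullptr. That is the usual consequence of a wrapper whose pointer constructor is explicit; a hypothetical minimal wrapper showing the effect — not the real bytecode_ptr:

template <typename T>
class owning_ptr {
public:
    explicit owning_ptr(T *p) : ptr(p) {}   // explicit: no implicit conversion from nullptr
    owning_ptr(owning_ptr &&other) noexcept : ptr(other.ptr) { other.ptr = nullptr; }
    owning_ptr(const owning_ptr &) = delete;
    owning_ptr &operator=(const owning_ptr &) = delete;
    ~owning_ptr() { delete ptr; }
    explicit operator bool() const { return ptr != nullptr; }
private:
    T *ptr;
};

owning_ptr<int> make_empty() {
    // return nullptr;                  // no longer compiles with an explicit constructor
    return owning_ptr<int>(nullptr);    // the conversion has to be spelled out
}

int main() {
    return make_empty() ? 1 : 0;
}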

View File

@@ -60,7 +60,7 @@ std::vector<raw_dfa> buildAnchoredDfas(RoseBuildImpl &build,
* given in litPrograms.
*/
bytecode_ptr<anchored_matcher_info>
buildAnchoredMatcher(RoseBuildImpl &build,
buildAnchoredMatcher(const RoseBuildImpl &build,
const std::vector<LitFragment> &fragments,
std::vector<raw_dfa> &dfas);

View File

@@ -476,9 +476,9 @@ rose_group RoseBuildImpl::getInitialGroups() const {
static
bool nfaStuckOn(const NGHolder &g) {
assert(!proper_out_degree(g.startDs, g));
set<NFAVertex> succ;
insert(&succ, adjacent_vertices(g.start, g));
succ.erase(g.startDs);
set<NFAVertex> vsucc;
insert(&vsucc, adjacent_vertices(g.start, g));
vsucc.erase(g.startDs);
set<NFAVertex> asucc;
set<u32> tops;
@@ -493,7 +493,7 @@ bool nfaStuckOn(const NGHolder &g) {
asucc.clear();
insert(&asucc, adjacent_vertices(target(e, g), g));
if (asucc == succ) {
if (asucc == vsucc) {
insert(&done_tops, g[e].tops);
}
}
@@ -531,12 +531,12 @@ void findFixedDepthTops(const RoseGraph &g, const set<PredTopPair> &triggers,
for (const auto &e : pred_by_top) {
u32 top = e.first;
const set<RoseVertex> &preds = e.second;
if (!g[*preds.begin()].fixedOffset()) {
const set<RoseVertex> &spreds = e.second;
if (!g[*spreds.begin()].fixedOffset()) {
continue;
}
u32 depth = g[*preds.begin()].min_offset;
for (RoseVertex u : preds) {
u32 depth = g[*spreds.begin()].min_offset;
for (RoseVertex u : spreds) {
if (g[u].min_offset != depth || g[u].max_offset != depth) {
goto next_top;
}
@@ -674,7 +674,7 @@ buildSuffix(const ReportManager &rm, const SomSlotManager &ssm,
}
assert(suff.graph());
NGHolder &holder = *suff.graph();
const NGHolder &holder = *suff.graph();
assert(holder.kind == NFA_SUFFIX);
const bool oneTop = onlyOneTop(holder);
bool compress_state = cc.streaming;
@@ -925,12 +925,12 @@ void appendTailToHolder(NGHolder &h, const vector<CharReach> &tail) {
static
u32 decreaseLag(const RoseBuildImpl &build, NGHolder &h,
const vector<RoseVertex> &succs) {
const vector<RoseVertex> &vsuccs) {
const RoseGraph &rg = build.g;
static const size_t MAX_RESTORE_LEN = 5;
vector<CharReach> restored(MAX_RESTORE_LEN);
for (RoseVertex v : succs) {
for (RoseVertex v : vsuccs) {
u32 lag = rg[v].left.lag;
for (u32 lit_id : rg[v].literals) {
u32 delay = build.literals.at(lit_id).delay;
@@ -969,7 +969,7 @@ struct eager_info {
static
bool checkSuitableForEager(bool is_prefix, const left_id &left,
const RoseBuildImpl &build,
const vector<RoseVertex> &succs,
const vector<RoseVertex> &vsuccs,
rose_group squash_mask, rose_group initial_groups,
eager_info &ei, const CompileContext &cc) {
DEBUG_PRINTF("checking prefix --> %016llx...\n", squash_mask);
@@ -986,7 +986,7 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
return false;
}
for (RoseVertex s : succs) {
for (RoseVertex s : vsuccs) {
if (build.isInETable(s)
|| contains(rg[s].literals, build.eod_event_literal_id)) {
return false; /* Ignore EOD related prefixes */
@@ -1005,7 +1005,7 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
if (!can_die_early(dfa, EAGER_DIE_BEFORE_LIMIT)) {
return false;
}
ei.new_graph = rg[succs[0]].left.graph;
ei.new_graph = rg[vsuccs[0]].left.graph;
} else if (left.graph()) {
const NGHolder &g = *left.graph();
if (proper_out_degree(g.startDs, g)) {
@@ -1016,7 +1016,7 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
auto gg = ei.new_graph;
gg->kind = NFA_EAGER_PREFIX;
ei.lag_adjust = decreaseLag(build, *gg, succs);
ei.lag_adjust = decreaseLag(build, *gg, vsuccs);
if (is_match_vertex(gg->start, *gg)) {
return false; /* should not still be vacuous as lag decreased */
@@ -1044,17 +1044,17 @@ bool checkSuitableForEager(bool is_prefix, const left_id &left,
static
left_id updateLeftfixWithEager(RoseGraph &g, const eager_info &ei,
const vector<RoseVertex> &succs) {
const vector<RoseVertex> &vsuccs) {
u32 lag_adjust = ei.lag_adjust;
auto gg = ei.new_graph;
for (RoseVertex v : succs) {
for (RoseVertex v : vsuccs) {
g[v].left.graph = gg;
assert(g[v].left.lag >= lag_adjust);
g[v].left.lag -= lag_adjust;
DEBUG_PRINTF("added %u literal chars back, new lag %u\n", lag_adjust,
g[v].left.lag);
}
left_id leftfix = g[succs[0]].left;
left_id leftfix = left_id(g[vsuccs[0]].left);
if (leftfix.graph()) {
assert(leftfix.graph()->kind == NFA_PREFIX
@@ -1099,7 +1099,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
const map<left_id, set<PredTopPair> > &infixTriggers,
set<u32> *no_retrigger_queues, set<u32> *eager_queues,
const map<left_id, eager_info> &eager,
const vector<RoseVertex> &succs, left_id leftfix) {
const vector<RoseVertex> &vsuccs, left_id leftfix) {
RoseGraph &g = build.g;
const CompileContext &cc = build.cc;
const ReportManager &rm = build.rm;
@@ -1111,7 +1111,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
if (contains(eager, leftfix)) {
eager_queues->insert(qi);
leftfix = updateLeftfixWithEager(g, eager.at(leftfix), succs);
leftfix = updateLeftfixWithEager(g, eager.at(leftfix), vsuccs);
}
bytecode_ptr<NFA> nfa;
@@ -1159,7 +1159,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
u32 max_queuelen = UINT32_MAX;
if (!prefix) {
set<ue2_literal> lits;
for (RoseVertex v : succs) {
for (RoseVertex v : vsuccs) {
for (auto u : inv_adjacent_vertices_range(v, g)) {
for (u32 lit_id : g[u].literals) {
lits.insert(build.literals.at(lit_id).s);
@@ -1188,7 +1188,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
findCountingMiracleInfo(leftfix, stop, &cm_count, &cm_cr);
}
for (RoseVertex v : succs) {
for (RoseVertex v : vsuccs) {
bc.leftfix_info.emplace(v, left_build_info(qi, g[v].left.lag, max_width,
squash_mask, stop,
max_queuelen, cm_count,
@@ -1229,7 +1229,7 @@ void updateTops(const RoseGraph &g, const TamaInfo &tamaInfo,
const map<pair<const NFA *, u32>, u32> &out_top_remap,
const bool is_suffix) {
u32 i = 0;
for (const auto &n : tamaInfo.subengines) {
for (const auto *n : tamaInfo.subengines) {
for (const auto &v : subengines[i].vertices) {
if (is_suffix) {
tamaProto.add(n, g[v].index, g[v].suffix.top, out_top_remap);
@@ -1378,7 +1378,7 @@ void updateExclusiveSuffixProperties(const RoseBuildImpl &build,
const vector<ExclusiveInfo> &exclusive_info,
set<u32> *no_retrigger_queues) {
const RoseGraph &g = build.g;
for (auto &info : exclusive_info) {
for (const auto &info : exclusive_info) {
const auto &qi = info.queue;
const auto &subengines = info.subengines;
bool no_retrigger = true;
@@ -1495,7 +1495,7 @@ void findExclusiveInfixes(RoseBuildImpl &build, build_context &bc,
}
static
bool buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
void buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
QueueIndexFactory &qif, set<u32> *no_retrigger_queues,
set<u32> *eager_queues, bool do_prefix) {
RoseGraph &g = tbi.g;
@@ -1504,7 +1504,7 @@ bool buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
map<left_id, set<PredTopPair>> infixTriggers;
findInfixTriggers(tbi, &infixTriggers);
insertion_ordered_map<left_id, vector<RoseVertex>> succs;
insertion_ordered_map<left_id, vector<RoseVertex>> lsuccs;
if (cc.grey.allowTamarama && cc.streaming && !do_prefix) {
findExclusiveInfixes(tbi, bc, qif, infixTriggers, no_retrigger_queues);
@@ -1544,7 +1544,7 @@ bool buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
}
}
succs[leftfix].emplace_back(v);
lsuccs[leftfix].emplace_back(v);
}
rose_group initial_groups = tbi.getInitialGroups();
@@ -1552,7 +1552,7 @@ bool buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
map<left_id, eager_info> eager;
for (const auto &m : succs) {
for (const auto &m : lsuccs) {
const left_id &leftfix = m.first;
const auto &left_succs = m.second;
@@ -1573,7 +1573,7 @@ bool buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
eager.clear();
}
for (const auto &m : succs) {
for (const auto &m : lsuccs) {
const left_id &leftfix = m.first;
const auto &left_succs = m.second;
buildLeftfix(tbi, bc, do_prefix, qif.get_queue(), infixTriggers,
@@ -1581,7 +1581,7 @@ bool buildLeftfixes(RoseBuildImpl &tbi, build_context &bc,
leftfix);
}
return true;
return ;
}
static
@@ -1593,7 +1593,7 @@ void findSuffixTriggers(const RoseBuildImpl &tbi,
continue;
}
PredTopPair ptp(v, g[v].suffix.top);
(*suffixTriggers)[g[v].suffix].insert(ptp);
(*suffixTriggers)[suffix_id(g[v].suffix)].insert(ptp);
}
}
@@ -1613,7 +1613,7 @@ public:
explicit OutfixBuilder(const RoseBuildImpl &build_in) : build(build_in) {}
bytecode_ptr<NFA> operator()(boost::blank&) const {
return nullptr;
return bytecode_ptr<NFA>(nullptr);
};
bytecode_ptr<NFA> operator()(unique_ptr<raw_dfa> &rdfa) const {
@@ -1627,11 +1627,11 @@ public:
build.rm);
}
bytecode_ptr<NFA> operator()(unique_ptr<NGHolder> &holder) const {
bytecode_ptr<NFA> operator()(const unique_ptr<NGHolder> &holder) const {
const CompileContext &cc = build.cc;
const ReportManager &rm = build.rm;
NGHolder &h = *holder;
const NGHolder &h = *holder;
assert(h.kind == NFA_OUTFIX);
// Build NFA.
@@ -1657,10 +1657,10 @@ public:
return n;
}
bytecode_ptr<NFA> operator()(UNUSED MpvProto &mpv) const {
bytecode_ptr<NFA> operator()(UNUSED const MpvProto &mpv) const {
// MPV construction handled separately.
assert(mpv.puffettes.empty());
return nullptr;
return bytecode_ptr<NFA>(nullptr);
}
private:
@@ -1901,8 +1901,8 @@ void findExclusiveSuffixes(RoseBuildImpl &tbi, build_context &bc,
// We may have already built this NFA.
if (contains(suffixes, s)) {
u32 id = suffixes[s];
if (!tbi.isInETable(v)) {
u32 id = suffixes[s];
vertex_map[id].emplace_back(v);
}
continue;
@@ -2059,16 +2059,8 @@ bool buildNfas(RoseBuildImpl &tbi, build_context &bc, QueueIndexFactory &qif,
suffixTriggers.clear();
*leftfixBeginQueue = qif.allocated_count();
if (!buildLeftfixes(tbi, bc, qif, no_retrigger_queues, eager_queues,
true)) {
return false;
}
if (!buildLeftfixes(tbi, bc, qif, no_retrigger_queues, eager_queues,
false)) {
return false;
}
buildLeftfixes(tbi, bc, qif, no_retrigger_queues, eager_queues,true);
buildLeftfixes(tbi, bc, qif, no_retrigger_queues, eager_queues,false);
return true;
}
@@ -2241,20 +2233,19 @@ vector<u32> buildSuffixEkeyLists(const RoseBuildImpl &build, build_context &bc,
for (const auto &e : bc.suffixes) {
const suffix_id &s = e.first;
u32 qi = e.second;
set<u32> ekeys = reportsToEkeys(all_reports(s), build.rm);
if (!ekeys.empty()) {
u32 qi = e.second;
qi_to_ekeys[qi] = {ekeys.begin(), ekeys.end()};
}
}
/* for each outfix also build elists */
for (const auto &outfix : build.outfixes) {
u32 qi = outfix.get_queue();
set<u32> ekeys = reportsToEkeys(all_reports(outfix), build.rm);
if (!ekeys.empty()) {
u32 qi = outfix.get_queue();
qi_to_ekeys[qi] = {ekeys.begin(), ekeys.end()};
}
}
@@ -2313,12 +2304,12 @@ bool anyEndfixMpvTriggers(const RoseBuildImpl &build) {
if (!g[v].suffix) {
continue;
}
if (contains(done, g[v].suffix)) {
if (contains(done, suffix_id(g[v].suffix))) {
continue; /* already done */
}
done.insert(g[v].suffix);
done.insert(suffix_id(g[v].suffix));
if (hasMpvTrigger(all_reports(g[v].suffix), build.rm)) {
if (hasMpvTrigger(all_reports(suffix_id(g[v].suffix)), build.rm)) {
return true;
}
}
@@ -2378,7 +2369,7 @@ void recordResources(RoseResources &resources, const RoseBuildImpl &build,
resources.has_eod = true;
break;
}
if (g[v].suffix && has_eod_accepts(g[v].suffix)) {
if (g[v].suffix && has_eod_accepts(suffix_id(g[v].suffix))) {
resources.has_eod = true;
break;
}
@@ -2463,7 +2454,7 @@ bool hasEodAnchors(const RoseBuildImpl &build, const build_context &bc,
DEBUG_PRINTF("literally report eod\n");
return true;
}
if (g[v].suffix && has_eod_accepts(g[v].suffix)) {
if (g[v].suffix && has_eod_accepts(suffix_id(g[v].suffix))) {
DEBUG_PRINTF("eod suffix\n");
return true;
}
@@ -2538,7 +2529,7 @@ void writeNfaInfo(const RoseBuildImpl &build, build_context &bc,
if (!g[v].suffix) {
continue;
}
u32 qi = bc.suffixes.at(g[v].suffix);
u32 qi = bc.suffixes.at(suffix_id(g[v].suffix));
assert(qi < infos.size());
if (build.isInETable(v)) {
infos.at(qi).eod = 1;
@@ -2728,7 +2719,7 @@ void buildLeftInfoTable(const RoseBuildImpl &tbi, build_context &bc,
}
static
RoseProgram makeLiteralProgram(const RoseBuildImpl &build, build_context &bc,
RoseProgram makeLiteralProgram(const RoseBuildImpl &build, const build_context &bc,
ProgramBuild &prog_build, u32 lit_id,
const vector<vector<RoseEdge>> &lit_edge_map,
bool is_anchored_replay_program) {
@@ -2742,7 +2733,7 @@ RoseProgram makeLiteralProgram(const RoseBuildImpl &build, build_context &bc,
}
static
RoseProgram makeFragmentProgram(const RoseBuildImpl &build, build_context &bc,
RoseProgram makeFragmentProgram(const RoseBuildImpl &build, const build_context &bc,
ProgramBuild &prog_build,
const vector<u32> &lit_ids,
const vector<vector<RoseEdge>> &lit_edge_map) {
@@ -2973,9 +2964,10 @@ void buildFragmentPrograms(const RoseBuildImpl &build,
pfrag.lit_ids, lit_edge_map);
if (pfrag.included_frag_id != INVALID_FRAG_ID &&
!lit_prog.empty()) {
auto &cfrag = fragments[pfrag.included_frag_id];
const auto &cfrag = fragments[pfrag.included_frag_id];
assert(pfrag.s.length() >= cfrag.s.length() &&
!pfrag.s.any_nocase() >= !cfrag.s.any_nocase());
!pfrag.s.any_nocase() == !cfrag.s.any_nocase());
/** !pfrag.s.any_nocase() >= !cfrag.s.any_nocase()); **/
u32 child_offset = cfrag.lit_program_offset;
DEBUG_PRINTF("child %u offset %u\n", cfrag.fragment_id,
child_offset);
@@ -2993,8 +2985,9 @@ void buildFragmentPrograms(const RoseBuildImpl &build,
if (pfrag.included_delay_frag_id != INVALID_FRAG_ID &&
!rebuild_prog.empty()) {
const auto &cfrag = fragments[pfrag.included_delay_frag_id];
assert(pfrag.s.length() >= cfrag.s.length() &&
!pfrag.s.any_nocase() >= !cfrag.s.any_nocase());
/** assert(pfrag.s.length() >= cfrag.s.length() && **/
assert(pfrag.s.length() == cfrag.s.length() &&
!pfrag.s.any_nocase() != !cfrag.s.any_nocase());
u32 child_offset = cfrag.delay_program_offset;
DEBUG_PRINTF("child %u offset %u\n", cfrag.fragment_id,
child_offset);
@@ -3011,7 +3004,7 @@ void updateLitProtoProgramOffset(vector<LitFragment> &fragments,
auto &proto = *litProto.hwlmProto;
for (auto &lit : proto.lits) {
auto fragId = lit.id;
auto &frag = fragments[fragId];
const auto &frag = fragments[fragId];
if (delay) {
DEBUG_PRINTF("delay_program_offset:%u\n",
frag.delay_program_offset);
@@ -3192,7 +3185,7 @@ set<ReportID> findEngineReports(const RoseBuildImpl &build) {
const auto &g = build.g;
for (auto v : vertices_range(g)) {
if (g[v].suffix) {
insert(&reports, all_reports(g[v].suffix));
insert(&reports, all_reports(suffix_id(g[v].suffix)));
}
}
@@ -3308,10 +3301,11 @@ void addEodEventProgram(const RoseBuildImpl &build, build_context &bc,
// Collect all edges leading into EOD event literal vertices.
vector<RoseEdge> edge_list;
for (const auto &v : lit_info.vertices) {
for (const auto &e : in_edges_range(v, g)) {
edge_list.emplace_back(e);
}
const auto &er = in_edges_range(v, g);
std::copy(begin(er), end(er), std::back_inserter(edge_list));
}
// Sort edge list for determinism, prettiness.
@@ -3648,7 +3642,7 @@ bytecode_ptr<RoseEngine> RoseBuildImpl::buildFinalEngine(u32 minWidth) {
prepMpv(*this, bc, &historyRequired, &mpv_as_outfix);
proto.outfixBeginQueue = qif.allocated_count();
if (!prepOutfixes(*this, bc, &historyRequired)) {
return nullptr;
return bytecode_ptr<RoseEngine>(nullptr);
}
proto.outfixEndQueue = qif.allocated_count();
proto.leftfixBeginQueue = proto.outfixEndQueue;
@@ -3659,7 +3653,7 @@ bytecode_ptr<RoseEngine> RoseBuildImpl::buildFinalEngine(u32 minWidth) {
/* Note: buildNfas may reduce the lag for vertices that have prefixes */
if (!buildNfas(*this, bc, qif, &no_retrigger_queues, &eager_queues,
&proto.leftfixBeginQueue)) {
return nullptr;
return bytecode_ptr<RoseEngine>(nullptr);
}
u32 eodNfaIterOffset = buildEodNfaIterator(bc, proto.leftfixBeginQueue);
buildCountingMiracles(bc);
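Note: one hunk above (around @@ -3308) replaces a push-back loop over in_edges_range with std::copy into a back_inserter. The same idiom with plain ints standing in for RoseEdge:

#include <algorithm>
#include <iterator>
#include <vector>
#include <cstdio>

int main() {
    const std::vector<int> in_edges{3, 1, 2};   // stand-in for in_edges_range(v, g)
    std::vector<int> edge_list;

    std::copy(in_edges.begin(), in_edges.end(), std::back_inserter(edge_list));

    // Sort for determinism, as the surrounding code does.
    std::sort(edge_list.begin(), edge_list.end());
    for (int e : edge_list) {
        std::printf("%d\n", e);
    }
    return 0;
}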

View File

@@ -170,7 +170,6 @@ void renovateCastle(RoseBuildImpl &tbi, CastleProto *castle,
return; /* bail - TODO: be less lazy */
}
vector<CharReach> rem_local_cr;
u32 ok_count = 0;
for (auto it = e.s.end() - g[v].left.lag; it != e.s.end(); ++it) {
if (!isSubsetOf(*it, cr)) {
@@ -253,11 +252,11 @@ bool unmakeCastles(RoseBuildImpl &tbi) {
for (auto v : vertices_range(g)) {
const LeftEngInfo &left = g[v].left;
if (left.castle && left.castle->repeats.size() > 1) {
left_castles[left].emplace_back(v);
left_castles[left_id(left)].emplace_back(v);
}
const RoseSuffixInfo &suffix = g[v].suffix;
if (suffix.castle && suffix.castle->repeats.size() > 1) {
suffix_castles[suffix].emplace_back(v);
suffix_castles[suffix_id(suffix)].emplace_back(v);
}
}

View File

@@ -811,7 +811,7 @@ void RoseBuildImpl::findTransientLeftfixes(void) {
continue;
}
const left_id &left(g[v].left);
const left_id &left(left_id(g[v].left));
if (::ue2::isAnchored(left) && !isInETable(v)) {
/* etable prefixes currently MUST be transient as we do not know
@@ -863,7 +863,7 @@ map<left_id, vector<RoseVertex>> findLeftSucc(const RoseBuildImpl &build) {
for (auto v : vertices_range(build.g)) {
if (build.g[v].left) {
const LeftEngInfo &lei = build.g[v].left;
leftfixes[lei].emplace_back(v);
leftfixes[left_id(lei)].emplace_back(v);
}
}
return leftfixes;
@@ -1144,10 +1144,10 @@ void findTopTriggerCancels(RoseBuildImpl &build) {
for (const auto &r : left_succ) {
const left_id &left = r.first;
const vector<RoseVertex> &succs = r.second;
const vector<RoseVertex> &rsuccs = r.second;
assert(!succs.empty());
if (build.isRootSuccessor(*succs.begin())) {
assert(!rsuccs.empty());
if (build.isRootSuccessor(*rsuccs.begin())) {
/* a prefix is never an infix */
continue;
}
@@ -1156,7 +1156,7 @@ void findTopTriggerCancels(RoseBuildImpl &build) {
set<RoseEdge> rose_edges;
set<u32> pred_lit_ids;
for (auto v : succs) {
for (auto v : rsuccs) {
for (const auto &e : in_edges_range(v, build.g)) {
RoseVertex u = source(e, build.g);
tops_seen.insert(build.g[e].rose_top);
@@ -1212,11 +1212,11 @@ void buildRoseSquashMasks(RoseBuildImpl &tbi) {
* successor of the nfa and all the literals */
for (const auto &e : roses) {
const left_id &left = e.first;
const vector<RoseVertex> &succs = e.second;
const vector<RoseVertex> &rsuccs = e.second;
set<u32> lit_ids;
bool anchored_pred = false;
for (auto v : succs) {
for (auto v : rsuccs) {
lit_ids.insert(tbi.g[v].literals.begin(), tbi.g[v].literals.end());
for (auto u : inv_adjacent_vertices_range(v, tbi.g)) {
anchored_pred |= tbi.isAnchored(u);
@@ -1230,7 +1230,7 @@ void buildRoseSquashMasks(RoseBuildImpl &tbi) {
if (anchored_pred) { /* infix with pred in anchored table */
u32 min_off = ~0U;
u32 max_off = 0U;
for (auto v : succs) {
for (auto v : rsuccs) {
for (auto u : inv_adjacent_vertices_range(v, tbi.g)) {
min_off = min(min_off, tbi.g[u].min_offset);
max_off = max(max_off, tbi.g[u].max_offset);
@@ -1250,7 +1250,7 @@ void buildRoseSquashMasks(RoseBuildImpl &tbi) {
if (!info.delayed_ids.empty()
|| !all_of_in(info.vertices,
[&](RoseVertex v) {
return left == tbi.g[v].left; })) {
return left == left_id(tbi.g[v].left); })) {
DEBUG_PRINTF("group %llu is unsquashable\n", info.group_mask);
unsquashable |= info.group_mask;
}
@@ -1393,7 +1393,7 @@ void addSmallBlockLiteral(RoseBuildImpl &tbi, const simple_anchored_info &sai,
g[v].max_offset = sai.max_bound + sai.literal.length();
lit_info.vertices.insert(v);
RoseEdge e = add_edge(anchored_root, v, g);
RoseEdge e = add_edge(anchored_root, v, g).first;
g[e].minBound = sai.min_bound;
g[e].maxBound = sai.max_bound;
}
@@ -1417,7 +1417,7 @@ void addSmallBlockLiteral(RoseBuildImpl &tbi, const ue2_literal &lit,
g[v].literals.insert(lit_id);
g[v].reports = reports;
RoseEdge e = add_edge(tbi.root, v, g);
RoseEdge e = add_edge(tbi.root, v, g).first;
g[e].minBound = 0;
g[e].maxBound = ROSE_BOUND_INF;
g[v].min_offset = 1;

View File

@@ -99,7 +99,7 @@ unique_ptr<NGHolder> makeFloodProneSuffix(const ue2_literal &s, size_t len,
NFAVertex u = h->start;
for (auto it = s.begin() + s.length() - len; it != s.end(); ++it) {
NFAVertex v = addHolderVertex(*it, *h);
NFAEdge e = add_edge(u, v, *h);
NFAEdge e = add_edge(u, v, *h).first;
if (u == h->start) {
(*h)[e].tops.insert(DEFAULT_TOP);
}
@@ -410,7 +410,7 @@ bool handleStartPrefixCliche(const NGHolder &h, RoseGraph &g, RoseVertex v,
assert(g[e_old].maxBound >= bound_max);
setEdgeBounds(g, e_old, bound_min, bound_max);
} else {
RoseEdge e_new = add_edge(ar, v, g);
RoseEdge e_new = add_edge(ar, v, g).first;
setEdgeBounds(g, e_new, bound_min, bound_max);
to_delete->emplace_back(e_old);
}
@@ -561,10 +561,7 @@ bool handleMixedPrefixCliche(const NGHolder &h, RoseGraph &g, RoseVertex v,
DEBUG_PRINTF("woot?\n");
shared_ptr<NGHolder> h_new = make_shared<NGHolder>();
if (!h_new) {
assert(0);
throw std::bad_alloc();
}
unordered_map<NFAVertex, NFAVertex> rhs_map;
vector<NFAVertex> exits_vec;
insert(&exits_vec, exits_vec.end(), exits);
@@ -606,7 +603,7 @@ bool handleMixedPrefixCliche(const NGHolder &h, RoseGraph &g, RoseVertex v,
if (source(e_old, g) == ar) {
setEdgeBounds(g, e_old, ri.repeatMin + width, ri.repeatMax + width);
} else {
RoseEdge e_new = add_edge(ar, v, g);
RoseEdge e_new = add_edge(ar, v, g).first;
setEdgeBounds(g, e_new, ri.repeatMin + width, ri.repeatMax + width);
to_delete->emplace_back(e_old);
}

View File

@@ -129,7 +129,7 @@ RoseDedupeAuxImpl::RoseDedupeAuxImpl(const RoseBuildImpl &build_in)
// Several vertices may share a suffix, so we collect the set of
// suffixes first to avoid repeating work.
if (g[v].suffix) {
suffixes.insert(g[v].suffix);
suffixes.insert(suffix_id(g[v].suffix));
}
}

View File

@@ -118,7 +118,7 @@ bool addPrefixLiterals(NGHolder &h, unordered_set<u32> &tailId,
for (auto v : adjacent_vertices_range(start, h)) {
if (v != h.startDs) {
for (auto &t : tails) {
for (const auto &t : tails) {
add_edge(t, v, h);
}
}
@@ -126,7 +126,7 @@ bool addPrefixLiterals(NGHolder &h, unordered_set<u32> &tailId,
clear_out_edges(start, h);
add_edge(h.start, h.start, h);
for (auto &t : heads) {
for (const auto &t : heads) {
add_edge(start, t, h);
}

View File

@@ -77,7 +77,7 @@ static
bool eligibleForAlwaysOnGroup(const RoseBuildImpl &build, u32 id) {
auto eligble = [&](RoseVertex v) {
return build.isRootSuccessor(v)
&& (!build.g[v].left || !isAnchored(build.g[v].left));
&& (!build.g[v].left || !isAnchored(left_id(build.g[v].left)));
};
if (any_of_in(build.literal_info[id].vertices, eligble)) {
@@ -208,7 +208,7 @@ void allocateGroupForEvent(RoseBuildImpl &build, u32 group_always_on,
bool new_group = !groupCount[group_always_on];
for (RoseVertex v : info.vertices) {
if (build.g[v].left && !isAnchored(build.g[v].left)) {
if (build.g[v].left && !isAnchored(left_id(build.g[v].left))) {
new_group = false;
}
}
@@ -275,7 +275,7 @@ void assignGroupsToLiterals(RoseBuildImpl &build) {
// Second pass: the other literals.
for (u32 id = 0; id < literals.size(); id++) {
const rose_literal_id &lit = literals.at(id);
rose_literal_info &info = literal_info[id];
const rose_literal_info &info = literal_info[id];
if (!requires_group_assignment(lit, info)) {
continue;

View File

@@ -80,7 +80,7 @@ class SmallWriteBuild;
class SomSlotManager;
struct suffix_id {
suffix_id(const RoseSuffixInfo &in)
explicit suffix_id(const RoseSuffixInfo &in)
: g(in.graph.get()), c(in.castle.get()), d(in.rdfa.get()),
h(in.haig.get()), t(in.tamarama.get()),
dfa_min_width(in.dfa_min_width),
@@ -181,7 +181,7 @@ depth findMaxWidth(const suffix_id &s, u32 top);
/** \brief represents an engine to the left of a rose role */
struct left_id {
left_id(const LeftEngInfo &in)
explicit left_id(const LeftEngInfo &in)
: g(in.graph.get()), c(in.castle.get()), d(in.dfa.get()),
h(in.haig.get()), dfa_min_width(in.dfa_min_width),
dfa_max_width(in.dfa_max_width) {
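Note: marking these one-argument constructors explicit is what forces the suffix_id(...) and left_id(...) wrappers that appear at map lookups and comparisons throughout the rest of this commit. A hypothetical illustration of that knock-on effect:

#include <map>
#include <cstdio>

struct Info { int engine; };

struct key_id {
    explicit key_id(const Info &in) : engine(in.engine) {}   // was previously implicit
    int engine;
    bool operator<(const key_id &other) const { return engine < other.engine; }
};

int main() {
    std::map<key_id, int> uses;
    Info info{7};

    // uses[info]++;          // error once the constructor is explicit
    uses[key_id(info)]++;     // the conversion is spelled out at the call site
    std::printf("%zu entries\n", uses.size());
    return 0;
}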

View File

@@ -2319,7 +2319,7 @@ class RoseInstrSetCombination
public:
u32 ckey;
RoseInstrSetCombination(u32 ckey_in) : ckey(ckey_in) {}
explicit RoseInstrSetCombination(u32 ckey_in) : ckey(ckey_in) {}
bool operator==(const RoseInstrSetCombination &ri) const {
return ckey == ri.ckey;
@@ -2361,7 +2361,7 @@ class RoseInstrSetExhaust
public:
u32 ekey;
RoseInstrSetExhaust(u32 ekey_in) : ekey(ekey_in) {}
explicit RoseInstrSetExhaust(u32 ekey_in) : ekey(ekey_in) {}
bool operator==(const RoseInstrSetExhaust &ri) const {
return ekey == ri.ekey;

View File

@@ -98,8 +98,7 @@ void addToBloomFilter(vector<u8> &bloom, const u8 *substr, bool nocase) {
const auto hash_functions = { bloomHash_1, bloomHash_2, bloomHash_3 };
for (const auto &hash_func : hash_functions) {
u32 hash = hash_func(substr, nocase);
u32 key = hash & key_mask;
u32 key = hash_func(substr, nocase) & key_mask;
DEBUG_PRINTF("set key %u (of %zu)\n", key, bloom.size() * 8);
bloom[key / 8] |= 1U << (key % 8);
}
@@ -193,11 +192,9 @@ vector<RoseLongLitHashEntry> buildHashTable(
}
for (const auto &m : hashToLitOffPairs) {
u32 hash = m.first;
u32 bucket = m.first % numEntries;
const LitOffsetVector &d = m.second;
u32 bucket = hash % numEntries;
// Placement via linear probing.
for (const auto &lit_offset : d) {
while (tab[bucket].str_offset != 0) {
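Note: the bloom-filter hunk above sets, for each hash function, the bit at hash & key_mask. A sketch of the same insertion pattern with stand-in hash functions (not the library's bloomHash_*):

#include <cstdint>
#include <cstdio>
#include <vector>

static std::uint32_t hash_a(std::uint32_t x) { return x * 0x9E3779B1u; }
static std::uint32_t hash_b(std::uint32_t x) { return (x ^ 0xDEADBEEFu) * 0x85EBCA6Bu; }

int main() {
    std::vector<std::uint8_t> bloom(64, 0);                  // 512-bit filter
    const std::uint32_t key_mask = bloom.size() * 8 - 1;     // filter size is a power of two

    const std::uint32_t item = 12345;
    for (auto hash_func : {hash_a, hash_b}) {
        std::uint32_t key = hash_func(item) & key_mask;      // bit index into the filter
        bloom[key / 8] |= 1u << (key % 8);
        std::printf("set key %u\n", key);
    }
    return 0;
}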

View File

@@ -202,10 +202,10 @@ void getForwardReach(const raw_dfa &rdfa, map<s32, CharReach> &look) {
}
for (unsigned c = 0; c < N_CHARS; c++) {
dstate_id_t succ = ds.next[rdfa.alpha_remap[c]];
if (succ != DEAD_STATE) {
dstate_id_t dnsucc = ds.next[rdfa.alpha_remap[c]];
if (dnsucc != DEAD_STATE) {
cr.set(c);
next.insert(succ);
next.insert(dnsucc);
}
}
}
@@ -280,13 +280,13 @@ void findForwardReach(const RoseGraph &g, const RoseVertex v,
return;
}
rose_look.emplace_back(map<s32, CharReach>());
getRoseForwardReach(g[t].left, g[e].rose_top, rose_look.back());
getRoseForwardReach(left_id(g[t].left), g[e].rose_top, rose_look.back());
}
if (g[v].suffix) {
DEBUG_PRINTF("suffix engine\n");
rose_look.emplace_back(map<s32, CharReach>());
getSuffixForwardReach(g[v].suffix, g[v].suffix.top, rose_look.back());
getSuffixForwardReach(suffix_id(g[v].suffix), g[v].suffix.top, rose_look.back());
}
combineForwardMasks(rose_look, look);

View File

@@ -75,7 +75,7 @@ string dumpMask(const vector<u8> &v) {
static
bool maskFromLeftGraph(const LeftEngInfo &left, vector<u8> &msk,
vector<u8> &cmp) {
vector<u8> &lcmp) {
const u32 lag = left.lag;
const ReportID report = left.leftfix_report;
@@ -111,9 +111,9 @@ bool maskFromLeftGraph(const LeftEngInfo &left, vector<u8> &msk,
cr |= v_cr;
insert(&next, inv_adjacent_vertices(v, h));
}
make_and_cmp_mask(cr, &msk.at(i), &cmp.at(i));
DEBUG_PRINTF("%zu: reach=%s, msk=%u, cmp=%u\n", i,
describeClass(cr).c_str(), msk[i], cmp[i]);
make_and_cmp_mask(cr, &msk.at(i), &lcmp.at(i));
DEBUG_PRINTF("%zu: reach=%s, msk=%u, lcmp=%u\n", i,
describeClass(cr).c_str(), msk[i], lcmp[i]);
curr.swap(next);
} while (i-- > 0);
@@ -122,7 +122,7 @@ bool maskFromLeftGraph(const LeftEngInfo &left, vector<u8> &msk,
static
bool maskFromLeftCastle(const LeftEngInfo &left, vector<u8> &msk,
vector<u8> &cmp) {
vector<u8> &lcmp) {
const u32 lag = left.lag;
const ReportID report = left.leftfix_report;
@@ -149,23 +149,23 @@ bool maskFromLeftCastle(const LeftEngInfo &left, vector<u8> &msk,
u32 len = min_width;
u32 end = HWLM_MASKLEN - lag;
for (u32 i = end; i > end - min(end, len); i--) {
make_and_cmp_mask(c.reach(), &msk.at(i - 1), &cmp.at(i - 1));
make_and_cmp_mask(c.reach(), &msk.at(i - 1), &lcmp.at(i - 1));
}
return true;
}
static
bool maskFromLeft(const LeftEngInfo &left, vector<u8> &msk, vector<u8> &cmp) {
bool maskFromLeft(const LeftEngInfo &left, vector<u8> &msk, vector<u8> &lcmp) {
if (left.lag >= HWLM_MASKLEN) {
DEBUG_PRINTF("too much lag\n");
return false;
}
if (left.graph) {
return maskFromLeftGraph(left, msk, cmp);
return maskFromLeftGraph(left, msk, lcmp);
} else if (left.castle) {
return maskFromLeftCastle(left, msk, cmp);
return maskFromLeftCastle(left, msk, lcmp);
}
return false;
@@ -173,7 +173,7 @@ bool maskFromLeft(const LeftEngInfo &left, vector<u8> &msk, vector<u8> &cmp) {
static
bool maskFromPreds(const RoseBuildImpl &build, const rose_literal_id &id,
const RoseVertex v, vector<u8> &msk, vector<u8> &cmp) {
const RoseVertex v, vector<u8> &msk, vector<u8> &lcmp) {
const RoseGraph &g = build.g;
// For right now, wuss out and only handle cases with one pred.
@@ -222,7 +222,7 @@ bool maskFromPreds(const RoseBuildImpl &build, const rose_literal_id &id,
ue2_literal::const_iterator it, ite;
for (it = u_id.s.begin() + (u_len - u_sublen), ite = u_id.s.end();
it != ite; ++it) {
make_and_cmp_mask(*it, &msk.at(i), &cmp.at(i));
make_and_cmp_mask(*it, &msk.at(i), &lcmp.at(i));
++i;
}
@@ -231,21 +231,21 @@ bool maskFromPreds(const RoseBuildImpl &build, const rose_literal_id &id,
static
bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
const RoseVertex v, vector<u8> &msk, vector<u8> &cmp) {
const RoseVertex v, vector<u8> &msk, vector<u8> &lcmp) {
// Start with zero masks.
msk.assign(HWLM_MASKLEN, 0);
cmp.assign(HWLM_MASKLEN, 0);
lcmp.assign(HWLM_MASKLEN, 0);
const LeftEngInfo &left = build.g[v].left;
if (left && left.lag < HWLM_MASKLEN) {
if (maskFromLeft(left, msk, cmp)) {
if (maskFromLeft(left, msk, lcmp)) {
DEBUG_PRINTF("mask from a leftfix!\n");
return true;
}
}
if (id.s.length() < HWLM_MASKLEN) {
if (maskFromPreds(build, id, v, msk, cmp)) {
if (maskFromPreds(build, id, v, msk, lcmp)) {
DEBUG_PRINTF("mask from preds!\n");
return true;
}
@@ -255,18 +255,18 @@ bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
}
static
bool hamsterMaskCombine(vector<u8> &msk, vector<u8> &cmp,
bool hamsterMaskCombine(vector<u8> &msk, vector<u8> &lcmp,
const vector<u8> &v_msk, const vector<u8> &v_cmp) {
assert(msk.size() == HWLM_MASKLEN && cmp.size() == HWLM_MASKLEN);
assert(msk.size() == HWLM_MASKLEN && lcmp.size() == HWLM_MASKLEN);
assert(v_msk.size() == HWLM_MASKLEN && v_cmp.size() == HWLM_MASKLEN);
u8 all_masks = 0;
for (size_t i = 0; i < HWLM_MASKLEN; i++) {
u8 filter = ~(cmp[i] ^ v_cmp[i]);
u8 filter = ~(lcmp[i] ^ v_cmp[i]);
msk[i] &= v_msk[i];
msk[i] &= filter;
cmp[i] &= filter;
lcmp[i] &= filter;
all_masks |= msk[i];
}
@@ -278,7 +278,7 @@ bool hamsterMaskCombine(vector<u8> &msk, vector<u8> &cmp,
static
bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
const rose_literal_info &info, vector<u8> &msk,
vector<u8> &cmp) {
vector<u8> &lcmp) {
if (!build.cc.grey.roseHamsterMasks) {
return false;
}
@@ -289,7 +289,7 @@ bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
}
msk.assign(HWLM_MASKLEN, 0);
cmp.assign(HWLM_MASKLEN, 0);
lcmp.assign(HWLM_MASKLEN, 0);
size_t num = 0;
vector<u8> v_msk, v_cmp;
@@ -301,28 +301,28 @@ bool addSurroundingMask(const RoseBuildImpl &build, const rose_literal_id &id,
}
if (!num++) {
// First (or only) vertex, this becomes the mask/cmp pair.
// First (or only) vertex, this becomes the mask/lcmp pair.
msk = v_msk;
cmp = v_cmp;
lcmp = v_cmp;
} else {
// Multiple vertices with potentially different masks. We combine
// them into an 'advisory' mask.
if (!hamsterMaskCombine(msk, cmp, v_msk, v_cmp)) {
if (!hamsterMaskCombine(msk, lcmp, v_msk, v_cmp)) {
DEBUG_PRINTF("mask went to zero\n");
return false;
}
}
}
normaliseLiteralMask(id.s, msk, cmp);
normaliseLiteralMask(id.s, msk, lcmp);
if (msk.empty()) {
DEBUG_PRINTF("no mask\n");
return false;
}
DEBUG_PRINTF("msk=%s, cmp=%s\n", dumpMask(msk).c_str(),
dumpMask(cmp).c_str());
DEBUG_PRINTF("msk=%s, lcmp=%s\n", dumpMask(msk).c_str(),
dumpMask(lcmp).c_str());
return true;
}
@@ -357,13 +357,13 @@ void findMoreLiteralMasks(RoseBuildImpl &build) {
const auto &lit = build.literals.at(id);
auto &lit_info = build.literal_info.at(id);
vector<u8> msk, cmp;
if (!addSurroundingMask(build, lit, lit_info, msk, cmp)) {
vector<u8> msk, lcmp;
if (!addSurroundingMask(build, lit, lit_info, msk, lcmp)) {
continue;
}
DEBUG_PRINTF("found surrounding mask for lit_id=%u (%s)\n", id,
dumpString(lit.s).c_str());
u32 new_id = build.getLiteralId(lit.s, msk, cmp, lit.delay, lit.table);
u32 new_id = build.getLiteralId(lit.s, msk, lcmp, lit.delay, lit.table);
if (new_id == id) {
continue;
}
@@ -392,7 +392,7 @@ void findMoreLiteralMasks(RoseBuildImpl &build) {
// mixed-case is mandatory.
static
void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
vector<u8> &cmp) {
vector<u8> &lcmp) {
const size_t suffix_len = min(id.s.length(), size_t{HWLM_MASKLEN});
bool mixed_suffix = mixed_sensitivity_in(id.s.end() - suffix_len,
id.s.end());
@@ -403,7 +403,7 @@ void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
while (msk.size() < HWLM_MASKLEN) {
msk.insert(msk.begin(), 0);
cmp.insert(cmp.begin(), 0);
lcmp.insert(lcmp.begin(), 0);
}
if (!id.msk.empty()) {
@@ -413,7 +413,7 @@ void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
size_t mand_offset = msk.size() - i - 1;
size_t lit_offset = id.msk.size() - i - 1;
msk[mand_offset] = id.msk[lit_offset];
cmp[mand_offset] = id.cmp[lit_offset];
lcmp[mand_offset] = id.cmp[lit_offset];
}
}
@@ -425,12 +425,12 @@ void addLiteralMask(const rose_literal_id &id, vector<u8> &msk,
size_t offset = HWLM_MASKLEN - i - 1;
DEBUG_PRINTF("offset %zu must match 0x%02x exactly\n", offset,
c.c);
make_and_cmp_mask(c, &msk[offset], &cmp[offset]);
make_and_cmp_mask(c, &msk[offset], &lcmp[offset]);
}
}
}
normaliseLiteralMask(id.s, msk, cmp);
normaliseLiteralMask(id.s, msk, lcmp);
}
static
@@ -477,10 +477,10 @@ bool isNoRunsVertex(const RoseBuildImpl &build, RoseVertex u) {
DEBUG_PRINTF("u=%zu is not a root role\n", g[u].index);
return false;
}
auto edge_result = edge(build.root, u, g);
RoseEdge e = edge_result.first;
RoseEdge e = edge(build.root, u, g);
if (!e) {
if (!edge_result.second) {
DEBUG_PRINTF("u=%zu is not a root role\n", g[u].index);
return false;
}
@@ -635,7 +635,7 @@ u64a literalMinReportOffset(const RoseBuildImpl &build,
}
if (g[v].suffix) {
depth suffix_width = findMinWidth(g[v].suffix, g[v].suffix.top);
depth suffix_width = findMinWidth(suffix_id(g[v].suffix), g[v].suffix.top);
assert(suffix_width.is_reachable());
DEBUG_PRINTF("suffix with width %s\n", suffix_width.str().c_str());
min_offset = min(min_offset, vert_offset + suffix_width);
@@ -704,7 +704,7 @@ void addFragmentLiteral(const RoseBuildImpl &build, MatcherProto &mp,
lit.s.length());
vector<u8> msk = lit.msk; // copy
vector<u8> cmp = lit.cmp; // copy
vector<u8> lcmp = lit.cmp; // copy
bool noruns = isNoRunsFragment(build, f, max_len);
DEBUG_PRINTF("fragment is %s\n", noruns ? "noruns" : "not noruns");
@@ -720,24 +720,24 @@ void addFragmentLiteral(const RoseBuildImpl &build, MatcherProto &mp,
assert(!noruns);
}
addLiteralMask(lit, msk, cmp);
addLiteralMask(lit, msk, lcmp);
const auto &s_final = lit_final.get_string();
bool nocase = lit_final.any_nocase();
DEBUG_PRINTF("id=%u, s='%s', nocase=%d, noruns=%d, msk=%s, cmp=%s\n",
DEBUG_PRINTF("id=%u, s='%s', nocase=%d, noruns=%d, msk=%s, lcmp=%s\n",
f.fragment_id, escapeString(s_final).c_str(), (int)nocase,
noruns, dumpMask(msk).c_str(), dumpMask(cmp).c_str());
noruns, dumpMask(msk).c_str(), dumpMask(lcmp).c_str());
if (!maskIsConsistent(s_final, nocase, msk, cmp)) {
DEBUG_PRINTF("msk/cmp for literal can't match, skipping\n");
if (!maskIsConsistent(s_final, nocase, msk, lcmp)) {
DEBUG_PRINTF("msk/lcmp for literal can't match, skipping\n");
return;
}
const auto &groups = f.groups;
mp.lits.emplace_back(std::move(s_final), nocase, noruns, f.fragment_id,
groups, msk, cmp);
groups, msk, lcmp);
}
static
@@ -748,11 +748,11 @@ void addAccelLiteral(MatcherProto &mp, const rose_literal_id &lit,
DEBUG_PRINTF("lit='%s' (len %zu)\n", dumpString(s).c_str(), s.length());
vector<u8> msk = lit.msk; // copy
vector<u8> cmp = lit.cmp; // copy
addLiteralMask(lit, msk, cmp);
vector<u8> lcmp = lit.cmp; // copy
addLiteralMask(lit, msk, lcmp);
if (!maskIsConsistent(s.get_string(), s.any_nocase(), msk, cmp)) {
DEBUG_PRINTF("msk/cmp for literal can't match, skipping\n");
if (!maskIsConsistent(s.get_string(), s.any_nocase(), msk, lcmp)) {
DEBUG_PRINTF("msk/lcmp for literal can't match, skipping\n");
return;
}
@@ -761,9 +761,9 @@ void addAccelLiteral(MatcherProto &mp, const rose_literal_id &lit,
string s_final = lit.s.get_string();
trim_to_suffix(s_final, max_len);
trim_to_suffix(msk, max_len);
trim_to_suffix(cmp, max_len);
trim_to_suffix(lcmp, max_len);
mp.accel_lits.emplace_back(s_final, lit.s.any_nocase(), msk, cmp,
mp.accel_lits.emplace_back(s_final, lit.s.any_nocase(), msk, lcmp,
info.group_mask);
}
@@ -884,9 +884,9 @@ void buildAccel(const RoseBuildImpl &build,
}
bytecode_ptr<HWLM>
buildHWLMMatcher(const RoseBuildImpl &build, LitProto *litProto) {
buildHWLMMatcher(const RoseBuildImpl &build, const LitProto *litProto) {
if (!litProto) {
return nullptr;
return bytecode_ptr<HWLM>(nullptr);
}
auto hwlm = hwlmBuild(*litProto->hwlmProto, build.cc,
build.getInitialGroups());
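Note: hamsterMaskCombine, shown above, merges per-vertex msk/cmp pairs by filtering out the bit positions where the compare values disagree, leaving an "advisory" mask valid for either vertex. A one-byte sketch of that combine step with made-up values:

#include <cstdint>
#include <cstdio>

int main() {
    std::uint8_t msk = 0xFF, cmp = 0x61;                 // current mask/cmp ('a')
    const std::uint8_t v_msk = 0xFF, v_cmp = 0x41;       // other vertex ('A')

    std::uint8_t filter = static_cast<std::uint8_t>(~(cmp ^ v_cmp));  // bits where both agree
    msk &= v_msk;
    msk &= filter;
    cmp &= filter;

    std::printf("combined msk=0x%02x cmp=0x%02x\n", msk, cmp);   // expect 0xdf / 0x41
    return 0;
}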

View File

@@ -101,7 +101,7 @@ struct LitProto {
};
bytecode_ptr<HWLM>
buildHWLMMatcher(const RoseBuildImpl &build, LitProto *proto);
buildHWLMMatcher(const RoseBuildImpl &build, const LitProto *proto);
std::unique_ptr<LitProto>
buildFloatingMatcherProto(const RoseBuildImpl &build,

View File

@@ -145,7 +145,7 @@ namespace {
/** Key used to group sets of leftfixes by the dedupeLeftfixes path. */
struct RoseGroup {
RoseGroup(const RoseBuildImpl &build, RoseVertex v)
: left_hash(hashLeftfix(build.g[v].left)),
: left_hash(hashLeftfix(left_id(build.g[v].left))),
lag(build.g[v].left.lag), eod_table(build.isInETable(v)) {
const RoseGraph &g = build.g;
assert(in_degree(v, g) == 1);
@@ -262,8 +262,8 @@ bool dedupeLeftfixes(RoseBuildImpl &tbi) {
// Scan the rest of the list for dupes.
for (auto kt = std::next(jt); kt != jte; ++kt) {
if (g[v].left == g[*kt].left
|| !is_equal(g[v].left, g[v].left.leftfix_report,
g[*kt].left, g[*kt].left.leftfix_report)) {
|| !is_equal(left_id(g[v].left), g[v].left.leftfix_report,
left_id(g[*kt].left), g[*kt].left.leftfix_report)) {
continue;
}
@@ -547,8 +547,8 @@ bool checkPrefix(const rose_literal_id &ul, const u32 ulag,
static
bool hasSameEngineType(const RoseVertexProps &u_prop,
const RoseVertexProps &v_prop) {
const left_id u_left = u_prop.left;
const left_id v_left = v_prop.left;
const left_id u_left = left_id(u_prop.left);
const left_id v_left = left_id(v_prop.left);
return !u_left.haig() == !v_left.haig()
&& !u_left.dfa() == !v_left.dfa()
@@ -794,9 +794,9 @@ template<typename VertexCont>
static never_inline
bool checkPredDelays(const RoseBuildImpl &build, const VertexCont &v1,
const VertexCont &v2) {
flat_set<RoseVertex> preds;
flat_set<RoseVertex> fpreds;
for (auto v : v1) {
insert(&preds, inv_adjacent_vertices(v, build.g));
insert(&fpreds, inv_adjacent_vertices(v, build.g));
}
flat_set<u32> pred_lits;
@@ -811,7 +811,7 @@ bool checkPredDelays(const RoseBuildImpl &build, const VertexCont &v1,
insert(&known_good_preds, inv_adjacent_vertices(v, build.g));
}
for (auto u : preds) {
for (auto u : fpreds) {
if (!contains(known_good_preds, u)) {
insert(&pred_lits, build.g[u].literals);
}
@@ -1186,8 +1186,8 @@ bool mergeLeftVL_tryMergeCandidate(RoseBuildImpl &build, left_id &r1,
assert(!r1.graph() == !r2.graph());
if (r1.graph()) {
NGHolder *h1 = r1.graph();
NGHolder *h2 = r2.graph();
const NGHolder *h1 = r1.graph();
const NGHolder *h2 = r2.graph();
CharReach stop1 = findStopAlphabet(*h1, SOM_NONE);
CharReach stop2 = findStopAlphabet(*h2, SOM_NONE);
CharReach stopboth = stop1 & stop2;
@@ -1338,15 +1338,15 @@ void chunk(vector<T> in, vector<vector<T>> *out, size_t chunk_size) {
}
static
insertion_ordered_map<left_id, vector<RoseVertex>> get_eng_verts(RoseGraph &g) {
insertion_ordered_map<left_id, vector<RoseVertex>> get_eng_verts(const RoseGraph &g) {
insertion_ordered_map<left_id, vector<RoseVertex>> eng_verts;
for (auto v : vertices_range(g)) {
const auto &left = g[v].left;
if (!left) {
continue;
}
assert(contains(all_reports(left), left.leftfix_report));
eng_verts[left].emplace_back(v);
assert(contains(all_reports(left_id(left)), left.leftfix_report));
eng_verts[left_id(left)].emplace_back(v);
}
return eng_verts;
@@ -1536,11 +1536,11 @@ private:
static
flat_set<pair<size_t, u32>> get_pred_tops(RoseVertex v, const RoseGraph &g) {
flat_set<pair<size_t, u32>> preds;
flat_set<pair<size_t, u32>> fpreds;
for (const auto &e : in_edges_range(v, g)) {
preds.emplace(g[source(e, g)].index, g[e].rose_top);
fpreds.emplace(g[source(e, g)].index, g[e].rose_top);
}
return preds;
return fpreds;
}
/**
@@ -1592,14 +1592,15 @@ void dedupeLeftfixesVariableLag(RoseBuildImpl &build) {
assert(!is_triggered(*left.graph()) || onlyOneTop(*left.graph()));
}
auto preds = get_pred_tops(verts.front(), g);
auto vpreds = get_pred_tops(verts.front(), g);
for (RoseVertex v : verts) {
if (preds != get_pred_tops(v, g)) {
if (vpreds != get_pred_tops(v, g)) {
DEBUG_PRINTF("distinct pred sets\n");
continue;
}
}
engine_groups[DedupeLeftKey(build, std::move(preds), left)].emplace_back(left);
auto preds_copy = std::move(vpreds);
engine_groups[DedupeLeftKey(build, preds_copy , left)].emplace_back(left);
}
/* We don't bother chunking as we expect deduping to be successful if the
@@ -1686,7 +1687,7 @@ void replaceTops(NGHolder &h, const map<u32, u32> &top_mapping) {
}
static
bool setDistinctTops(NGHolder &h1, const NGHolder &h2,
void setDistinctTops(NGHolder &h1, const NGHolder &h2,
map<u32, u32> &top_mapping) {
flat_set<u32> tops1 = getTops(h1), tops2 = getTops(h2);
@@ -1696,7 +1697,7 @@ bool setDistinctTops(NGHolder &h1, const NGHolder &h2,
// If our tops don't intersect, we're OK to merge with no changes.
if (!has_intersection(tops1, tops2)) {
DEBUG_PRINTF("tops don't intersect\n");
return true;
return ;
}
// Otherwise, we have to renumber the tops in h1 so that they don't overlap
@@ -1711,18 +1712,17 @@ bool setDistinctTops(NGHolder &h1, const NGHolder &h2,
}
replaceTops(h1, top_mapping);
return true;
return ;
}
bool setDistinctRoseTops(RoseGraph &g, NGHolder &h1, const NGHolder &h2,
void setDistinctRoseTops(RoseGraph &g, NGHolder &h1, const NGHolder &h2,
const deque<RoseVertex> &verts1) {
map<u32, u32> top_mapping;
if (!setDistinctTops(h1, h2, top_mapping)) {
return false;
}
setDistinctTops(h1, h2, top_mapping);
if (top_mapping.empty()) {
return true; // No remapping necessary.
return ; // No remapping necessary.
}
for (auto v : verts1) {
@@ -1740,19 +1740,17 @@ bool setDistinctRoseTops(RoseGraph &g, NGHolder &h1, const NGHolder &h2,
}
}
return true;
return ;
}
static
bool setDistinctSuffixTops(RoseGraph &g, NGHolder &h1, const NGHolder &h2,
void setDistinctSuffixTops(RoseGraph &g, NGHolder &h1, const NGHolder &h2,
const deque<RoseVertex> &verts1) {
map<u32, u32> top_mapping;
if (!setDistinctTops(h1, h2, top_mapping)) {
return false;
}
setDistinctTops(h1, h2, top_mapping);
if (top_mapping.empty()) {
return true; // No remapping necessary.
return ; // No remapping necessary.
}
for (auto v : verts1) {
@@ -1762,7 +1760,7 @@ bool setDistinctSuffixTops(RoseGraph &g, NGHolder &h1, const NGHolder &h2,
g[v].suffix.top = top_mapping[t];
}
return true;
return ;
}
/** \brief Estimate the number of accel states in the given graph when built as
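The setDistinctTops family above always succeeded, so the bool it returned carried no information; the hunks convert it and its Rose/suffix wrappers to void and drop the dead error paths at the call sites. A rough stand-in for the resulting contract, where the filled-in top_mapping (rather than a return value) tells the caller whether any remapping happened (the types and renumbering logic here are illustrative, not the real NGHolder handling):

#include <map>
#include <set>

// Always succeeds, so it returns void; callers inspect top_mapping instead.
static void makeDistinct(std::set<unsigned> &tops1, const std::set<unsigned> &tops2,
                         std::map<unsigned, unsigned> &top_mapping) {
    unsigned next = tops2.empty() ? 0 : *tops2.rbegin() + 1;
    for (unsigned t : std::set<unsigned>(tops1)) { // iterate a copy while editing tops1
        if (tops2.count(t)) {
            tops1.erase(t);
            top_mapping[t] = next;
            tops1.insert(next++);
        }
    }
}

int main() {
    std::set<unsigned> h1_tops = {0, 1};
    const std::set<unsigned> h2_tops = {1, 2};
    std::map<unsigned, unsigned> top_mapping;

    makeDistinct(h1_tops, h2_tops, top_mapping); // no bool left to check
    if (top_mapping.empty()) {
        return 1; // nothing overlapped, no remapping was necessary
    }
    return 0;
}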
@@ -1836,10 +1834,7 @@ void mergeNfaLeftfixes(RoseBuildImpl &tbi, LeftfixBouquet &roses) {
}
}
if (!setDistinctRoseTops(g, victim, *r1.graph(), verts2)) {
DEBUG_PRINTF("can't set distinct tops\n");
continue; // next h2
}
setDistinctRoseTops(g, victim, *r1.graph(), verts2);
assert(victim.kind == r1.graph()->kind);
assert(!generates_callbacks(*r1.graph()));
@@ -1892,7 +1887,7 @@ void mergeSmallLeftfixes(RoseBuildImpl &tbi) {
return;
}
RoseGraph &g = tbi.g;
const RoseGraph &g = tbi.g;
LeftfixBouquet nfa_leftfixes;
@@ -1924,7 +1919,7 @@ void mergeSmallLeftfixes(RoseBuildImpl &tbi) {
}
assert(left.graph());
NGHolder &h = *left.graph();
const NGHolder &h = *left.graph();
/* Ensure that kind on the graph is correct */
assert(h.kind == (tbi.isRootSuccessor(v) ? NFA_PREFIX : NFA_INFIX));
@@ -2024,7 +2019,7 @@ void mergeCastleLeftfixes(RoseBuildImpl &build) {
return;
}
RoseGraph &g = build.g;
const RoseGraph &g = build.g;
insertion_ordered_map<left_id, vector<RoseVertex>> eng_verts;
@@ -2038,7 +2033,7 @@ void mergeCastleLeftfixes(RoseBuildImpl &build) {
continue;
}
eng_verts[g[v].left].emplace_back(v);
eng_verts[left_id(g[v].left)].emplace_back(v);
}
map<CharReach, vector<left_id>> by_reach;
@@ -2054,8 +2049,8 @@ void mergeCastleLeftfixes(RoseBuildImpl &build) {
DEBUG_PRINTF("chunked castles into %zu groups\n", chunks.size());
for (auto &chunk : chunks) {
mergeCastleChunk(build, chunk, eng_verts);
for (auto &cchunk : chunks) {
mergeCastleChunk(build, cchunk, eng_verts);
}
}
@@ -2119,10 +2114,7 @@ void mergeSuffixes(RoseBuildImpl &tbi, SuffixBouquet &suffixes,
old_tops[v] = g[v].suffix.top;
}
if (!setDistinctSuffixTops(g, victim, *s1.graph(), verts2)) {
DEBUG_PRINTF("can't set distinct tops\n");
continue; // next h2
}
setDistinctSuffixTops(g, victim, *s1.graph(), verts2);
if (!mergeNfaPair(victim, *s1.graph(), &tbi.rm, tbi.cc)) {
DEBUG_PRINTF("merge failed\n");
@@ -2188,7 +2180,7 @@ void mergeAcyclicSuffixes(RoseBuildImpl &tbi) {
SuffixBouquet suffixes;
RoseGraph &g = tbi.g;
const RoseGraph &g = tbi.g;
for (auto v : vertices_range(g)) {
shared_ptr<NGHolder> h = g[v].suffix.graph;
@@ -2206,7 +2198,7 @@ void mergeAcyclicSuffixes(RoseBuildImpl &tbi) {
continue;
}
suffixes.insert(g[v].suffix, v);
suffixes.insert(suffix_id(g[v].suffix), v);
}
deque<SuffixBouquet> suff_groups;
@@ -2248,7 +2240,7 @@ void mergeSmallSuffixes(RoseBuildImpl &tbi) {
return;
}
RoseGraph &g = tbi.g;
const RoseGraph &g = tbi.g;
SuffixBouquet suffixes;
for (auto v : vertices_range(g)) {
@@ -2268,7 +2260,7 @@ void mergeSmallSuffixes(RoseBuildImpl &tbi) {
continue;
}
suffixes.insert(g[v].suffix, v);
suffixes.insert(suffix_id(g[v].suffix), v);
}
deque<SuffixBouquet> suff_groups;
@@ -2306,7 +2298,7 @@ void mergeOutfixInfo(OutfixInfo &winner, const OutfixInfo &victim) {
}
static
map<NGHolder *, NGHolder *> chunkedNfaMerge(RoseBuildImpl &build,
map<NGHolder *, NGHolder *> chunkedNfaMerge(const RoseBuildImpl &build,
const vector<NGHolder *> &nfas) {
map<NGHolder *, NGHolder *> merged;
@@ -2449,13 +2441,13 @@ void chunkedDfaMerge(vector<RawDfa *> &dfas,
DEBUG_PRINTF("begin merge of %zu dfas\n", dfas.size());
vector<RawDfa *> out_dfas;
vector<RawDfa *> chunk;
vector<RawDfa *> dchunk;
for (auto it = begin(dfas), ite = end(dfas); it != ite; ++it) {
chunk.emplace_back(*it);
if (chunk.size() >= DFA_CHUNK_SIZE_MAX || next(it) == ite) {
pairwiseDfaMerge(chunk, dfa_mapping, outfixes, merge_func);
out_dfas.insert(end(out_dfas), begin(chunk), end(chunk));
chunk.clear();
dchunk.emplace_back(*it);
if (dchunk.size() >= DFA_CHUNK_SIZE_MAX || next(it) == ite) {
pairwiseDfaMerge(dchunk, dfa_mapping, outfixes, merge_func);
out_dfas.insert(end(out_dfas), begin(dchunk), end(dchunk));
dchunk.clear();
}
}
@@ -2798,8 +2790,8 @@ void mergeCastleSuffixes(RoseBuildImpl &build) {
eng_verts[c].emplace_back(v);
}
for (auto &chunk : by_reach | map_values) {
mergeCastleSuffixChunk(g, chunk, eng_verts);
for (auto &cchunk : by_reach | map_values) {
mergeCastleSuffixChunk(g, cchunk, eng_verts);
}
}

View File

@@ -62,7 +62,7 @@ bool mergeableRoseVertices(const RoseBuildImpl &tbi, RoseVertex u,
bool mergeableRoseVertices(const RoseBuildImpl &tbi,
const std::set<RoseVertex> &v1,
const std::set<RoseVertex> &v2);
bool setDistinctRoseTops(RoseGraph &g, NGHolder &h1, const NGHolder &h2,
void setDistinctRoseTops(RoseGraph &g, NGHolder &h1, const NGHolder &h2,
const std::deque<RoseVertex> &verts1);
} // namespace ue2

View File

@@ -513,8 +513,8 @@ bool roseHasTops(const RoseBuildImpl &build, RoseVertex v) {
graph_tops.insert(g[e].rose_top);
}
}
return is_subset_of(graph_tops, all_tops(g[v].left));
return is_subset_of(graph_tops, all_tops(left_id(g[v].left)));
}
#endif
@@ -861,7 +861,6 @@ u32 roseQuality(const RoseResources &res, const RoseEngine *t) {
}
if (eod_prefix) {
always_run++;
DEBUG_PRINTF("eod prefixes are slow");
return 0;
}
@@ -1004,16 +1003,16 @@ bool hasOrphanedTops(const RoseBuildImpl &build) {
for (auto v : vertices_range(g)) {
if (g[v].left) {
set<u32> &tops = leftfixes[g[v].left];
if (!build.isRootSuccessor(v)) {
// Tops for infixes come from the in-edges.
set<u32> &tops = leftfixes[left_id(g[v].left)];
for (const auto &e : in_edges_range(v, g)) {
tops.insert(g[e].rose_top);
}
}
}
if (g[v].suffix) {
suffixes[g[v].suffix].insert(g[v].suffix.top);
suffixes[suffix_id(g[v].suffix)].insert(g[v].suffix.top);
}
}

View File

@@ -1918,8 +1918,8 @@ void makeRoleSuffix(const RoseBuildImpl &build,
if (!g[v].suffix) {
return;
}
assert(contains(suffixes, g[v].suffix));
u32 queue = suffixes.at(g[v].suffix);
assert(contains(suffixes, suffix_id(g[v].suffix)));
u32 queue = suffixes.at(suffix_id(g[v].suffix));
u32 event;
assert(contains(engine_info_by_queue, queue));
const auto eng_info = engine_info_by_queue.at(queue);
@@ -1991,7 +1991,7 @@ void makeRoleInfixTriggers(const RoseBuildImpl &build,
make_pair(g[v].index, g[e].rose_top));
assert(top < MQE_INVALID);
} else if (!isMultiTopType(eng_info.type)) {
assert(num_tops(g[v].left) == 1);
assert(num_tops(left_id(g[v].left)) == 1);
top = MQE_TOP;
} else {
top = MQE_TOP_FIRST + g[e].rose_top;
@@ -2178,7 +2178,7 @@ void makeGroupSquashInstruction(const RoseBuildImpl &build, u32 lit_id,
namespace {
struct ProgKey {
ProgKey(const RoseProgram &p) : prog(&p) {}
explicit ProgKey(const RoseProgram &p) : prog(&p) {}
bool operator==(const ProgKey &b) const {
return RoseProgramEquivalence()(*prog, *b.prog);
@@ -2200,7 +2200,7 @@ RoseProgram assembleProgramBlocks(vector<RoseProgram> &&blocks_in) {
ue2_unordered_set<ProgKey> seen;
for (auto &block : blocks_in) {
if (contains(seen, block)) {
if (contains(seen, ProgKey(block))) {
continue;
}
@@ -2432,9 +2432,8 @@ void addPredBlocksAny(map<u32, RoseProgram> &pred_blocks, u32 num_states,
RoseProgram sparse_program;
vector<u32> keys;
for (const u32 &key : pred_blocks | map_keys) {
keys.emplace_back(key);
}
const auto &k = pred_blocks | map_keys;
std::copy(begin(k), end(k), std::back_inserter(keys));
const RoseInstruction *end_inst = sparse_program.end_instruction();
auto ri = std::make_unique<RoseInstrSparseIterAny>(num_states, keys, end_inst);
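The rewritten block builds the sparse-iterator key list by copying the view produced by the map_keys range adaptor instead of looping by hand. A self-contained sketch of that idiom, assuming Boost.Range's map_keys adaptor (the key and value types are illustrative):

#include <algorithm>
#include <iterator>
#include <map>
#include <vector>
#include <boost/range/adaptor/map.hpp>

int main() {
    const std::map<unsigned, int> pred_blocks = {{2, 10}, {5, 20}, {7, 30}};
    std::vector<unsigned> keys;

    // map_keys yields a lazy view over the keys; std::copy materialises it.
    const auto &k = pred_blocks | boost::adaptors::map_keys;
    std::copy(std::begin(k), std::end(k), std::back_inserter(keys));

    return keys.size() == 3 ? 0 : 1;
}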

View File

@@ -159,13 +159,13 @@ private:
};
struct RoseAliasingInfo {
RoseAliasingInfo(const RoseBuildImpl &build) {
explicit RoseAliasingInfo(const RoseBuildImpl &build) {
const auto &g = build.g;
// Populate reverse leftfix map.
for (auto v : vertices_range(g)) {
if (g[v].left) {
rev_leftfix[g[v].left].insert(v);
rev_leftfix[left_id(g[v].left)].insert(v);
}
}
@@ -259,8 +259,10 @@ bool samePredecessors(RoseVertex a, RoseVertex b, const RoseGraph &g) {
}
for (const auto &e_a : in_edges_range(a, g)) {
RoseEdge e = edge(source(e_a, g), b, g);
if (!e || g[e].rose_top != g[e_a].rose_top) {
auto edge_result = edge(source(e_a, g), b, g);
RoseEdge e = edge_result.first;
if (!edge_result.second || g[e].rose_top != g[e_a].rose_top) {
DEBUG_PRINTF("bad tops\n");
return false;
}
@@ -274,7 +276,9 @@ static
bool hasCommonSuccWithBadBounds(RoseVertex a, RoseVertex b,
const RoseGraph &g) {
for (const auto &e_a : out_edges_range(a, g)) {
if (RoseEdge e = edge(b, target(e_a, g), g)) {
auto edge_result = edge(b, target(e_a, g), g);
RoseEdge e = edge_result.first;
if (edge_result.second) {
if (g[e_a].maxBound < g[e].minBound
|| g[e].maxBound < g[e_a].minBound) {
return true;
@@ -293,7 +297,9 @@ static
bool hasCommonPredWithBadBounds(RoseVertex a, RoseVertex b,
const RoseGraph &g) {
for (const auto &e_a : in_edges_range(a, g)) {
if (RoseEdge e = edge(source(e_a, g), b, g)) {
auto edge_result = edge(source(e_a, g), b, g);
RoseEdge e = edge_result.first;
if (edge_result.second) {
if (g[e_a].maxBound < g[e].minBound
|| g[e].maxBound < g[e_a].minBound) {
return true;
@@ -700,7 +706,9 @@ bool hasCommonPredWithDiffRoses(RoseVertex a, RoseVertex b,
const bool equal_roses = hasEqualLeftfixes(a, b, g);
for (const auto &e_a : in_edges_range(a, g)) {
if (RoseEdge e = edge(source(e_a, g), b, g)) {
auto edge_result = edge(source(e_a, g), b, g);
RoseEdge e = edge_result.first;
if (edge_result.second) {
DEBUG_PRINTF("common pred, e_r=%d r_t %u,%u\n",
(int)equal_roses, g[e].rose_top, g[e_a].rose_top);
if (!equal_roses) {
@@ -907,9 +915,9 @@ bool mergeSameCastle(RoseBuildImpl &build, RoseVertex a, RoseVertex b,
}
}
assert(contains(rai.rev_leftfix[b_left], b));
rai.rev_leftfix[b_left].erase(b);
rai.rev_leftfix[a_left].insert(b);
assert(contains(rai.rev_leftfix[left_id(b_left)], b));
rai.rev_leftfix[left_id(b_left)].erase(b);
rai.rev_leftfix[left_id(a_left)].insert(b);
a_left.leftfix_report = new_report;
b_left.leftfix_report = new_report;
@@ -918,7 +926,7 @@ bool mergeSameCastle(RoseBuildImpl &build, RoseVertex a, RoseVertex b,
updateEdgeTops(g, a, a_top_map);
updateEdgeTops(g, b, b_top_map);
pruneUnusedTops(castle, g, rai.rev_leftfix[a_left]);
pruneUnusedTops(castle, g, rai.rev_leftfix[left_id(a_left)]);
return true;
}
@@ -1026,9 +1034,9 @@ bool attemptRoseCastleMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
b_left.castle = new_castle;
assert(a_left == b_left);
rai.rev_leftfix[a_left].insert(a);
rai.rev_leftfix[a_left].insert(b);
pruneUnusedTops(*new_castle, g, rai.rev_leftfix[a_left]);
rai.rev_leftfix[left_id(a_left)].insert(a);
rai.rev_leftfix[left_id(a_left)].insert(b);
pruneUnusedTops(*new_castle, g, rai.rev_leftfix[left_id(a_left)]);
return true;
}
@@ -1079,7 +1087,9 @@ bool attemptRoseCastleMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
// We should be protected from merging common preds with tops leading
// to completely different repeats by earlier checks, but just in
// case...
if (RoseEdge a_edge = edge(source(e, g), a, g)) {
auto edge_result = edge(source(e, g), a, g);
RoseEdge a_edge = edge_result.first;
if (edge_result.second) {
u32 a_top = g[a_edge].rose_top;
const PureRepeat &a_pr = m_castle->repeats[a_top]; // new report
if (pr != a_pr) {
@@ -1112,9 +1122,9 @@ bool attemptRoseCastleMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
b_left.leftfix_report = new_report;
assert(a_left == b_left);
rai.rev_leftfix[a_left].insert(a);
rai.rev_leftfix[a_left].insert(b);
pruneUnusedTops(*m_castle, g, rai.rev_leftfix[a_left]);
rai.rev_leftfix[left_id(a_left)].insert(a);
rai.rev_leftfix[left_id(a_left)].insert(b);
pruneUnusedTops(*m_castle, g, rai.rev_leftfix[left_id(a_left)]);
return true;
}
@@ -1237,9 +1247,9 @@ bool attemptRoseGraphMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
a_left.graph = new_graph;
b_left.graph = new_graph;
rai.rev_leftfix[a_left].insert(a);
rai.rev_leftfix[a_left].insert(b);
pruneUnusedTops(*new_graph, g, rai.rev_leftfix[a_left]);
rai.rev_leftfix[left_id(a_left)].insert(a);
rai.rev_leftfix[left_id(a_left)].insert(b);
pruneUnusedTops(*new_graph, g, rai.rev_leftfix[left_id(a_left)]);
return true;
}
@@ -1258,7 +1268,7 @@ bool attemptRoseGraphMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
DEBUG_PRINTF("attempting merge of roses on vertices %zu and %zu\n",
g[a].index, g[b].index);
set<RoseVertex> &b_verts = rai.rev_leftfix[b_left];
set<RoseVertex> &b_verts = rai.rev_leftfix[left_id(b_left)];
set<RoseVertex> aa;
aa.insert(a);
@@ -1280,7 +1290,7 @@ bool attemptRoseGraphMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
ReportID new_report = build.getNewNfaReport();
duplicateReport(*b_h, b_left.leftfix_report, new_report);
b_left.leftfix_report = new_report;
pruneReportIfUnused(build, b_h, rai.rev_leftfix[b_left_id], b_oldreport);
pruneReportIfUnused(build, b_h, rai.rev_leftfix[left_id(b_left_id)], b_oldreport);
NGHolder victim;
cloneHolder(victim, *a_h);
@@ -1294,12 +1304,7 @@ bool attemptRoseGraphMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
DEBUG_PRINTF("victim %zu states\n", num_vertices(*a_h));
DEBUG_PRINTF("winner %zu states\n", num_vertices(*b_h));
if (!setDistinctRoseTops(g, victim, *b_h, deque<RoseVertex>(1, a))) {
assert(roseHasTops(build, a));
assert(roseHasTops(build, b));
return false;
}
setDistinctRoseTops(g, victim, *b_h, deque<RoseVertex>(1, a));
assert(victim.kind == b_h->kind);
assert(!generates_callbacks(*b_h));
@@ -1321,16 +1326,16 @@ bool attemptRoseGraphMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
a_left.graph = b_h;
a_left.leftfix_report = new_report;
assert(contains(rai.rev_leftfix[a_left_id], a));
assert(contains(rai.rev_leftfix[b_left_id], b));
rai.rev_leftfix[a_left_id].erase(a);
rai.rev_leftfix[b_left_id].insert(a);
assert(contains(rai.rev_leftfix[left_id(a_left_id)], a));
assert(contains(rai.rev_leftfix[left_id(b_left_id)], b));
rai.rev_leftfix[left_id(a_left_id)].erase(a);
rai.rev_leftfix[left_id(b_left_id)].insert(a);
pruneUnusedTops(*a_h, g, rai.rev_leftfix[a_left_id]);
pruneUnusedTops(*b_h, g, rai.rev_leftfix[b_left_id]);
pruneUnusedTops(*a_h, g, rai.rev_leftfix[left_id(a_left_id)]);
pruneUnusedTops(*b_h, g, rai.rev_leftfix[left_id(b_left_id)]);
// Prune A's report from its old prefix if it was only used by A.
pruneReportIfUnused(build, a_h, rai.rev_leftfix[a_left_id], a_oldreport);
pruneReportIfUnused(build, a_h, rai.rev_leftfix[left_id(a_left_id)], a_oldreport);
reduceImplementableGraph(*b_h, SOM_NONE, nullptr, build.cc);
@@ -1351,9 +1356,9 @@ bool attemptRoseMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
build.g[a].index, build.g[b].index);
assert(a != b);
RoseGraph &g = build.g;
LeftEngInfo &a_left = g[a].left;
LeftEngInfo &b_left = g[b].left;
const RoseGraph &g = build.g;
const LeftEngInfo &a_left = g[a].left;
const LeftEngInfo &b_left = g[b].left;
// Trivial case.
if (a_left == b_left) {
@@ -1601,7 +1606,7 @@ void diamondMergePass(CandidateSet &candidates, RoseBuildImpl &build,
vector<RoseVertex> *dead, bool mergeRoses,
RoseAliasingInfo &rai) {
DEBUG_PRINTF("begin\n");
RoseGraph &g = build.g;
const RoseGraph &g = build.g;
if (candidates.empty()) {
return;
@@ -1719,18 +1724,18 @@ void getLeftMergeSiblings(const RoseBuildImpl &build, RoseVertex a,
assert(!g[a].literals.empty());
u32 lit_id = *g[a].literals.begin();
const auto &verts = build.literal_info.at(lit_id).vertices;
RoseVertex pred = pickPred(a, g, build);
RoseVertex ppred = pickPred(a, g, build);
siblings.clear();
if (pred == RoseGraph::null_vertex() || build.isAnyStart(pred) ||
out_degree(pred, g) > verts.size()) {
if (ppred == RoseGraph::null_vertex() || build.isAnyStart(ppred) ||
out_degree(ppred, g) > verts.size()) {
// Select sibling from amongst the vertices that share a literal.
insert(&siblings, siblings.end(), verts);
} else {
// Select sibling from amongst the vertices that share a
// predecessor.
insert(&siblings, siblings.end(), adjacent_vertices(pred, g));
insert(&siblings, siblings.end(), adjacent_vertices(ppred, g));
}
}
@@ -1853,14 +1858,14 @@ void splitByRightProps(const RoseGraph &g,
vector<vector<RoseVertex>> &buckets) {
// Successor vector used in make_split_key. We declare it here so we can
// reuse storage.
vector<RoseVertex> succ;
vector<RoseVertex> vsucc;
// Split by {successors, literals, reports}.
auto make_split_key = [&](RoseVertex v) {
succ.clear();
insert(&succ, succ.end(), adjacent_vertices(v, g));
sort(succ.begin(), succ.end());
return hash_all(g[v].literals, g[v].reports, succ);
vsucc.clear();
insert(&vsucc, vsucc.end(), adjacent_vertices(v, g));
sort(vsucc.begin(), vsucc.end());
return hash_all(g[v].literals, g[v].reports, vsucc);
};
splitAndFilterBuckets(buckets, make_split_key);
}
@@ -1972,15 +1977,14 @@ bool hasNoDiamondSiblings(const RoseGraph &g, RoseVertex v) {
* merge.
*/
static
void filterDiamondCandidates(RoseGraph &g, CandidateSet &candidates) {
void filterDiamondCandidates(const RoseGraph &g, CandidateSet &candidates) {
DEBUG_PRINTF("%zu candidates enter\n", candidates.size());
vector<RoseVertex> dead;
for (const auto &v : candidates) {
if (hasNoDiamondSiblings(g, v)) {
dead.emplace_back(v);
}
}
auto deads = [&g=g](const RoseVertex &v) {
return (hasNoDiamondSiblings(g, v));
};
std::copy_if(begin(candidates), end(candidates), std::back_inserter(dead), deads);
for (const auto &v : dead) {
candidates.erase(v);
@@ -1992,7 +1996,7 @@ void filterDiamondCandidates(RoseGraph &g, CandidateSet &candidates) {
void aliasRoles(RoseBuildImpl &build, bool mergeRoses) {
const CompileContext &cc = build.cc;
RoseGraph &g = build.g;
const RoseGraph &g = build.g;
assert(!hasOrphanedTops(build));
assert(canImplementGraphs(build));
@@ -2141,7 +2145,9 @@ void mergeDupeLeaves(RoseBuildImpl &build) {
for (const auto &e : in_edges_range(v, g)) {
RoseVertex u = source(e, g);
DEBUG_PRINTF("u index=%zu\n", g[u].index);
if (RoseEdge et = edge(u, t, g)) {
auto edge_result = edge(u, t, g);
RoseEdge et = edge_result.first;
if (edge_result.second) {
if (g[et].minBound <= g[e].minBound
&& g[et].maxBound >= g[e].maxBound) {
DEBUG_PRINTF("remove more constrained edge\n");

View File

@@ -111,21 +111,41 @@ void write_out(scatter_full_plan *plan_out, void *aux_out,
const scatter_plan_raw &raw, u32 aux_base_offset) {
memset(plan_out, 0, sizeof(*plan_out));
#define DO_CASE(t) \
if (!raw.p_##t.empty()) { \
plan_out->s_##t##_offset = aux_base_offset; \
plan_out->s_##t##_count = raw.p_##t.size(); \
assert(ISALIGNED_N((char *)aux_out + aux_base_offset, \
alignof(scatter_unit_##t))); \
memcpy((char *)aux_out + aux_base_offset, raw.p_##t.data(), \
byte_length(raw.p_##t)); \
aux_base_offset += byte_length(raw.p_##t); \
if (!raw.p_u64a.empty()) {
plan_out->s_u64a_offset = aux_base_offset;
plan_out->s_u64a_count = raw.p_u64a.size();
assert(ISALIGNED_N(static_cast<char *>(aux_out) + aux_base_offset, alignof(scatter_unit_u64a)));
auto *start = reinterpret_cast<const char *>(raw.p_u64a.data());
auto *to = static_cast<char *>(aux_out) + aux_base_offset;
std::copy(start, start + byte_length(raw.p_u64a), to);
aux_base_offset += byte_length(raw.p_u64a);
}
if (!raw.p_u32.empty()) {
plan_out->s_u32_offset = aux_base_offset;
plan_out->s_u32_count = raw.p_u32.size();
assert(ISALIGNED_N(static_cast<char *>(aux_out) + aux_base_offset, alignof(scatter_unit_u32)));
auto *start = reinterpret_cast<const char *>(raw.p_u32.data());
auto *to = static_cast<char *>(aux_out) + aux_base_offset;
std::copy(start, start + byte_length(raw.p_u32), to);
aux_base_offset += byte_length(raw.p_u32);
}
if (!raw.p_u16.empty()) {
plan_out->s_u16_offset = aux_base_offset;
plan_out->s_u16_count = raw.p_u16.size();
assert(ISALIGNED_N(static_cast<char *>(aux_out) + aux_base_offset, alignof(scatter_unit_u16)));
auto *start = reinterpret_cast<const char *>(raw.p_u16.data());
auto *to = static_cast<char *>(aux_out) + aux_base_offset;
std::copy(start, start + byte_length(raw.p_u16), to);
aux_base_offset += byte_length(raw.p_u16);
}
if (!raw.p_u8.empty()) {
plan_out->s_u8_offset = aux_base_offset;
plan_out->s_u8_count = raw.p_u8.size();
assert(ISALIGNED_N(static_cast<char *>(aux_out) + aux_base_offset, alignof(scatter_unit_u8)));
auto *start = reinterpret_cast<const char *>(raw.p_u8.data());
auto *to = static_cast<char *>(aux_out) + aux_base_offset;
std::copy(start, start + byte_length(raw.p_u8), to);
}
DO_CASE(u64a);
DO_CASE(u32);
DO_CASE(u16);
DO_CASE(u8);
}
} // namespace ue2
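The hunk above replaces the DO_CASE macro, which used memcpy, with four explicit per-type blocks that copy the scatter units byte-wise via std::copy. A small sketch of that byte copy over a trivially copyable array (the unit struct is illustrative, not the real scatter_unit layout):

#include <algorithm>
#include <cassert>
#include <cstring>
#include <vector>

struct unit { unsigned offset; unsigned long long value; };

int main() {
    const std::vector<unit> src = {{0, 1}, {8, 2}};
    std::vector<char> out(sizeof(unit) * src.size());

    // Byte-wise copy of trivially copyable data: same effect as memcpy, spelled
    // with std::copy as in the expanded per-type blocks above.
    const char *start = reinterpret_cast<const char *>(src.data());
    std::copy(start, start + out.size(), out.data());

    assert(std::memcmp(out.data(), src.data(), out.size()) == 0);
    return 0;
}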

View File

@@ -64,12 +64,11 @@ u32 findMinWidth(const RoseBuildImpl &tbi, enum rose_literal_table table) {
const RoseGraph &g = tbi.g;
vector<RoseVertex> table_verts;
for (auto v : vertices_range(g)) {
if (tbi.hasLiteralInTable(v, table)) {
table_verts.emplace_back(v);
}
}
auto tvs = [&tbi=tbi, &table=table](const RoseVertex &v) {
return (tbi.hasLiteralInTable(v, table));
};
const auto &vr = vertices_range(g);
std::copy_if(begin(vr), end(vr), std::back_inserter(table_verts), tvs);
set<RoseVertex> reachable;
find_reachable(g, table_verts, &reachable);
@@ -95,7 +94,7 @@ u32 findMinWidth(const RoseBuildImpl &tbi, enum rose_literal_table table) {
}
if (g[v].suffix) {
depth suffix_width = findMinWidth(g[v].suffix, g[v].suffix.top);
depth suffix_width = findMinWidth(suffix_id(g[v].suffix), g[v].suffix.top);
assert(suffix_width.is_reachable());
DEBUG_PRINTF("%zu has suffix with top %u (width %s), can fire "
"report at %u\n",
@@ -145,10 +144,10 @@ u32 findMaxBAWidth(const RoseBuildImpl &tbi) {
u64a w = g[v].max_offset;
if (g[v].suffix) {
if (has_non_eod_accepts(g[v].suffix)) {
if (has_non_eod_accepts(suffix_id(g[v].suffix))) {
return ROSE_BOUND_INF;
}
depth suffix_width = findMaxWidth(g[v].suffix, g[v].suffix.top);
depth suffix_width = findMaxWidth(suffix_id(g[v].suffix), g[v].suffix.top);
DEBUG_PRINTF("suffix max width for top %u is %s\n", g[v].suffix.top,
suffix_width.str().c_str());
assert(suffix_width.is_reachable());
@@ -189,13 +188,12 @@ u32 findMaxBAWidth(const RoseBuildImpl &tbi, enum rose_literal_table table) {
table == ROSE_FLOATING ? "floating" : "anchored");
vector<RoseVertex> table_verts;
for (auto v : vertices_range(g)) {
if ((table == ROSE_FLOATING && tbi.isFloating(v))
|| (table == ROSE_ANCHORED && tbi.isAnchored(v))) {
table_verts.emplace_back(v);
}
}
auto tvs = [&tbi=tbi, &table=table](const RoseVertex &v) {
return ((table == ROSE_FLOATING && tbi.isFloating(v))
|| (table == ROSE_ANCHORED && tbi.isAnchored(v)));
};
const auto &vr = vertices_range(g);
std::copy_if(begin(vr), end(vr), std::back_inserter(table_verts), tvs);
set<RoseVertex> reachable;
find_reachable(g, table_verts, &reachable);
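Several hand-written selection loops in these files become std::copy_if with a back_inserter and a reference-capturing lambda as the predicate. A self-contained sketch of the pattern (the container and predicate are illustrative):

#include <algorithm>
#include <iterator>
#include <vector>

int main() {
    const std::vector<int> vertices = {1, 2, 3, 4, 5, 6};
    std::vector<int> table_verts;

    // Predicate captured by reference, mirroring the [&tbi, &table] lambdas above.
    const int table = 3;
    auto tvs = [&table](int v) { return v > table; };

    std::copy_if(vertices.begin(), vertices.end(),
                 std::back_inserter(table_verts), tvs);

    return table_verts.size() == 3 ? 0 : 1;
}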
@@ -222,11 +220,11 @@ u32 findMaxBAWidth(const RoseBuildImpl &tbi, enum rose_literal_table table) {
accept_eod node */
if (g[v].suffix) {
if (has_non_eod_accepts(g[v].suffix)) {
if (has_non_eod_accepts(suffix_id(g[v].suffix))) {
DEBUG_PRINTF("has accept\n");
return ROSE_BOUND_INF;
}
depth suffix_width = findMaxWidth(g[v].suffix);
depth suffix_width = findMaxWidth(suffix_id(g[v].suffix));
DEBUG_PRINTF("suffix max width %s\n", suffix_width.str().c_str());
assert(suffix_width.is_reachable());
if (!suffix_width.is_finite()) {

View File

@@ -104,7 +104,7 @@ void runAnchoredTableStream(const struct RoseEngine *t, const void *atable,
static really_inline
void saveStreamState(const struct NFA *nfa, struct mq *q, s64a loc) {
void saveStreamState(const struct NFA *nfa, const struct mq *q, s64a loc) {
DEBUG_PRINTF("offset=%llu, length=%zu, hlength=%zu, loc=%lld\n",
q->offset, q->length, q->hlength, loc);
nfaQueueCompressState(nfa, q, loc);
@@ -133,7 +133,7 @@ enum MiracleAction roseScanForMiracles(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch, u32 qi,
const struct LeftNfaInfo *left,
const struct NFA *nfa) {
struct core_info *ci = &scratch->core_info;
const struct core_info *ci = &scratch->core_info;
const u32 qCount = t->queueCount;
struct mq *q = scratch->queues + qi;
@@ -211,7 +211,7 @@ char roseCatchUpLeftfix(const struct RoseEngine *t, char *state,
const struct LeftNfaInfo *left) {
assert(!left->transient); // active roses only
struct core_info *ci = &scratch->core_info;
const struct core_info *ci = &scratch->core_info;
const u32 qCount = t->queueCount;
struct mq *q = scratch->queues + qi;
const struct NFA *nfa = getNfaByQueue(t, qi);
@@ -382,7 +382,7 @@ void roseSaveNfaStreamState(const struct RoseEngine *t, char *state,
qi = mmbit_iterate(aa, aaCount, qi)) {
DEBUG_PRINTF("saving stream state for qi=%u\n", qi);
struct mq *q = queues + qi;
const struct mq *q = queues + qi;
// If it's active, it should have an active queue (as we should have
// done some work!)
@@ -517,7 +517,7 @@ void runEagerPrefixesStream(const struct RoseEngine *t,
static really_inline
int can_never_match(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch, size_t length, u64a offset) {
struct RoseContext *tctxt = &scratch->tctxt;
const struct RoseContext *tctxt = &scratch->tctxt;
if (tctxt->groups) {
DEBUG_PRINTF("still has active groups\n");

View File

@@ -332,7 +332,7 @@ void storeLongLiteralState(const struct RoseEngine *t, char *state,
return;
}
struct core_info *ci = &scratch->core_info;
const struct core_info *ci = &scratch->core_info;
const struct RoseLongLitTable *ll_table =
getByOffset(t, t->longLitTableOffset);
assert(ll_table->maxLen);