diff --git a/benchmarks/benchmarks.cpp b/benchmarks/benchmarks.cpp index b9f8d8cb..2890737f 100644 --- a/benchmarks/benchmarks.cpp +++ b/benchmarks/benchmarks.cpp @@ -250,7 +250,7 @@ int main(){ u32 id = 1000; ue2::hwlmLiteral lit(str, true, id); b.nt = ue2::noodBuildTable(lit); - assert(b.nt != nullptr); + assert(b.nt.get() != nullptr); }, [&](MicroBenchmark &b) { noodExec(b.nt.get(), b.buf.data(), b.size, 0, diff --git a/examples/patbench.cc b/examples/patbench.cc index 6a5ce477..d8b836d9 100644 --- a/examples/patbench.cc +++ b/examples/patbench.cc @@ -202,7 +202,7 @@ struct FiveTuple { unsigned int dstPort; // Construct a FiveTuple from a TCP or UDP packet. - FiveTuple(const struct ip *iphdr) { + explicit FiveTuple(const struct ip *iphdr) { // IP fields protocol = iphdr->ip_p; srcAddr = iphdr->ip_src.s_addr; @@ -442,7 +442,7 @@ class Sigdata { public: Sigdata() {} - Sigdata(const char *filename) { + explicit Sigdata(const char *filename) { parseFile(filename, patterns, flags, ids, originals); } diff --git a/examples/pcapscan.cc b/examples/pcapscan.cc index 1ec98d78..5c2c7a9d 100644 --- a/examples/pcapscan.cc +++ b/examples/pcapscan.cc @@ -99,7 +99,7 @@ struct FiveTuple { unsigned int dstPort; // Construct a FiveTuple from a TCP or UDP packet. - FiveTuple(const struct ip *iphdr) { + explicit FiveTuple(const struct ip *iphdr) { // IP fields protocol = iphdr->ip_p; srcAddr = iphdr->ip_src.s_addr; diff --git a/src/compiler/asserts.cpp b/src/compiler/asserts.cpp index 51a052b0..946dfda1 100644 --- a/src/compiler/asserts.cpp +++ b/src/compiler/asserts.cpp @@ -176,7 +176,8 @@ void replaceAssertVertex(NGHolder &g, NFAVertex t, const ExpressionInfo &expr, auto ecit = edge_cache.find(cache_key); if (ecit == edge_cache.end()) { DEBUG_PRINTF("adding edge %zu %zu\n", g[u].index, g[v].index); - NFAEdge e = add_edge(u, v, g); + NFAEdge e; + std::tie(e, std::ignore) = add_edge(u, v, g); edge_cache.emplace(cache_key, e); g[e].assert_flags = flags; if (++assert_edge_count > MAX_ASSERT_EDGES) { diff --git a/src/compiler/compiler.cpp b/src/compiler/compiler.cpp index 35f46b3f..c4d74738 100644 --- a/src/compiler/compiler.cpp +++ b/src/compiler/compiler.cpp @@ -443,7 +443,7 @@ bytecode_ptr generateRoseEngine(NG &ng) { if (!rose) { DEBUG_PRINTF("error building rose\n"); assert(0); - return nullptr; + return bytecode_ptr(nullptr); } dumpReportManager(ng.rm, ng.cc.grey); diff --git a/src/hwlm/hwlm_build.cpp b/src/hwlm/hwlm_build.cpp index 7837819a..73f05921 100644 --- a/src/hwlm/hwlm_build.cpp +++ b/src/hwlm/hwlm_build.cpp @@ -143,7 +143,7 @@ bytecode_ptr hwlmBuild(const HWLMProto &proto, const CompileContext &cc, } if (!eng) { - return nullptr; + return bytecode_ptr(nullptr); } assert(engSize); diff --git a/src/nfa/castlecompile.cpp b/src/nfa/castlecompile.cpp index f2e7e1dc..9b0f3d8e 100644 --- a/src/nfa/castlecompile.cpp +++ b/src/nfa/castlecompile.cpp @@ -920,7 +920,7 @@ void addToHolder(NGHolder &g, u32 top, const PureRepeat &pr) { u32 min_bound = pr.bounds.min; // always finite if (min_bound == 0) { // Vacuous case, we can only do this once. 
assert(!edge(g.start, g.accept, g).second); - NFAEdge e = add_edge(g.start, g.accept, g); + NFAEdge e = add_edge(g.start, g.accept, g).first; g[e].tops.insert(top); g[u].reports.insert(pr.reports.begin(), pr.reports.end()); min_bound = 1; @@ -929,7 +929,7 @@ void addToHolder(NGHolder &g, u32 top, const PureRepeat &pr) { for (u32 i = 0; i < min_bound; i++) { NFAVertex v = add_vertex(g); g[v].char_reach = pr.reach; - NFAEdge e = add_edge(u, v, g); + NFAEdge e = add_edge(u, v, g).first; if (u == g.start) { g[e].tops.insert(top); } @@ -948,7 +948,7 @@ void addToHolder(NGHolder &g, u32 top, const PureRepeat &pr) { if (head != u) { add_edge(head, v, g); } - NFAEdge e = add_edge(u, v, g); + NFAEdge e = add_edge(u, v, g).first; if (u == g.start) { g[e].tops.insert(top); } diff --git a/src/nfa/goughcompile.cpp b/src/nfa/goughcompile.cpp index a23d570a..17193d25 100644 --- a/src/nfa/goughcompile.cpp +++ b/src/nfa/goughcompile.cpp @@ -1042,7 +1042,7 @@ bytecode_ptr goughCompile(raw_som_dfa &raw, u8 somPrecision, || !cc.streaming); if (!cc.grey.allowGough) { - return nullptr; + return bytecode_ptr(nullptr); } DEBUG_PRINTF("hello world\n"); @@ -1073,7 +1073,7 @@ bytecode_ptr goughCompile(raw_som_dfa &raw, u8 somPrecision, auto basic_dfa = mcclellanCompile_i(raw, gbs, cc); assert(basic_dfa); if (!basic_dfa) { - return nullptr; + return bytecode_ptr(nullptr); } u8 alphaShift diff --git a/src/nfa/limex_compile.cpp b/src/nfa/limex_compile.cpp index c1850c1d..7c539754 100644 --- a/src/nfa/limex_compile.cpp +++ b/src/nfa/limex_compile.cpp @@ -555,7 +555,8 @@ void filterAccelStates(NGHolder &g, const map> &tops, // Similarly, connect (start, startDs) if necessary. if (!edge(g.start, g.startDs, g).second) { - NFAEdge e = add_edge(g.start, g.startDs, g); + NFAEdge e; + std::tie(e, std::ignore) = add_edge(g.start, g.startDs, g); tempEdges.emplace_back(e); // Remove edge later. } @@ -2219,7 +2220,7 @@ struct Factory { static bytecode_ptr generateNfa(const build_info &args) { if (args.num_states > NFATraits::maxStates) { - return nullptr; + return bytecode_ptr(nullptr); } // Build bounded repeat structures. @@ -2578,7 +2579,7 @@ bytecode_ptr generate(NGHolder &h, if (!cc.grey.allowLimExNFA) { DEBUG_PRINTF("limex not allowed\n"); - return nullptr; + return bytecode_ptr(nullptr); } // If you ask for a particular type, it had better be an NFA. @@ -2613,7 +2614,7 @@ bytecode_ptr generate(NGHolder &h, if (scores.empty()) { DEBUG_PRINTF("No NFA returned a valid score for this case.\n"); - return nullptr; + return bytecode_ptr(nullptr); } // Sort acceptable models in priority order, lowest score first. 
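Aside, a minimal sketch of the call-site pattern these hunks apply everywhere: BGL-style edge() and add_edge() return std::pair<edge_descriptor, bool>, and with the implicit pair-to-edge conversion gone the callers unwrap the pair explicitly. The sketch below uses a plain Boost.Graph adjacency_list and invented names (unwrap_styles, e1..e3) rather than the ue2_graph/NGHolder types of this patch.

#include <tuple>
#include <utility>
#include <boost/graph/adjacency_list.hpp>

// Sketch only: a plain adjacency_list stands in for ue2_graph/NGHolder.
using Graph = boost::adjacency_list<>;
using Vertex = boost::graph_traits<Graph>::vertex_descriptor;
using Edge = boost::graph_traits<Graph>::edge_descriptor;

void unwrap_styles(Graph &g, Vertex u, Vertex v) {
    // Style 1: take .first when the new edge is known to be valid.
    Edge e1 = add_edge(u, v, g).first;

    // Style 2: std::tie with std::ignore when the success flag is irrelevant.
    Edge e2;
    std::tie(e2, std::ignore) = add_edge(u, v, g);

    // Style 3: keep the bool when existence must be tested, as with edge().
    Edge e3;
    bool exists;
    std::tie(e3, exists) = edge(u, v, g);
    if (exists) {
        remove_edge(e3, g); // only touch the edge when it actually exists
    }
    (void)e1;
    (void)e2;
}

Which of the three styles a given hunk uses depends on whether the edge is guaranteed to exist at that call site.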
@@ -2632,7 +2633,7 @@ bytecode_ptr generate(NGHolder &h, } DEBUG_PRINTF("NFA build failed.\n"); - return nullptr; + return bytecode_ptr(nullptr); } u32 countAccelStates(NGHolder &h, diff --git a/src/nfa/mcclellancompile.cpp b/src/nfa/mcclellancompile.cpp index 463cb46c..6ada4e50 100644 --- a/src/nfa/mcclellancompile.cpp +++ b/src/nfa/mcclellancompile.cpp @@ -625,7 +625,7 @@ bytecode_ptr mcclellanCompile16(dfa_info &info, const CompileContext &cc, if (!allocateFSN16(info, &count_real_states, &wide_limit)) { DEBUG_PRINTF("failed to allocate state numbers, %zu states total\n", info.size()); - return nullptr; + return bytecode_ptr(nullptr); } DEBUG_PRINTF("count_real_states: %d\n", count_real_states); diff --git a/src/nfa/mcsheng_compile.cpp b/src/nfa/mcsheng_compile.cpp index 8e2f2d11..9b5d8f59 100644 --- a/src/nfa/mcsheng_compile.cpp +++ b/src/nfa/mcsheng_compile.cpp @@ -1035,7 +1035,7 @@ bytecode_ptr mcshengCompile16(dfa_info &info, dstate_id_t sheng_end, if (!allocateImplId16(info, sheng_end, &sherman_limit)) { DEBUG_PRINTF("failed to allocate state numbers, %zu states total\n", info.size()); - return nullptr; + return bytecode_ptr(nullptr); } u16 count_real_states = sherman_limit - sheng_end; @@ -1189,7 +1189,7 @@ bytecode_ptr mcsheng64Compile16(dfa_info&info, dstate_id_t sheng_end, if (!allocateImplId16(info, sheng_end, &sherman_limit)) { DEBUG_PRINTF("failed to allocate state numbers, %zu states total\n", info.size()); - return nullptr; + return bytecode_ptr(nullptr); } u16 count_real_states = sherman_limit - sheng_end; @@ -1414,7 +1414,7 @@ bytecode_ptr mcsheng64Compile8(dfa_info &info, dstate_id_t sheng_end, bytecode_ptr mcshengCompile(raw_dfa &raw, const CompileContext &cc, const ReportManager &rm) { if (!cc.grey.allowMcSheng) { - return nullptr; + return bytecode_ptr(nullptr); } mcclellan_build_strat mbs(raw, rm, false); @@ -1435,7 +1435,7 @@ bytecode_ptr mcshengCompile(raw_dfa &raw, const CompileContext &cc, if (sheng_end <= DEAD_STATE + 1) { info.states = old_states; - return nullptr; + return bytecode_ptr(nullptr); } bytecode_ptr nfa; @@ -1462,12 +1462,12 @@ bytecode_ptr mcshengCompile(raw_dfa &raw, const CompileContext &cc, bytecode_ptr mcshengCompile64(raw_dfa &raw, const CompileContext &cc, const ReportManager &rm) { if (!cc.grey.allowMcSheng) { - return nullptr; + return bytecode_ptr(nullptr); } if (!cc.target_info.has_avx512vbmi()) { DEBUG_PRINTF("McSheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n"); - return nullptr; + return bytecode_ptr(nullptr); } mcclellan_build_strat mbs(raw, rm, false); @@ -1488,7 +1488,7 @@ bytecode_ptr mcshengCompile64(raw_dfa &raw, const CompileContext &cc, sheng_end64 = find_sheng_states(info, accel_escape_info, MAX_SHENG64_STATES); if (sheng_end64 <= DEAD_STATE + 1) { - return nullptr; + return bytecode_ptr(nullptr); } else { using64state = true; } diff --git a/src/nfa/rdfa_graph.h b/src/nfa/rdfa_graph.h index 6d166c2f..d042560d 100644 --- a/src/nfa/rdfa_graph.h +++ b/src/nfa/rdfa_graph.h @@ -45,7 +45,7 @@ struct RdfaEdgeProps { }; struct RdfaGraph : public ue2_graph { - RdfaGraph(const raw_dfa &rdfa); + explicit RdfaGraph(const raw_dfa &rdfa); }; diff --git a/src/nfa/shengcompile.cpp b/src/nfa/shengcompile.cpp index ed1860fe..c8a6daca 100644 --- a/src/nfa/shengcompile.cpp +++ b/src/nfa/shengcompile.cpp @@ -690,7 +690,7 @@ bytecode_ptr shengCompile_int(raw_dfa &raw, const CompileContext &cc, } if (!createShuffleMasks((T *)getMutableImplNfa(nfa.get()), info, accelInfo)) { - return nullptr; + return bytecode_ptr(nullptr); } return nfa; @@ -701,7 
+701,7 @@ bytecode_ptr shengCompile(raw_dfa &raw, const CompileContext &cc, set *accel_states) { if (!cc.grey.allowSheng) { DEBUG_PRINTF("Sheng is not allowed!\n"); - return nullptr; + return bytecode_ptr(nullptr); } sheng_build_strat strat(raw, rm, only_accel_init); @@ -716,7 +716,7 @@ bytecode_ptr shengCompile(raw_dfa &raw, const CompileContext &cc, info.can_die ? "can" : "cannot", info.size()); if (info.size() > 16) { DEBUG_PRINTF("Too many states\n"); - return nullptr; + return bytecode_ptr(nullptr); } return shengCompile_int(raw, cc, accel_states, strat, info); @@ -727,18 +727,18 @@ bytecode_ptr sheng32Compile(raw_dfa &raw, const CompileContext &cc, set *accel_states) { if (!cc.grey.allowSheng) { DEBUG_PRINTF("Sheng is not allowed!\n"); - return nullptr; + return bytecode_ptr(nullptr); } #ifdef HAVE_SVE if (svcntb()<32) { DEBUG_PRINTF("Sheng32 failed, SVE width is too small!\n"); - return nullptr; + return bytecode_ptr(nullptr); } #else if (!cc.target_info.has_avx512vbmi()) { DEBUG_PRINTF("Sheng32 failed, no HS_CPU_FEATURES_AVX512VBMI!\n"); - return nullptr; + return bytecode_ptr(nullptr); } #endif @@ -755,7 +755,7 @@ bytecode_ptr sheng32Compile(raw_dfa &raw, const CompileContext &cc, assert(info.size() > 16); if (info.size() > 32) { DEBUG_PRINTF("Too many states\n"); - return nullptr; + return bytecode_ptr(nullptr); } return shengCompile_int(raw, cc, accel_states, strat, info); @@ -766,18 +766,18 @@ bytecode_ptr sheng64Compile(raw_dfa &raw, const CompileContext &cc, set *accel_states) { if (!cc.grey.allowSheng) { DEBUG_PRINTF("Sheng is not allowed!\n"); - return nullptr; + return bytecode_ptr(nullptr); } #ifdef HAVE_SVE if (svcntb()<64) { DEBUG_PRINTF("Sheng64 failed, SVE width is too small!\n"); - return nullptr; + return bytecode_ptr(nullptr); } #else if (!cc.target_info.has_avx512vbmi()) { DEBUG_PRINTF("Sheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n"); - return nullptr; + return bytecode_ptr(nullptr); } #endif @@ -794,7 +794,7 @@ bytecode_ptr sheng64Compile(raw_dfa &raw, const CompileContext &cc, assert(info.size() > 32); if (info.size() > 64) { DEBUG_PRINTF("Too many states\n"); - return nullptr; + return bytecode_ptr(nullptr); } vector old_states; old_states = info.states; diff --git a/src/nfagraph/ng_asserts.cpp b/src/nfagraph/ng_asserts.cpp index 764ebed1..1dfcb95e 100644 --- a/src/nfagraph/ng_asserts.cpp +++ b/src/nfagraph/ng_asserts.cpp @@ -384,7 +384,10 @@ void resolveEdges(ReportManager &rm, NGHolder &g, const ExpressionInfo &expr, /* there may already be a different edge from start to eod if so * we need to make it unconditional and alive */ - if (NFAEdge start_eod = edge(u, g.acceptEod, g)) { + NFAEdge start_eod; + bool exists; + std::tie(start_eod, exists) = edge(u, g.acceptEod, g); + if (exists) { g[start_eod].assert_flags = 0; dead->erase(start_eod); } else { @@ -437,7 +440,10 @@ void resolveEdges(ReportManager &rm, NGHolder &g, const ExpressionInfo &expr, /* there may already be a different edge from start to eod if so * we need to make it unconditional and alive */ - if (NFAEdge start_eod = edge(u, g.acceptEod, g)) { + NFAEdge start_eod; + bool exists; + std::tie(start_eod, exists) = edge(u, g.acceptEod, g); + if (exists) { g[start_eod].assert_flags = 0; dead->erase(start_eod); } else { @@ -496,7 +502,8 @@ void ensureCodePointStart(ReportManager &rm, NGHolder &g, * boundaries. Assert resolution handles the badness coming from asserts. * The only other source of trouble is startDs->accept connections.
*/ - NFAEdge orig = edge(g.startDs, g.accept, g); + NFAEdge orig; + std::tie(orig, std::ignore) = edge(g.startDs, g.accept, g); if (expr.utf8 && orig) { DEBUG_PRINTF("rectifying %u\n", expr.report); Report ir = rm.getBasicInternalReport(expr); diff --git a/src/nfagraph/ng_equivalence.cpp b/src/nfagraph/ng_equivalence.cpp index c45096c5..7c9d0a2c 100644 --- a/src/nfagraph/ng_equivalence.cpp +++ b/src/nfagraph/ng_equivalence.cpp @@ -98,9 +98,9 @@ class ClassInfo { public: struct ClassDepth { ClassDepth() {} - ClassDepth(const NFAVertexDepth &d) + explicit ClassDepth(const NFAVertexDepth &d) : d1(d.fromStart), d2(d.fromStartDotStar) {} - ClassDepth(const NFAVertexRevDepth &rd) + explicit ClassDepth(const NFAVertexRevDepth &rd) : d1(rd.toAccept), d2(rd.toAcceptEod) {} DepthMinMax d1; DepthMinMax d2; @@ -337,9 +337,9 @@ vector partitionGraph(vector> &infos, ClassInfo::ClassDepth depth; if (eq == LEFT_EQUIVALENCE) { - depth = depths[vi->vert_index]; + depth = ClassInfo::ClassDepth(depths[vi->vert_index]); } else { - depth = rdepths[vi->vert_index]; + depth = ClassInfo::ClassDepth(rdepths[vi->vert_index]); } ClassInfo ci(g, *vi, depth, eq); @@ -547,8 +547,8 @@ void mergeClass(vector> &infos, NGHolder &g, pred_info->succ.erase(old_vertex_info); // if edge doesn't exist, create it - NFAEdge e = add_edge_if_not_present(pred_info->v, new_v, g); - + NFAEdge e; + std::tie(e, std::ignore) = add_edge_if_not_present(pred_info->v, new_v, g); // put edge tops, if applicable if (!edgetops.empty()) { assert(g[e].tops.empty() || g[e].tops == edgetops); @@ -558,7 +558,8 @@ void mergeClass(vector> &infos, NGHolder &g, pred_info->succ.insert(new_vertex_info); if (new_v_eod) { - NFAEdge ee = add_edge_if_not_present(pred_info->v, new_v_eod, + NFAEdge ee; + std::tie(ee, std::ignore) = add_edge_if_not_present(pred_info->v, new_v_eod, g); // put edge tops, if applicable diff --git a/src/nfagraph/ng_fuzzy.cpp b/src/nfagraph/ng_fuzzy.cpp index 67d6348a..12a70193 100644 --- a/src/nfagraph/ng_fuzzy.cpp +++ b/src/nfagraph/ng_fuzzy.cpp @@ -594,7 +594,8 @@ private: // find which accepts source vertex connects to flat_set targets; for (const auto &accept : accepts) { - NFAEdge e = edge(src, accept, g); + NFAEdge e; + std::tie(e, std::ignore) = edge(src, accept, g); if (e) { targets.insert(accept); } diff --git a/src/nfagraph/ng_lbr.cpp b/src/nfagraph/ng_lbr.cpp index 039eeb3b..a6630a95 100644 --- a/src/nfagraph/ng_lbr.cpp +++ b/src/nfagraph/ng_lbr.cpp @@ -154,7 +154,7 @@ bytecode_ptr buildLbrDot(const CharReach &cr, const depth &repeatMin, const depth &repeatMax, u32 minPeriod, bool is_reset, ReportID report) { if (!cr.all()) { - return nullptr; + return bytecode_ptr(nullptr); } enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod, @@ -176,7 +176,7 @@ bytecode_ptr buildLbrVerm(const CharReach &cr, const depth &repeatMin, const CharReach escapes(~cr); if (escapes.count() != 1) { - return nullptr; + return bytecode_ptr(nullptr); } enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod, @@ -199,7 +199,7 @@ bytecode_ptr buildLbrNVerm(const CharReach &cr, const depth &repeatMin, const CharReach escapes(cr); if (escapes.count() != 1) { - return nullptr; + return bytecode_ptr(nullptr); } enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod, @@ -228,7 +228,7 @@ bytecode_ptr buildLbrShuf(const CharReach &cr, const depth &repeatMin, minPeriod, rtype); if (shuftiBuildMasks(~cr, (u8 *)&ls->mask_lo, (u8 *)&ls->mask_hi) == -1) { - return nullptr; + return bytecode_ptr(nullptr); } 
DEBUG_PRINTF("built shuf lbr\n"); @@ -296,7 +296,7 @@ bytecode_ptr constructLBR(const CharReach &cr, const depth &repeatMin, if (!nfa) { assert(0); - return nullptr; + return bytecode_ptr(nullptr); } return nfa; @@ -307,11 +307,11 @@ bytecode_ptr constructLBR(const CastleProto &proto, const CompileContext &cc, const ReportManager &rm) { if (!cc.grey.allowLbr) { - return nullptr; + return bytecode_ptr(nullptr); } if (proto.repeats.size() != 1) { - return nullptr; + return bytecode_ptr(nullptr); } const PureRepeat &repeat = proto.repeats.begin()->second; @@ -319,7 +319,7 @@ bytecode_ptr constructLBR(const CastleProto &proto, if (repeat.reports.size() != 1) { DEBUG_PRINTF("too many reports\n"); - return nullptr; + return bytecode_ptr(nullptr); } bool is_reset; @@ -346,16 +346,16 @@ bytecode_ptr constructLBR(const NGHolder &g, const CompileContext &cc, const ReportManager &rm) { if (!cc.grey.allowLbr) { - return nullptr; + return bytecode_ptr(nullptr); } PureRepeat repeat; if (!isPureRepeat(g, repeat)) { - return nullptr; + return bytecode_ptr(nullptr); } if (repeat.reports.size() != 1) { DEBUG_PRINTF("too many reports\n"); - return nullptr; + return bytecode_ptr(nullptr); } CastleProto proto(g.kind, repeat); diff --git a/src/nfagraph/ng_lbr_sve.hpp b/src/nfagraph/ng_lbr_sve.hpp index 82df3ea1..80822f86 100644 --- a/src/nfagraph/ng_lbr_sve.hpp +++ b/src/nfagraph/ng_lbr_sve.hpp @@ -39,7 +39,7 @@ bytecode_ptr buildLbrVerm16(const CharReach &cr, const depth &repeatMin, const CharReach escapes(~cr); if (escapes.count() > 16) { - return nullptr; + return bytecode_ptr(nullptr); } enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod, @@ -62,7 +62,7 @@ bytecode_ptr buildLbrNVerm16(const CharReach &cr, const depth &repeatMin, const CharReach escapes(cr); if (escapes.count() > 16) { - return nullptr; + return bytecode_ptr(nullptr); } enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod, diff --git a/src/nfagraph/ng_limex.cpp b/src/nfagraph/ng_limex.cpp index e246a538..eb989ea5 100644 --- a/src/nfagraph/ng_limex.cpp +++ b/src/nfagraph/ng_limex.cpp @@ -652,7 +652,7 @@ constructNFA(const NGHolder &h_in, const ReportManager *rm, u32 numStates = countStates(state_ids); if (numStates > NFA_MAX_STATES) { DEBUG_PRINTF("Can't build an NFA with %u states\n", numStates); - return nullptr; + return bytecode_ptr(nullptr); } map br_cyclic; @@ -722,14 +722,14 @@ bytecode_ptr constructReversedNFA_i(const NGHolder &h_in, u32 hint, assert(h.kind == NFA_REV_PREFIX); /* triggered, raises internal callbacks */ // Do state numbering. - auto state_ids = numberStates(h, {}); + auto state_ids = numberStates(h, flat_set>>()); // Quick exit: if we've got an embarrassment of riches, i.e. more states // than we can implement in our largest NFA model, bail here. u32 numStates = countStates(state_ids); if (numStates > NFA_MAX_STATES) { DEBUG_PRINTF("Can't build an NFA with %u states\n", numStates); - return nullptr; + return bytecode_ptr(nullptr); } assert(sanityCheckGraph(h, state_ids)); diff --git a/src/nfagraph/ng_literal_analysis.h b/src/nfagraph/ng_literal_analysis.h index 6bb87556..b1e20053 100644 --- a/src/nfagraph/ng_literal_analysis.h +++ b/src/nfagraph/ng_literal_analysis.h @@ -70,7 +70,7 @@ bool bad_mixed_sensitivity(const ue2_literal &s); * Score all the edges in the given graph, returning them in \p scores indexed * by edge_index. 
*/ std::vector scoreEdges(const NGHolder &h, - const flat_set &known_bad = {}); + const flat_set &known_bad = flat_set()); /** Returns a score for a literal set. Lower scores are better. */ u64a scoreSet(const std::set &s); diff --git a/src/nfagraph/ng_netflow.cpp b/src/nfagraph/ng_netflow.cpp index b48e33c4..6e65093f 100644 --- a/src/nfagraph/ng_netflow.cpp +++ b/src/nfagraph/ng_netflow.cpp @@ -93,7 +93,8 @@ void addReverseEdges(NGHolder &g, vector &reverseEdge, if (it == allEdges.end()) { // No reverse edge, add one. NFAVertex u = source(fwd, g), v = target(fwd, g); - NFAEdge rev = add_edge(v, u, g); + NFAEdge rev; + std::tie(rev, std::ignore) = add_edge(v, u, g); it = allEdges.insert(make_pair(make_pair(vidx, uidx), rev)).first; // Add to capacity map. u32 revIndex = g[rev].index; diff --git a/src/nfagraph/ng_redundancy.cpp b/src/nfagraph/ng_redundancy.cpp index 90da92f2..6da9a7fa 100644 --- a/src/nfagraph/ng_redundancy.cpp +++ b/src/nfagraph/ng_redundancy.cpp @@ -307,7 +307,9 @@ void markForRemoval(const NFAVertex v, VertexInfoMap &infoMap, static bool hasInEdgeTops(const NGHolder &g, NFAVertex v) { - NFAEdge e = edge(g.start, v, g); + + NFAEdge e; + std::tie(e, std::ignore) = edge(g.start, v, g); return e && !g[e].tops.empty(); } diff --git a/src/nfagraph/ng_repeat.cpp b/src/nfagraph/ng_repeat.cpp index 4345bf59..8f62ffbb 100644 --- a/src/nfagraph/ng_repeat.cpp +++ b/src/nfagraph/ng_repeat.cpp @@ -1135,7 +1135,7 @@ NFAVertex buildTriggerStates(NGHolder &g, const vector &trigger, g[v].char_reach = cr; add_edge(u, v, g); if (u == g.start) { - g[edge(u, v, g)].tops.insert(top); + g[edge(u, v, g).first].tops.insert(top); } u = v; } diff --git a/src/nfagraph/ng_restructuring.cpp b/src/nfagraph/ng_restructuring.cpp index 73b4d23e..a05f0f63 100644 --- a/src/nfagraph/ng_restructuring.cpp +++ b/src/nfagraph/ng_restructuring.cpp @@ -54,8 +54,8 @@ void wireStartToTops(NGHolder &g, const flat_set &tops, vector &tempEdges) { for (NFAVertex v : tops) { assert(!isLeafNode(v, g)); - - const NFAEdge &e = add_edge(g.start, v, g); + auto edge_result = add_edge(g.start, v, g); + const NFAEdge &e = edge_result.first; tempEdges.emplace_back(e); } } diff --git a/src/nfagraph/ng_split.cpp b/src/nfagraph/ng_split.cpp index 91a099fc..528b72f7 100644 --- a/src/nfagraph/ng_split.cpp +++ b/src/nfagraph/ng_split.cpp @@ -151,7 +151,8 @@ void splitRHS(const NGHolder &base, const vector &pivots, for (auto pivot : pivots) { assert(contains(*rhs_map, pivot)); - NFAEdge e = add_edge(rhs->start, (*rhs_map)[pivot], *rhs); + auto edge_result = add_edge(rhs->start, (*rhs_map)[pivot], *rhs); + NFAEdge e = edge_result.first; (*rhs)[e].tops.insert(DEFAULT_TOP); } diff --git a/src/nfagraph/ng_uncalc_components.cpp b/src/nfagraph/ng_uncalc_components.cpp index fc4ffe1a..92b52c43 100644 --- a/src/nfagraph/ng_uncalc_components.cpp +++ b/src/nfagraph/ng_uncalc_components.cpp @@ -196,10 +196,11 @@ u32 commonPrefixLength(const NGHolder &ga, const ranking_info &a_ranking, } a_count++; + NFAEdge b_edge; + bool b_edge_bool; + std::tie(b_edge, b_edge_bool) = edge(b_ranking.at(i), b_ranking.at(sid), gb); - NFAEdge b_edge = edge(b_ranking.at(i), b_ranking.at(sid), gb); - - if (!b_edge) { + if (!b_edge_bool) { max = i; DEBUG_PRINTF("lowering max to %u due to edge %zu->%u\n", max, i, sid); @@ -319,7 +320,7 @@ void mergeNfaComponent(NGHolder &dest, const NGHolder &vic, size_t common_len) { DEBUG_PRINTF("skipping common edge\n"); assert(edge(u, v, dest).second); // Should never merge edges with different top values. 
- assert(vic[e].tops == dest[edge(u, v, dest)].tops); + assert(vic[e].tops == dest[edge(u, v, dest).first].tops); continue; } else { assert(is_any_accept(v, dest)); @@ -505,16 +506,26 @@ bool mergeableStarts(const NGHolder &h1, const NGHolder &h2) { /* TODO: relax top checks if reports match */ // If both graphs have edge (start, accept), the tops must match. - NFAEdge e1_accept = edge(h1.start, h1.accept, h1); - NFAEdge e2_accept = edge(h2.start, h2.accept, h2); - if (e1_accept && e2_accept && h1[e1_accept].tops != h2[e2_accept].tops) { + bool bool_e1_accept; + NFAEdge e1_accept; + NFAEdge e2_accept; + std::tie(e1_accept, bool_e1_accept) = edge(h1.start, h1.accept, h1); + bool bool_e2_accept; + std::tie(e2_accept, bool_e2_accept) = edge(h2.start, h2.accept, h2); + + if (bool_e1_accept && bool_e2_accept && h1[e1_accept].tops != h2[e2_accept].tops) { return false; } // If both graphs have edge (start, acceptEod), the tops must match. - NFAEdge e1_eod = edge(h1.start, h1.acceptEod, h1); - NFAEdge e2_eod = edge(h2.start, h2.acceptEod, h2); - if (e1_eod && e2_eod && h1[e1_eod].tops != h2[e2_eod].tops) { + bool bool_e1_eod; + NFAEdge e1_eod; + NFAEdge e2_eod; + std::tie(e1_eod, bool_e1_eod) = edge(h1.start, h1.acceptEod, h1); + bool bool_e2_eod; + std::tie(e2_eod, bool_e2_eod) = edge(h2.start, h2.acceptEod, h2); + + if (bool_e1_eod && bool_e2_eod && h1[e1_eod].tops != h2[e2_eod].tops) { return false; } diff --git a/src/nfagraph/ng_util.cpp b/src/nfagraph/ng_util.cpp index da5187b4..8a05e95e 100644 --- a/src/nfagraph/ng_util.cpp +++ b/src/nfagraph/ng_util.cpp @@ -128,7 +128,7 @@ void clone_out_edges(NGHolder &g, NFAVertex source, NFAVertex dest) { if (edge(dest, t, g).second) { continue; } - NFAEdge clone = add_edge(dest, t, g); + NFAEdge clone = add_edge(dest, t, g).first; u32 idx = g[clone].index; g[clone] = g[e]; g[clone].index = idx; @@ -139,7 +139,7 @@ void clone_in_edges(NGHolder &g, NFAVertex s, NFAVertex dest) { for (const auto &e : in_edges_range(s, g)) { NFAVertex ss = source(e, g); assert(!edge(ss, dest, g).second); - NFAEdge clone = add_edge(ss, dest, g); + NFAEdge clone = add_edge(ss, dest, g).first; u32 idx = g[clone].index; g[clone] = g[e]; g[clone].index = idx; @@ -278,9 +278,11 @@ bool can_only_match_at_eod(const NGHolder &g) { } bool matches_everywhere(const NGHolder &h) { - NFAEdge e = edge(h.startDs, h.accept, h); + bool bool_e; + NFAEdge e; + std::tie(e, bool_e) = edge(h.startDs, h.accept, h); - return e && !h[e].assert_flags; + return bool_e && !h[e].assert_flags; } bool is_virtual_start(NFAVertex v, const NGHolder &g) { @@ -568,7 +570,7 @@ void cloneHolder(NGHolder &out, const NGHolder &in) { NFAVertex s = out_mapping[si]; NFAVertex t = out_mapping[ti]; - NFAEdge e2 = add_edge(s, t, out); + NFAEdge e2 = add_edge(s, t, out).first; out[e2] = in[e]; } @@ -718,7 +720,7 @@ u32 removeTrailingLiteralStates(NGHolder &g, const ue2_literal &lit, clearReports(g); for (auto v : predv) { - NFAEdge e = add_edge(v, g.accept, g); + NFAEdge e = add_edge(v, g.accept, g).first; g[v].reports.insert(0); if (is_triggered(g) && v == g.start) { g[e].tops.insert(DEFAULT_TOP); diff --git a/src/nfagraph/ng_violet.cpp b/src/nfagraph/ng_violet.cpp index 05a44117..bf44d192 100644 --- a/src/nfagraph/ng_violet.cpp +++ b/src/nfagraph/ng_violet.cpp @@ -1234,7 +1234,8 @@ void splitEdgesByCut(const NGHolder &h, RoseInGraph &vg, * makes a more svelte graphy */ clear_in_edges(temp_map[pivot], *new_lhs); NFAEdge pivot_edge = add_edge(temp_map[prev_v], temp_map[pivot], - *new_lhs); + *new_lhs).first; + if 
(is_triggered(h) && prev_v == h.start) { (*new_lhs)[pivot_edge].tops.insert(DEFAULT_TOP); } @@ -1957,7 +1958,7 @@ void restoreTrailingLiteralStates(NGHolder &g, const ue2_literal &lit, } for (auto v : lpreds) { - NFAEdge e = add_edge_if_not_present(v, prev, g); + NFAEdge e = add_edge_if_not_present(v, prev, g).first; if (v == g.start && is_triggered(g)) { g[e].tops.insert(DEFAULT_TOP); } @@ -2290,7 +2291,7 @@ void splitEdgesForSuffix(const NGHolder &base_graph, RoseInGraph &vg, add_edge(lhs->accept, lhs->acceptEod, *lhs); clearReports(*lhs); for (NFAVertex v : splitters) { - NFAEdge e = add_edge(v_map[v], lhs->accept, *lhs); + NFAEdge e = add_edge(v_map[v], lhs->accept, *lhs).first; if (v == base_graph.start) { (*lhs)[e].tops.insert(DEFAULT_TOP); } diff --git a/src/rose/rose_build_add.cpp b/src/rose/rose_build_add.cpp index 925efe05..2f4e08a4 100644 --- a/src/rose/rose_build_add.cpp +++ b/src/rose/rose_build_add.cpp @@ -131,7 +131,7 @@ RoseVertex createVertex(RoseBuildImpl *build, const RoseVertex parent, /* fill in report information */ g[v].reports.insert(reports.begin(), reports.end()); - RoseEdge e = add_edge(parent, v, g); + RoseEdge e = add_edge(parent, v, g).first; DEBUG_PRINTF("adding edge (%u, %u) to parent\n", minBound, maxBound); g[e].minBound = minBound; @@ -161,7 +161,7 @@ RoseVertex createAnchoredVertex(RoseBuildImpl *build, u32 literalId, DEBUG_PRINTF("created anchored vertex %zu with lit id %u\n", g[v].index, literalId); - RoseEdge e = add_edge(build->anchored_root, v, g); + RoseEdge e = add_edge(build->anchored_root, v, g).first; g[e].minBound = min_offset; g[e].maxBound = max_offset; @@ -307,7 +307,7 @@ void createVertices(RoseBuildImpl *tbi, RoseVertex p = pv.first; - RoseEdge e = add_edge(p, w, g); + RoseEdge e = add_edge(p, w, g).first; DEBUG_PRINTF("adding edge (%u,%u) to parent\n", edge_props.minBound, edge_props.maxBound); g[e].minBound = edge_props.minBound; @@ -345,7 +345,7 @@ void createVertices(RoseBuildImpl *tbi, for (const auto &pv : parents) { const RoseInEdgeProps &edge_props = bd.ig[pv.second]; - RoseEdge e = add_edge(pv.first, g_v, tbi->g); + RoseEdge e = add_edge(pv.first, g_v, tbi->g).first; g[e].minBound = edge_props.minBound; g[e].maxBound = edge_props.maxBound; g[e].history = selectHistory(*tbi, bd, pv.second, e); @@ -698,7 +698,7 @@ void makeEodEventLeftfix(RoseBuildImpl &build, RoseVertex u, g[v].left.graph = eod_leftfix; g[v].left.leftfix_report = report_mapping.second; g[v].left.lag = 0; - RoseEdge e1 = add_edge(u, v, g); + RoseEdge e1 = add_edge(u, v, g).first; g[e1].minBound = 0; g[e1].maxBound = ROSE_BOUND_INF; g[v].min_offset = add_rose_depth(g[u].min_offset, @@ -718,7 +718,7 @@ void makeEodEventLeftfix(RoseBuildImpl &build, RoseVertex u, g[w].reports = report_mapping.first; g[w].min_offset = g[v].min_offset; g[w].max_offset = g[v].max_offset; - RoseEdge e = add_edge(v, w, g); + RoseEdge e = add_edge(v, w, g).first; g[e].minBound = 0; g[e].maxBound = 0; /* No need to set history as the event is only delivered at the last @@ -794,7 +794,7 @@ void doRoseAcceptVertex(RoseBuildImpl *tbi, g[w].reports = ig[iv].reports; g[w].min_offset = g[u].min_offset; g[w].max_offset = g[u].max_offset; - RoseEdge e = add_edge(u, w, g); + RoseEdge e = add_edge(u, w, g).first; g[e].minBound = 0; g[e].maxBound = 0; g[e].history = ROSE_ROLE_HISTORY_LAST_BYTE; @@ -1719,7 +1719,7 @@ bool addEodOutfix(RoseBuildImpl &build, const NGHolder &h) { g[v].left.graph = eod_leftfix; g[v].left.leftfix_report = report_mapping.second; g[v].left.lag = 0; - RoseEdge e1 = 
add_edge(build.anchored_root, v, g); + RoseEdge e1 = add_edge(build.anchored_root, v, g).first; g[e1].minBound = 0; g[e1].maxBound = ROSE_BOUND_INF; g[v].min_offset = findMinWidth(*eod_leftfix); @@ -1737,7 +1737,7 @@ bool addEodOutfix(RoseBuildImpl &build, const NGHolder &h) { g[w].reports = report_mapping.first; g[w].min_offset = g[v].min_offset; g[w].max_offset = g[v].max_offset; - RoseEdge e = add_edge(v, w, g); + RoseEdge e = add_edge(v, w, g).first; g[e].minBound = 0; g[e].maxBound = 0; g[e].history = ROSE_ROLE_HISTORY_NONE; diff --git a/src/rose/rose_build_add_mask.cpp b/src/rose/rose_build_add_mask.cpp index aa36ecd3..05f3a620 100644 --- a/src/rose/rose_build_add_mask.cpp +++ b/src/rose/rose_build_add_mask.cpp @@ -539,7 +539,7 @@ void addTransientMask(RoseBuildImpl &build, const vector &mask, g[v].left.leftfix_report = mask_report; } else { // Make sure our edge bounds are correct. - RoseEdge e = edge(parent, v, g); + RoseEdge e = edge(parent, v, g).first; g[e].minBound = 0; g[e].maxBound = anchored ? 0 : ROSE_BOUND_INF; g[e].history = anchored ? ROSE_ROLE_HISTORY_ANCH @@ -551,7 +551,7 @@ void addTransientMask(RoseBuildImpl &build, const vector &mask, g[v].max_offset = v_max_offset; if (eod) { - RoseEdge e = add_edge(v, eod_v, g); + RoseEdge e = add_edge(v, eod_v, g).first; g[e].minBound = 0; g[e].maxBound = 0; g[e].history = ROSE_ROLE_HISTORY_LAST_BYTE; @@ -581,7 +581,7 @@ unique_ptr buildMaskRhs(const flat_set &reports, asucc = u; } - NFAEdge e = add_edge(h.start, asucc, h); + NFAEdge e = add_edge(h.start, asucc, h).first; h[e].tops.insert(DEFAULT_TOP); return rhs; diff --git a/src/rose/rose_build_anchored.cpp b/src/rose/rose_build_anchored.cpp index 7071c385..25b137d1 100644 --- a/src/rose/rose_build_anchored.cpp +++ b/src/rose/rose_build_anchored.cpp @@ -873,7 +873,7 @@ buildAnchoredMatcher(RoseBuildImpl &build, const vector &fragments, if (dfas.empty()) { DEBUG_PRINTF("empty\n"); - return nullptr; + return bytecode_ptr(nullptr); } for (auto &rdfa : dfas) { diff --git a/src/rose/rose_build_bytecode.cpp b/src/rose/rose_build_bytecode.cpp index 75b34084..a7e4f044 100644 --- a/src/rose/rose_build_bytecode.cpp +++ b/src/rose/rose_build_bytecode.cpp @@ -1054,7 +1054,7 @@ left_id updateLeftfixWithEager(RoseGraph &g, const eager_info &ei, DEBUG_PRINTF("added %u literal chars back, new lag %u\n", lag_adjust, g[v].left.lag); } - left_id leftfix = g[vsuccs[0]].left; + left_id leftfix = left_id(g[vsuccs[0]].left); if (leftfix.graph()) { assert(leftfix.graph()->kind == NFA_PREFIX @@ -1593,7 +1593,7 @@ void findSuffixTriggers(const RoseBuildImpl &tbi, continue; } PredTopPair ptp(v, g[v].suffix.top); - (*suffixTriggers)[g[v].suffix].insert(ptp); + (*suffixTriggers)[suffix_id(g[v].suffix)].insert(ptp); } } @@ -1613,7 +1613,7 @@ public: explicit OutfixBuilder(const RoseBuildImpl &build_in) : build(build_in) {} bytecode_ptr operator()(boost::blank&) const { - return nullptr; + return bytecode_ptr(nullptr); }; bytecode_ptr operator()(unique_ptr &rdfa) const { @@ -1660,7 +1660,7 @@ public: bytecode_ptr operator()(UNUSED const MpvProto &mpv) const { // MPV construction handled separately. 
assert(mpv.puffettes.empty()); - return nullptr; + return bytecode_ptr(nullptr); } private: @@ -2304,12 +2304,12 @@ bool anyEndfixMpvTriggers(const RoseBuildImpl &build) { if (!g[v].suffix) { continue; } - if (contains(done, g[v].suffix)) { + if (contains(done, suffix_id(g[v].suffix))) { continue; /* already done */ } - done.insert(g[v].suffix); + done.insert(suffix_id(g[v].suffix)); - if (hasMpvTrigger(all_reports(g[v].suffix), build.rm)) { + if (hasMpvTrigger(all_reports(suffix_id(g[v].suffix)), build.rm)) { return true; } } @@ -2369,7 +2369,7 @@ void recordResources(RoseResources &resources, const RoseBuildImpl &build, resources.has_eod = true; break; } - if (g[v].suffix && has_eod_accepts(g[v].suffix)) { + if (g[v].suffix && has_eod_accepts(suffix_id(g[v].suffix))) { resources.has_eod = true; break; } @@ -2454,7 +2454,7 @@ bool hasEodAnchors(const RoseBuildImpl &build, const build_context &bc, DEBUG_PRINTF("literally report eod\n"); return true; } - if (g[v].suffix && has_eod_accepts(g[v].suffix)) { + if (g[v].suffix && has_eod_accepts(suffix_id(g[v].suffix))) { DEBUG_PRINTF("eod suffix\n"); return true; } @@ -2529,7 +2529,7 @@ void writeNfaInfo(const RoseBuildImpl &build, build_context &bc, if (!g[v].suffix) { continue; } - u32 qi = bc.suffixes.at(g[v].suffix); + u32 qi = bc.suffixes.at(suffix_id(g[v].suffix)); assert(qi < infos.size()); if (build.isInETable(v)) { infos.at(qi).eod = 1; @@ -3185,7 +3185,7 @@ set findEngineReports(const RoseBuildImpl &build) { const auto &g = build.g; for (auto v : vertices_range(g)) { if (g[v].suffix) { - insert(&reports, all_reports(g[v].suffix)); + insert(&reports, all_reports(suffix_id(g[v].suffix))); } } @@ -3641,7 +3641,7 @@ bytecode_ptr RoseBuildImpl::buildFinalEngine(u32 minWidth) { prepMpv(*this, bc, &historyRequired, &mpv_as_outfix); proto.outfixBeginQueue = qif.allocated_count(); if (!prepOutfixes(*this, bc, &historyRequired)) { - return nullptr; + return bytecode_ptr(nullptr); } proto.outfixEndQueue = qif.allocated_count(); proto.leftfixBeginQueue = proto.outfixEndQueue; @@ -3652,7 +3652,7 @@ bytecode_ptr RoseBuildImpl::buildFinalEngine(u32 minWidth) { /* Note: buildNfas may reduce the lag for vertices that have prefixes */ if (!buildNfas(*this, bc, qif, &no_retrigger_queues, &eager_queues, &proto.leftfixBeginQueue)) { - return nullptr; + return bytecode_ptr(nullptr); } u32 eodNfaIterOffset = buildEodNfaIterator(bc, proto.leftfixBeginQueue); buildCountingMiracles(bc); diff --git a/src/rose/rose_build_castle.cpp b/src/rose/rose_build_castle.cpp index 990f0c55..6fcb319c 100644 --- a/src/rose/rose_build_castle.cpp +++ b/src/rose/rose_build_castle.cpp @@ -252,11 +252,11 @@ bool unmakeCastles(RoseBuildImpl &tbi) { for (auto v : vertices_range(g)) { const LeftEngInfo &left = g[v].left; if (left.castle && left.castle->repeats.size() > 1) { - left_castles[left].emplace_back(v); + left_castles[left_id(left)].emplace_back(v); } const RoseSuffixInfo &suffix = g[v].suffix; if (suffix.castle && suffix.castle->repeats.size() > 1) { - suffix_castles[suffix].emplace_back(v); + suffix_castles[suffix_id(suffix)].emplace_back(v); } } diff --git a/src/rose/rose_build_compile.cpp b/src/rose/rose_build_compile.cpp index 97cc95eb..f12a58c6 100644 --- a/src/rose/rose_build_compile.cpp +++ b/src/rose/rose_build_compile.cpp @@ -811,7 +811,7 @@ void RoseBuildImpl::findTransientLeftfixes(void) { continue; } - const left_id &left(g[v].left); + const left_id &left(left_id(g[v].left)); if (::ue2::isAnchored(left) && !isInETable(v)) { /* etable prefixes currently MUST be 
transient as we do not know @@ -863,7 +863,7 @@ map> findLeftSucc(const RoseBuildImpl &build) { for (auto v : vertices_range(build.g)) { if (build.g[v].left) { const LeftEngInfo &lei = build.g[v].left; - leftfixes[lei].emplace_back(v); + leftfixes[left_id(lei)].emplace_back(v); } } return leftfixes; @@ -1250,7 +1250,7 @@ void buildRoseSquashMasks(RoseBuildImpl &tbi) { if (!info.delayed_ids.empty() || !all_of_in(info.vertices, [&](RoseVertex v) { - return left == tbi.g[v].left; })) { + return left == left_id(tbi.g[v].left); })) { DEBUG_PRINTF("group %llu is unsquashable\n", info.group_mask); unsquashable |= info.group_mask; } @@ -1393,7 +1393,7 @@ void addSmallBlockLiteral(RoseBuildImpl &tbi, const simple_anchored_info &sai, g[v].max_offset = sai.max_bound + sai.literal.length(); lit_info.vertices.insert(v); - RoseEdge e = add_edge(anchored_root, v, g); + RoseEdge e = add_edge(anchored_root, v, g).first; g[e].minBound = sai.min_bound; g[e].maxBound = sai.max_bound; } @@ -1417,7 +1417,7 @@ void addSmallBlockLiteral(RoseBuildImpl &tbi, const ue2_literal &lit, g[v].literals.insert(lit_id); g[v].reports = reports; - RoseEdge e = add_edge(tbi.root, v, g); + RoseEdge e = add_edge(tbi.root, v, g).first; g[e].minBound = 0; g[e].maxBound = ROSE_BOUND_INF; g[v].min_offset = 1; diff --git a/src/rose/rose_build_convert.cpp b/src/rose/rose_build_convert.cpp index 9f1e563e..35ffc728 100644 --- a/src/rose/rose_build_convert.cpp +++ b/src/rose/rose_build_convert.cpp @@ -99,7 +99,7 @@ unique_ptr makeFloodProneSuffix(const ue2_literal &s, size_t len, NFAVertex u = h->start; for (auto it = s.begin() + s.length() - len; it != s.end(); ++it) { NFAVertex v = addHolderVertex(*it, *h); - NFAEdge e = add_edge(u, v, *h); + NFAEdge e = add_edge(u, v, *h).first; if (u == h->start) { (*h)[e].tops.insert(DEFAULT_TOP); } @@ -410,7 +410,7 @@ bool handleStartPrefixCliche(const NGHolder &h, RoseGraph &g, RoseVertex v, assert(g[e_old].maxBound >= bound_max); setEdgeBounds(g, e_old, bound_min, bound_max); } else { - RoseEdge e_new = add_edge(ar, v, g); + RoseEdge e_new = add_edge(ar, v, g).first; setEdgeBounds(g, e_new, bound_min, bound_max); to_delete->emplace_back(e_old); } @@ -603,7 +603,7 @@ bool handleMixedPrefixCliche(const NGHolder &h, RoseGraph &g, RoseVertex v, if (source(e_old, g) == ar) { setEdgeBounds(g, e_old, ri.repeatMin + width, ri.repeatMax + width); } else { - RoseEdge e_new = add_edge(ar, v, g); + RoseEdge e_new = add_edge(ar, v, g).first; setEdgeBounds(g, e_new, ri.repeatMin + width, ri.repeatMax + width); to_delete->emplace_back(e_old); } diff --git a/src/rose/rose_build_dedupe.cpp b/src/rose/rose_build_dedupe.cpp index 0a19480a..c7cc78ba 100644 --- a/src/rose/rose_build_dedupe.cpp +++ b/src/rose/rose_build_dedupe.cpp @@ -129,7 +129,7 @@ RoseDedupeAuxImpl::RoseDedupeAuxImpl(const RoseBuildImpl &build_in) // Several vertices may share a suffix, so we collect the set of // suffixes first to avoid repeating work. 
if (g[v].suffix) { - suffixes.insert(g[v].suffix); + suffixes.insert(suffix_id(g[v].suffix)); } } diff --git a/src/rose/rose_build_groups.cpp b/src/rose/rose_build_groups.cpp index 94fab54f..a3b5edd8 100644 --- a/src/rose/rose_build_groups.cpp +++ b/src/rose/rose_build_groups.cpp @@ -77,7 +77,7 @@ static bool eligibleForAlwaysOnGroup(const RoseBuildImpl &build, u32 id) { auto eligble = [&](RoseVertex v) { return build.isRootSuccessor(v) - && (!build.g[v].left || !isAnchored(build.g[v].left)); + && (!build.g[v].left || !isAnchored(left_id(build.g[v].left))); }; if (any_of_in(build.literal_info[id].vertices, eligble)) { @@ -208,7 +208,7 @@ void allocateGroupForEvent(RoseBuildImpl &build, u32 group_always_on, bool new_group = !groupCount[group_always_on]; for (RoseVertex v : info.vertices) { - if (build.g[v].left && !isAnchored(build.g[v].left)) { + if (build.g[v].left && !isAnchored(left_id(build.g[v].left))) { new_group = false; } } diff --git a/src/rose/rose_build_impl.h b/src/rose/rose_build_impl.h index d0ed84df..3aa530fb 100644 --- a/src/rose/rose_build_impl.h +++ b/src/rose/rose_build_impl.h @@ -80,7 +80,7 @@ class SmallWriteBuild; class SomSlotManager; struct suffix_id { - suffix_id(const RoseSuffixInfo &in) + explicit suffix_id(const RoseSuffixInfo &in) : g(in.graph.get()), c(in.castle.get()), d(in.rdfa.get()), h(in.haig.get()), t(in.tamarama.get()), dfa_min_width(in.dfa_min_width), @@ -181,7 +181,7 @@ depth findMaxWidth(const suffix_id &s, u32 top); /** \brief represents an engine to the left of a rose role */ struct left_id { - left_id(const LeftEngInfo &in) + explicit left_id(const LeftEngInfo &in) : g(in.graph.get()), c(in.castle.get()), d(in.dfa.get()), h(in.haig.get()), dfa_min_width(in.dfa_min_width), dfa_max_width(in.dfa_max_width) { diff --git a/src/rose/rose_build_instructions.h b/src/rose/rose_build_instructions.h index f18f4a47..01ff8d88 100644 --- a/src/rose/rose_build_instructions.h +++ b/src/rose/rose_build_instructions.h @@ -2319,7 +2319,7 @@ class RoseInstrSetCombination public: u32 ckey; - RoseInstrSetCombination(u32 ckey_in) : ckey(ckey_in) {} + explicit RoseInstrSetCombination(u32 ckey_in) : ckey(ckey_in) {} bool operator==(const RoseInstrSetCombination &ri) const { return ckey == ri.ckey; @@ -2361,7 +2361,7 @@ class RoseInstrSetExhaust public: u32 ekey; - RoseInstrSetExhaust(u32 ekey_in) : ekey(ekey_in) {} + explicit RoseInstrSetExhaust(u32 ekey_in) : ekey(ekey_in) {} bool operator==(const RoseInstrSetExhaust &ri) const { return ekey == ri.ekey; diff --git a/src/rose/rose_build_lookaround.cpp b/src/rose/rose_build_lookaround.cpp index cdc57acd..c8582dce 100644 --- a/src/rose/rose_build_lookaround.cpp +++ b/src/rose/rose_build_lookaround.cpp @@ -280,13 +280,13 @@ void findForwardReach(const RoseGraph &g, const RoseVertex v, return; } rose_look.emplace_back(map()); - getRoseForwardReach(g[t].left, g[e].rose_top, rose_look.back()); + getRoseForwardReach(left_id(g[t].left), g[e].rose_top, rose_look.back()); } if (g[v].suffix) { DEBUG_PRINTF("suffix engine\n"); rose_look.emplace_back(map()); - getSuffixForwardReach(g[v].suffix, g[v].suffix.top, rose_look.back()); + getSuffixForwardReach(suffix_id(g[v].suffix), g[v].suffix.top, rose_look.back()); } combineForwardMasks(rose_look, look); diff --git a/src/rose/rose_build_matchers.cpp b/src/rose/rose_build_matchers.cpp index db00e1ae..c1450b7a 100644 --- a/src/rose/rose_build_matchers.cpp +++ b/src/rose/rose_build_matchers.cpp @@ -477,10 +477,10 @@ bool isNoRunsVertex(const RoseBuildImpl &build, RoseVertex u) { 
DEBUG_PRINTF("u=%zu is not a root role\n", g[u].index); return false; } + auto edge_result = edge(build.root, u, g); + RoseEdge e = edge_result.first; - RoseEdge e = edge(build.root, u, g); - - if (!e) { + if (!edge_result.second) { DEBUG_PRINTF("u=%zu is not a root role\n", g[u].index); return false; } @@ -635,7 +635,7 @@ u64a literalMinReportOffset(const RoseBuildImpl &build, } if (g[v].suffix) { - depth suffix_width = findMinWidth(g[v].suffix, g[v].suffix.top); + depth suffix_width = findMinWidth(suffix_id(g[v].suffix), g[v].suffix.top); assert(suffix_width.is_reachable()); DEBUG_PRINTF("suffix with width %s\n", suffix_width.str().c_str()); min_offset = min(min_offset, vert_offset + suffix_width); @@ -886,7 +886,7 @@ void buildAccel(const RoseBuildImpl &build, bytecode_ptr buildHWLMMatcher(const RoseBuildImpl &build, const LitProto *litProto) { if (!litProto) { - return nullptr; + return bytecode_ptr(nullptr); } auto hwlm = hwlmBuild(*litProto->hwlmProto, build.cc, build.getInitialGroups()); diff --git a/src/rose/rose_build_merge.cpp b/src/rose/rose_build_merge.cpp index 66786f37..985315d9 100644 --- a/src/rose/rose_build_merge.cpp +++ b/src/rose/rose_build_merge.cpp @@ -145,7 +145,7 @@ namespace { /** Key used to group sets of leftfixes by the dedupeLeftfixes path. */ struct RoseGroup { RoseGroup(const RoseBuildImpl &build, RoseVertex v) - : left_hash(hashLeftfix(build.g[v].left)), + : left_hash(hashLeftfix(left_id(build.g[v].left))), lag(build.g[v].left.lag), eod_table(build.isInETable(v)) { const RoseGraph &g = build.g; assert(in_degree(v, g) == 1); @@ -262,8 +262,8 @@ bool dedupeLeftfixes(RoseBuildImpl &tbi) { // Scan the rest of the list for dupes. for (auto kt = std::next(jt); kt != jte; ++kt) { if (g[v].left == g[*kt].left - || !is_equal(g[v].left, g[v].left.leftfix_report, - g[*kt].left, g[*kt].left.leftfix_report)) { + || !is_equal(left_id(g[v].left), g[v].left.leftfix_report, + left_id(g[*kt].left), g[*kt].left.leftfix_report)) { continue; } @@ -547,8 +547,8 @@ bool checkPrefix(const rose_literal_id &ul, const u32 ulag, static bool hasSameEngineType(const RoseVertexProps &u_prop, const RoseVertexProps &v_prop) { - const left_id u_left = u_prop.left; - const left_id v_left = v_prop.left; + const left_id u_left = left_id(u_prop.left); + const left_id v_left = left_id(v_prop.left); return !u_left.haig() == !v_left.haig() && !u_left.dfa() == !v_left.dfa() @@ -1345,8 +1345,8 @@ insertion_ordered_map> get_eng_verts(const RoseGraph if (!left) { continue; } - assert(contains(all_reports(left), left.leftfix_report)); - eng_verts[left].emplace_back(v); + assert(contains(all_reports(left_id(left)), left.leftfix_report)); + eng_verts[left_id(left)].emplace_back(v); } return eng_verts; @@ -2033,7 +2033,7 @@ void mergeCastleLeftfixes(RoseBuildImpl &build) { continue; } - eng_verts[g[v].left].emplace_back(v); + eng_verts[left_id(g[v].left)].emplace_back(v); } map> by_reach; @@ -2198,7 +2198,7 @@ void mergeAcyclicSuffixes(RoseBuildImpl &tbi) { continue; } - suffixes.insert(g[v].suffix, v); + suffixes.insert(suffix_id(g[v].suffix), v); } deque suff_groups; @@ -2260,7 +2260,7 @@ void mergeSmallSuffixes(RoseBuildImpl &tbi) { continue; } - suffixes.insert(g[v].suffix, v); + suffixes.insert(suffix_id(g[v].suffix), v); } deque suff_groups; diff --git a/src/rose/rose_build_misc.cpp b/src/rose/rose_build_misc.cpp index de0ae706..d0c63027 100644 --- a/src/rose/rose_build_misc.cpp +++ b/src/rose/rose_build_misc.cpp @@ -513,8 +513,8 @@ bool roseHasTops(const RoseBuildImpl &build, RoseVertex v) { 
graph_tops.insert(g[e].rose_top); } } - - return is_subset_of(graph_tops, all_tops(g[v].left)); + + return is_subset_of(graph_tops, all_tops(left_id(g[v].left))); } #endif @@ -1006,14 +1006,14 @@ bool hasOrphanedTops(const RoseBuildImpl &build) { if (g[v].left) { if (!build.isRootSuccessor(v)) { // Tops for infixes come from the in-edges. - set &tops = leftfixes[g[v].left]; + set &tops = leftfixes[left_id(g[v].left)]; for (const auto &e : in_edges_range(v, g)) { tops.insert(g[e].rose_top); } } } if (g[v].suffix) { - suffixes[g[v].suffix].insert(g[v].suffix.top); + suffixes[suffix_id(g[v].suffix)].insert(g[v].suffix.top); } } diff --git a/src/rose/rose_build_program.cpp b/src/rose/rose_build_program.cpp index 861855b5..9459836d 100644 --- a/src/rose/rose_build_program.cpp +++ b/src/rose/rose_build_program.cpp @@ -1918,8 +1918,8 @@ void makeRoleSuffix(const RoseBuildImpl &build, if (!g[v].suffix) { return; } - assert(contains(suffixes, g[v].suffix)); - u32 queue = suffixes.at(g[v].suffix); + assert(contains(suffixes, suffix_id(g[v].suffix))); + u32 queue = suffixes.at(suffix_id(g[v].suffix)); u32 event; assert(contains(engine_info_by_queue, queue)); const auto eng_info = engine_info_by_queue.at(queue); @@ -1991,7 +1991,7 @@ void makeRoleInfixTriggers(const RoseBuildImpl &build, make_pair(g[v].index, g[e].rose_top)); assert(top < MQE_INVALID); } else if (!isMultiTopType(eng_info.type)) { - assert(num_tops(g[v].left) == 1); + assert(num_tops(left_id(g[v].left)) == 1); top = MQE_TOP; } else { top = MQE_TOP_FIRST + g[e].rose_top; @@ -2178,7 +2178,7 @@ void makeGroupSquashInstruction(const RoseBuildImpl &build, u32 lit_id, namespace { struct ProgKey { - ProgKey(const RoseProgram &p) : prog(&p) {} + explicit ProgKey(const RoseProgram &p) : prog(&p) {} bool operator==(const ProgKey &b) const { return RoseProgramEquivalence()(*prog, *b.prog); @@ -2200,7 +2200,7 @@ RoseProgram assembleProgramBlocks(vector &&blocks_in) { ue2_unordered_set seen; for (auto &block : blocks_in) { - if (contains(seen, block)) { + if (contains(seen, ProgKey(block))) { continue; } diff --git a/src/rose/rose_build_role_aliasing.cpp b/src/rose/rose_build_role_aliasing.cpp index d41b7bde..8826f23d 100644 --- a/src/rose/rose_build_role_aliasing.cpp +++ b/src/rose/rose_build_role_aliasing.cpp @@ -159,13 +159,13 @@ private: }; struct RoseAliasingInfo { - RoseAliasingInfo(const RoseBuildImpl &build) { + explicit RoseAliasingInfo(const RoseBuildImpl &build) { const auto &g = build.g; // Populate reverse leftfix map. 
for (auto v : vertices_range(g)) { if (g[v].left) { - rev_leftfix[g[v].left].insert(v); + rev_leftfix[left_id(g[v].left)].insert(v); } } @@ -259,8 +259,10 @@ bool samePredecessors(RoseVertex a, RoseVertex b, const RoseGraph &g) { } for (const auto &e_a : in_edges_range(a, g)) { - RoseEdge e = edge(source(e_a, g), b, g); - if (!e || g[e].rose_top != g[e_a].rose_top) { + auto edge_result = edge(source(e_a, g), b, g); + RoseEdge e = edge_result.first; + + if (!edge_result.second || g[e].rose_top != g[e_a].rose_top) { DEBUG_PRINTF("bad tops\n"); return false; } @@ -274,7 +276,9 @@ static bool hasCommonSuccWithBadBounds(RoseVertex a, RoseVertex b, const RoseGraph &g) { for (const auto &e_a : out_edges_range(a, g)) { - if (RoseEdge e = edge(b, target(e_a, g), g)) { + auto edge_result = edge(b, target(e_a, g), g); + RoseEdge e = edge_result.first; + if (edge_result.second) { if (g[e_a].maxBound < g[e].minBound || g[e].maxBound < g[e_a].minBound) { return true; @@ -293,7 +297,9 @@ static bool hasCommonPredWithBadBounds(RoseVertex a, RoseVertex b, const RoseGraph &g) { for (const auto &e_a : in_edges_range(a, g)) { - if (RoseEdge e = edge(source(e_a, g), b, g)) { + auto edge_result = edge(source(e_a, g), b, g); + RoseEdge e = edge_result.first; + if (edge_result.second) { if (g[e_a].maxBound < g[e].minBound || g[e].maxBound < g[e_a].minBound) { return true; @@ -700,7 +706,9 @@ bool hasCommonPredWithDiffRoses(RoseVertex a, RoseVertex b, const bool equal_roses = hasEqualLeftfixes(a, b, g); for (const auto &e_a : in_edges_range(a, g)) { - if (RoseEdge e = edge(source(e_a, g), b, g)) { + auto edge_result = edge(source(e_a, g), b, g); + RoseEdge e = edge_result.first; + if (edge_result.second) { DEBUG_PRINTF("common pred, e_r=%d r_t %u,%u\n", (int)equal_roses, g[e].rose_top, g[e_a].rose_top); if (!equal_roses) { @@ -907,9 +915,9 @@ bool mergeSameCastle(RoseBuildImpl &build, RoseVertex a, RoseVertex b, } } - assert(contains(rai.rev_leftfix[b_left], b)); - rai.rev_leftfix[b_left].erase(b); - rai.rev_leftfix[a_left].insert(b); + assert(contains(rai.rev_leftfix[left_id(b_left)], b)); + rai.rev_leftfix[left_id(b_left)].erase(b); + rai.rev_leftfix[left_id(a_left)].insert(b); a_left.leftfix_report = new_report; b_left.leftfix_report = new_report; @@ -918,7 +926,7 @@ bool mergeSameCastle(RoseBuildImpl &build, RoseVertex a, RoseVertex b, updateEdgeTops(g, a, a_top_map); updateEdgeTops(g, b, b_top_map); - pruneUnusedTops(castle, g, rai.rev_leftfix[a_left]); + pruneUnusedTops(castle, g, rai.rev_leftfix[left_id(a_left)]); return true; } @@ -1026,9 +1034,9 @@ bool attemptRoseCastleMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a, b_left.castle = new_castle; assert(a_left == b_left); - rai.rev_leftfix[a_left].insert(a); - rai.rev_leftfix[a_left].insert(b); - pruneUnusedTops(*new_castle, g, rai.rev_leftfix[a_left]); + rai.rev_leftfix[left_id(a_left)].insert(a); + rai.rev_leftfix[left_id(a_left)].insert(b); + pruneUnusedTops(*new_castle, g, rai.rev_leftfix[left_id(a_left)]); return true; } @@ -1079,7 +1087,9 @@ bool attemptRoseCastleMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a, // We should be protected from merging common preds with tops leading // to completely different repeats by earlier checks, but just in // case... 
-    if (RoseEdge a_edge = edge(source(e, g), a, g)) {
+    auto edge_result = edge(source(e, g), a, g);
+    RoseEdge a_edge = edge_result.first;
+    if (edge_result.second) {
         u32 a_top = g[a_edge].rose_top;
         const PureRepeat &a_pr = m_castle->repeats[a_top]; // new report
         if (pr != a_pr) {
@@ -1112,9 +1122,9 @@ bool attemptRoseCastleMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
     b_left.leftfix_report = new_report;
     assert(a_left == b_left);
 
-    rai.rev_leftfix[a_left].insert(a);
-    rai.rev_leftfix[a_left].insert(b);
-    pruneUnusedTops(*m_castle, g, rai.rev_leftfix[a_left]);
+    rai.rev_leftfix[left_id(a_left)].insert(a);
+    rai.rev_leftfix[left_id(a_left)].insert(b);
+    pruneUnusedTops(*m_castle, g, rai.rev_leftfix[left_id(a_left)]);
 
     return true;
 }
@@ -1237,9 +1247,9 @@ bool attemptRoseGraphMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
     a_left.graph = new_graph;
     b_left.graph = new_graph;
 
-    rai.rev_leftfix[a_left].insert(a);
-    rai.rev_leftfix[a_left].insert(b);
-    pruneUnusedTops(*new_graph, g, rai.rev_leftfix[a_left]);
+    rai.rev_leftfix[left_id(a_left)].insert(a);
+    rai.rev_leftfix[left_id(a_left)].insert(b);
+    pruneUnusedTops(*new_graph, g, rai.rev_leftfix[left_id(a_left)]);
 
     return true;
 }
@@ -1258,7 +1268,7 @@ bool attemptRoseGraphMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
     DEBUG_PRINTF("attempting merge of roses on vertices %zu and %zu\n",
                  g[a].index, g[b].index);
 
-    set<RoseVertex> &b_verts = rai.rev_leftfix[b_left];
+    set<RoseVertex> &b_verts = rai.rev_leftfix[left_id(b_left)];
     set<RoseVertex> aa;
     aa.insert(a);
@@ -1280,7 +1290,7 @@ bool attemptRoseGraphMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
     ReportID new_report = build.getNewNfaReport();
     duplicateReport(*b_h, b_left.leftfix_report, new_report);
     b_left.leftfix_report = new_report;
-    pruneReportIfUnused(build, b_h, rai.rev_leftfix[b_left_id], b_oldreport);
+    pruneReportIfUnused(build, b_h, rai.rev_leftfix[left_id(b_left_id)], b_oldreport);
 
     NGHolder victim;
     cloneHolder(victim, *a_h);
@@ -1316,16 +1326,16 @@ bool attemptRoseGraphMerge(RoseBuildImpl &build, bool preds_same, RoseVertex a,
     a_left.graph = b_h;
     a_left.leftfix_report = new_report;
 
-    assert(contains(rai.rev_leftfix[a_left_id], a));
-    assert(contains(rai.rev_leftfix[b_left_id], b));
-    rai.rev_leftfix[a_left_id].erase(a);
-    rai.rev_leftfix[b_left_id].insert(a);
+    assert(contains(rai.rev_leftfix[left_id(a_left_id)], a));
+    assert(contains(rai.rev_leftfix[left_id(b_left_id)], b));
+    rai.rev_leftfix[left_id(a_left_id)].erase(a);
+    rai.rev_leftfix[left_id(b_left_id)].insert(a);
 
-    pruneUnusedTops(*a_h, g, rai.rev_leftfix[a_left_id]);
-    pruneUnusedTops(*b_h, g, rai.rev_leftfix[b_left_id]);
+    pruneUnusedTops(*a_h, g, rai.rev_leftfix[left_id(a_left_id)]);
+    pruneUnusedTops(*b_h, g, rai.rev_leftfix[left_id(b_left_id)]);
 
     // Prune A's report from its old prefix if it was only used by A.
-    pruneReportIfUnused(build, a_h, rai.rev_leftfix[a_left_id], a_oldreport);
+    pruneReportIfUnused(build, a_h, rai.rev_leftfix[left_id(a_left_id)], a_oldreport);
 
     reduceImplementableGraph(*b_h, SOM_NONE, nullptr, build.cc);
 
@@ -2136,7 +2146,9 @@ void mergeDupeLeaves(RoseBuildImpl &build) {
         for (const auto &e : in_edges_range(v, g)) {
             RoseVertex u = source(e, g);
             DEBUG_PRINTF("u index=%zu\n", g[u].index);
-            if (RoseEdge et = edge(u, t, g)) {
+            auto edge_result = edge(u, t, g);
+            RoseEdge et = edge_result.first;
+            if (edge_result.second) {
                 if (g[et].minBound <= g[e].minBound
                     && g[et].maxBound >= g[e].maxBound) {
                     DEBUG_PRINTF("remove more constrained edge\n");
diff --git a/src/rose/rose_build_width.cpp b/src/rose/rose_build_width.cpp
index 327911ea..20c78181 100644
--- a/src/rose/rose_build_width.cpp
+++ b/src/rose/rose_build_width.cpp
@@ -95,7 +95,7 @@ u32 findMinWidth(const RoseBuildImpl &tbi, enum rose_literal_table table) {
         }
 
         if (g[v].suffix) {
-            depth suffix_width = findMinWidth(g[v].suffix, g[v].suffix.top);
+            depth suffix_width = findMinWidth(suffix_id(g[v].suffix), g[v].suffix.top);
             assert(suffix_width.is_reachable());
             DEBUG_PRINTF("%zu has suffix with top %u (width %s), can fire "
                          "report at %u\n",
@@ -145,10 +145,10 @@ u32 findMaxBAWidth(const RoseBuildImpl &tbi) {
         u64a w = g[v].max_offset;
 
         if (g[v].suffix) {
-            if (has_non_eod_accepts(g[v].suffix)) {
+            if (has_non_eod_accepts(suffix_id(g[v].suffix))) {
                 return ROSE_BOUND_INF;
             }
-            depth suffix_width = findMaxWidth(g[v].suffix, g[v].suffix.top);
+            depth suffix_width = findMaxWidth(suffix_id(g[v].suffix), g[v].suffix.top);
             DEBUG_PRINTF("suffix max width for top %u is %s\n", g[v].suffix.top,
                          suffix_width.str().c_str());
             assert(suffix_width.is_reachable());
@@ -222,11 +222,11 @@ u32 findMaxBAWidth(const RoseBuildImpl &tbi, enum rose_literal_table table) {
            accept_eod node */
 
         if (g[v].suffix) {
-            if (has_non_eod_accepts(g[v].suffix)) {
+            if (has_non_eod_accepts(suffix_id(g[v].suffix))) {
                 DEBUG_PRINTF("has accept\n");
                 return ROSE_BOUND_INF;
             }
-            depth suffix_width = findMaxWidth(g[v].suffix);
+            depth suffix_width = findMaxWidth(suffix_id(g[v].suffix));
             DEBUG_PRINTF("suffix max width %s\n", suffix_width.str().c_str());
             assert(suffix_width.is_reachable());
             if (!suffix_width.is_finite()) {
diff --git a/src/smallwrite/smallwrite_build.cpp b/src/smallwrite/smallwrite_build.cpp
index e1d2f1f3..c72bbef2 100644
--- a/src/smallwrite/smallwrite_build.cpp
+++ b/src/smallwrite/smallwrite_build.cpp
@@ -789,7 +789,7 @@ bytecode_ptr<NFA> getDfa(raw_dfa &rdfa, const CompileContext &cc,
     bool only_accel_init = !has_non_literals;
     bool trust_daddy_states = !has_non_literals;
 
-    bytecode_ptr<NFA> dfa = nullptr;
+    bytecode_ptr<NFA> dfa = bytecode_ptr<NFA>(nullptr);
     if (cc.grey.allowSmallWriteSheng) {
         dfa = shengCompile(rdfa, cc, rm, only_accel_init, &accel_states);
         if (!dfa) {
@@ -819,27 +819,27 @@ bytecode_ptr<NFA> prepEngine(raw_dfa &rdfa, u32 roseQuality,
     auto nfa = getDfa(rdfa, cc, rm, has_non_literals, accel_states);
     if (!nfa) {
         DEBUG_PRINTF("DFA compile failed for smallwrite NFA\n");
-        return nullptr;
+        return bytecode_ptr<NFA>(nullptr);
     }
 
     if (is_slow(rdfa, accel_states, roseQuality)) {
         DEBUG_PRINTF("is slow\n");
         *small_region = cc.grey.smallWriteLargestBufferBad;
         if (*small_region <= *start_offset) {
-            return nullptr;
+            return bytecode_ptr<NFA>(nullptr);
         }
         if (clear_deeper_reports(rdfa, *small_region - *start_offset)) {
            minimize_hopcroft(rdfa, cc.grey);
            if (rdfa.start_anchored == DEAD_STATE) {
                DEBUG_PRINTF("all patterns pruned out\n");
-                return nullptr;
+                return bytecode_ptr<NFA>(nullptr);
            }
 
            nfa = getDfa(rdfa, cc, rm, has_non_literals, accel_states);
            if (!nfa) {
                DEBUG_PRINTF("DFA compile failed for smallwrite NFA\n");
                assert(0); /* able to build orig dfa but not the trimmed? */
-                return nullptr;
+                return bytecode_ptr<NFA>(nullptr);
            }
        }
    } else {
@@ -850,7 +850,7 @@ bytecode_ptr<NFA> prepEngine(raw_dfa &rdfa, u32 roseQuality,
     if (nfa->length > cc.grey.limitSmallWriteOutfixSize
         || nfa->length > cc.grey.limitDFASize) {
         DEBUG_PRINTF("smallwrite outfix size too large\n");
-        return nullptr; /* this is just a soft failure - don't build smwr */
+        return bytecode_ptr<NFA>(nullptr); /* this is just a soft failure - don't build smwr */
     }
 
     nfa->queueIndex = 0; /* dummy, small write API does not use queue */
@@ -870,12 +870,12 @@ bytecode_ptr<SmallWriteEngine> SmallWriteBuildImpl::build(u32 roseQuality) {
     if (dfas.empty() && !has_literals) {
         DEBUG_PRINTF("no smallwrite engine\n");
         poisoned = true;
-        return nullptr;
+        return bytecode_ptr<SmallWriteEngine>(nullptr);
     }
 
     if (poisoned) {
         DEBUG_PRINTF("some pattern could not be made into a smallwrite dfa\n");
-        return nullptr;
+        return bytecode_ptr<SmallWriteEngine>(nullptr);
     }
 
     // We happen to know that if the rose is high quality, we're going to limit
@@ -903,12 +903,12 @@ bytecode_ptr<SmallWriteEngine> SmallWriteBuildImpl::build(u32 roseQuality) {
     if (dfas.empty()) {
         DEBUG_PRINTF("no dfa, pruned everything away\n");
-        return nullptr;
+        return bytecode_ptr<SmallWriteEngine>(nullptr);
     }
 
     if (!mergeDfas(dfas, rm, cc)) {
         dfas.clear();
-        return nullptr;
+        return bytecode_ptr<SmallWriteEngine>(nullptr);
     }
 
     assert(dfas.size() == 1);
@@ -925,7 +925,7 @@ bytecode_ptr<SmallWriteEngine> SmallWriteBuildImpl::build(u32 roseQuality) {
         DEBUG_PRINTF("some smallwrite outfix could not be prepped\n");
         /* just skip the smallwrite optimization */
         poisoned = true;
-        return nullptr;
+        return bytecode_ptr<SmallWriteEngine>(nullptr);
     }
 
     u32 size = sizeof(SmallWriteEngine) + nfa->length;
diff --git a/src/util/alloc.h b/src/util/alloc.h
index 49b4a824..c5dea4c5 100644
--- a/src/util/alloc.h
+++ b/src/util/alloc.h
@@ -68,7 +68,7 @@ public:
     AlignedAllocator() noexcept {}
 
     template
-    AlignedAllocator(const AlignedAllocator &) noexcept {}
+    explicit AlignedAllocator(const AlignedAllocator &) noexcept {}
 
     template struct rebind {
         using other = AlignedAllocator;
diff --git a/src/util/bitfield.h b/src/util/bitfield.h
index 4a3fbd6e..1f29dfce 100644
--- a/src/util/bitfield.h
+++ b/src/util/bitfield.h
@@ -64,7 +64,7 @@ public:
         assert(none());
     }
 
-    bitfield(const boost::dynamic_bitset<> &a) : bits{{0}} {
+    explicit bitfield(const boost::dynamic_bitset<> &a) : bits{{0}} {
         assert(a.size() == requested_size);
         assert(none());
         for (auto i = a.find_first(); i != a.npos; i = a.find_next(i)) {
diff --git a/src/util/bytecode_ptr.h b/src/util/bytecode_ptr.h
index f1f2e5ef..ab2b9171 100644
--- a/src/util/bytecode_ptr.h
+++ b/src/util/bytecode_ptr.h
@@ -66,7 +66,7 @@ public:
         }
     }
 
-    bytecode_ptr(std::nullptr_t) {}
+    explicit bytecode_ptr(std::nullptr_t) {}
 
     T *get() const { return ptr.get(); }
diff --git a/src/util/flat_containers.h b/src/util/flat_containers.h
index 41452eb4..9ba7e232 100644
--- a/src/util/flat_containers.h
+++ b/src/util/flat_containers.h
@@ -195,10 +195,10 @@ public:
 
     // Constructors.
 
-    flat_set(const Compare &compare = Compare(),
+    explicit flat_set(const Compare &compare = Compare(),
              const Allocator &alloc = Allocator())
         : base_type(compare, alloc) {}
-
+
     template
     flat_set(InputIt first, InputIt last, const Compare &compare = Compare(),
              const Allocator &alloc = Allocator())
@@ -425,7 +425,7 @@ public:
 
     // Constructors.
 
-    flat_map(const Compare &compare = Compare(),
+    explicit flat_map(const Compare &compare = Compare(),
              const Allocator &alloc = Allocator())
         : base_type(compare, alloc) {}
@@ -615,7 +615,7 @@ public:
         friend class flat_map;
     protected:
         Compare c;
-        value_compare(Compare c_in) : c(c_in) {}
+        explicit value_compare(Compare c_in) : c(c_in) {}
     public:
         bool operator()(const value_type &lhs, const value_type &rhs) {
             return c(lhs.first, rhs.first);
         }
diff --git a/src/util/hash_dynamic_bitset.h b/src/util/hash_dynamic_bitset.h
index 65bc29c3..fecb0c68 100644
--- a/src/util/hash_dynamic_bitset.h
+++ b/src/util/hash_dynamic_bitset.h
@@ -56,7 +56,7 @@ struct hash_output_it {
     using reference = void;
     using iterator_category = std::output_iterator_tag;
 
-    hash_output_it(size_t *hash_out = nullptr) : out(hash_out) {}
+    explicit hash_output_it(size_t *hash_out = nullptr) : out(hash_out) {}
 
     hash_output_it &operator++() { return *this; }
@@ -65,7 +65,7 @@ struct hash_output_it {
     }
 
     struct deref_proxy {
-        deref_proxy(size_t *hash_out) : out(hash_out) {}
+        explicit deref_proxy(size_t *hash_out) : out(hash_out) {}
 
         template
         void operator=(const T &val) const {
@@ -76,7 +76,7 @@ struct hash_output_it {
         size_t *out; /* output location of the owning iterator */
     };
 
-    deref_proxy operator*() { return {out}; }
+    deref_proxy operator*() { return deref_proxy(out); }
 
 private:
     size_t *out; /* location to output the hashes to */
diff --git a/src/util/ue2_graph.h b/src/util/ue2_graph.h
index aa9718d7..d615d36a 100644
--- a/src/util/ue2_graph.h
+++ b/src/util/ue2_graph.h
@@ -210,7 +210,7 @@ public:
      * edge() and add_edge(). As we have null_edges and we always allow
      * parallel edges, the bool component of the return from these functions is
      * not required. */
-    edge_descriptor(const std::pair<edge_descriptor, bool> &tup)
+    explicit edge_descriptor(const std::pair<edge_descriptor, bool> &tup)
         : p(tup.first.p), serial(tup.first.serial) {
         assert(tup.second == (bool)tup.first);
     }
@@ -432,7 +432,7 @@ public:
                                          vertex_descriptor> {
         using super = typename adjacency_iterator::iterator_adaptor_;
     public:
-        adjacency_iterator(out_edge_iterator a) : super(std::move(a)) { }
+        explicit adjacency_iterator(out_edge_iterator a) : super(std::move(a)) { }
         adjacency_iterator() { }
 
         vertex_descriptor dereference() const {
@@ -448,7 +448,7 @@ public:
                                          vertex_descriptor> {
         using super = typename inv_adjacency_iterator::iterator_adaptor_;
     public:
-        inv_adjacency_iterator(in_edge_iterator a) : super(std::move(a)) { }
+        explicit inv_adjacency_iterator(in_edge_iterator a) : super(std::move(a)) { }
         inv_adjacency_iterator() { }
 
         vertex_descriptor dereference() const {
@@ -791,7 +791,7 @@ public:
 
         typedef typename boost::lvalue_property_map_tag category;
 
-        prop_map(value_type P_of::*m_in) : member(m_in) { }
+        explicit prop_map(value_type P_of::*m_in) : member(m_in) { }
 
         reference operator[](key_type k) const {
             return k.raw()->props.*member;
diff --git a/unit/internal/fdr.cpp b/unit/internal/fdr.cpp
index 87ab0974..46f19be7 100644
--- a/unit/internal/fdr.cpp
+++ b/unit/internal/fdr.cpp
@@ -61,10 +61,10 @@ using namespace ue2;
 #define CHECK_WITH_TEDDY_OK_TO_FAIL(fdr, hint) \
     { \
         auto descr = getTeddyDescription(hint); \
-        if (descr && fdr == nullptr) { \
+        if (descr && fdr.get() == nullptr) { \
             return; /* cannot build Teddy for this set of literals */ \
         } else { \
-            ASSERT_TRUE(fdr != nullptr); \
+            ASSERT_TRUE(fdr.get() != nullptr); \
         } \
     }
 #endif
@@ -145,7 +145,7 @@ bytecode_ptr<FDR> buildFDREngineHinted(std::vector<hwlmLiteral> &lits,
     auto proto = fdrBuildProtoHinted(HWLM_ENGINE_FDR, lits, make_small, hint,
                                      target, grey);
     if (!proto) {
-        return nullptr;
+        return ue2::bytecode_ptr<FDR>(nullptr);
     }
     return fdrBuildTable(*proto, grey);
 }
@@ -156,7 +156,7 @@ bytecode_ptr<FDR> buildFDREngine(std::vector<hwlmLiteral> &lits,
                                  const Grey &grey) {
     auto proto = fdrBuildProto(HWLM_ENGINE_FDR, lits, make_small, target, grey);
     if (!proto) {
-        return nullptr;
+        return bytecode_ptr<FDR>(nullptr);
     }
     return fdrBuildTable(*proto, grey);
 }
@@ -421,7 +421,7 @@ TEST_P(FDRp, moveByteStream) {
     size_t size = fdrSize(fdrTable0.get());
 
     auto fdrTable = make_bytecode_ptr<FDR>(size, 64);
-    EXPECT_NE(nullptr, fdrTable);
+    EXPECT_NE(nullptr, fdrTable.get());
 
     memcpy(fdrTable.get(), fdrTable0.get(), size);
 
@@ -706,7 +706,7 @@ TEST(FDR, FDRTermS) {
     lits.push_back(hwlmLiteral("ff", 0, 1));
     auto fdr = buildFDREngine(lits, false, get_current_target(), Grey());
-    ASSERT_TRUE(fdr != nullptr);
+    ASSERT_TRUE(fdr.get() != nullptr);
 
     // check matches
 
@@ -729,7 +729,7 @@ TEST(FDR, FDRTermB) {
     lits.push_back(hwlmLiteral("ff", 0, 1));
     auto fdr = buildFDREngine(lits, false, get_current_target(), Grey());
-    ASSERT_TRUE(fdr != nullptr);
+    ASSERT_TRUE(fdr.get() != nullptr);
 
     // check matches
     struct hs_scratch scratch;
diff --git a/unit/internal/fdr_flood.cpp b/unit/internal/fdr_flood.cpp
index fd8a9734..a3b0cc96 100644
--- a/unit/internal/fdr_flood.cpp
+++ b/unit/internal/fdr_flood.cpp
@@ -55,10 +55,10 @@ using namespace ue2;
 #define CHECK_WITH_TEDDY_OK_TO_FAIL(fdr, hint) \
     { \
         auto descr = getTeddyDescription(hint); \
-        if (descr && fdr != nullptr) { \
+        if (descr && fdr.get() != nullptr) { \
             return; \
         } else { \
-            ASSERT_TRUE(fdr != nullptr); \
+            ASSERT_TRUE(fdr.get() != nullptr); \
         } \
     }
 #endif
diff --git a/unit/internal/lbr.cpp b/unit/internal/lbr.cpp
index 2c585ae5..0b782569 100644
--- a/unit/internal/lbr.cpp
+++ b/unit/internal/lbr.cpp
@@ -98,7 +98,7 @@ protected:
         ParsedExpression parsed(0, pattern.c_str(), flags, 0);
         auto built_expr = buildGraph(rm, cc, parsed);
         const auto &g = built_expr.g;
-        ASSERT_TRUE(g != nullptr);
+        ASSERT_TRUE(static_cast<bool>(g));
 
         clearReports(*g);
         rm.setProgramOffset(0, MATCH_REPORT);
@@ -106,7 +106,7 @@ protected:
         /* LBR triggered by dot */
         vector<vector<CharReach>> triggers = {{CharReach::dot()}};
         nfa = constructLBR(*g, triggers, cc, rm);
-        ASSERT_TRUE(nfa != nullptr);
+        ASSERT_TRUE(static_cast<bool>(nfa));
 
         full_state = make_bytecode_ptr<char>(nfa->scratchStateSize, 64);
         stream_state = make_bytecode_ptr<char>(nfa->streamStateSize);
diff --git a/unit/internal/limex_nfa.cpp b/unit/internal/limex_nfa.cpp
index 28433c96..80b9159b 100644
--- a/unit/internal/limex_nfa.cpp
+++ b/unit/internal/limex_nfa.cpp
@@ -87,7 +87,7 @@ protected:
 
         nfa = constructNFA(*g, &rm, fixed_depth_tops, triggers, compress_state,
                            fast_nfa, type, cc);
-        ASSERT_TRUE(nfa != nullptr);
+        ASSERT_TRUE(nfa.get() != nullptr);
 
         full_state = make_bytecode_ptr<char>(nfa->scratchStateSize, 64);
         stream_state = make_bytecode_ptr<char>(nfa->streamStateSize);
@@ -134,7 +134,7 @@ INSTANTIATE_TEST_CASE_P(
                         Range((int)LIMEX_NFA_32, (int)LIMEX_NFA_512));
 
 TEST_P(LimExModelTest, StateSize) {
-    ASSERT_TRUE(nfa != nullptr);
+    ASSERT_TRUE(nfa.get() != nullptr);
 
     hs_platform_info plat;
     hs_error_t err = hs_populate_platform(&plat);
@@ -150,7 +150,7 @@ TEST_P(LimExModelTest, StateSize) {
 }
 
 TEST_P(LimExModelTest, QueueExec) {
-    ASSERT_TRUE(nfa != nullptr);
+    ASSERT_TRUE(nfa.get() != nullptr);
 
     initQueue();
     nfaQueueInitState(nfa.get(), &q);
@@ -165,7 +165,7 @@ TEST_P(LimExModelTest, QueueExec) {
 }
 
 TEST_P(LimExModelTest, CompressExpand) {
-    ASSERT_TRUE(nfa != nullptr);
+    ASSERT_TRUE(nfa.get() != nullptr);
 
     u32 real_state_size = nfa->scratchStateSize;
     /* Only look at 8 bytes for limex 64 (rather than the padding) */
@@ -197,7 +197,7 @@ TEST_P(LimExModelTest, CompressExpand) {
 }
 
 TEST_P(LimExModelTest, InitCompressedState0) {
-    ASSERT_TRUE(nfa != nullptr);
+    ASSERT_TRUE(nfa.get() != nullptr);
 
     // 64-bit NFAs assume during compression that they have >= 5 bytes of
     // compressed NFA state, which isn't true for our 8-state test pattern. We
@@ -212,7 +212,7 @@ TEST_P(LimExModelTest, InitCompressedState0) {
 }
 
 TEST_P(LimExModelTest, QueueExecToMatch) {
-    ASSERT_TRUE(nfa != nullptr);
+    ASSERT_TRUE(nfa.get() != nullptr);
 
     initQueue();
     nfaQueueInitState(nfa.get(), &q);
@@ -256,7 +256,7 @@ TEST_P(LimExModelTest, QueueExecToMatch) {
 }
 
 TEST_P(LimExModelTest, QueueExecRose) {
-    ASSERT_TRUE(nfa != nullptr);
+    ASSERT_TRUE(nfa.get() != nullptr);
     initQueue();
 
     // For rose, there's no callback or context.
@@ -277,7 +277,7 @@ TEST_P(LimExModelTest, QueueExecRose) {
 }
 
 TEST_P(LimExModelTest, CheckFinalState) {
-    ASSERT_TRUE(nfa != nullptr);
+    ASSERT_TRUE(nfa.get() != nullptr);
 
     initQueue();
     nfaQueueInitState(nfa.get(), &q);
@@ -321,7 +321,7 @@ protected:
         }
 
         nfa = constructReversedNFA(g_rev, type, cc);
-        ASSERT_TRUE(nfa != nullptr);
+        ASSERT_TRUE(nfa.get() != nullptr);
     }
 
     // NFA type (enum NFAEngineType)
@@ -338,7 +338,7 @@ INSTANTIATE_TEST_CASE_P(LimExReverse, LimExReverseTest,
                         Range((int)LIMEX_NFA_32, (int)LIMEX_NFA_512));
 
 TEST_P(LimExReverseTest, BlockExecReverse) {
-    ASSERT_TRUE(nfa != nullptr);
+    ASSERT_TRUE(nfa.get() != nullptr);
 
     u64a offset = 0;
     const u8 *buf = (const u8 *)SCAN_DATA.c_str();
@@ -381,7 +381,7 @@ protected:
 
         nfa = constructNFA(*g, &rm, fixed_depth_tops, triggers, compress_state,
                            fast_nfa, type, cc);
-        ASSERT_TRUE(nfa != nullptr);
+        ASSERT_TRUE(nfa.get() != nullptr);
 
         full_state = make_bytecode_ptr<char>(nfa->scratchStateSize, 64);
         stream_state = make_bytecode_ptr<char>(nfa->streamStateSize);
@@ -427,7 +427,7 @@ INSTANTIATE_TEST_CASE_P(LimExZombie, LimExZombieTest,
                         Range((int)LIMEX_NFA_32, (int)LIMEX_NFA_512));
 
 TEST_P(LimExZombieTest, GetZombieStatus) {
-    ASSERT_TRUE(nfa != nullptr);
+    ASSERT_TRUE(nfa.get() != nullptr);
     ASSERT_TRUE(nfa->flags & NFA_ZOMBIE);
 
     initQueue();
diff --git a/unit/internal/noodle.cpp b/unit/internal/noodle.cpp
index 16c257b8..c1723744 100644
--- a/unit/internal/noodle.cpp
+++ b/unit/internal/noodle.cpp
@@ -70,7 +70,7 @@ void noodleMatch(const u8 *data, size_t data_len, const char *lit_str,
     u32 id = 1000;
     hwlmLiteral lit(std::string(lit_str, lit_len), nocase, id);
     auto n = noodBuildTable(lit);
-    ASSERT_TRUE(n != nullptr);
+    ASSERT_TRUE(static_cast<bool>(n));
 
     hwlm_error_t rv;
     struct hs_scratch scratch;
diff --git a/util/ng_corpus_editor.cpp b/util/ng_corpus_editor.cpp
index c1149216..c3bfd75f 100644
--- a/util/ng_corpus_editor.cpp
+++ b/util/ng_corpus_editor.cpp
@@ -66,7 +66,7 @@ size_t choosePosition(const SeqT &corpus, CorpusProperties &props) {
 
 class CorpusEditor {
 public:
-    CorpusEditor(CorpusProperties &p) : props(p) {}
+    explicit CorpusEditor(CorpusProperties &p) : props(p) {}
 
     // Apply edits to a corpus
     void applyEdits(string &corpus);
@@ -171,7 +171,7 @@ u8 CorpusEditor::chooseByte() {
 
 class CorpusEditorUtf8 {
 public:
-    CorpusEditorUtf8(CorpusProperties &p) : props(p) {}
+    explicit CorpusEditorUtf8(CorpusProperties &p) : props(p) {}
 
     // Apply edits to a corpus.
     void applyEdits(vector &corpus);