nfa: switch to using bytecode_ptr<NFA>

This commit is contained in:
Justin Viiret 2017-04-03 17:21:37 +10:00 committed by Matthew Barr
parent 905ac78061
commit a197074c5d
27 changed files with 251 additions and 241 deletions

View File

@ -26,9 +26,11 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief Castle: multi-tenant repeat engine, compiler code.
*/
#include "castlecompile.h"
#include "castle_internal.h"
@ -439,7 +441,7 @@ void buildSubcastles(const CastleProto &proto, vector<SubCastle> &subs,
}
}
aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
buildCastle(const CastleProto &proto,
const map<u32, vector<vector<CharReach>>> &triggers,
const CompileContext &cc, const ReportManager &rm) {
@ -577,7 +579,7 @@ buildCastle(const CastleProto &proto,
total_size = ROUNDUP_N(total_size, alignof(mmbit_sparse_iter));
total_size += byte_length(stale_iter); // stale sparse iter
aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
auto nfa = make_bytecode_ptr<NFA>(total_size);
nfa->type = verify_u8(CASTLE_NFA);
nfa->length = verify_u32(total_size);
nfa->nPositions = verify_u32(subs.size());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -26,7 +26,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief Castle: multi-tenant repeat engine, compiler code.
*/
@ -36,7 +37,7 @@
#include "nfa_kind.h"
#include "ue2common.h"
#include "nfagraph/ng_repeat.h"
#include "util/alloc.h"
#include "util/bytecode_ptr.h"
#include "util/depth.h"
#include "util/ue2_containers.h"
@ -120,7 +121,7 @@ void remapCastleTops(CastleProto &proto, std::map<u32, u32> &top_map);
* NOTE: Tops must be contiguous, i.e. \ref remapCastleTops must have been run
* first.
*/
ue2::aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
buildCastle(const CastleProto &proto,
const std::map<u32, std::vector<std::vector<CharReach>>> &triggers,
const CompileContext &cc, const ReportManager &rm);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -35,7 +35,6 @@
#include "grey.h"
#include "mcclellancompile.h"
#include "nfa_internal.h"
#include "util/alloc.h"
#include "util/compile_context.h"
#include "util/container.h"
#include "util/graph_range.h"
@ -1036,9 +1035,9 @@ void update_accel_prog_offset(const gough_build_strat &gbs,
}
}
aligned_unique_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
const CompileContext &cc,
const ReportManager &rm) {
bytecode_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
const CompileContext &cc,
const ReportManager &rm) {
assert(somPrecision == 2 || somPrecision == 4 || somPrecision == 8
|| !cc.streaming);
@ -1071,7 +1070,7 @@ aligned_unique_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
map<dstate_id_t, gough_accel_state_info> accel_allowed;
find_allowed_accel_states(*cfg, blocks, &accel_allowed);
gough_build_strat gbs(raw, *cfg, rm, accel_allowed);
aligned_unique_ptr<NFA> basic_dfa = mcclellanCompile_i(raw, gbs, cc);
auto basic_dfa = mcclellanCompile_i(raw, gbs, cc);
assert(basic_dfa);
if (!basic_dfa) {
return nullptr;
@ -1117,7 +1116,7 @@ aligned_unique_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
gi.stream_som_loc_width = somPrecision;
u32 gough_size = ROUNDUP_N(curr_offset, 16);
aligned_unique_ptr<NFA> gough_dfa = aligned_zmalloc_unique<NFA>(gough_size);
auto gough_dfa = make_bytecode_ptr<NFA>(gough_size);
memcpy(gough_dfa.get(), basic_dfa.get(), basic_dfa->length);
memcpy((char *)gough_dfa.get() + haig_offset, &gi, sizeof(gi));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -32,7 +32,7 @@
#include "mcclellancompile.h"
#include "nfa_kind.h"
#include "ue2common.h"
#include "util/alloc.h"
#include "util/bytecode_ptr.h"
#include "util/ue2_containers.h"
#include "util/order_check.h"
@ -88,10 +88,10 @@ struct raw_som_dfa : public raw_dfa {
* som */
};
aligned_unique_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
const CompileContext &cc,
const ReportManager &rm);
bytecode_ptr<NFA> goughCompile(raw_som_dfa &raw, u8 somPrecision,
const CompileContext &cc,
const ReportManager &rm);
} // namespace ue2
#endif
#endif // GOUGHCOMPILE_H

View File

@ -26,9 +26,11 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief Main NFA build code.
*/
#include "limex_compile.h"
#include "accel.h"
@ -2193,7 +2195,7 @@ struct Factory {
}
static
aligned_unique_ptr<NFA> generateNfa(const build_info &args) {
bytecode_ptr<NFA> generateNfa(const build_info &args) {
if (args.num_states > NFATraits<dtype>::maxStates) {
return nullptr;
}
@ -2295,7 +2297,7 @@ struct Factory {
size_t nfaSize = sizeof(NFA) + offset;
DEBUG_PRINTF("nfa size %zu\n", nfaSize);
auto nfa = aligned_zmalloc_unique<NFA>(nfaSize);
auto nfa = make_bytecode_ptr<NFA>(nfaSize);
assert(nfa); // otherwise we would have thrown std::bad_alloc
implNFA_t *limex = (implNFA_t *)getMutableImplNfa(nfa.get());
@ -2381,7 +2383,7 @@ struct Factory {
template<NFAEngineType dtype>
struct generateNfa {
static aligned_unique_ptr<NFA> call(const build_info &args) {
static bytecode_ptr<NFA> call(const build_info &args) {
return Factory<dtype>::generateNfa(args);
}
};
@ -2478,17 +2480,15 @@ u32 max_state(const ue2::unordered_map<NFAVertex, u32> &state_ids) {
return rv;
}
aligned_unique_ptr<NFA> generate(NGHolder &h,
const ue2::unordered_map<NFAVertex, u32> &states,
const vector<BoundedRepeatData> &repeats,
const map<NFAVertex, NFAStateSet> &reportSquashMap,
const map<NFAVertex, NFAStateSet> &squashMap,
const map<u32, set<NFAVertex>> &tops,
const set<NFAVertex> &zombies,
bool do_accel,
bool stateCompression,
u32 hint,
const CompileContext &cc) {
bytecode_ptr<NFA> generate(NGHolder &h,
const ue2::unordered_map<NFAVertex, u32> &states,
const vector<BoundedRepeatData> &repeats,
const map<NFAVertex, NFAStateSet> &reportSquashMap,
const map<NFAVertex, NFAStateSet> &squashMap,
const map<u32, set<NFAVertex>> &tops,
const set<NFAVertex> &zombies, bool do_accel,
bool stateCompression, u32 hint,
const CompileContext &cc) {
const u32 num_states = max_state(states) + 1;
DEBUG_PRINTF("total states: %u\n", num_states);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -26,7 +26,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief Main NFA build code.
*/
@ -37,10 +38,10 @@
#include <memory>
#include <vector>
#include "ue2common.h"
#include "nfagraph/ng_holder.h"
#include "nfagraph/ng_squash.h" // for NFAStateSet
#include "util/alloc.h"
#include "ue2common.h"
#include "util/bytecode_ptr.h"
#include "util/ue2_containers.h"
struct NFA;
@ -50,7 +51,8 @@ namespace ue2 {
struct BoundedRepeatData;
struct CompileContext;
/** \brief Construct a LimEx NFA from an NGHolder.
/**
* \brief Construct a LimEx NFA from an NGHolder.
*
* \param g Input NFA graph. Must have state IDs assigned.
* \param repeats Bounded repeat information, if any.
@ -66,7 +68,7 @@ struct CompileContext;
* \return a built NFA, or nullptr if no NFA could be constructed for this
* graph.
*/
aligned_unique_ptr<NFA> generate(NGHolder &g,
bytecode_ptr<NFA> generate(NGHolder &g,
const ue2::unordered_map<NFAVertex, u32> &states,
const std::vector<BoundedRepeatData> &repeats,
const std::map<NFAVertex, NFAStateSet> &reportSquashMap,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -456,9 +456,8 @@ bool allocateFSN16(dfa_info &info, dstate_id_t *sherman_base) {
}
static
aligned_unique_ptr<NFA> mcclellanCompile16(dfa_info &info,
const CompileContext &cc,
set<dstate_id_t> *accel_states) {
bytecode_ptr<NFA> mcclellanCompile16(dfa_info &info, const CompileContext &cc,
set<dstate_id_t> *accel_states) {
DEBUG_PRINTF("building mcclellan 16\n");
vector<u32> reports; /* index in ri for the appropriate report list */
@ -497,7 +496,7 @@ aligned_unique_ptr<NFA> mcclellanCompile16(dfa_info &info,
accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
auto nfa = make_bytecode_ptr<NFA>(total_size);
char *nfa_base = (char *)nfa.get();
populateBasicInfo(sizeof(u16), info, total_size, aux_offset, accel_offset,
@ -685,9 +684,8 @@ void allocateFSN8(dfa_info &info,
}
static
aligned_unique_ptr<NFA> mcclellanCompile8(dfa_info &info,
const CompileContext &cc,
set<dstate_id_t> *accel_states) {
bytecode_ptr<NFA> mcclellanCompile8(dfa_info &info, const CompileContext &cc,
set<dstate_id_t> *accel_states) {
DEBUG_PRINTF("building mcclellan 8\n");
vector<u32> reports;
@ -717,12 +715,13 @@ aligned_unique_ptr<NFA> mcclellanCompile8(dfa_info &info,
accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
auto nfa = make_bytecode_ptr<NFA>(total_size);
char *nfa_base = (char *)nfa.get();
mcclellan *m = (mcclellan *)getMutableImplNfa(nfa.get());
allocateFSN8(info, accel_escape_info, &m->accel_limit_8, &m->accept_limit_8);
allocateFSN8(info, accel_escape_info, &m->accel_limit_8,
&m->accept_limit_8);
populateBasicInfo(sizeof(u8), info, total_size, aux_offset, accel_offset,
accel_escape_info.size(), arb, single, nfa.get());
@ -939,9 +938,9 @@ bool is_cyclic_near(const raw_dfa &raw, dstate_id_t root) {
return false;
}
aligned_unique_ptr<NFA> mcclellanCompile_i(raw_dfa &raw, accel_dfa_build_strat &strat,
const CompileContext &cc,
set<dstate_id_t> *accel_states) {
bytecode_ptr<NFA> mcclellanCompile_i(raw_dfa &raw, accel_dfa_build_strat &strat,
const CompileContext &cc,
set<dstate_id_t> *accel_states) {
u16 total_daddy = 0;
dfa_info info(strat);
bool using8bit = cc.grey.allowMcClellan8 && info.size() <= 256;
@ -965,7 +964,7 @@ aligned_unique_ptr<NFA> mcclellanCompile_i(raw_dfa &raw, accel_dfa_build_strat &
info.size() * info.impl_alpha_size, info.size(),
info.impl_alpha_size);
aligned_unique_ptr<NFA> nfa;
bytecode_ptr<NFA> nfa;
if (!using8bit) {
nfa = mcclellanCompile16(info, cc, accel_states);
} else {
@ -980,9 +979,9 @@ aligned_unique_ptr<NFA> mcclellanCompile_i(raw_dfa &raw, accel_dfa_build_strat &
return nfa;
}
aligned_unique_ptr<NFA> mcclellanCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm,
set<dstate_id_t> *accel_states) {
bytecode_ptr<NFA> mcclellanCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm,
set<dstate_id_t> *accel_states) {
mcclellan_build_strat mbs(raw, rm);
return mcclellanCompile_i(raw, mbs, cc, accel_states);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -32,7 +32,7 @@
#include "accel_dfa_build_strat.h"
#include "rdfa.h"
#include "ue2common.h"
#include "util/alloc.h"
#include "util/bytecode_ptr.h"
#include "util/ue2_containers.h"
#include <memory>
@ -55,7 +55,7 @@ public:
std::vector<u32> &reports /* out */,
std::vector<u32> &reports_eod /* out */,
u8 *isSingleReport /* out */,
ReportID *arbReport /* out */) const override;
ReportID *arbReport /* out */) const override;
size_t accelSize(void) const override;
u32 max_allowed_offset_accel() const override;
u32 max_stop_char() const override;
@ -67,13 +67,13 @@ private:
/* accel_states: (optional) on success, is filled with the set of accelerable
* states */
ue2::aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
mcclellanCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm,
std::set<dstate_id_t> *accel_states = nullptr);
/* used internally by mcclellan/haig/gough compile process */
ue2::aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
mcclellanCompile_i(raw_dfa &raw, accel_dfa_build_strat &strat,
const CompileContext &cc,
std::set<dstate_id_t> *accel_states = nullptr);
@ -89,4 +89,4 @@ bool has_accel_mcclellan(const NFA *nfa);
} // namespace ue2
#endif
#endif // MCCLELLANCOMPILE_H

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Intel Corporation
* Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -821,7 +821,7 @@ void fill_in_sherman(NFA *nfa, dfa_info &info, UNUSED u16 sherman_limit) {
}
static
aligned_unique_ptr<NFA> mcshengCompile16(dfa_info &info, dstate_id_t sheng_end,
bytecode_ptr<NFA> mcshengCompile16(dfa_info &info, dstate_id_t sheng_end,
const map<dstate_id_t, AccelScheme> &accel_escape_info,
const Grey &grey) {
DEBUG_PRINTF("building mcsheng 16\n");
@ -872,7 +872,7 @@ aligned_unique_ptr<NFA> mcshengCompile16(dfa_info &info, dstate_id_t sheng_end,
accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
auto nfa = make_bytecode_ptr<NFA>(total_size);
mcsheng *m = (mcsheng *)getMutableImplNfa(nfa.get());
populateBasicInfo(sizeof(u16), info, total_size, aux_offset, accel_offset,
@ -967,7 +967,7 @@ void allocateImplId8(dfa_info &info, dstate_id_t sheng_end,
}
static
aligned_unique_ptr<NFA> mcshengCompile8(dfa_info &info, dstate_id_t sheng_end,
bytecode_ptr<NFA> mcshengCompile8(dfa_info &info, dstate_id_t sheng_end,
const map<dstate_id_t, AccelScheme> &accel_escape_info) {
DEBUG_PRINTF("building mcsheng 8\n");
@ -998,7 +998,7 @@ aligned_unique_ptr<NFA> mcshengCompile8(dfa_info &info, dstate_id_t sheng_end,
accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
auto nfa = make_bytecode_ptr<NFA>(total_size);
mcsheng *m = (mcsheng *)getMutableImplNfa(nfa.get());
allocateImplId8(info, sheng_end, accel_escape_info, &m->accel_limit_8,
@ -1019,8 +1019,8 @@ aligned_unique_ptr<NFA> mcshengCompile8(dfa_info &info, dstate_id_t sheng_end,
return nfa;
}
aligned_unique_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm) {
bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm) {
if (!cc.grey.allowMcSheng) {
return nullptr;
}
@ -1044,7 +1044,7 @@ aligned_unique_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
return nullptr;
}
aligned_unique_ptr<NFA> nfa;
bytecode_ptr<NFA> nfa;
if (!using8bit) {
nfa = mcshengCompile16(info, sheng_end, accel_escape_info, cc.grey);
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Intel Corporation
* Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -29,13 +29,8 @@
#ifndef MCSHENGCOMPILE_H
#define MCSHENGCOMPILE_H
#include "accel_dfa_build_strat.h"
#include "rdfa.h"
#include "ue2common.h"
#include "util/alloc.h"
#include "util/ue2_containers.h"
#include <memory>
#include "util/bytecode_ptr.h"
struct NFA;
@ -43,10 +38,10 @@ namespace ue2 {
class ReportManager;
struct CompileContext;
struct raw_dfa;
ue2::aligned_unique_ptr<NFA>
mcshengCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm);
bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm);
bool has_accel_mcsheng(const NFA *nfa);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Intel Corporation
* Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -450,10 +450,9 @@ bool has_accel_sheng(const NFA *) {
return true; /* consider the sheng region as accelerated */
}
aligned_unique_ptr<NFA> shengCompile(raw_dfa &raw,
const CompileContext &cc,
const ReportManager &rm,
set<dstate_id_t> *accel_states) {
bytecode_ptr<NFA> shengCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm,
set<dstate_id_t> *accel_states) {
if (!cc.grey.allowSheng) {
DEBUG_PRINTF("Sheng is not allowed!\n");
return nullptr;
@ -508,7 +507,7 @@ aligned_unique_ptr<NFA> shengCompile(raw_dfa &raw,
DEBUG_PRINTF("NFA: %u, aux: %u, reports: %u, accel: %u, total: %u\n",
nfa_size, total_aux, total_reports, total_accel, total_size);
aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
auto nfa = make_bytecode_ptr<NFA>(total_size);
populateBasicInfo(nfa.get(), info, accelInfo, nfa_size, reports_offset,
accel_offset, total_size, total_size - sizeof(NFA));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Intel Corporation
* Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -26,12 +26,12 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SHENGCOMPILE_H_
#define SHENGCOMPILE_H_
#ifndef SHENGCOMPILE_H
#define SHENGCOMPILE_H
#include "accel_dfa_build_strat.h"
#include "rdfa.h"
#include "util/alloc.h"
#include "util/bytecode_ptr.h"
#include "util/charreach.h"
#include "util/ue2_containers.h"
@ -62,9 +62,9 @@ private:
raw_dfa &rdfa;
};
aligned_unique_ptr<NFA>
shengCompile(raw_dfa &raw, const CompileContext &cc, const ReportManager &rm,
std::set<dstate_id_t> *accel_states = nullptr);
bytecode_ptr<NFA> shengCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm,
std::set<dstate_id_t> *accel_states = nullptr);
struct sheng_escape_info {
CharReach outs;
@ -77,4 +77,4 @@ bool has_accel_sheng(const NFA *nfa);
} // namespace ue2
#endif /* SHENGCOMPILE_H_ */
#endif /* SHENGCOMPILE_H */

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Intel Corporation
* Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -26,9 +26,9 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* \brief Tamarama: container engine for exclusive engines,
* compiler code.
/**
* \file
* \brief Tamarama: container engine for exclusive engines, compiler code.
*/
#include "config.h"
@ -111,8 +111,9 @@ void copyInSubnfas(const char *base_offset, NFA &nfa,
returns via out_top_remap, a mapping indicating how tops in the subengines
in tamaInfo relate to the tamarama's tops.
*/
aligned_unique_ptr<NFA> buildTamarama(const TamaInfo &tamaInfo, const u32 queue,
map<pair<const NFA *, u32>, u32> &out_top_remap) {
bytecode_ptr<NFA>
buildTamarama(const TamaInfo &tamaInfo, const u32 queue,
map<pair<const NFA *, u32>, u32> &out_top_remap) {
vector<u32> top_base;
remapTops(tamaInfo, top_base, out_top_remap);
@ -133,7 +134,7 @@ aligned_unique_ptr<NFA> buildTamarama(const TamaInfo &tamaInfo, const u32 queue,
// use subSize as a sentinel value for no active subengines,
// so add one to subSize here
u32 activeIdxSize = calcPackedBytes(subSize + 1);
aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(total_size);
auto nfa = make_bytecode_ptr<NFA>(total_size);
nfa->type = verify_u8(TAMARAMA_NFA);
nfa->length = verify_u32(total_size);
nfa->queueIndex = queue;
@ -148,7 +149,7 @@ aligned_unique_ptr<NFA> buildTamarama(const TamaInfo &tamaInfo, const u32 queue,
copy_bytes(ptr, top_base);
ptr += byte_length(top_base);
u32 *offsets = (u32*)ptr;
u32 *offsets = (u32 *)ptr;
char *sub_nfa_offset = ptr + sizeof(u32) * subSize;
copyInSubnfas(base_offset, *nfa, tamaInfo, offsets, sub_nfa_offset,
activeIdxSize);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Intel Corporation
* Copyright (c) 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -26,15 +26,16 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* \brief Tamarama: container engine for exclusive engines, compiler code.
/**
* \file
* \brief Tamarama: container engine for exclusive engines, compiler code.
*/
#ifndef NFA_TAMARAMACOMPILE_H
#define NFA_TAMARAMACOMPILE_H
#include "ue2common.h"
#include "util/alloc.h"
#include "util/bytecode_ptr.h"
#include <map>
#include <set>
@ -45,7 +46,7 @@ struct NFA;
namespace ue2 {
/**
* \brief A TamaProto that contains top remapping and reports info
* \brief A TamaProto that contains top remapping and reports info.
*/
struct TamaProto {
void add(const NFA *n, const u32 id, const u32 top,
@ -59,7 +60,7 @@ struct TamaProto {
};
/**
* \brief Contruction info for a Tamarama engine:
* \brief Construction info for a Tamarama engine:
* contains at least two subengines.
*
* A TamaInfo is converted into a single NFA, with each top triggering a
@ -70,7 +71,7 @@ struct TamaInfo {
static constexpr size_t max_occupancy = 65536; // arbitrary limit
/** \brief Add a new subengine. */
void add(NFA* sub, const std::set<u32> &top);
void add(NFA *sub, const std::set<u32> &top);
/** \brief All the subengines */
std::vector<NFA *> subengines;
@ -86,9 +87,10 @@ std::set<ReportID> all_reports(const TamaProto &proto);
returns via out_top_remap, a mapping indicating how tops in the subengines
in tamaInfo relate to the tamarama's tops.
*/
ue2::aligned_unique_ptr<NFA> buildTamarama(const TamaInfo &tamaInfo,
const u32 queue,
std::map<std::pair<const NFA *, u32>, u32> &out_top_remap);
bytecode_ptr<NFA>
buildTamarama(const TamaInfo &tamaInfo, const u32 queue,
std::map<std::pair<const NFA *, u32>, u32> &out_top_remap);
} // namespace ue2
#endif // NFA_TAMARAMACOMPILE_H

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -26,7 +26,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief Large Bounded Repeat (LBR) engine build code.
*/
@ -128,25 +129,24 @@ void fillNfa(NFA *nfa, lbr_common *c, ReportID report, const depth &repeatMin,
}
template <class LbrStruct> static
aligned_unique_ptr<NFA> makeLbrNfa(NFAEngineType nfa_type,
enum RepeatType rtype,
const depth &repeatMax) {
bytecode_ptr<NFA> makeLbrNfa(NFAEngineType nfa_type, enum RepeatType rtype,
const depth &repeatMax) {
size_t tableLen = 0;
if (rtype == REPEAT_SPARSE_OPTIMAL_P) {
tableLen = sizeof(u64a) * (repeatMax + 1);
}
size_t len = sizeof(NFA) + sizeof(LbrStruct) + sizeof(RepeatInfo) +
tableLen + sizeof(u64a);
aligned_unique_ptr<NFA> nfa = aligned_zmalloc_unique<NFA>(len);
auto nfa = make_bytecode_ptr<NFA>(len);
nfa->type = verify_u8(nfa_type);
nfa->length = verify_u32(len);
return nfa;
}
static
aligned_unique_ptr<NFA> buildLbrDot(const CharReach &cr, const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
bytecode_ptr<NFA> buildLbrDot(const CharReach &cr, const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
if (!cr.all()) {
return nullptr;
}
@ -164,10 +164,9 @@ aligned_unique_ptr<NFA> buildLbrDot(const CharReach &cr, const depth &repeatMin,
}
static
aligned_unique_ptr<NFA> buildLbrVerm(const CharReach &cr,
const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
bytecode_ptr<NFA> buildLbrVerm(const CharReach &cr, const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
const CharReach escapes(~cr);
if (escapes.count() != 1) {
@ -188,10 +187,9 @@ aligned_unique_ptr<NFA> buildLbrVerm(const CharReach &cr,
}
static
aligned_unique_ptr<NFA> buildLbrNVerm(const CharReach &cr,
const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
bytecode_ptr<NFA> buildLbrNVerm(const CharReach &cr, const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
const CharReach escapes(cr);
if (escapes.count() != 1) {
@ -212,10 +210,9 @@ aligned_unique_ptr<NFA> buildLbrNVerm(const CharReach &cr,
}
static
aligned_unique_ptr<NFA> buildLbrShuf(const CharReach &cr,
const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
bytecode_ptr<NFA> buildLbrShuf(const CharReach &cr, const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod,
is_reset);
auto nfa = makeLbrNfa<lbr_shuf>(LBR_NFA_SHUF, rtype, repeatMax);
@ -233,10 +230,9 @@ aligned_unique_ptr<NFA> buildLbrShuf(const CharReach &cr,
}
static
aligned_unique_ptr<NFA> buildLbrTruf(const CharReach &cr,
const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
bytecode_ptr<NFA> buildLbrTruf(const CharReach &cr, const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
enum RepeatType rtype = chooseRepeatType(repeatMin, repeatMax, minPeriod,
is_reset);
auto nfa = makeLbrNfa<lbr_truf>(LBR_NFA_TRUF, rtype, repeatMax);
@ -252,10 +248,9 @@ aligned_unique_ptr<NFA> buildLbrTruf(const CharReach &cr,
}
static
aligned_unique_ptr<NFA> constructLBR(const CharReach &cr,
const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
bytecode_ptr<NFA> constructLBR(const CharReach &cr, const depth &repeatMin,
const depth &repeatMax, u32 minPeriod,
bool is_reset, ReportID report) {
DEBUG_PRINTF("bounds={%s,%s}, cr=%s (count %zu), report=%u\n",
repeatMin.str().c_str(), repeatMax.str().c_str(),
describeClass(cr, 20, CC_OUT_TEXT).c_str(), cr.count(),
@ -263,8 +258,8 @@ aligned_unique_ptr<NFA> constructLBR(const CharReach &cr,
assert(repeatMin <= repeatMax);
assert(repeatMax.is_reachable());
aligned_unique_ptr<NFA> nfa
= buildLbrDot(cr, repeatMin, repeatMax, minPeriod, is_reset, report);
auto nfa =
buildLbrDot(cr, repeatMin, repeatMax, minPeriod, is_reset, report);
if (!nfa) {
nfa = buildLbrVerm(cr, repeatMin, repeatMax, minPeriod, is_reset,
@ -291,10 +286,10 @@ aligned_unique_ptr<NFA> constructLBR(const CharReach &cr,
return nfa;
}
aligned_unique_ptr<NFA> constructLBR(const CastleProto &proto,
const vector<vector<CharReach>> &triggers,
const CompileContext &cc,
const ReportManager &rm) {
bytecode_ptr<NFA> constructLBR(const CastleProto &proto,
const vector<vector<CharReach>> &triggers,
const CompileContext &cc,
const ReportManager &rm) {
if (!cc.grey.allowLbr) {
return nullptr;
}
@ -330,10 +325,10 @@ aligned_unique_ptr<NFA> constructLBR(const CastleProto &proto,
}
/** \brief Construct an LBR engine from the given graph \p g. */
aligned_unique_ptr<NFA> constructLBR(const NGHolder &g,
const vector<vector<CharReach>> &triggers,
const CompileContext &cc,
const ReportManager &rm) {
bytecode_ptr<NFA> constructLBR(const NGHolder &g,
const vector<vector<CharReach>> &triggers,
const CompileContext &cc,
const ReportManager &rm) {
if (!cc.grey.allowLbr) {
return nullptr;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -26,7 +26,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief Large Bounded Repeat (LBR) engine build code.
*/
@ -34,7 +35,7 @@
#define NG_LBR_H
#include "ue2common.h"
#include "util/alloc.h"
#include "util/bytecode_ptr.h"
#include <memory>
#include <vector>
@ -51,14 +52,16 @@ struct CompileContext;
struct Grey;
/** \brief Construct an LBR engine from the given graph \p g. */
aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
constructLBR(const NGHolder &g,
const std::vector<std::vector<CharReach>> &triggers,
const CompileContext &cc, const ReportManager &rm);
/** \brief Construct an LBR engine from the given CastleProto, which should
* contain only one repeat. */
aligned_unique_ptr<NFA>
/**
* \brief Construct an LBR engine from the given CastleProto, which should
* contain only one repeat.
*/
bytecode_ptr<NFA>
constructLBR(const CastleProto &proto,
const std::vector<std::vector<CharReach>> &triggers,
const CompileContext &cc, const ReportManager &rm);

View File

@ -26,9 +26,11 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief Limex NFA construction code.
*/
#include "ng_limex.h"
#include "grey.h"
@ -623,7 +625,7 @@ void remapReportsToPrograms(NGHolder &h, const ReportManager &rm) {
}
static
aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
constructNFA(const NGHolder &h_in, const ReportManager *rm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
@ -682,7 +684,7 @@ constructNFA(const NGHolder &h_in, const ReportManager *rm,
zombies, do_accel, compress_state, hint, cc);
}
aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
constructNFA(const NGHolder &h_in, const ReportManager *rm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
@ -696,7 +698,7 @@ constructNFA(const NGHolder &h_in, const ReportManager *rm,
#ifndef RELEASE_BUILD
// Variant that allows a hint to be specified.
aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
constructNFA(const NGHolder &h_in, const ReportManager *rm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
@ -709,8 +711,8 @@ constructNFA(const NGHolder &h_in, const ReportManager *rm,
#endif // RELEASE_BUILD
static
aligned_unique_ptr<NFA> constructReversedNFA_i(const NGHolder &h_in, u32 hint,
const CompileContext &cc) {
bytecode_ptr<NFA> constructReversedNFA_i(const NGHolder &h_in, u32 hint,
const CompileContext &cc) {
// Make a mutable copy of the graph that we can renumber etc.
NGHolder h;
cloneHolder(h, h_in);
@ -739,16 +741,16 @@ aligned_unique_ptr<NFA> constructReversedNFA_i(const NGHolder &h_in, u32 hint,
zombies, false, false, hint, cc);
}
aligned_unique_ptr<NFA> constructReversedNFA(const NGHolder &h_in,
const CompileContext &cc) {
bytecode_ptr<NFA> constructReversedNFA(const NGHolder &h_in,
const CompileContext &cc) {
u32 hint = INVALID_NFA; // no hint
return constructReversedNFA_i(h_in, hint, cc);
}
#ifndef RELEASE_BUILD
// Variant that allows a hint to be specified.
aligned_unique_ptr<NFA> constructReversedNFA(const NGHolder &h_in, u32 hint,
const CompileContext &cc) {
bytecode_ptr<NFA> constructReversedNFA(const NGHolder &h_in, u32 hint,
const CompileContext &cc) {
return constructReversedNFA_i(h_in, hint, cc);
}
#endif // RELEASE_BUILD

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -26,7 +26,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief Limex NFA construction code.
*/
@ -35,7 +36,7 @@
#include "ue2common.h"
#include "som/som.h"
#include "util/alloc.h"
#include "util/bytecode_ptr.h"
#include <map>
#include <memory>
@ -51,7 +52,8 @@ class NGHolder;
class ReportManager;
struct CompileContext;
/** \brief Determine if the given graph is implementable as an NFA.
/**
* \brief Determine if the given graph is implementable as an NFA.
*
* Returns zero if the NFA is not implementable (usually because it has too
* many states for any of our models). Otherwise returns the number of states.
@ -62,11 +64,14 @@ struct CompileContext;
u32 isImplementableNFA(const NGHolder &g, const ReportManager *rm,
const CompileContext &cc);
/** \brief Late-stage graph reductions.
/**
* \brief Late-stage graph reductions.
*
* This will call \ref removeRedundancy and apply its changes to the given
* holder only if it is implementable afterwards. */
void reduceImplementableGraph(NGHolder &g, som_type som, const ReportManager *rm,
* holder only if it is implementable afterwards.
*/
void reduceImplementableGraph(NGHolder &g, som_type som,
const ReportManager *rm,
const CompileContext &cc);
/**
@ -79,7 +84,8 @@ void reduceImplementableGraph(NGHolder &g, som_type som, const ReportManager *rm
u32 countAccelStates(const NGHolder &g, const ReportManager *rm,
const CompileContext &cc);
/** \brief Construct an NFA from the given NFAGraph.
/**
* \brief Construct an NFA from the given graph.
*
* Returns zero if the NFA is not implementable (usually because it has too
* many states for any of our models). Otherwise returns the number of states.
@ -90,23 +96,25 @@ u32 countAccelStates(const NGHolder &g, const ReportManager *rm,
* Note: this variant of the function allows a model to be specified with the
* \a hint parameter.
*/
aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
constructNFA(const NGHolder &g, const ReportManager *rm,
const std::map<u32, u32> &fixed_depth_tops,
const std::map<u32, std::vector<std::vector<CharReach>>> &triggers,
bool compress_state, const CompileContext &cc);
/** \brief Build a reverse NFA from the graph given, which should have already
/**
* \brief Build a reverse NFA from the graph given, which should have already
* been reversed.
*
* Used for reverse NFAs used in SOM mode.
*/
aligned_unique_ptr<NFA> constructReversedNFA(const NGHolder &h,
const CompileContext &cc);
bytecode_ptr<NFA> constructReversedNFA(const NGHolder &h,
const CompileContext &cc);
#ifndef RELEASE_BUILD
/** \brief Construct an NFA (with model type hint) from the given NFAGraph.
/**
* \brief Construct an NFA (with model type hint) from the given graph.
*
* Returns zero if the NFA is not implementable (usually because it has too
* many states for any of our models). Otherwise returns the number of states.
@ -117,19 +125,20 @@ aligned_unique_ptr<NFA> constructReversedNFA(const NGHolder &h,
* Note: this variant of the function allows a model to be specified with the
* \a hint parameter.
*/
aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
constructNFA(const NGHolder &g, const ReportManager *rm,
const std::map<u32, u32> &fixed_depth_tops,
const std::map<u32, std::vector<std::vector<CharReach>>> &triggers,
bool compress_state, u32 hint, const CompileContext &cc);
/** \brief Build a reverse NFA (with model type hint) from the graph given,
/**
* \brief Build a reverse NFA (with model type hint) from the graph given,
* which should have already been reversed.
*
* Used for reverse NFAs used in SOM mode.
*/
aligned_unique_ptr<NFA> constructReversedNFA(const NGHolder &h, u32 hint,
const CompileContext &cc);
bytecode_ptr<NFA> constructReversedNFA(const NGHolder &h, u32 hint,
const CompileContext &cc);
#endif // RELEASE_BUILD

View File

@ -26,7 +26,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief SOM ("Start of Match") analysis.
*/
@ -1731,19 +1732,19 @@ void clearProperInEdges(NGHolder &g, const NFAVertex sink) {
namespace {
struct SomRevNfa {
SomRevNfa(NFAVertex s, ReportID r, aligned_unique_ptr<NFA> n)
SomRevNfa(NFAVertex s, ReportID r, bytecode_ptr<NFA> n)
: sink(s), report(r), nfa(move(n)) {}
SomRevNfa(SomRevNfa&& s) // MSVC2013 needs this for emplace
: sink(s.sink), report(s.report), nfa(move(s.nfa)) {}
NFAVertex sink;
ReportID report;
aligned_unique_ptr<NFA> nfa;
bytecode_ptr<NFA> nfa;
};
}
static
aligned_unique_ptr<NFA> makeBareSomRevNfa(const NGHolder &g,
const CompileContext &cc) {
bytecode_ptr<NFA> makeBareSomRevNfa(const NGHolder &g,
const CompileContext &cc) {
// Create a reversed anchored version of this NFA which fires a zero report
// ID on accept.
NGHolder g_rev;
@ -1759,7 +1760,7 @@ aligned_unique_ptr<NFA> makeBareSomRevNfa(const NGHolder &g,
DEBUG_PRINTF("building a rev NFA with %zu vertices\n", num_vertices(g_rev));
aligned_unique_ptr<NFA> nfa = constructReversedNFA(g_rev, cc);
auto nfa = constructReversedNFA(g_rev, cc);
if (!nfa) {
return nfa;
}
@ -1794,7 +1795,7 @@ bool makeSomRevNfa(vector<SomRevNfa> &som_nfas, const NGHolder &g,
renumber_vertices(g2); // for findMinWidth, findMaxWidth.
aligned_unique_ptr<NFA> nfa = makeBareSomRevNfa(g2, cc);
auto nfa = makeBareSomRevNfa(g2, cc);
if (!nfa) {
DEBUG_PRINTF("couldn't build rev nfa\n");
return false;

View File

@ -813,7 +813,7 @@ vector<unique_ptr<raw_dfa>> getAnchoredDfas(RoseBuildImpl &build,
*/
static
size_t buildNfas(vector<raw_dfa> &anchored_dfas,
vector<aligned_unique_ptr<NFA>> *nfas,
vector<bytecode_ptr<NFA>> *nfas,
vector<u32> *start_offset, const CompileContext &cc,
const ReportManager &rm) {
const size_t num_dfas = anchored_dfas.size();
@ -883,7 +883,7 @@ buildAnchoredMatcher(RoseBuildImpl &build, const vector<LitFragment> &fragments,
remapIdsToPrograms(fragments, rdfa);
}
vector<aligned_unique_ptr<NFA>> nfas;
vector<bytecode_ptr<NFA>> nfas;
vector<u32> start_offset; // start offset for each dfa (dots removed)
size_t total_size = buildNfas(dfas, &nfas, &start_offset, cc, build.rm);

View File

@ -74,7 +74,6 @@
#include "nfagraph/ng_width.h"
#include "smallwrite/smallwrite_build.h"
#include "som/slot_manager.h"
#include "util/alloc.h"
#include "util/bitutils.h"
#include "util/boundary_reports.h"
#include "util/charreach.h"
@ -274,7 +273,7 @@ struct ProgramBuild : noncopyable {
/** \brief subengine info including built engine and
* corresponding triggering rose vertices */
struct ExclusiveSubengine {
aligned_unique_ptr<NFA> nfa;
bytecode_ptr<NFA> nfa;
vector<RoseVertex> vertices;
};
@ -655,8 +654,8 @@ void findFixedDepthTops(const RoseGraph &g, const set<PredTopPair> &triggers,
* engine.
*/
static
aligned_unique_ptr<NFA> pickImpl(aligned_unique_ptr<NFA> dfa_impl,
aligned_unique_ptr<NFA> nfa_impl) {
bytecode_ptr<NFA> pickImpl(bytecode_ptr<NFA> dfa_impl,
bytecode_ptr<NFA> nfa_impl) {
assert(nfa_impl);
assert(dfa_impl);
assert(isDfaType(dfa_impl->type));
@ -708,7 +707,7 @@ aligned_unique_ptr<NFA> pickImpl(aligned_unique_ptr<NFA> dfa_impl,
* otherwise a Castle.
*/
static
aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
buildRepeatEngine(const CastleProto &proto,
const map<u32, vector<vector<CharReach>>> &triggers,
const CompileContext &cc, const ReportManager &rm) {
@ -724,7 +723,7 @@ buildRepeatEngine(const CastleProto &proto,
}
static
aligned_unique_ptr<NFA> getDfa(raw_dfa &rdfa, bool is_transient,
bytecode_ptr<NFA> getDfa(raw_dfa &rdfa, bool is_transient,
const CompileContext &cc,
const ReportManager &rm) {
// Unleash the Sheng!!
@ -744,7 +743,7 @@ aligned_unique_ptr<NFA> getDfa(raw_dfa &rdfa, bool is_transient,
/* builds suffix nfas */
static
aligned_unique_ptr<NFA>
bytecode_ptr<NFA>
buildSuffix(const ReportManager &rm, const SomSlotManager &ssm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
@ -873,14 +872,15 @@ void findTriggerSequences(const RoseBuildImpl &tbi,
}
}
static aligned_unique_ptr<NFA>
makeLeftNfa(const RoseBuildImpl &tbi, left_id &left,
const bool is_prefix, const bool is_transient,
const map<left_id, set<PredTopPair> > &infixTriggers,
static
bytecode_ptr<NFA>
makeLeftNfa(const RoseBuildImpl &tbi, left_id &left, const bool is_prefix,
const bool is_transient,
const map<left_id, set<PredTopPair>> &infixTriggers,
const CompileContext &cc) {
const ReportManager &rm = tbi.rm;
aligned_unique_ptr<NFA> n;
bytecode_ptr<NFA> n;
// Should compress state if this rose is non-transient and we're in
// streaming mode.
@ -1181,7 +1181,7 @@ bool buildLeftfix(RoseBuildImpl &build, build_context &bc, bool prefix, u32 qi,
leftfix = updateLeftfixWithEager(g, eager.at(leftfix), succs);
}
aligned_unique_ptr<NFA> nfa;
bytecode_ptr<NFA> nfa;
// Need to build NFA, which is either predestined to be a Haig (in SOM mode)
// or could be all manner of things.
if (leftfix.haig()) {
@ -1669,26 +1669,26 @@ bool hasNonSmallBlockOutfix(const vector<OutfixInfo> &outfixes) {
}
namespace {
class OutfixBuilder : public boost::static_visitor<aligned_unique_ptr<NFA>> {
class OutfixBuilder : public boost::static_visitor<bytecode_ptr<NFA>> {
public:
explicit OutfixBuilder(const RoseBuildImpl &build_in) : build(build_in) {}
aligned_unique_ptr<NFA> operator()(boost::blank&) const {
bytecode_ptr<NFA> operator()(boost::blank&) const {
return nullptr;
};
aligned_unique_ptr<NFA> operator()(unique_ptr<raw_dfa> &rdfa) const {
bytecode_ptr<NFA> operator()(unique_ptr<raw_dfa> &rdfa) const {
// Unleash the mighty DFA!
return getDfa(*rdfa, false, build.cc, build.rm);
}
aligned_unique_ptr<NFA> operator()(unique_ptr<raw_som_dfa> &haig) const {
bytecode_ptr<NFA> operator()(unique_ptr<raw_som_dfa> &haig) const {
// Unleash the Goughfish!
return goughCompile(*haig, build.ssm.somPrecision(), build.cc,
build.rm);
}
aligned_unique_ptr<NFA> operator()(unique_ptr<NGHolder> &holder) const {
bytecode_ptr<NFA> operator()(unique_ptr<NGHolder> &holder) const {
const CompileContext &cc = build.cc;
const ReportManager &rm = build.rm;
@ -1717,7 +1717,7 @@ public:
return n;
}
aligned_unique_ptr<NFA> operator()(UNUSED MpvProto &mpv) const {
bytecode_ptr<NFA> operator()(UNUSED MpvProto &mpv) const {
// MPV construction handled separately.
assert(mpv.puffettes.empty());
return nullptr;
@ -1729,7 +1729,7 @@ private:
}
static
aligned_unique_ptr<NFA> buildOutfix(const RoseBuildImpl &build, OutfixInfo &outfix) {
bytecode_ptr<NFA> buildOutfix(const RoseBuildImpl &build, OutfixInfo &outfix) {
assert(!outfix.is_dead()); // should not be marked dead.
auto n = boost::apply_visitor(OutfixBuilder(build), outfix.proto);

View File

@ -403,10 +403,10 @@ bool is_slow(const raw_dfa &rdfa, const set<dstate_id_t> &accel,
}
static
aligned_unique_ptr<NFA> getDfa(raw_dfa &rdfa, const CompileContext &cc,
const ReportManager &rm,
set<dstate_id_t> &accel_states) {
aligned_unique_ptr<NFA> dfa = nullptr;
bytecode_ptr<NFA> getDfa(raw_dfa &rdfa, const CompileContext &cc,
const ReportManager &rm,
set<dstate_id_t> &accel_states) {
bytecode_ptr<NFA> dfa = nullptr;
if (cc.grey.allowSmallWriteSheng) {
dfa = shengCompile(rdfa, cc, rm, &accel_states);
}
@ -417,10 +417,9 @@ aligned_unique_ptr<NFA> getDfa(raw_dfa &rdfa, const CompileContext &cc,
}
static
aligned_unique_ptr<NFA> prepEngine(raw_dfa &rdfa, u32 roseQuality,
const CompileContext &cc,
const ReportManager &rm, u32 *start_offset,
u32 *small_region) {
bytecode_ptr<NFA> prepEngine(raw_dfa &rdfa, u32 roseQuality,
const CompileContext &cc, const ReportManager &rm,
u32 *start_offset, u32 *small_region) {
*start_offset = remove_leading_dots(rdfa);
// Unleash the McClellan!

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -26,9 +26,11 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief SOM Slot Manager.
*/
#include "slot_manager.h"
#include "slot_manager_internal.h"
@ -245,7 +247,7 @@ u32 SomSlotManager::numSomSlots() const {
return nextSomSlot;
}
u32 SomSlotManager::addRevNfa(aligned_unique_ptr<NFA> nfa, u32 maxWidth) {
u32 SomSlotManager::addRevNfa(bytecode_ptr<NFA> nfa, u32 maxWidth) {
u32 rv = verify_u32(rev_nfas.size());
rev_nfas.push_back(move(nfa));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, Intel Corporation
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -26,7 +26,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
/**
* \file
* \brief SOM Slot Manager.
*/
@ -35,7 +36,7 @@
#include "ue2common.h"
#include "nfagraph/ng_holder.h"
#include "util/alloc.h"
#include "util/bytecode_ptr.h"
#include "util/noncopyable.h"
#include "util/ue2_containers.h"
@ -78,11 +79,11 @@ public:
u32 numSomSlots() const;
const std::deque<aligned_unique_ptr<NFA>> &getRevNfas() const {
const std::deque<bytecode_ptr<NFA>> &getRevNfas() const {
return rev_nfas;
}
u32 addRevNfa(aligned_unique_ptr<NFA> nfa, u32 maxWidth);
u32 addRevNfa(bytecode_ptr<NFA> nfa, u32 maxWidth);
u32 somHistoryRequired() const { return historyRequired; }
@ -97,7 +98,7 @@ private:
std::unique_ptr<SlotCache> cache;
/** \brief Reverse NFAs used for SOM support. */
std::deque<aligned_unique_ptr<NFA>> rev_nfas;
std::deque<bytecode_ptr<NFA>> rev_nfas;
/** \brief In streaming mode, the amount of history we've committed to
* using for SOM rev NFAs. */

View File

@ -63,8 +63,7 @@ public:
bytecode_ptr(std::nullptr_t) {}
T *get() { return ptr.get(); };
const T *get() const { return ptr.get(); };
T *get() const { return ptr.get(); };
T &operator*() { return *ptr; }
const T &operator*() const { return *ptr; }

View File

@ -152,7 +152,7 @@ protected:
unsigned matches;
// Compiled NFA structure.
aligned_unique_ptr<NFA> nfa;
bytecode_ptr<NFA> nfa;
// Space for full state.
aligned_unique_ptr<char> full_state;

View File

@ -116,7 +116,7 @@ protected:
unsigned matches;
// Compiled NFA structure.
aligned_unique_ptr<NFA> nfa;
bytecode_ptr<NFA> nfa;
// Space for full state.
aligned_unique_ptr<char> full_state;
@ -187,8 +187,7 @@ TEST_P(LimExModelTest, CompressExpand) {
// Expand state into a new copy and check that it matches the original
// uncompressed state.
aligned_unique_ptr<char> state_copy =
aligned_zmalloc_unique<char>(nfa->scratchStateSize);
auto state_copy = aligned_zmalloc_unique<char>(nfa->scratchStateSize);
char *dest = state_copy.get();
memset(dest, 0xff, nfa->scratchStateSize);
nfaExpandState(nfa.get(), dest, q.streamState, q.offset,
@ -331,7 +330,7 @@ protected:
unsigned matches;
// Compiled NFA structure.
aligned_unique_ptr<NFA> nfa;
bytecode_ptr<NFA> nfa;
};
INSTANTIATE_TEST_CASE_P(LimExReverse, LimExReverseTest,
@ -410,7 +409,7 @@ protected:
unsigned matches;
// Compiled NFA structure.
aligned_unique_ptr<NFA> nfa;
bytecode_ptr<NFA> nfa;
// Space for full state.
aligned_unique_ptr<char> full_state;