NFA: Move NFAContext to stack (from scratch)

Parent: 7b54856642
Commit: 3e002f8181
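
In broad terms, the commit changes where the per-call LimEx execution context lives: instead of pointing ctx at a pre-allocated, cacheline-aligned slot in hs_scratch (and asserting its alignment), each entry point now declares struct CONTEXT_T ctx as a local and passes &ctx down. The following is a minimal standalone sketch of that pattern; the struct and function names are illustrative stand-ins, not the real CONTEXT_T/STATE_T machinery from the diff below.

/* Illustrative sketch only: generic stand-ins for the LimEx CONTEXT_T.
 * It contrasts a context carved out of a pre-allocated scratch region
 * with a context declared on the caller's stack. */
#include <stdio.h>

struct exec_ctx {
    void (*callback)(unsigned long long match, void *user); /* ~ ctx.callback */
    void *user;                                             /* ~ ctx.context  */
    unsigned long long state;                               /* ~ ctx.s        */
};

static void on_match(unsigned long long m, void *user) {
    (void)user;
    printf("match at %llu\n", m);
}

/* Old shape: the context lives in scratch memory owned by the caller. */
static void run_from_scratch(struct exec_ctx *scratch_ctx) {
    scratch_ctx->callback = on_match;
    scratch_ctx->user = NULL;
    scratch_ctx->state = 0;
    scratch_ctx->callback(42, scratch_ctx->user);
}

/* New shape: the context is a stack local; no scratch slot, no alignment
 * bookkeeping, and nothing persists after the call returns. */
static void run_from_stack(void) {
    struct exec_ctx ctx;
    ctx.callback = on_match;
    ctx.user = NULL;
    ctx.state = 0;
    ctx.callback(42, ctx.user);
}

int main(void) {
    struct exec_ctx scratch_slot; /* stands in for hs_scratch's old nfaContext */
    run_from_scratch(&scratch_slot);
    run_from_stack();
    return 0;
}
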
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Intel Corporation
+ * Copyright (c) 2015-2016, Intel Corporation
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -176,8 +176,6 @@ char STREAM_FN(const IMPL_NFA_T *limex, const u8 *input, size_t length,
     const EXCEPTION_T *exceptions = getExceptionTable(EXCEPTION_T, limex);
     const ReportID *exReports = getExReports(limex);
     const u32 *exceptionMap = limex->exceptionMap;
-    assert(ISALIGNED_CL(ctx));
-    assert(ISALIGNED_CL(&ctx->s));
     STATE_T s = LOAD_STATE(&ctx->s);

     /* assert(ISALIGNED_16(exceptions)); */
@@ -533,17 +531,16 @@ char JOIN(LIMEX_API_ROOT, _Q)(const struct NFA *n, struct mq *q, s64a end) {

     assert(q->cur + 1 < q->end); /* require at least two items */

-    struct CONTEXT_T *ctx = q->scratch->nfaContext;
-    assert(ISALIGNED_CL(ctx));
-    ctx->repeat_ctrl = getRepeatControlBase(q->state, sizeof(STATE_T));
-    ctx->repeat_state = q->streamState + limex->stateSize;
-    ctx->callback = q->cb;
-    ctx->context = q->context;
-    STORE_STATE(&ctx->cached_estate, ZERO_STATE);
+    struct CONTEXT_T ctx;
+    ctx.repeat_ctrl = getRepeatControlBase(q->state, sizeof(STATE_T));
+    ctx.repeat_state = q->streamState + limex->stateSize;
+    ctx.callback = q->cb;
+    ctx.context = q->context;
+    STORE_STATE(&ctx.cached_estate, ZERO_STATE);

     assert(q->items[q->cur].location >= 0);
     DEBUG_PRINTF("LOAD STATE\n");
-    STORE_STATE(&ctx->s, LOAD_STATE(q->state));
+    STORE_STATE(&ctx.s, LOAD_STATE(q->state));
     assert(q->items[q->cur].type == MQE_START);

     u64a offset = q->offset;
@@ -565,7 +562,7 @@ char JOIN(LIMEX_API_ROOT, _Q)(const struct NFA *n, struct mq *q, s64a end) {
             /* do main buffer region */
             DEBUG_PRINTF("MAIN BUFFER SCAN\n");
             assert(ep - offset <= q->length);
-            if (STREAMCB_FN(limex, q->buffer + sp - offset, ep - sp, ctx, sp)
+            if (STREAMCB_FN(limex, q->buffer + sp - offset, ep - sp, &ctx, sp)
                 == MO_HALT_MATCHING) {
                 STORE_STATE(q->state, ZERO_STATE);
                 return 0;
@@ -584,19 +581,19 @@ char JOIN(LIMEX_API_ROOT, _Q)(const struct NFA *n, struct mq *q, s64a end) {
             q->items[q->cur].type = MQE_START;
             q->items[q->cur].location = sp - offset;
             DEBUG_PRINTF("bailing q->cur %u q->end %u\n", q->cur, q->end);
-            STORE_STATE(q->state, LOAD_STATE(&ctx->s));
+            STORE_STATE(q->state, LOAD_STATE(&ctx.s));
             return MO_ALIVE;
         }

-        JOIN(LIMEX_API_ROOT, _HandleEvent)(limex, q, ctx, sp);
+        JOIN(LIMEX_API_ROOT, _HandleEvent)(limex, q, &ctx, sp);

         q->cur++;
     }

-    EXPIRE_ESTATE_FN(limex, ctx, sp);
+    EXPIRE_ESTATE_FN(limex, &ctx, sp);

     DEBUG_PRINTF("END\n");
-    STORE_STATE(q->state, LOAD_STATE(&ctx->s));
+    STORE_STATE(q->state, LOAD_STATE(&ctx.s));

     if (q->cur != q->end) {
         q->cur--;
@@ -605,7 +602,7 @@ char JOIN(LIMEX_API_ROOT, _Q)(const struct NFA *n, struct mq *q, s64a end) {
         return MO_ALIVE;
     }

-    return ISNONZERO_STATE(LOAD_STATE(&ctx->s));
+    return ISNONZERO_STATE(LOAD_STATE(&ctx.s));
 }

 /* used by suffix execution in Rose */
@@ -628,16 +625,15 @@ char JOIN(LIMEX_API_ROOT, _Q2)(const struct NFA *n, struct mq *q, s64a end) {

     assert(q->cur + 1 < q->end); /* require at least two items */

-    struct CONTEXT_T *ctx = q->scratch->nfaContext;
-    assert(ISALIGNED_CL(ctx));
-    ctx->repeat_ctrl = getRepeatControlBase(q->state, sizeof(STATE_T));
-    ctx->repeat_state = q->streamState + limex->stateSize;
-    ctx->callback = q->cb;
-    ctx->context = q->context;
-    STORE_STATE(&ctx->cached_estate, ZERO_STATE);
+    struct CONTEXT_T ctx;
+    ctx.repeat_ctrl = getRepeatControlBase(q->state, sizeof(STATE_T));
+    ctx.repeat_state = q->streamState + limex->stateSize;
+    ctx.callback = q->cb;
+    ctx.context = q->context;
+    STORE_STATE(&ctx.cached_estate, ZERO_STATE);

     DEBUG_PRINTF("LOAD STATE\n");
-    STORE_STATE(&ctx->s, LOAD_STATE(q->state));
+    STORE_STATE(&ctx.s, LOAD_STATE(q->state));
     assert(q->items[q->cur].type == MQE_START);

     u64a offset = q->offset;
@@ -661,7 +657,7 @@ char JOIN(LIMEX_API_ROOT, _Q2)(const struct NFA *n, struct mq *q, s64a end) {
             /* do main buffer region */
             u64a final_look = 0;
             assert(ep - offset <= q->length);
-            if (STREAMFIRST_FN(limex, q->buffer + sp - offset, ep - sp, ctx, sp,
+            if (STREAMFIRST_FN(limex, q->buffer + sp - offset, ep - sp, &ctx, sp,
                                &final_look) == MO_HALT_MATCHING) {
                 DEBUG_PRINTF("final_look:%llu sp:%llu end_abs:%llu offset:%llu\n",
                              final_look, sp, end_abs, offset);
@@ -669,7 +665,7 @@ char JOIN(LIMEX_API_ROOT, _Q2)(const struct NFA *n, struct mq *q, s64a end) {
                 q->cur--;
                 q->items[q->cur].type = MQE_START;
                 q->items[q->cur].location = sp + final_look - offset;
-                STORE_STATE(q->state, LOAD_STATE(&ctx->s));
+                STORE_STATE(q->state, LOAD_STATE(&ctx.s));
                 return MO_MATCHES_PENDING;
             }

@@ -685,19 +681,19 @@ char JOIN(LIMEX_API_ROOT, _Q2)(const struct NFA *n, struct mq *q, s64a end) {
             q->items[q->cur].type = MQE_START;
             q->items[q->cur].location = sp - offset;
             DEBUG_PRINTF("bailing q->cur %u q->end %u\n", q->cur, q->end);
-            STORE_STATE(q->state, LOAD_STATE(&ctx->s));
+            STORE_STATE(q->state, LOAD_STATE(&ctx.s));
             return MO_ALIVE;
         }

-        JOIN(LIMEX_API_ROOT, _HandleEvent)(limex, q, ctx, sp);
+        JOIN(LIMEX_API_ROOT, _HandleEvent)(limex, q, &ctx, sp);

         q->cur++;
     }

-    EXPIRE_ESTATE_FN(limex, ctx, sp);
+    EXPIRE_ESTATE_FN(limex, &ctx, sp);

     DEBUG_PRINTF("END\n");
-    STORE_STATE(q->state, LOAD_STATE(&ctx->s));
+    STORE_STATE(q->state, LOAD_STATE(&ctx.s));

     if (q->cur != q->end) {
         q->cur--;
@@ -706,7 +702,7 @@ char JOIN(LIMEX_API_ROOT, _Q2)(const struct NFA *n, struct mq *q, s64a end) {
         return MO_ALIVE;
     }

-    return ISNONZERO_STATE(LOAD_STATE(&ctx->s));
+    return ISNONZERO_STATE(LOAD_STATE(&ctx.s));
 }

 // Used for execution Rose prefix/infixes.
@@ -720,15 +716,15 @@ char JOIN(LIMEX_API_ROOT, _QR)(const struct NFA *n, struct mq *q,

     assert(q->cur + 1 < q->end); /* require at least two items */

-    struct CONTEXT_T *ctx = q->scratch->nfaContext;
-    ctx->repeat_ctrl = getRepeatControlBase(q->state, sizeof(STATE_T));
-    ctx->repeat_state = q->streamState + limex->stateSize;
-    ctx->callback = NULL;
-    ctx->context = NULL;
-    STORE_STATE(&ctx->cached_estate, ZERO_STATE);
+    struct CONTEXT_T ctx;
+    ctx.repeat_ctrl = getRepeatControlBase(q->state, sizeof(STATE_T));
+    ctx.repeat_state = q->streamState + limex->stateSize;
+    ctx.callback = NULL;
+    ctx.context = NULL;
+    STORE_STATE(&ctx.cached_estate, ZERO_STATE);

     DEBUG_PRINTF("LOAD STATE\n");
-    STORE_STATE(&ctx->s, LOAD_STATE(q->state));
+    STORE_STATE(&ctx.s, LOAD_STATE(q->state));
     assert(q->items[q->cur].type == MQE_START);

     u64a offset = q->offset;
@@ -740,7 +736,7 @@ char JOIN(LIMEX_API_ROOT, _QR)(const struct NFA *n, struct mq *q,
     if (n->maxWidth) {
         if (ep - sp > n->maxWidth) {
             sp = ep - n->maxWidth;
-            STORE_STATE(&ctx->s, INITIAL_FN(limex, !!sp));
+            STORE_STATE(&ctx.s, INITIAL_FN(limex, !!sp));
         }
     }
     assert(ep >= sp);
@@ -751,7 +747,7 @@ char JOIN(LIMEX_API_ROOT, _QR)(const struct NFA *n, struct mq *q,
         u64a local_ep = MIN(offset, ep);
         /* we are starting inside the history buffer */
         STREAMSILENT_FN(limex, q->history + q->hlength + sp - offset,
-                        local_ep - sp, ctx, sp);
+                        local_ep - sp, &ctx, sp);

         sp = local_ep;
     }
@@ -763,30 +759,30 @@ char JOIN(LIMEX_API_ROOT, _QR)(const struct NFA *n, struct mq *q,
         /* do main buffer region */
         DEBUG_PRINTF("MAIN BUFFER SCAN\n");
         assert(ep - offset <= q->length);
-        STREAMSILENT_FN(limex, q->buffer + sp - offset, ep - sp, ctx, sp);
+        STREAMSILENT_FN(limex, q->buffer + sp - offset, ep - sp, &ctx, sp);

         DEBUG_PRINTF("SCAN DONE\n");
     scan_done:
         sp = ep;

-        JOIN(LIMEX_API_ROOT, _HandleEvent)(limex, q, ctx, sp);
+        JOIN(LIMEX_API_ROOT, _HandleEvent)(limex, q, &ctx, sp);

         q->cur++;
     }

-    EXPIRE_ESTATE_FN(limex, ctx, sp);
+    EXPIRE_ESTATE_FN(limex, &ctx, sp);

     DEBUG_PRINTF("END, nfa is %s\n",
-                 ISNONZERO_STATE(ctx->s) ? "still alive" : "dead");
+                 ISNONZERO_STATE(ctx.s) ? "still alive" : "dead");

-    STORE_STATE(q->state, LOAD_STATE(&ctx->s));
+    STORE_STATE(q->state, LOAD_STATE(&ctx.s));

-    if (JOIN(limexInAccept, SIZE)(limex, LOAD_STATE(&ctx->s), ctx->repeat_ctrl,
-                                  ctx->repeat_state, sp + 1, report)) {
+    if (JOIN(limexInAccept, SIZE)(limex, LOAD_STATE(&ctx.s), ctx.repeat_ctrl,
+                                  ctx.repeat_state, sp + 1, report)) {
         return MO_MATCHES_PENDING;
     }

-    return ISNONZERO_STATE(LOAD_STATE(&ctx->s));
+    return ISNONZERO_STATE(LOAD_STATE(&ctx.s));
 }

 char JOIN(LIMEX_API_ROOT, _testEOD)(const struct NFA *n, const char *state,
@@ -813,42 +809,40 @@ char JOIN(LIMEX_API_ROOT, _reportCurrent)(const struct NFA *n, struct mq *q) {

 // Block mode reverse scan.
 char JOIN(LIMEX_API_ROOT, _B_Reverse)(const struct NFA *n, u64a offset,
                                       const u8 *buf, size_t buflen,
                                       const u8 *hbuf, size_t hlen,
-                                      struct hs_scratch *scratch,
+                                      UNUSED struct hs_scratch *scratch,
                                       NfaCallback cb, void *context) {
     assert(buf || hbuf);
     assert(buflen || hlen);

-    /* This may be called INSIDE another NFA, so we need a separate
-     * context --> Hence the nfaContextSom */
-    struct CONTEXT_T *ctx = scratch->nfaContextSom;
-    ctx->repeat_ctrl = NULL;
-    ctx->repeat_state = NULL;
-    ctx->callback = cb;
-    ctx->context = context;
-    STORE_STATE(&ctx->cached_estate, ZERO_STATE);
+    struct CONTEXT_T ctx;
+    ctx.repeat_ctrl = NULL;
+    ctx.repeat_state = NULL;
+    ctx.callback = cb;
+    ctx.context = context;
+    STORE_STATE(&ctx.cached_estate, ZERO_STATE);

     const IMPL_NFA_T *limex = getImplNfa(n);
-    STORE_STATE(&ctx->s, INITIAL_FN(limex, 0)); // always anchored
+    STORE_STATE(&ctx.s, INITIAL_FN(limex, 0)); // always anchored

     // 'buf' may be null, for example when we're scanning at EOD time.
     if (buflen) {
         assert(buf);
         DEBUG_PRINTF("MAIN BUFFER SCAN, %zu bytes\n", buflen);
         offset -= buflen;
-        REV_STREAM_FN(limex, buf, buflen, ctx, offset);
+        REV_STREAM_FN(limex, buf, buflen, &ctx, offset);
     }

     if (hlen) {
         assert(hbuf);
         DEBUG_PRINTF("HISTORY BUFFER SCAN, %zu bytes\n", hlen);
         offset -= hlen;
-        REV_STREAM_FN(limex, hbuf, hlen, ctx, offset);
+        REV_STREAM_FN(limex, hbuf, hlen, &ctx, offset);
     }

-    if (offset == 0 && ISNONZERO_STATE(LOAD_STATE(&ctx->s))) {
-        TESTEOD_REV_FN(limex, &ctx->s, offset, cb, context);
+    if (offset == 0 && ISNONZERO_STATE(LOAD_STATE(&ctx.s))) {
+        TESTEOD_REV_FN(limex, &ctx.s, offset, cb, context);
     }

     // NOTE: return value is unused.
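Note that _B_Reverse keeps its scratch parameter but now marks it UNUSED: the removed comment explains that the old code needed the separate nfaContextSom slot because a reverse scan may run from inside another NFA, and with the context on the stack that slot is no longer required.
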
@@ -40,7 +40,6 @@
 #include "state.h"
 #include "ue2common.h"
 #include "database.h"
-#include "nfa/limex_context.h" // for NFAContext128 etc
 #include "nfa/nfa_api_queue.h"
 #include "rose/rose_internal.h"
 #include "util/fatbit.h"
@@ -101,13 +100,10 @@ hs_error_t alloc_scratch(const hs_scratch_t *proto, hs_scratch_t **scratch) {
     size_t delay_region_size =
         fatbit_array_size(DELAY_SLOT_COUNT, proto->delay_count);

-    size_t nfa_context_size = 2 * sizeof(struct NFAContext512) + 127;
-
     // the size is all the allocated stuff, not including the struct itself
     size_t size = queue_size + 63
                   + bStateSize + tStateSize
                   + fullStateSize + 63 /* cacheline padding */
-                  + nfa_context_size
                   + fatbit_size(proto->handledKeyCount) /* handled roles */
                   + fatbit_size(queueCount) /* active queue array */
                   + 2 * fatbit_size(deduperCount) /* need odd and even logs */
@@ -202,13 +198,6 @@ hs_error_t alloc_scratch(const hs_scratch_t *proto, hs_scratch_t **scratch) {
     current += tStateSize;

     current = ROUNDUP_PTR(current, 64);
-    assert(ISALIGNED_CL(current));
-    s->nfaContext = current;
-    current += sizeof(struct NFAContext512);
-    current = ROUNDUP_PTR(current, 64);
-    assert(ISALIGNED_CL(current));
-    s->nfaContextSom = current;
-    current += sizeof(struct NFAContext512);

     assert(ISALIGNED_N(current, 8));
     s->deduper.som_start_log[0] = (u64a *)current;
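Since no NFAContext is carved out of scratch any more, the allocation above drops the nfa_context_size term and the two cacheline-aligned NFAContext512 regions; the nfaContext and nfaContextSom pointers are likewise removed from hs_scratch and from the unit-test mocks in the hunks that follow.
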
@@ -153,8 +153,6 @@ struct ALIGN_CL_DIRECTIVE hs_scratch {
     char *bstate; /**< block mode states */
     char *tstate; /**< state for transient roses */
     char *qNfaState; /**< queued NFA temp state */
-    void *nfaContext; /**< use for your NFAContextNNN struct */
-    void *nfaContextSom; /**< use for your NFAContextNNN struct by som_runtime */
     char *fullState; /**< uncompressed NFA state */
     struct mq *queues;
     struct fatbit *aqa; /**< active queue array; fatbit of queues that are valid
@@ -91,7 +91,6 @@ protected:
         // Mock up a scratch structure that contains the pieces that we need
         // for NFA execution.
         scratch = aligned_zmalloc_unique<hs_scratch>(sizeof(struct hs_scratch));
-        scratch->nfaContext = nfa_context.get();
     }

     virtual void initQueue() {
@@ -339,7 +338,6 @@ protected:
         // Mock up a scratch structure that contains the pieces that we need
         // for reverse NFA execution.
         scratch = aligned_zmalloc_unique<hs_scratch>(sizeof(struct hs_scratch));
-        scratch->nfaContextSom = nfa_context.get();
     }

     // NFA type (enum NFAEngineType)
@@ -409,7 +407,6 @@ protected:
         // Mock up a scratch structure that contains the pieces that we need
         // for NFA execution.
         scratch = aligned_zmalloc_unique<hs_scratch>(sizeof(struct hs_scratch));
-        scratch->nfaContext = nfa_context.get();
     }

     virtual void initQueue() {