Rose: clean up use of scratch, RoseContext

Justin Viiret 2016-02-04 12:46:53 +11:00 committed by Matthew Barr
parent 9e9bb6a960
commit 09bf568d95
8 changed files with 267 additions and 283 deletions
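The change is mechanical: call sites that previously took a struct RoseContext *tctxt and re-derived the scratch, engine and state pointers (via tctxtToScratch(), scratch->core_info.rose and scratch->core_info.state) now take const struct RoseEngine *t and struct hs_scratch *scratch directly, reading state from scratch only where it is needed. The stand-alone sketch below, which uses stand-in types rather than the real Hyperscan definitions, illustrates the before/after shape of that pattern; the container-of style tctxtToScratch() helper is an assumption modelled on the calls visible in this diff, not code from the commit.

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in types; the real RoseEngine/hs_scratch layouts differ. */
    struct RoseEngine { int id; };
    struct RoseContext { unsigned groups; };
    struct core_info { const struct RoseEngine *rose; char *state; };
    struct hs_scratch {
        struct core_info core_info;
        struct RoseContext tctxt;
    };

    /* Container-of style recovery used by the old call sites (assumed helper). */
    static struct hs_scratch *tctxtToScratch(struct RoseContext *tctxt) {
        return (struct hs_scratch *)((char *)tctxt -
                                     offsetof(struct hs_scratch, tctxt));
    }

    /* Old shape: only tctxt is passed; scratch, engine and state are re-derived. */
    static void do_work_old(struct RoseContext *tctxt) {
        struct hs_scratch *scratch = tctxtToScratch(tctxt);
        const struct RoseEngine *t = scratch->core_info.rose;
        printf("old: engine %d, state %p\n", t->id,
               (void *)scratch->core_info.state);
    }

    /* New shape: t and scratch are explicit parameters; state comes from scratch. */
    static void do_work_new(const struct RoseEngine *t,
                            struct hs_scratch *scratch) {
        printf("new: engine %d, state %p\n", t->id,
               (void *)scratch->core_info.state);
    }

    int main(void) {
        char state[16] = {0};
        struct RoseEngine engine = { 42 };
        struct hs_scratch scratch = { { &engine, state }, { 0 } };
        do_work_old(&scratch.tctxt);   /* old call shape */
        do_work_new(&engine, &scratch); /* new call shape, same data */
        return 0;
    }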

View File

@ -105,7 +105,6 @@ void init_outfixes_for_block(const struct RoseEngine *t,
size_t len = nfaRevAccelCheck(nfa, scratch->core_info.buf,
scratch->core_info.len);
if (len) {
struct RoseContext *tctxt = &scratch->tctxt;
u8 *activeArray = getActiveLeafArray(t, state);
const u32 activeArraySize = t->activeArrayCount;
const u32 qCount = t->queueCount;
@ -114,7 +113,7 @@ void init_outfixes_for_block(const struct RoseEngine *t,
fatbit_set(scratch->aqa, qCount, 0);
struct mq *q = scratch->queues;
initQueue(q, 0, t, tctxt);
initQueue(q, 0, t, scratch);
q->length = len; /* adjust for rev_accel */
nfaQueueInitState(nfa, q);
pushQueueAt(q, 0, MQE_START, 0);
@ -258,11 +257,11 @@ void roseBlockExec_i(const struct RoseEngine *t, struct hs_scratch *scratch,
}
exit:;
if (cleanUpDelayed(length, 0, scratch) == HWLM_TERMINATE_MATCHING) {
if (cleanUpDelayed(t, scratch, length, 0) == HWLM_TERMINATE_MATCHING) {
return;
}
assert(!can_stop_matching(scratch));
roseCatchUpTo(t, state, length, scratch, 0);
roseCatchUpTo(t, scratch, length, 0);
}

View File

@ -41,9 +41,9 @@ typedef struct queue_match PQ_T;
#include "util/pqueue.h"
static really_inline
int handleReportInternally(struct hs_scratch *scratch, ReportID id,
int handleReportInternally(const struct RoseEngine *t,
struct hs_scratch *scratch, ReportID id,
u64a offset) {
const struct RoseEngine *t = scratch->core_info.rose;
const struct internal_report *ri = getInternalReport(t, id);
if (ri->type == EXTERNAL_CALLBACK) {
return 0;
@ -53,7 +53,7 @@ int handleReportInternally(struct hs_scratch *scratch, ReportID id,
return 1;
}
if (ri->type == INTERNAL_ROSE_CHAIN) {
roseHandleChainMatch(t, id, offset, &scratch->tctxt, 0, 1);
roseHandleChainMatch(t, scratch, id, offset, 0, 1);
return 1;
}
@ -61,9 +61,9 @@ int handleReportInternally(struct hs_scratch *scratch, ReportID id,
}
static really_inline
int handleReportInternallyNoChain(struct hs_scratch *scratch, ReportID id,
int handleReportInternallyNoChain(const struct RoseEngine *t,
struct hs_scratch *scratch, ReportID id,
u64a offset) {
const struct RoseEngine *t = scratch->core_info.rose;
const struct internal_report *ri = getInternalReport(t, id);
if (ri->type == EXTERNAL_CALLBACK) {
return 0;
@ -100,11 +100,11 @@ void currentAnchoredMatch(const struct RoseEngine *t,
}
static rose_inline
void nextAnchoredMatch(const struct RoseEngine *t, struct RoseContext *tctxt,
void nextAnchoredMatch(const struct RoseEngine *t, struct hs_scratch *scratch,
ReportID *reportId, u64a *end) {
struct RoseContext *tctxt = &scratch->tctxt;
assert(tctxt->curr_anchored_loc != MMB_INVALID);
struct hs_scratch *scratch = tctxtToScratch(tctxt);
struct fatbit **anchoredRows = getAnchoredLog(scratch);
u32 region_width = t->anchoredMatches;
@ -141,8 +141,8 @@ void nextAnchoredMatch(const struct RoseEngine *t, struct RoseContext *tctxt,
}
static really_inline
void deactivateQueue(u8 *aa, u32 qi, struct hs_scratch *scratch) {
const struct RoseEngine *t = scratch->core_info.rose;
void deactivateQueue(const struct RoseEngine *t, u8 *aa, u32 qi,
struct hs_scratch *scratch) {
u32 aaCount = t->activeArrayCount;
u32 qCount = t->queueCount;
@ -160,7 +160,7 @@ void ensureQueueActive(const struct RoseEngine *t, u32 qi, u32 qCount,
struct mq *q, struct hs_scratch *scratch) {
if (!fatbit_set(scratch->aqa, qCount, qi)) {
DEBUG_PRINTF("initing %u\n", qi);
initQueue(q, qi, t, &scratch->tctxt);
initQueue(q, qi, t, scratch);
loadStreamState(q->nfa, q, 0);
pushQueueAt(q, 0, MQE_START, 0);
}
@ -211,7 +211,8 @@ s64a pq_top_loc(struct catchup_pq *pq) {
/* requires that we are the top item on the pq */
static really_inline
hwlmcb_rv_t runExistingNfaToNextMatch(u32 qi, struct mq *q, s64a loc,
hwlmcb_rv_t runExistingNfaToNextMatch(const struct RoseEngine *t, u32 qi,
struct mq *q, s64a loc,
struct hs_scratch *scratch, u8 *aa,
char report_curr) {
assert(pq_top(scratch->catchup_pq.qm)->queue == qi);
@ -242,7 +243,7 @@ hwlmcb_rv_t runExistingNfaToNextMatch(u32 qi, struct mq *q, s64a loc,
return HWLM_TERMINATE_MATCHING;
}
deactivateQueue(aa, qi, scratch);
deactivateQueue(t, aa, qi, scratch);
} else if (q->cur == q->end) {
DEBUG_PRINTF("queue %u finished, nfa lives\n", qi);
q->cur = q->end = 0;
@ -267,7 +268,8 @@ hwlmcb_rv_t runExistingNfaToNextMatch(u32 qi, struct mq *q, s64a loc,
}
static really_inline
hwlmcb_rv_t runNewNfaToNextMatch(u32 qi, struct mq *q, s64a loc,
hwlmcb_rv_t runNewNfaToNextMatch(const struct RoseEngine *t, u32 qi,
struct mq *q, s64a loc,
struct hs_scratch *scratch, u8 *aa,
s64a report_ok_loc) {
assert(!q->report_current);
@ -300,7 +302,7 @@ restart:
return HWLM_TERMINATE_MATCHING;
}
deactivateQueue(aa, qi, scratch);
deactivateQueue(t, aa, qi, scratch);
} else if (q->cur == q->end) {
DEBUG_PRINTF("queue %u finished, nfa lives\n", qi);
q->cur = q->end = 0;
@ -327,6 +329,7 @@ static UNUSED
int roseNfaFinalBlastAdaptor(u64a offset, ReportID id, void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("called\n");
@ -334,7 +337,7 @@ int roseNfaFinalBlastAdaptor(u64a offset, ReportID id, void *context) {
offset, id);
updateLastMatchOffset(tctxt, offset);
if (handleReportInternallyNoChain(scratch, id, offset)) {
if (handleReportInternallyNoChain(t, scratch, id, offset)) {
return MO_CONTINUE_MATCHING;
}
@ -345,7 +348,7 @@ int roseNfaFinalBlastAdaptor(u64a offset, ReportID id, void *context) {
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
return !roseSuffixIsExhausted(scratch->core_info.rose, 0,
return !roseSuffixIsExhausted(t, 0,
scratch->core_info.exhaustionVector);
}
}
@ -356,6 +359,7 @@ int roseNfaFinalBlastAdaptorNoInternal(u64a offset, ReportID id,
void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("called\n");
/* chained nfas are run under the control of the anchored catchup */
@ -371,7 +375,7 @@ int roseNfaFinalBlastAdaptorNoInternal(u64a offset, ReportID id,
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
return !roseSuffixIsExhausted(scratch->core_info.rose, 0,
return !roseSuffixIsExhausted(t, 0,
scratch->core_info.exhaustionVector);
}
}
@ -395,7 +399,7 @@ hwlmcb_rv_t add_to_queue(const struct RoseEngine *t, struct mq *queues,
if (roseSuffixInfoIsExhausted(t, info,
scratch->core_info.exhaustionVector)) {
deactivateQueue(aa, qi, scratch);
deactivateQueue(t, aa, qi, scratch);
return HWLM_CONTINUE_MATCHING;
}
@ -408,7 +412,7 @@ hwlmcb_rv_t add_to_queue(const struct RoseEngine *t, struct mq *queues,
ensureEnd(q, qi, loc);
return runNewNfaToNextMatch(qi, q, loc, scratch, aa, report_ok_loc);
return runNewNfaToNextMatch(t, qi, q, loc, scratch, aa, report_ok_loc);
}
static really_inline
@ -429,8 +433,9 @@ s64a findSecondPlace(struct catchup_pq *pq, s64a loc_limit) {
}
}
hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, char *state, s64a loc,
hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, s64a loc,
struct hs_scratch *scratch) {
char *state = scratch->core_info.state;
struct mq *queues = scratch->queues;
u8 *aa = getActiveLeafArray(t, state);
UNUSED u32 aaCount = t->activeArrayCount;
@ -453,7 +458,7 @@ hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, char *state, s64a loc,
if (roseSuffixInfoIsExhausted(t, info,
scratch->core_info.exhaustionVector)) {
deactivateQueue(aa, qi, scratch);
deactivateQueue(t, aa, qi, scratch);
goto done;
}
@ -487,7 +492,7 @@ hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, char *state, s64a loc,
if (!next_pos_match_loc) { /* 0 means dead */
DEBUG_PRINTF("mpv is pining for the fjords\n");
if (can_stop_matching(scratch)) {
deactivateQueue(aa, qi, scratch);
deactivateQueue(t, aa, qi, scratch);
return HWLM_TERMINATE_MATCHING;
}
@ -527,9 +532,8 @@ int roseNfaBlastAdaptor(u64a offset, ReportID id, void *context) {
DEBUG_PRINTF("called\n");
if (ri->type != INTERNAL_ROSE_CHAIN) {
/* INTERNAL_ROSE_CHAIN are not visible externally */
if (roseCatchUpMPV(t, scratch->core_info.state,
offset - scratch->core_info.buf_offset, scratch)
== HWLM_TERMINATE_MATCHING) {
if (roseCatchUpMPV(t, offset - scratch->core_info.buf_offset,
scratch) == HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("done\n");
return MO_HALT_MATCHING;
}
@ -538,7 +542,7 @@ int roseNfaBlastAdaptor(u64a offset, ReportID id, void *context) {
DEBUG_PRINTF("masky got himself a blasted match @%llu id %u !woot!\n",
offset, id);
if (handleReportInternally(scratch, id, offset)) {
if (handleReportInternally(t, scratch, id, offset)) {
return MO_CONTINUE_MATCHING;
}
@ -563,9 +567,8 @@ int roseNfaBlastAdaptorNoInternal(u64a offset, ReportID id, void *context) {
const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("called\n");
if (roseCatchUpMPV(t, scratch->core_info.state,
offset - scratch->core_info.buf_offset,
scratch) == HWLM_TERMINATE_MATCHING) {
if (roseCatchUpMPV(t, offset - scratch->core_info.buf_offset, scratch) ==
HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("done\n");
return MO_HALT_MATCHING;
}
@ -590,13 +593,14 @@ static UNUSED
int roseNfaBlastAdaptorNoChain(u64a offset, ReportID id, void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("masky got himself a blasted match @%llu id %u !woot!\n",
offset, id);
updateLastMatchOffset(tctxt, offset);
if (handleReportInternallyNoChain(scratch, id, offset)) {
if (handleReportInternallyNoChain(t, scratch, id, offset)) {
return MO_CONTINUE_MATCHING;
}
@ -607,7 +611,7 @@ int roseNfaBlastAdaptorNoChain(u64a offset, ReportID id, void *context) {
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
return !roseSuffixIsExhausted(scratch->core_info.rose, tctxt->curr_qi,
return !roseSuffixIsExhausted(t, tctxt->curr_qi,
scratch->core_info.exhaustionVector);
}
}
@ -617,6 +621,7 @@ int roseNfaBlastAdaptorNoInternalNoChain(u64a offset, ReportID id,
void *context) {
struct RoseContext *tctxt = context;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
const struct RoseEngine *t = scratch->core_info.rose;
/* chained nfas are run under the control of the anchored catchup */
@ -631,7 +636,7 @@ int roseNfaBlastAdaptorNoInternalNoChain(u64a offset, ReportID id,
return MO_CONTINUE_MATCHING;
} else {
assert(cb_rv == MO_CONTINUE_MATCHING);
return !roseSuffixIsExhausted(scratch->core_info.rose, tctxt->curr_qi,
return !roseSuffixIsExhausted(t, tctxt->curr_qi,
scratch->core_info.exhaustionVector);
}
}
@ -644,9 +649,8 @@ int roseNfaBlastSomAdaptor(u64a from_offset, u64a offset, ReportID id,
const struct RoseEngine *t = scratch->core_info.rose;
DEBUG_PRINTF("called\n");
if (roseCatchUpMPV(t, scratch->core_info.state,
offset - scratch->core_info.buf_offset,
scratch) == HWLM_TERMINATE_MATCHING) {
if (roseCatchUpMPV(t, offset - scratch->core_info.buf_offset, scratch) ==
HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("roseCatchUpNfas done\n");
return MO_HALT_MATCHING;
}
@ -675,12 +679,12 @@ int roseNfaAdaptor(u64a offset, ReportID id, void *context) {
updateLastMatchOffset(tctxt, offset);
struct hs_scratch *scratch = tctxtToScratch(tctxt);
if (handleReportInternally(scratch, id, offset)) {
const struct RoseEngine *t = scratch->core_info.rose;
if (handleReportInternally(t, scratch, id, offset)) {
return MO_CONTINUE_MATCHING;
}
int cb_rv = tctxt->cb(offset, id, scratch);
return cb_rv;
return tctxt->cb(offset, id, scratch);
}
int roseNfaAdaptorNoInternal(u64a offset, ReportID id, void *context) {
@ -748,7 +752,7 @@ hwlmcb_rv_t buildSufPQ_final(const struct RoseEngine *t, s64a report_ok_loc,
if (roseSuffixInfoIsExhausted(t, info,
scratch->core_info.exhaustionVector)) {
deactivateQueue(aa, a_qi, scratch);
deactivateQueue(t, aa, a_qi, scratch);
return HWLM_CONTINUE_MATCHING;
}
@ -776,7 +780,7 @@ hwlmcb_rv_t buildSufPQ_final(const struct RoseEngine *t, s64a report_ok_loc,
return HWLM_TERMINATE_MATCHING;
}
deactivateQueue(aa, a_qi, scratch);
deactivateQueue(t, aa, a_qi, scratch);
} else if (q->cur == q->end) {
DEBUG_PRINTF("queue %u finished, nfa lives [%lld]\n", a_qi, final_loc);
@ -792,8 +796,8 @@ hwlmcb_rv_t buildSufPQ_final(const struct RoseEngine *t, s64a report_ok_loc,
assert(second_place_loc < final_loc);
assert(q_cur_loc(q) >= second_place_loc);
if (runNewNfaToNextMatch(a_qi, q, final_loc, scratch, aa, report_ok_loc)
== HWLM_TERMINATE_MATCHING) {
if (runNewNfaToNextMatch(t, a_qi, q, final_loc, scratch, aa,
report_ok_loc) == HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("roseCatchUpNfas done\n");
return HWLM_TERMINATE_MATCHING;
}
@ -833,7 +837,7 @@ void streamInitSufPQ(const struct RoseEngine *t, char *state,
pq_insert_with(&scratch->catchup_pq, scratch, qi, qcl);
} else if (!alive) {
deactivateQueue(aa, qi, scratch);
deactivateQueue(t, aa, qi, scratch);
} else {
assert(q->cur == q->end);
/* TODO: can this be simplified? the nfa will never produce any
@ -880,7 +884,7 @@ void blockInitSufPQ(const struct RoseEngine *t, char *state,
mmbit_set(aa, aaCount, qi);
fatbit_set(aqa, qCount, qi);
struct mq *q = queues + qi;
initQueue(q, qi, t, &scratch->tctxt);
initQueue(q, qi, t, scratch);
q->length = len; /* adjust for rev_accel */
nfaQueueInitState(nfa, q);
pushQueueAt(q, 0, MQE_START, 0);
@ -897,7 +901,7 @@ void blockInitSufPQ(const struct RoseEngine *t, char *state,
pq_insert_with(&scratch->catchup_pq, scratch, qi, qcl);
} else if (!alive) {
deactivateQueue(aa, qi, scratch);
deactivateQueue(t, aa, qi, scratch);
} else {
assert(q->cur == q->end);
/* TODO: can this be simplified? the nfa will never produce any
@ -952,7 +956,7 @@ hwlmcb_rv_t buildSufPQ(const struct RoseEngine *t, char *state, s64a safe_loc,
s64a report_ok_loc = tctxt->minNonMpvMatchOffset + 1
- scratch->core_info.buf_offset;
hwlmcb_rv_t rv = roseCatchUpMPV(t, state, report_ok_loc, scratch);
hwlmcb_rv_t rv = roseCatchUpMPV(t, report_ok_loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
@ -989,7 +993,7 @@ hwlmcb_rv_t buildSufPQ(const struct RoseEngine *t, char *state, s64a safe_loc,
}
static never_inline
hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, char *state, s64a loc,
hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, s64a loc,
s64a final_loc, struct hs_scratch *scratch) {
struct RoseContext *tctxt = &scratch->tctxt;
assert(t->activeArrayCount);
@ -999,6 +1003,7 @@ hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, char *state, s64a loc,
DEBUG_PRINTF("min non mpv match offset %llu\n",
scratch->tctxt.minNonMpvMatchOffset);
char *state = scratch->core_info.state;
struct mq *queues = scratch->queues;
u8 *aa = getActiveLeafArray(t, state);
@ -1019,7 +1024,7 @@ hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, char *state, s64a loc,
}
/* catch up char matches to this point */
if (roseCatchUpMPV(t, state, match_loc, scratch)
if (roseCatchUpMPV(t, match_loc, scratch)
== HWLM_TERMINATE_MATCHING) {
DEBUG_PRINTF("roseCatchUpNfas done\n");
return HWLM_TERMINATE_MATCHING;
@ -1046,7 +1051,7 @@ hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, char *state, s64a loc,
DEBUG_PRINTF("second place %lld loc %lld\n", second_place_loc, loc);
if (second_place_loc == q_cur_loc(q)) {
if (runExistingNfaToNextMatch(qi, q, q_final_loc, scratch, aa, 1)
if (runExistingNfaToNextMatch(t, qi, q, q_final_loc, scratch, aa, 1)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
@ -1061,7 +1066,7 @@ hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, char *state, s64a loc,
return HWLM_TERMINATE_MATCHING;
}
deactivateQueue(aa, qi, scratch);
deactivateQueue(t, aa, qi, scratch);
pq_pop_nice(&scratch->catchup_pq);
} else if (q->cur == q->end) {
DEBUG_PRINTF("queue %u finished, nfa lives [%lld]\n", qi, loc);
@ -1075,7 +1080,7 @@ hwlmcb_rv_t roseCatchUpNfas(const struct RoseEngine *t, char *state, s64a loc,
} else {
DEBUG_PRINTF("queue %u not finished, %u/%u [%lld/%lld]\n",
qi, q->cur, q->end, q->items[q->cur].location, loc);
runExistingNfaToNextMatch(qi, q, q_final_loc, scratch, aa, 0);
runExistingNfaToNextMatch(t, qi, q, q_final_loc, scratch, aa, 0);
}
}
exit:;
@ -1085,16 +1090,16 @@ exit:;
}
static really_inline
hwlmcb_rv_t roseCatchUpNfasAndMpv(const struct RoseEngine *t, char *state,
hwlmcb_rv_t roseCatchUpNfasAndMpv(const struct RoseEngine *t,
s64a loc, s64a final_loc,
struct hs_scratch *scratch) {
hwlmcb_rv_t rv = roseCatchUpNfas(t, state, loc, final_loc, scratch);
hwlmcb_rv_t rv = roseCatchUpNfas(t, loc, final_loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
return roseCatchUpMPV(t, state, loc, scratch);
return roseCatchUpMPV(t, loc, scratch);
}
@ -1126,7 +1131,7 @@ hwlmcb_rv_t roseCatchUpAll_i(s64a loc, struct hs_scratch *scratch,
}
/* buildSufPQ may have caught only part of the pq upto anchored_end */
rv = roseCatchUpNfas(t, scratch->core_info.state,
rv = roseCatchUpNfas(t,
anchored_end - scratch->core_info.buf_offset, loc,
scratch);
@ -1137,7 +1142,7 @@ hwlmcb_rv_t roseCatchUpAll_i(s64a loc, struct hs_scratch *scratch,
while (anchored_report != MO_INVALID_IDX
&& anchored_end <= current_offset) {
if (anchored_end != tctxt->minMatchOffset) {
rv = roseCatchUpNfasAndMpv(t, scratch->core_info.state,
rv = roseCatchUpNfasAndMpv(t,
anchored_end - scratch->core_info.buf_offset,
loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
@ -1149,7 +1154,7 @@ hwlmcb_rv_t roseCatchUpAll_i(s64a loc, struct hs_scratch *scratch,
assert(anchored_end == tctxt->minMatchOffset);
updateLastMatchOffset(tctxt, anchored_end);
if (handleReportInternally(scratch, anchored_report, anchored_end)) {
if (handleReportInternally(t, scratch, anchored_report, anchored_end)) {
goto next;
}
@ -1159,7 +1164,7 @@ hwlmcb_rv_t roseCatchUpAll_i(s64a loc, struct hs_scratch *scratch,
return HWLM_TERMINATE_MATCHING;
}
next:
nextAnchoredMatch(t, tctxt, &anchored_report, &anchored_end);
nextAnchoredMatch(t, scratch, &anchored_report, &anchored_end);
DEBUG_PRINTF("catch up %u %llu\n", anchored_report, anchored_end);
}
@ -1169,7 +1174,7 @@ hwlmcb_rv_t roseCatchUpAll_i(s64a loc, struct hs_scratch *scratch,
return HWLM_CONTINUE_MATCHING;
}
rv = roseCatchUpNfas(t, scratch->core_info.state, loc, loc, scratch);
rv = roseCatchUpNfas(t, loc, loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
@ -1180,7 +1185,7 @@ hwlmcb_rv_t roseCatchUpAll_i(s64a loc, struct hs_scratch *scratch,
if (do_full_mpv) {
/* finish off any outstanding chained matches */
rv = roseCatchUpMPV(t, scratch->core_info.state, loc, scratch);
rv = roseCatchUpMPV(t, loc, scratch);
}
DEBUG_PRINTF("catchup all done %llu\n", current_offset);
@ -1212,12 +1217,12 @@ hwlmcb_rv_t roseCatchUpSufAndChains(s64a loc, struct hs_scratch *scratch) {
return rv;
}
rv = roseCatchUpNfas(t, state, loc, loc, scratch);
rv = roseCatchUpNfas(t, loc, loc, scratch);
if (rv != HWLM_CONTINUE_MATCHING) {
return rv;
}
rv = roseCatchUpMPV(t, state, loc, scratch);
rv = roseCatchUpMPV(t, loc, scratch);
assert(rv != HWLM_CONTINUE_MATCHING
|| scratch->catchup_pq.qm_size <= t->outfixEndQueue);
return rv;
@ -1237,7 +1242,7 @@ hwlmcb_rv_t roseCatchUpSuf(s64a loc, struct hs_scratch *scratch) {
return rv;
}
rv = roseCatchUpNfas(t, state, loc, loc, scratch);
rv = roseCatchUpNfas(t, loc, loc, scratch);
assert(rv != HWLM_CONTINUE_MATCHING ||
scratch->catchup_pq.qm_size <= t->outfixEndQueue);
@ -1264,7 +1269,7 @@ hwlmcb_rv_t roseCatchUpAnchoredOnly(s64a loc, struct hs_scratch *scratch) {
updateLastMatchOffset(tctxt, anchored_end);
/* as we require that there are no leaf nfas - there must be no nfa */
if (handleReportInternallyNoChain(scratch, anchored_report,
if (handleReportInternallyNoChain(t, scratch, anchored_report,
anchored_end)) {
goto next;
}
@ -1275,7 +1280,7 @@ hwlmcb_rv_t roseCatchUpAnchoredOnly(s64a loc, struct hs_scratch *scratch) {
return HWLM_TERMINATE_MATCHING;
}
next:
nextAnchoredMatch(t, tctxt, &anchored_report, &anchored_end);
nextAnchoredMatch(t, scratch, &anchored_report, &anchored_end);
DEBUG_PRINTF("catch up %u %llu\n", anchored_report, anchored_end);
}

View File

@ -72,8 +72,7 @@ hwlmcb_rv_t roseCatchUpSuf(s64a loc, struct hs_scratch *scratch);
/* will only catch mpv upto last reported external match */
hwlmcb_rv_t roseCatchUpAnchoredAndSuf(s64a loc, struct hs_scratch *scratch);
hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, char *state, s64a loc,
hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, s64a loc,
struct hs_scratch *scratch);
void blockInitSufPQ(const struct RoseEngine *t, char *state,
@ -82,8 +81,8 @@ void streamInitSufPQ(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch);
static really_inline
hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, char *state,
s64a loc, struct hs_scratch *scratch) {
hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, s64a loc,
struct hs_scratch *scratch) {
u64a cur_offset = loc + scratch->core_info.buf_offset;
assert(cur_offset >= scratch->tctxt.minMatchOffset);
@ -115,7 +114,7 @@ hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, char *state,
assert(t->outfixBeginQueue == 1); /* if it exists mpv is queue 0 */
u8 *aa = getActiveLeafArray(t, state);
u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
u32 aaCount = t->activeArrayCount;
if (!mmbit_isset(aa, aaCount, 0)){
@ -126,7 +125,7 @@ hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, char *state,
* they may have events pushed on during this process which may be before
* the catch up point */
return roseCatchUpMPV_i(t, state, loc, scratch);
return roseCatchUpMPV_i(t, loc, scratch);
}
static really_inline
@ -140,8 +139,9 @@ u64a currentAnchoredEnd(const struct RoseEngine *t, struct RoseContext *tctxt) {
/* catches up nfas, anchored matches and the mpv */
static rose_inline
hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t, char *state, u64a end,
struct hs_scratch *scratch, char in_anchored) {
hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t,
struct hs_scratch *scratch, u64a end,
char in_anchored) {
/* no need to catch up if we are at the same offset as last time */
if (end <= scratch->tctxt.minMatchOffset) {
/* we must already be up to date */
@ -149,11 +149,12 @@ hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t, char *state, u64a end,
return HWLM_CONTINUE_MATCHING;
}
char *state = scratch->core_info.state;
s64a loc = end - scratch->core_info.buf_offset;
if (end <= scratch->tctxt.minNonMpvMatchOffset) {
/* only need to catch up the mpv */
return roseCatchUpMPV(t, state, loc, scratch);
return roseCatchUpMPV(t, loc, scratch);
}
assert(scratch->tctxt.minMatchOffset >= scratch->core_info.buf_offset);
@ -188,8 +189,8 @@ hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t, char *state, u64a end,
* and suf/outfixes. The MPV will be run only to intersperse matches in
* the output match stream if external matches are raised. */
static rose_inline
hwlmcb_rv_t roseCatchUpMpvFeeders(const struct RoseEngine *t, char *state,
u64a end, struct hs_scratch *scratch,
hwlmcb_rv_t roseCatchUpMpvFeeders(const struct RoseEngine *t,
struct hs_scratch *scratch, u64a end,
char in_anchored) {
/* no need to catch up if we are at the same offset as last time */
if (end <= scratch->tctxt.minNonMpvMatchOffset) {
@ -213,6 +214,7 @@ hwlmcb_rv_t roseCatchUpMpvFeeders(const struct RoseEngine *t, char *state,
/* sadly, this branch rarely gets taken as the mpv itself is usually
* alive. */
char *state = scratch->core_info.state;
if (!mmbit_any(getActiveLeafArray(t, state), t->activeArrayCount)) {
scratch->tctxt.minNonMpvMatchOffset = end;
return HWLM_CONTINUE_MATCHING;

View File

@ -98,7 +98,7 @@ hwlmcb_rv_t roseEodRunMatcher(const struct RoseEngine *t, u64a offset,
hwlmExec(etable, eod_data, eod_len, adj, roseCallback, tctxt, tctxt->groups);
// We may need to fire delayed matches
return cleanUpDelayed(0, offset, scratch);
return cleanUpDelayed(t, scratch, 0, offset);
}
static rose_inline
@ -111,8 +111,8 @@ int roseEodRunIterator(const struct RoseEngine *t, u64a offset,
DEBUG_PRINTF("running eod program at offset %u\n", t->eodIterProgramOffset);
const size_t match_len = 0;
if (roseRunProgram(t, t->eodIterProgramOffset, offset, match_len,
&(scratch->tctxt), 0) == HWLM_TERMINATE_MATCHING) {
if (roseRunProgram(t, scratch, t->eodIterProgramOffset, offset, match_len,
0) == HWLM_TERMINATE_MATCHING) {
return MO_HALT_MATCHING;
}
@ -203,12 +203,10 @@ int roseCheckNfaEod(const struct RoseEngine *t, char *state,
}
static rose_inline
void cleanupAfterEodMatcher(const struct RoseEngine *t, char *state,
u64a offset, struct hs_scratch *scratch) {
struct RoseContext *tctxt = &scratch->tctxt;
void cleanupAfterEodMatcher(const struct RoseEngine *t, u64a offset,
struct hs_scratch *scratch) {
// Flush history to make sure it's consistent.
roseFlushLastByteHistory(t, state, offset, tctxt);
roseFlushLastByteHistory(t, scratch, offset);
}
static rose_inline
@ -265,8 +263,8 @@ int roseRunEodProgram(const struct RoseEngine *t, u64a offset,
assert(!scratch->tctxt.filledDelayedSlots);
const size_t match_len = 0;
if (roseRunProgram(t, t->eodProgramOffset, offset, match_len,
&scratch->tctxt, 0) == HWLM_TERMINATE_MATCHING) {
if (roseRunProgram(t, scratch, t->eodProgramOffset, offset, match_len, 0) ==
HWLM_TERMINATE_MATCHING) {
return MO_HALT_MATCHING;
}
@ -313,7 +311,7 @@ void roseEodExec_i(const struct RoseEngine *t, char *state, u64a offset,
return;
}
cleanupAfterEodMatcher(t, state, offset, scratch);
cleanupAfterEodMatcher(t, offset, scratch);
// Fire any new EOD reports.
if (roseEodRunIterator(t, offset, scratch) == MO_HALT_MATCHING) {
@ -350,10 +348,10 @@ void roseEodExec(const struct RoseEngine *t, u64a offset,
}
static rose_inline
void prepForEod(const struct RoseEngine *t, char *state, size_t length,
struct RoseContext *tctxt) {
roseFlushLastByteHistory(t, state, length, tctxt);
tctxt->lastEndOffset = length;
void prepForEod(const struct RoseEngine *t, struct hs_scratch *scratch,
size_t length) {
roseFlushLastByteHistory(t, scratch, length);
scratch->tctxt.lastEndOffset = length;
}
void roseBlockEodExec(const struct RoseEngine *t, u64a offset,
@ -367,7 +365,7 @@ void roseBlockEodExec(const struct RoseEngine *t, u64a offset,
char *state = scratch->core_info.state;
// Ensure that history is correct before we look for EOD matches
prepForEod(t, state, scratch->core_info.len, &scratch->tctxt);
prepForEod(t, scratch, scratch->core_info.len);
roseEodExec_i(t, state, offset, scratch, 0);
}

View File

@ -102,7 +102,7 @@ hwlmcb_rv_t roseDelayRebuildCallback(size_t start, size_t end, u32 id,
if (programOffset) {
const size_t match_len = end - start + 1;
UNUSED hwlmcb_rv_t rv =
roseRunProgram(t, programOffset, real_end, match_len, tctx, 0);
roseRunProgram(t, scratch, programOffset, real_end, match_len, 0);
assert(rv != HWLM_TERMINATE_MATCHING);
}
@ -121,10 +121,8 @@ hwlmcb_rv_t ensureMpvQueueFlushed(const struct RoseEngine *t,
}
static rose_inline
void recordAnchoredMatch(struct RoseContext *tctxt, ReportID reportId,
u64a end) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
const struct RoseEngine *t = scratch->core_info.rose;
void recordAnchoredMatch(const struct RoseEngine *t, struct hs_scratch *scratch,
ReportID reportId, u64a end) {
struct fatbit **anchoredRows = getAnchoredLog(scratch);
DEBUG_PRINTF("record %u @ %llu\n", reportId, end);
@ -145,11 +143,10 @@ void recordAnchoredMatch(struct RoseContext *tctxt, ReportID reportId,
}
static rose_inline
void recordAnchoredLiteralMatch(struct RoseContext *tctxt, u32 literal_id,
void recordAnchoredLiteralMatch(const struct RoseEngine *t,
struct hs_scratch *scratch, u32 literal_id,
u64a end) {
assert(end);
struct hs_scratch *scratch = tctxtToScratch(tctxt);
const struct RoseEngine *t = scratch->core_info.rose;
struct fatbit **anchoredLiteralRows = getAnchoredLiteralLog(scratch);
DEBUG_PRINTF("record %u @ %llu\n", literal_id, end);
@ -167,10 +164,9 @@ void recordAnchoredLiteralMatch(struct RoseContext *tctxt, u32 literal_id,
fatbit_set(anchoredLiteralRows[end - 1], t->anchored_count, rel_idx);
}
hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t, ReportID r,
u64a end, struct RoseContext *tctxt,
char in_anchored, char in_catchup) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t,
struct hs_scratch *scratch, ReportID r,
u64a end, char in_anchored, char in_catchup) {
struct core_info *ci = &scratch->core_info;
u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
@ -197,7 +193,7 @@ hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t, ReportID r,
assert(loc <= (s64a)ci->len && loc >= -(s64a)ci->hlen);
if (!mmbit_set(aa, aaCount, qi)) {
initQueue(q, qi, t, tctxt);
initQueue(q, qi, t, scratch);
nfaQueueInitState(q->nfa, q);
pushQueueAt(q, 0, MQE_START, loc);
fatbit_set(activeQueues, qCount, qi);
@ -206,7 +202,7 @@ hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t, ReportID r,
/* nfa only needs one top; we can go home now */
return HWLM_CONTINUE_MATCHING;
} else if (!fatbit_set(activeQueues, qCount, qi)) {
initQueue(q, qi, t, tctxt);
initQueue(q, qi, t, scratch);
loadStreamState(q->nfa, q, 0);
pushQueueAt(q, 0, MQE_START, 0);
} else if (isQueueFull(q)) {
@ -238,7 +234,7 @@ event_enqueued:
pushQueueNoMerge(q, MQE_END, loc);
char alive = nfaQueueExec(q->nfa, q, loc);
if (alive) {
tctxt->mpv_inactive = 0;
scratch->tctxt.mpv_inactive = 0;
q->cur = q->end = 0;
pushQueueAt(q, 0, MQE_START, loc);
} else {
@ -248,8 +244,8 @@ event_enqueued:
}
DEBUG_PRINTF("added mpv event at %lld\n", loc);
tctxt->next_mpv_offset = 0; /* the top event may result in matches earlier
* than expected */
scratch->tctxt.next_mpv_offset = 0; /* the top event may result in matches
* earlier than expected */
return HWLM_CONTINUE_MATCHING;
}
@ -278,12 +274,10 @@ hwlmcb_rv_t roseHandleMatch(const struct RoseEngine *t, ReportID id, u64a end,
/* handles catchup, som, cb, etc */
static really_inline
hwlmcb_rv_t roseHandleReport(const struct RoseEngine *t, char *state,
struct RoseContext *tctxt, ReportID id,
hwlmcb_rv_t roseHandleReport(const struct RoseEngine *t,
struct hs_scratch *scratch, ReportID id,
u64a offset, char in_anchored) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
if (roseCatchUpTo(t, state, offset, scratch, in_anchored) ==
if (roseCatchUpTo(t, scratch, offset, in_anchored) ==
HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
@ -294,7 +288,7 @@ hwlmcb_rv_t roseHandleReport(const struct RoseEngine *t, char *state,
roseHandleSom(t, scratch, id, offset);
return HWLM_CONTINUE_MATCHING;
} else if (ri->type == INTERNAL_ROSE_CHAIN) {
return roseCatchUpAndHandleChainMatch(t, state, id, offset, tctxt,
return roseCatchUpAndHandleChainMatch(t, scratch, id, offset,
in_anchored);
}
}
@ -304,25 +298,23 @@ hwlmcb_rv_t roseHandleReport(const struct RoseEngine *t, char *state,
static really_inline
hwlmcb_rv_t roseHandleAnchoredDirectReport(const struct RoseEngine *t,
char *state,
struct RoseContext *tctxt,
struct hs_scratch *scratch,
u64a real_end, ReportID report) {
DEBUG_PRINTF("direct report %u, real_end=%llu\n", report, real_end);
if (real_end > t->maxSafeAnchoredDROffset) {
DEBUG_PRINTF("match in overlapped anchored region --> stash\n");
recordAnchoredMatch(tctxt, report, real_end);
recordAnchoredMatch(t, scratch, report, real_end);
return HWLM_CONTINUE_MATCHING;
}
return roseHandleReport(t, state, tctxt, report, real_end,
1 /* in anchored */);
return roseHandleReport(t, scratch, report, real_end, 1 /* in anchored */);
}
int roseAnchoredCallback(u64a end, u32 id, void *ctx) {
struct RoseContext *tctxt = ctx;
struct core_info *ci = &tctxtToScratch(tctxt)->core_info;
char *state = ci->state;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
struct core_info *ci = &scratch->core_info;
const struct RoseEngine *t = ci->rose;
u64a real_end = ci->buf_offset + end; // index after last byte
@ -330,7 +322,7 @@ int roseAnchoredCallback(u64a end, u32 id, void *ctx) {
DEBUG_PRINTF("MATCH id=%u offsets=[???,%llu]\n", id, real_end);
DEBUG_PRINTF("STATE groups=0x%016llx\n", tctxt->groups);
if (can_stop_matching(tctxtToScratch(tctxt))) {
if (can_stop_matching(scratch)) {
DEBUG_PRINTF("received a match when we're already dead!\n");
return MO_HALT_MATCHING;
}
@ -351,8 +343,7 @@ int roseAnchoredCallback(u64a end, u32 id, void *ctx) {
(const ReportID *)((const char *)t + t->multidirectOffset) +
mdr_offset;
for (; *report != MO_INVALID_IDX; report++) {
rv = roseHandleAnchoredDirectReport(t, state, tctxt, real_end,
*report);
rv = roseHandleAnchoredDirectReport(t, scratch, real_end, *report);
if (rv == HWLM_TERMINATE_MATCHING) {
return MO_HALT_MATCHING;
}
@ -361,7 +352,7 @@ int roseAnchoredCallback(u64a end, u32 id, void *ctx) {
} else if (isLiteralDR(id)) {
// Single direct report.
ReportID report = literalToReport(id);
rv = roseHandleAnchoredDirectReport(t, state, tctxt, real_end, report);
rv = roseHandleAnchoredDirectReport(t, scratch, real_end, report);
if (rv == HWLM_TERMINATE_MATCHING) {
return MO_HALT_MATCHING;
}
@ -379,14 +370,14 @@ int roseAnchoredCallback(u64a end, u32 id, void *ctx) {
DEBUG_PRINTF("literal id=%u\n", id);
if (real_end <= t->floatingMinLiteralMatchOffset) {
roseFlushLastByteHistory(t, state, real_end, tctxt);
roseFlushLastByteHistory(t, scratch, real_end);
tctxt->lastEndOffset = real_end;
}
const size_t match_len = 0;
if (roseRunProgram(t, programOffset, real_end, match_len, tctxt, 1) ==
if (roseRunProgram(t, scratch, programOffset, real_end, match_len, 1) ==
HWLM_TERMINATE_MATCHING) {
assert(can_stop_matching(tctxtToScratch(tctxt)));
assert(can_stop_matching(scratch));
DEBUG_PRINTF("caller requested termination\n");
return MO_HALT_MATCHING;
}
@ -394,7 +385,7 @@ int roseAnchoredCallback(u64a end, u32 id, void *ctx) {
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctxt->groups);
if (real_end > t->floatingMinLiteralMatchOffset) {
recordAnchoredLiteralMatch(tctxt, id, real_end);
recordAnchoredLiteralMatch(t, scratch, id, real_end);
}
return MO_CONTINUE_MATCHING;
@ -403,14 +394,10 @@ int roseAnchoredCallback(u64a end, u32 id, void *ctx) {
// Rose match-processing workhorse
/* assumes not in_anchored */
static really_inline
hwlmcb_rv_t roseProcessMatch_i(const struct RoseEngine *t, u64a end,
size_t match_len, u32 id,
struct RoseContext *tctxt, char in_delay_play,
hwlmcb_rv_t roseProcessMatch_i(const struct RoseEngine *t,
struct hs_scratch *scratch, u64a end,
size_t match_len, u32 id, char in_delay_play,
char in_anch_playback) {
/* assert(!tctxt->in_anchored); */
struct hs_scratch *scratch = tctxtToScratch(tctxt);
char *state = scratch->core_info.state;
DEBUG_PRINTF("id=%u\n", id);
if (!in_anch_playback && !in_delay_play) {
@ -422,7 +409,7 @@ hwlmcb_rv_t roseProcessMatch_i(const struct RoseEngine *t, u64a end,
mdr_offset;
for (; *report != MO_INVALID_IDX; report++) {
DEBUG_PRINTF("handle multi-direct report %u\n", *report);
hwlmcb_rv_t rv = roseHandleReport(t, state, tctxt, *report, end,
hwlmcb_rv_t rv = roseHandleReport(t, scratch, *report, end,
0 /* in anchored */);
if (rv == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
@ -433,40 +420,42 @@ hwlmcb_rv_t roseProcessMatch_i(const struct RoseEngine *t, u64a end,
// Single direct report.
ReportID report = literalToReport(id);
DEBUG_PRINTF("handle direct report %u\n", report);
return roseHandleReport(t, state, tctxt, report, end,
return roseHandleReport(t, scratch, report, end,
0 /* in anchored */);
}
}
assert(id < t->literalCount);
const u32 *programs = getByOffset(t, t->litProgramOffset);
return roseRunProgram(t, programs[id], end, match_len, tctxt, 0);
return roseRunProgram(t, scratch, programs[id], end, match_len, 0);
}
static never_inline
hwlmcb_rv_t roseProcessDelayedMatch(const struct RoseEngine *t, u64a end,
u32 id, struct RoseContext *tctxt) {
hwlmcb_rv_t roseProcessDelayedMatch(const struct RoseEngine *t,
struct hs_scratch *scratch, u64a end,
u32 id) {
size_t match_len = 0;
return roseProcessMatch_i(t, end, match_len, id, tctxt, 1, 0);
return roseProcessMatch_i(t, scratch, end, match_len, id, 1, 0);
}
static never_inline
hwlmcb_rv_t roseProcessDelayedAnchoredMatch(const struct RoseEngine *t,
u64a end, u32 id,
struct RoseContext *tctxt) {
struct hs_scratch *scratch,
u64a end, u32 id) {
size_t match_len = 0;
return roseProcessMatch_i(t, end, match_len, id, tctxt, 0, 1);
return roseProcessMatch_i(t, scratch, end, match_len, id, 0, 1);
}
static really_inline
hwlmcb_rv_t roseProcessMainMatch(const struct RoseEngine *t, u64a end,
size_t match_len, u32 id,
struct RoseContext *tctxt) {
return roseProcessMatch_i(t, end, match_len, id, tctxt, 0, 0);
hwlmcb_rv_t roseProcessMainMatch(const struct RoseEngine *t,
struct hs_scratch *scratch, u64a end,
size_t match_len, u32 id) {
return roseProcessMatch_i(t, scratch, end, match_len, id, 0, 0);
}
static rose_inline
hwlmcb_rv_t playDelaySlot(const struct RoseEngine *t, struct RoseContext *tctxt,
hwlmcb_rv_t playDelaySlot(const struct RoseEngine *t,
struct hs_scratch *scratch,
struct fatbit **delaySlots, u32 vicIndex,
u64a offset) {
/* assert(!tctxt->in_anchored); */
@ -479,8 +468,8 @@ hwlmcb_rv_t playDelaySlot(const struct RoseEngine *t, struct RoseContext *tctxt,
return HWLM_CONTINUE_MATCHING;
}
struct hs_scratch *scratch = tctxtToScratch(tctxt);
roseFlushLastByteHistory(t, scratch->core_info.state, offset, tctxt);
struct RoseContext *tctxt = &scratch->tctxt;
roseFlushLastByteHistory(t, scratch, offset);
tctxt->lastEndOffset = offset;
for (u32 it = fatbit_iterate(vicSlot, delay_count, MMB_INVALID);
@ -490,7 +479,8 @@ hwlmcb_rv_t playDelaySlot(const struct RoseEngine *t, struct RoseContext *tctxt,
UNUSED rose_group old_groups = tctxt->groups;
DEBUG_PRINTF("DELAYED MATCH id=%u offset=%llu\n", literal_id, offset);
hwlmcb_rv_t rv = roseProcessDelayedMatch(t, offset, literal_id, tctxt);
hwlmcb_rv_t rv =
roseProcessDelayedMatch(t, scratch, offset, literal_id);
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctxt->groups);
/* delayed literals can't safely set groups.
@ -509,8 +499,9 @@ hwlmcb_rv_t playDelaySlot(const struct RoseEngine *t, struct RoseContext *tctxt,
static really_inline
hwlmcb_rv_t flushAnchoredLiteralAtLoc(const struct RoseEngine *t,
struct RoseContext *tctxt, u32 curr_loc) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
struct hs_scratch *scratch,
u32 curr_loc) {
struct RoseContext *tctxt = &scratch->tctxt;
struct fatbit *curr_row = getAnchoredLiteralLog(scratch)[curr_loc - 1];
u32 region_width = t->anchored_count;
@ -523,8 +514,8 @@ hwlmcb_rv_t flushAnchoredLiteralAtLoc(const struct RoseEngine *t,
rose_group old_groups = tctxt->groups;
DEBUG_PRINTF("ANCH REPLAY MATCH id=%u offset=%u\n", literal_id,
curr_loc);
hwlmcb_rv_t rv = roseProcessDelayedAnchoredMatch(t, curr_loc,
literal_id, tctxt);
hwlmcb_rv_t rv =
roseProcessDelayedAnchoredMatch(t, scratch, curr_loc, literal_id);
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctxt->groups);
/* anchored literals can't safely set groups.
@ -546,23 +537,22 @@ hwlmcb_rv_t flushAnchoredLiteralAtLoc(const struct RoseEngine *t,
}
static really_inline
u32 anchored_it_begin(struct RoseContext *tctxt) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
u32 anchored_it_begin(struct hs_scratch *scratch) {
struct RoseContext *tctxt = &scratch->tctxt;
if (tctxt->lastEndOffset >= scratch->anchored_literal_region_len) {
return MMB_INVALID;
}
u32 begin = tctxt->lastEndOffset;
begin--;
return bf64_iterate(tctxtToScratch(tctxt)->al_log_sum, begin);
return bf64_iterate(scratch->al_log_sum, begin);
}
static really_inline
hwlmcb_rv_t flushAnchoredLiterals(const struct RoseEngine *t,
struct RoseContext *tctxt,
struct hs_scratch *scratch,
u32 *anchored_it_param, u64a to_off) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
char *state = scratch->core_info.state;
struct RoseContext *tctxt = &scratch->tctxt;
u32 anchored_it = *anchored_it_param;
/* catch up any remaining anchored matches */
for (; anchored_it != MMB_INVALID && anchored_it < to_off;
@ -570,10 +560,10 @@ hwlmcb_rv_t flushAnchoredLiterals(const struct RoseEngine *t,
assert(anchored_it < scratch->anchored_literal_region_len);
DEBUG_PRINTF("loc_it = %u\n", anchored_it);
u32 curr_off = anchored_it + 1;
roseFlushLastByteHistory(t, state, curr_off, tctxt);
roseFlushLastByteHistory(t, scratch, curr_off);
tctxt->lastEndOffset = curr_off;
if (flushAnchoredLiteralAtLoc(t, tctxt, curr_off)
if (flushAnchoredLiteralAtLoc(t, scratch, curr_off)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
@ -584,22 +574,20 @@ hwlmcb_rv_t flushAnchoredLiterals(const struct RoseEngine *t,
}
static really_inline
hwlmcb_rv_t playVictims(const struct RoseEngine *t, struct RoseContext *tctxt,
hwlmcb_rv_t playVictims(const struct RoseEngine *t, struct hs_scratch *scratch,
u32 *anchored_it, u64a lastEnd, u64a victimDelaySlots,
struct fatbit **delaySlots) {
/* assert (!tctxt->in_anchored); */
while (victimDelaySlots) {
u32 vic = findAndClearLSB_64(&victimDelaySlots);
DEBUG_PRINTF("vic = %u\n", vic);
u64a vicOffset = vic + (lastEnd & ~(u64a)DELAY_MASK);
if (flushAnchoredLiterals(t, tctxt, anchored_it, vicOffset)
if (flushAnchoredLiterals(t, scratch, anchored_it, vicOffset)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
if (playDelaySlot(t, tctxt, delaySlots, vic % DELAY_SLOT_COUNT,
if (playDelaySlot(t, scratch, delaySlots, vic % DELAY_SLOT_COUNT,
vicOffset) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
@ -609,18 +597,16 @@ hwlmcb_rv_t playVictims(const struct RoseEngine *t, struct RoseContext *tctxt,
}
/* call flushQueuedLiterals instead */
hwlmcb_rv_t flushQueuedLiterals_i(struct RoseContext *tctxt, u64a currEnd) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
const struct RoseEngine *t = scratch->core_info.rose;
/* assert(!tctxt->in_anchored); */
hwlmcb_rv_t flushQueuedLiterals_i(const struct RoseEngine *t,
struct hs_scratch *scratch, u64a currEnd) {
struct RoseContext *tctxt = &scratch->tctxt;
u64a lastEnd = tctxt->delayLastEndOffset;
DEBUG_PRINTF("flushing backed up matches @%llu up from %llu\n", currEnd,
lastEnd);
assert(currEnd != lastEnd); /* checked in main entry point */
u32 anchored_it = anchored_it_begin(tctxt);
u32 anchored_it = anchored_it_begin(scratch);
if (!tctxt->filledDelayedSlots) {
DEBUG_PRINTF("no delayed, no flush\n");
@ -628,7 +614,7 @@ hwlmcb_rv_t flushQueuedLiterals_i(struct RoseContext *tctxt, u64a currEnd) {
}
{
struct fatbit **delaySlots = getDelaySlots(tctxtToScratch(tctxt));
struct fatbit **delaySlots = getDelaySlots(scratch);
u32 lastIndex = lastEnd & DELAY_MASK;
u32 currIndex = currEnd & DELAY_MASK;
@ -681,14 +667,14 @@ hwlmcb_rv_t flushQueuedLiterals_i(struct RoseContext *tctxt, u64a currEnd) {
second_half, victimDelaySlots, lastIndex);
}
if (playVictims(t, tctxt, &anchored_it, lastEnd, victimDelaySlots,
if (playVictims(t, scratch, &anchored_it, lastEnd, victimDelaySlots,
delaySlots) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
anchored_leftovers:;
hwlmcb_rv_t rv = flushAnchoredLiterals(t, tctxt, &anchored_it, currEnd);
hwlmcb_rv_t rv = flushAnchoredLiterals(t, scratch, &anchored_it, currEnd);
tctxt->delayLastEndOffset = currEnd;
return rv;
}
@ -715,11 +701,11 @@ hwlmcb_rv_t roseCallback(size_t start, size_t end, u32 id, void *ctxt) {
return HWLM_TERMINATE_MATCHING;
}
hwlmcb_rv_t rv = flushQueuedLiterals(tctx, real_end);
hwlmcb_rv_t rv = flushQueuedLiterals(t, scratch, real_end);
/* flushDelayed may have advanced tctx->lastEndOffset */
if (real_end >= t->floatingMinLiteralMatchOffset) {
roseFlushLastByteHistory(t, scratch->core_info.state, real_end, tctx);
roseFlushLastByteHistory(t, scratch, real_end);
tctx->lastEndOffset = real_end;
}
@ -728,7 +714,7 @@ hwlmcb_rv_t roseCallback(size_t start, size_t end, u32 id, void *ctxt) {
}
size_t match_len = end - start + 1;
rv = roseProcessMainMatch(t, real_end, match_len, id, tctx);
rv = roseProcessMainMatch(t, scratch, real_end, match_len, id);
DEBUG_PRINTF("DONE groups=0x%016llx\n", tctx->groups);

View File

@ -77,14 +77,13 @@ void resetAnchoredLog(const struct RoseEngine *t, struct hs_scratch *scratch) {
tctxt->curr_row_offset);
}
hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t, ReportID r,
u64a end, struct RoseContext *tctxt,
char in_anchored, char in_catchup);
hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t,
struct hs_scratch *scratch, ReportID r,
u64a end, char in_anchored, char in_catchup);
static really_inline
void initQueue(struct mq *q, u32 qi, const struct RoseEngine *t,
struct RoseContext *tctxt) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
struct hs_scratch *scratch) {
const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
assert(scratch->fullState);
q->nfa = getNfaByInfo(t, info);
@ -103,7 +102,7 @@ void initQueue(struct mq *q, u32 qi, const struct RoseEngine *t,
q->cb = roseNfaAdaptor;
}
q->som_cb = roseNfaSomAdaptor;
q->context = tctxt;
q->context = &scratch->tctxt;
q->report_current = 0;
DEBUG_PRINTF("qi=%u, offset=%llu, fullState=%u, streamState=%u, "
@ -114,8 +113,7 @@ void initQueue(struct mq *q, u32 qi, const struct RoseEngine *t,
static really_inline
void initRoseQueue(const struct RoseEngine *t, u32 qi,
const struct LeftNfaInfo *left,
struct RoseContext *tctxt) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
struct hs_scratch *scratch) {
struct mq *q = scratch->queues + qi;
const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
q->nfa = getNfaByInfo(t, info);
@ -219,36 +217,41 @@ char isZombie(const struct RoseEngine *t, const char *state,
return leftfixDelay[di] == OWB_ZOMBIE_ALWAYS_YES;
}
hwlmcb_rv_t flushQueuedLiterals_i(struct RoseContext *tctxt, u64a end);
hwlmcb_rv_t flushQueuedLiterals_i(const struct RoseEngine *t,
struct hs_scratch *scratch, u64a end);
static really_inline
hwlmcb_rv_t flushQueuedLiterals(struct RoseContext *tctxt, u64a end) {
hwlmcb_rv_t flushQueuedLiterals(const struct RoseEngine *t,
struct hs_scratch *scratch, u64a end) {
struct RoseContext *tctxt = &scratch->tctxt;
if (tctxt->delayLastEndOffset == end) {
DEBUG_PRINTF("no progress, no flush\n");
return HWLM_CONTINUE_MATCHING;
}
if (!tctxt->filledDelayedSlots && !tctxtToScratch(tctxt)->al_log_sum) {
if (!tctxt->filledDelayedSlots && !scratch->al_log_sum) {
tctxt->delayLastEndOffset = end;
return HWLM_CONTINUE_MATCHING;
}
return flushQueuedLiterals_i(tctxt, end);
return flushQueuedLiterals_i(t, scratch, end);
}
static really_inline
hwlmcb_rv_t cleanUpDelayed(size_t length, u64a offset,
struct hs_scratch *scratch) {
hwlmcb_rv_t cleanUpDelayed(const struct RoseEngine *t,
struct hs_scratch *scratch, size_t length,
u64a offset) {
if (can_stop_matching(scratch)) {
return HWLM_TERMINATE_MATCHING;
}
struct RoseContext *tctxt = &scratch->tctxt;
if (flushQueuedLiterals(tctxt, length + offset)
if (flushQueuedLiterals(t, scratch, length + offset)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
struct RoseContext *tctxt = &scratch->tctxt;
if (tctxt->filledDelayedSlots) {
DEBUG_PRINTF("dirty\n");
scratch->core_info.status |= STATUS_DELAY_DIRTY;
@ -263,13 +266,13 @@ hwlmcb_rv_t cleanUpDelayed(size_t length, u64a offset,
}
static rose_inline
void roseFlushLastByteHistory(const struct RoseEngine *t, char *state,
u64a currEnd, struct RoseContext *tctxt) {
void roseFlushLastByteHistory(const struct RoseEngine *t,
struct hs_scratch *scratch, u64a currEnd) {
if (!t->lastByteHistoryIterOffset) {
return;
}
struct hs_scratch *scratch = tctxtToScratch(tctxt);
struct RoseContext *tctxt = &scratch->tctxt;
struct core_info *ci = &scratch->core_info;
/* currEnd is last byte of string + 1 */
@ -286,7 +289,7 @@ void roseFlushLastByteHistory(const struct RoseEngine *t, char *state,
assert(ISALIGNED(it));
const u32 numStates = t->rolesWithStateCount;
void *role_state = getRoleState(state);
void *role_state = getRoleState(scratch->core_info.state);
struct mmbit_sparse_state si_state[MAX_SPARSE_ITER_STATES];

View File

@ -114,21 +114,22 @@ int roseCheckBenefits(const struct core_info *ci, u64a end, u32 mask_rewind,
}
static rose_inline
void rosePushDelayedMatch(const struct RoseEngine *t, u32 delay,
u32 delay_index, u64a offset,
struct RoseContext *tctxt) {
void rosePushDelayedMatch(const struct RoseEngine *t,
struct hs_scratch *scratch, u32 delay,
u32 delay_index, u64a offset) {
assert(delay);
const u32 src_slot_index = delay;
u32 slot_index = (src_slot_index + offset) & DELAY_MASK;
struct RoseContext *tctxt = &scratch->tctxt;
if (offset + src_slot_index <= tctxt->delayLastEndOffset) {
DEBUG_PRINTF("skip too late\n");
return;
}
const u32 delay_count = t->delay_count;
struct fatbit **delaySlots = getDelaySlots(tctxtToScratch(tctxt));
struct fatbit **delaySlots = getDelaySlots(scratch);
struct fatbit *slot = delaySlots[slot_index];
DEBUG_PRINTF("pushing tab %u into slot %u\n", delay_index, slot_index);
@ -248,16 +249,15 @@ hwlmcb_rv_t ensureQueueFlushed_i(const struct RoseEngine *t,
if (loc + scratch->core_info.buf_offset
<= tctxt->minNonMpvMatchOffset) {
DEBUG_PRINTF("flushing chained\n");
if (roseCatchUpMPV(t, scratch->core_info.state, loc,
scratch) == HWLM_TERMINATE_MATCHING) {
if (roseCatchUpMPV(t, loc, scratch) ==
HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
goto done_queue_empty;
}
}
if (roseCatchUpTo(t, scratch->core_info.state,
loc + scratch->core_info.buf_offset, scratch,
if (roseCatchUpTo(t, scratch, loc + scratch->core_info.buf_offset,
in_anchored) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
@ -266,14 +266,13 @@ hwlmcb_rv_t ensureQueueFlushed_i(const struct RoseEngine *t,
assert(is_mpv);
DEBUG_PRINTF("flushing chained\n");
tctxt->next_mpv_offset = 0; /* force us to catch the mpv */
if (roseCatchUpMPV(t, scratch->core_info.state, loc, scratch)
== HWLM_TERMINATE_MATCHING) {
if (roseCatchUpMPV(t, loc, scratch) == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
done_queue_empty:
if (!mmbit_set(aa, aaCount, qi)) {
initQueue(q, qi, t, tctxt);
initQueue(q, qi, t, scratch);
nfaQueueInitState(q->nfa, q);
pushQueueAt(q, 0, MQE_START, loc);
fatbit_set(activeQueues, qCount, qi);
@ -292,26 +291,24 @@ hwlmcb_rv_t ensureQueueFlushed(const struct RoseEngine *t,
}
static rose_inline
hwlmcb_rv_t roseHandleSuffixTrigger(const struct RoseEngine *t,
u32 qi, u32 top, u64a som,
u64a end, struct RoseContext *tctxt,
char in_anchored) {
hwlmcb_rv_t roseTriggerSuffix(const struct RoseEngine *t,
struct hs_scratch *scratch, u32 qi, u32 top,
u64a som, u64a end, char in_anchored) {
DEBUG_PRINTF("suffix qi=%u, top event=%u\n", qi, top);
struct hs_scratch *scratch = tctxtToScratch(tctxt);
u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
struct core_info *ci = &scratch->core_info;
u8 *aa = getActiveLeafArray(t, ci->state);
const u32 aaCount = t->activeArrayCount;
const u32 qCount = t->queueCount;
struct mq *q = &scratch->queues[qi];
const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
const struct NFA *nfa = getNfaByInfo(t, info);
struct core_info *ci = &scratch->core_info;
s64a loc = (s64a)end - ci->buf_offset;
assert(loc <= (s64a)ci->len && loc >= -(s64a)ci->hlen);
if (!mmbit_set(aa, aaCount, qi)) {
initQueue(q, qi, t, tctxt);
initQueue(q, qi, t, scratch);
nfaQueueInitState(nfa, q);
pushQueueAt(q, 0, MQE_START, loc);
fatbit_set(scratch->aqa, qCount, qi);
@ -320,7 +317,7 @@ hwlmcb_rv_t roseHandleSuffixTrigger(const struct RoseEngine *t,
/* nfa only needs one top; we can go home now */
return HWLM_CONTINUE_MATCHING;
} else if (!fatbit_set(scratch->aqa, qCount, qi)) {
initQueue(q, qi, t, tctxt);
initQueue(q, qi, t, scratch);
loadStreamState(nfa, q, 0);
pushQueueAt(q, 0, MQE_START, 0);
} else if (isQueueFull(q)) {
@ -359,10 +356,8 @@ hwlmcb_rv_t roseHandleSuffixTrigger(const struct RoseEngine *t,
}
static really_inline
char roseTestLeftfix(const struct RoseEngine *t, u32 qi, u32 leftfixLag,
ReportID leftfixReport, u64a end,
struct RoseContext *tctxt) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
char roseTestLeftfix(const struct RoseEngine *t, struct hs_scratch *scratch,
u32 qi, u32 leftfixLag, ReportID leftfixReport, u64a end) {
struct core_info *ci = &scratch->core_info;
u32 ri = queueToLeftIndex(t, qi);
@ -400,7 +395,7 @@ char roseTestLeftfix(const struct RoseEngine *t, u32 qi, u32 leftfixLag,
if (!fatbit_set(scratch->aqa, qCount, qi)) {
DEBUG_PRINTF("initing q %u\n", qi);
initRoseQueue(t, qi, left, tctxt);
initRoseQueue(t, qi, left, scratch);
if (ci->buf_offset) { // there have been writes before us!
s32 sp;
if (left->transient) {
@ -470,7 +465,7 @@ char roseTestLeftfix(const struct RoseEngine *t, u32 qi, u32 leftfixLag,
DEBUG_PRINTF("leftfix %u died while trying to catch up\n", ri);
mmbit_unset(activeLeftArray, arCount, ri);
assert(!mmbit_isset(activeLeftArray, arCount, ri));
tctxt->groups &= left->squash_mask;
scratch->tctxt.groups &= left->squash_mask;
return 0;
}
@ -490,9 +485,9 @@ char roseTestLeftfix(const struct RoseEngine *t, u32 qi, u32 leftfixLag,
}
static rose_inline
void roseTriggerInfix(const struct RoseEngine *t, u64a start, u64a end, u32 qi,
u32 topEvent, u8 cancel, struct RoseContext *tctxt) {
struct core_info *ci = &tctxtToScratch(tctxt)->core_info;
void roseTriggerInfix(const struct RoseEngine *t, struct hs_scratch *scratch,
u64a start, u64a end, u32 qi, u32 topEvent, u8 cancel) {
struct core_info *ci = &scratch->core_info;
s64a loc = (s64a)end - ci->buf_offset;
u32 ri = queueToLeftIndex(t, qi);
@ -503,11 +498,10 @@ void roseTriggerInfix(const struct RoseEngine *t, u64a start, u64a end, u32 qi,
DEBUG_PRINTF("rose %u (qi=%u) event %u\n", ri, qi, topEvent);
struct mq *q = tctxtToScratch(tctxt)->queues + qi;
struct mq *q = scratch->queues + qi;
const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
struct hs_scratch *scratch = tctxtToScratch(tctxt);
char *state = scratch->core_info.state;
char *state = ci->state;
u8 *activeLeftArray = getActiveLeftArray(t, state);
const u32 arCount = t->activeLeftCount;
char alive = mmbit_set(activeLeftArray, arCount, ri);
@ -529,12 +523,12 @@ void roseTriggerInfix(const struct RoseEngine *t, u64a start, u64a end, u32 qi,
if (cancel) {
DEBUG_PRINTF("dominating top: (re)init\n");
fatbit_set(aqa, qCount, qi);
initRoseQueue(t, qi, left, tctxt);
initRoseQueue(t, qi, left, scratch);
pushQueueAt(q, 0, MQE_START, loc);
nfaQueueInitState(q->nfa, q);
} else if (!fatbit_set(aqa, qCount, qi)) {
DEBUG_PRINTF("initing %u\n", qi);
initRoseQueue(t, qi, left, tctxt);
initRoseQueue(t, qi, left, scratch);
if (alive) {
s32 sp = -(s32)loadRoseDelay(t, state, left);
pushQueueAt(q, 0, MQE_START, sp);
@ -590,17 +584,15 @@ hwlmcb_rv_t roseReport(const struct RoseEngine *t, struct hs_scratch *scratch,
* up */
static rose_inline
hwlmcb_rv_t roseCatchUpAndHandleChainMatch(const struct RoseEngine *t,
char *state, ReportID r, u64a end,
struct RoseContext *tctxt,
struct hs_scratch *scratch,
ReportID r, u64a end,
char in_anchored) {
struct hs_scratch *scratch = tctxtToScratch(tctxt);
if (roseCatchUpMpvFeeders(t, state, end, scratch, in_anchored)
== HWLM_TERMINATE_MATCHING) {
if (roseCatchUpMpvFeeders(t, scratch, end, in_anchored) ==
HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
return roseHandleChainMatch(t, r, end, tctxt, in_anchored, 0);
return roseHandleChainMatch(t, scratch, r, end, in_anchored, 0);
}
static really_inline
@ -667,13 +659,13 @@ int reachHasBit(const u8 *reach, u8 c) {
* are satisfied.
*/
static rose_inline
int roseCheckLookaround(const struct RoseEngine *t, u32 lookaroundIndex,
u32 lookaroundCount, u64a end,
struct RoseContext *tctxt) {
int roseCheckLookaround(const struct RoseEngine *t,
const struct hs_scratch *scratch, u32 lookaroundIndex,
u32 lookaroundCount, u64a end) {
assert(lookaroundIndex != MO_INVALID_IDX);
assert(lookaroundCount > 0);
const struct core_info *ci = &tctxtToScratch(tctxt)->core_info;
const struct core_info *ci = &scratch->core_info;
DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
ci->buf_offset, ci->buf_offset + ci->len);
@ -765,9 +757,8 @@ int roseNfaEarliestSom(u64a from_offset, UNUSED u64a offset, UNUSED ReportID id,
}
static rose_inline
u64a roseGetHaigSom(const struct RoseEngine *t, const u32 qi,
UNUSED const u32 leftfixLag,
struct RoseContext *tctxt) {
u64a roseGetHaigSom(const struct RoseEngine *t, struct hs_scratch *scratch,
const u32 qi, UNUSED const u32 leftfixLag) {
u32 ri = queueToLeftIndex(t, qi);
UNUSED const struct LeftNfaInfo *left = getLeftTable(t) + ri;
@ -778,7 +769,7 @@ u64a roseGetHaigSom(const struct RoseEngine *t, const u32 qi,
assert(leftfixLag <= left->maxLag);
struct mq *q = tctxtToScratch(tctxt)->queues + qi;
struct mq *q = scratch->queues + qi;
u64a start = ~0ULL;
@ -816,9 +807,9 @@ char roseCheckBounds(u64a end, u64a min_bound, u64a max_bound) {
}
static rose_inline
hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t, u32 programOffset,
u64a end, size_t match_len,
struct RoseContext *tctxt, char in_anchored) {
hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t,
struct hs_scratch *scratch, u32 programOffset,
u64a end, size_t match_len, char in_anchored) {
DEBUG_PRINTF("program begins at offset %u\n", programOffset);
assert(programOffset);
@ -837,7 +828,7 @@ hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t, u32 programOffset,
// allow the program to squash groups).
int work_done = 0;
struct hs_scratch *scratch = tctxtToScratch(tctxt);
struct RoseContext *tctxt = &scratch->tctxt;
assert(*(const u8 *)pc != ROSE_INSTR_END);
@ -922,7 +913,8 @@ hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t, u32 programOffset,
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(CHECK_LOOKAROUND) {
if (!roseCheckLookaround(t, ri->index, ri->count, end, tctxt)) {
if (!roseCheckLookaround(t, scratch, ri->index, ri->count,
end)) {
DEBUG_PRINTF("failed lookaround check\n");
assert(ri->fail_jump); // must progress
pc += ri->fail_jump;
@ -932,9 +924,9 @@ hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t, u32 programOffset,
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(CHECK_LEFTFIX) {
if (!roseTestLeftfix(t, ri->queue, ri->lag, ri->report, end,
tctxt)) {
DEBUG_PRINTF("failed lookaround check\n");
if (!roseTestLeftfix(t, scratch, ri->queue, ri->lag, ri->report,
end)) {
DEBUG_PRINTF("failed leftfix check\n");
assert(ri->fail_jump); // must progress
pc += ri->fail_jump;
continue;
@ -943,13 +935,13 @@ hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t, u32 programOffset,
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(PUSH_DELAYED) {
rosePushDelayedMatch(t, ri->delay, ri->index, end, tctxt);
rosePushDelayedMatch(t, scratch, ri->delay, ri->index, end);
}
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(CATCH_UP) {
if (roseCatchUpTo(t, scratch->core_info.state, end, scratch,
in_anchored) == HWLM_TERMINATE_MATCHING) {
if (roseCatchUpTo(t, scratch, end, in_anchored) ==
HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
@ -963,7 +955,7 @@ hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t, u32 programOffset,
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(SOM_LEFTFIX) {
som = roseGetHaigSom(t, ri->queue, ri->lag, tctxt);
som = roseGetHaigSom(t, scratch, ri->queue, ri->lag);
DEBUG_PRINTF("som from leftfix is %llu\n", som);
}
PROGRAM_NEXT_INSTRUCTION
@ -983,16 +975,16 @@ hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t, u32 programOffset,
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(TRIGGER_INFIX) {
roseTriggerInfix(t, som, end, ri->queue, ri->event, ri->cancel,
tctxt);
roseTriggerInfix(t, scratch, som, end, ri->queue, ri->event,
ri->cancel);
work_done = 1;
}
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(TRIGGER_SUFFIX) {
if (roseHandleSuffixTrigger(t, ri->queue, ri->event, som, end,
tctxt, in_anchored) ==
HWLM_TERMINATE_MATCHING) {
if (roseTriggerSuffix(t, scratch, ri->queue, ri->event, som,
end, in_anchored)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
work_done = 1;
@ -1037,9 +1029,9 @@ hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t, u32 programOffset,
PROGRAM_NEXT_INSTRUCTION
PROGRAM_CASE(REPORT_CHAIN) {
if (roseCatchUpAndHandleChainMatch(
t, scratch->core_info.state, ri->report, end,
tctxt, in_anchored) == HWLM_TERMINATE_MATCHING) {
if (roseCatchUpAndHandleChainMatch(t, scratch, ri->report, end,
in_anchored) ==
HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
work_done = 1;

View File

@ -190,7 +190,7 @@ found_miracle:
miracle_loc);
if (!q_active) {
fatbit_set(scratch->aqa, qCount, qi);
initRoseQueue(t, qi, left, &scratch->tctxt);
initRoseQueue(t, qi, left, scratch);
}
q->cur = q->end = 0;
pushQueueAt(q, 0, MQE_START, miracle_loc);
@ -236,7 +236,7 @@ char roseCatchUpLeftfix(const struct RoseEngine *t, char *state,
}
if (!fatbit_set(scratch->aqa, qCount, qi)) {
initRoseQueue(t, qi, left, &scratch->tctxt);
initRoseQueue(t, qi, left, scratch);
s32 sp;
if (ci->buf_offset) {
@ -396,14 +396,13 @@ void ensureStreamNeatAndTidy(const struct RoseEngine *t, char *state,
u64a offset) {
struct RoseContext *tctxt = &scratch->tctxt;
if (roseCatchUpTo(t, state, length + scratch->core_info.buf_offset, scratch,
0)
== HWLM_TERMINATE_MATCHING) {
if (roseCatchUpTo(t, scratch, length + scratch->core_info.buf_offset, 0) ==
HWLM_TERMINATE_MATCHING) {
return; /* dead; no need to clean up state. */
}
roseSaveNfaStreamState(t, state, scratch);
roseCatchUpLeftfixes(t, state, scratch);
roseFlushLastByteHistory(t, state, offset + length, tctxt);
roseFlushLastByteHistory(t, scratch, offset + length);
tctxt->lastEndOffset = offset + length;
storeGroups(t, state, tctxt->groups);
}
@ -550,7 +549,7 @@ void roseStreamExec(const struct RoseEngine *t, struct hs_scratch *scratch,
flush_delay_and_exit:
DEBUG_PRINTF("flushing floating\n");
if (cleanUpDelayed(length, offset, scratch) == HWLM_TERMINATE_MATCHING) {
if (cleanUpDelayed(t, scratch, length, offset) == HWLM_TERMINATE_MATCHING) {
return;
}