borrow cache prefetching tricks from the Marvell port; these seem to improve performance by 5-28%

Konstantinos Margaritis
2021-01-15 17:42:11 +02:00
committed by Konstantinos Margaritis
parent 51dcfa8571
commit b62247a36e
4 changed files with 37 additions and 7 deletions


@@ -634,6 +634,11 @@ char nfaExecMcClellan16_Q2i(const struct NFA *n, u64a offset, const u8 *buffer,
     assert(ISALIGNED_N(q->state, 2));
     u32 s = *(u16 *)q->state;
 
+    __builtin_prefetch(&m->remap[0]);
+    __builtin_prefetch(&m->remap[64]);
+    __builtin_prefetch(&m->remap[128]);
+    __builtin_prefetch(&m->remap[192]);
+
     if (q->report_current) {
         assert(s);
         assert(get_aux(m, s)->accept);
@@ -790,6 +795,11 @@ char nfaExecMcClellan8_Q2i(const struct NFA *n, u64a offset, const u8 *buffer,
     u32 s = *(u8 *)q->state;
 
+    __builtin_prefetch(&m->remap[0]);
+    __builtin_prefetch(&m->remap[64]);
+    __builtin_prefetch(&m->remap[128]);
+    __builtin_prefetch(&m->remap[192]);
+
     if (q->report_current) {
         assert(s);
         assert(s >= m->accept_limit_8);

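The four prefetches added above are sized to the data they cover: McClellan's remap[] is a 256-entry byte table, so with 64-byte cache lines the offsets 0/64/128/192 touch every line of the table exactly once before the scan loop starts indexing it. A minimal standalone sketch of the same pattern (the remap_table and count_remapped names are illustrative, not from this commit; 64-byte lines are an assumption):

    #include <stddef.h>
    #include <stdint.h>

    #define CACHE_LINE 64 /* assumption: 64-byte lines, typical on x86 and ARM */

    /* Hypothetical 256-entry byte table, same shape as McClellan's remap[]. */
    static uint8_t remap_table[256];

    /* Touch every cache line of a table before the hot loop depends on it. */
    static void prefetch_table(const uint8_t *table, size_t len) {
        for (size_t i = 0; i < len; i += CACHE_LINE) {
            __builtin_prefetch(&table[i]);
        }
    }

    size_t count_remapped(const uint8_t *buf, size_t len) {
        size_t sum = 0;
        prefetch_table(remap_table, sizeof(remap_table)); /* four prefetches */
        for (size_t i = 0; i < len; i++) {
            sum += remap_table[buf[i]]; /* lookups now likely hit L1 */
        }
        return sum;
    }

With default arguments, __builtin_prefetch is a GCC/Clang read prefetch into all cache levels; on targets without a prefetch instruction it generates no code, so the fallback cost is zero.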

@@ -889,6 +889,11 @@ char nfaExecMcSheng16_Q2i(const struct NFA *n, u64a offset, const u8 *buffer,
         return MO_ALIVE;
     }
 
+    __builtin_prefetch(&m->remap[0]);
+    __builtin_prefetch(&m->remap[64]);
+    __builtin_prefetch(&m->remap[128]);
+    __builtin_prefetch(&m->remap[192]);
+
     while (1) {
         assert(q->cur < q->end);
         s64a ep = q->items[q->cur].location;
@@ -1017,6 +1022,11 @@ char nfaExecMcSheng8_Q2i(const struct NFA *n, u64a offset, const u8 *buffer,
         return MO_ALIVE;
     }
 
+    __builtin_prefetch(&m->remap[0]);
+    __builtin_prefetch(&m->remap[64]);
+    __builtin_prefetch(&m->remap[128]);
+    __builtin_prefetch(&m->remap[192]);
+
     while (1) {
         DEBUG_PRINTF("%s @ %llu\n", q->items[q->cur].type == MQE_TOP ? "TOP" :
                      q->items[q->cur].type == MQE_END ? "END" : "???",


@@ -109,7 +109,8 @@ DUMP_MSK(128)
 #endif
 
 #define GET_LO_4(chars) and128(chars, low4bits)
-#define GET_HI_4(chars) rshift64_m128(andnot128(low4bits, chars), 4)
+#define GET_HI_4(chars) and128(rshift64_m128(chars, 4), low4bits)
+//#define GET_HI_4(chars) rshift64_m128(andnot128(low4bits, chars), 4)
 
 static really_inline
 u32 block(m128 mask_lo, m128 mask_hi, m128 chars, const m128 low4bits,
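The GET_HI_4 rewrite above does not change the result, only the operation order: the old form masks the high nibble in place and then shifts it down ((c & 0xF0) >> 4), while the new form shifts first and then masks ((c >> 4) & 0x0F). Both variants shift per 64-bit lane, so bits can cross byte boundaries, but in each case the mask (andnot128 before the shift, or and128 after it) discards exactly those stray bits. A scalar sketch of the equivalence (plain C, helper names mine):

    #include <assert.h>
    #include <stdint.h>

    /* Old form: mask the high nibble, then shift it down. */
    static uint8_t hi_nibble_old(uint8_t c) {
        return (uint8_t)((c & 0xF0u) >> 4); /* andnot128 then rshift64_m128 */
    }

    /* New form: shift first, then mask to the low nibble. */
    static uint8_t hi_nibble_new(uint8_t c) {
        return (uint8_t)((c >> 4) & 0x0Fu); /* rshift64_m128 then and128 */
    }

    int main(void) {
        for (unsigned c = 0; c < 256; c++) {
            assert(hi_nibble_old((uint8_t)c) == hi_nibble_new((uint8_t)c));
        }
        return 0;
    }

Which ordering is faster depends on how the target ISA lowers the two SIMD operations; the old definition is kept commented out in the diff for easy comparison.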
@@ -177,6 +178,10 @@ const u8 *shuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
     // Reroll FTW.
     const u8 *last_block = buf_end - 16;
 
+    for (const u8 *itPtr = buf; itPtr + 4*16 <= last_block; itPtr += 4*16) {
+        __builtin_prefetch(itPtr);
+    }
+
     while (buf < last_block) {
         m128 lchars = load128(buf);
         rv = fwdBlock(mask_lo, mask_hi, lchars, buf, low4bits, zeroes);
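The shufti hunk applies the same trick to the input rather than to a lookup table: before the 16-byte block loop runs, a strided loop prefetches the buffer at 4*16 = 64-byte steps, one prefetch per cache line. A standalone sketch of that pattern (scan_block is a stand-in for the real fwdBlock, and its match logic is illustrative only):

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for the real SIMD block scan (fwdBlock): report the first
     * occurrence of target in a 16-byte block, or NULL. */
    static const uint8_t *scan_block(const uint8_t *p, uint8_t target) {
        for (int i = 0; i < 16; i++) {
            if (p[i] == target) {
                return p + i;
            }
        }
        return NULL;
    }

    /* Tail (< 16 bytes) handling omitted; the real code does it separately. */
    const uint8_t *scan_buffer(const uint8_t *buf, const uint8_t *buf_end,
                               uint8_t target) {
        const uint8_t *last_block = buf_end - 16;

        /* One prefetch per 64-byte cache line, issued up front so the scan
         * loop below finds its loads already resident. */
        for (const uint8_t *it = buf; it + 4 * 16 <= last_block; it += 4 * 16) {
            __builtin_prefetch(it);
        }

        while (buf < last_block) {
            const uint8_t *rv = scan_block(buf, target);
            if (rv) {
                return rv;
            }
            buf += 16;
        }
        return NULL;
    }

Issuing every prefetch before the loop is a trade-off: on large buffers, lines fetched far ahead of use can be evicted again before the scan reaches them, so the payoff varies with input size and core, which is consistent with the wide 5-28% range quoted in the commit message.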