simplify and make scanSingle*()/scanDouble*() more uniform

Konstantinos Margaritis 2021-02-19 12:16:43 +02:00 committed by Konstantinos Margaritis
parent 2f13ad0674
commit e3e101b412
4 changed files with 117 additions and 281 deletions
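
The refactor replaces the SINGLE_ZSCAN()/DOUBLE_ZSCAN() macros, which expanded
a find-and-clear-LSB loop in place in every scanner, with shared
single_zscan()/double_zscan() helpers whose return value the callers now check.
A rough standalone sketch of that loop (not vectorscan code: __builtin_ctzll
stands in for the JOIN(findAndClearLSB_, Z_BITS) machinery, printf for the
final() callback):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    // Report each set bit of the match mask z as a match position
    // relative to the start of the buffer.
    static int zscan(uint64_t z, size_t block_off) {
        while (z) {
            unsigned pos = (unsigned)__builtin_ctzll(z); // lowest set bit
            z &= z - 1;                                  // clear it
            printf("match pos %zu\n", block_off + pos);
        }
        return 0;
    }

    int main(void) {
        // bits 3 and 17 set for a block starting at offset 64:
        // matches at 67 and 81
        return zscan((1ULL << 3) | (1ULL << 17), 64);
    }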

src/hwlm/noodle_engine.c

@@ -83,6 +83,7 @@ struct cb_info {
     } \
 }
+
 #define SINGLE_ZSCAN() \
     do { \
         while (unlikely(z)) { \
@@ -140,6 +141,32 @@ match:
     return HWLM_SUCCESS;
 }
 
+static really_really_inline
+hwlm_error_t single_zscan(const struct noodTable *n, const u8 *d, const u8 *buf,
+                          Z_TYPE z, size_t len, const struct cb_info *cbi) {
+    while (unlikely(z)) {
+        Z_TYPE pos = JOIN(findAndClearLSB_, Z_BITS)(&z);
+        size_t matchPos = d - buf + pos;
+        DEBUG_PRINTF("match pos %zu\n", matchPos);
+        hwlmcb_rv_t rv = final(n, buf, len, 1, cbi, matchPos);
+        RETURN_IF_TERMINATED(rv);
+    }
+    return HWLM_SUCCESS;
+}
+
+static really_really_inline
+hwlm_error_t double_zscan(const struct noodTable *n, const u8 *d, const u8 *buf,
+                          Z_TYPE z, size_t len, const struct cb_info *cbi) {
+    while (unlikely(z)) {
+        Z_TYPE pos = JOIN(findAndClearLSB_, Z_BITS)(&z);
+        size_t matchPos = d - buf + pos - 1;
+        DEBUG_PRINTF("match pos %zu\n", matchPos);
+        hwlmcb_rv_t rv = final(n, buf, len, 0, cbi, matchPos);
+        RETURN_IF_TERMINATED(rv);
+    }
+    return HWLM_SUCCESS;
+}
+
 #if defined(HAVE_AVX512)
 #define CHUNKSIZE 64
 #define MASK_TYPE m512
@@ -157,6 +184,7 @@ match:
 #include "noodle_engine_sse.c"
 #endif
+
 static really_inline
 hwlm_error_t scanSingleMain(const struct noodTable *n, const u8 *buf,
                             size_t len, size_t start, bool noCase,
@@ -169,15 +197,8 @@ hwlm_error_t scanSingleMain(const struct noodTable *n, const u8 *buf,
     size_t end = len;
     assert(offset < end);
 
-#if !defined(HAVE_AVX512)
     hwlm_error_t rv;
-
-/*    if (end - offset <= CHUNKSIZE) {
-        rv = scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, offset,
-                             end);
-        return rv;
-    }*/
     if (end - offset <= CHUNKSIZE) {
         rv = scanSingleUnaligned(n, buf, len, offset, caseMask, mask1,
                                  cbi, offset, end);
@@ -217,10 +238,6 @@ hwlm_error_t scanSingleMain(const struct noodTable *n, const u8 *buf,
                              s2End, len);
     return rv;
-#else // HAVE_AVX512
-    return scanSingle512(n, buf, len, noCase, caseMask, mask1, cbi, offset,
-                         end);
-#endif
 }
 
 static really_inline
@@ -238,14 +255,8 @@ hwlm_error_t scanDoubleMain(const struct noodTable *n, const u8 *buf,
     const MASK_TYPE mask1 = getMask(n->key0, noCase);
     const MASK_TYPE mask2 = getMask(n->key1, noCase);
 
-#if !defined(HAVE_AVX512)
     hwlm_error_t rv;
-
-/*    if (end - offset <= CHUNKSIZE) {
-        rv = scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
-                             offset, end);
-        return rv;
-    }*/
     if (end - offset <= CHUNKSIZE) {
         rv = scanDoubleUnaligned(n, buf, len, offset, caseMask, mask1,
                                  mask2, cbi, offset, end);
@@ -295,13 +306,8 @@ hwlm_error_t scanDoubleMain(const struct noodTable *n, const u8 *buf,
                                  mask2, cbi, off, end);
     return rv;
-#else // AVX512
-    return scanDouble512(n, buf, len, caseMask, mask1, mask2, cbi,
-                         offset, end);
-#endif // AVX512
 }
 
 static really_inline
 hwlm_error_t scanSingleNoCase(const struct noodTable *n, const u8 *buf,
                               size_t len, size_t start,

src/hwlm/noodle_engine_avx2.c

@@ -39,8 +39,7 @@ static really_inline m256 getCaseMask(void) {
 
 static really_inline
 hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset,
-                                 m256 caseMask, m256 mask1,
+                                 size_t len, size_t offset, m256 caseMask, m256 mask1,
                                  const struct cb_info *cbi, size_t start,
                                  size_t end) {
     const u8 *d = buf + offset;
@@ -56,15 +55,12 @@ hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
     z &= mask;
 
-    SINGLE_ZSCAN();
-
-    return HWLM_SUCCESS;
+    return single_zscan(n, d, buf, z, len, cbi);
 }
 
 static really_inline
 hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset,
-                                 m256 caseMask, m256 mask1, m256 mask2,
+                                 size_t len, size_t offset, m256 caseMask, m256 mask1, m256 mask2,
                                  const struct cb_info *cbi, size_t start,
                                  size_t end) {
     const u8 *d = buf + offset;
@@ -82,90 +78,8 @@ hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
     DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
     z &= mask;
 
-    DOUBLE_ZSCAN();
-
-    return HWLM_SUCCESS;
+    return double_zscan(n, d, buf, z, len, cbi);
 }
 
-/*
-// The short scan routine. It is used both to scan data up to an
-// alignment boundary if needed and to finish off data that the aligned scan
-// function can't handle (due to small/unaligned chunk at end)
-static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, m256 caseMask, m256 mask1,
-                             const struct cb_info *cbi, size_t start,
-                             size_t end) {
-    const u8 *d = buf + start;
-    size_t l = end - start;
-    DEBUG_PRINTF("l %zu\n", l);
-    assert(l <= 32);
-    if (!l) {
-        return HWLM_SUCCESS;
-    }
-    m256 v;
-
-    if (l < 4) {
-        u8 *vp = (u8*)&v;
-        switch (l) {
-            case 3: vp[2] = d[2]; // fallthrough
-            case 2: vp[1] = d[1]; // fallthrough
-            case 1: vp[0] = d[0]; // fallthrough
-        }
-    } else {
-        v = masked_move256_len(d, l);
-    }
-    v = and256(v, caseMask);
-
-    // mask out where we can't match
-    u32 mask = (0xFFFFFFFF >> (32 - l));
-
-    u32 z = mask & movemask256(eq256(mask1, v));
-
-    SINGLE_ZSCAN();
-
-    return HWLM_SUCCESS;
-}
-
-static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, m256 caseMask, m256 mask1,
-                             m256 mask2, const struct cb_info *cbi,
-                             size_t start, size_t end) {
-    const u8 *d = buf + start;
-    size_t l = end - start;
-    if (!l) {
-        return HWLM_SUCCESS;
-    }
-    assert(l <= 32);
-    u32 mask = (0xFFFFFFFF >> (32 - l));
-    m256 v;
-
-    DEBUG_PRINTF("d %zu\n", d - buf);
-    if (l < 4) {
-        u8 *vp = (u8*)&v;
-        switch (l) {
-            case 3: vp[2] = d[2]; // fallthrough
-            case 2: vp[1] = d[1]; // fallthrough
-            case 1: vp[0] = d[0]; // fallthrough
-        }
-    } else {
-        v = masked_move256_len(d, l);
-    }
-    v = and256(v, caseMask);
-
-    u32 z0 = movemask256(eq256(mask1, v));
-    u32 z1 = movemask256(eq256(mask2, v));
-    u32 z = (z0 << 1) & z1;
-
-    // mask out where we can't match
-    z &= mask;
-
-    DOUBLE_ZSCAN();
-
-    return HWLM_SUCCESS;
-}*/
 
 static really_inline
 hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
@@ -183,7 +97,10 @@ hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
 
-        SINGLE_ZSCAN();
+        hwlm_error_t result = single_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
     }
     return HWLM_SUCCESS;
 }
@@ -211,7 +128,9 @@ hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
 
-        DOUBLE_ZSCAN();
+        hwlm_error_t result = double_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
     }
     return HWLM_SUCCESS;
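
Throughout the scanners, z0 flags candidate positions of the first literal byte
and z1 the second; (z0 << 1) & z1 then marks the second byte of each adjacent
pair, which is why double_zscan() reports matchPos = d - buf + pos - 1. A toy
scalar model of the trick (made-up buffer and keys, not vectorscan code):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
        const char *buf = "xxabyab"; // looking for the pair "ab"
        uint32_t z0 = 0, z1 = 0;
        for (size_t i = 0; i < strlen(buf); i++) {
            z0 |= (uint32_t)(buf[i] == 'a') << i;
            z1 |= (uint32_t)(buf[i] == 'b') << i;
        }
        uint32_t z = (z0 << 1) & z1; // bits 3 and 6: "ab" ends there
        while (z) {
            unsigned pos = (unsigned)__builtin_ctz(z);
            z &= z - 1;
            printf("\"ab\" at offset %u\n", pos - 1); // start of the pair
        }
        return 0;
    }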

src/hwlm/noodle_engine_avx512.c

@@ -43,149 +43,107 @@ m512 getCaseMask(void) {
 // The short scan routine. It is used both to scan data up to an
 // alignment boundary if needed and to finish off data that the aligned scan
 // function can't handle (due to small/unaligned chunk at end)
 static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, bool noCase, m512 caseMask, m512 mask1,
-                             const struct cb_info *cbi, size_t start,
-                             size_t end) {
-    const u8 *d = buf + start;
-    ptrdiff_t scan_len = end - start;
-    DEBUG_PRINTF("scan_len %zu\n", scan_len);
-    assert(scan_len <= 64);
-    if (!scan_len) {
-        return HWLM_SUCCESS;
-    }
-
-    __mmask64 k = (~0ULL) >> (64 - scan_len);
-    DEBUG_PRINTF("load mask 0x%016llx\n", k);
-
-    m512 v = loadu_maskz_m512(k, d);
-
-    if (noCase) {
-        v = and512(v, caseMask);
-    }
-
-    // reuse the load mask to indicate valid bytes
-    u64a z = masked_eq512mask(k, mask1, v);
-
-    SINGLE_ZSCAN();
-
-    return HWLM_SUCCESS;
-}
+hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
+                                 size_t len, size_t offset, m512 caseMask, m512 mask1,
+                                 const struct cb_info *cbi, size_t start,
+                                 size_t end) {
+    const u8 *d = buf + offset;
+    DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
+    const size_t l = end - start;
+    assert(l <= 64);
+    if (!l) {
+        return HWLM_SUCCESS;
+    }
+    __mmask64 k = (~0ULL) >> (64 - l);
+    DEBUG_PRINTF("load mask 0x%016llx\n", k);
+    m512 v = loadu_maskz_m512(k, d);
+    v = and512(v, caseMask);
+    // reuse the load mask to indicate valid bytes
+    u64a z = masked_eq512mask(k, mask1, v);
+    return single_zscan(n, d, buf, z, len, cbi);
+}
 
 static really_inline
-hwlm_error_t scanSingle512(const struct noodTable *n, const u8 *buf, size_t len,
-                           bool noCase, m512 caseMask, m512 mask1,
-                           const struct cb_info *cbi, size_t start,
-                           size_t end) {
-    const u8 *d = buf + start;
-    const u8 *e = buf + end;
-    DEBUG_PRINTF("start %p end %p \n", d, e);
-    assert(d < e);
-    if (d + 64 >= e) {
-        goto tail;
-    }
-
-    // peel off first part to cacheline boundary
-    const u8 *d1 = ROUNDUP_PTR(d, 64);
-    if (scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, start,
-                        d1 - buf) == HWLM_TERMINATED) {
-        return HWLM_TERMINATED;
-    }
-    d = d1;
-
-    for (; d + 64 < e; d += 64) {
-        DEBUG_PRINTF("d %p e %p \n", d, e);
-        m512 v = noCase ? and512(load512(d), caseMask) : load512(d);
-
-        u64a z = eq512mask(mask1, v);
-        __builtin_prefetch(d + 128);
-
-        SINGLE_ZSCAN();
-    }
-
-tail:
-    DEBUG_PRINTF("d %p e %p \n", d, e);
-    // finish off tail
-
-    return scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, d - buf,
-                           e - buf);
-}
+hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
+                            size_t len, m512 caseMask, m512 mask1,
+                            const struct cb_info *cbi, size_t start,
+                            size_t end) {
+    const u8 *d = buf + start, *e = buf + end;
+    assert(d < e);
+
+    for (; d < e; d += 64) {
+        m512 v = and512(load512(d), caseMask);
+        u64a z = eq512mask(mask1, v);
+        // On large packet buffers, this prefetch appears to get us about 2%.
+        __builtin_prefetch(d + 128);
+        hwlm_error_t result = single_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
+    }
+    return HWLM_SUCCESS;
+}
 
 static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, bool noCase, m512 caseMask, m512 mask1,
-                             m512 mask2, const struct cb_info *cbi,
-                             u64a *lastz0, size_t start, size_t end) {
-    DEBUG_PRINTF("start %zu end %zu last 0x%016llx\n", start, end, *lastz0);
-    const u8 *d = buf + start;
-    ptrdiff_t scan_len = end - start;
-    if (!scan_len) {
-        return HWLM_SUCCESS;
-    }
-    assert(scan_len <= 64);
-    __mmask64 k = (~0ULL) >> (64 - scan_len);
-    DEBUG_PRINTF("load mask 0x%016llx scan_len %zu\n", k, scan_len);
-
-    m512 v = loadu_maskz_m512(k, d);
-    if (noCase) {
-        v = and512(v, caseMask);
-    }
-
-    u64a z0 = masked_eq512mask(k, mask1, v);
-    u64a z1 = masked_eq512mask(k, mask2, v);
-    u64a z = (*lastz0 | (z0 << 1)) & z1;
-    DEBUG_PRINTF("z 0x%016llx\n", z);
-
-    DOUBLE_ZSCAN();
-    *lastz0 = z0 >> (scan_len - 1);
-    return HWLM_SUCCESS;
-}
+hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
+                                 size_t len, size_t offset, m512 caseMask,
+                                 m512 mask1, m512 mask2,
+                                 const struct cb_info *cbi, size_t start,
+                                 size_t end) {
+    const u8 *d = buf + offset;
+    DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
+    const size_t l = end - start;
+    assert(l <= 64);
+    if (!l) {
+        return HWLM_SUCCESS;
+    }
+    __mmask64 k = (~0ULL) >> (64 - l);
+    DEBUG_PRINTF("load mask 0x%016llx\n", k);
+    m512 v = loadu_maskz_m512(k, d);
+    v = and512(v, caseMask);
+    u64a z0 = masked_eq512mask(k, mask1, v);
+    u64a z1 = masked_eq512mask(k, mask2, v);
+    u64a z = (z0 << 1) & z1;
+    DEBUG_PRINTF("z 0x%016llx\n", z);
+    return double_zscan(n, d, buf, z, len, cbi);
+}
 
 static really_inline
-hwlm_error_t scanDouble512(const struct noodTable *n, const u8 *buf, size_t len,
-                           bool noCase, m512 caseMask, m512 mask1, m512 mask2,
-                           const struct cb_info *cbi, size_t start,
-                           size_t end) {
-    const u8 *d = buf + start;
-    const u8 *e = buf + end;
-    u64a lastz0 = 0;
-    DEBUG_PRINTF("start %zu end %zu \n", start, end);
-    assert(d < e);
-    if (d + 64 >= e) {
-        goto tail;
-    }
-
-    // peel off first part to cacheline boundary
-    const u8 *d1 = ROUNDUP_PTR(d, 64);
-    if (scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
-                        &lastz0, start, d1 - buf) == HWLM_TERMINATED) {
-        return HWLM_TERMINATED;
-    }
-    d = d1;
-
-    for (; d + 64 < e; d += 64) {
-        DEBUG_PRINTF("d %p e %p 0x%016llx\n", d, e, lastz0);
-        m512 v = noCase ? and512(load512(d), caseMask) : load512(d);
-        /* we have to pull the masks out of the AVX registers because we can't
-           byte shift between the lanes */
-        u64a z0 = eq512mask(mask1, v);
-        u64a z1 = eq512mask(mask2, v);
-        u64a z = (lastz0 | (z0 << 1)) & z1;
-        lastz0 = z0 >> 63;
-
-        // On large packet buffers, this prefetch appears to get us about 2%.
-        __builtin_prefetch(d + 256);
-        DEBUG_PRINTF("z 0x%016llx\n", z);
-
-        DOUBLE_ZSCAN();
-    }
-
-tail:
-    DEBUG_PRINTF("d %p e %p off %zu \n", d, e, d - buf);
-    // finish off tail
-
-    return scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
-                           &lastz0, d - buf, end);
-}
+hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
+                            size_t len, m512 caseMask, m512 mask1,
+                            m512 mask2, const struct cb_info *cbi, size_t start,
+                            size_t end) {
+    const u8 *d = buf + start, *e = buf + end;
+    DEBUG_PRINTF("start %zu end %zu \n", start, end);
+    assert(d < e);
+    u64a lastz0 = 0;
+
+    for (; d < e; d += 64) {
+        m512 v = and512(load512(d), caseMask);
+        // we have to pull the masks out of the AVX registers because we can't
+        // byte shift between the lanes
+        u64a z0 = eq512mask(mask1, v);
+        u64a z1 = eq512mask(mask2, v);
+        u64a z = (lastz0 | (z0 << 1)) & z1;
+        lastz0 = z0 >> 63;
+        // On large packet buffers, this prefetch appears to get us about 2%.
+        __builtin_prefetch(d + 128);
+        hwlm_error_t result = double_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
+    }
+    return HWLM_SUCCESS;
+}

src/hwlm/noodle_engine_sse.c

@@ -36,34 +36,10 @@ static really_inline m128 getMask(u8 c, bool noCase) {
 static really_inline m128 getCaseMask(void) {
     return set1_16x8(0xdf);
 }
 
-/*
-static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, m128 caseMask, m128 mask1,
-                             const struct cb_info *cbi, size_t start,
-                             size_t end) {
-    const u8 *d = buf + start;
-    size_t l = end - start;
-    DEBUG_PRINTF("l %zu\n", l);
-    assert(l <= 16);
-    if (!l) {
-        return HWLM_SUCCESS;
-    }
-    m128 v = and128(loadu128(d), caseMask);
-
-    // mask out where we can't match
-    u32 mask = (0xFFFF >> (16 - l));
-
-    u32 z = mask & movemask128(eq128(mask1, v));
-
-    SINGLE_ZSCAN();
-
-    return HWLM_SUCCESS;
-}*/
 
 static really_inline
 hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset,
-                                 m128 caseMask, m128 mask1,
+                                 size_t len, size_t offset, m128 caseMask, m128 mask1,
                                  const struct cb_info *cbi, size_t start,
                                  size_t end) {
     const u8 *d = buf + offset;
@@ -74,39 +50,11 @@ hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
     u32 buf_off = start - offset;
     u32 mask = ((1 << l) - 1) << buf_off;
-    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
     u32 z = mask & movemask128(eq128(mask1, v));
+    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
 
-    SINGLE_ZSCAN();
-
-    return HWLM_SUCCESS;
+    return single_zscan(n, d, buf, z, len, cbi);
 }
 
-/*
-static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, m128 caseMask, m128 mask1,
-                             m128 mask2, const struct cb_info *cbi,
-                             size_t start, size_t end) {
-    const u8 *d = buf + start;
-    size_t l = end - start;
-    if (!l) {
-        return HWLM_SUCCESS;
-    }
-    assert(l <= 32);
-
-    DEBUG_PRINTF("d %zu\n", d - buf);
-    m128 v = and128(loadu128(d), caseMask);
-
-    // mask out where we can't match
-    u32 mask = (0xFFFF >> (16 - l));
-    u32 z = mask & movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
-                                      eq128(mask2, v)));
-
-    DOUBLE_ZSCAN();
-
-    return HWLM_SUCCESS;
-}*/
 
 static really_inline
 hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
@@ -123,13 +71,11 @@ hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
     // mask out where we can't match
     u32 mask = ((1 << l) - 1) << buf_off;
-    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
     u32 z = mask & movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
                                       eq128(mask2, v)));
+    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
 
-    DOUBLE_ZSCAN();
-
-    return HWLM_SUCCESS;
+    return double_zscan(n, d, buf, z, len, cbi);
 }
 
 static really_inline
@@ -146,8 +92,11 @@ hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
+        DEBUG_PRINTF("z 0x%08x\n", z);
 
-        SINGLE_ZSCAN();
+        hwlm_error_t result = single_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
     }
     return HWLM_SUCCESS;
 }
@@ -171,7 +120,11 @@ hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
         DEBUG_PRINTF("z 0x%08x\n", z);
-        DOUBLE_ZSCAN();
+
+        hwlm_error_t result = double_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
     }
     return HWLM_SUCCESS;
 }
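
In the *Unaligned variants the SIMD load can cover bytes outside [start, end),
so the movemask result is AND-ed with ((1 << l) - 1) << buf_off before it
reaches the zscan helpers. A small standalone model with made-up values (not
vectorscan code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        unsigned l = 5;       // five valid bytes in this chunk
        unsigned buf_off = 2; // valid region starts 2 bytes into the load
        uint32_t raw_z = 0xFFFF;                    // pretend every lane matched
        uint32_t mask = ((1u << l) - 1) << buf_off; // 0x7c: lanes 2..6
        printf("masked z: 0x%04x\n", raw_z & mask); // 0x007c
        return 0;
    }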