From e3e101b412bd99bf5937fa92c7a2a5e6a941aa11 Mon Sep 17 00:00:00 2001
From: Konstantinos Margaritis
Date: Fri, 19 Feb 2021 12:16:43 +0200
Subject: [PATCH] simplify and make scanSingle*()/scanDouble*() more uniform

---
 src/hwlm/noodle_engine.c        |  50 +++++-----
 src/hwlm/noodle_engine_avx2.c   | 103 +++----------
 src/hwlm/noodle_engine_avx512.c | 170 ++++++++++--------------
 src/hwlm/noodle_engine_sse.c    |  75 +++---------
 4 files changed, 117 insertions(+), 281 deletions(-)

diff --git a/src/hwlm/noodle_engine.c b/src/hwlm/noodle_engine.c
index 28a8f4a5..be56ccd9 100644
--- a/src/hwlm/noodle_engine.c
+++ b/src/hwlm/noodle_engine.c
@@ -83,6 +83,7 @@ struct cb_info {
     }                                                                   \
 }
 
+
 #define SINGLE_ZSCAN()                                                  \
     do {                                                                \
         while (unlikely(z)) {                                           \
@@ -140,6 +141,32 @@ match:
     return HWLM_SUCCESS;
 }
 
+static really_really_inline
+hwlm_error_t single_zscan(const struct noodTable *n, const u8 *d, const u8 *buf,
+                          Z_TYPE z, size_t len, const struct cb_info *cbi) {
+    while (unlikely(z)) {
+        Z_TYPE pos = JOIN(findAndClearLSB_, Z_BITS)(&z);
+        size_t matchPos = d - buf + pos;
+        DEBUG_PRINTF("match pos %zu\n", matchPos);
+        hwlmcb_rv_t rv = final(n, buf, len, 1, cbi, matchPos);
+        RETURN_IF_TERMINATED(rv);
+    }
+    return HWLM_SUCCESS;
+}
+
+static really_really_inline
+hwlm_error_t double_zscan(const struct noodTable *n, const u8 *d, const u8 *buf,
+                          Z_TYPE z, size_t len, const struct cb_info *cbi) {
+    while (unlikely(z)) {
+        Z_TYPE pos = JOIN(findAndClearLSB_, Z_BITS)(&z);
+        size_t matchPos = d - buf + pos - 1;
+        DEBUG_PRINTF("match pos %zu\n", matchPos);
+        hwlmcb_rv_t rv = final(n, buf, len, 0, cbi, matchPos);
+        RETURN_IF_TERMINATED(rv);
+    }
+    return HWLM_SUCCESS;
+}
+
 #if defined(HAVE_AVX512)
 #define CHUNKSIZE 64
 #define MASK_TYPE m512
@@ -157,6 +184,7 @@ match:
 #include "noodle_engine_sse.c"
 #endif
 
+
 static really_inline
 hwlm_error_t scanSingleMain(const struct noodTable *n, const u8 *buf,
                             size_t len, size_t start, bool noCase,
@@ -169,15 +197,8 @@ hwlm_error_t scanSingleMain(const struct noodTable *n, const u8 *buf,
     size_t end = len;
     assert(offset < end);
 
-#if !defined(HAVE_AVX512)
     hwlm_error_t rv;
 
-/*    if (end - offset <= CHUNKSIZE) {
-        rv = scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, offset,
-                             end);
-        return rv;
-    }*/
-
     if (end - offset <= CHUNKSIZE) {
         rv = scanSingleUnaligned(n, buf, len, offset, caseMask, mask1, cbi,
                                  offset, end);
@@ -217,10 +238,6 @@ hwlm_error_t scanSingleMain(const struct noodTable *n, const u8 *buf,
                              s2End, len);
 
     return rv;
-#else // HAVE_AVX512
-    return scanSingle512(n, buf, len, noCase, caseMask, mask1, cbi, offset,
-                         end);
-#endif
 }
 
 static really_inline
@@ -238,14 +255,8 @@ hwlm_error_t scanDoubleMain(const struct noodTable *n, const u8 *buf,
     const MASK_TYPE mask1 = getMask(n->key0, noCase);
     const MASK_TYPE mask2 = getMask(n->key1, noCase);
 
-#if !defined(HAVE_AVX512)
     hwlm_error_t rv;
 
-/*    if (end - offset <= CHUNKSIZE) {
-        rv = scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
-                             offset, end);
-        return rv;
-    }*/
     if (end - offset <= CHUNKSIZE) {
         rv = scanDoubleUnaligned(n, buf, len, offset, caseMask, mask1, mask2,
                                  cbi, offset, end);
@@ -295,13 +306,8 @@ hwlm_error_t scanDoubleMain(const struct noodTable *n, const u8 *buf,
                              mask2, cbi, off, end);
 
     return rv;
-#else // AVX512
-    return scanDouble512(n, buf, len, caseMask, mask1, mask2, cbi,
-                         offset, end);
-#endif // AVX512
 }
 
-
 static really_inline
 hwlm_error_t scanSingleNoCase(const struct noodTable *n, const u8 *buf,
                               size_t len, size_t start,
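Note on the new single_zscan()/double_zscan() helpers above: they centralise the match-reporting loop that the SINGLE_ZSCAN()/DOUBLE_ZSCAN() macros used to expand inline. Each set bit in z flags a candidate match inside the current block, and JOIN(findAndClearLSB_, Z_BITS) consumes the bits lowest-first, so matches are reported in buffer order. A minimal portable sketch of the same loop, using GCC/Clang builtins and illustrative names rather than hwlm's types:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Each set bit in z is a candidate match offset within the current
     * block; bits are peeled off lowest-first, so matches come out in
     * buffer order. report_matches() stands in for final(). */
    static void report_matches(uint64_t z, size_t block_off) {
        while (z) {
            unsigned pos = (unsigned)__builtin_ctzll(z); /* lowest set bit */
            z &= z - 1;                                  /* clear that bit */
            printf("match at offset %zu\n", block_off + pos);
        }
    }

Peeling bits with z &= z - 1 keeps the loop branch-light apart from the emptiness test, which is what lets the helpers replace the macros without a measurable cost per block.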
diff --git a/src/hwlm/noodle_engine_avx2.c b/src/hwlm/noodle_engine_avx2.c
index bb3ce9dc..05c40cd2 100644
--- a/src/hwlm/noodle_engine_avx2.c
+++ b/src/hwlm/noodle_engine_avx2.c
@@ -39,8 +39,7 @@ static really_inline m256 getCaseMask(void) {
 
 static really_inline
 hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset,
-                                 m256 caseMask, m256 mask1,
+                                 size_t len, size_t offset, m256 caseMask, m256 mask1,
                                  const struct cb_info *cbi, size_t start,
                                  size_t end) {
     const u8 *d = buf + offset;
@@ -56,15 +55,12 @@ hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
 
     z &= mask;
 
-    SINGLE_ZSCAN();
-
-    return HWLM_SUCCESS;
+    return single_zscan(n, d, buf, z, len, cbi);
 }
 
 static really_inline
 hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset,
-                                 m256 caseMask, m256 mask1, m256 mask2,
+                                 size_t len, size_t offset, m256 caseMask, m256 mask1, m256 mask2,
                                  const struct cb_info *cbi, size_t start,
                                  size_t end) {
     const u8 *d = buf + offset;
@@ -82,90 +78,8 @@ hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
     DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
 
     z &= mask;
 
-    DOUBLE_ZSCAN();
-
-    return HWLM_SUCCESS;
+    return double_zscan(n, d, buf, z, len, cbi);
 }
-/*
-// The short scan routine. It is used both to scan data up to an
-// alignment boundary if needed and to finish off data that the aligned scan
-// function can't handle (due to small/unaligned chunk at end)
-static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, m256 caseMask, m256 mask1,
-                             const struct cb_info *cbi, size_t start,
-                             size_t end) {
-    const u8 *d = buf + start;
-    size_t l = end - start;
-    DEBUG_PRINTF("l %zu\n", l);
-    assert(l <= 32);
-    if (!l) {
-        return HWLM_SUCCESS;
-    }
-    m256 v;
-
-    if (l < 4) {
-        u8 *vp = (u8*)&v;
-        switch (l) {
-            case 3: vp[2] = d[2]; // fallthrough
-            case 2: vp[1] = d[1]; // fallthrough
-            case 1: vp[0] = d[0]; // fallthrough
-        }
-    } else {
-        v = masked_move256_len(d, l);
-    }
-
-    m256 v = and256(v, caseMask);
-    // mask out where we can't match
-    u32 mask = (0xFFFFFFFF >> (32 - l));
-
-    u32 z = mask & movemask256(eq256(mask1, v));
-
-    SINGLE_ZSCAN();
-
-    return HWLM_SUCCESS;
-}
-
-static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, m256 caseMask, m256 mask1,
-                             m256 mask2, const struct cb_info *cbi,
-                             size_t start, size_t end) {
-    const u8 *d = buf + start;
-    size_t l = end - start;
-    if (!l) {
-        return HWLM_SUCCESS;
-    }
-    assert(l <= 32);
-    u32 mask = (0xFFFFFFFF >> (32 - l));
-
-    m256 v;
-
-    DEBUG_PRINTF("d %zu\n", d - buf);
-    if (l < 4) {
-        u8 *vp = (u8*)&v;
-        switch (l) {
-            case 3: vp[2] = d[2]; // fallthrough
-            case 2: vp[1] = d[1]; // fallthrough
-            case 1: vp[0] = d[0]; // fallthrough
-        }
-    } else {
-        v = masked_move256_len(d, l);
-    }
-
-    m256 v = and256(v, caseMask);
-
-    u32 z0 = movemask256(eq256(mask1, v));
-    u32 z1 = movemask256(eq256(mask2, v));
-    u32 z = (z0 << 1) & z1;
-
-    // mask out where we can't match
-    z &= mask;
-
-    DOUBLE_ZSCAN();
-
-    return HWLM_SUCCESS;
-}*/
 
 static really_inline
 hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
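The scanDoubleUnaligned() above, like every double-byte scanner in this patch, relies on one pairing trick: z0 flags bytes equal to the first literal byte, z1 flags bytes equal to the second, and (z0 << 1) & z1 leaves a bit set only where the two occur adjacently. The surviving bit sits on the second byte of the pair, which is why double_zscan() subtracts 1 when computing matchPos. A self-contained SSE2 sketch of the trick; match_pair16() is hypothetical, not part of the patch, and ignores pairs straddling the 16-byte block (the real code carries a bit across blocks for that case):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Returns a bitmask with bit p set where buf[p-1] == c0 and
     * buf[p] == c1, for pairs wholly inside this 16-byte block. */
    static uint32_t match_pair16(const uint8_t *buf, uint8_t c0, uint8_t c1) {
        __m128i v = _mm_loadu_si128((const __m128i *)buf);
        uint32_t z0 = (uint32_t)_mm_movemask_epi8(
            _mm_cmpeq_epi8(v, _mm_set1_epi8((char)c0)));
        uint32_t z1 = (uint32_t)_mm_movemask_epi8(
            _mm_cmpeq_epi8(v, _mm_set1_epi8((char)c1)));
        return (z0 << 1) & z1; /* bit lands on the pair's second byte */
    }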
@@ -183,7 +97,10 @@ hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
 
-        SINGLE_ZSCAN();
+        hwlm_error_t result = single_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
+
     }
     return HWLM_SUCCESS;
 }
@@ -211,7 +128,9 @@ hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
 
-        DOUBLE_ZSCAN();
+        hwlm_error_t result = double_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
     }
 
     return HWLM_SUCCESS;
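In the AVX512 file below, the rewritten scan*Unaligned() paths lean on masked-zero loads: the k mask both bounds the read (no byte past end is touched) and, reused in the masked compare, keeps bytes outside [start, end) from ever producing a match bit. A sketch with raw AVX-512BW intrinsics; match_head64() is hypothetical, assumes 1 <= l <= 64 (the callers return early when l == 0), and mirrors what loadu_maskz_m512()/masked_eq512mask() wrap:

    #include <immintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Compare the first l bytes at d against byte c without reading past
     * d + l. The same mask bounds the load and the compare, so lanes
     * beyond l can neither fault nor match. Requires AVX512BW. */
    static uint64_t match_head64(const uint8_t *d, size_t l, uint8_t c) {
        __mmask64 k = (~0ULL) >> (64 - l);         /* low l bits set */
        __m512i v = _mm512_maskz_loadu_epi8(k, d); /* masked lanes read as 0 */
        return _mm512_mask_cmpeq_epi8_mask(k, v, _mm512_set1_epi8((char)c));
    }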
diff --git a/src/hwlm/noodle_engine_avx512.c b/src/hwlm/noodle_engine_avx512.c
index 1a925fbf..f992e83f 100644
--- a/src/hwlm/noodle_engine_avx512.c
+++ b/src/hwlm/noodle_engine_avx512.c
@@ -43,149 +43,107 @@ m512 getCaseMask(void) {
 // alignment boundary if needed and to finish off data that the aligned scan
 // function can't handle (due to small/unaligned chunk at end)
 static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, bool noCase, m512 caseMask, m512 mask1,
-                             const struct cb_info *cbi, size_t start,
-                             size_t end) {
-    const u8 *d = buf + start;
-    ptrdiff_t scan_len = end - start;
-    DEBUG_PRINTF("scan_len %zu\n", scan_len);
-    assert(scan_len <= 64);
-    if (!scan_len) {
+hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
+                                 size_t len, size_t offset, m512 caseMask, m512 mask1,
+                                 const struct cb_info *cbi, size_t start,
+                                 size_t end) {
+    const u8 *d = buf + offset;
+    DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
+    const size_t l = end - start;
+    assert(l <= 64);
+    if (!l) {
         return HWLM_SUCCESS;
     }
-    __mmask64 k = (~0ULL) >> (64 - scan_len);
+    __mmask64 k = (~0ULL) >> (64 - l);
     DEBUG_PRINTF("load mask 0x%016llx\n", k);
     m512 v = loadu_maskz_m512(k, d);
-
-    if (noCase) {
-        v = and512(v, caseMask);
-    }
+    v = and512(v, caseMask);
 
     // reuse the load mask to indicate valid bytes
     u64a z = masked_eq512mask(k, mask1, v);
 
-    SINGLE_ZSCAN();
+    return single_zscan(n, d, buf, z, len, cbi);
+}
 
+static really_inline
+hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
+                            size_t len, m512 caseMask, m512 mask1,
+                            const struct cb_info *cbi, size_t start,
+                            size_t end) {
+    const u8 *d = buf + start, *e = buf + end;
+    assert(d < e);
+
+    for (; d < e; d += 64) {
+        m512 v = and512(load512(d), caseMask);
+
+        u64a z = eq512mask(mask1, v);
+
+        // On large packet buffers, this prefetch appears to get us about 2%.
+        __builtin_prefetch(d + 128);
+
+        hwlm_error_t result = single_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
+    }
     return HWLM_SUCCESS;
 }
 
 static really_inline
-hwlm_error_t scanSingle512(const struct noodTable *n, const u8 *buf, size_t len,
-                           bool noCase, m512 caseMask, m512 mask1,
-                           const struct cb_info *cbi, size_t start,
-                           size_t end) {
-    const u8 *d = buf + start;
-    const u8 *e = buf + end;
-    DEBUG_PRINTF("start %p end %p \n", d, e);
-    assert(d < e);
-    if (d + 64 >= e) {
-        goto tail;
-    }
-
-    // peel off first part to cacheline boundary
-    const u8 *d1 = ROUNDUP_PTR(d, 64);
-    if (scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, start,
-                        d1 - buf) == HWLM_TERMINATED) {
-        return HWLM_TERMINATED;
-    }
-    d = d1;
-
-    for (; d + 64 < e; d += 64) {
-        DEBUG_PRINTF("d %p e %p \n", d, e);
-        m512 v = noCase ? and512(load512(d), caseMask) : load512(d);
-
-        u64a z = eq512mask(mask1, v);
-        __builtin_prefetch(d + 128);
-
-        SINGLE_ZSCAN();
-    }
-
-tail:
-    DEBUG_PRINTF("d %p e %p \n", d, e);
-    // finish off tail
-
-    return scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, d - buf,
-                           e - buf);
-}
-
-static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, bool noCase, m512 caseMask, m512 mask1,
-                             m512 mask2, const struct cb_info *cbi,
-                             u64a *lastz0, size_t start, size_t end) {
-    DEBUG_PRINTF("start %zu end %zu last 0x%016llx\n", start, end, *lastz0);
-    const u8 *d = buf + start;
-    ptrdiff_t scan_len = end - start;
-    if (!scan_len) {
+hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
+                                 size_t len, size_t offset, m512 caseMask,
+                                 m512 mask1, m512 mask2,
+                                 const struct cb_info *cbi, size_t start,
+                                 size_t end) {
+    const u8 *d = buf + offset;
+    DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
+    const size_t l = end - start;
+    assert(l <= 64);
+    if (!l) {
         return HWLM_SUCCESS;
     }
-    assert(scan_len <= 64);
-    __mmask64 k = (~0ULL) >> (64 - scan_len);
-    DEBUG_PRINTF("load mask 0x%016llx scan_len %zu\n", k, scan_len);
+
+    __mmask64 k = (~0ULL) >> (64 - l);
+    DEBUG_PRINTF("load mask 0x%016llx\n", k);
     m512 v = loadu_maskz_m512(k, d);
-    if (noCase) {
-        v = and512(v, caseMask);
-    }
+    v = and512(v, caseMask);
 
     u64a z0 = masked_eq512mask(k, mask1, v);
     u64a z1 = masked_eq512mask(k, mask2, v);
-    u64a z = (*lastz0 | (z0 << 1)) & z1;
+    u64a z = (z0 << 1) & z1;
     DEBUG_PRINTF("z 0x%016llx\n", z);
 
-    DOUBLE_ZSCAN();
-    *lastz0 = z0 >> (scan_len - 1);
-    return HWLM_SUCCESS;
+    return double_zscan(n, d, buf, z, len, cbi);
 }
 
 static really_inline
-hwlm_error_t scanDouble512(const struct noodTable *n, const u8 *buf, size_t len,
-                           bool noCase, m512 caseMask, m512 mask1, m512 mask2,
-                           const struct cb_info *cbi, size_t start,
-                           size_t end) {
-    const u8 *d = buf + start;
-    const u8 *e = buf + end;
-    u64a lastz0 = 0;
+hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
+                            size_t len, m512 caseMask, m512 mask1,
+                            m512 mask2, const struct cb_info *cbi, size_t start,
+                            size_t end) {
+    const u8 *d = buf + start, *e = buf + end;
     DEBUG_PRINTF("start %zu end %zu \n", start, end);
     assert(d < e);
-    if (d + 64 >= e) {
-        goto tail;
-    }
+    u64a lastz0 = 0;
 
-    // peel off first part to cacheline boundary
-    const u8 *d1 = ROUNDUP_PTR(d, 64);
-    if (scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
-                        &lastz0, start, d1 - buf) == HWLM_TERMINATED) {
-        return HWLM_TERMINATED;
-    }
-    d = d1;
+    for (; d < e; d += 64) {
+        m512 v = and512(load512(d), caseMask);
 
-    for (; d + 64 < e; d += 64) {
-        DEBUG_PRINTF("d %p e %p 0x%016llx\n", d, e, lastz0);
-        m512 v = noCase ? and512(load512(d), caseMask) : load512(d);
-
-        /* we have to pull the masks out of the AVX registers because we can't
-           byte shift between the lanes */
+        // we have to pull the masks out of the AVX registers because we can't
+        // byte shift between the lanes
        u64a z0 = eq512mask(mask1, v);
        u64a z1 = eq512mask(mask2, v);
        u64a z = (lastz0 | (z0 << 1)) & z1;
        lastz0 = z0 >> 63;
 
         // On large packet buffers, this prefetch appears to get us about 2%.
-        __builtin_prefetch(d + 256);
+        __builtin_prefetch(d + 128);
 
-        DEBUG_PRINTF("z 0x%016llx\n", z);
-
-        DOUBLE_ZSCAN();
+        hwlm_error_t result = double_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
     }
-
-tail:
-    DEBUG_PRINTF("d %p e %p off %zu \n", d, e, d - buf);
-    // finish off tail
-
-    return scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
-                           &lastz0, d - buf, end);
+    return HWLM_SUCCESS;
 }
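One subtlety in scanDoubleFast() above: a pair may straddle two 64-byte blocks, so the top bit of each block's first-byte mask is carried into the next iteration via lastz0. A compilable sketch of just that carry, with the per-block masks supplied as arrays (hypothetical inputs; the real code computes them with eq512mask()):

    #include <stddef.h>
    #include <stdint.h>

    /* z0[i]/z1[i]: per-block "equals first byte"/"equals second byte"
     * masks. Bit 63 of block i's z0 is carried into bit 0 of block i+1's
     * shifted mask, so a pair split across the boundary still matches.
     * on_match() receives (block index, bit position within block). */
    static void scan_pairs(const uint64_t *z0, const uint64_t *z1,
                           size_t nblocks,
                           void (*on_match)(size_t, unsigned)) {
        uint64_t lastz0 = 0;
        for (size_t i = 0; i < nblocks; i++) {
            uint64_t z = (lastz0 | (z0[i] << 1)) & z1[i];
            lastz0 = z0[i] >> 63; /* carry into the next block */
            while (z) {
                on_match(i, (unsigned)__builtin_ctzll(z));
                z &= z - 1;
            }
        }
    }

Note that the rewritten scanDoubleUnaligned() drops this carry and uses a plain (z0 << 1) & z1; the head/tail calls in scanDoubleMain() overlap the boundary byte instead.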
diff --git a/src/hwlm/noodle_engine_sse.c b/src/hwlm/noodle_engine_sse.c
index 5227c251..78033a47 100644
--- a/src/hwlm/noodle_engine_sse.c
+++ b/src/hwlm/noodle_engine_sse.c
@@ -36,34 +36,10 @@ static really_inline m128 getMask(u8 c, bool noCase) {
 
 static really_inline m128 getCaseMask(void) {
     return set1_16x8(0xdf);
 }
 
-/*
-static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, m128 caseMask, m128 mask1,
-                             const struct cb_info *cbi, size_t start,
-                             size_t end) {
-    const u8 *d = buf + start;
-    size_t l = end - start;
-    DEBUG_PRINTF("l %zu\n", l);
-    assert(l <= 16);
-    if (!l) {
-        return HWLM_SUCCESS;
-    }
-    m128 v = and128(loadu128(d), caseMask);
-
-    // mask out where we can't match
-    u32 mask = (0xFFFF >> (16 - l));
-    u32 z = mask & movemask128(eq128(mask1, v));
-
-    SINGLE_ZSCAN();
-
-    return HWLM_SUCCESS;
-}*/
 
 static really_inline
 hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset,
-                                 m128 caseMask, m128 mask1,
+                                 size_t len, size_t offset, m128 caseMask, m128 mask1,
                                  const struct cb_info *cbi, size_t start,
                                  size_t end) {
     const u8 *d = buf + offset;
@@ -74,39 +50,11 @@ hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
     u32 buf_off = start - offset;
     u32 mask = ((1 << l) - 1) << buf_off;
-    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
     u32 z = mask & movemask128(eq128(mask1, v));
+    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
 
-
-    SINGLE_ZSCAN();
-
-    return HWLM_SUCCESS;
+    return single_zscan(n, d, buf, z, len, cbi);
 }
-/*
-static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, m128 caseMask, m128 mask1,
-                             m128 mask2, const struct cb_info *cbi,
-                             size_t start, size_t end) {
-    const u8 *d = buf + start;
-    size_t l = end - start;
-    if (!l) {
-        return HWLM_SUCCESS;
-    }
-    assert(l <= 32);
-
-    DEBUG_PRINTF("d %zu\n", d - buf);
-    m128 v = and128(loadu128(d), caseMask);
-
-    // mask out where we can't match
-    u32 mask = (0xFFFF >> (16 - l));
-    u32 z = mask & movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
-                                      eq128(mask2, v)));
-
-    DOUBLE_ZSCAN();
-
-    return HWLM_SUCCESS;
-}*/
 
 static really_inline
 hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
@@ -123,13 +71,11 @@ hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
 
     // mask out where we can't match
     u32 mask = ((1 << l) - 1) << buf_off;
-    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
     u32 z = mask & movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
                                       eq128(mask2, v)));
+    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
 
-
-    DOUBLE_ZSCAN();
-
-    return HWLM_SUCCESS;
+    return double_zscan(n, d, buf, z, len, cbi);
 }
 
 static really_inline
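Note on the SSE scan*Unaligned() hunks above: because the vector is loaded from buf + offset rather than buf + start, the movemask result is clipped to the valid byte range with ((1 << l) - 1) << buf_off, stripping bits below buf_off (before start) and at or above buf_off + l (past end). These hunks also move the DEBUG_PRINTF after the point where z is defined, fixing a use-before-definition in the debug build. A one-function sketch of the clipping, assuming l <= 16 and buf_off + l <= 16 as the callers guarantee:

    #include <stdint.h>

    /* Keep only the movemask bits for bytes [buf_off, buf_off + l) of a
     * 16-byte block; everything outside that window is not a valid match
     * position for this call. */
    static uint32_t clip_to_valid(uint32_t z, uint32_t buf_off, uint32_t l) {
        uint32_t mask = ((1u << l) - 1) << buf_off;
        return z & mask;
    }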
@@ -146,8 +92,11 @@ hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
 
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
+        DEBUG_PRINTF("z 0x%08x\n", z);
 
-        SINGLE_ZSCAN();
+        hwlm_error_t result = single_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
     }
     return HWLM_SUCCESS;
 }
@@ -171,7 +120,11 @@ hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
         DEBUG_PRINTF("z 0x%08x\n", z);
-        DOUBLE_ZSCAN();
+
+        hwlm_error_t result = double_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
+
     }
     return HWLM_SUCCESS;
 }
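With all four engines converted, scanSingleMain()/scanDoubleMain() share one driver shape on every ISA: a single unaligned pass for short buffers, otherwise an unaligned head up to a CHUNKSIZE boundary, an aligned Fast loop over whole chunks, and an unaligned tail. A structural sketch only; scan_unaligned()/scan_fast() stand in for the scan*Unaligned()/scan*Fast() pairs, chunk is assumed to be a power of two, and the offset arithmetic is simplified from the real code:

    #include <stddef.h>

    extern int scan_unaligned(size_t start, size_t end); /* stand-ins: */
    extern int scan_fast(size_t start, size_t end);      /* 0 == success */

    static int scan_main(size_t offset, size_t end, size_t chunk) {
        if (end - offset <= chunk) {
            return scan_unaligned(offset, end);  /* whole range is short */
        }
        size_t s1 = (offset + chunk - 1) & ~(chunk - 1); /* head boundary */
        int rv = scan_unaligned(offset, s1);
        if (rv) {
            return rv;
        }
        size_t s2 = end & ~(chunk - 1);          /* body: whole chunks */
        rv = scan_fast(s1, s2);
        if (rv) {
            return rv;
        }
        return scan_unaligned(s2, end);          /* tail remainder */
    }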