simplify and make scanSingle*()/scanDouble*() more uniform

Konstantinos Margaritis
2021-02-19 12:16:43 +02:00
parent d0c3764a4c
commit afe7061860
4 changed files with 117 additions and 281 deletions
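
The hunks below swap the SINGLE_ZSCAN()/DOUBLE_ZSCAN() macros for calls to single_zscan()/double_zscan(), helpers that live in one of the other changed files not shown on this page. As a rough sketch of their likely shape — reconstructed from the SINGLE_ZSCAN() macro they replace in the upstream noodle engine, so the exact body and signature are assumptions — single_zscan plausibly walks the match bitmask like this:

    /* Hedged sketch, not the committed code: report each set bit of z
     * as a match, translating the bit position back to a buffer offset.
     * findAndClearLSB_32(), final() and RETURN_IF_TERMINATED() are the
     * existing noodle-engine helpers the old macro used. */
    static really_inline
    hwlm_error_t single_zscan(const struct noodTable *n, const u8 *d,
                              const u8 *buf, u32 z, size_t len,
                              const struct cb_info *cbi) {
        while (unlikely(z)) {
            u32 pos = findAndClearLSB_32(&z);  /* offset of next set bit */
            size_t matchPos = d - buf + pos;   /* position in scanned buffer */
            DEBUG_PRINTF("match pos %zu\n", matchPos);
            hwlmcb_rv_t rv = final(n, buf, len, n->msk_len != 1, cbi, matchPos);
            RETURN_IF_TERMINATED(rv);
        }
        return HWLM_SUCCESS;
    }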


@@ -36,34 +36,10 @@ static really_inline m128 getMask(u8 c, bool noCase) {
 static really_inline m128 getCaseMask(void) {
     return set1_16x8(0xdf);
 }
-/*
-static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, m128 caseMask, m128 mask1,
-                             const struct cb_info *cbi, size_t start,
-                             size_t end) {
-    const u8 *d = buf + start;
-    size_t l = end - start;
-    DEBUG_PRINTF("l %zu\n", l);
-    assert(l <= 16);
-    if (!l) {
-        return HWLM_SUCCESS;
-    }
-    m128 v = and128(loadu128(d), caseMask);
-    // mask out where we can't match
-    u32 mask = (0xFFFF >> (16 - l));
-    u32 z = mask & movemask128(eq128(mask1, v));
-    SINGLE_ZSCAN();
-    return HWLM_SUCCESS;
-}*/
 static really_inline
 hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset,
-                                 m128 caseMask, m128 mask1,
+                                 size_t len, size_t offset, m128 caseMask, m128 mask1,
                                  const struct cb_info *cbi, size_t start,
                                  size_t end) {
     const u8 *d = buf + offset;
@@ -74,39 +50,11 @@ hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
     u32 buf_off = start - offset;
     u32 mask = ((1 << l) - 1) << buf_off;
-    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
+    u32 z = mask & movemask128(eq128(mask1, v));
+    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
-    SINGLE_ZSCAN();
-    return HWLM_SUCCESS;
+    return single_zscan(n, d, buf, z, len, cbi);
 }
-/*
-static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, m128 caseMask, m128 mask1,
-                             m128 mask2, const struct cb_info *cbi,
-                             size_t start, size_t end) {
-    const u8 *d = buf + start;
-    size_t l = end - start;
-    if (!l) {
-        return HWLM_SUCCESS;
-    }
-    assert(l <= 32);
-    DEBUG_PRINTF("d %zu\n", d - buf);
-    m128 v = and128(loadu128(d), caseMask);
-    // mask out where we can't match
-    u32 mask = (0xFFFF >> (16 - l));
-    u32 z = mask & movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
-                                      eq128(mask2, v)));
-    DOUBLE_ZSCAN();
-    return HWLM_SUCCESS;
-}*/
 static really_inline
 hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
@@ -123,13 +71,11 @@ hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
     // mask out where we can't match
     u32 mask = ((1 << l) - 1) << buf_off;
-    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
+    u32 z = mask & movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
+                                      eq128(mask2, v)));
+    DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
-    DOUBLE_ZSCAN();
-    return HWLM_SUCCESS;
+    return double_zscan(n, d, buf, z, len, cbi);
 }

 static really_inline
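For readers following the scanDoubleUnaligned hunk: the two-character match mask is built by shifting the first character's equality bits up one lane and ANDing with the second character's, so bit i ends up set exactly when the byte before position i matches the first character and the byte at i matches the second. A scalar model of the same idea (illustrative only, not code from this tree):

    #include <stddef.h>
    #include <stdint.h>

    /* Scalar model of the SIMD double-character match: bit i of the
     * result is set when p[i-1] == c1 and p[i] == c2, i.e. a match for
     * the pair ends at byte i. (eq1 << 1) plays the role of
     * lshiftbyte_m128(eq128(mask1, v), 1) in the vector code. */
    static uint32_t double_match_mask(const unsigned char *p, size_t n,
                                      unsigned char c1, unsigned char c2) {
        uint32_t eq1 = 0, eq2 = 0;
        for (size_t i = 0; i < n && i < 32; i++) {
            eq1 |= (uint32_t)(p[i] == c1) << i;  /* first-char equality bits */
            eq2 |= (uint32_t)(p[i] == c2) << i;  /* second-char equality bits */
        }
        return (eq1 << 1) & eq2;
    }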
@@ -146,8 +92,11 @@ hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
         DEBUG_PRINTF("z 0x%08x\n", z);
-        SINGLE_ZSCAN();
+        hwlm_error_t result = single_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
     }
     return HWLM_SUCCESS;
 }
@@ -171,7 +120,11 @@ hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
         DEBUG_PRINTF("z 0x%08x\n", z);
-        DOUBLE_ZSCAN();
+        hwlm_error_t result = double_zscan(n, d, buf, z, len, cbi);
+        if (unlikely(result != HWLM_SUCCESS))
+            return result;
     }
     return HWLM_SUCCESS;
 }
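
Judging from the hunks above, every scanner is left with the same tail: build the match bitmask z, then either return single_zscan()/double_zscan() directly (the Unaligned variants) or, in the Fast loops, propagate any non-HWLM_SUCCESS result out of the loop. The old SINGLE_ZSCAN()/DOUBLE_ZSCAN() macros could return from the middle of the caller via RETURN_IF_TERMINATED; helpers that return an hwlm_error_t make that termination path explicit at each call site.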