optimize caseMask handling

Konstantinos Margaritis
2021-02-16 22:10:42 +02:00
parent 2eab2ec47b
commit d0c3764a4c
3 changed files with 65 additions and 91 deletions
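What the diff below does: each scan helper previously re-derived its effective mask with `m128 mask128 = noCase ? caseMask : ones128();` on every call; after this change the helpers drop the `bool noCase` parameter and apply the caller-supplied `caseMask` unconditionally, and the short scan variants are commented out. A minimal, self-contained sketch of that pattern follows, using plain SSE2 intrinsics and illustrative names rather than the project's m128 wrappers; the caller-side code is not part of this diff, so treat it as an assumption.

/* Sketch only: plain SSE2, illustrative names, not the project's API. */
#include <emmintrin.h>
#include <stdbool.h>
#include <stdint.h>

/* 0xdf clears the 0x20 bit, folding ASCII 'a'..'z' onto 'A'..'Z'. */
static inline __m128i get_case_mask(void) {
    return _mm_set1_epi8((char)0xdf);
}

/* The helper no longer branches on a noCase flag; it applies whatever mask
 * the caller chose (case-folding mask or all-ones). */
static inline uint32_t scan_block(const uint8_t *d, __m128i case_mask,
                                  __m128i pattern) {
    __m128i v = _mm_and_si128(_mm_loadu_si128((const __m128i *)d), case_mask);
    return (uint32_t)_mm_movemask_epi8(_mm_cmpeq_epi8(pattern, v));
}

/* The caller evaluates the noCase decision exactly once and passes the
 * resulting mask down to the hot loop. */
static uint32_t scan16(const uint8_t *d, uint8_t c, bool nocase) {
    const __m128i case_mask = nocase ? get_case_mask()
                                     : _mm_set1_epi8((char)0xff);
    const __m128i pattern = _mm_set1_epi8((char)(nocase ? (c & 0xdf) : c));
    return scan_block(d, case_mask, pattern);
}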


@@ -36,10 +36,10 @@ static really_inline m128 getMask(u8 c, bool noCase) {
static really_inline m128 getCaseMask(void) {
return set1_16x8(0xdf);
}
/*
static really_inline
hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
size_t len, bool noCase, m128 caseMask, m128 mask1,
size_t len, m128 caseMask, m128 mask1,
const struct cb_info *cbi, size_t start,
size_t end) {
const u8 *d = buf + start;
@@ -49,22 +49,20 @@ hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
if (!l) {
return HWLM_SUCCESS;
}
m128 mask128 = noCase ? caseMask : ones128();
m128 v = and128(loadu128(d), mask128);
m128 v = and128(loadu128(d), caseMask);
// mask out where we can't match
u32 mask = (0xFFFF >> (16 - l));
u32 z = mask & movemask128(eq128(mask1, v));
SINGLE_ZSCAN();
return HWLM_SUCCESS;
}
}*/
static really_inline
hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
size_t len, size_t offset, bool noCase,
size_t len, size_t offset,
m128 caseMask, m128 mask1,
const struct cb_info *cbi, size_t start,
size_t end) {
@@ -72,26 +70,22 @@ hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
const size_t l = end - start;
m128 mask128 = noCase ? caseMask : ones128();
m128 v = and128(loadu128(d), mask128);
m128 v = and128(loadu128(d), caseMask);
u32 buf_off = start - offset;
u32 mask = ((1 << l) - 1) << buf_off;
DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
u32 z = mask & movemask128(eq128(mask1, v));
DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
z &= mask;
SINGLE_ZSCAN();
return HWLM_SUCCESS;
}
/*
static really_inline
hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
size_t len, bool noCase, m128 caseMask, m128 mask1,
size_t len, m128 caseMask, m128 mask1,
m128 mask2, const struct cb_info *cbi,
size_t start, size_t end) {
const u8 *d = buf + start;
@@ -102,42 +96,36 @@ hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
assert(l <= 32);
DEBUG_PRINTF("d %zu\n", d - buf);
m128 mask128 = noCase ? caseMask : ones128();
m128 v = and128(loadu128(d), mask128);
u32 z = movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
eq128(mask2, v)));
m128 v = and128(loadu128(d), caseMask);
// mask out where we can't match
u32 mask = (0xFFFF >> (16 - l));
z &= mask;
u32 z = mask & movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
eq128(mask2, v)));
DOUBLE_ZSCAN();
return HWLM_SUCCESS;
}
}*/
static really_inline
hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
size_t len, size_t offset, bool noCase,
size_t len, size_t offset,
m128 caseMask, m128 mask1, m128 mask2,
const struct cb_info *cbi, size_t start,
size_t end) {
const u8 *d = buf + offset;
DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
size_t l = end - start;
u32 buf_off = start - offset;
m128 mask128 = noCase ? caseMask : ones128();
m128 v = and128(loadu128(d), mask128);
u32 z = movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
eq128(mask2, v)));
m128 v = and128(loadu128(d), caseMask);
// mask out where we can't match
u32 buf_off = start - offset;
u32 mask = ((1 << l) - 1) << buf_off;
DEBUG_PRINTF("mask 0x%08x z 0x%08x\n", mask, z);
z &= mask;
u32 z = mask & movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
eq128(mask2, v)));
DOUBLE_ZSCAN();
@@ -146,16 +134,14 @@ hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
static really_inline
hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
size_t len, bool noCase, m128 caseMask, m128 mask1,
size_t len, m128 caseMask, m128 mask1,
const struct cb_info *cbi, size_t start,
size_t end) {
const u8 *d = buf + start, *e = buf + end;
assert(d < e);
m128 mask128 = noCase ? caseMask : ones128();
for (; d < e; d += 16) {
m128 v = and128(load128(d), mask128);
m128 v = and128(load128(d), caseMask);
u32 z = movemask128(eq128(mask1, v));
// On large packet buffers, this prefetch appears to get us about 2%.
@@ -168,16 +154,15 @@ hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
static really_inline
hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
size_t len, bool noCase, m128 caseMask, m128 mask1,
size_t len, m128 caseMask, m128 mask1,
m128 mask2, const struct cb_info *cbi, size_t start,
size_t end) {
const u8 *d = buf + start, *e = buf + end;
assert(d < e);
m128 lastz1 = zeroes128();
m128 mask128 = noCase ? caseMask : ones128();
for (; d < e; d += 16) {
m128 v = and128(load128(d), mask128);
m128 v = and128(load128(d), caseMask);
m128 z1 = eq128(mask1, v);
m128 z2 = eq128(mask2, v);
u32 z = movemask128(and128(palignr(z1, lastz1, 15), z2));
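The short and unaligned variants above clip the movemask result to the valid byte lanes with plain integer masks: `0xFFFF >> (16 - l)` keeps the first l lanes, and `((1 << l) - 1) << buf_off` keeps l lanes starting at lane buf_off. A small standalone check of that arithmetic, with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void) {
    /* Short scan: only the first l of 16 lanes lie inside the buffer. */
    uint32_t l = 5;
    uint32_t short_mask = 0xFFFFu >> (16 - l);             /* == 0x001f */
    assert(short_mask == 0x1fu);

    /* Unaligned scan: l valid lanes starting at lane buf_off. */
    uint32_t buf_off = 3;
    uint32_t unaligned_mask = ((1u << l) - 1) << buf_off;  /* == 0x00f8 */
    assert(unaligned_mask == 0xf8u);

    return 0;
}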