optimize caseMask handling
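
Note on the change: the AVX2 noodle scanners previously took a bool noCase argument and branched on it before applying caseMask; this commit drops the argument and ANDs every loaded block with caseMask unconditionally, so callers are expected to pass an all-ones mask for case-sensitive literals (making the AND a no-op) and the case-clearing mask for caseless ones. The caller-side change is not visible in the hunks below, so the following is only a sketch of the idea, written with raw AVX2 intrinsics and hypothetical names rather than the project's m256 wrappers:

#include <immintrin.h>
#include <stdbool.h>
#include <stdint.h>

/* Sketch only: branchless case handling. For a caseless literal every
 * byte is ANDed with 0xdf (clears bit 5, folding ASCII 'a'-'z' onto
 * 'A'-'Z'); for a case-sensitive literal the mask is all ones, so the
 * AND is a no-op and no per-block branch is needed. */
static inline __m256i get_case_mask(bool nocase) {
    return _mm256_set1_epi8(nocase ? (char)0xdf : (char)0xff);
}

static inline uint32_t match_block(const uint8_t *d, __m256i case_mask,
                                   __m256i mask1) {
    __m256i v = _mm256_loadu_si256((const __m256i *)d);
    v = _mm256_and_si256(v, case_mask);        /* unconditional masking */
    __m256i eq = _mm256_cmpeq_epi8(mask1, v);  /* per-byte compare */
    return (uint32_t)_mm256_movemask_epi8(eq); /* bit i set => match at d[i] */
}

Since the mask is loop-invariant, folding the case test into it keeps the hot loop uniform: one AND per 32-byte block instead of a noCase check in every scanner.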

Konstantinos Margaritis
2021-02-16 22:10:42 +02:00
parent 26adf6e431
commit da6216e42d
3 changed files with 65 additions and 91 deletions


@@ -39,19 +39,14 @@ static really_inline m256 getCaseMask(void) {
 static really_inline
 hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset, bool noCase,
-                                 m256 caseMask, m256 mask1,
+                                 size_t len, size_t offset,
+                                 m256 caseMask, m256 mask1,
                                  const struct cb_info *cbi, size_t start,
                                  size_t end) {
     const u8 *d = buf + offset;
     DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
     const size_t l = end - start;
-    m256 v = loadu256(d);
-    if (noCase) {
-        v = and256(v, caseMask);
-    }
+    m256 v = and256(loadu256(d), caseMask);
     u32 z = movemask256(eq256(mask1, v));
@@ -68,19 +63,14 @@ hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
 static really_inline
 hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset, bool noCase,
+                                 size_t len, size_t offset,
                                  m256 caseMask, m256 mask1, m256 mask2,
                                  const struct cb_info *cbi, size_t start,
                                  size_t end) {
     const u8 *d = buf + offset;
     DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
     size_t l = end - start;
-    m256 v = loadu256(d);
-    if (noCase) {
-        v = and256(v, caseMask);
-    }
+    m256 v = and256(loadu256(d), caseMask);
     u32 z0 = movemask256(eq256(mask1, v));
     u32 z1 = movemask256(eq256(mask2, v));
@@ -96,13 +86,13 @@ hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
     return HWLM_SUCCESS;
 }
+/*
 // The short scan routine. It is used both to scan data up to an
 // alignment boundary if needed and to finish off data that the aligned scan
 // function can't handle (due to small/unaligned chunk at end)
 static really_inline
 hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, bool noCase, m256 caseMask, m256 mask1,
+                             size_t len, m256 caseMask, m256 mask1,
                              const struct cb_info *cbi, size_t start,
                              size_t end) {
     const u8 *d = buf + start;
@@ -112,7 +102,6 @@ hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
     if (!l) {
         return HWLM_SUCCESS;
     }
-
     m256 v;
     if (l < 4) {
@@ -126,10 +115,7 @@ hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
         v = masked_move256_len(d, l);
     }
-    if (noCase) {
-        v = and256(v, caseMask);
-    }
+    m256 v = and256(v, caseMask);
     // mask out where we can't match
     u32 mask = (0xFFFFFFFF >> (32 - l));
@@ -142,7 +128,7 @@ hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
 static really_inline
 hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, bool noCase, m256 caseMask, m256 mask1,
+                             size_t len, m256 caseMask, m256 mask1,
                              m256 mask2, const struct cb_info *cbi,
                              size_t start, size_t end) {
     const u8 *d = buf + start;
@@ -151,6 +137,8 @@ hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
         return HWLM_SUCCESS;
     }
     assert(l <= 32);
+    u32 mask = (0xFFFFFFFF >> (32 - l));
     m256 v;
     DEBUG_PRINTF("d %zu\n", d - buf);
@@ -164,33 +152,31 @@ hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
     } else {
         v = masked_move256_len(d, l);
     }
-    if (noCase) {
-        v = and256(v, caseMask);
-    }
+    m256 v = and256(v, caseMask);
     u32 z0 = movemask256(eq256(mask1, v));
     u32 z1 = movemask256(eq256(mask2, v));
     u32 z = (z0 << 1) & z1;
-    // mask out where we can't match
-    u32 mask = (0xFFFFFFFF >> (32 - l));
     z &= mask;
     DOUBLE_ZSCAN();
     return HWLM_SUCCESS;
-}
+}*/
 static really_inline
 hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
-                            size_t len, bool noCase, m256 caseMask, m256 mask1,
+                            size_t len, m256 caseMask, m256 mask1,
                             const struct cb_info *cbi, size_t start,
                             size_t end) {
     const u8 *d = buf + start, *e = buf + end;
     assert(d < e);
     for (; d < e; d += 32) {
-        m256 v = noCase ? and256(load256(d), caseMask) : load256(d);
+        m256 v = and256(load256(d), caseMask);
         u32 z = movemask256(eq256(mask1, v));
@@ -204,7 +190,7 @@ hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
 static really_inline
 hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
-                            size_t len, bool noCase, m256 caseMask, m256 mask1,
+                            size_t len, m256 caseMask, m256 mask1,
                             m256 mask2, const struct cb_info *cbi, size_t start,
                             size_t end) {
     const u8 *d = buf + start, *e = buf + end;
@@ -213,7 +199,7 @@ hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
     u32 lastz0 = 0;
     for (; d < e; d += 32) {
-        m256 v = noCase ? and256(load256(d), caseMask) : load256(d);
+        m256 v = and256(load256(d), caseMask);
         // we have to pull the masks out of the AVX registers because we can't
         // byte shift between the lanes
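
For reference, the double-literal scanners combine the per-byte compare results in scalar registers, as the comment above notes, because AVX2 cannot shift bytes across its 128-bit lanes. A minimal sketch of that combine step follows, with illustrative names rather than the project's own: z0 holds the match bits for the first literal byte, z1 for the second, shifting z0 left by one lines first-byte hits up with the byte that follows, and the carried bit covers pairs that straddle a 32-byte block boundary.

#include <stdint.h>

/* Sketch of the scalar combine used by the double-byte scan: a set bit
 * in the returned value marks the position of the second byte of a
 * two-byte match. lastz0 carries bit 31 of the previous block's z0 into
 * bit 0 of the current block, so boundary-straddling matches are kept. */
static inline uint32_t combine_double(uint32_t z0, uint32_t z1,
                                      uint32_t *lastz0) {
    uint32_t z = ((z0 << 1) | *lastz0) & z1;
    *lastz0 = z0 >> 31;
    return z;
}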