refactor shufti algorithm to use SuperVector class, WIP

Konstantinos Margaritis 2021-06-10 13:34:38 +03:00 committed by Konstantinos Margaritis
parent 3ee7b75ee0
commit 23b075cbd4
5 changed files with 508 additions and 1104 deletions


@@ -691,7 +691,7 @@ set (hs_exec_SRCS
src/nfa/sheng_impl.h
src/nfa/sheng_impl4.h
src/nfa/sheng_internal.h
src/nfa/shufti.c
src/nfa/shufti.cpp
src/nfa/shufti.h
src/nfa/tamarama.c
src/nfa/tamarama.h
@@ -753,6 +753,18 @@ set (hs_exec_SRCS
src/database.h
)
if (NOT OPTIMISE)
    if (ARCH_IA32 OR ARCH_X86_64)
        set (hs_exec_SRCS
            ${hs_exec_SRCS}
            src/util/simd/arch/x86/impl.cpp)
    elseif (ARCH_ARM32 OR ARCH_AARCH64)
        set (hs_exec_SRCS
            ${hs_exec_SRCS}
            src/util/simd/arch/arm/impl.cpp)
    endif ()
endif()
set (hs_exec_avx2_SRCS
src/fdr/teddy_avx2.c
src/util/arch/x86/masked_move.c

File diff suppressed because it is too large

127  src/nfa/shufti.cpp  Normal file

@@ -0,0 +1,127 @@
/*
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* \brief Shufti: character class acceleration.
*
* Utilises the SSSE3 pshufb shuffle instruction
*/
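/*
 * The class is encoded as two 16-byte tables: for each input byte c, pshufb
 * looks up the low nibble (c & 0xf) in the lo table and the high nibble
 * (c >> 4) in the hi table, and c is in the class iff the two looked-up
 * bytes share a set bit. For example, for c = 'a' (0x61) the test is
 * lo[0x1] & hi[0x6] != 0.
 */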
#include "shufti.h"
#include "ue2common.h"
#include "util/arch.h"
#include "util/bitutils.h"
#ifdef DEBUG
#include <ctype.h>
#define DUMP_MSK(_t)                                \
static UNUSED                                       \
void dumpMsk##_t(m##_t msk) {                       \
    u8 *mskAsU8 = (u8 *)&msk;                       \
    for (unsigned i = 0; i < sizeof(msk); i++) {    \
        u8 c = mskAsU8[i];                          \
        for (int j = 0; j < 8; j++) {               \
            if ((c >> (7 - j)) & 0x1)               \
                printf("1");                        \
            else                                    \
                printf("0");                        \
        }                                           \
        printf(" ");                                \
    }                                               \
}                                                   \
static UNUSED                                       \
void dumpMsk##_t##AsChars(m##_t msk) {              \
    u8 *mskAsU8 = (u8 *)&msk;                       \
    for (unsigned i = 0; i < sizeof(msk); i++) {    \
        u8 c = mskAsU8[i];                          \
        if (isprint(c))                             \
            printf("%c", c);                        \
        else                                        \
            printf(".");                            \
    }                                               \
}
#endif
#ifdef DEBUG
DUMP_MSK(128)
#endif
/** \brief Naive byte-by-byte implementation. */
static really_inline
const u8 *shuftiFwdSlow(const u8 *lo, const u8 *hi, const u8 *buf,
                        const u8 *buf_end) {
    assert(buf < buf_end);

    DEBUG_PRINTF("buf %p end %p \n", buf, buf_end);
    for (; buf < buf_end; ++buf) {
        u8 c = *buf;
        // in the class iff the low- and high-nibble table entries share a bit
        if (lo[c & 0xf] & hi[c >> 4]) {
            break;
        }
    }
    return buf;
}
/** \brief Naive byte-by-byte implementation. */
static really_inline
const u8 *shuftiRevSlow(const u8 *lo, const u8 *hi, const u8 *buf,
                        const u8 *buf_end) {
    assert(buf < buf_end);

    // walks backwards; returns the last matching byte, or buf - 1 if none
    for (buf_end--; buf_end >= buf; buf_end--) {
        u8 c = *buf_end;
        if (lo[c & 0xf] & hi[c >> 4]) {
            break;
        }
    }
    return buf_end;
}
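/*
 * A usage sketch: the nibble tables are built at pattern-compile time (see
 * shuftiBuildMasks() in shufticompile.cpp). At scan time,
 * shuftiExec(lo, hi, buf, buf_end) returns a pointer to the first byte of
 * [buf, buf_end) in the class, or buf_end if there is none; rshuftiExec()
 * scans backwards and returns buf - 1 on no match.
 */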
#if !defined(HAVE_SVE)
#include "shufti_simd.hpp"
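// VECTORSIZE is assumed to be defined by the SIMD utility headers as the
// native vector width in bytes for the target (e.g. 16 for SSE/NEON).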
const u8 *shuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
                     const u8 *buf_end) {
    return shuftiExecReal<VECTORSIZE>(mask_lo, mask_hi, buf, buf_end);
}

const u8 *rshuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
                      const u8 *buf_end) {
    return rshuftiExecReal<VECTORSIZE>(mask_lo, mask_hi, buf, buf_end);
}

const u8 *shuftiDoubleExec(m128 mask1_lo, m128 mask1_hi,
                           m128 mask2_lo, m128 mask2_hi,
                           const u8 *buf, const u8 *buf_end) {
    return shuftiDoubleExecReal<VECTORSIZE>(mask1_lo, mask1_hi, mask2_lo, mask2_hi, buf, buf_end);
}
#endif


@@ -36,7 +36,7 @@
#define SHUFTI_H
#include "ue2common.h"
#include "util/simd_utils.h"
#include "util/simd_types.h"
#ifdef __cplusplus
extern "C"

367  src/nfa/shufti_simd.hpp  Normal file

@@ -0,0 +1,367 @@
/*
* Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* \brief Shufti: character class acceleration.
*
* Utilises the SSSE3 pshufb shuffle instruction
*/
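/*
 * This header holds the SuperVector-templated implementation; shufti.cpp
 * includes it and instantiates the scanners at the native vector width.
 */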
#include "shufti.h"
#include "ue2common.h"
#include "util/arch.h"
#include "util/bitutils.h"
#include "util/unaligned.h"
#include "util/simd/types.hpp"
// NOTE: scalar m128 helpers carried over from shufti.c; unused by the
// SuperVector code below.
#define GET1_LO_4(chars, low4bits) and128(chars, low4bits)
#define GET1_HI_4(chars, low4bits) and128(rshift64_m128(chars, 4), low4bits)
template <uint16_t S>
static really_inline
typename SuperVector<S>::movemask_type block(SuperVector<S> mask_lo, SuperVector<S> mask_hi,
                                             SuperVector<S> chars, const SuperVector<S> low4bits) {
    // look up the low nibble of each byte in mask_lo and the high nibble in
    // mask_hi; a byte is in the class iff both lookups share a set bit
    SuperVector<S> c_lo = mask_lo.pshufb(chars & low4bits);
    SuperVector<S> c_hi = mask_hi.pshufb(chars.rshift64(4) & low4bits);
    SuperVector<S> t = c_lo & c_hi;

    print_m128_16x8("low4bits", low4bits.u.v128[0]);
    print_m128_16x8("mask_lo", mask_lo.u.v128[0]);
    print_m128_16x8("mask_hi", mask_hi.u.v128[0]);
    print_m128_16x8("chars", chars.u.v128[0]);
    print_m128_16x8("c_lo", c_lo.u.v128[0]);
    print_m128_16x8("c_hi", c_hi.u.v128[0]);
    print_m128_16x8("t", t.u.v128[0]);

    // the movemask has a set bit for every lane where t is zero, i.e. for
    // every byte that is NOT in the class
    return t.eqmask(SuperVector<S>::Zeroes());
}
template <uint16_t S>
const u8 *firstMatch(const u8 *buf, typename SuperVector<S>::movemask_type z);
template <uint16_t S>
const u8 *lastMatch(const u8 *buf, typename SuperVector<S>::movemask_type z);
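/*
 * Convention for the movemask produced by block(): a set bit marks a
 * non-matching lane, so firstMatch()/lastMatch() look for the first/last
 * clear bit in z.
 */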
template <>
really_inline
const u8 *firstMatch<16>(const u8 *buf, typename SuperVector<16>::movemask_type z) {
    DEBUG_PRINTF("buf %p z %08x\n", buf, z);
    if (unlikely(z != 0xffff)) {
        u32 pos = ctz32(~z & 0xffff);
        DEBUG_PRINTF("~z %08x\n", ~z);
        DEBUG_PRINTF("match @ pos %u\n", pos);
        assert(pos < 16);
        return buf + pos;
    } else {
        return NULL; // no match
    }
}
template <>
really_inline
const u8 *firstMatch<64>(const u8 *buf, typename SuperVector<64>::movemask_type z) {
    DEBUG_PRINTF("z 0x%016llx\n", z);
    if (unlikely(z != ~0ULL)) {
        u32 pos = ctz64(~z);
        DEBUG_PRINTF("match @ pos %u\n", pos);
        assert(pos < 64);
        return buf + pos;
    } else {
        return NULL; // no match
    }
}
template <uint16_t S>
static really_inline
const u8 *fwdBlock(SuperVector<S> mask_lo, SuperVector<S> mask_hi, SuperVector<S> chars,
                   const SuperVector<S> low4bits, const u8 *buf) {
    typename SuperVector<S>::movemask_type z = block(mask_lo, mask_hi, chars, low4bits);
    DEBUG_PRINTF("z %08x\n", z);
    return firstMatch<S>(buf, z);
}
template <uint16_t S>
static really_inline
const u8 *shortShufti(SuperVector<S> mask_lo, SuperVector<S> mask_hi, const u8 *buf,
                      const u8 *buf_end, const SuperVector<S> low4bits) {
    // WIP: not wired up yet -- the call sites below still use the scalar
    // slow path. Handles a sub-vector-length buffer with a masked load;
    // note the valid-byte masking still assumes a 16-byte vector.
    DEBUG_PRINTF("short shufti %p len %zu\n", buf, buf_end - buf);
    uintptr_t len = buf_end - buf;
    assert(len <= S);

    SuperVector<S> chars = SuperVector<S>::loadu_maskz(buf, static_cast<uint8_t>(len));
    print_m128_16x8("chars", chars.u.v128[0]);

    uint8_t alignment = (uintptr_t)(buf) & 15;
    typename SuperVector<S>::movemask_type maskb = 1 << alignment;
    typename SuperVector<S>::movemask_type maske = SINGLE_LOAD_MASK(len - alignment);
    typename SuperVector<S>::movemask_type z = block(mask_lo, mask_hi, chars, low4bits);
    DEBUG_PRINTF("z %08x\n", z);
    // reuse the load mask to restrict the result to the valid bytes
    z &= maskb | maske;
    DEBUG_PRINTF("z %08x\n", z);
    return firstMatch<S>(buf, z);
}
template <>
really_inline
const u8 *lastMatch<16>(const u8 *buf, typename SuperVector<16>::movemask_type z) {
    DEBUG_PRINTF("buf %p z %08x\n", buf, z);
    if (unlikely(z != 0xffff)) {
        // clz32 of a 16-bit mask yields a count in [16, 31]; map it back to
        // the byte offset of the last clear bit in z
        u32 pos = clz32(~z & 0xffff);
        DEBUG_PRINTF("~z %08x\n", ~z);
        DEBUG_PRINTF("match @ pos %u\n", pos);
        assert(pos >= 16 && pos < 32);
        return buf + (31 - pos);
    } else {
        return NULL; // no match
    }
}
template <>
really_inline
const u8 *lastMatch<64>(const u8 *buf, typename SuperVector<64>::movemask_type z) {
    DEBUG_PRINTF("z 0x%016llx\n", z);
    if (unlikely(z != ~0ULL)) {
        u32 pos = clz64(~z);
        DEBUG_PRINTF("match @ pos %u\n", pos);
        assert(pos < 64);
        // clz counts from the most significant end, so the last clear bit of
        // z sits at byte offset 63 - pos
        return buf + (63 - pos);
    } else {
        return NULL; // no match
    }
}
template <uint16_t S>
static really_inline
const u8 *revBlock(SuperVector<S> mask_lo, SuperVector<S> mask_hi, SuperVector<S> chars,
                   const SuperVector<S> low4bits, const u8 *buf) {
    typename SuperVector<S>::movemask_type z = block(mask_lo, mask_hi, chars, low4bits);
    DEBUG_PRINTF("z %08x\n", z);
    return lastMatch<S>(buf, z);
}
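/*
 * Forward scan: run the scalar loop up to the first S-aligned address,
 * process one vector per iteration from aligned pointers, then finish the
 * sub-vector tail with the scalar loop again.
 */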
template <uint16_t S>
const u8 *shuftiExecReal(m128 mask_lo, m128 mask_hi, const u8 *buf, const u8 *buf_end) {
    assert(buf && buf_end);
    assert(buf < buf_end);
    DEBUG_PRINTF("shufti %p len %zu\n", buf, buf_end - buf);
    DEBUG_PRINTF("b %s\n", buf);

    const SuperVector<S> low4bits = SuperVector<S>::set1u_16x8(0xf);
    const SuperVector<S> wide_mask_lo(mask_lo);
    const SuperVector<S> wide_mask_hi(mask_hi);

    const u8 *d = buf;
    const u8 *rv;

    DEBUG_PRINTF("start %p end %p \n", d, buf_end);
    assert(d < buf_end);

    if (d + S <= buf_end) {
        // peel off first part to cacheline boundary
        const u8 *d1 = ROUNDUP_PTR(d, S);
        DEBUG_PRINTF("until aligned %p \n", d1);
        if (d1 != d) {
            rv = shuftiFwdSlow((const u8 *)&mask_lo, (const u8 *)&mask_hi, d, d1);
            // rv = shortShufti(wide_mask_lo, wide_mask_hi, d, d1, low4bits);
            if (rv != d1) {
                return rv;
            }
            d = d1;
        }

        size_t loops = (buf_end - d) / S;
        DEBUG_PRINTF("loops %zu\n", loops);

        for (size_t i = 0; i < loops; i++, d += S) {
            DEBUG_PRINTF("d %p \n", d);
            const u8 *base = ROUNDUP_PTR(d, S);
            // On large packet buffers, this prefetch appears to get us about 2%.
            __builtin_prefetch(base + 256);

            SuperVector<S> chars = SuperVector<S>::load(d);
            rv = fwdBlock(wide_mask_lo, wide_mask_hi, chars, low4bits, d);
            if (rv) return rv;
        }
    }

    DEBUG_PRINTF("d %p e %p \n", d, buf_end);
    // finish off sub-vector tail with the scalar loop
    rv = buf_end;
    if (d != buf_end) {
        rv = shuftiFwdSlow((const u8 *)&mask_lo, (const u8 *)&mask_hi, d, buf_end);
        // rv = shortShufti(wide_mask_lo, wide_mask_hi, buf_end - S, buf_end, low4bits);
        DEBUG_PRINTF("rv %p \n", rv);
    }

    return rv;
}
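/*
 * Reverse scan: returns a pointer to the last matching byte in
 * [buf, buf_end), or buf - 1 when there is none.
 */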
template <uint16_t S>
const u8 *rshuftiExecReal(m128 mask_lo, m128 mask_hi, const u8 *buf, const u8 *buf_end) {
    assert(buf && buf_end);
    assert(buf < buf_end);
    DEBUG_PRINTF("rshufti %p len %zu\n", buf, buf_end - buf);
    DEBUG_PRINTF("b %s\n", buf);

    const SuperVector<S> low4bits = SuperVector<S>::set1u_16x8(0xf);
    const SuperVector<S> wide_mask_lo(mask_lo);
    const SuperVector<S> wide_mask_hi(mask_hi);

    const u8 *d = buf_end;
    const u8 *rv;

    DEBUG_PRINTF("start %p end %p \n", buf, d);
    assert(d > buf);

    if (d - S >= buf) {
        // peel off last part to cacheline boundary
        const u8 *d1 = ROUNDDOWN_PTR(d, S);
        DEBUG_PRINTF("until aligned %p \n", d1);
        if (d1 != d) {
            rv = shuftiRevSlow((const u8 *)&mask_lo, (const u8 *)&mask_hi, d1, d);
            DEBUG_PRINTF("rv %p \n", rv);
            // rv = shortShufti(wide_mask_lo, wide_mask_hi, d, d1, low4bits);
            if (rv != d1 - 1) return rv;
            d = d1;
        }

        while (d - S >= buf) {
            d -= S;
            DEBUG_PRINTF("d %p \n", d);
            const u8 *base = ROUNDDOWN_PTR(d, S);
            // On large packet buffers, this prefetch appears to get us about 2%;
            // on the reverse path, prefetch behind the cursor.
            __builtin_prefetch(base - 256);

            SuperVector<S> chars = SuperVector<S>::load(d);
            rv = revBlock(wide_mask_lo, wide_mask_hi, chars, low4bits, d);
            if (rv) return rv;
        }
    }

    DEBUG_PRINTF("d %p e %p \n", buf, d);
    // finish off sub-vector head with the scalar loop
    if (d != buf) {
        rv = shuftiRevSlow((const u8 *)&mask_lo, (const u8 *)&mask_hi, buf, d);
        // rv = shortShufti(wide_mask_lo, wide_mask_hi, buf_end - S, buf_end, low4bits);
        DEBUG_PRINTF("rv %p \n", rv);
        if (rv != d - 1) return rv;
    }

    return buf - 1;
}
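/*
 * Double shufti matches a pair of consecutive bytes: mask1 classifies the
 * first byte of the pair and mask2 the second. Note that the masks are
 * built inverted relative to single shufti: a lane equal to all-ones means
 * "no match", hence the eqmask against Ones() below.
 */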
template <uint16_t S>
static really_inline
const u8 *fwdBlockDouble(SuperVector<S> mask1_lo, SuperVector<S> mask1_hi, SuperVector<S> mask2_lo, SuperVector<S> mask2_hi,
                         SuperVector<S> chars, const SuperVector<S> low4bits, const u8 *buf) {
    SuperVector<S> chars_lo = chars & low4bits;
    SuperVector<S> chars_hi = chars.rshift64(4) & low4bits;

    SuperVector<S> c1_lo = mask1_lo.pshufb(chars_lo);
    SuperVector<S> c1_hi = mask1_hi.pshufb(chars_hi);
    SuperVector<S> t1 = c1_lo | c1_hi;

    SuperVector<S> c2_lo = mask2_lo.pshufb(chars_lo);
    SuperVector<S> c2_hi = mask2_hi.pshufb(chars_hi);
    SuperVector<S> t2 = c2_lo | c2_hi;

    // shift the second-byte result back one lane so that both byte tests
    // land on the lane of the first byte of the pair
    SuperVector<S> t = t1 | (t2 >> 1);

    typename SuperVector<S>::movemask_type z = t.eqmask(SuperVector<S>::Ones());
    DEBUG_PRINTF(" z: 0x%08x\n", z);
    return firstMatch<S>(buf, z);
}
template <uint16_t S>
const u8 *shuftiDoubleExecReal(m128 mask1_lo, m128 mask1_hi,
                               m128 mask2_lo, m128 mask2_hi,
                               const u8 *buf, const u8 *buf_end) {
    assert(buf && buf_end);
    assert(buf < buf_end);
    DEBUG_PRINTF("double shufti %p len %zu\n", buf, buf_end - buf);
    DEBUG_PRINTF("b %s\n", buf);

    const SuperVector<S> low4bits = SuperVector<S>::set1u_16x8(0xf);
    const SuperVector<S> wide_mask1_lo(mask1_lo);
    const SuperVector<S> wide_mask1_hi(mask1_hi);
    const SuperVector<S> wide_mask2_lo(mask2_lo);
    const SuperVector<S> wide_mask2_hi(mask2_hi);

    const u8 *d = buf;
    const u8 *rv;

    DEBUG_PRINTF("start %p end %p \n", d, buf_end);
    assert(d < buf_end);

    if (d + S <= buf_end) {
        // peel off first part to cacheline boundary
        const u8 *d1 = ROUNDUP_PTR(d, S);
        DEBUG_PRINTF("until aligned %p \n", d1);
        if (d1 != d) {
            SuperVector<S> chars = SuperVector<S>::loadu(d);
            rv = fwdBlockDouble(wide_mask1_lo, wide_mask1_hi, wide_mask2_lo, wide_mask2_hi, chars, low4bits, d);
            if (rv) return rv;
            d = d1;
        }

        size_t loops = (buf_end - d) / S;
        DEBUG_PRINTF("loops %zu\n", loops);

        for (size_t i = 0; i < loops; i++, d += S) {
            DEBUG_PRINTF("d %p \n", d);
            const u8 *base = ROUNDUP_PTR(d, S);
            // On large packet buffers, this prefetch appears to get us about 2%.
            __builtin_prefetch(base + 256);

            SuperVector<S> chars = SuperVector<S>::load(d);
            rv = fwdBlockDouble(wide_mask1_lo, wide_mask1_hi, wide_mask2_lo, wide_mask2_hi, chars, low4bits, d);
            if (rv) return rv;
        }
    }

    DEBUG_PRINTF("d %p e %p \n", d, buf_end);
    // finish off tail with one unaligned vector ending at buf_end.
    // NOTE (WIP): this assumes buf_end - buf >= S; a shorter buffer would
    // read before buf.
    if (d != buf_end) {
        SuperVector<S> chars = SuperVector<S>::loadu(buf_end - S);
        rv = fwdBlockDouble(wide_mask1_lo, wide_mask1_hi, wide_mask2_lo, wide_mask2_hi, chars, low4bits, buf_end - S);
        DEBUG_PRINTF("rv %p \n", rv);
        if (rv) return rv;
    }

    return buf_end;
}