diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7d12e2f2..e3b5a2ee 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -632,9 +632,9 @@ set (hs_exec_SRCS
     src/nfa/tamarama_internal.h
     src/nfa/truffle.cpp
     src/nfa/truffle.h
-    src/nfa/vermicelli.h
+    src/nfa/vermicelli.hpp
     src/nfa/vermicelli_run.h
-    src/nfa/vermicelli_sse.h
+    src/nfa/vermicelli_simd.cpp
     src/som/som.h
     src/som/som_operation.h
     src/som/som_runtime.h
diff --git a/benchmarks/benchmarks.cpp b/benchmarks/benchmarks.cpp
index 49990bd7..91cab3f8 100644
--- a/benchmarks/benchmarks.cpp
+++ b/benchmarks/benchmarks.cpp
@@ -191,6 +191,34 @@ int main(){
         );
     }
 
+    for (size_t i = 0; i < std::size(sizes); i++) {
+        MicroBenchmark bench("Vermicelli", sizes[i]);
+        run_benchmarks(sizes[i], MAX_LOOPS / sizes[i], matches[m], false, bench,
+                       [&](MicroBenchmark &b) {
+                           b.chars.set('a');
+                           ue2::truffleBuildMasks(b.chars, (u8 *)&b.lo, (u8 *)&b.hi);
+                           memset(b.buf.data(), 'b', b.size);
+                       },
+                       [&](MicroBenchmark &b) {
+                           return vermicelliExec('a', 'b', b.buf.data(), b.buf.data() + b.size);
+                       }
+        );
+    }
+
+    for (size_t i = 0; i < std::size(sizes); i++) {
+        MicroBenchmark bench("Reverse Vermicelli", sizes[i]);
+        run_benchmarks(sizes[i], MAX_LOOPS / sizes[i], matches[m], true, bench,
+                       [&](MicroBenchmark &b) {
+                           b.chars.set('a');
+                           ue2::truffleBuildMasks(b.chars, (u8 *)&b.lo, (u8 *)&b.hi);
+                           memset(b.buf.data(), 'b', b.size);
+                       },
+                       [&](MicroBenchmark &b) {
+                           return rvermicelliExec('a', 'b', b.buf.data(), b.buf.data() + b.size);
+                       }
+        );
+    }
+
     for (size_t i = 0; i < std::size(sizes); i++) {
         //we imitate the noodle unit tests
         std::string str;
diff --git a/benchmarks/benchmarks.hpp b/benchmarks/benchmarks.hpp
index 37326523..974d2234 100644
--- a/benchmarks/benchmarks.hpp
+++ b/benchmarks/benchmarks.hpp
@@ -30,6 +30,7 @@
 #include "nfa/shufticompile.h"
 #include "nfa/truffle.h"
 #include "nfa/trufflecompile.h"
+#include "nfa/vermicelli.hpp"
 #include "hwlm/noodle_build.h"
 #include "hwlm/noodle_engine.h"
 #include "hwlm/noodle_internal.h"
diff --git a/src/hwlm/hwlm.c b/src/hwlm/hwlm.c
index c1c2837f..e50deff7 100644
--- a/src/hwlm/hwlm.c
+++ b/src/hwlm/hwlm.c
@@ -39,7 +39,7 @@
 #include "nfa/accel.h"
 #include "nfa/shufti.h"
 #include "nfa/truffle.h"
-#include "nfa/vermicelli.h"
+#include "nfa/vermicelli.hpp"
 #include <string.h>
 
 #define MIN_ACCEL_LEN_BLOCK 16
diff --git a/src/hwlm/noodle_engine_simd.hpp b/src/hwlm/noodle_engine_simd.hpp
index d5f6a8d0..dfe7eea1 100644
--- a/src/hwlm/noodle_engine_simd.hpp
+++ b/src/hwlm/noodle_engine_simd.hpp
@@ -30,26 +30,7 @@
 /* SIMD engine agnostic noodle scan parts */
 
 #include "util/supervector/supervector.hpp"
-
-static u8 CASEMASK[] = { 0xff, 0xdf };
-
-static really_inline
-u8 caseClear8(u8 x, bool noCase)
-{
-    return static_cast<u8>(x & CASEMASK[(u8)noCase]);
-}
-
-template <uint16_t S>
-static really_inline SuperVector<S> getMask(u8 c, bool noCase) {
-    u8 k = caseClear8(c, noCase);
-    return SuperVector<S>(k);
-}
-
-template <uint16_t S>
-static really_inline SuperVector<S> getCaseMask(void) {
-    return SuperVector<S>(CASEMASK[1]);
-}
-
+#include "util/supervector/casemask.hpp"
 
 static really_really_inline
 hwlm_error_t single_zscan(const struct noodTable *n,const u8 *d, const u8 *buf,
diff --git a/src/nfa/accel.c b/src/nfa/accel.c
index 34bd24a9..7661b7a7 100644
--- a/src/nfa/accel.c
+++ b/src/nfa/accel.c
@@ -30,7 +30,7 @@
 #include "accel.h"
 #include "shufti.h"
 #include "truffle.h"
-#include "vermicelli.h"
+#include "vermicelli.hpp"
 #include "ue2common.h"
 
 const u8 *run_accel(const union AccelAux *accel, const u8 *c, const u8 *c_end) {
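For readers unfamiliar with the API exercised by the new benchmarks above: vermicelliExec scans forward for a single character and returns a pointer to the first hit, or buf_end when nothing matches. A minimal standalone sketch of a caller (hypothetical harness, not part of this patch; u8 is the codebase's byte typedef):

#include <cassert>
#include "nfa/vermicelli.hpp"

int main() {
    const u8 buf[] = "bbbbbbbbabbbbbbb";
    const u8 *begin = buf;
    const u8 *end = buf + sizeof(buf) - 1;   // exclude the trailing NUL
    // Second argument is the nocase flag; 0 requests a case-sensitive scan.
    const u8 *hit = vermicelliExec('a', 0, begin, end);
    assert(hit == begin + 8);                // first 'a' is at offset 8
    return 0;
}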
diff --git a/src/nfa/arm/shufti.hpp b/src/nfa/arm/shufti.hpp
index 76461175..e710fd16 100644
--- a/src/nfa/arm/shufti.hpp
+++ b/src/nfa/arm/shufti.hpp
@@ -1,7 +1,6 @@
 /*
  * Copyright (c) 2015-2017, Intel Corporation
  * Copyright (c) 2020-2021, VectorCamp PC
- * Copyright (c) 2021, Arm Limited
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -30,7 +29,6 @@
 
 /** \file
  * \brief Shufti: character class acceleration.
- *
  */
 
 template <uint16_t S>
@@ -73,4 +71,4 @@ SuperVector<S> blockDoubleMask(SuperVector<S> mask1_lo, SuperVector<S> mask1_hi,
     t.print8("t");
 
     return !t.eq(SuperVector<S>::Ones());
-}
+}
\ No newline at end of file
diff --git a/src/nfa/arm/vermicelli.hpp b/src/nfa/arm/vermicelli.hpp
new file mode 100644
index 00000000..d790fa1f
--- /dev/null
+++ b/src/nfa/arm/vermicelli.hpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2020-2021, VectorCamp PC
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  * Neither the name of Intel Corporation nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Vermicelli: single-byte and double-byte acceleration.
+ */
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliBlock(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, u8 const *buf, u16 const len) {
+
+    SuperVector<S> mask = chars.eq(casemask & data);
+    return first_non_zero_match<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliBlockNeg(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, u8 const *buf, u16 const len) {
+
+    SuperVector<S> mask = !chars.eq(casemask & data);
+    return first_zero_match_inverted<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *rvermicelliBlock(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, u8 const *buf, u16 const len) {
+
+    SuperVector<S> mask = chars.eq(casemask & data);
+    return last_non_zero_match<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *rvermicelliBlockNeg(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, const u8 *buf, u16 const len) {
+
+    data.print8("data");
+    chars.print8("chars");
+    casemask.print8("casemask");
+    SuperVector<S> mask = !chars.eq(casemask & data);
+    mask.print8("mask");
+    return last_zero_match_inverted<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliDoubleBlock(SuperVector<S> const data, SuperVector<S> const chars1, SuperVector<S> const chars2, SuperVector<S> const casemask,
+                                u8 const c1, u8 const c2, u8 const casechar, u8 const *buf, u16 const len) {
+
+    SuperVector<S> v = casemask & data;
+    SuperVector<S> mask1 = chars1.eq(v);
+    SuperVector<S> mask2 = chars2.eq(v);
+    SuperVector<S> mask = mask1 & (mask2 >> 1);
+
+    DEBUG_PRINTF("rv[0] = %02hhx, rv[-1] = %02hhx\n", buf[0], buf[-1]);
+    bool partial_match = (((buf[0] & casechar) == c2) && ((buf[-1] & casechar) == c1));
+    DEBUG_PRINTF("partial = %d\n", partial_match);
+    if (partial_match) return buf - 1;
+
+    return first_non_zero_match<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *rvermicelliDoubleBlock(SuperVector<S> const data, SuperVector<S> const chars1, SuperVector<S> const chars2, SuperVector<S> const casemask,
+                                 u8 const c1, u8 const c2, u8 const casechar, u8 const *buf, u16 const len) {
+
+    SuperVector<S> v = casemask & data;
+    SuperVector<S> mask1 = chars1.eq(v);
+    SuperVector<S> mask2 = chars2.eq(v);
+    SuperVector<S> mask = (mask1 << 1) & mask2;
+
+    DEBUG_PRINTF("buf[0] = %02hhx, buf[-1] = %02hhx\n", buf[0], buf[-1]);
+    bool partial_match = (((buf[0] & casechar) == c2) && ((buf[-1] & casechar) == c1));
+    DEBUG_PRINTF("partial = %d\n", partial_match);
+    if (partial_match) {
+        mask = mask | (SuperVector<S>::Ones() >> (S-1));
+    }
+
+    return last_non_zero_match<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliDoubleMaskedBlock(SuperVector<S> const data, SuperVector<S> const chars1, SuperVector<S> const chars2,
+                                      SuperVector<S> const mask1, SuperVector<S> const mask2,
+                                      u8 const c1, u8 const c2, u8 const m1, u8 const m2, u8 const *buf, u16 const len) {
+
+    SuperVector<S> v1 = chars1.eq(data & mask1);
+    SuperVector<S> v2 = chars2.eq(data & mask2);
+    SuperVector<S> mask = v1 & (v2 >> 1);
+
+    DEBUG_PRINTF("rv[0] = %02hhx, rv[-1] = %02hhx\n", buf[0], buf[-1]);
+    bool partial_match = (((buf[0] & m1) == c2) && ((buf[-1] & m2) == c1));
+    DEBUG_PRINTF("partial = %d\n", partial_match);
+    if (partial_match) return buf - 1;
+
+    return first_non_zero_match<S>(buf, mask, len);
+}
+
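The double-byte blocks above locate a two-character match by ANDing the equality mask for c1 with the c2 mask shifted down one lane, so lane i survives only when buf[i] == c1 and buf[i+1] == c2; the separate partial_match test catches a pair that straddles the block's edge. A scalar model of the same combination (illustrative sketch only, using a 32-bit mask in place of a vector register):

#include <cstdint>
#include <cstddef>

// Returns the offset of the first c1,c2 pair in buf[0..len), or len if none.
static size_t find_pair(const uint8_t *buf, size_t len, uint8_t c1, uint8_t c2) {
    uint32_t m1 = 0, m2 = 0;                 // bit i set <=> buf[i] matches
    for (size_t i = 0; i < len && i < 32; i++) {
        m1 |= (uint32_t)(buf[i] == c1) << i;
        m2 |= (uint32_t)(buf[i] == c2) << i;
    }
    uint32_t pair = m1 & (m2 >> 1);          // buf[i]==c1 && buf[i+1]==c2
    return pair ? (size_t)__builtin_ctz(pair) : len;   // GCC/Clang builtin
}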
diff --git a/src/nfa/castle.c b/src/nfa/castle.c
index c7dd6d50..29208f8d 100644
--- a/src/nfa/castle.c
+++ b/src/nfa/castle.c
@@ -40,7 +40,7 @@
 #include "repeat.h"
 #include "shufti.h"
 #include "truffle.h"
-#include "vermicelli.h"
+#include "vermicelli.hpp"
 #include "util/bitutils.h"
 #include "util/multibit.h"
 #include "util/partial_store.h"
diff --git a/src/nfa/lbr.c b/src/nfa/lbr.c
index 68e8e3f4..52e81ad6 100644
--- a/src/nfa/lbr.c
+++ b/src/nfa/lbr.c
@@ -40,7 +40,7 @@
 #include "repeat_internal.h"
 #include "shufti.h"
 #include "truffle.h"
-#include "vermicelli.h"
+#include "vermicelli.hpp"
 #include "util/partial_store.h"
 #include "util/unaligned.h"
 
@@ -533,4 +533,4 @@ char lbrFwdScanTruf(const struct NFA *nfa, const u8 *buf,
 
 #ifdef HAVE_SVE2
 #include "lbr_sve.h"
-#endif
\ No newline at end of file
+#endif
diff --git a/src/nfa/limex_accel.c b/src/nfa/limex_accel.c
index 4834b6a5..a85d5a07 100644
--- a/src/nfa/limex_accel.c
+++ b/src/nfa/limex_accel.c
@@ -40,7 +40,7 @@
 #include "shufti.h"
 #include "truffle.h"
 #include "ue2common.h"
-#include "vermicelli.h"
+#include "vermicelli.hpp"
 #include "util/arch.h"
 #include "util/bitutils.h"
 #include "util/simd_utils.h"
diff --git a/src/nfa/mpv.c b/src/nfa/mpv.c
index 5829d43d..cba3d159 100644
--- a/src/nfa/mpv.c
+++ b/src/nfa/mpv.c
@@ -36,7 +36,7 @@
 #include "shufti.h"
 #include "truffle.h"
 #include "ue2common.h"
-#include "vermicelli.h"
+#include "vermicelli.hpp"
 #include "vermicelli_run.h"
 #include "util/multibit.h"
 #include "util/partial_store.h"
diff --git a/src/nfa/nfa_rev_api.h b/src/nfa/nfa_rev_api.h
index 370f96ef..d82c52a4 100644
--- a/src/nfa/nfa_rev_api.h
+++ b/src/nfa/nfa_rev_api.h
@@ -35,7 +35,7 @@
 
 #include "accel.h"
 #include "nfa_internal.h"
-#include "vermicelli.h"
+#include "vermicelli.hpp"
 #include "util/unaligned.h"
 
 static really_inline
diff --git a/src/nfa/shufti_simd.hpp b/src/nfa/shufti_simd.hpp
index 83ab428b..887f2468 100644
--- a/src/nfa/shufti_simd.hpp
+++ b/src/nfa/shufti_simd.hpp
@@ -65,7 +65,7 @@ static really_inline
 const u8 *fwdBlock(SuperVector<S> mask_lo, SuperVector<S> mask_hi, SuperVector<S> chars, const u8 *buf) {
     SuperVector<S> v = blockSingleMask(mask_lo, mask_hi, chars);
 
-    return firstMatch<S>(buf, v);
+    return first_zero_match_inverted<S>(buf, v);
 }
 
 template <uint16_t S>
@@ -73,7 +73,7 @@ static really_inline
 const u8 *revBlock(SuperVector<S> mask_lo, SuperVector<S> mask_hi, SuperVector<S> chars, const u8 *buf) {
     SuperVector<S> v = blockSingleMask(mask_lo, mask_hi, chars);
 
-    return lastMatch<S>(buf, v);
+    return last_zero_match_inverted<S>(buf, v);
 }
 
 template <uint16_t S>
@@ -82,7 +82,7 @@ const u8 *fwdBlockDouble(SuperVector<S> mask1_lo, SuperVector<S> mask1_hi, SuperVector<S> mask2_lo, SuperVector<S> mask2_hi, SuperVector<S> chars, const u8 *buf) {
 
     SuperVector<S> mask = blockDoubleMask(mask1_lo, mask1_hi, mask2_lo, mask2_hi, chars);
 
-    return firstMatch<S>(buf, mask);
+    return first_zero_match_inverted<S>(buf, mask);
 }
 
 template <uint16_t S>
diff --git a/src/nfa/truffle_simd.hpp b/src/nfa/truffle_simd.hpp
index 51b9ee68..8d9911fd 100644
--- a/src/nfa/truffle_simd.hpp
+++ b/src/nfa/truffle_simd.hpp
@@ -57,7 +57,7 @@ template <uint16_t S>
 static really_inline
 const u8 *fwdBlock(SuperVector<S> shuf_mask_lo_highclear, SuperVector<S> shuf_mask_lo_highset, SuperVector<S> chars, const u8 *buf) {
     SuperVector<S> res = blockSingleMask(shuf_mask_lo_highclear, shuf_mask_lo_highset, chars);
-    return firstMatch<S>(buf, res);
+    return first_zero_match_inverted<S>(buf, res);
 }
 
 template <uint16_t S>
@@ -121,7 +121,7 @@ static really_inline
 const u8 *revBlock(SuperVector<S> shuf_mask_lo_highclear, SuperVector<S> shuf_mask_lo_highset, SuperVector<S> v, const u8 *buf) {
     SuperVector<S> res = blockSingleMask(shuf_mask_lo_highclear, shuf_mask_lo_highset, v);
-    return lastMatch<S>(buf, res);
+    return last_zero_match_inverted<S>(buf, res);
 }
 
 template <uint16_t S>
diff --git a/src/nfa/vermicelli.h b/src/nfa/vermicelli.h
deleted file mode 100644
index 9defd899..00000000
--- a/src/nfa/vermicelli.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2015-2020, Intel Corporation
- * Copyright (c) 2021, Arm Limited
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *  * Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *  * Neither the name of Intel Corporation nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Vermicelli: single-byte and double-byte acceleration.
- */
-
-#ifndef VERMICELLI_H
-#define VERMICELLI_H
-
-#include "util/bitutils.h"
-#include "util/simd_utils.h"
-#include "util/unaligned.h"
-
-#if !defined(HAVE_AVX512)
-#include "vermicelli_common.h"
-#endif
-
-#ifdef HAVE_SVE2
-#include "vermicelli_sve.h"
-#else
-#include "vermicelli_sse.h"
-#endif
-
-static really_inline
-const u8 *vermicelliDoubleMaskedExec(char c1, char c2, char m1, char m2,
-                                     const u8 *buf, const u8 *buf_end) {
-    DEBUG_PRINTF("double verm scan (\\x%02hhx&\\x%02hhx)(\\x%02hhx&\\x%02hhx) "
-                 "over %zu bytes\n", c1, m1, c2, m2, (size_t)(buf_end - buf));
-    assert(buf < buf_end);
-
-    VERM_TYPE chars1 = VERM_SET_FN(c1);
-    VERM_TYPE chars2 = VERM_SET_FN(c2);
-    VERM_TYPE mask1 = VERM_SET_FN(m1);
-    VERM_TYPE mask2 = VERM_SET_FN(m2);
-
-#ifdef HAVE_AVX512
-    if (buf_end - buf <= VERM_BOUNDARY) {
-        const u8 *ptr = dvermMiniMasked(chars1, chars2, mask1, mask2, buf,
-                                        buf_end);
-        if (ptr) {
-            return ptr;
-        }
-
-        /* check for partial match at end */
-        if ((buf_end[-1] & m1) == (u8)c1) {
-            DEBUG_PRINTF("partial!!!\n");
-            return buf_end - 1;
-        }
-
-        return buf_end;
-    }
-#endif
-
-    assert((buf_end - buf) >= VERM_BOUNDARY);
-    uintptr_t min = (uintptr_t)buf % VERM_BOUNDARY;
-    if (min) {
-        // Input isn't aligned, so we need to run one iteration with an
-        // unaligned load, then skip buf forward to the next aligned address.
-        // There's some small overlap here, but we don't mind scanning it twice
-        // if we can do it quickly, do we?
-        const u8 *p = dvermPreconditionMasked(chars1, chars2, mask1, mask2, buf);
-        if (p) {
-            return p;
-        }
-
-        buf += VERM_BOUNDARY - min;
-        assert(buf < buf_end);
-    }
-
-    // Aligned loops from here on in
-    const u8 *ptr = dvermSearchAlignedMasked(chars1, chars2, mask1, mask2, c1,
-                                             c2, m1, m2, buf, buf_end);
-    if (ptr) {
-        return ptr;
-    }
-
-    // Tidy up the mess at the end
-    ptr = dvermPreconditionMasked(chars1, chars2, mask1, mask2,
-                                  buf_end - VERM_BOUNDARY);
-
-    if (ptr) {
-        return ptr;
-    }
-
-    /* check for partial match at end */
-    if ((buf_end[-1] & m1) == (u8)c1) {
-        DEBUG_PRINTF("partial!!!\n");
-        return buf_end - 1;
-    }
-
-    return buf_end;
-}
-
-#endif /* VERMICELLI_H */
diff --git a/src/nfa/vermicelli_common.h b/src/nfa/vermicelli.hpp
similarity index 51%
rename from src/nfa/vermicelli_common.h
rename to src/nfa/vermicelli.hpp
index aca58dcb..105194b1 100644
--- a/src/nfa/vermicelli_common.h
+++ b/src/nfa/vermicelli.hpp
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2020-2021, VectorCamp PC
  * Copyright (c) 2021, Arm Limited
  *
  * Redistribution and use in source and binary forms, with or without
@@ -28,52 +29,72 @@
  */
 
 /** \file
- * \brief Vermicelli: Implementation shared between architectures.
- *
- * (users should include vermicelli.h instead of this)
+ * \brief Vermicelli: single-byte and double-byte acceleration.
  */
 
-#define VERM_BOUNDARY 16
-#define VERM_TYPE m128
-#define VERM_SET_FN set1_16x8
+#ifndef VERMICELLI_HPP
+#define VERMICELLI_HPP
 
-// returns NULL if not found
-static really_inline
-const u8 *dvermPreconditionMasked(m128 chars1, m128 chars2,
-                                  m128 mask1, m128 mask2, const u8 *buf) {
-    m128 data = loadu128(buf); // unaligned
-    m128 v1 = eq128(chars1, and128(data, mask1));
-    m128 v2 = eq128(chars2, and128(data, mask2));
-    u32 z = movemask128(and128(v1, rshiftbyte_m128(v2, 1)));
+#include "util/bitutils.h"
 
-    /* no fixup of the boundary required - the aligned run will pick it up */
-    if (unlikely(z)) {
-        u32 pos = ctz32(z);
-        return buf + pos;
-    }
-    return NULL;
+#ifdef HAVE_SVE2
+#include "vermicelli_sve.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+const u8 *vermicelliExec(char c, char noCase, const u8 *buf, const u8 *buf_end);
+#ifdef __cplusplus
 }
+#endif
 
-static really_inline
-const u8 *dvermSearchAlignedMasked(m128 chars1, m128 chars2,
-                                   m128 mask1, m128 mask2, u8 c1, u8 c2, u8 m1,
-                                   u8 m2, const u8 *buf, const u8 *buf_end) {
-    assert((size_t)buf % 16 == 0);
+#ifdef __cplusplus
+extern "C" {
+#endif
+const u8 *nvermicelliExec(char c, char noCase, const u8 *buf, const u8 *buf_end);
+#ifdef __cplusplus
+}
+#endif
 
-    for (; buf + 16 < buf_end; buf += 16) {
-        m128 data = load128(buf);
-        m128 v1 = eq128(chars1, and128(data, mask1));
-        m128 v2 = eq128(chars2, and128(data, mask2));
-        u32 z = movemask128(and128(v1, rshiftbyte_m128(v2, 1)));
+#ifdef __cplusplus
+extern "C" {
+#endif
+const u8 *rvermicelliExec(char c, char nocase, const u8 *buf, const u8 *buf_end);
+#ifdef __cplusplus
+}
+#endif
 
-        if ((buf[15] & m1) == c1 && (buf[16] & m2) == c2) {
-            z |= (1 << 15);
-        }
-        if (unlikely(z)) {
-            u32 pos = ctz32(z);
-            return buf + pos;
-        }
-    }
+#ifdef __cplusplus
+extern "C" {
+#endif
+const u8 *rnvermicelliExec(char c, char nocase, const u8 *buf, const u8 *buf_end);
+#ifdef __cplusplus
+}
+#endif
 
-    return NULL;
-}
\ No newline at end of file
+#ifdef __cplusplus
+extern "C" {
+#endif
+const u8 *vermicelliDoubleExec(char c1, char c2, char nocase, const u8 *buf, const u8 *buf_end);
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+const u8 *rvermicelliDoubleExec(char c1, char c2, char nocase, const u8 *buf, const u8 *buf_end);
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+const u8 *vermicelliDoubleMaskedExec(char c1, char c2, char m1, char m2, const u8 *buf, const u8 *buf_end);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VERMICELLI_HPP */
\ No newline at end of file
diff --git a/src/nfa/vermicelli_run.h b/src/nfa/vermicelli_run.h
index d6fe7ec7..1deda48a 100644
--- a/src/nfa/vermicelli_run.h
+++ b/src/nfa/vermicelli_run.h
@@ -26,7 +26,10 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "vermicelli.h"
+#include "vermicelli.hpp"
+
+#define VERM_BOUNDARY 16
+#define VERM_TYPE m128
 
 static really_inline
 const u8 *find_xverm_run(char c, char nocase, u32 repeat, UNUSED const u8 *buf,
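The renamed vermicelli.hpp header above wraps every declaration in extern "C" so the plain C translation units patched earlier (hwlm.c, accel.c, castle.c, lbr.c, mpv.c, ...) can keep calling scanners that are now implemented in C++. A minimal sketch of such a caller (hypothetical helper function, assuming the codebase's u8 typedef is in scope via the include):

#include "nfa/vermicelli.hpp"

// Thanks to the extern "C" wrappers, this compiles the same way whether the
// translation unit is C or C++: find the first ':' in [buf, buf_end).
const u8 *first_colon(const u8 *buf, const u8 *buf_end) {
    return vermicelliExec(':', 0, buf, buf_end);   // 0 => case-sensitive
}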
diff --git a/src/nfa/vermicelli_simd.cpp b/src/nfa/vermicelli_simd.cpp
new file mode 100644
index 00000000..dbce6dc4
--- /dev/null
+++ b/src/nfa/vermicelli_simd.cpp
@@ -0,0 +1,549 @@
+/*
+ * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2020-2021, VectorCamp PC
+ * Copyright (c) 2021, Arm Limited
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  * Neither the name of Intel Corporation nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Vermicelli: single-byte and double-byte acceleration.
+ */
+
+#include "util/bitutils.h"
+#include "util/simd_utils.h"
+
+#include "vermicelli.hpp"
+#include "util/supervector/casemask.hpp"
+#include "util/match.hpp"
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliBlock(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, u8 const *buf, u16 const len);
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliBlockNeg(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, u8 const *buf, u16 const len);
+
+template <uint16_t S>
+static really_inline
+const u8 *rvermicelliBlock(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, u8 const *buf, u16 const len);
+
+template <uint16_t S>
+static really_inline
+const u8 *rvermicelliBlockNeg(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, const u8 *buf, u16 const len);
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliDoubleBlock(SuperVector<S> const data, SuperVector<S> const chars1, SuperVector<S> const chars2, SuperVector<S> const casemask,
+                                u8 const c1, u8 const c2, u8 const casechar, u8 const *buf, u16 const len);
+
+template <uint16_t S>
+static really_inline
+const u8 *rvermicelliDoubleBlock(SuperVector<S> const data, SuperVector<S> const chars1, SuperVector<S> const chars2, SuperVector<S> const casemask,
+                                 u8 const c1, u8 const c2, u8 const casechar, u8 const *buf, u16 const len);
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliDoubleMaskedBlock(SuperVector<S> const data, SuperVector<S> const chars1, SuperVector<S> const chars2,
+                                      SuperVector<S> const mask1, SuperVector<S> const mask2,
+                                      u8 const c1, u8 const c2, u8 const m1, u8 const m2, u8 const *buf, u16 const len);
+
+#if defined(ARCH_IA32) || defined(ARCH_X86_64)
+#include "x86/vermicelli.hpp"
+#elif defined(ARCH_ARM32) || defined(ARCH_AARCH64)
+#include "arm/vermicelli.hpp"
+#endif
+
+template <uint16_t S>
+static const u8 *vermicelliExecReal(SuperVector<S> const chars, SuperVector<S> const casemask, u8 const *buf, u8 const *buf_end) {
+    assert(buf && buf_end);
+    assert(buf < buf_end);
+    DEBUG_PRINTF("verm %p len %zu\n", buf, buf_end - buf);
+    DEBUG_PRINTF("b %s\n", buf);
+
+    const u8 *d = buf;
+    const u8 *rv;
+
+    __builtin_prefetch(d +   64);
+    __builtin_prefetch(d + 2*64);
+    __builtin_prefetch(d + 3*64);
+    __builtin_prefetch(d + 4*64);
+    DEBUG_PRINTF("start %p end %p \n", d, buf_end);
+    assert(d < buf_end);
+    if (d + S <= buf_end) {
+        // Reach vector aligned boundaries
+        DEBUG_PRINTF("until aligned %p \n", ROUNDUP_PTR(d, S));
+        if (!ISALIGNED_N(d, S)) {
+            u8 const *d1 = ROUNDUP_PTR(d, S);
+            SuperVector<S> data = SuperVector<S>::loadu(d);
+            rv = vermicelliBlock(data, chars, casemask, d, S);
+            if (rv) return rv;
+            d = d1;
+        }
+
+        while(d + S <= buf_end) {
+            __builtin_prefetch(d + 64);
+            DEBUG_PRINTF("d %p \n", d);
+            SuperVector<S> data = SuperVector<S>::load(d);
+            rv = vermicelliBlock(data, chars, casemask, d, S);
+            if (rv) return rv;
+            d += S;
+        }
+    }
+
+    DEBUG_PRINTF("d %p e %p \n", d, buf_end);
+    // finish off tail
+
+    if (d != buf_end) {
+        SuperVector<S> data = SuperVector<S>::loadu_maskz(d, buf_end - d);
+        rv = vermicelliBlock(data, chars, casemask, d, buf_end - d);
+        DEBUG_PRINTF("rv %p \n", rv);
+        if (rv && rv < buf_end) return rv;
+    }
+
+    return buf_end;
+}
+
+template <uint16_t S>
+static const u8 *nvermicelliExecReal(SuperVector<S> const chars, SuperVector<S> const casemask, const u8 *buf, const u8 *buf_end) {
+    assert(buf && buf_end);
+    assert(buf < buf_end);
+    DEBUG_PRINTF("verm %p len %zu\n", buf, buf_end - buf);
+    DEBUG_PRINTF("b %s\n", buf);
+
+    const u8 *d = buf;
+    const u8 *rv;
+
+    __builtin_prefetch(d +   64);
+    __builtin_prefetch(d + 2*64);
+    __builtin_prefetch(d + 3*64);
+    __builtin_prefetch(d + 4*64);
+    DEBUG_PRINTF("start %p end %p \n", d, buf_end);
+    assert(d < buf_end);
+    if (d + S <= buf_end) {
+        // Reach vector aligned boundaries
+        DEBUG_PRINTF("until aligned %p \n", ROUNDUP_PTR(d, S));
+        if (!ISALIGNED_N(d, S)) {
+            u8 const *d1 = ROUNDUP_PTR(d, S);
+            SuperVector<S> data = SuperVector<S>::loadu(d);
+            rv = vermicelliBlockNeg(data, chars, casemask, d, S);
+            if (rv) return rv;
+            d = d1;
+        }
+
+        while(d + S <= buf_end) {
+            __builtin_prefetch(d + 64);
+            DEBUG_PRINTF("d %p \n", d);
+            SuperVector<S> data = SuperVector<S>::load(d);
+            rv = vermicelliBlockNeg(data, chars, casemask, d, S);
+            if (rv) return rv;
+            d += S;
+        }
+    }
+
+    DEBUG_PRINTF("d %p e %p \n", d, buf_end);
+    // finish off tail
+
+    if (d != buf_end) {
+        SuperVector<S> data = SuperVector<S>::loadu_maskz(d, buf_end - d);
+        rv = vermicelliBlockNeg(data, chars, casemask, d, buf_end - d);
+        DEBUG_PRINTF("rv %p \n", rv);
+        if (rv && rv < buf_end) return rv;
+    }
+
+    return buf_end;
+}
+
+// Reverse vermicelli scan. Provides exact semantics and returns (buf - 1) if
+// character not found.
+template <uint16_t S>
+const u8 *rvermicelliExecReal(SuperVector<S> const chars, SuperVector<S> const casemask, const u8 *buf, const u8 *buf_end) {
+    assert(buf && buf_end);
+    assert(buf < buf_end);
+    DEBUG_PRINTF("rverm %p len %zu\n", buf, buf_end - buf);
+    DEBUG_PRINTF("b %s\n", buf);
+
+    const u8 *d = buf_end;
+    const u8 *rv;
+
+    __builtin_prefetch(d -   64);
+    __builtin_prefetch(d - 2*64);
+    __builtin_prefetch(d - 3*64);
+    __builtin_prefetch(d - 4*64);
+    DEBUG_PRINTF("start %p end %p \n", buf, d);
+    assert(d > buf);
+    if (d - S >= buf) {
+        // Reach vector aligned boundaries
+        DEBUG_PRINTF("until aligned %p \n", ROUNDDOWN_PTR(d, S));
+        if (!ISALIGNED_N(d, S)) {
+            u8 const *d1 = ROUNDDOWN_PTR(d, S);
+            SuperVector<S> data = SuperVector<S>::loadu(d - S);
+            rv = rvermicelliBlock(data, chars, casemask, d - S, S);
+            DEBUG_PRINTF("rv %p \n", rv);
+            if (rv) return rv;
+            d = d1;
+        }
+
+        while (d - S >= buf) {
+            DEBUG_PRINTF("aligned %p \n", d);
+            // On large packet buffers, this prefetch appears to get us about 2%.
+            __builtin_prefetch(d - 64);
+
+            d -= S;
+            SuperVector<S> data = SuperVector<S>::load(d);
+            rv = rvermicelliBlock(data, chars, casemask, d, S);
+            if (rv) return rv;
+        }
+    }
+
+    DEBUG_PRINTF("tail d %p e %p \n", buf, d);
+    // finish off head
+
+    if (d != buf) {
+        SuperVector<S> data = SuperVector<S>::loadu(buf);
+        rv = rvermicelliBlock(data, chars, casemask, buf, d - buf);
+        DEBUG_PRINTF("rv %p \n", rv);
+        if (rv && rv < buf_end) return rv;
+    }
+
+    return buf - 1;
+}
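All of the *ExecReal drivers above share one loop shape: if the input can hold at least one vector, scan a single (possibly overlapping) unaligned head block and round the cursor to alignment, run aligned full-width blocks, then finish with one masked partial load covering the remaining bytes. A scalar sketch of that control flow, with the per-block scan abstracted behind a callback (illustration only, not part of the patch):

#include <cstdint>
#include <cstddef>

template <size_t S>
const uint8_t *scan(const uint8_t *buf, const uint8_t *buf_end,
                    const uint8_t *(*block)(const uint8_t *d, size_t len)) {
    const uint8_t *d = buf;
    if (d + S <= buf_end) {
        uintptr_t a = (uintptr_t)d % S;
        if (a) {                              // unaligned head; overlap is fine
            if (const uint8_t *rv = block(d, S)) return rv;
            d += S - a;                       // jump to the next aligned address
        }
        for (; d + S <= buf_end; d += S)      // aligned full blocks
            if (const uint8_t *rv = block(d, S)) return rv;
    }
    if (d != buf_end)                         // masked tail of < S bytes
        if (const uint8_t *rv = block(d, buf_end - d)) return rv;
    return buf_end;                           // convention: no match
}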
+
+// Reverse vermicelli scan. Provides exact semantics and returns (buf - 1) if
+// character not found.
+template <uint16_t S>
+const u8 *rnvermicelliExecReal(SuperVector<S> const chars, SuperVector<S> const casemask, const u8 *buf, const u8 *buf_end) {
+    assert(buf && buf_end);
+    assert(buf < buf_end);
+    DEBUG_PRINTF("rverm %p len %zu\n", buf, buf_end - buf);
+    DEBUG_PRINTF("b %s\n", buf);
+
+    const u8 *d = buf_end;
+    const u8 *rv;
+
+    __builtin_prefetch(d -   64);
+    __builtin_prefetch(d - 2*64);
+    __builtin_prefetch(d - 3*64);
+    __builtin_prefetch(d - 4*64);
+    DEBUG_PRINTF("start %p end %p \n", buf, d);
+    assert(d > buf);
+    if (d - S >= buf) {
+        // Reach vector aligned boundaries
+        DEBUG_PRINTF("until aligned %p \n", ROUNDDOWN_PTR(d, S));
+        if (!ISALIGNED_N(d, S)) {
+            u8 const *d1 = ROUNDDOWN_PTR(d, S);
+            SuperVector<S> data = SuperVector<S>::loadu(d - S);
+            rv = rvermicelliBlockNeg(data, chars, casemask, d - S, S);
+            DEBUG_PRINTF("rv %p \n", rv);
+            if (rv) return rv;
+            d = d1;
+        }
+
+        while (d - S >= buf) {
+            DEBUG_PRINTF("aligned %p \n", d);
+            // On large packet buffers, this prefetch appears to get us about 2%.
+            __builtin_prefetch(d - 64);
+
+            d -= S;
+            SuperVector<S> data = SuperVector<S>::load(d);
+            rv = rvermicelliBlockNeg(data, chars, casemask, d, S);
+            if (rv) return rv;
+        }
+    }
+
+    DEBUG_PRINTF("tail d %p e %p \n", buf, d);
+    // finish off head
+
+    if (d != buf) {
+        SuperVector<S> data = SuperVector<S>::loadu(buf);
+        rv = rvermicelliBlockNeg(data, chars, casemask, buf, d - buf);
+        DEBUG_PRINTF("rv %p \n", rv);
+        if (rv && rv < buf_end) return rv;
+    }
+
+    return buf - 1;
+}
+
+template <uint16_t S>
+static const u8 *vermicelliDoubleExecReal(u8 const c1, u8 const c2, SuperVector<S> const casemask,
+                                          const u8 *buf, const u8 *buf_end) {
+    assert(buf && buf_end);
+    assert(buf < buf_end);
+    DEBUG_PRINTF("verm %p len %zu\n", buf, buf_end - buf);
+    DEBUG_PRINTF("b %s\n", buf);
+
+    const u8 *d = buf;
+    const u8 *rv;
+    // SuperVector<S> lastmask1{0};
+    const SuperVector<S> chars1 = SuperVector<S>::dup_u8(c1);
+    const SuperVector<S> chars2 = SuperVector<S>::dup_u8(c2);
+    const u8 casechar = casemask.u.u8[0];
+
+    __builtin_prefetch(d +   64);
+    __builtin_prefetch(d + 2*64);
+    __builtin_prefetch(d + 3*64);
+    __builtin_prefetch(d + 4*64);
+    DEBUG_PRINTF("start %p end %p \n", d, buf_end);
+    assert(d < buf_end);
+    if (d + S <= buf_end) {
+        // Reach vector aligned boundaries
+        DEBUG_PRINTF("until aligned %p \n", ROUNDUP_PTR(d, S));
+        if (!ISALIGNED_N(d, S)) {
+            u8 const *d1 = ROUNDUP_PTR(d, S);
+            SuperVector<S> data = SuperVector<S>::loadu(d);
+            rv = vermicelliDoubleBlock(data, chars1, chars2, casemask, c1, c2, casechar, d, S);
+            if (rv) return rv;
+            d = d1;
+        }
+
+        while(d + S <= buf_end) {
+            __builtin_prefetch(d + 64);
+            DEBUG_PRINTF("d %p \n", d);
+            SuperVector<S> data = SuperVector<S>::load(d);
+            rv = vermicelliDoubleBlock(data, chars1, chars2, casemask, c1, c2, casechar, d, S);
+            if (rv) return rv;
+            d += S;
+        }
+    }
+
+    DEBUG_PRINTF("tail d %p e %p \n", d, buf_end);
+    // finish off tail
+
+    if (d != buf_end) {
+        SuperVector<S> data = SuperVector<S>::loadu_maskz(d, buf_end - d);
+        rv = vermicelliDoubleBlock(data, chars1, chars2, casemask, c1, c2, casechar, d, buf_end - d);
+        DEBUG_PRINTF("rv %p \n", rv);
+        if (rv && rv < buf_end) return rv;
+    }
+
+    DEBUG_PRINTF("real tail d %p e %p \n", d, buf_end);
+    /* check for partial match at end */
+    u8 mask = casemask.u.u8[0];
+    if ((buf_end[-1] & mask) == (u8)c1) {
+        DEBUG_PRINTF("partial!!!\n");
+        return buf_end - 1;
+    }
+
+    return buf_end;
+}
+
+// /* returns highest offset of c2 (NOTE: not c1) */
+template <uint16_t S>
+const u8 *rvermicelliDoubleExecReal(char c1, char c2, SuperVector<S> const casemask, const u8 *buf, const u8 *buf_end) {
+    assert(buf && buf_end);
+    assert(buf < buf_end);
+    DEBUG_PRINTF("rverm %p len %zu\n", buf, buf_end - buf);
+    DEBUG_PRINTF("b %s\n", buf);
+    char s[255];
+    snprintf(s, buf_end - buf + 1, "%s", buf);
+    DEBUG_PRINTF("b %s\n", s);
+
+    const u8 *d = buf_end;
+    const u8 *rv;
+    const SuperVector<S> chars1 = SuperVector<S>::dup_u8(c1);
+    const SuperVector<S> chars2 = SuperVector<S>::dup_u8(c2);
+    const u8 casechar = casemask.u.u8[0];
+
+    __builtin_prefetch(d -   64);
+    __builtin_prefetch(d - 2*64);
+    __builtin_prefetch(d - 3*64);
+    __builtin_prefetch(d - 4*64);
+    DEBUG_PRINTF("start %p end %p \n", buf, d);
+    assert(d > buf);
+    if (d - S >= buf) {
+        // Reach vector aligned boundaries
+        DEBUG_PRINTF("until aligned %p \n", ROUNDDOWN_PTR(d, S));
+        if (!ISALIGNED_N(d, S)) {
+            u8 const *d1 = ROUNDDOWN_PTR(d, S);
+            SuperVector<S> data = SuperVector<S>::loadu(d - S);
+            rv = rvermicelliDoubleBlock(data, chars1, chars2, casemask, c1, c2, casechar, d - S, S);
+            DEBUG_PRINTF("rv %p \n", rv);
+            if (rv && rv < buf_end) return rv;
+            d = d1;
+        }
+
+        while (d - S >= buf) {
+            DEBUG_PRINTF("aligned %p \n", d);
+            // On large packet buffers, this prefetch appears to get us about 2%.
+            __builtin_prefetch(d - 64);
+
+            d -= S;
+            SuperVector<S> data = SuperVector<S>::load(d);
+            rv = rvermicelliDoubleBlock(data, chars1, chars2, casemask, c1, c2, casechar, d, S);
+            if (rv) return rv;
+        }
+    }
+
+    DEBUG_PRINTF("tail d %p e %p \n", buf, d);
+    // finish off head
+
+    if (d != buf) {
+        SuperVector<S> data = SuperVector<S>::loadu(buf);
+        rv = rvermicelliDoubleBlock(data, chars1, chars2, casemask, c1, c2, casechar, buf, d - buf);
+        DEBUG_PRINTF("rv %p \n", rv);
+        if (rv && rv < buf_end) return rv;
+    }
+
+    return buf - 1;
+}
+
+template <uint16_t S>
+static const u8 *vermicelliDoubleMaskedExecReal(u8 const c1, u8 const c2, u8 const m1, u8 const m2,
+                                                const u8 *buf, const u8 *buf_end) {
+    assert(buf && buf_end);
+    assert(buf < buf_end);
+    DEBUG_PRINTF("verm %p len %zu\n", buf, buf_end - buf);
+    DEBUG_PRINTF("b %s\n", buf);
+
+    const u8 *d = buf;
+    const u8 *rv;
+    // SuperVector<S> lastmask1{0};
+    const SuperVector<S> chars1 = SuperVector<S>::dup_u8(c1);
+    const SuperVector<S> chars2 = SuperVector<S>::dup_u8(c2);
+    const SuperVector<S> mask1 = SuperVector<S>::dup_u8(m1);
+    const SuperVector<S> mask2 = SuperVector<S>::dup_u8(m2);
+
+    __builtin_prefetch(d +   64);
+    __builtin_prefetch(d + 2*64);
+    __builtin_prefetch(d + 3*64);
+    __builtin_prefetch(d + 4*64);
+    DEBUG_PRINTF("start %p end %p \n", d, buf_end);
+    assert(d < buf_end);
+    if (d + S <= buf_end) {
+        // Reach vector aligned boundaries
+        DEBUG_PRINTF("until aligned %p \n", ROUNDUP_PTR(d, S));
+        if (!ISALIGNED_N(d, S)) {
+            u8 const *d1 = ROUNDUP_PTR(d, S);
+            SuperVector<S> data = SuperVector<S>::loadu(d);
+            rv = vermicelliDoubleMaskedBlock(data, chars1, chars2, mask1, mask2, c1, c2, m1, m2, d, S);
+            if (rv) return rv;
+            d = d1;
+        }
+
+        while(d + S <= buf_end) {
+            __builtin_prefetch(d + 64);
+            DEBUG_PRINTF("d %p \n", d);
+            SuperVector<S> data = SuperVector<S>::load(d);
+            rv = vermicelliDoubleMaskedBlock(data, chars1, chars2, mask1, mask2, c1, c2, m1, m2, d, S);
+            if (rv) return rv;
+            d += S;
+        }
+    }
+
+    DEBUG_PRINTF("tail d %p e %p \n", d, buf_end);
+    // finish off tail
+
+    if (d != buf_end) {
+        SuperVector<S> data = SuperVector<S>::loadu_maskz(d, buf_end - d);
+        rv = vermicelliDoubleMaskedBlock(data, chars1, chars2, mask1, mask2, c1, c2, m1, m2, d, buf_end - d);
+        DEBUG_PRINTF("rv %p \n", rv);
+        if (rv && rv < buf_end) return rv;
+    }
+
+    DEBUG_PRINTF("real tail d %p e %p \n", d, buf_end);
+    /* check for partial match at end */
+    if ((buf_end[-1] & m1) == (u8)c1) {
+        DEBUG_PRINTF("partial!!!\n");
+        return buf_end - 1;
+    }
+
+    return buf_end;
+}
+
+extern "C" const u8 *vermicelliExec(char c, char nocase, const u8 *buf, const u8 *buf_end) {
+    DEBUG_PRINTF("verm scan %s\\x%02hhx over %zu bytes\n",
+                 nocase ? "nocase " : "", c, (size_t)(buf_end - buf));
+    assert(buf < buf_end);
+
+    const SuperVector<VECTORSIZE> chars = SuperVector<VECTORSIZE>::dup_u8(c);
+    const SuperVector<VECTORSIZE> casemask{nocase ? getCaseMask<VECTORSIZE>() : SuperVector<VECTORSIZE>::Ones()};
+
+    return vermicelliExecReal<VECTORSIZE>(chars, casemask, buf, buf_end);
+}
+
+/* like vermicelliExec except returns the address of the first character which
+ * is not c */
+extern "C" const u8 *nvermicelliExec(char c, char nocase, const u8 *buf, const u8 *buf_end) {
+    DEBUG_PRINTF("nverm scan %s\\x%02hhx over %zu bytes\n",
+                 nocase ? "nocase " : "", c, (size_t)(buf_end - buf));
+    assert(buf < buf_end);
+
+    const SuperVector<VECTORSIZE> chars = SuperVector<VECTORSIZE>::dup_u8(c);
+    const SuperVector<VECTORSIZE> casemask{nocase ? getCaseMask<VECTORSIZE>() : SuperVector<VECTORSIZE>::Ones()};
+
+    return nvermicelliExecReal<VECTORSIZE>(chars, casemask, buf, buf_end);
+}
+
+extern "C" const u8 *rvermicelliExec(char c, char nocase, const u8 *buf, const u8 *buf_end) {
+    DEBUG_PRINTF("rev verm scan %s\\x%02hhx over %zu bytes\n",
+                 nocase ? "nocase " : "", c, (size_t)(buf_end - buf));
+    assert(buf < buf_end);
+
+    const SuperVector<VECTORSIZE> chars = SuperVector<VECTORSIZE>::dup_u8(c);
+    const SuperVector<VECTORSIZE> casemask{nocase ? getCaseMask<VECTORSIZE>() : SuperVector<VECTORSIZE>::Ones()};
+
+    return rvermicelliExecReal<VECTORSIZE>(chars, casemask, buf, buf_end);
+}
+
+extern "C" const u8 *rnvermicelliExec(char c, char nocase, const u8 *buf, const u8 *buf_end) {
+    DEBUG_PRINTF("rev verm scan %s\\x%02hhx over %zu bytes\n",
+                 nocase ? "nocase " : "", c, (size_t)(buf_end - buf));
+    assert(buf < buf_end);
+
+    const SuperVector<VECTORSIZE> chars = SuperVector<VECTORSIZE>::dup_u8(c);
+    const SuperVector<VECTORSIZE> casemask{nocase ? getCaseMask<VECTORSIZE>() : SuperVector<VECTORSIZE>::Ones()};
+
+    return rnvermicelliExecReal<VECTORSIZE>(chars, casemask, buf, buf_end);
+}
+
+extern "C" const u8 *vermicelliDoubleExec(char c1, char c2, char nocase, const u8 *buf, const u8 *buf_end) {
+    DEBUG_PRINTF("double verm scan %s\\x%02hhx%02hhx over %zu bytes\n",
+                 nocase ? "nocase " : "", c1, c2, (size_t)(buf_end - buf));
+    assert(buf < buf_end);
+
+    const SuperVector<VECTORSIZE> casemask{nocase ? getCaseMask<VECTORSIZE>() : SuperVector<VECTORSIZE>::Ones()};
+
+    return vermicelliDoubleExecReal<VECTORSIZE>(c1, c2, casemask, buf, buf_end);
+}
+
+extern "C" const u8 *rvermicelliDoubleExec(char c1, char c2, char nocase, const u8 *buf, const u8 *buf_end) {
+    DEBUG_PRINTF("rev double verm scan %s\\x%02hhx%02hhx over %zu bytes\n",
+                 nocase ? "nocase " : "", c1, c2, (size_t)(buf_end - buf));
+    assert(buf < buf_end);
+
+    const SuperVector<VECTORSIZE> casemask{nocase ? getCaseMask<VECTORSIZE>() : SuperVector<VECTORSIZE>::Ones()};
+
+    return rvermicelliDoubleExecReal<VECTORSIZE>(c1, c2, casemask, buf, buf_end);
+}
+
+extern "C" const u8 *vermicelliDoubleMaskedExec(char c1, char c2, char m1, char m2,
+                                                const u8 *buf, const u8 *buf_end) {
+    DEBUG_PRINTF("double verm scan (\\x%02hhx&\\x%02hhx)(\\x%02hhx&\\x%02hhx) "
+                 "over %zu bytes\n", c1, m1, c2, m2, (size_t)(buf_end - buf));
+    assert(buf < buf_end);
+
+    return vermicelliDoubleMaskedExecReal<VECTORSIZE>(c1, c2, m1, m2, buf, buf_end);
+}
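The wrappers above fold case by handing the scanner a casemask of all 0xdf bytes (via getCaseMask) instead of all-ones: clearing bit 5 maps 'a'..'z' onto 'A'..'Z', so a single masked compare covers both cases. A quick demonstration of the constant (the 0xff/0xdf pair mirrors the CASEMASK table deleted from noodle_engine_simd.hpp earlier in this patch):

#include <cassert>

int main() {
    const unsigned char CASE_CLEAR = 0xdf;    // the nocase mask byte
    assert(('a' & CASE_CLEAR) == 'A');
    assert(('z' & CASE_CLEAR) == 'Z');
    assert(('A' & CASE_CLEAR) == 'A');        // already-upper bytes unchanged
    return 0;
}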
diff --git a/src/nfa/x86/shufti.hpp b/src/nfa/x86/shufti.hpp
index 79ef7481..6fb34b2f 100644
--- a/src/nfa/x86/shufti.hpp
+++ b/src/nfa/x86/shufti.hpp
@@ -31,12 +31,6 @@
  * \brief Shufti: character class acceleration.
  */
 
-#ifndef SHUFTI_SIMD_X86_HPP
-#define SHUFTI_SIMD_X86_HPP
-
-#include "util/supervector/supervector.hpp"
-#include "util/match.hpp"
-
 template <uint16_t S>
 static really_inline
 const SuperVector<S> blockSingleMask(SuperVector<S> mask_lo, SuperVector<S> mask_hi, SuperVector<S> chars) {
@@ -44,12 +38,10 @@ const SuperVector<S> blockSingleMask(SuperVector<S> mask_lo, SuperVector<S> mask_hi, SuperVector<S> chars) {
     SuperVector<S> c_lo = chars & low4bits;
     SuperVector<S> c_hi = chars.template vshr_64_imm<4>() & low4bits;
 
-    c_lo = mask_lo.template pshufb(c_lo);
-    c_hi = mask_hi.template pshufb(c_hi);
+    c_lo = mask_lo.pshufb(c_lo);
+    c_hi = mask_hi.pshufb(c_hi);
 
-    SuperVector<S> c = c_lo & c_hi;
-
-    return c.eq(SuperVector<S>::Zeroes());
+    return (c_lo & c_hi).eq(SuperVector<S>::Zeroes());
 }
 
 template <uint16_t S>
@@ -80,5 +72,3 @@ SuperVector<S> blockDoubleMask(SuperVector<S> mask1_lo, SuperVector<S> mask1_hi,
 
     return c.eq(SuperVector<S>::Ones());
 }
-
-#endif // SHUFTI_SIMD_X86_HPP
diff --git a/src/nfa/x86/vermicelli.hpp b/src/nfa/x86/vermicelli.hpp
new file mode 100644
index 00000000..8b461dfe
--- /dev/null
+++ b/src/nfa/x86/vermicelli.hpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2020-2021, VectorCamp PC
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  * Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  * Neither the name of Intel Corporation nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Vermicelli: single-byte and double-byte acceleration.
+ */
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliBlock(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, u8 const *buf, u16 const len) {
+
+    SuperVector<S> mask = chars.eq(casemask & data);
+    return first_non_zero_match<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliBlockNeg(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, u8 const *buf, u16 const len) {
+
+    SuperVector<S> mask = chars.eq(casemask & data);
+    return first_zero_match_inverted<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *rvermicelliBlock(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, u8 const *buf, u16 const len) {
+
+    SuperVector<S> mask = chars.eq(casemask & data);
+    return last_non_zero_match<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *rvermicelliBlockNeg(SuperVector<S> const data, SuperVector<S> const chars, SuperVector<S> const casemask, const u8 *buf, u16 const len) {
+
+    data.print8("data");
+    chars.print8("chars");
+    casemask.print8("casemask");
+    SuperVector<S> mask = chars.eq(casemask & data);
+    mask.print8("mask");
+    return last_zero_match_inverted<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliDoubleBlock(SuperVector<S> const data, SuperVector<S> const chars1, SuperVector<S> const chars2, SuperVector<S> const casemask,
+                                u8 const c1, u8 const c2, u8 const casechar, u8 const *buf, u16 const len) {
+
+    SuperVector<S> v = casemask & data;
+    SuperVector<S> mask1 = chars1.eq(v);
+    SuperVector<S> mask2 = chars2.eq(v);
+    SuperVector<S> mask = mask1 & (mask2 >> 1);
+
+    DEBUG_PRINTF("rv[0] = %02hhx, rv[-1] = %02hhx\n", buf[0], buf[-1]);
+    bool partial_match = (((buf[0] & casechar) == c2) && ((buf[-1] & casechar) == c1));
+    DEBUG_PRINTF("partial = %d\n", partial_match);
+    if (partial_match) return buf - 1;
+
+    return first_non_zero_match<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *rvermicelliDoubleBlock(SuperVector<S> const data, SuperVector<S> const chars1, SuperVector<S> const chars2, SuperVector<S> const casemask,
+                                 u8 const c1, u8 const c2, u8 const casechar, u8 const *buf, u16 const len) {
+
+    SuperVector<S> v = casemask & data;
+    SuperVector<S> mask1 = chars1.eq(v);
+    SuperVector<S> mask2 = chars2.eq(v);
+    SuperVector<S> mask = (mask1 << 1) & mask2;
+
+    DEBUG_PRINTF("buf[0] = %02hhx, buf[-1] = %02hhx\n", buf[0], buf[-1]);
+    bool partial_match = (((buf[0] & casechar) == c2) && ((buf[-1] & casechar) == c1));
+    DEBUG_PRINTF("partial = %d\n", partial_match);
+    if (partial_match) {
+        mask = mask | (SuperVector<S>::Ones() >> (S-1));
+    }
+
+    return last_non_zero_match<S>(buf, mask, len);
+}
+
+template <uint16_t S>
+static really_inline
+const u8 *vermicelliDoubleMaskedBlock(SuperVector<S> const data, SuperVector<S> const chars1, SuperVector<S> const chars2,
+                                      SuperVector<S> const mask1, SuperVector<S> const mask2,
+                                      u8 const c1, u8 const c2, u8 const m1, u8 const m2, u8 const *buf, u16 const len) {
+
+    SuperVector<S> v1 = chars1.eq(data & mask1);
+    SuperVector<S> v2 = chars2.eq(data & mask2);
+    SuperVector<S> mask = v1 & (v2 >> 1);
+
+    DEBUG_PRINTF("rv[0] = %02hhx, rv[-1] = %02hhx\n", buf[0], buf[-1]);
+    bool partial_match = (((buf[0] & m1) == c2) && ((buf[-1] & m2) == c1));
+    DEBUG_PRINTF("partial = %d\n", partial_match);
+    if (partial_match) return buf - 1;
+
+    return first_non_zero_match<S>(buf, mask, len);
+}
+
diff --git a/src/util/arch/arm/match.hpp b/src/util/arch/arm/match.hpp
index 46d84d06..892c3877 100644
--- a/src/util/arch/arm/match.hpp
+++ b/src/util/arch/arm/match.hpp
@@ -29,9 +29,46 @@
 
 template <>
 really_really_inline
-const u8 *firstMatch<16>(const u8 *buf, SuperVector<16> mask) {
-    uint32x4_t res_t = vreinterpretq_u32_u8(mask.u.v128[0]);
-    uint64_t vmax = vgetq_lane_u64 (vreinterpretq_u64_u32 (vpmaxq_u32(res_t, res_t)), 0);
+const u8 *first_non_zero_match<16>(const u8 *buf, SuperVector<16> mask, u16 const UNUSED len) {
+    uint32x4_t m = mask.u.u32x4[0];
+    uint64_t vmax = vgetq_lane_u64 (vreinterpretq_u64_u32 (vpmaxq_u32(m, m)), 0);
+    if (vmax != 0) {
+        typename SuperVector<16>::movemask_type z = mask.movemask();
+        DEBUG_PRINTF("z %08x\n", z);
+        DEBUG_PRINTF("buf %p z %08x \n", buf, z);
+        u32 pos = ctz32(z & 0xffff);
+        DEBUG_PRINTF("match @ pos %u\n", pos);
+        assert(pos < 16);
+        DEBUG_PRINTF("buf + pos %p\n", buf + pos);
+        return buf + pos;
+    } else {
+        return NULL; // no match
+    }
+}
+
+template <>
+really_really_inline
+const u8 *last_non_zero_match<16>(const u8 *buf, SuperVector<16> mask, u16 const UNUSED len) {
+    uint32x4_t m = mask.u.u32x4[0];
+    uint64_t vmax = vgetq_lane_u64 (vreinterpretq_u64_u32 (vpmaxq_u32(m, m)), 0);
+    if (vmax != 0) {
+        typename SuperVector<16>::movemask_type z = mask.movemask();
+        DEBUG_PRINTF("buf %p z %08x \n", buf, z);
+        DEBUG_PRINTF("z %08x\n", z);
+        u32 pos = clz32(z & 0xffff);
+        DEBUG_PRINTF("match @ pos %u\n", pos);
+        assert(pos >= 16 && pos < 32);
+        return buf + (31 - pos);
+    } else {
+        return NULL; // no match
+    }
+}
+
+template <>
+really_really_inline
+const u8 *first_zero_match_inverted<16>(const u8 *buf, SuperVector<16> mask, u16 const UNUSED len) {
+    uint32x4_t m = mask.u.u32x4[0];
+    uint64_t vmax = vgetq_lane_u64 (vreinterpretq_u64_u32 (vpmaxq_u32(m, m)), 0);
     if (vmax != 0) {
         typename SuperVector<16>::movemask_type z = mask.movemask();
         DEBUG_PRINTF("z %08x\n", z);
@@ -48,9 +85,9 @@ const u8 *firstMatch<16>(const u8 *buf, SuperVector<16> mask) {
 
 template <>
 really_really_inline
-const u8 *lastMatch<16>(const u8 *buf, SuperVector<16> mask) {
-    uint32x4_t res_t = vreinterpretq_u32_u8(mask.u.v128[0]);
-    uint64_t vmax = vgetq_lane_u64 (vreinterpretq_u64_u32 (vpmaxq_u32(res_t, res_t)), 0);
+const u8 *last_zero_match_inverted<16>(const u8 *buf, SuperVector<16> mask, u16 const UNUSED len) {
+    uint32x4_t m = mask.u.u32x4[0];
+    uint64_t vmax = vgetq_lane_u64 (vreinterpretq_u64_u32 (vpmaxq_u32(m, m)), 0);
     if (vmax != 0) {
         typename SuperVector<16>::movemask_type z = mask.movemask();
         DEBUG_PRINTF("buf %p z %08x \n", buf, z);
diff --git a/src/util/arch/arm/simd_utils.h b/src/util/arch/arm/simd_utils.h
index 24851773..630cac93 100644
--- a/src/util/arch/arm/simd_utils.h
+++ b/src/util/arch/arm/simd_utils.h
@@ -100,7 +100,7 @@ static really_inline int isnonzero128(m128 a) {
  */
 static really_inline u32 diffrich128(m128 a, m128 b) {
     static const uint32x4_t movemask = { 1, 2, 4, 8 };
-    return vaddvq_u32(vandq_u32(vmvnq_s32(vceqq_s32((int32x4_t)a, (int32x4_t)b)), movemask));
+    return vaddvq_u32(vandq_u32(vmvnq_u32(vceqq_u32((uint32x4_t)a, (uint32x4_t)b)), movemask));
 }
 
 /**
@@ -109,53 +109,53 @@ static really_inline u32 diffrich128(m128 a, m128 b) {
  */
 static really_inline u32 diffrich64_128(m128 a, m128 b) {
     static const uint64x2_t movemask = { 1, 4 };
-    return vaddvq_u64(vandq_u64(vmvnq_s32(vceqq_s64((int64x2_t)a, (int64x2_t)b)), movemask));
+    return (u32) vaddvq_u64(vandq_u64((uint64x2_t)vmvnq_u32((uint32x4_t)vceqq_u64((uint64x2_t)a, (uint64x2_t)b)), movemask));
 }
 
 static really_really_inline
 m128 add_2x64(m128 a, m128 b) {
-    return (m128) vaddq_u64((int64x2_t)a, (int64x2_t)b);
+    return (m128) vaddq_u64((uint64x2_t)a, (uint64x2_t)b);
 }
 
 static really_really_inline
 m128 sub_2x64(m128 a, m128 b) {
-    return (m128) vsubq_u64((int64x2_t)a, (int64x2_t)b);
+    return (m128) vsubq_u64((uint64x2_t)a, (uint64x2_t)b);
 }
 
 static really_really_inline
 m128 lshift_m128(m128 a, unsigned b) {
-    return (m128) vshlq_n_s32((int64x2_t)a, b);
+    return (m128) vshlq_n_u32((uint32x4_t)a, b);
 }
 
 static really_really_inline
 m128 rshift_m128(m128 a, unsigned b) {
-    return (m128) vshrq_n_s32((int64x2_t)a, b);
+    return (m128) vshrq_n_u32((uint32x4_t)a, b);
 }
 
 static really_really_inline
 m128 lshift64_m128(m128 a, unsigned b) {
-    return (m128) vshlq_n_s64((int64x2_t)a, b);
+    return (m128) vshlq_n_u64((uint64x2_t)a, b);
 }
 
 static really_really_inline
 m128 rshift64_m128(m128 a, unsigned b) {
-    return (m128) vshrq_n_s64((int64x2_t)a, b);
+    return (m128) vshrq_n_u64((uint64x2_t)a, b);
 }
 
 static really_inline m128 eq128(m128 a, m128 b) {
-    return (m128) vceqq_s8((int8x16_t)a, (int8x16_t)b);
+    return (m128) vceqq_u8((uint8x16_t)a, (uint8x16_t)b);
 }
 
 static really_inline m128 eq64_m128(m128 a, m128 b) {
-    return (m128) vceqq_u64((int64x2_t)a, (int64x2_t)b);
+    return (m128) vceqq_u64((uint64x2_t)a, (uint64x2_t)b);
 }
 
 static really_inline u32 movemask128(m128 a) {
     static const uint8x16_t powers = { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };
 
     // Compute the mask from the input
-    uint64x2_t mask = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8((uint8x16_t)a, powers))));
-    uint64x2_t mask1 = (m128)vextq_s8(mask, zeroes128(), 7);
+    uint8x16_t mask = (uint8x16_t) vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8((uint8x16_t)a, powers))));
+    uint8x16_t mask1 = vextq_u8(mask, (uint8x16_t)zeroes128(), 7);
     mask = vorrq_u8(mask, mask1);
 
     // Get the resulting bytes
@@ -187,7 +187,7 @@ static really_inline u64a movq(const m128 in) {
 
 /* another form of movq */
 static really_inline
 m128 load_m128_from_u64a(const u64a *p) {
-    return (m128) vsetq_lane_u64(*p, zeroes128(), 0);
+    return (m128) vsetq_lane_u64(*p, (uint64x2_t) zeroes128(), 0);
 }
 
 static really_inline u32 extract32from128(const m128 in, unsigned imm) {
@@ -220,10 +220,10 @@ static really_inline u64a extract64from128(const m128 in, unsigned imm) {
 #else
     switch (imm) {
     case 0:
-        return vgetq_lane_u64((uint32x4_t) in, 0);
+        return vgetq_lane_u64((uint64x2_t) in, 0);
         break;
     case 1:
-        return vgetq_lane_u64((uint32x4_t) in, 1);
+        return vgetq_lane_u64((uint64x2_t) in, 1);
         break;
     default:
         return 0;
@@ -233,11 +233,11 @@ static really_inline u64a extract64from128(const m128 in, unsigned imm) {
 }
 
 static really_inline m128 low64from128(const m128 in) {
-    return vcombine_u64(vget_low_u64(in), vdup_n_u64(0));
+    return (m128) vcombine_u64(vget_low_u64((uint64x2_t)in), vdup_n_u64(0));
 }
 
 static really_inline m128 high64from128(const m128 in) {
-    return vcombine_u64(vget_high_u64(in), vdup_n_u64(0));
+    return (m128) vcombine_u64(vget_high_u64((uint64x2_t)in), vdup_n_u64(0));
 }
 
 static really_inline m128 add128(m128 a, m128 b) {
@@ -257,7 +257,7 @@ static really_inline m128 or128(m128 a, m128 b) {
 }
 
 static really_inline m128 andnot128(m128 a, m128 b) {
-    return (m128) (m128) vandq_s8( vmvnq_s8(a), b);
+    return (m128) vandq_s8( vmvnq_s8((int8x16_t) a), (int8x16_t) b);
 }
 
 // aligned load
@@ -401,12 +401,12 @@ m128 pshufb_m128(m128 a, m128 b) {
 
 static really_inline
 m128 max_u8_m128(m128 a, m128 b) {
-    return (m128) vmaxq_u8((int8x16_t)a, (int8x16_t)b);
+    return (m128) vmaxq_u8((uint8x16_t)a, (uint8x16_t)b);
 }
 
 static really_inline
 m128 min_u8_m128(m128 a, m128 b) {
-    return (m128) vminq_u8((int8x16_t)a, (int8x16_t)b);
+    return (m128) vminq_u8((uint8x16_t)a, (uint8x16_t)b);
 }
 
 static really_inline
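Several of the intrinsic-cast fixes above feed movemask128, which NEON lacks as a single instruction: the powers-of-two table turns each 0x00/0xff comparison lane into its bit value, and the pairwise adds collapse the sixteen lanes into one 16-bit mask. A scalar model of the result (illustrative sketch only):

#include <cstdint>

// bit i of the result is set iff lane i of the comparison result is 0xff.
static uint16_t movemask16(const uint8_t lanes[16]) {
    uint16_t z = 0;
    for (int i = 0; i < 16; i++)
        z |= (uint16_t)((lanes[i] >> 7) & 1u) << i;   // take each lane's MSB
    return z;
}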
diff --git a/src/util/arch/x86/match.hpp b/src/util/arch/x86/match.hpp
index 159f7355..cbf4ab6b 100644
--- a/src/util/arch/x86/match.hpp
+++ b/src/util/arch/x86/match.hpp
@@ -29,7 +29,106 @@
 
 template <>
 really_really_inline
-const u8 *firstMatch<16>(const u8 *buf, SuperVector<16> v) {
+const u8 *first_non_zero_match<16>(const u8 *buf, SuperVector<16> v, u16 const UNUSED len) {
+    SuperVector<16>::movemask_type z = v.movemask();
+    DEBUG_PRINTF("buf %p z %08x \n", buf, z);
+    DEBUG_PRINTF("z %08x\n", z);
+    if (unlikely(z)) {
+        u32 pos = ctz32(z);
+        DEBUG_PRINTF("~z %08x\n", ~z);
+        DEBUG_PRINTF("match @ pos %u\n", pos);
+        assert(pos < 16);
+        return buf + pos;
+    } else {
+        return NULL; // no match
+    }
+}
+
+template <>
+really_really_inline
+const u8 *first_non_zero_match<32>(const u8 *buf, SuperVector<32> v, u16 const UNUSED len) {
+    SuperVector<32>::movemask_type z = v.movemask();
+    DEBUG_PRINTF("z 0x%08x\n", z);
+    if (unlikely(z)) {
+        u32 pos = ctz32(z);
+        assert(pos < 32);
+        DEBUG_PRINTF("match @ pos %u\n", pos);
+        return buf + pos;
+    } else {
+        return NULL; // no match
+    }
+}
+template <>
+really_really_inline
+const u8 *first_non_zero_match<64>(const u8 *buf, SuperVector<64>v, u16 const len) {
+    SuperVector<64>::movemask_type z = v.movemask();
+    DEBUG_PRINTF("z 0x%016llx\n", z);
+    u64a mask = (~0ULL) >> (64 - len);
+    DEBUG_PRINTF("mask %016llx\n", mask);
+    z &= mask;
+    DEBUG_PRINTF("z 0x%016llx\n", z);
+    if (unlikely(z)) {
+        u32 pos = ctz64(z);
+        DEBUG_PRINTF("match @ pos %u\n", pos);
+        assert(pos < 64);
+        return buf + pos;
+    } else {
+        return NULL; // no match
+    }
+}
+
+template <>
+really_really_inline
+const u8 *last_non_zero_match<16>(const u8 *buf, SuperVector<16> v, u16 const UNUSED len) {
+    SuperVector<16>::movemask_type z = v.movemask();
+    DEBUG_PRINTF("buf %p z %08x \n", buf, z);
+    DEBUG_PRINTF("z %08x\n", z);
+    if (unlikely(z)) {
+        u32 pos = clz32(z);
+        DEBUG_PRINTF("match @ pos %u\n", pos);
+        assert(pos >= 16 && pos < 32);
+        return buf + (31 - pos);
+    } else {
+        return NULL; // no match
+    }
+}
+
+template <>
+really_really_inline
+const u8 *last_non_zero_match<32>(const u8 *buf, SuperVector<32> v, u16 const UNUSED len) {
+    SuperVector<32>::movemask_type z = v.movemask();
+    DEBUG_PRINTF("z 0x%08x\n", z);
+    if (unlikely(z)) {
+        u32 pos = clz32(z);
+        assert(pos < 32);
+        DEBUG_PRINTF("match @ pos %u\n", pos);
+        return buf + (31 - pos);
+    } else {
+        return NULL; // no match
+    }
+}
+template <>
+really_really_inline
+const u8 *last_non_zero_match<64>(const u8 *buf, SuperVector<64>v, u16 const len) {
+    SuperVector<64>::movemask_type z = v.movemask();
+    DEBUG_PRINTF("z 0x%016llx\n", z);
+    u64a mask = (~0ULL) >> (64 - len);
+    DEBUG_PRINTF("mask %016llx\n", mask);
+    z &= mask;
+    DEBUG_PRINTF("z 0x%016llx\n", z);
+    if (unlikely(z)) {
+        u32 pos = clz64(z);
+        DEBUG_PRINTF("match @ pos %u\n", pos);
+        assert(pos < 64);
+        return buf + (63 - pos);
+    } else {
+        return NULL; // no match
+    }
+}
+
+template <>
+really_really_inline
+const u8 *first_zero_match_inverted<16>(const u8 *buf, SuperVector<16> v, u16 const UNUSED len) {
     SuperVector<16>::movemask_type z = v.movemask();
     DEBUG_PRINTF("buf %p z %08x \n", buf, z);
     DEBUG_PRINTF("z %08x\n", z);
@@ -46,7 +145,7 @@ const u8 *firstMatch<16>(const u8 *buf, SuperVector<16> v) {
 
 template <>
 really_really_inline
-const u8 *firstMatch<32>(const u8 *buf, SuperVector<32> v) {
+const u8 *first_zero_match_inverted<32>(const u8 *buf, SuperVector<32> v, u16 const UNUSED len) {
     SuperVector<32>::movemask_type z = v.movemask();
     DEBUG_PRINTF("z 0x%08x\n", z);
     if (unlikely(z != 0xffffffff)) {
@@ -60,11 +159,15 @@ const u8 *firstMatch<32>(const u8 *buf, SuperVector<32> v) {
 }
 template <>
 really_really_inline
-const u8 *firstMatch<64>(const u8 *buf, SuperVector<64>v) {
+const u8 *first_zero_match_inverted<64>(const u8 *buf, SuperVector<64>v, u16 const len) {
     SuperVector<64>::movemask_type z = v.movemask();
     DEBUG_PRINTF("z 0x%016llx\n", z);
-    if (unlikely(z != ~0ULL)) {
-        u32 pos = ctz64(~z);
+    u64a mask = (~0ULL) >> (64 - len);
+    DEBUG_PRINTF("mask %016llx\n", mask);
+    z = ~z & mask;
+    DEBUG_PRINTF("z 0x%016llx\n", z);
+    if (unlikely(z)) {
+        u32 pos = ctz64(z);
         DEBUG_PRINTF("match @ pos %u\n", pos);
         assert(pos < 64);
         return buf + pos;
@@ -75,7 +178,7 @@ const u8 *firstMatch<64>(const u8 *buf, SuperVector<64>v) {
 
 template <>
 really_really_inline
-const u8 *lastMatch<16>(const u8 *buf, SuperVector<16> v) {
+const u8 *last_zero_match_inverted<16>(const u8 *buf, SuperVector<16> v, uint16_t UNUSED len ) {
     SuperVector<16>::movemask_type z = v.movemask();
     DEBUG_PRINTF("buf %p z %08x \n", buf, z);
     DEBUG_PRINTF("z %08x\n", z);
@@ -92,10 +195,10 @@ const u8 *lastMatch<16>(const u8 *buf, SuperVector<16> v) {
 
 template<>
 really_really_inline
-const u8 *lastMatch<32>(const u8 *buf, SuperVector<32> v) {
+const u8 *last_zero_match_inverted<32>(const u8 *buf, SuperVector<32> v, uint16_t UNUSED len) {
     SuperVector<32>::movemask_type z = v.movemask();
     if (unlikely(z != 0xffffffff)) {
-        u32 pos = clz32(~z);
+        u32 pos = clz32(~z & 0xffffffff);
         DEBUG_PRINTF("buf=%p, pos=%u\n", buf, pos);
         assert(pos < 32);
         return buf + (31 - pos);
@@ -106,11 +209,17 @@ const u8 *lastMatch<32>(const u8 *buf, SuperVector<32> v) {
 
 template <>
 really_really_inline
-const u8 *lastMatch<64>(const u8 *buf, SuperVector<64> v) {
+const u8 *last_zero_match_inverted<64>(const u8 *buf, SuperVector<64> v, uint16_t len) {
+    v.print8("v");
     SuperVector<64>::movemask_type z = v.movemask();
     DEBUG_PRINTF("z 0x%016llx\n", z);
-    if (unlikely(z != ~0ULL)) {
-        u32 pos = clz64(~z);
+    u64a mask = (~0ULL) >> (64 - len);
+    DEBUG_PRINTF("mask %016llx\n", mask);
+    z = ~z & mask;
+    DEBUG_PRINTF("z 0x%016llx\n", z);
+    if (unlikely(z)) {
+        u32 pos = clz64(z);
+        DEBUG_PRINTF("~z 0x%016llx\n", ~z);
         DEBUG_PRINTF("match @ pos %u\n", pos);
         assert(pos < 64);
         return buf + (63 - pos);
diff --git a/src/util/match.hpp b/src/util/match.hpp
index e3dd2e02..003c665f 100644
--- a/src/util/match.hpp
+++ b/src/util/match.hpp
@@ -38,10 +38,16 @@
 
 #include "util/supervector/supervector.hpp"
 
 template <u16 S>
-const u8 *firstMatch(const u8 *buf, SuperVector<S> v);
+const u8 *first_non_zero_match(const u8 *buf, SuperVector<S> v, u16 const len = S);
 
 template <u16 S>
-const u8 *lastMatch(const u8 *buf, SuperVector<S> v);
+const u8 *last_non_zero_match(const u8 *buf, SuperVector<S> v, u16 const len = S);
+
+template <u16 S>
+const u8 *first_zero_match_inverted(const u8 *buf, SuperVector<S> v, u16 const len = S);
+
+template <u16 S>
+const u8 *last_zero_match_inverted(const u8 *buf, SuperVector<S> v, u16 len = S);
 
 #if defined(ARCH_IA32) || defined(ARCH_X86_64)
 #include "util/arch/x86/match.hpp"
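The renamed match helpers above all reduce to bit-scans over a movemask: first_non_zero_match takes ctz of the mask, last_non_zero_match takes 31 - clz, and for the 64-byte variants the new len parameter first masks off lanes beyond the valid tail. A small model of the index arithmetic (GCC/Clang builtins, illustration only):

#include <cassert>
#include <cstdint>

int main() {
    uint32_t z = 0x00008401;                   // lanes 0, 10 and 15 matched
    assert(__builtin_ctz(z) == 0);             // first-match offset
    assert(31 - __builtin_clz(z) == 15);       // last-match offset
    uint32_t tail = z & ((1u << 11) - 1);      // keep only the first 11 lanes
    assert(31 - __builtin_clz(tail) == 10);    // last match within the tail
    return 0;
}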
SuperVector<16>::SuperVector(int8x16_t other) { - u.v128[0] = static_cast<m128>(other); + u.s8x16[0] = other; } template<> template<> -really_inline SuperVector<16>::SuperVector(uint8x16_t const other) +really_inline SuperVector<16>::SuperVector(uint8x16_t other) { - u.v128[0] = static_cast<m128>(other); + u.u8x16[0] = other; +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(int16x8_t other) +{ + u.s16x8[0] = other; +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint16x8_t other) +{ + u.u16x8[0] = other; +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(int32x4_t other) +{ + u.s32x4[0] = other; +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint32x4_t other) +{ + u.u32x4[0] = other; +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(int64x2_t other) +{ + u.s64x2[0] = other; +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint64x2_t other) +{ + u.u64x2[0] = other; } template<> template<> really_inline SuperVector<16>::SuperVector(int8_t const other) { - u.v128[0] = vdupq_n_s8(other); + u.s8x16[0] = vdupq_n_s8(other); } template<> template<> really_inline SuperVector<16>::SuperVector(uint8_t const other) { - u.v128[0] = vdupq_n_u8(other); + u.u8x16[0] = vdupq_n_u8(other); } template<> template<> really_inline SuperVector<16>::SuperVector(int16_t const other) { - u.v128[0] = vdupq_n_s16(other); + u.s16x8[0] = vdupq_n_s16(other); } template<> template<> really_inline SuperVector<16>::SuperVector(uint16_t const other) { - u.v128[0] = vdupq_n_u16(other); + u.u16x8[0] = vdupq_n_u16(other); } template<> template<> really_inline SuperVector<16>::SuperVector(int32_t const other) { - u.v128[0] = vdupq_n_s32(other); + u.s32x4[0] = vdupq_n_s32(other); } template<> template<> really_inline SuperVector<16>::SuperVector(uint32_t const other) { - u.v128[0] = vdupq_n_u32(other); + u.u32x4[0] = vdupq_n_u32(other); } template<> template<> really_inline SuperVector<16>::SuperVector(int64_t const other) { - u.v128[0] = vdupq_n_s64(other); + u.s64x2[0] = vdupq_n_s64(other); } template<> template<> really_inline SuperVector<16>::SuperVector(uint64_t const other) { - u.v128[0] = vdupq_n_u64(other); + u.u64x2[0] = vdupq_n_u64(other); } // Constants @@ -137,37 +179,37 @@ really_inline void SuperVector<16>::operator=(SuperVector<16> const &other) template <> really_inline SuperVector<16> SuperVector<16>::operator&(SuperVector<16> const &b) const { - return {vandq_s8(u.v128[0], b.u.v128[0])}; + return {vandq_u8(u.u8x16[0], b.u.u8x16[0])}; } template <> really_inline SuperVector<16> SuperVector<16>::operator|(SuperVector<16> const &b) const { - return {vorrq_s8(u.v128[0], b.u.v128[0])}; + return {vorrq_u8(u.u8x16[0], b.u.u8x16[0])}; } template <> really_inline SuperVector<16> SuperVector<16>::operator^(SuperVector<16> const &b) const { - return {veorq_s8(u.v128[0], b.u.v128[0])}; + return {veorq_u8(u.u8x16[0], b.u.u8x16[0])}; } template <> really_inline SuperVector<16> SuperVector<16>::operator!() const { - return {vmvnq_s8(u.v128[0])}; + return {vmvnq_u8(u.u8x16[0])}; } template <> really_inline SuperVector<16> SuperVector<16>::opandnot(SuperVector<16> const &b) const { - return {vandq_s8(vmvnq_s8(u.v128[0]), b.u.v128[0])}; + return {vandq_u8(vmvnq_u8(u.u8x16[0]), b.u.u8x16[0])}; } template <> really_inline SuperVector<16> SuperVector<16>::operator==(SuperVector<16> const &b) const { - return {vceqq_s8((int16x8_t)u.v128[0], (int16x8_t)b.u.v128[0])}; + return {vceqq_u8(u.u8x16[0],
b.u.u8x16[0])}; } template <> @@ -179,25 +221,25 @@ really_inline SuperVector<16> SuperVector<16>::operator!=(SuperVector<16> const template <> really_inline SuperVector<16> SuperVector<16>::operator>(SuperVector<16> const &b) const { - return {vcgtq_s8((int16x8_t)u.v128[0], (int16x8_t)b.u.v128[0])}; + return {vcgtq_s8(u.s8x16[0], b.u.s8x16[0])}; } template <> really_inline SuperVector<16> SuperVector<16>::operator>=(SuperVector<16> const &b) const { - return {vcgeq_s8((int16x8_t)u.v128[0], (int16x8_t)b.u.v128[0])}; + return {vcgeq_u8(u.u8x16[0], b.u.u8x16[0])}; } template <> really_inline SuperVector<16> SuperVector<16>::operator<(SuperVector<16> const &b) const { - return {vcltq_s8((int16x8_t)u.v128[0], (int16x8_t)b.u.v128[0])}; + return {vcltq_s8(u.s8x16[0], b.u.s8x16[0])}; } template <> really_inline SuperVector<16> SuperVector<16>::operator<=(SuperVector<16> const &b) const { - return {vcgeq_s8((int16x8_t)u.v128[0], (int16x8_t)b.u.v128[0])}; + return {vcleq_s8(u.s8x16[0], b.u.s8x16[0])}; } template <> @@ -212,9 +254,9 @@ really_inline typename SuperVector<16>::movemask_type SuperVector<16>::movemask( SuperVector powers{0x8040201008040201UL}; // Compute the mask from the input - uint64x2_t mask = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8((uint16x8_t)u.v128[0], powers.u.v128[0])))); - uint64x2_t mask1 = (m128)vextq_s8(mask, vdupq_n_u8(0), 7); - mask = vorrq_u8(mask, mask1); + uint8x16_t mask = (uint8x16_t) vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vandq_u8(u.u8x16[0], powers.u.u8x16[0])))); + uint64x2_t mask1 = (uint64x2_t) vextq_u8(mask, vdupq_n_u8(0), 7); + mask = vorrq_u8(mask, (uint8x16_t) mask1); // Get the resulting bytes uint16_t output; @@ -232,35 +274,35 @@ template <> template <uint8_t N> really_inline SuperVector<16> SuperVector<16>::vshl_8_imm() const { - return {(m128)vshlq_n_s8(u.v128[0], N)}; + return {vshlq_n_u8(u.u8x16[0], N)}; } template <> template <uint8_t N> really_inline SuperVector<16> SuperVector<16>::vshl_16_imm() const { - return {(m128)vshlq_n_s16(u.v128[0], N)}; + return {vshlq_n_u16(u.u16x8[0], N)}; } template <> template <uint8_t N> really_inline SuperVector<16> SuperVector<16>::vshl_32_imm() const { - return {(m128)vshlq_n_s32(u.v128[0], N)}; + return {vshlq_n_u32(u.u32x4[0], N)}; } template <> template <uint8_t N> really_inline SuperVector<16> SuperVector<16>::vshl_64_imm() const { - return {(m128)vshlq_n_s64(u.v128[0], N)}; + return {vshlq_n_u64(u.u64x2[0], N)}; } template <> template <uint8_t N> really_inline SuperVector<16> SuperVector<16>::vshl_128_imm() const { - return {vextq_s8(vdupq_n_u8(0), (int16x8_t)u.v128[0], 16 - N)}; + return {vextq_u8(vdupq_n_u8(0), u.u8x16[0], 16 - N)}; } template <> @@ -274,35 +316,35 @@ template <> template <uint8_t N> really_inline SuperVector<16> SuperVector<16>::vshr_8_imm() const { - return {(m128)vshrq_n_s8(u.v128[0], N)}; + return {vshrq_n_u8(u.u8x16[0], N)}; } template <> template <uint8_t N> really_inline SuperVector<16> SuperVector<16>::vshr_16_imm() const { - return {(m128)vshrq_n_s16(u.v128[0], N)}; + return {vshrq_n_u16(u.u16x8[0], N)}; } template <> template <uint8_t N> really_inline SuperVector<16> SuperVector<16>::vshr_32_imm() const { - return {(m128)vshrq_n_s32(u.v128[0], N)}; + return {vshrq_n_u32(u.u32x4[0], N)}; } template <> template <uint8_t N> really_inline SuperVector<16> SuperVector<16>::vshr_64_imm() const { - return {(m128)vshrq_n_s64(u.v128[0], N)}; + return {vshrq_n_u64(u.u64x2[0], N)}; } template <> template <uint8_t N> really_inline SuperVector<16> SuperVector<16>::vshr_128_imm() const { - return {vextq_s8((int16x8_t)u.v128[0], vdupq_n_u8(0), N)}; + return {vextq_u8(u.u8x16[0], vdupq_n_u8(0), N)}; } template <>
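The runtime-count vshl_*/vshr_* rewrites in the next hunks keep the existing Unroller dispatch: NEON's vshlq_n_*/vshrq_n_* intrinsics accept only immediate shift counts, so every candidate count is instantiated at compile time and the branch whose constant equals the runtime N is taken. A self-contained sketch of that technique, with unroll as a hypothetical stand-in for ue2's Unroller:

    #include <arm_neon.h>
    #include <cstdint>
    #include <type_traits>

    // Calls f(std::integral_constant<int, i>{}) for each i in [Begin, End).
    template <int Begin, int End, typename F>
    void unroll(F f) {
        if constexpr (Begin < End) {
            f(std::integral_constant<int, Begin>{});
            unroll<Begin + 1, End>(f);
        }
    }

    // Left-shifts each byte lane by a runtime count using only the
    // immediate form of the intrinsic. Valid immediates for 8-bit lanes
    // are 1..7; any larger count shifts every bit out, leaving zero.
    uint8x16_t vshl8_runtime(uint8x16_t v, uint8_t N) {
        if (N == 0) return v;
        uint8x16_t result = vdupq_n_u8(0);
        unroll<1, 8>([&](auto i) {
            constexpr int n = i.value; // a constant in each instantiation
            if (N == n) result = vshlq_n_u8(v, n);
        });
        return result;
    }

The compiler folds this into a short compare-and-select chain, which is still far cheaper than spilling the vector to memory for a scalar shift.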
@@ -334,7 +376,7 @@ really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) const if (N == 0) return *this; if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshlq_n_s8(u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vshlq_n_u8(u.u8x16[0], n)}; }); return result; } @@ -344,7 +386,7 @@ really_inline SuperVector<16> SuperVector<16>::vshl_16 (uint8_t const N) const if (N == 0) return *this; if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshlq_n_s16(u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vshlq_n_u16(u.u16x8[0], n)}; }); return result; } @@ -354,7 +396,7 @@ really_inline SuperVector<16> SuperVector<16>::vshl_32 (uint8_t const N) const if (N == 0) return *this; if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshlq_n_s32(u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vshlq_n_u32(u.u32x4[0], n)}; }); return result; } @@ -364,7 +406,7 @@ really_inline SuperVector<16> SuperVector<16>::vshl_64 (uint8_t const N) const if (N == 0) return *this; if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshlq_n_s64(u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vshlq_n_u64(u.u64x2[0], n)}; }); return result; } @@ -374,7 +416,7 @@ really_inline SuperVector<16> SuperVector<16>::vshl_128(uint8_t const N) const if (N == 0) return *this; if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vextq_s8(vdupq_n_u8(0), (int16x8_t)u.v128[0], 16 - n)}; }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vextq_u8(vdupq_n_u8(0), u.u8x16[0], 16 - n)}; }); return result; } @@ -390,7 +432,7 @@ really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) const if (N == 0) return *this; if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshrq_n_s8(u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vshrq_n_u8(u.u8x16[0], n)}; }); return result; } @@ -400,7 +442,7 @@ really_inline SuperVector<16> SuperVector<16>::vshr_16 (uint8_t const N) const if (N == 0) return *this; if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshrq_n_s16(u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vshrq_n_u16(u.u16x8[0], n)}; }); return result; } @@ -410,7 +452,7 @@ really_inline SuperVector<16> SuperVector<16>::vshr_32 (uint8_t const N) const if (N == 0) return *this; if (N == 16) return Zeroes(); SuperVector 
result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshrq_n_s32(u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vshrq_n_u32(u.u32x4[0], n)}; }); return result; } @@ -420,7 +462,7 @@ really_inline SuperVector<16> SuperVector<16>::vshr_64 (uint8_t const N) const if (N == 0) return *this; if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshrq_n_s64(u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vshrq_n_u64(u.u64x2[0], n)}; }); return result; } @@ -430,7 +472,7 @@ really_inline SuperVector<16> SuperVector<16>::vshr_128(uint8_t const N) const if (N == 0) return *this; if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vextq_s8((int16x8_t)u.v128[0], vdupq_n_u8(0), n)}; }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {vextq_u8(u.u8x16[0], vdupq_n_u8(0), n)}; }); return result; } @@ -444,7 +486,7 @@ really_inline SuperVector<16> SuperVector<16>::vshr(uint8_t const N) const template <> really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const { - return {vextq_s8((int16x8_t)u.v128[0], vdupq_n_u8(0), N)}; + return {vextq_u8(u.u8x16[0], vdupq_n_u8(0), N)}; } #else template <> @@ -458,7 +500,7 @@ really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const template <> really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const { - return {vextq_s8(vdupq_n_u8(0), (int16x8_t)u.v128[0], 16 - N)}; + return {vextq_u8(vdupq_n_u8(0), u.u8x16[0], 16 - N)}; } #else template <> @@ -512,7 +554,7 @@ really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, in if (offset == 16) { return *this; } else { - return {vextq_s8((int16x8_t)other.u.v128[0], (int16x8_t)u.v128[0], offset)}; + return {vextq_u8(other.u.u8x16[0], u.u8x16[0], offset)}; } } #else @@ -521,21 +563,21 @@ really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, in { switch(offset) { case 0: return other; break; - case 1: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 1)}; break; - case 2: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 2)}; break; - case 3: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 3)}; break; - case 4: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 4)}; break; - case 5: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 5)}; break; - case 6: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 6)}; break; - case 7: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 7)}; break; - case 8: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 8)}; break; - case 9: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 9)}; break; - case 10: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 10)}; break; - case 11: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 11)}; break; - case 12: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 12)}; break; - case 13: return {vextq_s8((int16x8_t) 
other.u.v128[0], (int16x8_t) u.v128[0], 13)}; break; - case 14: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 14)}; break; - case 15: return {vextq_s8((int16x8_t) other.u.v128[0], (int16x8_t) u.v128[0], 15)}; break; + case 1: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 1)}; break; + case 2: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 2)}; break; + case 3: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 3)}; break; + case 4: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 4)}; break; + case 5: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 5)}; break; + case 6: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 6)}; break; + case 7: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 7)}; break; + case 8: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 8)}; break; + case 9: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 9)}; break; + case 10: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 10)}; break; + case 11: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 11)}; break; + case 12: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 12)}; break; + case 13: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 13)}; break; + case 14: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 14)}; break; + case 15: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 15)}; break; case 16: return *this; break; default: break; } @@ -547,7 +589,7 @@ template<> template<> really_inline SuperVector<16> SuperVector<16>::pshufb(SuperVector<16> b) { - return {vqtbl1q_s8((int8x16_t)u.v128[0], (uint8x16_t)b.u.v128[0])}; + return {vqtbl1q_u8(u.u8x16[0], b.u.u8x16[0])}; } template<> @@ -565,7 +607,7 @@ template<> really_inline SuperVector<16> SuperVector<16>::pshufb_maskz(SuperVector<16> b, uint8_t const len) { SuperVector mask = Ones_vshr(16 -len); - return mask & pshufb(b); + return mask & pshufb(b); } #endif // SIMD_IMPL_HPP diff --git a/src/util/supervector/casemask.hpp b/src/util/supervector/casemask.hpp new file mode 100644 index 00000000..10fa5f1a --- /dev/null +++ b/src/util/supervector/casemask.hpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2017, Intel Corporation + * Copyright (c) 2020-2021, VectorCamp PC + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CASEMASK_HPP +#define CASEMASK_HPP + +#include "util/supervector/supervector.hpp" + +static u8 CASEMASK[] = { 0xff, 0xdf }; + +static really_inline +u8 caseClear8(u8 x, bool noCase) +{ + return static_cast<u8>(x & CASEMASK[(u8)noCase]); +} + +template <uint16_t S> +static really_inline SuperVector<S> getMask(u8 c, bool noCase) { + u8 k = caseClear8(c, noCase); + return SuperVector<S>(k); +} + +template <uint16_t S> +static really_inline SuperVector<S> getCaseMask(void) { + return SuperVector<S>(CASEMASK[1]); +} + +#endif // CASEMASK_HPP \ No newline at end of file diff --git a/src/util/supervector/supervector.hpp b/src/util/supervector/supervector.hpp index 4cd10144..ed9d266a 100644 --- a/src/util/supervector/supervector.hpp +++ b/src/util/supervector/supervector.hpp @@ -164,6 +164,18 @@ public: typename BaseVector<16>::type ALIGN_ATTR(BaseVector<16>::size) v128[SIZE / BaseVector<16>::size]; typename BaseVector<32>::type ALIGN_ATTR(BaseVector<32>::size) v256[SIZE / BaseVector<32>::size]; typename BaseVector<64>::type ALIGN_ATTR(BaseVector<64>::size) v512[SIZE / BaseVector<64>::size]; + +#if defined(ARCH_ARM32) || defined(ARCH_AARCH64) + uint64x2_t ALIGN_ATTR(BaseVector<16>::size) u64x2[SIZE / BaseVector<16>::size]; + int64x2_t ALIGN_ATTR(BaseVector<16>::size) s64x2[SIZE / BaseVector<16>::size]; + uint32x4_t ALIGN_ATTR(BaseVector<16>::size) u32x4[SIZE / BaseVector<16>::size]; + int32x4_t ALIGN_ATTR(BaseVector<16>::size) s32x4[SIZE / BaseVector<16>::size]; + uint16x8_t ALIGN_ATTR(BaseVector<16>::size) u16x8[SIZE / BaseVector<16>::size]; + int16x8_t ALIGN_ATTR(BaseVector<16>::size) s16x8[SIZE / BaseVector<16>::size]; + uint8x16_t ALIGN_ATTR(BaseVector<16>::size) u8x16[SIZE / BaseVector<16>::size]; + int8x16_t ALIGN_ATTR(BaseVector<16>::size) s8x16[SIZE / BaseVector<16>::size]; +#endif + uint64_t u64[SIZE / sizeof(uint64_t)]; int64_t s64[SIZE / sizeof(int64_t)]; uint32_t u32[SIZE / sizeof(uint32_t)]; @@ -182,7 +194,7 @@ public: SuperVector(typename base_type::type const v); template <typename T> - SuperVector(T const other); + SuperVector(T other); SuperVector(SuperVector const lo, SuperVector const hi); SuperVector(previous_type const lo, previous_type const hi); diff --git a/unit/internal/rvermicelli.cpp b/unit/internal/rvermicelli.cpp index d89067d0..5cd52e4d 100644 --- a/unit/internal/rvermicelli.cpp +++ b/unit/internal/rvermicelli.cpp @@ -30,7 +30,7 @@ #include "config.h" #include "gtest/gtest.h" -#include "nfa/vermicelli.h" +#include "nfa/vermicelli.hpp" #define BOUND (~(VERM_BOUNDARY - 1)) @@ -563,4 +563,4 @@ TEST(RNVermicelli16, Exec5) { } } -#endif // HAVE_SVE2 \ No newline at end of file +#endif // HAVE_SVE2 diff --git a/unit/internal/simd_utils.cpp b/unit/internal/simd_utils.cpp index b1b9bfb1..23640034 100644 --- a/unit/internal/simd_utils.cpp +++ b/unit/internal/simd_utils.cpp @@ -671,6 +671,7 @@ TEST(SimdUtilsTest, movq) { #elif defined(ARCH_PPC64EL) int64x2_t a = {0x123456789abcdefLL, ~0LL }; simd = (m128) a; + simd = vreinterpretq_s32_s64(a); #endif #endif r = movq(simd); diff --git
a/unit/internal/vermicelli.cpp b/unit/internal/vermicelli.cpp index dc458cb9..e6d976ad 100644 --- a/unit/internal/vermicelli.cpp +++ b/unit/internal/vermicelli.cpp @@ -30,7 +30,7 @@ #include "config.h" #include "gtest/gtest.h" -#include "nfa/vermicelli.h" +#include "nfa/vermicelli.hpp" TEST(Vermicelli, ExecNoMatch1) { char t1[] = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"; @@ -1150,4 +1150,4 @@ TEST(DoubleVermicelliMasked16, Exec5) { } } -#endif // HAVE_SVE2 \ No newline at end of file +#endif // HAVE_SVE2
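The vermicelli.cpp and rvermicelli.cpp updates above retarget the unit tests at the new header; the behaviour those tests pin down can be stated as a scalar reference model. caseClear8 and CASEMASK below are the helpers from casemask.hpp introduced earlier in this patch, while vermicelli_model itself is a hypothetical illustration of the scan's contract, not the library entry point:

    #include <cstdint>
    typedef uint8_t u8;

    // From casemask.hpp: index 0 leaves a byte intact, index 1 folds the
    // ASCII case bit (0x20) away.
    static u8 CASEMASK[] = { 0xff, 0xdf };

    static u8 caseClear8(u8 x, bool noCase) {
        return static_cast<u8>(x & CASEMASK[(u8)noCase]);
    }

    // Scalar model of a forward vermicelli scan: return the first byte of
    // [buf, buf_end) equal to c (case-insensitively when noCase is set),
    // or buf_end when nothing matches. The SIMD engines evaluate the same
    // predicate 16/32/64 bytes per step and use the first_*/last_* match
    // helpers from util/arch/*/match.hpp to turn the lane mask into a
    // pointer.
    static const u8 *vermicelli_model(u8 c, bool noCase, const u8 *buf,
                                      const u8 *buf_end) {
        u8 key = caseClear8(c, noCase);
        for (const u8 *p = buf; p < buf_end; ++p) {
            if (caseClear8(*p, noCase) == key) {
                return p;
            }
        }
        return buf_end; // no match
    }

The reverse variants exercised by rvermicelli.cpp walk the same predicate from buf_end - 1 down to buf, which is why the match helpers come in first_* and last_* pairs.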