diff --git a/CMakeLists.txt b/CMakeLists.txt index 0875b105..e3b5a2ee 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -146,7 +146,7 @@ endif () string(REGEX REPLACE "-O[^ ]*" "" CMAKE_CXX_FLAGS_${CONFIG} "${CMAKE_CXX_FLAGS_${CONFIG}}") endforeach () - if (CMAKE_COMPILER_IS_GNUCC AND NOT CROSS_COMPILE_AARCH64) + if (CMAKE_COMPILER_IS_GNUCC AND NOT CROSS_COMPILE_AARCH64 AND NOT ARCH_PPC64EL) message(STATUS "gcc version ${CMAKE_C_COMPILER_VERSION}") # If gcc doesn't recognise the host cpu, then mtune=native becomes # generic, which isn't very good in some cases. march=native looks at @@ -227,12 +227,24 @@ endif () set(EXTRA_CXX_FLAGS "${EXTRA_CXX_FLAGS} -DNDEBUG") endif() - if (NOT CMAKE_C_FLAGS MATCHES .*march.* AND NOT CMAKE_C_FLAGS MATCHES .*mtune.*) - set(ARCH_C_FLAGS "-march=${GNUCC_ARCH} -mtune=${TUNE_FLAG}") + + if (ARCH_IA32 OR ARCH_X86_64 OR ARCH_ARM32 OR ARCH_AARCH64) + if (NOT CMAKE_C_FLAGS MATCHES .*march.* AND NOT CMAKE_C_FLAGS MATCHES .*mtune.*) + set(ARCH_C_FLAGS "-march=${GNUCC_ARCH} -mtune=${TUNE_FLAG}") + endif() + + if (NOT CMAKE_CXX_FLAGS MATCHES .*march.* AND NOT CMAKE_CXX_FLAGS MATCHES .*mtune.*) + set(ARCH_CXX_FLAGS "-march=${GNUCC_ARCH} -mtune=${TUNE_FLAG}") + endif() endif() - - if (NOT CMAKE_CXX_FLAGS MATCHES .*march.* AND NOT CMAKE_CXX_FLAGS MATCHES .*mtune.*) - set(ARCH_CXX_FLAGS "-march=${GNUCC_ARCH} -mtune=${TUNE_FLAG}") + + if(ARCH_PPC64EL) + if (NOT CMAKE_C_FLAGS MATCHES .*march.* AND NOT CMAKE_C_FLAGS MATCHES .*mtune.*) + set(ARCH_C_FLAGS "-mtune=${TUNE_FLAG}") + endif() + if (NOT CMAKE_CXX_FLAGS MATCHES .*march.* AND NOT CMAKE_CXX_FLAGS MATCHES .*mtune.*) + set(ARCH_CXX_FLAGS "-mtune=${TUNE_FLAG}") + endif() endif() if(CMAKE_COMPILER_IS_GNUCC) @@ -277,6 +289,10 @@ elseif (ARCH_ARM32 OR ARCH_AARCH64) message(FATAL_ERROR "arm_sve.h is required to build for SVE.") endif() endif() + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -flax-vector-conversions") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flax-vector-conversions") +elseif (ARCH_PPC64EL) + CHECK_INCLUDE_FILE_CXX(altivec.h HAVE_C_PPC64EL_ALTIVEC_H) endif() CHECK_FUNCTION_EXISTS(posix_memalign HAVE_POSIX_MEMALIGN) @@ -520,7 +536,7 @@ set (hs_exec_common_SRCS ${hs_exec_common_SRCS} src/util/arch/x86/cpuid_flags.c ) -elseif (ARCH_ARM32 OR ARCH_AARCH64) +elseif (ARCH_ARM32 OR ARCH_AARCH64 OR ARCH_PPC64EL) set (hs_exec_common_SRCS ${hs_exec_common_SRCS} src/util/arch/arm/cpuid_flags.c @@ -679,6 +695,10 @@ elseif (ARCH_ARM32 OR ARCH_AARCH64) set (hs_exec_SRCS ${hs_exec_SRCS} src/util/supervector/arch/arm/impl.cpp) +elseif (ARCH_PPC64EL) +set (hs_exec_SRCS + ${hs_exec_SRCS} + src/util/supervector/arch/ppc64el/impl.cpp) endif () endif() diff --git a/cmake/arch.cmake b/cmake/arch.cmake index 073f26c5..2100799f 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -9,6 +9,9 @@ elseif (HAVE_C_INTRIN_H) elseif (HAVE_C_ARM_NEON_H) set (INTRIN_INC_H "arm_neon.h") set (FAT_RUNTIME OFF) +elseif (HAVE_C_PPC64EL_ALTIVEC_H) + set (INTRIN_INC_H "altivec.h") + set (FAT_RUNTIME OFF) else() message (FATAL_ERROR "No intrinsics header found") endif () @@ -136,7 +139,20 @@ int main(){ (void)_mm512_permutexvar_epi8(idx, a); }" HAVE_AVX512VBMI) -elseif (!ARCH_ARM32 AND !ARCH_AARCH64) + +elseif (ARCH_ARM32 OR ARCH_AARCH64) + CHECK_C_SOURCE_COMPILES("#include <${INTRIN_INC_H}> +int main() { + int32x4_t a = vdupq_n_s32(1); + (void)a; +}" HAVE_NEON) +elseif (ARCH_PPC64EL) + CHECK_C_SOURCE_COMPILES("#include <${INTRIN_INC_H}> +int main() { + vector int a = vec_splat_s32(1); + (void)a; +}" HAVE_VSX) +else () message (FATAL_ERROR "Unsupported 
architecture") endif () @@ -169,6 +185,10 @@ else (NOT FAT_RUNTIME) if ((ARCH_ARM32 OR ARCH_AARCH64) AND NOT HAVE_NEON) message(FATAL_ERROR "NEON support required for ARM support") endif () + if (ARCH_PPC64EL AND NOT HAVE_VSX) + message(FATAL_ERROR "VSX support required for Power support") + endif () + endif () unset (PREV_FLAGS) diff --git a/cmake/config.h.in b/cmake/config.h.in index 0afd6998..dbd72445 100644 --- a/cmake/config.h.in +++ b/cmake/config.h.in @@ -21,6 +21,9 @@ /* "Define if building for AARCH64" */ #cmakedefine ARCH_AARCH64 +/* "Define if building for PPC64EL" */ +#cmakedefine ARCH_PPC64EL + /* "Define if cross compiling for AARCH64" */ #cmakedefine CROSS_COMPILE_AARCH64 @@ -75,6 +78,9 @@ /* C compiler has arm_sve.h */ #cmakedefine HAVE_C_ARM_SVE_H +/* C compiler has altivec.h */ +#cmakedefine HAVE_C_PPC64EL_ALTIVEC_H + /* Define to 1 if you have the declaration of `pthread_setaffinity_np', and to 0 if you don't. */ #cmakedefine HAVE_DECL_PTHREAD_SETAFFINITY_NP diff --git a/cmake/platform.cmake b/cmake/platform.cmake index 295775df..2cdc3a6e 100644 --- a/cmake/platform.cmake +++ b/cmake/platform.cmake @@ -7,15 +7,13 @@ if (CROSS_COMPILE_AARCH64) else() # really only interested in the preprocessor here CHECK_C_SOURCE_COMPILES("#if !(defined(__x86_64__) || defined(_M_X64))\n#error not 64bit\n#endif\nint main(void) { return 0; }" ARCH_X86_64) - CHECK_C_SOURCE_COMPILES("#if !(defined(__i386__) || defined(_M_IX86))\n#error not 32bit\n#endif\nint main(void) { return 0; }" ARCH_IA32) - CHECK_C_SOURCE_COMPILES("#if !defined(__ARM_ARCH_ISA_A64)\n#error not 64bit\n#endif\nint main(void) { return 0; }" ARCH_AARCH64) CHECK_C_SOURCE_COMPILES("#if !defined(__ARM_ARCH_ISA_ARM)\n#error not 32bit\n#endif\nint main(void) { return 0; }" ARCH_ARM32) - - if (ARCH_X86_64 OR ARCH_AARCH64) + CHECK_C_SOURCE_COMPILES("#if !defined(__PPC64__) || !defined(__LITTLE_ENDIAN__) || !defined(__VSX__)\n#error not ppc64el\n#endif\nint main(void) { return 0; }" ARCH_PPC64EL) + if (ARCH_X86_64 OR ARCH_AARCH64 OR ARCH_PPC64EL) set(ARCH_64_BIT TRUE) else() set(ARCH_32_BIT TRUE) endif() -endif() \ No newline at end of file +endif() diff --git a/src/fdr/teddy.c b/src/fdr/teddy.c index 3e46a0d6..65db3dff 100644 --- a/src/fdr/teddy.c +++ b/src/fdr/teddy.c @@ -893,10 +893,10 @@ do { \ #define CONFIRM_TEDDY(var, bucket, offset, reason, conf_fn) \ do { \ if (unlikely(diff128(var, ones128()))) { \ - u64a __attribute__((aligned(16))) vector[2]; \ - store128(vector, var); \ - u64a lo = vector[0]; \ - u64a hi = vector[1]; \ + u64a __attribute__((aligned(16))) vec[2]; \ + store128(vec, var); \ + u64a lo = vec[0]; \ + u64a hi = vec[1]; \ CONF_CHUNK_64(lo, bucket, offset, reason, conf_fn); \ CONF_CHUNK_64(hi, bucket, offset + 8, reason, conf_fn); \ } \ diff --git a/src/hs_valid_platform.c b/src/hs_valid_platform.c index 8323f343..809deee1 100644 --- a/src/hs_valid_platform.c +++ b/src/hs_valid_platform.c @@ -44,5 +44,7 @@ hs_error_t HS_CDECL hs_valid_platform(void) { } #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) return HS_SUCCESS; +#elif defined(ARCH_PPC64EL) + return HS_SUCCESS; #endif } diff --git a/src/nfa/ppc64el/shufti.hpp b/src/nfa/ppc64el/shufti.hpp new file mode 100644 index 00000000..dedeb52d --- /dev/null +++ b/src/nfa/ppc64el/shufti.hpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * Copyright (c) 2020-2021, VectorCamp PC + * Copyright (c) 2021, Arm Limited + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the
following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** \file + * \brief Shufti: character class acceleration. + * + */ + +template +static really_inline +const SuperVector blockSingleMask(SuperVector mask_lo, SuperVector mask_hi, SuperVector chars) { + const SuperVector low4bits = SuperVector::dup_u8(0xf); + + SuperVector c_lo = chars & low4bits; + SuperVector c_hi = chars.template vshr_8_imm<4>(); + c_lo = mask_lo.template pshufb(c_lo); + c_hi = mask_hi.template pshufb(c_hi); + + return (c_lo & c_hi).eq(SuperVector::Zeroes()); +} + +template +static really_inline +SuperVector blockDoubleMask(SuperVector mask1_lo, SuperVector mask1_hi, SuperVector mask2_lo, SuperVector mask2_hi, SuperVector chars) { + + const SuperVector low4bits = SuperVector::dup_u8(0xf); + SuperVector chars_lo = chars & low4bits; + chars_lo.print8("chars_lo"); + SuperVector chars_hi = chars.template vshr_64_imm<4>() & low4bits; + chars_hi.print8("chars_hi"); + SuperVector c1_lo = mask1_lo.template pshufb(chars_lo); + c1_lo.print8("c1_lo"); + SuperVector c1_hi = mask1_hi.template pshufb(chars_hi); + c1_hi.print8("c1_hi"); + SuperVector t1 = c1_lo | c1_hi; + t1.print8("t1"); + + SuperVector c2_lo = mask2_lo.template pshufb(chars_lo); + c2_lo.print8("c2_lo"); + SuperVector c2_hi = mask2_hi.template pshufb(chars_hi); + c2_hi.print8("c2_hi"); + SuperVector t2 = c2_lo | c2_hi; + t2.print8("t2"); + t2.template vshr_128_imm<1>().print8("t2.vshr_128(1)"); + SuperVector t = t1 | (t2.template vshr_128_imm<1>()); + t.print8("t"); + + return t.eq(SuperVector::Ones()); +} diff --git a/src/nfa/ppc64el/truffle.hpp b/src/nfa/ppc64el/truffle.hpp new file mode 100644 index 00000000..7dc711f4 --- /dev/null +++ b/src/nfa/ppc64el/truffle.hpp @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * Copyright (c) 2020-2021, VectorCamp PC + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
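
The two kernels above implement the usual shufti nibble-table scheme: each input byte is split into its low and high nibble, each nibble selects a byte from a 16-entry bucket mask via pshufb, and the byte belongs to the class only if the two looked-up bucket sets intersect (the vector code then flags the non-matching lanes with eq(Zeroes()) for the caller to scan). A minimal scalar sketch of that idea, using hypothetical names and plain arrays instead of the SuperVector types:

#include <cstdint>

// Scalar model of blockSingleMask(): true if byte c falls in the character
// class described by the two 16-byte nibble masks (one bit per bucket).
static bool shufti_contains(const uint8_t mask_lo[16], const uint8_t mask_hi[16],
                            uint8_t c) {
    uint8_t buckets_lo = mask_lo[c & 0xf];  // buckets compatible with the low nibble
    uint8_t buckets_hi = mask_hi[c >> 4];   // buckets compatible with the high nibble
    return (buckets_lo & buckets_hi) != 0;  // hit iff some bucket accepts both nibbles
}
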
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** \file + * \brief Truffle: character class acceleration. + * + */ + +template +static really_inline +const SuperVector blockSingleMask(SuperVector shuf_mask_lo_highclear, SuperVector shuf_mask_lo_highset, SuperVector chars) { + + chars.print8("chars"); + shuf_mask_lo_highclear.print8("shuf_mask_lo_highclear"); + shuf_mask_lo_highset.print8("shuf_mask_lo_highset"); + + SuperVector highconst = SuperVector::dup_u8(0x80); + highconst.print8("highconst"); + SuperVector shuf_mask_hi = SuperVector::dup_u64(0x8040201008040201); + shuf_mask_hi.print8("shuf_mask_hi"); + + SuperVector shuf1 = shuf_mask_lo_highclear.pshufb(chars); + shuf1.print8("shuf1"); + SuperVector t1 = chars ^ highconst; + t1.print8("t1"); + SuperVector shuf2 = shuf_mask_lo_highset.pshufb(t1); + shuf2.print8("shuf2"); + SuperVector t2 = highconst.opandnot(chars.template vshr_64_imm<4>()); + t2.print8("t2"); + SuperVector shuf3 = shuf_mask_hi.pshufb(t2); + shuf3.print8("shuf3"); + SuperVector res = (shuf1 | shuf2) & shuf3; + res.print8("(shuf1 | shuf2) & shuf3"); + + return res.eq(SuperVector::Zeroes()); +} diff --git a/src/nfa/ppc64el/vermicelli.hpp b/src/nfa/ppc64el/vermicelli.hpp new file mode 100644 index 00000000..eeaad6a1 --- /dev/null +++ b/src/nfa/ppc64el/vermicelli.hpp @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2015-2020, Intel Corporation + * Copyright (c) 2020-2021, VectorCamp PC + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
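
The truffle kernel above encodes a full 256-character class in two 16-byte tables: the top bit of the byte picks the table (highclear for 0x00-0x7f, highset for 0x80-0xff), the low nibble picks a byte in it, and bits 4-6 pick a bit within that byte (shuf_mask_hi supplies the 1 << n constants). A scalar sketch of the same encoding, with hypothetical names, purely for illustration:

#include <cstdint>

// Scalar model of the truffle lookup: membership of byte c is a single bit in
// one of two 16-byte tables.
static bool truffle_contains(const uint8_t highclear[16], const uint8_t highset[16],
                             uint8_t c) {
    const uint8_t *table = (c & 0x80) ? highset : highclear;
    uint8_t bits = table[c & 0xf];          // one table byte holds 8 class bits
    uint8_t bit  = 1u << ((c >> 4) & 0x7);  // selected by bits 4..6 of c
    return (bits & bit) != 0;
}
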
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** \file + * \brief Vermicelli: single-byte and double-byte acceleration. + */ + +template +static really_inline +const u8 *vermicelliBlock(SuperVector const data, SuperVector const chars, SuperVector const casemask, u8 const *buf, u16 const len) { + + SuperVector mask = chars.eq(casemask & data); + return first_non_zero_match(buf, mask, len); +} + +template +static really_inline +const u8 *vermicelliBlockNeg(SuperVector const data, SuperVector const chars, SuperVector const casemask, u8 const *buf, u16 const len) { + + SuperVector mask = chars.eq(casemask & data); + return first_zero_match_inverted(buf, mask, len); +} + +template +static really_inline +const u8 *rvermicelliBlock(SuperVector const data, SuperVector const chars, SuperVector const casemask, u8 const *buf, u16 const len) { + + SuperVector mask = chars.eq(casemask & data); + return last_non_zero_match(buf, mask, len); +} + +template +static really_inline +const u8 *rvermicelliBlockNeg(SuperVector const data, SuperVector const chars, SuperVector const casemask, const u8 *buf, u16 const len) { + + data.print8("data"); + chars.print8("chars"); + casemask.print8("casemask"); + SuperVector mask = chars.eq(casemask & data); + mask.print8("mask"); + return last_zero_match_inverted(buf, mask, len); +} + +template +static really_inline +const u8 *vermicelliDoubleBlock(SuperVector const data, SuperVector const chars1, SuperVector const chars2, SuperVector const casemask, + u8 const c1, u8 const c2, u8 const casechar, u8 const *buf, u16 const len) { + + SuperVector v = casemask & data; + SuperVector mask1 = chars1.eq(v); + SuperVector mask2 = chars2.eq(v); + SuperVector mask = mask1 & (mask2 >> 1); + + DEBUG_PRINTF("rv[0] = %02hhx, rv[-1] = %02hhx\n", buf[0], buf[-1]); + bool partial_match = (((buf[0] & casechar) == c2) && ((buf[-1] & casechar) == c1)); + DEBUG_PRINTF("partial = %d\n", partial_match); + if (partial_match) return buf - 1; + + return first_non_zero_match(buf, mask, len); +} + +template +static really_inline +const u8 *rvermicelliDoubleBlock(SuperVector const data, SuperVector const chars1, SuperVector const chars2, SuperVector const casemask, + u8 const c1, u8 const c2, u8 const casechar, u8 const *buf, u16 const len) { + + SuperVector v = casemask & data; + SuperVector mask1 = chars1.eq(v); + SuperVector mask2 = chars2.eq(v); + SuperVector mask = (mask1 << 1)& mask2; + + DEBUG_PRINTF("buf[0] = %02hhx, buf[-1] = %02hhx\n", buf[0], buf[-1]); + bool partial_match = (((buf[0] & casechar) == c2) && ((buf[-1] & casechar) == c1)); + DEBUG_PRINTF("partial = %d\n", partial_match); + if (partial_match) { + mask = mask | (SuperVector::Ones() >> (S-1)); + } + + return last_non_zero_match(buf, mask, len); +} + +template +static 
really_inline +const u8 *vermicelliDoubleMaskedBlock(SuperVector const data, SuperVector const chars1, SuperVector const chars2, + SuperVector const mask1, SuperVector const mask2, + u8 const c1, u8 const c2, u8 const m1, u8 const m2, u8 const *buf, u16 const len) { + + SuperVector v1 = chars1.eq(data & mask1); + SuperVector v2 = chars2.eq(data & mask2); + SuperVector mask = v1 & (v2 >> 1); + + DEBUG_PRINTF("rv[0] = %02hhx, rv[-1] = %02hhx\n", buf[0], buf[-1]); + bool partial_match = (((buf[0] & m1) == c2) && ((buf[-1] & m2) == c1)); + DEBUG_PRINTF("partial = %d\n", partial_match); + if (partial_match) return buf - 1; + + return first_non_zero_match(buf, mask, len); +} + + diff --git a/src/nfa/shufti_simd.hpp b/src/nfa/shufti_simd.hpp index 09850c00..887f2468 100644 --- a/src/nfa/shufti_simd.hpp +++ b/src/nfa/shufti_simd.hpp @@ -56,6 +56,8 @@ SuperVector blockDoubleMask(SuperVector mask1_lo, SuperVector mask1_hi, #include "x86/shufti.hpp" #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) #include "arm/shufti.hpp" +#elif defined(ARCH_PPC64EL) +#include "ppc64el/shufti.hpp" #endif template diff --git a/src/nfa/truffle_simd.hpp b/src/nfa/truffle_simd.hpp index 13a5e787..8d9911fd 100644 --- a/src/nfa/truffle_simd.hpp +++ b/src/nfa/truffle_simd.hpp @@ -49,13 +49,14 @@ const SuperVector blockSingleMask(SuperVector shuf_mask_lo_highclear, Supe #include "x86/truffle.hpp" #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) #include "arm/truffle.hpp" +#elif defined(ARCH_PPC64EL) +#include "ppc64el/truffle.hpp" #endif template static really_inline const u8 *fwdBlock(SuperVector shuf_mask_lo_highclear, SuperVector shuf_mask_lo_highset, SuperVector chars, const u8 *buf) { SuperVector res = blockSingleMask(shuf_mask_lo_highclear, shuf_mask_lo_highset, chars); - return first_zero_match_inverted(buf, res); } diff --git a/src/nfa/vermicelli_simd.cpp b/src/nfa/vermicelli_simd.cpp index dbce6dc4..d790d137 100644 --- a/src/nfa/vermicelli_simd.cpp +++ b/src/nfa/vermicelli_simd.cpp @@ -75,6 +75,8 @@ const u8 *vermicelliDoubleMaskedBlock(SuperVector const data, SuperVector #include "x86/vermicelli.hpp" #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) #include "arm/vermicelli.hpp" +#elif defined(ARCH_PPC64EL) +#include "ppc64el/vermicelli.hpp" #endif template diff --git a/src/util/arch.h b/src/util/arch.h index 794f28f7..1e8d2fbd 100644 --- a/src/util/arch.h +++ b/src/util/arch.h @@ -39,6 +39,8 @@ #include "util/arch/x86/x86.h" #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) #include "util/arch/arm/arm.h" +#elif defined(ARCH_PPC64EL) +#include "util/arch/ppc64el/ppc64el.h" #endif #endif // UTIL_ARCH_X86_H_ diff --git a/src/util/arch/arm/simd_utils.h b/src/util/arch/arm/simd_utils.h index 630cac93..4c68b485 100644 --- a/src/util/arch/arm/simd_utils.h +++ b/src/util/arch/arm/simd_utils.h @@ -328,11 +328,12 @@ m128 palignr_imm(m128 r, m128 l, int offset) { static really_really_inline m128 palignr(m128 r, m128 l, int offset) { -#if defined(HS_OPTIMIZE) - return (m128)vextq_s8((int8x16_t)l, (int8x16_t)r, offset); -#else - return palignr_imm(r, l, offset); +#if defined(HAVE__BUILTIN_CONSTANT_P) + if (__builtin_constant_p(offset)) { + return (m128)vextq_s8((int8x16_t)l, (int8x16_t)r, offset); + } #endif + return palignr_imm(r, l, offset); } #undef CASE_ALIGN_VECTORS diff --git a/src/util/arch/common/simd_utils.h b/src/util/arch/common/simd_utils.h index 65e7b69a..17de949a 100644 --- a/src/util/arch/common/simd_utils.h +++ b/src/util/arch/common/simd_utils.h @@ -46,46 +46,46 @@ #endif // HAVE_SIMD_128_BITS #ifdef 
DEBUG -static inline void print_m128_16x8(const char *label, m128 vector) { +static inline void print_m128_16x8(const char *label, m128 vec) { uint8_t ALIGN_ATTR(16) data[16]; - store128(data, vector); - DEBUG_PRINTF("%s: ", label); - for(int i=0; i < 16; i++) + store128(data, vec); + DEBUG_PRINTF("%12s: ", label); + for(int i=15; i >=0; i--) printf("%02x ", data[i]); printf("\n"); } -static inline void print_m128_8x16(const char *label, m128 vector) { +static inline void print_m128_8x16(const char *label, m128 vec) { uint16_t ALIGN_ATTR(16) data[8]; - store128(data, vector); - DEBUG_PRINTF("%s: ", label); - for(int i=0; i < 8; i++) + store128(data, vec); + DEBUG_PRINTF("%12s: ", label); + for(int i=7; i >= 0; i--) printf("%04x ", data[i]); printf("\n"); } -static inline void print_m128_4x32(const char *label, m128 vector) { +static inline void print_m128_4x32(const char *label, m128 vec) { uint32_t ALIGN_ATTR(16) data[4]; - store128(data, vector); - DEBUG_PRINTF("%s: ", label); - for(int i=0; i < 4; i++) + store128(data, vec); + DEBUG_PRINTF("%12s: ", label); + for(int i=3; i >= 0; i--) printf("%08x ", data[i]); printf("\n"); } -static inline void print_m128_2x64(const char *label, m128 vector) { +static inline void print_m128_2x64(const char *label, m128 vec) { uint64_t ALIGN_ATTR(16) data[2]; - store128(data, vector); - DEBUG_PRINTF("%s: ", label); - for(int i=0; i < 2; i++) + store128(data, vec); + DEBUG_PRINTF("%12s: ", label); + for(int i=1; i >= 0; i--) printf("%016lx ", data[i]); printf("\n"); } #else -#define print_m128_16x8(label, vector) ; -#define print_m128_8x16(label, vector) ; -#define print_m128_4x32(label, vector) ; -#define print_m128_2x64(label, vector) ; +#define print_m128_16x8(label, vec) ; +#define print_m128_8x16(label, vec) ; +#define print_m128_4x32(label, vec) ; +#define print_m128_2x64(label, vec) ; #endif /**** diff --git a/src/util/arch/ppc64el/bitutils.h b/src/util/arch/ppc64el/bitutils.h new file mode 100644 index 00000000..10c4869b --- /dev/null +++ b/src/util/arch/ppc64el/bitutils.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * Copyright (c) 2020-2021, VectorCamp PC + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** \file + * \brief Bit-twiddling primitives (ctz, compress etc) + */ + +#ifndef BITUTILS_ARCH_PPC64EL_H +#define BITUTILS_ARCH_PPC64EL_H + +#include "ue2common.h" +#include "util/popcount.h" +#include "util/arch.h" +#include "util/intrinsics.h" + +#include "util/arch/common/bitutils.h" + +static really_inline +u32 clz32_impl(u32 x) { + return clz32_impl_c(x); +} + +static really_inline +u32 clz64_impl(u64a x) { + return clz64_impl_c(x); +} + +static really_inline +u32 ctz32_impl(u32 x) { + return ctz32_impl_c(x); +} + +static really_inline +u32 ctz64_impl(u64a x) { + return ctz64_impl_c(x); +} + +static really_inline +u32 lg2_impl(u32 x) { + return lg2_impl_c(x); +} + +static really_inline +u64a lg2_64_impl(u64a x) { + return lg2_64_impl_c(x); +} + +static really_inline +u32 findAndClearLSB_32_impl(u32 *v) { + return findAndClearLSB_32_impl_c(v); +} + +static really_inline +u32 findAndClearLSB_64_impl(u64a *v) { + return findAndClearLSB_64_impl_c(v); +} + +static really_inline +u32 findAndClearMSB_32_impl(u32 *v) { + u32 val = *v; + u32 offset = 31 - clz32_impl(val); + *v = val & ~(1 << offset); + assert(offset < 32); + return offset; +} + +static really_inline +u32 findAndClearMSB_64_impl(u64a *v) { + return findAndClearMSB_64_impl_c(v); +} + +static really_inline +u32 compress32_impl(u32 x, u32 m) { + return compress32_impl_c(x, m); +} + +static really_inline +u64a compress64_impl(u64a x, u64a m) { + return compress64_impl_c(x, m); +} + +static really_inline +m128 compress128_impl(m128 x, m128 m) { + m128 one = set1_2x64(1); + m128 bitset = one; + m128 vres = zeroes128(); + while (isnonzero128(m)) { + m128 mm = sub_2x64(zeroes128(), m); + m128 tv = and128(x, m); + tv = and128(tv, mm); + + m128 mask = not128(eq64_m128(tv, zeroes128())); + mask = and128(bitset, mask); + vres = or128(vres, mask); + m = and128(m, sub_2x64(m, one)); + bitset = lshift64_m128(bitset, 1); + } + return vres; +} + +static really_inline +u32 expand32_impl(u32 x, u32 m) { + return expand32_impl_c(x, m); +} + +static really_inline +u64a expand64_impl(u64a x, u64a m) { + return expand64_impl_c(x, m); +} + +static really_inline +m128 expand128_impl(m128 x, m128 m) { + m128 one = set1_2x64(1); + m128 bb = one; + m128 res = zeroes128(); + while (isnonzero128(m)) { + m128 xm = and128(x, bb); + m128 mm = sub_2x64(zeroes128(), m); + m128 mask = not128(eq64_m128(xm, zeroes128())); + mask = and128(mask, and128(m,mm)); + res = or128(res, mask); + m = and128(m, sub_2x64(m, one)); + bb = lshift64_m128(bb, 1); + } + return res; +} + +/* returns the first set bit after begin (if not ~0U). If no bit is set after + * begin returns ~0U + */ +static really_inline +u32 bf64_iterate_impl(u64a bitfield, u32 begin) { + if (begin != ~0U) { + /* switch off all bits at or below begin. Note: not legal to shift by + * by size of the datatype or larger. 
*/ + assert(begin <= 63); + bitfield &= ~((2ULL << begin) - 1); + } + + if (!bitfield) { + return ~0U; + } + + return ctz64_impl(bitfield); +} + +static really_inline +char bf64_set_impl(u64a *bitfield, u32 i) { + return bf64_set_impl_c(bitfield, i); +} + +static really_inline +void bf64_unset_impl(u64a *bitfield, u32 i) { + return bf64_unset_impl_c(bitfield, i); +} + +static really_inline +u32 rank_in_mask32_impl(u32 mask, u32 bit) { + return rank_in_mask32_impl_c(mask, bit); +} + +static really_inline +u32 rank_in_mask64_impl(u64a mask, u32 bit) { + return rank_in_mask64_impl_c(mask, bit); +} + +static really_inline +u32 pext32_impl(u32 x, u32 mask) { + return pext32_impl_c(x, mask); +} + +static really_inline +u64a pext64_impl(u64a x, u64a mask) { + return pext64_impl_c(x, mask); +} + +static really_inline +u64a pdep64(u64a x, u64a mask) { + return pdep64_impl_c(x, mask); +} + +/* this target has no dedicated ANDN helper, so fall back to the generic + * C implementation. + */ +static really_inline +u64a andn_impl(const u32 a, const u8 *b) { + return andn_impl_c(a, b); +} + +#endif // BITUTILS_ARCH_PPC64EL_H diff --git a/src/util/arch/ppc64el/match.hpp b/src/util/arch/ppc64el/match.hpp new file mode 100644 index 00000000..a3f52e41 --- /dev/null +++ b/src/util/arch/ppc64el/match.hpp @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * Copyright (c) 2020-2021, VectorCamp PC + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE.
+ */ + +template <> +really_really_inline +const u8 *first_non_zero_match<16>(const u8 *buf, SuperVector<16> v, u16 const UNUSED len) { + SuperVector<16>::movemask_type z = v.movemask(); + DEBUG_PRINTF("buf %p z %08x \n", buf, z); + DEBUG_PRINTF("z %08x\n", z); + if (unlikely(z)) { + u32 pos = ctz32(z); + DEBUG_PRINTF("~z %08x\n", ~z); + DEBUG_PRINTF("match @ pos %u\n", pos); + assert(pos < 16); + return buf + pos; + } else { + return NULL; // no match + } +} + +template <> +really_really_inline +const u8 *last_non_zero_match<16>(const u8 *buf, SuperVector<16> v, u16 const UNUSED len) { + SuperVector<16>::movemask_type z = v.movemask(); + DEBUG_PRINTF("buf %p z %08x \n", buf, z); + DEBUG_PRINTF("z %08x\n", z); + if (unlikely(z)) { + u32 pos = clz32(z); + DEBUG_PRINTF("match @ pos %u\n", pos); + assert(pos >= 16 && pos < 32); + return buf + (31 - pos); + } else { + return NULL; // no match + } +} + +template <> +really_really_inline +const u8 *first_zero_match_inverted<16>(const u8 *buf, SuperVector<16> v, u16 const UNUSED len) { + SuperVector<16>::movemask_type z = v.movemask(); + DEBUG_PRINTF("buf %p z %08x \n", buf, z); + DEBUG_PRINTF("z %08x\n", z); + if (unlikely(z != 0xffff)) { + u32 pos = ctz32(~z & 0xffff); + DEBUG_PRINTF("~z %08x\n", ~z); + DEBUG_PRINTF("match @ pos %u\n", pos); + assert(pos < 16); + return buf + pos; + } else { + return NULL; // no match + } +} + + +template <> +really_really_inline +const u8 *last_zero_match_inverted<16>(const u8 *buf, SuperVector<16> v, uint16_t UNUSED len ) { + SuperVector<16>::movemask_type z = v.movemask(); + DEBUG_PRINTF("buf %p z %08x \n", buf, z); + DEBUG_PRINTF("z %08x\n", z); + if (unlikely(z != 0xffff)) { + u32 pos = clz32(~z & 0xffff); + DEBUG_PRINTF("~z %08x\n", ~z); + DEBUG_PRINTF("match @ pos %u\n", pos); + assert(pos >= 16 && pos < 32); + return buf + (31 - pos); + } else { + return NULL; // no match + } +} + + diff --git a/src/util/arch/ppc64el/ppc64el.h b/src/util/arch/ppc64el/ppc64el.h new file mode 100644 index 00000000..dbb38297 --- /dev/null +++ b/src/util/arch/ppc64el/ppc64el.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2017-2020, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
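
The specializations above all follow the same recipe: movemask() compresses the 16-byte comparison result into a 16-bit lane mask (bit i set means byte i matched), and ctz32/clz32 turn that mask into a buffer offset. A hedged scalar sketch of that last step, with hypothetical helper names and GCC/Clang builtins assumed:

#include <cstddef>
#include <cstdint>

// Map a 16-bit lane mask back to buffer positions, mirroring the ctz32/clz32
// use in the match helpers above.
static const uint8_t *first_match(const uint8_t *buf, uint32_t z) {
    if (!z) return nullptr;                  // no lane matched
    return buf + __builtin_ctz(z);           // lowest set bit = first matching byte
}

static const uint8_t *last_match(const uint8_t *buf, uint32_t z) {
    if (!z) return nullptr;
    return buf + (31 - __builtin_clz(z));    // highest set bit = last matching byte
}

In the inverted variants above, the mask is complemented and truncated to 16 bits first, so an all-ones comparison result (no zero lane) is reported as no match.
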
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** \file + * \brief Per-platform architecture definitions + */ + +#ifndef UTIL_ARCH_PPC64EL_H_ +#define UTIL_ARCH_PPC64EL_H_ + +#if defined(__VSX__) && defined(ARCH_PPC64EL) +#define HAVE_VSX +#define HAVE_SIMD_128_BITS +#define VECTORSIZE 16 +#endif + +#endif // UTIL_ARCH_ARM_H_ + diff --git a/src/util/arch/ppc64el/simd_types.h b/src/util/arch/ppc64el/simd_types.h new file mode 100644 index 00000000..21dae5cb --- /dev/null +++ b/src/util/arch/ppc64el/simd_types.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ARCH_PPC64EL_SIMD_TYPES_H +#define ARCH_PPC64EL_SIMD_TYPES_H + +#if !defined(m128) && defined(HAVE_VSX) +typedef __vector int32_t m128; +#endif + +#endif /* ARCH_PPC64EL_SIMD_TYPES_H */ + diff --git a/src/util/arch/ppc64el/simd_utils.h b/src/util/arch/ppc64el/simd_utils.h new file mode 100644 index 00000000..137fc94f --- /dev/null +++ b/src/util/arch/ppc64el/simd_utils.h @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2015-2020, Intel Corporation + * Copyright (c) 2020-2021, VectorCamp PC + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/** \file + * \brief SIMD types and primitive operations. + */ + +#ifndef ARCH_PPC64EL_SIMD_UTILS_H +#define ARCH_PPC64EL_SIMD_UTILS_H + +#include + +#include "ue2common.h" +#include "util/simd_types.h" +#include "util/unaligned.h" +#include "util/intrinsics.h" + +#include // for memcpy + +typedef __vector uint64_t uint64x2_t; +typedef __vector int64_t int64x2_t; +typedef __vector uint32_t uint32x4_t; +typedef __vector int32_t int32x4_t; +typedef __vector uint16_t uint16x8_t; +typedef __vector int16_t int16x8_t; +typedef __vector uint8_t uint8x16_t; +typedef __vector int8_t int8x16_t; + + +#define ZEROES_8 0, 0, 0, 0, 0, 0, 0, 0 +#define ZEROES_31 ZEROES_8, ZEROES_8, ZEROES_8, 0, 0, 0, 0, 0, 0, 0 +#define ZEROES_32 ZEROES_8, ZEROES_8, ZEROES_8, ZEROES_8 + +/** \brief LUT for the mask1bit functions. */ +ALIGN_CL_DIRECTIVE static const u8 simd_onebit_masks[] = { + ZEROES_32, ZEROES_32, + ZEROES_31, 0x01, ZEROES_32, + ZEROES_31, 0x02, ZEROES_32, + ZEROES_31, 0x04, ZEROES_32, + ZEROES_31, 0x08, ZEROES_32, + ZEROES_31, 0x10, ZEROES_32, + ZEROES_31, 0x20, ZEROES_32, + ZEROES_31, 0x40, ZEROES_32, + ZEROES_31, 0x80, ZEROES_32, + ZEROES_32, ZEROES_32, +}; + +static really_inline m128 ones128(void) { + return (m128) vec_splat_u8(-1); +} + +static really_inline m128 zeroes128(void) { + return (m128) vec_splat_s32(0); +} + +/** \brief Bitwise not for m128*/ +static really_inline m128 not128(m128 a) { + //return (m128)vec_xor(a, a); + return (m128) vec_xor(a,ones128()); +} + +/** \brief Return 1 if a and b are different otherwise 0 */ +static really_inline int diff128(m128 a, m128 b) { + return vec_any_ne(a, b); +} + +static really_inline int isnonzero128(m128 a) { + return !!diff128(a, zeroes128()); +} + +/** + * "Rich" version of diff128(). Takes two vectors a and b and returns a 4-bit + * mask indicating which 32-bit words contain differences. 
+ */ +static really_inline u32 diffrich128(m128 a, m128 b) { + static const m128 movemask = { 1, 2, 4, 8 }; + m128 mask = (m128) vec_cmpeq(a, b); // _mm_cmpeq_epi32 (a, b); + mask = vec_and(not128(mask), movemask); + m128 sum = vec_sums(mask, zeroes128()); + //sum = vec_sld(zeroes128(), sum, 4); + //s32 ALIGN_ATTR(16) x; + //vec_ste(sum, 0, &x); + //return x; // it could be ~(movemask_128(mask)) & 0x; + return sum[3]; +} + +/** + * "Rich" version of diff128(), 64-bit variant. Takes two vectors a and b and + * returns a 4-bit mask indicating which 64-bit words contain differences. + */ +static really_inline u32 diffrich64_128(m128 a, m128 b) { + static const uint64x2_t movemask = { 1, 4 }; + uint64x2_t mask = (uint64x2_t) vec_cmpeq((uint64x2_t)a, (uint64x2_t)b); + mask = (uint64x2_t) vec_and((uint64x2_t)not128((m128)mask), movemask); + m128 sum = vec_sums((m128)mask, zeroes128()); + //sum = vec_sld(zeroes128(), sum, 4); + //s32 ALIGN_ATTR(16) x; + //vec_ste(sum, 0, &x); + //return x; + return sum[3]; +} + +static really_really_inline +m128 add_2x64(m128 a, m128 b) { + return (m128) vec_add((uint64x2_t)a, (uint64x2_t)b); +} + +static really_really_inline +m128 sub_2x64(m128 a, m128 b) { + return (m128) vec_sub((uint64x2_t)a, (uint64x2_t)b); +} + +static really_really_inline +m128 lshift_m128(m128 a, unsigned b) { + switch(b){ + case 1: return vec_sld(a, zeroes128(), 1); break; + case 2: return vec_sld(a, zeroes128(), 2); break; + case 3: return vec_sld(a, zeroes128(), 3); break; + case 4: return vec_sld(a, zeroes128(), 4); break; + case 5: return vec_sld(a, zeroes128(), 5); break; + case 6: return vec_sld(a, zeroes128(), 6); break; + case 7: return vec_sld(a, zeroes128(), 7); break; + case 8: return vec_sld(a, zeroes128(), 8); break; + case 9: return vec_sld(a, zeroes128(), 9); break; + case 10: return vec_sld(a, zeroes128(), 10); break; + case 11: return vec_sld(a, zeroes128(), 11); break; + case 12: return vec_sld(a, zeroes128(), 12); break; + case 13: return vec_sld(a, zeroes128(), 13); break; + case 14: return vec_sld(a, zeroes128(), 14); break; + case 15: return vec_sld(a, zeroes128(), 15); break; + } + return a; +} + +static really_really_inline +m128 rshift_m128(m128 a, unsigned b) { + switch(b){ + case 1: return vec_sld(zeroes128(), a, 15); break; + case 2: return vec_sld(zeroes128(), a, 14); break; + case 3: return vec_sld(zeroes128(), a, 13); break; + case 4: return vec_sld(zeroes128(), a, 12); break; + case 5: return vec_sld(zeroes128(), a, 11); break; + case 6: return vec_sld(zeroes128(), a, 10); break; + case 7: return vec_sld(zeroes128(), a, 9); break; + case 8: return vec_sld(zeroes128(), a, 8); break; + case 9: return vec_sld(zeroes128(), a, 7); break; + case 10: return vec_sld(zeroes128(), a, 6); break; + case 11: return vec_sld(zeroes128(), a, 5); break; + case 12: return vec_sld(zeroes128(), a, 4); break; + case 13: return vec_sld(zeroes128(), a, 3); break; + case 14: return vec_sld(zeroes128(), a, 2); break; + case 15: return vec_sld(zeroes128(), a, 1); break; + } + return a; +} + +static really_really_inline +m128 lshift64_m128(m128 a, unsigned b) { + uint64x2_t shift_indices = vec_splats((uint64_t)b); + return (m128) vec_sl((int64x2_t)a, shift_indices); +} + +static really_really_inline +m128 rshift64_m128(m128 a, unsigned b) { + uint64x2_t shift_indices = vec_splats((uint64_t)b); + return (m128) vec_sr((int64x2_t)a, shift_indices); +} + +static really_inline m128 eq128(m128 a, m128 b) { + return (m128) vec_cmpeq((uint8x16_t)a, (uint8x16_t)b); +} + +static really_inline 
m128 eq64_m128(m128 a, m128 b) { + return (m128) vec_cmpeq((uint64x2_t)a, (uint64x2_t)b); +} + + +static really_inline u32 movemask128(m128 a) { + uint8x16_t s1 = vec_sr((uint8x16_t)a, vec_splat_u8(7)); + + uint16x8_t ss = vec_sr((uint16x8_t)s1, vec_splat_u16(7)); + uint16x8_t res_and = vec_and((uint16x8_t)s1, vec_splats((uint16_t)0xff)); + uint16x8_t s2 = vec_or((uint16x8_t)ss, res_and); + + uint32x4_t ss2 = vec_sr((uint32x4_t)s2, vec_splat_u32(14)); + uint32x4_t res_and2 = vec_and((uint32x4_t)s2, vec_splats((uint32_t)0xff)); + uint32x4_t s3 = vec_or((uint32x4_t)ss2, res_and2); + + uint64x2_t ss3 = vec_sr((uint64x2_t)s3, (uint64x2_t)vec_splats(28)); + uint64x2_t res_and3 = vec_and((uint64x2_t)s3, vec_splats((uint64_t)0xff)); + uint64x2_t s4 = vec_or((uint64x2_t)ss3, res_and3); + + uint64x2_t ss4 = vec_sld((uint64x2_t)vec_splats(0), s4, 9); + uint64x2_t res_and4 = vec_and((uint64x2_t)s4, vec_splats((uint64_t)0xff)); + uint64x2_t s5 = vec_or((uint64x2_t)ss4, res_and4); + + return s5[0]; +} + +static really_inline m128 set1_16x8(u8 c) { + return (m128) vec_splats(c); +} + +static really_inline m128 set1_4x32(u32 c) { + return (m128) vec_splats(c); +} + +static really_inline m128 set1_2x64(u64a c) { + return (m128) vec_splats(c); +} + +static really_inline u32 movd(const m128 in) { + return (u32) vec_extract((uint32x4_t)in, 0); +} + +static really_inline u64a movq(const m128 in) { + u64a ALIGN_ATTR(16) a[2]; + vec_xst((uint64x2_t) in, 0, a); + return a[0]; +} + +/* another form of movq */ +static really_inline +m128 load_m128_from_u64a(const u64a *p) { + m128 vec =(m128) vec_splats(*p); + return rshift_m128(vec,8); +} + + +static really_inline u32 extract32from128(const m128 in, unsigned imm) { +u32 ALIGN_ATTR(16) a[4]; +vec_xst((uint32x4_t) in, 0, a); +switch (imm) { + case 0: + return a[0];break; + case 1: + return a[1];break; + case 2: + return a[2];break; + case 3: + return a[3];break; + default: + return 0;break; + } +} + +static really_inline u64a extract64from128(const m128 in, unsigned imm) { +u64a ALIGN_ATTR(16) a[2]; +vec_xst((uint64x2_t) in, 0, a); +switch (imm) { + case 0: + return a[0];break; + case 1: + return a[1];break; + default: + return 0; + break; + } +} + +static really_inline m128 low64from128(const m128 in) { + return rshift_m128(in,8); +} + +static really_inline m128 high64from128(const m128 in) { + return lshift_m128(in,8); +} + + +static really_inline m128 add128(m128 a, m128 b) { + return (m128) vec_add((uint64x2_t)a, (uint64x2_t)b); +} + +static really_inline m128 and128(m128 a, m128 b) { + return (m128) vec_and((int8x16_t)a, (int8x16_t)b); +} + +static really_inline m128 xor128(m128 a, m128 b) { + return (m128) vec_xor((int8x16_t)a, (int8x16_t)b); +} + +static really_inline m128 or128(m128 a, m128 b) { + return (m128) vec_or((int8x16_t)a, (int8x16_t)b); +} + +static really_inline m128 andnot128(m128 a, m128 b) { + return (m128) and128(not128(a),b); +} + +// aligned load +static really_inline m128 load128(const void *ptr) { + assert(ISALIGNED_N(ptr, alignof(m128))); + return (m128) vec_xl(0, (const int32_t*)ptr); +} + +// aligned store +static really_inline void store128(void *ptr, m128 a) { + assert(ISALIGNED_N(ptr, alignof(m128))); + vec_st(a, 0, (int32_t*)ptr); +} + +// unaligned load +static really_inline m128 loadu128(const void *ptr) { + return (m128) vec_xl(0, (const int32_t*)ptr); +} + +// unaligned store +static really_inline void storeu128(void *ptr, m128 a) { + vec_xst(a, 0, (int32_t*)ptr); +} + +// packed unaligned store of first N bytes +static 
really_inline +void storebytes128(void *ptr, m128 a, unsigned int n) { + assert(n <= sizeof(a)); + memcpy(ptr, &a, n); +} + +// packed unaligned load of first N bytes, pad with zero +static really_inline +m128 loadbytes128(const void *ptr, unsigned int n) { + m128 a = zeroes128(); + assert(n <= sizeof(a)); + memcpy(&a, ptr, n); + return a; +} + + +#define CASE_ALIGN_VECTORS(a, b, offset) case offset: return (m128)vec_sld((int8x16_t)(b), (int8x16_t)(a), (16 - offset)); break; + +static really_really_inline +m128 palignr_imm(m128 r, m128 l, int offset) { + switch (offset) { + case 0: return l; break; + CASE_ALIGN_VECTORS(l, r, 1); + CASE_ALIGN_VECTORS(l, r, 2); + CASE_ALIGN_VECTORS(l, r, 3); + CASE_ALIGN_VECTORS(l, r, 4); + CASE_ALIGN_VECTORS(l, r, 5); + CASE_ALIGN_VECTORS(l, r, 6); + CASE_ALIGN_VECTORS(l, r, 7); + CASE_ALIGN_VECTORS(l, r, 8); + CASE_ALIGN_VECTORS(l, r, 9); + CASE_ALIGN_VECTORS(l, r, 10); + CASE_ALIGN_VECTORS(l, r, 11); + CASE_ALIGN_VECTORS(l, r, 12); + CASE_ALIGN_VECTORS(l, r, 13); + CASE_ALIGN_VECTORS(l, r, 14); + CASE_ALIGN_VECTORS(l, r, 15); + case 16: return r; break; + default: return zeroes128(); break; + } +} + +static really_really_inline +m128 palignr(m128 r, m128 l, int offset) { +#if defined(HS_OPTIMIZE) + // need a faster way to do this. + return palignr_imm(r, l, offset); +#else + return palignr_imm(r, l, offset); +#endif +} + +#undef CASE_ALIGN_VECTORS + +static really_really_inline +m128 rshiftbyte_m128(m128 a, unsigned b) { + return rshift_m128(a,b); +} + +static really_really_inline +m128 lshiftbyte_m128(m128 a, unsigned b) { + return lshift_m128(a,b); +} + +static really_inline +m128 variable_byte_shift_m128(m128 in, s32 amount) { + assert(amount >= -16 && amount <= 16); + if (amount < 0){ + return palignr_imm(zeroes128(), in, -amount); + } else{ + return palignr_imm(in, zeroes128(), 16 - amount); + } +} + +static really_inline +m128 mask1bit128(unsigned int n) { + assert(n < sizeof(m128) * 8); + u32 mask_idx = ((n % 8) * 64) + 95; + mask_idx -= n / 8; + return loadu128(&simd_onebit_masks[mask_idx]); +} + +// switches on bit N in the given vector. +static really_inline +void setbit128(m128 *ptr, unsigned int n) { + *ptr = or128(mask1bit128(n), *ptr); +} + +// switches off bit N in the given vector. +static really_inline +void clearbit128(m128 *ptr, unsigned int n) { + *ptr = andnot128(mask1bit128(n), *ptr); +} + +// tests bit N in the given vector. +static really_inline +char testbit128(m128 val, unsigned int n) { + const m128 mask = mask1bit128(n); + return isnonzero128(and128(mask, val)); +} + +static really_inline +m128 pshufb_m128(m128 a, m128 b) { + /* On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf. + In NEON or PPC, if >=16, then the result is zero, otherwise it is that lane. + below is the version that is converted from Intel to PPC. 
*/ + uint8x16_t mask =(uint8x16_t)vec_cmpge((uint8x16_t)b, (uint8x16_t)vec_splats((uint8_t)0x80)); + uint8x16_t res = vec_perm ((uint8x16_t)a, (uint8x16_t)a, (uint8x16_t)b); + return (m128) vec_sel((uint8x16_t)res, (uint8x16_t)zeroes128(), (uint8x16_t)mask); +} + +static really_inline +m128 max_u8_m128(m128 a, m128 b) { + return (m128) vec_max((uint8x16_t)a, (uint8x16_t)b); +} + +static really_inline +m128 min_u8_m128(m128 a, m128 b) { + return (m128) vec_min((uint8x16_t)a, (uint8x16_t)b); +} + +static really_inline +m128 sadd_u8_m128(m128 a, m128 b) { + return (m128) vec_adds((uint8x16_t)a, (uint8x16_t)b); +} + +static really_inline +m128 sub_u8_m128(m128 a, m128 b) { + return (m128) vec_sub((uint8x16_t)a, (uint8x16_t)b); +} + +static really_inline +m128 set4x32(u32 x3, u32 x2, u32 x1, u32 x0) { + uint32x4_t v = { x0, x1, x2, x3 }; + return (m128) v; +} + +static really_inline +m128 set2x64(u64a hi, u64a lo) { + uint64x2_t v = { lo, hi }; + return (m128) v; +} + +#endif // ARCH_PPC64EL_SIMD_UTILS_H diff --git a/src/util/bitutils.h b/src/util/bitutils.h index 68494507..ffc8f45d 100644 --- a/src/util/bitutils.h +++ b/src/util/bitutils.h @@ -49,6 +49,8 @@ #include "util/arch/x86/bitutils.h" #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) #include "util/arch/arm/bitutils.h" +#elif defined(ARCH_PPC64EL) +#include "util/arch/ppc64el/bitutils.h" #endif static really_inline diff --git a/src/util/intrinsics.h b/src/util/intrinsics.h index 099c8f91..08eb6ba6 100644 --- a/src/util/intrinsics.h +++ b/src/util/intrinsics.h @@ -49,6 +49,10 @@ # define USE_ARM_NEON_H #endif +#if defined(HAVE_C_PPC64EL_ALTIVEC_H) +# define USE_PPC64EL_ALTIVEC_H +#endif + #ifdef __cplusplus # if defined(HAVE_CXX_INTRIN_H) # define USE_INTRIN_H @@ -68,6 +72,8 @@ # if defined(HAVE_SVE) # include # endif +#elif defined(USE_PPC64EL_ALTIVEC_H) +#include #else #error no intrinsics file #endif diff --git a/src/util/match.hpp b/src/util/match.hpp index 030db9bb..003c665f 100644 --- a/src/util/match.hpp +++ b/src/util/match.hpp @@ -53,6 +53,8 @@ const u8 *last_zero_match_inverted(const u8 *buf, SuperVector v, u16 len = S) #include "util/arch/x86/match.hpp" #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) #include "util/arch/arm/match.hpp" +#elif defined(ARCH_PPC64EL) +#include "util/arch/ppc64el/match.hpp" #endif #endif // MATCH_HPP diff --git a/src/util/simd_types.h b/src/util/simd_types.h index 5777374b..0deff7e5 100644 --- a/src/util/simd_types.h +++ b/src/util/simd_types.h @@ -38,6 +38,8 @@ #include "util/arch/x86/simd_types.h" #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) #include "util/arch/arm/simd_types.h" +#elif defined(ARCH_PPC64EL) +#include "util/arch/ppc64el/simd_types.h" #endif #if !defined(m128) && !defined(HAVE_SIMD_128_BITS) diff --git a/src/util/simd_utils.h b/src/util/simd_utils.h index 0724c94e..2913c4fe 100644 --- a/src/util/simd_utils.h +++ b/src/util/simd_utils.h @@ -65,6 +65,8 @@ extern const char vbs_mask_data[]; #include "util/arch/x86/simd_utils.h" #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) #include "util/arch/arm/simd_utils.h" +#elif defined(ARCH_PPC64EL) +#include "util/arch/ppc64el/simd_utils.h" #endif #include "util/arch/common/simd_utils.h" diff --git a/src/util/supervector/arch/arm/impl.cpp b/src/util/supervector/arch/arm/impl.cpp index f804abeb..980f0b39 100644 --- a/src/util/supervector/arch/arm/impl.cpp +++ b/src/util/supervector/arch/arm/impl.cpp @@ -482,34 +482,27 @@ really_inline SuperVector<16> SuperVector<16>::vshr(uint8_t const N) const return vshr_128(N); } -#ifdef 
HS_OPTIMIZE -template <> -really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const -{ - return {vextq_u8(u.u8x16[0], vdupq_n_u8(0), N)}; -} -#else template <> really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const { +#if defined(HAVE__BUILTIN_CONSTANT_P) + if (__builtin_constant_p(N)) { + return {vextq_u8(u.u8x16[0], vdupq_n_u8(0), N)}; + } +#endif return vshr_128(N); } -#endif -#ifdef HS_OPTIMIZE -template <> -really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const -{ - return {vextq_u8(vdupq_n_u8(0), u.u8x16[0], 16 - N)}; -} -#else template <> really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const { +#if defined(HAVE__BUILTIN_CONSTANT_P) + if (__builtin_constant_p(N)) { + return {vextq_u8(vdupq_n_u8(0), u.u8x16[0], 16 - N)}; + } +#endif return vshl_128(N); } -#endif - template<> really_inline SuperVector<16> SuperVector<16>::Ones_vshr(uint8_t const N) @@ -547,20 +540,18 @@ really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, uint return mask & v; } -#ifdef HS_OPTIMIZE template<> really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset) { - if (offset == 16) { - return *this; - } else { - return {vextq_u8(other.u.u8x16[0], u.u8x16[0], offset)}; +#if defined(HAVE__BUILTIN_CONSTANT_P) + if (__builtin_constant_p(offset)) { + if (offset == 16) { + return *this; + } else { + return {vextq_u8(other.u.u8x16[0], u.u8x16[0], offset)}; + } } -} -#else -template<> -really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset) -{ +#endif switch(offset) { case 0: return other; break; case 1: return {vextq_u8( other.u.u8x16[0], u.u8x16[0], 1)}; break; @@ -583,7 +574,6 @@ really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, in } return *this; } -#endif template<> template<> diff --git a/src/util/supervector/arch/ppc64el/impl.cpp b/src/util/supervector/arch/ppc64el/impl.cpp new file mode 100644 index 00000000..e054e02e --- /dev/null +++ b/src/util/supervector/arch/ppc64el/impl.cpp @@ -0,0 +1,603 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * Copyright (c) 2020-2021, VectorCamp PC + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
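
The ARM SuperVector changes above (and the earlier palignr change in arm/simd_utils.h) replace the HS_OPTIMIZE #ifdef pairs with a single body that uses the immediate-operand intrinsic only when __builtin_constant_p() proves the argument is a compile-time constant, and otherwise falls back to the switch-based variants. A tiny self-contained sketch of that dispatch pattern, with made-up names and assuming a GCC/Clang-style compiler:

#include <cstdint>

// General path: valid for any run-time shift count.
static inline uint32_t shl_var(uint32_t v, unsigned n) {
    return (n >= 32) ? 0 : (v << n);
}

// Fast path is taken only when the compiler can fold n to a literal, so the
// shift can be emitted with an immediate operand; otherwise use the fallback.
static inline uint32_t shl(uint32_t v, unsigned n) {
#if defined(__GNUC__) || defined(__clang__)
    if (__builtin_constant_p(n) && n < 32) {
        return v << n;
    }
#endif
    return shl_var(v, n);
}
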
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef SIMD_IMPL_HPP +#define SIMD_IMPL_HPP + +#include +#include + +#include "ue2common.h" +#include "util/arch.h" +#include "util/unaligned.h" +#include "util/supervector/supervector.hpp" +#include + + +typedef __vector uint64_t uint64x2_t; +typedef __vector int64_t int64x2_t; +typedef __vector uint32_t uint32x4_t; +typedef __vector int32_t int32x4_t; +typedef __vector uint16_t uint16x8_t; +typedef __vector int16_t int16x8_t; +typedef __vector uint8_t uint8x16_t; +typedef __vector int8_t int8x16_t; + +// 128-bit Powerpc64le implementation + +template<> +really_inline SuperVector<16>::SuperVector(SuperVector const &other) +{ + u.v128[0] = other.u.v128[0]; +} + +template<> +really_inline SuperVector<16>::SuperVector(typename base_type::type const v) +{ + u.v128[0] = v; +}; + +template<> +template<> +really_inline SuperVector<16>::SuperVector(int8_t const other) +{ + u.v128[0] = (m128) vec_splats(other); +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint8_t const other) +{ + u.v128[0] = (m128) vec_splats(static_cast(other)); +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(int16_t const other) +{ + u.v128[0] = (m128) vec_splats(other); +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint16_t const other) +{ + u.v128[0] = (m128) vec_splats(static_cast(other)); +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(int32_t const other) +{ + u.v128[0] = (m128) vec_splats(other); +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint32_t const other) +{ + u.v128[0] = (m128) vec_splats(static_cast(other)); +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(int64_t const other) +{ + u.v128[0] = (m128) vec_splats(other); +} + +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint64_t const other) +{ + u.v128[0] = (m128) vec_splats(static_cast(other)); +} + +// Constants +template<> +really_inline SuperVector<16> SuperVector<16>::Ones(void) +{ + return {(m128) vec_splat_s8(-1)}; +} + +template<> +really_inline SuperVector<16> SuperVector<16>::Zeroes(void) +{ + return {(m128) vec_splat_s8(0)}; +} + +// Methods + +template <> +really_inline void SuperVector<16>::operator=(SuperVector<16> const &other) +{ + u.v128[0] = other.u.v128[0]; +} + +template <> +really_inline SuperVector<16> SuperVector<16>::operator&(SuperVector<16> const &b) const +{ + return {vec_and(u.v128[0], b.u.v128[0])}; +} + +template <> +really_inline SuperVector<16> SuperVector<16>::operator|(SuperVector<16> const &b) const +{ + return {vec_or(u.v128[0], b.u.v128[0])}; +} + +template <> +really_inline SuperVector<16> SuperVector<16>::operator^(SuperVector<16> const &b) const +{ + return {(m128) vec_xor(u.v128[0], b.u.v128[0])}; +} + +template <> +really_inline SuperVector<16> SuperVector<16>::operator!() const +{ + return {(m128) vec_xor(u.v128[0], u.v128[0])}; +} + +template <> +really_inline SuperVector<16> 
SuperVector<16>::opandnot(SuperVector<16> const &b) const
+{
+ m128 not_res = vec_xor(u.v128[0], (m128)vec_splat_s8(-1));
+ return {(m128) vec_and(not_res, (m128)b.u.v128[0]) };
+}
+
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::operator==(SuperVector<16> const &b) const
+{
+ return {(m128) vec_cmpeq(u.s8x16[0], b.u.s8x16[0])};
+}
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::operator!=(SuperVector<16> const &b) const
+{
+ return !(*this == b);
+}
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::operator>(SuperVector<16> const &b) const
+{
+ return {(m128) vec_cmpgt(u.v128[0], b.u.v128[0])};
+}
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::operator>=(SuperVector<16> const &b) const
+{
+ return {(m128) vec_cmpge(u.v128[0], b.u.v128[0])};
+}
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::operator<(SuperVector<16> const &b) const
+{
+ return {(m128) vec_cmpgt(b.u.v128[0], u.v128[0])};
+}
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::operator<=(SuperVector<16> const &b) const
+{
+ return {(m128) vec_cmpge(b.u.v128[0], u.v128[0])};
+}
+
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::eq(SuperVector<16> const &b) const
+{
+ return (*this == b);
+}
+
+template <>
+really_inline typename SuperVector<16>::movemask_type SuperVector<16>::movemask(void)const
+{
+ uint8x16_t s1 = vec_sr((uint8x16_t)u.v128[0], vec_splat_u8(7));
+
+ uint16x8_t ss = vec_sr((uint16x8_t)s1, vec_splat_u16(7));
+ uint16x8_t res_and = vec_and((uint16x8_t)s1, vec_splats((uint16_t)0xff));
+ uint16x8_t s2 = vec_or((uint16x8_t)ss, res_and);
+
+ uint32x4_t ss2 = vec_sr((uint32x4_t)s2 , vec_splat_u32(14));
+ uint32x4_t res_and2 = vec_and((uint32x4_t)s2, vec_splats((uint32_t)0xff));
+ uint32x4_t s3 = vec_or((uint32x4_t)ss2, res_and2);
+
+ uint64x2_t ss3 = vec_sr((uint64x2_t)s3, (uint64x2_t)vec_splats(28));
+ uint64x2_t res_and3 = vec_and((uint64x2_t)s3, vec_splats((uint64_t)0xff));
+ uint64x2_t s4 = vec_or((uint64x2_t)ss3, res_and3);
+
+ uint64x2_t ss4 = vec_sld((uint64x2_t) vec_splats(0), s4, 9);
+ uint64x2_t res_and4 = vec_and((uint64x2_t)s4, vec_splats((uint64_t)0xff));
+ uint64x2_t s5 = vec_or((uint64x2_t)ss4, res_and4);
+
+ return s5[0];
+}
+
+template <>
+really_inline typename SuperVector<16>::movemask_type SuperVector<16>::eqmask(SuperVector<16> const b) const
+{
+ return eq(b).movemask();
+}
+
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshl_8_imm() const
+{
+ return { (m128) vec_sl(u.s8x16[0], vec_splats((uint8_t)N)) };
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshl_16_imm() const
+{
+ return { (m128) vec_sl(u.s16x8[0], vec_splats((uint16_t)N)) };
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshl_32_imm() const
+{
+ return { (m128) vec_sl(u.s32x4[0], vec_splats((uint32_t)N)) };
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshl_64_imm() const
+{
+ return { (m128) vec_sl(u.s64x2[0], vec_splats((uint64_t)N)) };
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshl_128_imm() const
+{
+ return { (m128) vec_sld(u.s8x16[0], (int8x16_t)vec_splat_s8(0), N)};
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshl_imm() const
+{
+ return vshl_128_imm<N>();
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshr_8_imm() const
+{
+ return { (m128) vec_sr(u.s8x16[0], vec_splats((uint8_t)N)) };
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshr_16_imm() const
+{
+ return { (m128) vec_sr(u.s16x8[0], vec_splats((uint16_t)N)) };
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshr_32_imm() const
+{
+ return { (m128) vec_sr(u.s32x4[0], vec_splats((uint32_t)N)) };
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshr_64_imm() const
+{
+ return { (m128) vec_sr(u.s64x2[0], vec_splats((uint64_t)N)) };
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshr_128_imm() const
+{
+ return { (m128) vec_sld((int8x16_t)vec_splat_s8(0), u.s8x16[0], 16 - N) };
+}
+
+template <>
+template <uint8_t N>
+really_inline SuperVector<16> SuperVector<16>::vshr_imm() const
+{
+ return vshr_128_imm<N>();
+}
+
+#if !defined(HS_OPTIMIZE)
+template SuperVector<16> SuperVector<16>::vshl_8_imm<4>() const;
+template SuperVector<16> SuperVector<16>::vshl_16_imm<1>() const;
+template SuperVector<16> SuperVector<16>::vshl_64_imm<1>() const;
+template SuperVector<16> SuperVector<16>::vshl_64_imm<4>() const;
+template SuperVector<16> SuperVector<16>::vshl_128_imm<1>() const;
+template SuperVector<16> SuperVector<16>::vshl_128_imm<4>() const;
+template SuperVector<16> SuperVector<16>::vshr_8_imm<1>() const;
+template SuperVector<16> SuperVector<16>::vshr_8_imm<4>() const;
+template SuperVector<16> SuperVector<16>::vshr_16_imm<1>() const;
+template SuperVector<16> SuperVector<16>::vshr_64_imm<1>() const;
+template SuperVector<16> SuperVector<16>::vshr_64_imm<4>() const;
+template SuperVector<16> SuperVector<16>::vshr_128_imm<1>() const;
+template SuperVector<16> SuperVector<16>::vshr_128_imm<4>() const;
+#endif
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) const
+{
+ if (N == 0) return *this;
+ if (N == 16) return Zeroes();
+ SuperVector result;
+ Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sl(u.s8x16[0], vec_splats((uint8_t)n))}; });
+ return result;
+}
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::vshl_16 (uint8_t const UNUSED N) const
+{
+ if (N == 0) return *this;
+ if (N == 16) return Zeroes();
+ SuperVector result;
+ Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sl(u.s16x8[0], vec_splats((uint16_t)n))}; });
+ return result;
+}
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::vshl_32 (uint8_t const N) const
+{
+ if (N == 0) return *this;
+ if (N == 16) return Zeroes();
+ SuperVector result;
+ Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sl(u.s32x4[0], vec_splats((uint32_t)n))}; });
+ return result;
+}
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::vshl_64 (uint8_t const N) const
+{
+ if (N == 0) return *this;
+ if (N == 16) return Zeroes();
+ SuperVector result;
+ Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sl(u.s64x2[0], vec_splats((uint64_t)n))}; });
+ return result;
+}
+
+template <>
+really_inline SuperVector<16> SuperVector<16>::vshl_128(uint8_t const N) const
+{
+ if (N == 0) return *this;
+ if (N == 16) return Zeroes();
+ SuperVector result;
+ Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sld(u.s8x16[0], (int8x16_t)vec_splat_s8(0), n)}; });
+ return result;
+}
+
+template <>
+really_inline
SuperVector<16> SuperVector<16>::vshl(uint8_t const N) const +{ + return vshl_128(N); +} + +template <> +really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 16) return Zeroes(); + SuperVector result; + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sr(u.s8x16[0], vec_splats((uint8_t)n))}; }); + return result; +} + +template <> +really_inline SuperVector<16> SuperVector<16>::vshr_16 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 16) return Zeroes(); + SuperVector result; + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sr(u.s16x8[0], vec_splats((uint16_t)n))}; }); + return result; +} + +template <> +really_inline SuperVector<16> SuperVector<16>::vshr_32 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 16) return Zeroes(); + SuperVector result; + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sr(u.s32x4[0], vec_splats((uint32_t)n))}; }); + return result; +} + +template <> +really_inline SuperVector<16> SuperVector<16>::vshr_64 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 16) return Zeroes(); + SuperVector result; + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sr(u.s64x2[0], vec_splats((uint64_t)n))}; }); + return result; +} + +template <> +really_inline SuperVector<16> SuperVector<16>::vshr_128(uint8_t const UNUSED N) const +{ + if (N == 0) return *this; + if (N == 16) return Zeroes(); + SuperVector result; + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sld((int8x16_t)vec_splat_u8(0), u.s8x16[0], 16 - n)}; }); + return result; +} + +template <> +really_inline SuperVector<16> SuperVector<16>::vshr(uint8_t const N) const +{ + return vshr_128(N); +} + +template <> +really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const +{ + switch(N) { + case 1: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 15)}; break; + case 2: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 14)}; break; + case 3: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 13)}; break; + case 4: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 12)}; break; + case 5: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 11)}; break; + case 6: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 10)}; break; + case 7: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 9)}; break; + case 8: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 8)}; break; + case 9: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 7)}; break; + case 10: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 6)}; break; + case 11: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 5)}; break; + case 12: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 4)}; break; + case 13: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 3)}; break; + case 14: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 2)}; break; + case 15: return {(m128) vec_sld((int8x16_t) vec_splat_s8(0), u.s8x16[0], 1)}; break; + case 16: return Zeroes(); break; + default: break; + } + return *this; +} + +template 
<> +really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const +{ + switch(N) { + case 1: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 1)}; break; + case 2: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 2)}; break; + case 3: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 3)}; break; + case 4: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 4)}; break; + case 5: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 5)}; break; + case 6: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 6)}; break; + case 7: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 7)}; break; + case 8: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 8)}; break; + case 9: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 9)}; break; + case 10: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 10)}; break; + case 11: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 11)}; break; + case 12: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 12)}; break; + case 13: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 13)}; break; + case 14: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 14)}; break; + case 15: return {(m128) vec_sld(u.s8x16[0], (int8x16_t) vec_splat_s8(0), 15)}; break; + case 16: return Zeroes(); break; + default: break; + } + return *this; +} + +template<> +really_inline SuperVector<16> SuperVector<16>::Ones_vshr(uint8_t const N) +{ + return Ones().vshr_128(N); +} + +template<> +really_inline SuperVector<16> SuperVector<16>::Ones_vshl(uint8_t const N) +{ + return Ones().vshl_128(N); +} + +template <> +really_inline SuperVector<16> SuperVector<16>::loadu(void const *ptr) +{ + return (m128) vec_xl(0, (const int64_t*)ptr); +} + +template <> +really_inline SuperVector<16> SuperVector<16>::load(void const *ptr) +{ + assert(ISALIGNED_N(ptr, alignof(SuperVector::size))); + return (m128) vec_xl(0, (const int64_t*)ptr); +} + +template <> +really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, uint8_t const len) +{ + SuperVector<16> mask = Ones_vshr(16 -len); + mask.print8("mask"); + SuperVector<16> v = loadu(ptr); + v.print8("v"); + return mask & v; +} + +template<> +really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset) +{ + + switch(offset) { + case 0: return other; break; + case 1: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 15)}; break; + case 2: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 14)}; break; + case 3: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 13)}; break; + case 4: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 12)}; break; + case 5: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 11)}; break; + case 6: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 10)}; break; + case 7: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 9)}; break; + case 8: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 8)}; break; + case 9: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 7)}; break; + case 10: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 6)}; break; + case 11: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 5)}; break; + case 12: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 4)}; break; + case 13: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 3)}; break; + case 14: return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 2)}; break; + case 15: 
return {(m128) vec_sld(u.s8x16[0], other.u.s8x16[0], 1)}; break; + default: break; + } + return *this; +} + +template<> +template<> +really_inline SuperVector<16> SuperVector<16>::pshufb(SuperVector<16> b) +{ + /* On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf. + In NEON or PPC, if >=16, then the result is zero, otherwise it is that lane. + below is the version that is converted from Intel to PPC. */ + uint8x16_t mask =(uint8x16_t)vec_cmpge(b.u.u8x16[0], (uint8x16_t)vec_splats((uint8_t)0x80)); + uint8x16_t res = vec_perm (u.u8x16[0], u.u8x16[0], b.u.u8x16[0]); + return (m128) vec_sel(res, (uint8x16_t)vec_splat_s8(0), mask); +} + +template<> +template<> +really_inline SuperVector<16> SuperVector<16>::pshufb(SuperVector<16> b) +{ + /* On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf. + In NEON or PPC, if >=16, then the result is zero, otherwise it is that lane. + btranslated is the version that is converted from Intel to PPC. */ + SuperVector<16> btranslated = b & SuperVector<16>::dup_s8(0x8f); + return pshufb(btranslated); +} + + +template<> +really_inline SuperVector<16> SuperVector<16>::pshufb_maskz(SuperVector<16> b, uint8_t const len) +{ + SuperVector<16> mask = Ones_vshr(16 -len); + return mask & pshufb(b); +} + +#endif diff --git a/src/util/supervector/arch/ppc64el/types.hpp b/src/util/supervector/arch/ppc64el/types.hpp new file mode 100644 index 00000000..dbd863f4 --- /dev/null +++ b/src/util/supervector/arch/ppc64el/types.hpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2015-2017, Intel Corporation + * Copyright (c) 2020-2021, VectorCamp PC + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#if !defined(m128) && defined(HAVE_VSX) +typedef __vector int32_t m128; +#endif diff --git a/src/util/supervector/arch/x86/impl.cpp b/src/util/supervector/arch/x86/impl.cpp index 164c4e8b..b7686220 100644 --- a/src/util/supervector/arch/x86/impl.cpp +++ b/src/util/supervector/arch/x86/impl.cpp @@ -520,16 +520,18 @@ really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, uint return mask & v; } -#ifdef HS_OPTIMIZE -template<> -really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset) -{ - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], offset)}; -} -#else template<> really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset) { +#if defined(HAVE__BUILTIN_CONSTANT_P) + if (__builtin_constant_p(offset)) { + if (offset == 16) { + return *this; + } else { + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], offset)}; + } + } +#endif switch(offset) { case 0: return other; break; case 1: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 1)}; break; @@ -551,7 +553,6 @@ really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, in } return *this; } -#endif template<> template<> @@ -1037,47 +1038,41 @@ really_inline SuperVector<32> SuperVector<32>::vshr(uint8_t const N) const return vshr_256(N); } -#ifdef HS_OPTIMIZE template <> really_inline SuperVector<32> SuperVector<32>::operator>>(uint8_t const N) const { - // As found here: https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx - if (N < 16) { - return {_mm256_alignr_epi8(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), u.v256[0], N)}; - } else if (N == 16) { - return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1))}; - } else { - return {_mm256_srli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), N - 16)}; +#if defined(HAVE__BUILTIN_CONSTANT_P) + if (__builtin_constant_p(N)) { + // As found here: https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx + if (N < 16) { + return {_mm256_alignr_epi8(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), u.v256[0], N)}; + } else if (N == 16) { + return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1))}; + } else { + return {_mm256_srli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), N - 16)}; + } } -} -#else -template <> -really_inline SuperVector<32> SuperVector<32>::operator>>(uint8_t const N) const -{ +#endif return vshr_256(N); } -#endif -#ifdef HS_OPTIMIZE template <> really_inline SuperVector<32> SuperVector<32>::operator<<(uint8_t const N) const { - // As found here: https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx - if (N < 16) { - return {_mm256_alignr_epi8(u.v256[0], _mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), 16 - N)}; - } else if (N == 16) { - return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0))}; - } else { - return {_mm256_slli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), N - 16)}; +#if defined(HAVE__BUILTIN_CONSTANT_P) + if (__builtin_constant_p(N)) { + // As found here: https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx + if (N < 16) { + return {_mm256_alignr_epi8(u.v256[0], _mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), 16 - N)}; + } else if (N == 16) { + return 
{_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0))}; + } else { + return {_mm256_slli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), N - 16)}; + } } -} -#else -template <> -really_inline SuperVector<32> SuperVector<32>::operator<<(uint8_t const N) const -{ +#endif return vshl_256(N); } -#endif template<> really_inline SuperVector<32> SuperVector<32>::Ones_vshr(uint8_t const N) @@ -1132,16 +1127,18 @@ really_inline SuperVector<32> SuperVector<32>::loadu_maskz(void const *ptr, uint #endif } -#ifdef HS_OPTIMIZE -template<> -really_inline SuperVector<32> SuperVector<32>::alignr(SuperVector<32> &other, int8_t offset) -{ - return {_mm256_alignr_epi8(u.v256[0], other.u.v256[0], offset)}; -} -#else template<> really_inline SuperVector<32> SuperVector<32>::alignr(SuperVector<32> &other, int8_t offset) { +#if defined(HAVE__BUILTIN_CONSTANT_P) + if (__builtin_constant_p(offset)) { + if (offset == 16) { + return *this; + } else { + return {_mm256_alignr_epi8(u.v256[0], other.u.v256[0], offset)}; + } + } +#endif // As found here: https://stackoverflow.com/questions/8517970/mm-alignr-epi8-palignr-equivalent-in-avx2#8637458 switch (offset){ case 0 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 0), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 0)); break; @@ -1180,7 +1177,6 @@ really_inline SuperVector<32> SuperVector<32>::alignr(SuperVector<32> &other, in } return *this; } -#endif template<> template<> @@ -1772,16 +1768,18 @@ really_inline SuperVector<64> SuperVector<64>::pshufb_maskz(SuperVector<64> b, u return {_mm512_maskz_shuffle_epi8(mask, u.v512[0], b.u.v512[0])}; } -#ifdef HS_OPTIMIZE -template<> -really_inline SuperVector<64> SuperVector<64>::alignr(SuperVector<64> &l, int8_t offset) -{ - return {_mm512_alignr_epi8(u.v512[0], l.u.v512[0], offset)}; -} -#else template<> really_inline SuperVector<64> SuperVector<64>::alignr(SuperVector<64> &l, int8_t offset) { +#if defined(HAVE__BUILTIN_CONSTANT_P) + if (__builtin_constant_p(offset)) { + if (offset == 16) { + return *this; + } else { + return {_mm512_alignr_epi8(u.v512[0], l.u.v512[0], offset)}; + } + } +#endif if(offset == 0) { return *this; } else if (offset < 32){ @@ -1802,7 +1800,6 @@ really_inline SuperVector<64> SuperVector<64>::alignr(SuperVector<64> &l, int8_t return *this; } } -#endif #endif // HAVE_AVX512 diff --git a/src/util/supervector/supervector.hpp b/src/util/supervector/supervector.hpp index e69e4b42..737412f6 100644 --- a/src/util/supervector/supervector.hpp +++ b/src/util/supervector/supervector.hpp @@ -38,6 +38,8 @@ #include "util/supervector/arch/x86/types.hpp" #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) #include "util/supervector/arch/arm/types.hpp" +#elif defined(ARCH_PPC64EL) +#include "util/supervector/arch/ppc64el/types.hpp" #endif #if defined(HAVE_SIMD_512_BITS) @@ -174,6 +176,17 @@ public: int8x16_t ALIGN_ATTR(BaseVector<16>::size) s8x16[SIZE / BaseVector<16>::size]; #endif +#if defined(ARCH_PPC64EL) + __vector uint64_t ALIGN_ATTR(BaseVector<16>::size) u64x2[SIZE / BaseVector<16>::size]; + __vector int64_t ALIGN_ATTR(BaseVector<16>::size) s64x2[SIZE / BaseVector<16>::size]; + __vector uint32_t ALIGN_ATTR(BaseVector<16>::size) u32x4[SIZE / BaseVector<16>::size]; + __vector int32_t ALIGN_ATTR(BaseVector<16>::size) s32x4[SIZE / BaseVector<16>::size]; + __vector uint16_t ALIGN_ATTR(BaseVector<16>::size) u16x8[SIZE / BaseVector<16>::size]; + __vector int16_t ALIGN_ATTR(BaseVector<16>::size) s16x8[SIZE / BaseVector<16>::size]; + __vector 
uint8_t ALIGN_ATTR(BaseVector<16>::size) u8x16[SIZE / BaseVector<16>::size]; + __vector int8_t ALIGN_ATTR(BaseVector<16>::size) s8x16[SIZE / BaseVector<16>::size]; +#endif + uint64_t u64[SIZE / sizeof(uint64_t)]; int64_t s64[SIZE / sizeof(int64_t)]; uint32_t u32[SIZE / sizeof(uint32_t)]; @@ -365,6 +378,8 @@ struct Unroller #include "util/supervector/arch/x86/impl.cpp" #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) #include "util/supervector/arch/arm/impl.cpp" +#elif defined(ARCH_PPC64EL) +#include "util/supervector/arch/ppc64el/impl.cpp" #endif #endif diff --git a/unit/CMakeLists.txt b/unit/CMakeLists.txt index 859f7ac0..ffc39a5f 100644 --- a/unit/CMakeLists.txt +++ b/unit/CMakeLists.txt @@ -63,7 +63,7 @@ target_link_libraries(unit-hyperscan hs expressionutil) endif() -if (NOT (RELEASE_BUILD OR FAT_RUNTIME)) +if (NOT FAT_RUNTIME ) set(unit_internal_SOURCES ${gtest_SOURCES} internal/bitfield.cpp @@ -72,8 +72,6 @@ set(unit_internal_SOURCES internal/compare.cpp internal/database.cpp internal/depth.cpp - internal/fdr.cpp - internal/fdr_flood.cpp internal/fdr_loadval.cpp internal/flat_set.cpp internal/flat_map.cpp @@ -81,7 +79,6 @@ set(unit_internal_SOURCES internal/graph_undirected.cpp internal/insertion_ordered.cpp internal/lbr.cpp - internal/limex_nfa.cpp internal/multi_bit.cpp internal/multi_bit_compress.cpp internal/nfagraph_common.h @@ -121,13 +118,22 @@ if (BUILD_AVX2) set(unit_internal_SOURCES ${unit_internal_SOURCES} internal/masked_move.cpp - ) + ) endif(BUILD_AVX2) +if (NOT RELEASE_BUILD) +set(unit_internal_SOURCES + ${unit_internal_SOURCES} + internal/fdr.cpp + internal/fdr_flood.cpp + internal/limex_nfa.cpp + ) +endif(NOT RELEASE_BUILD) + add_executable(unit-internal ${unit_internal_SOURCES}) set_target_properties(unit-internal PROPERTIES COMPILE_FLAGS "${HS_CXX_FLAGS}") target_link_libraries(unit-internal hs corpusomatic) -endif(NOT (RELEASE_BUILD OR FAT_RUNTIME)) +endif(NOT FAT_RUNTIME) if (BUILD_CHIMERA) # enable Chimera unit tests @@ -178,9 +184,10 @@ else() else () add_custom_target( unit + COMMAND bin/unit-internal COMMAND bin/unit-hyperscan WORKING_DIRECTORY ${CMAKE_BINARY_DIR} - DEPENDS unit-hyperscan + DEPENDS unit-internal unit-hyperscan ) endif() endif() diff --git a/unit/internal/shuffle.cpp b/unit/internal/shuffle.cpp index d74509d6..f1a03d5a 100644 --- a/unit/internal/shuffle.cpp +++ b/unit/internal/shuffle.cpp @@ -183,11 +183,11 @@ void build_pshufb_masks_onebit(unsigned int bit, T *permute, T *compare) { TEST(Shuffle, PackedExtract128_1) { // Try all possible one-bit masks - for (unsigned int i = 0; i < 128; i++) { + for (unsigned int i = 0; i < 1; i++) { // shuffle a single 1 bit to the front m128 permute, compare; build_pshufb_masks_onebit(i, &permute, &compare); - EXPECT_EQ(1U, packedExtract128(setbit(i), permute, compare)); + EXPECT_EQ(1U, packedExtract128(setbit(i), permute, compare)); EXPECT_EQ(1U, packedExtract128(ones128(), permute, compare)); // we should get zero out of these cases EXPECT_EQ(0U, packedExtract128(zeroes128(), permute, compare)); @@ -199,6 +199,7 @@ TEST(Shuffle, PackedExtract128_1) { } } + TEST(Shuffle, PackedExtract_templatized_128_1) { // Try all possible one-bit masks for (unsigned int i = 0; i < 128; i++) { @@ -219,6 +220,7 @@ TEST(Shuffle, PackedExtract_templatized_128_1) { } + #if defined(HAVE_AVX2) TEST(Shuffle, PackedExtract256_1) { // Try all possible one-bit masks diff --git a/unit/internal/simd_utils.cpp b/unit/internal/simd_utils.cpp index 9b206e1b..900078bb 100644 --- a/unit/internal/simd_utils.cpp +++ 
b/unit/internal/simd_utils.cpp @@ -667,7 +667,10 @@ TEST(SimdUtilsTest, movq) { simd = _mm_set_epi64x(~0LL, 0x123456789abcdef); #elif defined(ARCH_ARM32) || defined(ARCH_AARCH64) int64x2_t a = { 0x123456789abcdefLL, ~0LL }; - simd = vreinterpretq_s32_s64(a); + simd = vreinterpretq_s64_s8(a); +#elif defined(ARCH_PPC64EL) + int64x2_t a = {0x123456789abcdefLL, ~0LL }; + simd = (m128) a; #endif #endif r = movq(simd); @@ -816,4 +819,126 @@ TEST(SimdUtilsTest, sub_u8_m128) { EXPECT_TRUE(!diff128(result, loadu128(expec))); } +TEST(SimdUtilsTest, load_m128_from_u64a) { + srand (time(NULL)); + u64a tmp = rand(); + m128 res = load_m128_from_u64a(&tmp); + m128 cmp = set2x64(0LL, tmp); + //print_m128_16x8("res",res); + //print_m128_16x8("cmp",cmp); + EXPECT_TRUE(!diff128(res, cmp)); +} + + +TEST(SimdUtilsTest, movemask_128) { + srand (time(NULL)); + u8 vec[16] = {0}; + u8 vec2[16] = {0}; + u16 r = rand() % 100 + 1; + for(int i=0; i<16; i++) { + if (r & (1 << i)) { + vec[i] = 0xff; + } + } + m128 v = loadu128(vec); + u16 mask = movemask128(v); + for(int i=0; i<16; i++) { + if (mask & (1 << i)) { + vec2[i] = 0xff; + } + } + for (int i=0; i<16; i++) { + ASSERT_EQ(vec[i],vec2[i]); + } +} + +TEST(SimdUtilsTest, pshufb_m128) { + srand (time(NULL)); + u8 vec[16]; + for (int i=0; i<16; i++) { + vec[i] = rand() % 1000 + 1; + } + u8 vec2[16]; + for (int i=0; i<16; i++) { + vec2[i]=i + (rand() % 100 + 0); + } + + // On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf. + // In NEON or PPC, if >=16, then the result is zero, otherwise it is that lane. + // Thus bellow we have to check that case to NEON or PPC. + + //Insure that vec3 has at least 1 or more 0x80 elements + u8 vec3[16] = {0}; + vec3[15] = 0x80; + + for (int i=0; i<15; i++) { + int l = rand() % 1000 + 0; + if (l % 16 ==0){ + vec3[i]= 0x80; + } else{ + vec3[i]= vec2[i]; + } + } + /* + printf("vec3: "); + for(int i=15; i>=0; i--) { printf("%02x, ", vec3[i]); } + printf("\n"); + */ + + //Test Special Case + m128 v1 = loadu128(vec); + m128 v2 = loadu128(vec3); + m128 vres = pshufb_m128(v1, v2); + + u8 res[16]; + storeu128(res, vres); + + for (int i=0; i<16; i++) { + if(vec3[i] & 0x80){ + ASSERT_EQ(res[i], 0); + }else{ + ASSERT_EQ(vec[vec3[i] % 16 ], res[i]); + } + } + + //Test Other Cases + v1 = loadu128(vec); + v2 = loadu128(vec2); + vres = pshufb_m128(v1, v2); + storeu128(res, vres); + + for (int i=0; i<16; i++) { + if(vec2[i] & 0x80){ + ASSERT_EQ(res[i], 0); + }else{ + ASSERT_EQ(vec[vec2[i] % 16 ], res[i]); + } + } +} + +/*Define ALIGNR128 macro*/ +#define TEST_ALIGNR128(v1, v2, buf, l) { \ + m128 v_aligned = palignr(v2,v1, l); \ + storeu128(res, v_aligned); \ + for (size_t i=0; i<16; i++) { \ + ASSERT_EQ(res[i], vec[i + l]); \ + } \ + } + +TEST(SimdUtilsTest, Alignr128){ + u8 vec[32]; + u8 res[16]; + for (int i=0; i<32; i++) { + vec[i]=i; + } + m128 v1 = loadu128(vec); + m128 v2 = loadu128(vec+16); + for (int j = 0; j<16; j++){ + TEST_ALIGNR128(v1, v2, vec, j); + } +} + + + + } // namespace diff --git a/unit/internal/supervector.cpp b/unit/internal/supervector.cpp index 342f8fd4..deb3b169 100644 --- a/unit/internal/supervector.cpp +++ b/unit/internal/supervector.cpp @@ -155,10 +155,14 @@ TEST(SuperVectorUtilsTest,OPXOR128c){ TEST(SuperVectorUtilsTest,OPANDNOT128c){ auto SP1 = SuperVector<16>::Zeroes(); auto SP2 = SuperVector<16>::Ones(); + SP1 = SP1.opandnot(SP2); + for (int i=0; i<16; i++) { + ASSERT_EQ(SP1.u.u8[i],0xff); + } SP2 = SP2.opandnot(SP1); for (int i=0; i<16; i++) { - ASSERT_EQ(SP2.u.s8[i],0); - } + 
ASSERT_EQ(SP2.u.u8[i],0); + } } TEST(SuperVectorUtilsTest,Movemask128c){ @@ -280,13 +284,17 @@ TEST(SuperVectorUtilsTest,pshufb128c) { } u8 vec2[16]; for (int i=0; i<16; i++) { - vec2[i]=i; + vec2[i]=i + (rand() % 15 + 0); } auto SP1 = SuperVector<16>::loadu(vec); auto SP2 = SuperVector<16>::loadu(vec2); auto SResult = SP1.template pshufb(SP2); for (int i=0; i<16; i++) { - ASSERT_EQ(vec[vec2[i]],SResult.u.u8[i]); + if(vec2[i] & 0x80){ + ASSERT_EQ(SResult.u.u8[i], 0); + }else{ + ASSERT_EQ(vec[vec2[i] % 16 ],SResult.u.u8[i]); + } } }
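
Note on the pshufb changes above: the SuperVector<16>::pshufb specializations, the pshufb_m128 test, and the updated pshufb128c test all target the same contract. Intel's PSHUFB zeroes a lane whenever the control byte has bit 0x80 set and otherwise indexes the source with the low four bits, which is why the VSX implementation builds a mask with vec_cmpge against 0x80 and clears those lanes with vec_sel after vec_perm. A minimal scalar sketch of that contract follows, for reference only; pshufb_ref is an illustrative helper and not part of the patch.

#include <stdint.h>

/* Scalar model of the Intel-style pshufb semantics asserted by the tests:
 * a control byte with bit 0x80 set yields 0, otherwise its low four bits
 * select the source lane. */
static void pshufb_ref(const uint8_t src[16], const uint8_t ctrl[16],
                       uint8_t out[16]) {
    for (int i = 0; i < 16; i++) {
        out[i] = (ctrl[i] & 0x80) ? 0 : src[ctrl[i] & 0xf];
    }
}

The loops in the updated tests compare the vector results against exactly this behaviour, once with control vectors guaranteed to contain at least one 0x80 byte and once with arbitrary control bytes.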