Move limex specific shuffle utils and ssse3 funcs

Matthew Barr 2016-06-06 11:54:21 +10:00
parent 9f98f4c7b2
commit 4d6934fc77
22 changed files with 182 additions and 371 deletions

View File

@@ -445,6 +445,7 @@ set (hs_exec_SRCS
src/nfa/limex_internal.h
src/nfa/limex_runtime.h
src/nfa/limex_runtime_impl.h
+src/nfa/limex_shuffle.h
src/nfa/limex_state_impl.h
src/nfa/mpv.h
src/nfa/mpv.c
@@ -525,11 +526,8 @@ set (hs_exec_SRCS
src/util/pqueue.h
src/util/scatter.h
src/util/scatter_runtime.h
-src/util/shuffle.h
-src/util/shuffle_ssse3.h
src/util/simd_utils.h
-src/util/simd_utils_ssse3.h
-src/util/simd_utils_ssse3.c
+src/util/simd_utils.c
src/util/state_compress.h
src/util/state_compress.c
src/util/unaligned.h
@@ -887,7 +885,6 @@ SET (hs_SRCS
src/util/report_manager.cpp
src/util/report_manager.h
src/util/simd_utils.h
-src/util/simd_utils_ssse3.h
src/util/target_info.cpp
src/util/target_info.h
src/util/ue2_containers.h

View File

@@ -36,7 +36,6 @@
#include "teddy.h"
#include "teddy_internal.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
/** \brief number of bytes processed in each iteration */
#define ITER_BYTES 16

View File

@@ -36,7 +36,6 @@
#include "teddy_internal.h"
#include "teddy_runtime_common.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
const u8 ALIGN_DIRECTIVE p_mask_arr[17][32] = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

View File

@@ -36,7 +36,6 @@
#include "teddy_internal.h"
#include "teddy_runtime_common.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
#if defined(__AVX2__)

View File

@@ -37,7 +37,6 @@
#include "util/compare.h"
#include "util/masked_move.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
#include <ctype.h>
#include <stdbool.h>

View File

@@ -35,6 +35,7 @@
#include "accel.h"
#include "limex_internal.h"
#include "limex_limits.h"
+#include "limex_shuffle.h"
#include "nfa_internal.h"
#include "shufti.h"
#include "truffle.h"
@@ -44,10 +45,7 @@
#include "ue2common.h"
#include "vermicelli.h"
#include "util/bitutils.h"
-#include "util/shuffle.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
-#include "util/shuffle_ssse3.h"
static really_inline
size_t accelScanWrapper(const u8 *accelTable, const union AccelAux *aux,
@@ -80,7 +78,7 @@ size_t accelScanWrapper(const u8 *accelTable, const union AccelAux *aux,
size_t doAccel32(u32 s, u32 accel, const u8 *accelTable,
const union AccelAux *aux, const u8 *input, size_t i,
size_t end) {
-u32 idx = shuffleDynamic32(s, accel);
+u32 idx = packedExtract32(s, accel);
return accelScanWrapper(accelTable, aux, input, idx, i, end);
}
@@ -92,7 +90,7 @@ size_t doAccel128(const m128 *state, const struct LimExNFA128 *limex,
DEBUG_PRINTF("using PSHUFB for 128-bit shuffle\n");
m128 accelPerm = limex->accelPermute;
m128 accelComp = limex->accelCompare;
-idx = shufflePshufb128(s, accelPerm, accelComp);
+idx = packedExtract128(s, accelPerm, accelComp);
return accelScanWrapper(accelTable, aux, input, idx, i, end);
}
@@ -105,17 +103,13 @@ size_t doAccel256(const m256 *state, const struct LimExNFA256 *limex,
m256 accelPerm = limex->accelPermute;
m256 accelComp = limex->accelCompare;
#if !defined(__AVX2__)
-u32 idx1 = shufflePshufb128(s.lo, accelPerm.lo, accelComp.lo);
-u32 idx2 = shufflePshufb128(s.hi, accelPerm.hi, accelComp.hi);
-#else
-// TODO: learn you some avx2 shuffles for great good
-u32 idx1 = shufflePshufb128(movdq_lo(s), movdq_lo(accelPerm),
-movdq_lo(accelComp));
-u32 idx2 = shufflePshufb128(movdq_hi(s), movdq_hi(accelPerm),
-movdq_hi(accelComp));
-#endif
+u32 idx1 = packedExtract128(s.lo, accelPerm.lo, accelComp.lo);
+u32 idx2 = packedExtract128(s.hi, accelPerm.hi, accelComp.hi);
assert((idx1 & idx2) == 0); // should be no shared bits
idx = idx1 | idx2;
+#else
+idx = packedExtract256(s, accelPerm, accelComp);
+#endif
return accelScanWrapper(accelTable, aux, input, idx, i, end);
}
@@ -127,9 +121,9 @@ size_t doAccel384(const m384 *state, const struct LimExNFA384 *limex,
DEBUG_PRINTF("using PSHUFB for 384-bit shuffle\n");
m384 accelPerm = limex->accelPermute;
m384 accelComp = limex->accelCompare;
-u32 idx1 = shufflePshufb128(s.lo, accelPerm.lo, accelComp.lo);
-u32 idx2 = shufflePshufb128(s.mid, accelPerm.mid, accelComp.mid);
-u32 idx3 = shufflePshufb128(s.hi, accelPerm.hi, accelComp.hi);
+u32 idx1 = packedExtract128(s.lo, accelPerm.lo, accelComp.lo);
+u32 idx2 = packedExtract128(s.mid, accelPerm.mid, accelComp.mid);
+u32 idx3 = packedExtract128(s.hi, accelPerm.hi, accelComp.hi);
assert((idx1 & idx2 & idx3) == 0); // should be no shared bits
idx = idx1 | idx2 | idx3;
return accelScanWrapper(accelTable, aux, input, idx, i, end);
@@ -144,21 +138,17 @@ size_t doAccel512(const m512 *state, const struct LimExNFA512 *limex,
m512 accelPerm = limex->accelPermute;
m512 accelComp = limex->accelCompare;
#if !defined(__AVX2__)
-u32 idx1 = shufflePshufb128(s.lo.lo, accelPerm.lo.lo, accelComp.lo.lo);
-u32 idx2 = shufflePshufb128(s.lo.hi, accelPerm.lo.hi, accelComp.lo.hi);
-u32 idx3 = shufflePshufb128(s.hi.lo, accelPerm.hi.lo, accelComp.hi.lo);
-u32 idx4 = shufflePshufb128(s.hi.hi, accelPerm.hi.hi, accelComp.hi.hi);
-#else
-u32 idx1 = shufflePshufb128(movdq_lo(s.lo), movdq_lo(accelPerm.lo),
-movdq_lo(accelComp.lo));
-u32 idx2 = shufflePshufb128(movdq_hi(s.lo), movdq_hi(accelPerm.lo),
-movdq_hi(accelComp.lo));
-u32 idx3 = shufflePshufb128(movdq_lo(s.hi), movdq_lo(accelPerm.hi),
-movdq_lo(accelComp.hi));
-u32 idx4 = shufflePshufb128(movdq_hi(s.hi), movdq_hi(accelPerm.hi),
-movdq_hi(accelComp.hi));
-#endif
+u32 idx1 = packedExtract128(s.lo.lo, accelPerm.lo.lo, accelComp.lo.lo);
+u32 idx2 = packedExtract128(s.lo.hi, accelPerm.lo.hi, accelComp.lo.hi);
+u32 idx3 = packedExtract128(s.hi.lo, accelPerm.hi.lo, accelComp.hi.lo);
+u32 idx4 = packedExtract128(s.hi.hi, accelPerm.hi.hi, accelComp.hi.hi);
assert((idx1 & idx2 & idx3 & idx4) == 0); // should be no shared bits
idx = idx1 | idx2 | idx3 | idx4;
+#else
+u32 idx1 = packedExtract256(s.lo, accelPerm.lo, accelComp.lo);
+u32 idx2 = packedExtract256(s.hi, accelPerm.hi, accelComp.hi);
+assert((idx1 & idx2) == 0); // should be no shared bits
+idx = idx1 | idx2;
+#endif
return accelScanWrapper(accelTable, aux, input, idx, i, end);
}
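
The packedExtract family used above compresses the scattered accelerable-state bits of an NFA state vector into a dense index for the accel table. As a reading aid, a minimal scalar model of the 32-bit case (a sketch, not code from this commit; the name packed_extract32_ref is hypothetical) matching the _pext_u32 contract that the BMI2 path relies on:

    /* Pack the bits of x selected by mask, in order, into the low bits of
     * the result -- the contract of BMI2's _pext_u32. */
    static unsigned packed_extract32_ref(unsigned x, unsigned mask) {
        unsigned result = 0;
        unsigned out = 0;                /* next output bit position */
        for (unsigned bit = 0; bit < 32; bit++) {
            if (mask & (1U << bit)) {    /* this bit participates */
                if (x & (1U << bit)) {
                    result |= 1U << out; /* carry its value into the packed word */
                }
                out++;
            }
        }
        return result;
    }

For example, packed_extract32_ref(0x28, 0x28) is 0x3. The asserts above hold because each lane's permute/compare masks are built to select disjoint output bits, so ORing the per-lane results can never collide.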

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,20 +34,19 @@
* be faster and actually correct if these assumptions don't hold true.
*/
-#ifndef SHUFFLE_H
-#define SHUFFLE_H
+#ifndef LIMEX_SHUFFLE_H
+#define LIMEX_SHUFFLE_H
#include "config.h"
-#include "bitutils.h"
-#include "simd_utils.h"
#include "ue2common.h"
+#include "util/bitutils.h"
+#include "util/simd_utils.h"
#if defined(__BMI2__) || (defined(_WIN32) && defined(__AVX2__))
#define HAVE_PEXT
#endif
static really_inline
-u32 shuffleDynamic32(u32 x, u32 mask) {
+u32 packedExtract32(u32 x, u32 mask) {
#if defined(HAVE_PEXT)
// Intel BMI2 can do this operation in one instruction.
return _pext_u32(x, mask);
@@ -67,7 +66,7 @@ u32 shuffleDynamic32(u32 x, u32 mask) {
}
static really_inline
-u32 shuffleDynamic64(u64a x, u64a mask) {
+u32 packedExtract64(u64a x, u64a mask) {
#if defined(HAVE_PEXT) && defined(ARCH_64_BIT)
// Intel BMI2 can do this operation in one instruction.
return _pext_u64(x, mask);
@@ -88,4 +87,24 @@ u32 shuffleDynamic64(u64a x, u64a mask) {
#undef HAVE_PEXT
-#endif // SHUFFLE_H
+static really_inline
+u32 packedExtract128(m128 s, const m128 permute, const m128 compare) {
+m128 shuffled = pshufb(s, permute);
+m128 compared = and128(shuffled, compare);
+u16 rv = ~cmpmsk8(compared, shuffled);
+return (u32)rv;
+}
+#if defined(__AVX2__)
+static really_inline
+u32 packedExtract256(m256 s, const m256 permute, const m256 compare) {
+// vpshufb doesn't cross lanes, so this is a bit of a cheat
+m256 shuffled = vpshufb(s, permute);
+m256 compared = and256(shuffled, compare);
+u32 rv = ~movemask256(eq256(compared, shuffled));
+// stitch the lane-wise results back together
+return (u32)((rv >> 16) | (rv & 0xffffU));
+}
+#endif // AVX2
+#endif // LIMEX_SHUFFLE_H
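
packedExtract128 performs the same bit-gather with byte shuffles: permute byte i names the source byte whose interesting bit should land in output bit i (0x80 parks a lane, since pshufb then writes zero), and compare byte i holds every bit of that byte except the one of interest, so (shuffled & compare) differs from shuffled in lane i exactly when the selected bit is set; inverting the byte-equality movemask yields the packed result. A minimal sketch of building one such mask pair, mirroring build_pshufb_masks_onebit in the unit tests further down, routing state bit 42 to output bit 0:

    m128 permute, compare;
    memset(&permute, 0x80, sizeof(permute));     /* 0x80: pshufb writes 0x00 */
    memset(&compare, 0, sizeof(compare));        /* zeroed lanes always compare equal */
    ((u8 *)&permute)[0] = 42 / 8;                /* gather source byte 5 into lane 0 */
    ((u8 *)&compare)[0] = (u8)~(1U << (42 % 8)); /* byte 5 with bit 2 cleared */
    /* packedExtract128(s, permute, compare) is now 1 iff bit 42 of s is set */

The stitch in packedExtract256 is needed because vpshufb shuffles each 128-bit lane independently: each lane contributes 16 movemask bits, and ORing the halves together is safe only because a well-formed mask pair never assigns the same output bit from both lanes.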

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,7 +31,6 @@
#include "ue2common.h"
#include "util/bitutils.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
static really_inline
const u8 *JOIN(MATCH_ALGO, fwdBlock)(m256 mask_lo, m256 mask_hi, m256 chars,

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,7 +31,6 @@
#include "ue2common.h"
#include "util/bitutils.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
/* Normal SSSE3 shufti */

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,7 +32,6 @@
#include "multitruffle.h"
#include "util/bitutils.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
#include "multiaccel_common.h"

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -40,7 +40,6 @@
#include "shufti_common.h"
-#include "util/simd_utils_ssse3.h"
/** \brief Naive byte-by-byte implementation. */
static really_inline

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,7 +34,6 @@
#include "util/bitutils.h"
#include "util/simd_utils.h"
#include "util/unaligned.h"
-#include "util/simd_utils_ssse3.h"
/*
* Common stuff for all versions of shufti (single, multi and multidouble)

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -35,7 +35,6 @@
#include "truffle.h"
#include "util/bitutils.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
#include "truffle_common.h"

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,7 +31,6 @@
#include "util/bitutils.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
/*
* Common stuff for all versions of truffle (single, multi and multidouble)

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,7 +34,6 @@
#include "rose_internal.h"
#include "nfa/nfa_api_queue.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
/** \brief Maximum number of bytes to scan when looking for a "counting miracle"
* stop character. */

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,7 +33,6 @@
#include "unaligned.h"
#include "simd_utils.h"
-#include "simd_utils_ssse3.h"
#ifdef __cplusplus
extern "C" {

View File

@@ -1,79 +0,0 @@
/*
* Copyright (c) 2015, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SHUFFLE_SSSE3_H
#define SHUFFLE_SSSE3_H
#include "simd_utils_ssse3.h"
#ifdef DEBUG
#include "compare.h"
static really_inline void shufDumpMsk(m128 msk) {
u8 * mskAsU8 = (u8 *)&msk;
for (int i = 0; i < 16; i++) {
u8 c = mskAsU8[i];
for (int j = 0; j < 8; j++) {
if ((c >> (7-j)) & 0x1)
printf("1");
else
printf("0");
}
printf(" ");
}
}
static really_inline void shufDumpMskAsChars(m128 msk) {
u8 * mskAsU8 = (u8 *)&msk;
for (int i = 0; i < 16; i++) {
u8 c = mskAsU8[i];
if (ourisprint(c))
printf("%c",c);
else
printf(".");
}
}
#endif
#if !defined(NO_SSSE3)
static really_inline
u32 shufflePshufb128(m128 s, const m128 permute, const m128 compare) {
m128 shuffled = pshufb(s, permute);
m128 compared = and128(shuffled, compare);
#ifdef DEBUG
printf("State: "); shufDumpMsk(s); printf("\n");
printf("Permute: "); shufDumpMsk(permute); printf("\n");
printf("Compare: "); shufDumpMsk(compare); printf("\n");
printf("Shuffled: "); shufDumpMsk(shuffled); printf("\n");
printf("Compared: "); shufDumpMsk(compared); printf("\n");
#endif
u16 rv = ~cmpmsk8(compared, shuffled);
return (u32)rv;
}
#endif // NO_SSSE3
#endif // SHUFFLE_SSSE3_H

View File

@@ -26,7 +26,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include "simd_utils_ssse3.h"
+#include "simd_utils.h"
const char vbs_mask_data[] ALIGN_CL_DIRECTIVE = {
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
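
Only the first row of vbs_mask_data survives the cut above, but the indexing in variable_byte_shift_m128 -- loadu128(vbs_mask_data + 16 - amount) -- implies the layout: 16 sentinel bytes with the high bit set (pshufb emits zero for those), the identity bytes 0x00..0x0f, then 16 more sentinels. Sliding a 16-byte window across that table turns a single pshufb into a byte-wise shift with zero fill; an annotated trace, assuming that 48-byte layout:

    /* amount = +3: window = {f0,f0,f0, 00,01,...,0c} -> out[i] = in[i-3],
     *              equivalent to _mm_slli_si128(in, 3)
     * amount =  0: window = {00,01,...,0f}, the identity permutation
     * amount = -2: window = {02,03,...,0f, f0,f0}    -> out[i] = in[i+2],
     *              equivalent to _mm_srli_si128(in, 2)
     */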

View File

@@ -33,6 +33,10 @@
#ifndef SIMD_UTILS
#define SIMD_UTILS
+#if !defined(_WIN32) && !defined(__SSSE3__)
+#error SSSE3 instructions must be enabled
+#endif
#include "config.h"
#include <string.h> // for memcpy
@@ -93,6 +97,14 @@
#define assume_aligned(x, y) (x)
#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern const char vbs_mask_data[];
+#ifdef __cplusplus
+}
+#endif
static really_inline m128 ones128(void) {
#if !defined(NO_ASM)
// trick from Intel's optimization guide to generate all-ones. We have to
@@ -160,7 +172,6 @@ static really_inline unsigned short cmpmsk8(m128 a, m128 b) {
#define eq128(a, b) _mm_cmpeq_epi8((a), (b))
#define movemask128(a) ((u32)_mm_movemask_epi8((a)))
// We found that this generated better code with gcc-4.1 and with the default
// tuning settings on gcc-4.4 than just using the _mm_set1_epi8() instrinsic.
static really_inline m128 set16x8(u8 c) {
@@ -318,6 +329,36 @@ char testbit128(const m128 *ptr, unsigned int n) {
return !!(bytes[n / 8] & (1 << (n % 8)));
}
+// offset must be an immediate
+#define palignr(r, l, offset) _mm_alignr_epi8(r, l, offset)
+static really_inline
+m128 pshufb(m128 a, m128 b) {
+m128 result;
+result = _mm_shuffle_epi8(a, b);
+return result;
+}
+static really_inline
+m256 vpshufb(m256 a, m256 b) {
+#if defined(__AVX2__)
+return _mm256_shuffle_epi8(a, b);
+#else
+m256 rv;
+rv.lo = pshufb(a.lo, b.lo);
+rv.hi = pshufb(a.hi, b.hi);
+return rv;
+#endif
+}
+static really_inline
+m128 variable_byte_shift_m128(m128 in, s32 amount) {
+assert(amount >= -16 && amount <= 16);
+m128 shift_mask = loadu128(vbs_mask_data + 16 - amount);
+return pshufb(in, shift_mask);
+}
/****
**** 256-bit Primitives
****/
@@ -735,6 +776,7 @@ m256 shift256Left8Bits(m256 a) {
#define extractlow32from256(a) movd(cast256to128(a))
#define interleave256hi(a, b) _mm256_unpackhi_epi8(a, b);
#define interleave256lo(a, b) _mm256_unpacklo_epi8(a, b);
+#define vpalignr(r, l, offset) _mm256_alignr_epi8(r, l, offset)
#endif //AVX2
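
With pshufb now hosted in this header, its semantics are worth keeping at hand when reading the shuffle-based code above: each mask byte selects a source byte by its low four bits, and a set high bit forces that output byte to zero. An illustrative fragment (a sketch, not part of the header; it assumes m128 is the usual __m128i typedef):

    m128 src  = _mm_setr_epi8(10, 11, 12, 13, 14, 15, 16, 17,
                              18, 19, 20, 21, 22, 23, 24, 25);
    m128 mask = _mm_setr_epi8(0, 0, 15, (char)0x80, 1, 1, 1, 1,
                              1, 1, 1, 1, 1, 1, 1, 1);
    m128 out  = pshufb(src, mask); /* {10, 10, 25, 0, 11, 11, ..., 11} */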

View File

@@ -1,166 +0,0 @@
/*
* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* \brief SIMD primitives specifically for Intel SSSE3 platforms.
*/
#ifndef SIMD_UTILS_SSSE3_H_E27DF795C9AA02
#define SIMD_UTILS_SSSE3_H_E27DF795C9AA02
#if !defined(_WIN32) && !defined(__SSSE3__)
#error SSSE3 instructions must be enabled
#endif
#include "simd_utils.h"
#include "ue2common.h"
// we may already have x86intrin.h
#if !defined(USE_X86INTRIN_H)
#if defined(HAVE_C_INTRIN_H)
#include <intrin.h>
#elif defined(HAVE_TMMINTRIN_H)
#include <tmmintrin.h> // SSSE3 intrinsics
#else
#define I_HAVE_BROKEN_INTRINSICS
#endif
#endif
#if !defined(I_HAVE_BROKEN_INTRINSICS)
// newish compilers get this right
#define palignr(r, l, offset) _mm_alignr_epi8(r, l, offset)
#else
// must be inline, even in weak-sauce debug builds.
// oldish compilers either don't have the intrinsic, or force one arg through memory
static really_really_inline
m128 palignr(m128 r, m128 l, const int offset) {
__asm__ ("palignr %2,%1,%0" : "+x"(r) : "x"(l), "i"(offset));
return r;
}
#endif
static really_inline
m128 pshufb(m128 a, m128 b) {
m128 result;
#if !defined(I_HAVE_BROKEN_INTRINSICS)
result = _mm_shuffle_epi8(a, b);
#else
__asm__("pshufb\t%1,%0" : "=x"(result) : "xm"(b), "0"(a));
#endif
return result;
}
#ifdef __cplusplus
extern "C" {
#endif
extern const char vbs_mask_data[];
#ifdef __cplusplus
}
#endif
static really_inline
m128 variable_byte_shift_m128(m128 in, s32 amount) {
assert(amount >= -16 && amount <= 16);
m128 shift_mask = loadu128(vbs_mask_data + 16 - amount);
return pshufb(in, shift_mask);
}
#if defined(__AVX2__)
static really_inline
m256 vpshufb(m256 a, m256 b) {
return _mm256_shuffle_epi8(a, b);
}
#if defined(USE_GCC_COMPOUND_STATEMENTS)
#define vpalignr(r, l, offset) ({ \
m256 res = _mm256_alignr_epi8(r, l, offset); \
res; \
})
#else
#define vpalignr(r, l, offset) _mm256_alignr_epi8(r, l, offset)
#endif
#else // not __AVX2__
static really_inline
m256 vpshufb(m256 a, m256 b) {
m256 rv;
rv.lo = pshufb(a.lo, b.lo);
rv.hi = pshufb(a.hi, b.hi);
return rv;
}
/* palignr requires the offset to be an immediate, which we can do with a
* compound macro, otherwise we have to enumerate the offsets and hope the
* compiler can throw the rest away. */
#if defined(USE_GCC_COMPOUND_STATEMENTS)
#define vpalignr(r, l, offset) ({ \
m256 res; \
res.lo = palignr(r.lo, l.lo, offset); \
res.hi = palignr(r.hi, l.hi, offset); \
res; \
})
#else
#define VPALIGN_CASE(N) case N: \
res.lo = palignr(r.lo, l.lo, N); \
res.hi = palignr(r.hi, l.hi, N); \
return res;
static really_inline
m256 vpalignr(m256 r, m256 l, const int offset) {
m256 res;
switch (offset) {
VPALIGN_CASE(0)
VPALIGN_CASE(1)
VPALIGN_CASE(2)
VPALIGN_CASE(3)
VPALIGN_CASE(4)
VPALIGN_CASE(5)
VPALIGN_CASE(6)
VPALIGN_CASE(7)
VPALIGN_CASE(8)
VPALIGN_CASE(9)
VPALIGN_CASE(10)
VPALIGN_CASE(11)
VPALIGN_CASE(12)
VPALIGN_CASE(13)
VPALIGN_CASE(14)
VPALIGN_CASE(15)
default:
assert(0);
return zeroes256();
}
}
#undef VPALIGN_CASE
#endif
#endif // __AVX2__
#endif /* SIMD_UTILS_SSSE3_H_E27DF795C9AA02 */
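
The VPALIGN_CASE switch deleted here (the AVX2 build keeps a plain vpalignr macro in simd_utils.h) exists because _mm_alignr_epi8 and _mm256_alignr_epi8 require the byte offset to be a compile-time immediate: enumerating the offsets gives every intrinsic call a literal, and when the argument is a constant the compiler folds the untaken cases away. The pattern in miniature, with a hypothetical helper name:

    #define ALIGNR_CASE(N) case N: return _mm_alignr_epi8(r, l, N);
    static really_inline
    m128 palignr_dynamic(m128 r, m128 l, int offset) {
        switch (offset) { /* untaken cases vanish for constant offsets */
        ALIGNR_CASE(0)  ALIGNR_CASE(1)  ALIGNR_CASE(2)  ALIGNR_CASE(3)
        ALIGNR_CASE(4)  ALIGNR_CASE(5)  ALIGNR_CASE(6)  ALIGNR_CASE(7)
        ALIGNR_CASE(8)  ALIGNR_CASE(9)  ALIGNR_CASE(10) ALIGNR_CASE(11)
        ALIGNR_CASE(12) ALIGNR_CASE(13) ALIGNR_CASE(14) ALIGNR_CASE(15)
        default: assert(0); return zeroes128();
        }
    }
    #undef ALIGNR_CASE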

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, Intel Corporation
+* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -31,8 +31,7 @@
#include "gtest/gtest.h"
#include "util/simd_utils.h"
-#include "util/shuffle.h"
-#include "util/shuffle_ssse3.h"
+#include "nfa/limex_shuffle.h"
namespace {
@@ -50,34 +49,34 @@ Mask setbit(unsigned int bit) {
return cf.simd;
}
-TEST(Shuffle, ShuffleDynamic32_1) {
+TEST(Shuffle, PackedExtract32_1) {
// Try all possible one-bit masks
for (unsigned int i = 0; i < 32; i++) {
// shuffle a single 1 bit to the front
u32 mask = 1U << i;
-EXPECT_EQ(1U, shuffleDynamic32(mask, mask));
-EXPECT_EQ(1U, shuffleDynamic32(~0U, mask));
+EXPECT_EQ(1U, packedExtract32(mask, mask));
+EXPECT_EQ(1U, packedExtract32(~0U, mask));
// we should get zero out of these cases
-EXPECT_EQ(0U, shuffleDynamic32(0, mask));
-EXPECT_EQ(0U, shuffleDynamic32(~mask, mask));
+EXPECT_EQ(0U, packedExtract32(0, mask));
+EXPECT_EQ(0U, packedExtract32(~mask, mask));
// we should get zero out of all the other bit positions
for (unsigned int j = 0; (j != i && j < 32); j++) {
-EXPECT_EQ(0U, shuffleDynamic32((1U << j), mask));
+EXPECT_EQ(0U, packedExtract32((1U << j), mask));
}
}
}
-TEST(Shuffle, ShuffleDynamic32_2) {
+TEST(Shuffle, PackedExtract32_2) {
// All 32 bits in mask are on
u32 mask = ~0U;
-EXPECT_EQ(0U, shuffleDynamic32(0, mask));
-EXPECT_EQ(mask, shuffleDynamic32(mask, mask));
+EXPECT_EQ(0U, packedExtract32(0, mask));
+EXPECT_EQ(mask, packedExtract32(mask, mask));
for (unsigned int i = 0; i < 32; i++) {
-EXPECT_EQ(1U << i, shuffleDynamic32(1U << i, mask));
+EXPECT_EQ(1U << i, packedExtract32(1U << i, mask));
}
}
-TEST(Shuffle, ShuffleDynamic32_3) {
+TEST(Shuffle, PackedExtract32_3) {
// Try setting every second bit
u32 mask = 0;
for (unsigned int i = 0; i < 32; i += 2) {
@@ -85,63 +84,63 @@ TEST(Shuffle, ShuffleDynamic32_3) {
}
// Test both cases (all even bits, all odd bits)
-EXPECT_EQ((1U << 16) - 1, shuffleDynamic32(mask, mask));
-EXPECT_EQ((1U << 16) - 1, shuffleDynamic32(~mask, ~mask));
-EXPECT_EQ(0U, shuffleDynamic32(~mask, mask));
-EXPECT_EQ(0U, shuffleDynamic32(mask, ~mask));
+EXPECT_EQ((1U << 16) - 1, packedExtract32(mask, mask));
+EXPECT_EQ((1U << 16) - 1, packedExtract32(~mask, ~mask));
+EXPECT_EQ(0U, packedExtract32(~mask, mask));
+EXPECT_EQ(0U, packedExtract32(mask, ~mask));
for (unsigned int i = 0; i < 32; i += 2) {
-EXPECT_EQ(1U << (i/2), shuffleDynamic32(1U << i, mask));
-EXPECT_EQ(0U, shuffleDynamic32(1U << i, ~mask));
-EXPECT_EQ(1U << (i/2), shuffleDynamic32(1U << (i+1), ~mask));
-EXPECT_EQ(0U, shuffleDynamic32(1U << (i+1), mask));
+EXPECT_EQ(1U << (i/2), packedExtract32(1U << i, mask));
+EXPECT_EQ(0U, packedExtract32(1U << i, ~mask));
+EXPECT_EQ(1U << (i/2), packedExtract32(1U << (i+1), ~mask));
+EXPECT_EQ(0U, packedExtract32(1U << (i+1), mask));
}
}
-TEST(Shuffle, ShuffleDynamic64_1) {
+TEST(Shuffle, PackedExtract64_1) {
// Try all possible one-bit masks
for (unsigned int i = 0; i < 64; i++) {
// shuffle a single 1 bit to the front
u64a mask = 1ULL << i;
-EXPECT_EQ(1U, shuffleDynamic64(mask, mask));
-EXPECT_EQ(1U, shuffleDynamic64(~0ULL, mask));
+EXPECT_EQ(1U, packedExtract64(mask, mask));
+EXPECT_EQ(1U, packedExtract64(~0ULL, mask));
// we should get zero out of these cases
-EXPECT_EQ(0U, shuffleDynamic64(0, mask));
-EXPECT_EQ(0U, shuffleDynamic64(~mask, mask));
+EXPECT_EQ(0U, packedExtract64(0, mask));
+EXPECT_EQ(0U, packedExtract64(~mask, mask));
// we should get zero out of all the other bit positions
for (unsigned int j = 0; (j != i && j < 64); j++) {
-EXPECT_EQ(0U, shuffleDynamic64((1ULL << j), mask));
+EXPECT_EQ(0U, packedExtract64((1ULL << j), mask));
}
}
}
-TEST(Shuffle, ShuffleDynamic64_2) {
+TEST(Shuffle, PackedExtract64_2) {
// Fill first half of mask
u64a mask = 0x00000000ffffffffULL;
-EXPECT_EQ(0U, shuffleDynamic64(0, mask));
-EXPECT_EQ(0xffffffffU, shuffleDynamic64(mask, mask));
+EXPECT_EQ(0U, packedExtract64(0, mask));
+EXPECT_EQ(0xffffffffU, packedExtract64(mask, mask));
for (unsigned int i = 0; i < 32; i++) {
-EXPECT_EQ(1U << i, shuffleDynamic64(1ULL << i, mask));
+EXPECT_EQ(1U << i, packedExtract64(1ULL << i, mask));
}
// Fill second half of mask
mask = 0xffffffff00000000ULL;
-EXPECT_EQ(0U, shuffleDynamic64(0, mask));
-EXPECT_EQ(0xffffffffU, shuffleDynamic64(mask, mask));
+EXPECT_EQ(0U, packedExtract64(0, mask));
+EXPECT_EQ(0xffffffffU, packedExtract64(mask, mask));
for (unsigned int i = 32; i < 64; i++) {
-EXPECT_EQ(1U << (i - 32), shuffleDynamic64(1ULL << i, mask));
+EXPECT_EQ(1U << (i - 32), packedExtract64(1ULL << i, mask));
}
// Try one in the middle
mask = 0x0000ffffffff0000ULL;
-EXPECT_EQ(0U, shuffleDynamic64(0, mask));
-EXPECT_EQ(0xffffffffU, shuffleDynamic64(mask, mask));
+EXPECT_EQ(0U, packedExtract64(0, mask));
+EXPECT_EQ(0xffffffffU, packedExtract64(mask, mask));
for (unsigned int i = 16; i < 48; i++) {
-EXPECT_EQ(1U << (i - 16), shuffleDynamic64(1ULL << i, mask));
+EXPECT_EQ(1U << (i - 16), packedExtract64(1ULL << i, mask));
}
}
-TEST(Shuffle, ShuffleDynamic64_3) {
+TEST(Shuffle, PackedExtract64_3) {
// Try setting every second bit (note: 32 bits, the max we can shuffle)
u64a mask = 0;
for (unsigned int i = 0; i < 64; i += 2) {
@@ -149,46 +148,69 @@ TEST(Shuffle, ShuffleDynamic64_3) {
}
// Test both cases (all even bits, all odd bits)
-EXPECT_EQ(0xffffffffU, shuffleDynamic64(mask, mask));
-EXPECT_EQ(0xffffffffU, shuffleDynamic64(~mask, ~mask));
-EXPECT_EQ(0U, shuffleDynamic64(~mask, mask));
-EXPECT_EQ(0U, shuffleDynamic64(mask, ~mask));
+EXPECT_EQ(0xffffffffU, packedExtract64(mask, mask));
+EXPECT_EQ(0xffffffffU, packedExtract64(~mask, ~mask));
+EXPECT_EQ(0U, packedExtract64(~mask, mask));
+EXPECT_EQ(0U, packedExtract64(mask, ~mask));
for (unsigned int i = 0; i < 64; i += 2) {
-EXPECT_EQ(1U << (i/2), shuffleDynamic64(1ULL << i, mask));
-EXPECT_EQ(0U, shuffleDynamic64(1ULL << i, ~mask));
-EXPECT_EQ(1U << (i/2), shuffleDynamic64(1ULL << (i+1), ~mask));
-EXPECT_EQ(0U, shuffleDynamic64(1ULL << (i+1), mask));
+EXPECT_EQ(1U << (i/2), packedExtract64(1ULL << i, mask));
+EXPECT_EQ(0U, packedExtract64(1ULL << i, ~mask));
+EXPECT_EQ(1U << (i/2), packedExtract64(1ULL << (i+1), ~mask));
+EXPECT_EQ(0U, packedExtract64(1ULL << (i+1), mask));
}
}
+template<typename T>
static
-void build_pshufb_masks_onebit(unsigned int bit, m128 *permute, m128 *compare) {
+void build_pshufb_masks_onebit(unsigned int bit, T *permute, T *compare) {
+static_assert(sizeof(T) == sizeof(m128) || sizeof(T) == sizeof(m256),
+"should be valid type");
// permute mask has 0x80 in all bytes except the one we care about
memset(permute, 0x80, sizeof(*permute));
memset(compare, 0, sizeof(*compare));
char *pmsk = (char *)permute;
char *cmsk = (char *)compare;
-pmsk[0] = bit/8;
-cmsk[0] = ~(1 << (bit % 8));
+u8 off = (bit >= 128) ? 0x10 : 0;
+pmsk[off] = bit/8;
+cmsk[off] = ~(1 << (bit % 8));
}
-TEST(Shuffle, ShufflePshufb128_1) {
+TEST(Shuffle, PackedExtract128_1) {
// Try all possible one-bit masks
for (unsigned int i = 0; i < 128; i++) {
// shuffle a single 1 bit to the front
m128 permute, compare;
build_pshufb_masks_onebit(i, &permute, &compare);
-EXPECT_EQ(1U, shufflePshufb128(setbit<m128>(i), permute, compare));
-EXPECT_EQ(1U, shufflePshufb128(ones128(), permute, compare));
+EXPECT_EQ(1U, packedExtract128(setbit<m128>(i), permute, compare));
+EXPECT_EQ(1U, packedExtract128(ones128(), permute, compare));
// we should get zero out of these cases
-EXPECT_EQ(0U, shufflePshufb128(zeroes128(), permute, compare));
-EXPECT_EQ(0U, shufflePshufb128(not128(setbit<m128>(i)), permute, compare));
+EXPECT_EQ(0U, packedExtract128(zeroes128(), permute, compare));
+EXPECT_EQ(0U, packedExtract128(not128(setbit<m128>(i)), permute, compare));
// we should get zero out of all the other bit positions
for (unsigned int j = 0; (j != i && j < 128); j++) {
-EXPECT_EQ(0U, shufflePshufb128(setbit<m128>(j), permute, compare));
+EXPECT_EQ(0U, packedExtract128(setbit<m128>(j), permute, compare));
}
}
}
+#if defined(__AVX2__)
+TEST(Shuffle, PackedExtract256_1) {
+// Try all possible one-bit masks
+for (unsigned int i = 0; i < 256; i++) {
+// shuffle a single 1 bit to the front
+m256 permute, compare;
+build_pshufb_masks_onebit(i, &permute, &compare);
+EXPECT_EQ(1U, packedExtract256(setbit<m256>(i), permute, compare));
+EXPECT_EQ(1U, packedExtract256(ones256(), permute, compare));
+// we should get zero out of these cases
+EXPECT_EQ(0U, packedExtract256(zeroes256(), permute, compare));
+EXPECT_EQ(0U, packedExtract256(not256(setbit<m256>(i)), permute, compare));
+// we should get zero out of all the other bit positions
+for (unsigned int j = 0; (j != i && j < 256); j++) {
+EXPECT_EQ(0U, packedExtract256(setbit<m256>(j), permute, compare));
+}
+}
+}
+#endif
} // namespace
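
The off computation added to build_pshufb_masks_onebit is what makes the 256-bit test work: vpshufb cannot move bytes across 128-bit lanes, so for bits in the high lane the permute and compare bytes must themselves sit in the high lane, at byte offset 0x10. Tracing one case, bit 200, as an annotation of the code above:

    /* off        = 0x10              -- masks land in lane 1, byte 0
     * pmsk[0x10] = 200 / 8 = 25      -- vpshufb keeps the low nibble (9),
     *                                   i.e. byte 9 of the high lane,
     *                                   which is global byte 25
     * cmsk[0x10] = ~(1 << (200 % 8)) -- byte 25 with bit 0 cleared
     * The set bit surfaces as bit 16 of movemask256; the final
     * (rv >> 16) | (rv & 0xffff) in packedExtract256 folds it to bit 0,
     * which is why the test expects 1U for every single-bit mask. */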

View File

@@ -32,7 +32,6 @@
#include "util/alloc.h"
#include "util/make_unique.h"
#include "util/simd_utils.h"
-#include "util/simd_utils_ssse3.h"
using namespace std;
using namespace ue2;