Move limex specific shuffle utils and ssse3 funcs

Matthew Barr
2016-06-06 11:54:21 +10:00
parent 9f98f4c7b2
commit 4d6934fc77
22 changed files with 182 additions and 371 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Intel Corporation
* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,7 +33,6 @@
#include "unaligned.h"
#include "simd_utils.h"
#include "simd_utils_ssse3.h"
#ifdef __cplusplus
extern "C" {

View File

@@ -1,91 +0,0 @@
/*
* Copyright (c) 2015, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* \brief Naive dynamic shuffles.
*
* These are written with the assumption that the provided masks are sparsely
* populated and never contain more than 32 on bits. Other implementations will
* be faster and actually correct if these assumptions don't hold true.
*/
#ifndef SHUFFLE_H
#define SHUFFLE_H
#include "config.h"
#include "bitutils.h"
#include "simd_utils.h"
#include "ue2common.h"
#if defined(__BMI2__) || (defined(_WIN32) && defined(__AVX2__))
#define HAVE_PEXT
#endif
static really_inline
u32 shuffleDynamic32(u32 x, u32 mask) {
#if defined(HAVE_PEXT)
// Intel BMI2 can do this operation in one instruction.
return _pext_u32(x, mask);
#else
u32 result = 0, num = 1;
while (mask != 0) {
u32 bit = findAndClearLSB_32(&mask);
if (x & (1U << bit)) {
assert(num != 0); // more than 32 bits!
result |= num;
}
num <<= 1;
}
return result;
#endif
}
static really_inline
u32 shuffleDynamic64(u64a x, u64a mask) {
#if defined(HAVE_PEXT) && defined(ARCH_64_BIT)
// Intel BMI2 can do this operation in one instruction.
return _pext_u64(x, mask);
#else
u32 result = 0, num = 1;
while (mask != 0) {
u32 bit = findAndClearLSB_64(&mask);
if (x & (1ULL << bit)) {
assert(num != 0); // more than 32 bits!
result |= num;
}
num <<= 1;
}
return result;
#endif
}
#undef HAVE_PEXT
#endif // SHUFFLE_H
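
The deleted shuffle.h above is a scalar fallback for BMI2 PEXT: the bits of x selected by mask are packed contiguously into the low bits of the result. As a quick illustration of that behaviour, here is a minimal standalone sketch with a reference loop and made-up test values (illustrative only, not part of this commit):

// Reference behaviour of shuffleDynamic32(x, mask), i.e. a scalar PEXT.
// The values below are invented purely for illustration.
#include <assert.h>
#include <stdint.h>

static uint32_t pext32_ref(uint32_t x, uint32_t mask) {
    uint32_t result = 0;
    uint32_t out_bit = 1;
    while (mask) {
        uint32_t lsb = mask & -mask;   // lowest set bit of the mask
        if (x & lsb) {
            result |= out_bit;         // pack it into the next result bit
        }
        out_bit <<= 1;
        mask &= mask - 1;              // clear that mask bit
    }
    return result;
}

int main(void) {
    // mask 0x92 selects bits 1, 4 and 7; they land in result bits 0, 1 and 2.
    assert(pext32_ref(0x92, 0x92) == 0x7);  // all three selected bits set
    assert(pext32_ref(0x10, 0x92) == 0x2);  // only bit 4 set -> result bit 1
    return 0;
}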

View File

@@ -1,79 +0,0 @@
/*
* Copyright (c) 2015, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SHUFFLE_SSSE3_H
#define SHUFFLE_SSSE3_H
#include "simd_utils_ssse3.h"
#ifdef DEBUG
#include "compare.h"
static really_inline void shufDumpMsk(m128 msk) {
u8 * mskAsU8 = (u8 *)&msk;
for (int i = 0; i < 16; i++) {
u8 c = mskAsU8[i];
for (int j = 0; j < 8; j++) {
if ((c >> (7-j)) & 0x1)
printf("1");
else
printf("0");
}
printf(" ");
}
}
static really_inline void shufDumpMskAsChars(m128 msk) {
u8 * mskAsU8 = (u8 *)&msk;
for (int i = 0; i < 16; i++) {
u8 c = mskAsU8[i];
if (ourisprint(c))
printf("%c",c);
else
printf(".");
}
}
#endif
#if !defined(NO_SSSE3)
static really_inline
u32 shufflePshufb128(m128 s, const m128 permute, const m128 compare) {
m128 shuffled = pshufb(s, permute);
m128 compared = and128(shuffled, compare);
#ifdef DEBUG
printf("State: "); shufDumpMsk(s); printf("\n");
printf("Permute: "); shufDumpMsk(permute); printf("\n");
printf("Compare: "); shufDumpMsk(compare); printf("\n");
printf("Shuffled: "); shufDumpMsk(shuffled); printf("\n");
printf("Compared: "); shufDumpMsk(compared); printf("\n");
#endif
u16 rv = ~cmpmsk8(compared, shuffled);
return (u32)rv;
}
#endif // NO_SSSE3
#endif // SHUFFLE_SSSE3_H
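
The shufflePshufb128 helper removed above leans on SSSE3 PSHUFB, which treats its second operand as a per-byte selector: the low four bits of each control byte pick a source lane, and a set high bit zeroes that lane. A small standalone sketch of just that primitive, with invented data, built with SSSE3 enabled (e.g. -mssse3) and not taken from this commit:

// PSHUFB in isolation: reverse the first four bytes, zero the rest.
#include <stdio.h>
#include <tmmintrin.h>

int main(void) {
    __m128i src = _mm_setr_epi8('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
                                'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p');
    // Control bytes 3,2,1,0 select lanes in reverse; 0x80 (high bit set)
    // forces the corresponding output lane to zero.
    __m128i ctrl = _mm_setr_epi8(3, 2, 1, 0,
                                 (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                 (char)0x80, (char)0x80, (char)0x80, (char)0x80,
                                 (char)0x80, (char)0x80, (char)0x80, (char)0x80);
    char out[17] = {0};
    _mm_storeu_si128((__m128i *)out, _mm_shuffle_epi8(src, ctrl));
    printf("%s\n", out);   // prints "dcba"; the zeroed lanes terminate the string
    return 0;
}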

View File

@@ -26,7 +26,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "simd_utils_ssse3.h"
#include "simd_utils.h"
const char vbs_mask_data[] ALIGN_CL_DIRECTIVE = {
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,

View File

@@ -33,6 +33,10 @@
#ifndef SIMD_UTILS
#define SIMD_UTILS
#if !defined(_WIN32) && !defined(__SSSE3__)
#error SSSE3 instructions must be enabled
#endif
#include "config.h"
#include <string.h> // for memcpy
@@ -93,6 +97,14 @@
#define assume_aligned(x, y) (x)
#endif
#ifdef __cplusplus
extern "C" {
#endif
extern const char vbs_mask_data[];
#ifdef __cplusplus
}
#endif
static really_inline m128 ones128(void) {
#if !defined(NO_ASM)
// trick from Intel's optimization guide to generate all-ones. We have to
@@ -160,7 +172,6 @@ static really_inline unsigned short cmpmsk8(m128 a, m128 b) {
#define eq128(a, b) _mm_cmpeq_epi8((a), (b))
#define movemask128(a) ((u32)_mm_movemask_epi8((a)))
// We found that this generated better code with gcc-4.1 and with the default
// tuning settings on gcc-4.4 than just using the _mm_set1_epi8() intrinsic.
static really_inline m128 set16x8(u8 c) {
@@ -318,6 +329,36 @@ char testbit128(const m128 *ptr, unsigned int n) {
return !!(bytes[n / 8] & (1 << (n % 8)));
}
// offset must be an immediate
#define palignr(r, l, offset) _mm_alignr_epi8(r, l, offset)
static really_inline
m128 pshufb(m128 a, m128 b) {
m128 result;
result = _mm_shuffle_epi8(a, b);
return result;
}
static really_inline
m256 vpshufb(m256 a, m256 b) {
#if defined(__AVX2__)
return _mm256_shuffle_epi8(a, b);
#else
m256 rv;
rv.lo = pshufb(a.lo, b.lo);
rv.hi = pshufb(a.hi, b.hi);
return rv;
#endif
}
static really_inline
m128 variable_byte_shift_m128(m128 in, s32 amount) {
assert(amount >= -16 && amount <= 16);
m128 shift_mask = loadu128(vbs_mask_data + 16 - amount);
return pshufb(in, shift_mask);
}
/****
**** 256-bit Primitives
****/
@@ -735,6 +776,7 @@ m256 shift256Left8Bits(m256 a) {
#define extractlow32from256(a) movd(cast256to128(a))
#define interleave256hi(a, b) _mm256_unpackhi_epi8(a, b);
#define interleave256lo(a, b) _mm256_unpacklo_epi8(a, b);
#define vpalignr(r, l, offset) _mm256_alignr_epi8(r, l, offset)
#endif //AVX2
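
The variable_byte_shift_m128 added to simd_utils.h above combines PSHUFB with the 48-byte vbs_mask_data table: loading the control mask at offset 16 - amount slides an identity selector past zeroing bytes, so a byte shift with zero fill becomes a single shuffle. A rough standalone sketch of the same trick with a local stand-in table (the exact layout of vbs_mask_data is assumed here, not copied from the source):

// Variable byte shift via PSHUFB and a sliding 48-byte control table.
// shift_table is a local stand-in for the vbs_mask_data idea, not the
// library's actual data.
#include <stdint.h>
#include <tmmintrin.h>

static const uint8_t shift_table[48] = {
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,   // zero-fill selectors
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
    0, 1, 2, 3, 4, 5, 6, 7,                            // identity selectors
    8, 9, 10, 11, 12, 13, 14, 15,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,   // zero-fill selectors
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
};

// amount in [-16, 16]; positive moves bytes toward higher lanes, zero-filling.
static __m128i byte_shift(__m128i in, int amount) {
    const __m128i ctrl =
        _mm_loadu_si128((const __m128i *)(shift_table + 16 - amount));
    return _mm_shuffle_epi8(in, ctrl);
}

int main(void) {
    __m128i v = _mm_setr_epi8(1, 2, 3, 4, 5, 6, 7, 8,
                              9, 10, 11, 12, 13, 14, 15, 16);
    __m128i shifted = byte_shift(v, 2);   // lane 2 now holds 1; lanes 0-1 are 0
    uint8_t out[16];
    _mm_storeu_si128((__m128i *)out, shifted);
    return out[2] == 1 ? 0 : 1;
}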

View File

@@ -1,166 +0,0 @@
/*
* Copyright (c) 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \file
* \brief SIMD primitives specifically for Intel SSSE3 platforms.
*/
#ifndef SIMD_UTILS_SSSE3_H_E27DF795C9AA02
#define SIMD_UTILS_SSSE3_H_E27DF795C9AA02
#if !defined(_WIN32) && !defined(__SSSE3__)
#error SSSE3 instructions must be enabled
#endif
#include "simd_utils.h"
#include "ue2common.h"
// we may already have x86intrin.h
#if !defined(USE_X86INTRIN_H)
#if defined(HAVE_C_INTRIN_H)
#include <intrin.h>
#elif defined(HAVE_TMMINTRIN_H)
#include <tmmintrin.h> // SSSE3 intrinsics
#else
#define I_HAVE_BROKEN_INTRINSICS
#endif
#endif
#if !defined(I_HAVE_BROKEN_INTRINSICS)
// newish compilers get this right
#define palignr(r, l, offset) _mm_alignr_epi8(r, l, offset)
#else
// must be inline, even in weak-sauce debug builds.
// oldish compilers either don't have the intrinsic, or force one arg through memory
static really_really_inline
m128 palignr(m128 r, m128 l, const int offset) {
__asm__ ("palignr %2,%1,%0" : "+x"(r) : "x"(l), "i"(offset));
return r;
}
#endif
static really_inline
m128 pshufb(m128 a, m128 b) {
m128 result;
#if !defined(I_HAVE_BROKEN_INTRINSICS)
result = _mm_shuffle_epi8(a, b);
#else
__asm__("pshufb\t%1,%0" : "=x"(result) : "xm"(b), "0"(a));
#endif
return result;
}
#ifdef __cplusplus
extern "C" {
#endif
extern const char vbs_mask_data[];
#ifdef __cplusplus
}
#endif
static really_inline
m128 variable_byte_shift_m128(m128 in, s32 amount) {
assert(amount >= -16 && amount <= 16);
m128 shift_mask = loadu128(vbs_mask_data + 16 - amount);
return pshufb(in, shift_mask);
}
#if defined(__AVX2__)
static really_inline
m256 vpshufb(m256 a, m256 b) {
return _mm256_shuffle_epi8(a, b);
}
#if defined(USE_GCC_COMPOUND_STATEMENTS)
#define vpalignr(r, l, offset) ({ \
m256 res = _mm256_alignr_epi8(r, l, offset); \
res; \
})
#else
#define vpalignr(r, l, offset) _mm256_alignr_epi8(r, l, offset)
#endif
#else // not __AVX2__
static really_inline
m256 vpshufb(m256 a, m256 b) {
m256 rv;
rv.lo = pshufb(a.lo, b.lo);
rv.hi = pshufb(a.hi, b.hi);
return rv;
}
/* palignr requires the offset to be an immediate, which we can do with a
* compound macro, otherwise we have to enumerate the offsets and hope the
* compiler can throw the rest away. */
#if defined(USE_GCC_COMPOUND_STATEMENTS)
#define vpalignr(r, l, offset) ({ \
m256 res; \
res.lo = palignr(r.lo, l.lo, offset); \
res.hi = palignr(r.hi, l.hi, offset); \
res; \
})
#else
#define VPALIGN_CASE(N) case N: \
res.lo = palignr(r.lo, l.lo, N); \
res.hi = palignr(r.hi, l.hi, N); \
return res;
static really_inline
m256 vpalignr(m256 r, m256 l, const int offset) {
m256 res;
switch (offset) {
VPALIGN_CASE(0)
VPALIGN_CASE(1)
VPALIGN_CASE(2)
VPALIGN_CASE(3)
VPALIGN_CASE(4)
VPALIGN_CASE(5)
VPALIGN_CASE(6)
VPALIGN_CASE(7)
VPALIGN_CASE(8)
VPALIGN_CASE(9)
VPALIGN_CASE(10)
VPALIGN_CASE(11)
VPALIGN_CASE(12)
VPALIGN_CASE(13)
VPALIGN_CASE(14)
VPALIGN_CASE(15)
default:
assert(0);
return zeroes256();
}
}
#undef VPALIGN_CASE
#endif
#endif // __AVX2__
#endif /* SIMD_UTILS_SSSE3_H_E27DF795C9AA02 */
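
The palignr/vpalignr wrappers above go to some trouble to keep the offset a compile-time immediate because that is what the underlying PALIGNR instruction requires: it concatenates two vectors and extracts a 16-byte window at a fixed byte offset, which is handy for re-aligning data that straddles two consecutive loads. A small standalone usage sketch with invented data (illustrative only, not from this commit):

// PALIGNR usage: take the 16 bytes starting 5 bytes into `lo`, pulling the
// tail from `hi`. The byte offset must be a compile-time constant.
#include <stdio.h>
#include <tmmintrin.h>

int main(void) {
    char data[32];
    for (int i = 0; i < 32; i++) {
        data[i] = (char)('A' + i);                 // "ABCDEFGH..."
    }
    __m128i lo = _mm_loadu_si128((const __m128i *)data);
    __m128i hi = _mm_loadu_si128((const __m128i *)(data + 16));
    __m128i win = _mm_alignr_epi8(hi, lo, 5);      // bytes data[5] .. data[20]
    char out[17] = {0};
    _mm_storeu_si128((__m128i *)out, win);
    printf("%s\n", out);                           // prints "FGHIJKLMNOPQRSTU"
    return 0;
}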