add ARM version of simd_utils.h
This commit is contained in:
parent a9212174ee
commit 31ac6718dd

src/util/arch/arm/simd_utils.h | 288 (Normal file)

@@ -0,0 +1,288 @@
/*
 * Copyright (c) 2015-2020, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/** \file
 * \brief SIMD types and primitive operations.
 */

#ifndef ARCH_ARM_SIMD_UTILS_H
#define ARCH_ARM_SIMD_UTILS_H

#include "ue2common.h"
#include "util/simd_types.h"
#include "util/unaligned.h"
#include "util/intrinsics.h"

#include <string.h> // for memcpy

static really_inline m128 ones128(void) {
    // All bits set: dup 0xFF into every byte. (vdupq_n_s32(0xFF) would only
    // set the low byte of each 32-bit lane.)
    return (m128) vdupq_n_s8(0xFF);
}

static really_inline m128 zeroes128(void) {
    return (m128) vdupq_n_s32(0);
}

/** \brief Bitwise not for m128*/
static really_inline m128 not128(m128 a) {
    // veorq_s32(a, a) would always yield zero; bitwise NOT is vmvnq.
    return (m128) vmvnq_s32(a);
}

/** \brief Return 1 if a and b are different otherwise 0 */
static really_inline int diff128(m128 a, m128 b) {
    uint8x16_t t = vceqq_s8((int8x16_t)a, (int8x16_t)b);
    // If the vectors are equal, every lane of t is 0xff; any 0x00 lane marks
    // a difference. (A plain vaddvq_u8 would truncate 16 * 0xff to 8 bits,
    // so compare the minimum lane instead.)
    return (vminvq_u8(t) != 0xff);
}

static really_inline int isnonzero128(m128 a) {
    return !!diff128(a, zeroes128());
}

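/* NEON has no direct movemask instruction, so the "rich" diff helpers below
 * AND a per-lane compare mask with distinct power-of-two constants and then
 * horizontally add; the bits are disjoint, so the sum is equivalent to OR. */
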
/**
 * "Rich" version of diff128(). Takes two vectors a and b and returns a 4-bit
 * mask indicating which 32-bit words contain differences.
 */
static really_inline u32 diffrich128(m128 a, m128 b) {
    static const uint32x4_t movemask = { 1, 2, 4, 8 };
    // vceqq flags *equal* words, so invert it before applying the movemask.
    return vaddvq_u32(vandq_u32(
        vmvnq_u32(vceqq_s32((int32x4_t)a, (int32x4_t)b)), movemask));
}

/**
 * "Rich" version of diff128(), 64-bit variant. Takes two vectors a and b and
 * returns a 4-bit mask indicating which 64-bit words contain differences.
 */
static really_inline u32 diffrich64_128(m128 a, m128 b) {
    // Use bits 0 and 2 so the result lines up with diffrich128()'s layout of
    // one bit per 32-bit word.
    static const uint64x2_t movemask = { 1, 4 };
    // As above: invert the equality mask to flag differing words.
    uint32x4_t neq = vmvnq_u32((uint32x4_t)vceqq_s64((int64x2_t)a, (int64x2_t)b));
    return (u32) vaddvq_u64(vandq_u64((uint64x2_t)neq, movemask));
}

static really_really_inline
m128 lshift64_m128(m128 a, unsigned b) {
    // vshlq_n_s64 requires an immediate shift count; vshlq_s64 takes a
    // runtime per-lane shift vector.
    assert(b < 64);
    return (m128) vshlq_s64((int64x2_t)a, vdupq_n_s64(b));
}

static really_really_inline
m128 rshift64_m128(m128 a, unsigned b) {
    // Logical right shift: NEON expresses this as an unsigned left shift by
    // a negative amount.
    assert(b < 64);
    return (m128) vshlq_u64((uint64x2_t)a, vdupq_n_s64(-(int64_t)b));
}

static really_inline m128 eq128(m128 a, m128 b) {
    return (m128) vceqq_s8((int8x16_t)a, (int8x16_t)b);
}

static really_inline u32 movemask128(m128 a) {
    // _mm_movemask_epi8 is an x86 intrinsic with no NEON equivalent: gather
    // each byte's MSB into a 16-bit mask by ANDing with per-byte powers of
    // two and horizontally adding each half.
    static const uint8x16_t powers = { 1, 2, 4, 8, 16, 32, 64, 128,
                                       1, 2, 4, 8, 16, 32, 64, 128 };
    uint8x16_t msb = vtstq_u8((uint8x16_t)a, vdupq_n_u8(0x80));
    uint8x16_t bits = vandq_u8(msb, powers);
    return (u32) vaddv_u8(vget_low_u8(bits)) |
           ((u32) vaddv_u8(vget_high_u8(bits)) << 8);
}

static really_inline m128 set1_16x8(u8 c) {
    return (m128) vdupq_n_u8(c);
}

static really_inline m128 set1_4x32(u32 c) {
    return (m128) vdupq_n_u32(c);
}

static really_inline m128 set1_2x64(u64a c) {
    return (m128) vdupq_n_u64(c);
}

static really_inline u32 movd(const m128 in) {
    return vgetq_lane_u32((uint32x4_t) in, 0);
}

static really_inline u64a movq(const m128 in) {
    return vgetq_lane_u64((uint64x2_t) in, 0);
}

/* another form of movq */
static really_inline
m128 load_m128_from_u64a(const u64a *p) {
    // Load into the low lane and zero the high one, matching x86 movq.
    // (vdupq_n_u64 would copy the value into both lanes.)
    return (m128) vsetq_lane_u64(*p, (uint64x2_t)zeroes128(), 0);
}

static really_really_inline
m128 rshiftbyte_m128(m128 a, unsigned b) {
    // Whole-vector shift right by b bytes (cf. _mm_srli_si128), not a
    // per-lane bit shift; b must be an immediate.
    return (m128) vextq_s8((int8x16_t)a, vdupq_n_s8(0), b);
}

static really_really_inline
m128 lshiftbyte_m128(m128 a, unsigned b) {
    // Whole-vector shift left by b bytes (cf. _mm_slli_si128); b must be an
    // immediate.
    return (m128) vextq_s8(vdupq_n_s8(0), (int8x16_t)a, 16 - b);
}

static really_inline u32 extract32from128(const m128 in, unsigned imm) {
    return vgetq_lane_u32((uint32x4_t) in, imm); // imm must be an immediate
}

static really_inline u64a extract64from128(const m128 in, unsigned imm) {
    // Return the full 64-bit lane; a u32 return type would truncate it.
    return vgetq_lane_u64((uint64x2_t) in, imm); // imm must be an immediate
}

static really_inline m128 and128(m128 a, m128 b) {
    return (m128) vandq_s8((int8x16_t)a, (int8x16_t)b);
}

static really_inline m128 xor128(m128 a, m128 b) {
    return (m128) veorq_s8((int8x16_t)a, (int8x16_t)b);
}

static really_inline m128 or128(m128 a, m128 b) {
    return (m128) vorrq_s8((int8x16_t)a, (int8x16_t)b);
}

static really_inline m128 andnot128(m128 a, m128 b) {
    // x86 andnot semantics are (~a & b); vbicq computes (first & ~second),
    // so the operands must be swapped.
    return (m128) vbicq_u32((uint32x4_t)b, (uint32x4_t)a);
}

// aligned load
static really_inline m128 load128(const void *ptr) {
    assert(ISALIGNED_N(ptr, alignof(m128)));
    ptr = assume_aligned(ptr, 16);
    return (m128) vld1q_s32((const int32_t *)ptr);
}

// aligned store
static really_inline void store128(void *ptr, m128 a) {
    assert(ISALIGNED_N(ptr, alignof(m128)));
    ptr = assume_aligned(ptr, 16);
    vst1q_s32((int32_t *)ptr, a);
}

// unaligned load
static really_inline m128 loadu128(const void *ptr) {
    return (m128) vld1q_s32((const int32_t *)ptr);
}

// unaligned store
static really_inline void storeu128(void *ptr, m128 a) {
    vst1q_s32((int32_t *)ptr, a);
}

// packed unaligned store of first N bytes
static really_inline
void storebytes128(void *ptr, m128 a, unsigned int n) {
    assert(n <= sizeof(a));
    memcpy(ptr, &a, n);
}

// packed unaligned load of first N bytes, pad with zero
static really_inline
m128 loadbytes128(const void *ptr, unsigned int n) {
    m128 a = zeroes128();
    assert(n <= sizeof(a));
    memcpy(&a, ptr, n);
    return a;
}

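/* For instance, loadbytes128(p, 3) yields { p[0], p[1], p[2], 0, 0, ... }. */
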
#ifdef __cplusplus
extern "C" {
#endif
extern const u8 simd_onebit_masks[];
#ifdef __cplusplus
}
#endif

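/* mask1bit128() builds a vector with only bit n set by loading from the
 * shared simd_onebit_masks table at a computed offset, the same table-driven
 * approach the x86 header uses; the index arithmetic below relies on that
 * table's zero-padded layout. */
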
static really_inline
m128 mask1bit128(unsigned int n) {
    assert(n < sizeof(m128) * 8);
    u32 mask_idx = ((n % 8) * 64) + 95;
    mask_idx -= n / 8;
    return loadu128(&simd_onebit_masks[mask_idx]);
}

// switches on bit N in the given vector.
static really_inline
void setbit128(m128 *ptr, unsigned int n) {
    *ptr = or128(mask1bit128(n), *ptr);
}

// switches off bit N in the given vector.
static really_inline
void clearbit128(m128 *ptr, unsigned int n) {
    *ptr = andnot128(mask1bit128(n), *ptr);
}

// tests bit N in the given vector.
static really_inline
char testbit128(m128 val, unsigned int n) {
    // No SSE4.1 _mm_testz_si128 on this path; AND with the one-bit mask and
    // test for a nonzero result.
    const m128 mask = mask1bit128(n);
    return isnonzero128(and128(mask, val));
}

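/* Example usage of the bit helpers:
 *
 *     m128 v = zeroes128();
 *     setbit128(&v, 42);
 *     assert(testbit128(v, 42));
 *     clearbit128(&v, 42);
 *     assert(!isnonzero128(v));
 */
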
// offset must be an immediate; mirrors _mm_alignr_epi8(r, l, offset) on x86.
#define palignr(r, l, offset) ((m128) vextq_s8((int8x16_t)(l), (int8x16_t)(r), (offset)))

static really_inline
m128 pshufb_m128(m128 a, m128 b) {
    // _mm_shuffle_epi8 is an x86 intrinsic. NEON's vqtbl1q zeroes lanes whose
    // index is >= 16, whereas pshufb keys off bit 0x80; masking each index
    // with 0x8f maps the x86 convention onto NEON.
    int8x16_t idx = vandq_s8((int8x16_t)b, vdupq_n_s8(0x8f));
    return (m128) vqtbl1q_s8((int8x16_t)a, (uint8x16_t)idx);
}

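/* variable_byte_shift_m128() shifts a whole vector by `amount` bytes by
 * selecting a 16-byte window from the vbs_mask_data table as a shuffle mask;
 * out-of-range indices in the mask make pshufb_m128() fill with zero bytes. */
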
static really_inline
m128 variable_byte_shift_m128(m128 in, s32 amount) {
    assert(amount >= -16 && amount <= 16);
    m128 shift_mask = loadu128(vbs_mask_data + 16 - amount);
    return pshufb_m128(in, shift_mask);
}

static really_inline
m128 max_u8_m128(m128 a, m128 b) {
    // Unsigned max, as the name says; vmaxq_s8 would compare signed.
    return (m128) vmaxq_u8((uint8x16_t)a, (uint8x16_t)b);
}

static really_inline
m128 min_u8_m128(m128 a, m128 b) {
    return (m128) vminq_u8((uint8x16_t)a, (uint8x16_t)b);
}

static really_inline
m128 sadd_u8_m128(m128 a, m128 b) {
    return (m128) vqaddq_u8((uint8x16_t)a, (uint8x16_t)b);
}

static really_inline
m128 sub_u8_m128(m128 a, m128 b) {
    return (m128) vsubq_u8((uint8x16_t)a, (uint8x16_t)b);
}

static really_inline
m128 set4x32(u32 x3, u32 x2, u32 x1, u32 x0) {
    // vld1q loads data[0] into lane 0, so store the arguments low-to-high to
    // match _mm_set_epi32(x3, x2, x1, x0).
    uint32_t __attribute__((aligned(16))) data[4] = { x0, x1, x2, x3 };
    return (m128) vld1q_u32((uint32_t *) data);
}

static really_inline
m128 set2x64(u64a hi, u64a lo) {
    uint64_t __attribute__((aligned(16))) data[2] = { lo, hi };
    return (m128) vld1q_u64((uint64_t *) data);
}

#endif // ARCH_ARM_SIMD_UTILS_H
@@ -63,6 +63,8 @@ extern const char vbs_mask_data[];

 #if defined(ARCH_IA32) || defined(ARCH_X86_64)
 #include "util/arch/x86/simd_utils.h"
+#elif defined(ARCH_ARM32) || defined(ARCH_AARCH64)
+#include "util/arch/arm/simd_utils.h"
 #endif

 #endif // SIMD_UTILS_H
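With this change, including "util/simd_utils.h" selects the x86 or ARM backend automatically and callers keep using the portable m128 API. A minimal sketch (illustrative only; `buf` is a hypothetical 16-byte buffer):

    m128 spaces = set1_16x8(0x20);
    m128 chunk  = loadu128(buf);
    u32 mask    = movemask128(eq128(chunk, spaces)); // bit i set => buf[i] == ' '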