mirror of https://github.com/VectorCamp/vectorscan.git, synced 2025-06-28 16:41:01 +03:00

AVX512VBMI Fat Teddy.

commit 00b697bb3b, parent 007117146c
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015-2017, Intel Corporation
 * Copyright (c) 2015-2020, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:

@@ -107,6 +107,25 @@ void dumpTeddyReinforced(const u8 *rmsk, const u32 num_tables, FILE *f) {
    }
}

static
void dumpTeddyDupMasks(const u8 *dmsk, u32 numMasks, FILE *f) {
    // dump nibble masks
    u32 maskWidth = 2;
    fprintf(f, " dup nibble masks:\n");
    for (u32 i = 0; i < numMasks * 2; i++) {
        fprintf(f, " -%d%s: ", 1 + i / 2, (i % 2) ? "hi" : "lo");
        for (u32 j = 0; j < 16 * maskWidth * 2; j++) {
            u8 val = dmsk[i * 16 * maskWidth * 2 + j];
            for (u32 k = 0; k < 8; k++) {
                fprintf(f, "%s", ((val >> k) & 0x1) ? "1" : "0");
            }
            fprintf(f, " ");
        }
        fprintf(f, "\n");
    }
    fprintf(f, "\n");
}

static
void dumpTeddyMasks(const u8 *baseMsk, u32 numMasks, u32 maskWidth, FILE *f) {
    // dump nibble masks
@@ -146,12 +165,17 @@ void dumpTeddy(const Teddy *teddy, FILE *f) {

    u32 maskWidth = des->getNumBuckets() / 8;
    size_t headerSize = sizeof(Teddy);
    size_t maskLen = des->numMasks * 16 * 2 * maskWidth;
    const u8 *teddy_base = (const u8 *)teddy;
    const u8 *baseMsk = teddy_base + ROUNDUP_CL(headerSize);
    const u8 *rmsk = baseMsk + ROUNDUP_CL(maskLen);
    dumpTeddyMasks(baseMsk, des->numMasks, maskWidth, f);
    dumpTeddyReinforced(rmsk, maskWidth, f);
    size_t maskLen = des->numMasks * 16 * 2 * maskWidth;
    const u8 *rdmsk = baseMsk + ROUNDUP_CL(maskLen);
    if (maskWidth == 1) { // reinforcement table in Teddy
        dumpTeddyReinforced(rdmsk, maskWidth, f);
    } else { // dup nibble mask table in Fat Teddy
        assert(maskWidth == 2);
        dumpTeddyDupMasks(rdmsk, des->numMasks, f);
    }
    dumpConfirms(teddy, teddy->confOffset, des->getNumBuckets(), f);
}

@@ -284,14 +284,6 @@ m512 prep_conf_teddy_m4(const m512 *lo_mask, const m512 *dup_mask,
#define PREP_CONF_FN(val, n) \
    prep_conf_teddy_m##n(&lo_mask, dup_mask, sl_msk, val)

const u8 ALIGN_DIRECTIVE p_sh_mask_arr[80] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
    0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
};

#define TEDDY_VBMI_SL1_POS 15
#define TEDDY_VBMI_SL2_POS 14
#define TEDDY_VBMI_SL3_POS 13

@@ -109,6 +109,36 @@ const u8 ALIGN_AVX_DIRECTIVE p_mask_arr256[33][64] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
};

#if defined(HAVE_AVX512VBMI) // VBMI strong fat teddy

#define CONF_FAT_CHUNK_64(chunk, bucket, off, reason, pt, conf_fn) \
do { \
    if (unlikely(chunk != ones_u64a)) { \
        chunk = ~chunk; \
        conf_fn(&chunk, bucket, off, confBase, reason, a, pt, \
                &control, &last_match); \
        CHECK_HWLM_TERMINATE_MATCHING; \
    } \
} while(0)

#define CONF_FAT_CHUNK_32(chunk, bucket, off, reason, pt, conf_fn) \
do { \
    if (unlikely(chunk != ones_u32)) { \
        chunk = ~chunk; \
        conf_fn(&chunk, bucket, off, confBase, reason, a, pt, \
                &control, &last_match); \
        CHECK_HWLM_TERMINATE_MATCHING; \
    } \
} while(0)

static really_inline
const m512 *getDupMaskBase(const struct Teddy *teddy, u8 numMask) {
    return (const m512 *)((const u8 *)teddy + ROUNDUP_CL(sizeof(struct Teddy))
                          + ROUNDUP_CL(2 * numMask * sizeof(m256)));
}
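As a rough illustration of the layout getDupMaskBase walks over (Teddy header, then 2 * numMask 256-bit nibble masks, then the dup mask table, each region rounded up to a cacheline), here is a minimal standalone sketch; the ROUNDUP_CL stand-in assumes 64-byte cachelines and the header size is a placeholder, not the real sizeof(struct Teddy):

#include <stdio.h>

/* stand-in for the library's cacheline rounding, assuming 64-byte lines */
#define MY_ROUNDUP_CL(x) (((x) + 63ULL) & ~63ULL)

int main(void) {
    unsigned long long headerSize = 128; /* placeholder for sizeof(struct Teddy) */
    for (unsigned numMask = 1; numMask <= 4; numMask++) {
        /* offset of the dup nibble mask table: header, then the
         * 2 * numMask m256 nibble masks, each cacheline-rounded */
        unsigned long long dupOff = MY_ROUNDUP_CL(headerSize)
                                  + MY_ROUNDUP_CL(2ULL * numMask * 32);
        printf("numMask=%u -> dup mask table at offset %llu\n", numMask, dupOff);
    }
    return 0;
}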

#else

#define CONF_FAT_CHUNK_64(chunk, bucket, off, reason, conf_fn) \
do { \
    if (unlikely(chunk != ones_u64a)) { \

@@ -134,203 +164,200 @@ const m256 *getMaskBase_fat(const struct Teddy *teddy) {
    return (const m256 *)((const u8 *)teddy + ROUNDUP_CL(sizeof(struct Teddy)));
}

#if defined(HAVE_AVX512_REVERT) // revert to AVX2 Fat Teddy
#endif

static really_inline
const u64a *getReinforcedMaskBase_fat(const struct Teddy *teddy, u8 numMask) {
    return (const u64a *)((const u8 *)getMaskBase_fat(teddy)
                          + ROUNDUP_CL(2 * numMask * sizeof(m256)));
}
#if defined(HAVE_AVX512VBMI) // VBMI strong fat teddy

const u8 ALIGN_AVX_DIRECTIVE p_mask_interleave[64] = {
    0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
    8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
    16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55,
    24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63
};
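The point of this table: byte i of the low 256-bit half of the confirm vector holds the result for buckets 0..7 at position i, while byte i of the high half holds buckets 8..15 for the same position. vpermb512 with these indices interleaves the halves so each position's two result bytes land side by side, replacing the old swap256in512 plus interleave512lo/hi sequence with a single permute. A plain-C model of the permutation (r[i] = var[idx[i]]), a sketch of the semantics rather than the intrinsic itself:

#include <stdio.h>

static const unsigned char idx[64] = {
     0, 32,  1, 33,  2, 34,  3, 35,  4, 36,  5, 37,  6, 38,  7, 39,
     8, 40,  9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
    16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55,
    24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63
};

int main(void) {
    unsigned char var[64], r[64];
    for (int i = 0; i < 64; i++) {
        var[i] = (unsigned char)i; /* byte i: result for position i % 32 */
    }
    for (int i = 0; i < 64; i++) {
        r[i] = var[idx[i]]; /* vpermb semantics */
    }
    for (int i = 0; i < 8; i += 2) {
        /* each position now yields an adjacent (buckets 0..7, buckets 8..15) pair */
        printf("pos %d -> bytes (%d, %d)\n", i / 2, r[i], r[i + 1]);
    }
    return 0;
}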

#ifdef ARCH_64_BIT
#define CONFIRM_FAT_TEDDY(var, bucket, offset, reason, conf_fn) \
#define CONFIRM_FAT_TEDDY(var, bucket, offset, reason, pt, conf_fn) \
do { \
    if (unlikely(diff512(var, ones512()))) { \
        m512 swap = swap256in512(var); \
        m512 r = interleave512lo(var, swap); \
        m512 msk_interleave = load512(p_mask_interleave); \
        m512 r = vpermb512(msk_interleave, var); \
        m128 r0 = extract128from512(r, 0); \
        m128 r1 = extract128from512(r, 1); \
        m128 r2 = extract128from512(r, 2); \
        m128 r3 = extract128from512(r, 3); \
        u64a part1 = movq(r0); \
        u64a part2 = extract64from128(r0, 1); \
        u64a part5 = movq(r1); \
        u64a part6 = extract64from128(r1, 1); \
        r = interleave512hi(var, swap); \
        r0 = extract128from512(r, 0); \
        r1 = extract128from512(r, 1); \
        u64a part3 = movq(r0); \
        u64a part4 = extract64from128(r0, 1); \
        u64a part7 = movq(r1); \
        u64a part8 = extract64from128(r1, 1); \
        CONF_FAT_CHUNK_64(part1, bucket, offset, reason, conf_fn); \
        CONF_FAT_CHUNK_64(part2, bucket, offset + 4, reason, conf_fn); \
        CONF_FAT_CHUNK_64(part3, bucket, offset + 8, reason, conf_fn); \
        CONF_FAT_CHUNK_64(part4, bucket, offset + 12, reason, conf_fn); \
        CONF_FAT_CHUNK_64(part5, bucket, offset + 16, reason, conf_fn); \
        CONF_FAT_CHUNK_64(part6, bucket, offset + 20, reason, conf_fn); \
        CONF_FAT_CHUNK_64(part7, bucket, offset + 24, reason, conf_fn); \
        CONF_FAT_CHUNK_64(part8, bucket, offset + 28, reason, conf_fn); \
        u64a part3 = movq(r1); \
        u64a part4 = extract64from128(r1, 1); \
        u64a part5 = movq(r2); \
        u64a part6 = extract64from128(r2, 1); \
        u64a part7 = movq(r3); \
        u64a part8 = extract64from128(r3, 1); \
        CONF_FAT_CHUNK_64(part1, bucket, offset, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_64(part2, bucket, offset + 4, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_64(part3, bucket, offset + 8, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_64(part4, bucket, offset + 12, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_64(part5, bucket, offset + 16, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_64(part6, bucket, offset + 20, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_64(part7, bucket, offset + 24, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_64(part8, bucket, offset + 28, reason, pt, conf_fn); \
    } \
} while(0)
#else
#define CONFIRM_FAT_TEDDY(var, bucket, offset, reason, conf_fn) \
#define CONFIRM_FAT_TEDDY(var, bucket, offset, reason, pt, conf_fn) \
do { \
    if (unlikely(diff512(var, ones512()))) { \
        m512 swap = swap256in512(var); \
        m512 r = interleave512lo(var, swap); \
        m512 msk_interleave = load512(p_mask_interleave); \
        m512 r = vpermb512(msk_interleave, var); \
        m128 r0 = extract128from512(r, 0); \
        m128 r1 = extract128from512(r, 1); \
        m128 r2 = extract128from512(r, 2); \
        m128 r3 = extract128from512(r, 3); \
        u32 part1 = movd(r0); \
        u32 part2 = extract32from128(r0, 1); \
        u32 part3 = extract32from128(r0, 2); \
        u32 part4 = extract32from128(r0, 3); \
        u32 part9 = movd(r1); \
        u32 part10 = extract32from128(r1, 1); \
        u32 part11 = extract32from128(r1, 2); \
        u32 part12 = extract32from128(r1, 3); \
        r = interleave512hi(var, swap); \
        r0 = extract128from512(r, 0); \
        r1 = extract128from512(r, 1); \
        u32 part5 = movd(r0); \
        u32 part6 = extract32from128(r0, 1); \
        u32 part7 = extract32from128(r0, 2); \
        u32 part8 = extract32from128(r0, 3); \
        u32 part13 = movd(r1); \
        u32 part14 = extract32from128(r1, 1); \
        u32 part15 = extract32from128(r1, 2); \
        u32 part16 = extract32from128(r1, 3); \
        CONF_FAT_CHUNK_32(part1, bucket, offset, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part2, bucket, offset + 2, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part3, bucket, offset + 4, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part4, bucket, offset + 6, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part5, bucket, offset + 8, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part6, bucket, offset + 10, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part7, bucket, offset + 12, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part8, bucket, offset + 14, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part9, bucket, offset + 16, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part10, bucket, offset + 18, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part11, bucket, offset + 20, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part12, bucket, offset + 22, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part13, bucket, offset + 24, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part14, bucket, offset + 26, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part15, bucket, offset + 28, reason, conf_fn); \
        CONF_FAT_CHUNK_32(part16, bucket, offset + 30, reason, conf_fn); \
        u32 part5 = movd(r1); \
        u32 part6 = extract32from128(r1, 1); \
        u32 part7 = extract32from128(r1, 2); \
        u32 part8 = extract32from128(r1, 3); \
        u32 part9 = movd(r2); \
        u32 part10 = extract32from128(r2, 1); \
        u32 part11 = extract32from128(r2, 2); \
        u32 part12 = extract32from128(r2, 3); \
        u32 part13 = movd(r3); \
        u32 part14 = extract32from128(r3, 1); \
        u32 part15 = extract32from128(r3, 2); \
        u32 part16 = extract32from128(r3, 3); \
        CONF_FAT_CHUNK_32(part1, bucket, offset, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_32(part2, bucket, offset + 2, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_32(part3, bucket, offset + 4, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_32(part4, bucket, offset + 6, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_32(part5, bucket, offset + 8, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_32(part6, bucket, offset + 10, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_32(part7, bucket, offset + 12, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_32(part8, bucket, offset + 14, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_32(part9, bucket, offset + 16, reason, pt, conf_fn); \
        CONF_FAT_CHUNK_32(part10, bucket, offset + 18, reason, pt, conf_fn);\
        CONF_FAT_CHUNK_32(part11, bucket, offset + 20, reason, pt, conf_fn);\
        CONF_FAT_CHUNK_32(part12, bucket, offset + 22, reason, pt, conf_fn);\
        CONF_FAT_CHUNK_32(part13, bucket, offset + 24, reason, pt, conf_fn);\
        CONF_FAT_CHUNK_32(part14, bucket, offset + 26, reason, pt, conf_fn);\
        CONF_FAT_CHUNK_32(part15, bucket, offset + 28, reason, pt, conf_fn);\
        CONF_FAT_CHUNK_32(part16, bucket, offset + 30, reason, pt, conf_fn);\
    } \
} while(0)
#endif

static really_inline
m512 vectoredLoad2x256(m512 *p_mask, const u8 *ptr, const size_t start_offset,
                       const u8 *lo, const u8 *hi,
                       const u8 *buf_history, size_t len_history,
                       const u32 nMasks) {
    m256 p_mask256;
    m512 ret = set2x256(vectoredLoad256(&p_mask256, ptr, start_offset, lo, hi,
                                        buf_history, len_history, nMasks));
    *p_mask = set2x256(p_mask256);
    return ret;
}

#define PREP_FAT_SHUF_MASK_NO_REINFORCEMENT(val) \
#define PREP_FAT_SHUF_MASK \
    m512 lo = and512(val, *lo_mask); \
    m512 hi = and512(rshift64_m512(val, 4), *lo_mask)

#define PREP_FAT_SHUF_MASK \
    PREP_FAT_SHUF_MASK_NO_REINFORCEMENT(set2x256(load256(ptr))); \
    *c_16 = *(ptr + 15); \
    m512 r_msk = set512_64(0ULL, r_msk_base_hi[*c_16], \
                           0ULL, r_msk_base_hi[*c_0], \
                           0ULL, r_msk_base_lo[*c_16], \
                           0ULL, r_msk_base_lo[*c_0]); \
    *c_0 = *(ptr + 31)
#define FAT_TEDDY_VBMI_PSHUFB_OR_M1 \
    m512 shuf_or_b0 = or512(pshufb_m512(dup_mask[0], lo), \
                            pshufb_m512(dup_mask[1], hi));

#define FAT_TEDDY_VBMI_PSHUFB_OR_M2 \
    FAT_TEDDY_VBMI_PSHUFB_OR_M1 \
    m512 shuf_or_b1 = or512(pshufb_m512(dup_mask[2], lo), \
                            pshufb_m512(dup_mask[3], hi));

#define FAT_TEDDY_VBMI_PSHUFB_OR_M3 \
    FAT_TEDDY_VBMI_PSHUFB_OR_M2 \
    m512 shuf_or_b2 = or512(pshufb_m512(dup_mask[4], lo), \
                            pshufb_m512(dup_mask[5], hi));

#define FAT_TEDDY_VBMI_PSHUFB_OR_M4 \
    FAT_TEDDY_VBMI_PSHUFB_OR_M3 \
    m512 shuf_or_b3 = or512(pshufb_m512(dup_mask[6], lo), \
                            pshufb_m512(dup_mask[7], hi));

#define FAT_TEDDY_VBMI_SL1_MASK 0xfffffffefffffffeULL
#define FAT_TEDDY_VBMI_SL2_MASK 0xfffffffcfffffffcULL
#define FAT_TEDDY_VBMI_SL3_MASK 0xfffffff8fffffff8ULL

#define FAT_TEDDY_VBMI_SHIFT_M1

#define FAT_TEDDY_VBMI_SHIFT_M2 \
    FAT_TEDDY_VBMI_SHIFT_M1 \
    m512 sl1 = maskz_vpermb512(FAT_TEDDY_VBMI_SL1_MASK, sl_msk[0], shuf_or_b1);

#define FAT_TEDDY_VBMI_SHIFT_M3 \
    FAT_TEDDY_VBMI_SHIFT_M2 \
    m512 sl2 = maskz_vpermb512(FAT_TEDDY_VBMI_SL2_MASK, sl_msk[1], shuf_or_b2);

#define FAT_TEDDY_VBMI_SHIFT_M4 \
    FAT_TEDDY_VBMI_SHIFT_M3 \
    m512 sl3 = maskz_vpermb512(FAT_TEDDY_VBMI_SL3_MASK, sl_msk[2], shuf_or_b3);
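These SL masks plus p_sh_mask_arr implement "shift the per-bucket results left by k bytes within each 256-bit half": loading p_sh_mask_arr at offset 15/14/13 yields permute indices {0, ..., 0, 0, 1, 2, ...}, and the zeroing k-mask blanks the low k lanes of each half so no byte is pulled across the half boundary. A plain-C model of maskz_vpermb512 to make that concrete (a sketch of the semantics, not the intrinsic):

#include <stdio.h>
#include <string.h>

/* plain-C model: r[i] = (kmask bit i set) ? src[idx[i]] : 0 */
static void maskz_vpermb(unsigned char *r, unsigned long long kmask,
                         const unsigned char *idx, const unsigned char *src) {
    for (int i = 0; i < 64; i++) {
        r[i] = ((kmask >> i) & 1) ? src[idx[i]] : 0;
    }
}

int main(void) {
    unsigned char p_sh[80], idx[64], src[64], r[64];
    memset(p_sh, 0, 16);
    for (int i = 0; i < 64; i++) {
        p_sh[16 + i] = (unsigned char)i; /* 0x00 .. 0x3f */
        src[i] = (unsigned char)(i + 1); /* recognisable payload */
    }
    memcpy(idx, p_sh + 15, 64);          /* SL1_POS == 15 -> idx = {0, 0, 1, ..., 62} */
    maskz_vpermb(r, 0xfffffffefffffffeULL, idx, src); /* SL1_MASK zeroes lanes 0 and 32 */
    /* each 256-bit half is shifted by one byte toward higher lanes, zero-filled */
    printf("r[0]=%d r[1]=%d r[32]=%d r[33]=%d\n", r[0], r[1], r[32], r[33]);
    return 0;
}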

#define FAT_SHIFT_OR_M1 \
    or512(pshufb_m512(dup_mask[0], lo), pshufb_m512(dup_mask[1], hi))
    shuf_or_b0

#define FAT_SHIFT_OR_M2 \
    or512(lshift128_m512(or512(pshufb_m512(dup_mask[2], lo), \
                               pshufb_m512(dup_mask[3], hi)), \
                         1), FAT_SHIFT_OR_M1)
    or512(sl1, FAT_SHIFT_OR_M1)

#define FAT_SHIFT_OR_M3 \
    or512(lshift128_m512(or512(pshufb_m512(dup_mask[4], lo), \
                               pshufb_m512(dup_mask[5], hi)), \
                         2), FAT_SHIFT_OR_M2)
    or512(sl2, FAT_SHIFT_OR_M2)

#define FAT_SHIFT_OR_M4 \
    or512(lshift128_m512(or512(pshufb_m512(dup_mask[6], lo), \
                               pshufb_m512(dup_mask[7], hi)), \
                         3), FAT_SHIFT_OR_M3)
    or512(sl3, FAT_SHIFT_OR_M3)

static really_inline
m512 prep_conf_fat_teddy_no_reinforcement_m1(const m512 *lo_mask,
                                             const m512 *dup_mask,
                                             const m512 val) {
    PREP_FAT_SHUF_MASK_NO_REINFORCEMENT(val);
m512 prep_conf_fat_teddy_m1(const m512 *lo_mask, const m512 *dup_mask,
                            UNUSED const m512 *sl_msk, const m512 val) {
    PREP_FAT_SHUF_MASK;
    FAT_TEDDY_VBMI_PSHUFB_OR_M1;
    FAT_TEDDY_VBMI_SHIFT_M1;
    return FAT_SHIFT_OR_M1;
}

static really_inline
m512 prep_conf_fat_teddy_no_reinforcement_m2(const m512 *lo_mask,
                                             const m512 *dup_mask,
                                             const m512 val) {
    PREP_FAT_SHUF_MASK_NO_REINFORCEMENT(val);
m512 prep_conf_fat_teddy_m2(const m512 *lo_mask, const m512 *dup_mask,
                            const m512 *sl_msk, const m512 val) {
    PREP_FAT_SHUF_MASK;
    FAT_TEDDY_VBMI_PSHUFB_OR_M2;
    FAT_TEDDY_VBMI_SHIFT_M2;
    return FAT_SHIFT_OR_M2;
}

static really_inline
m512 prep_conf_fat_teddy_no_reinforcement_m3(const m512 *lo_mask,
                                             const m512 *dup_mask,
                                             const m512 val) {
    PREP_FAT_SHUF_MASK_NO_REINFORCEMENT(val);
m512 prep_conf_fat_teddy_m3(const m512 *lo_mask, const m512 *dup_mask,
                            const m512 *sl_msk, const m512 val) {
    PREP_FAT_SHUF_MASK;
    FAT_TEDDY_VBMI_PSHUFB_OR_M3;
    FAT_TEDDY_VBMI_SHIFT_M3;
    return FAT_SHIFT_OR_M3;
}

static really_inline
m512 prep_conf_fat_teddy_no_reinforcement_m4(const m512 *lo_mask,
                                             const m512 *dup_mask,
                                             const m512 val) {
    PREP_FAT_SHUF_MASK_NO_REINFORCEMENT(val);
m512 prep_conf_fat_teddy_m4(const m512 *lo_mask, const m512 *dup_mask,
                            const m512 *sl_msk, const m512 val) {
    PREP_FAT_SHUF_MASK;
    FAT_TEDDY_VBMI_PSHUFB_OR_M4;
    FAT_TEDDY_VBMI_SHIFT_M4;
    return FAT_SHIFT_OR_M4;
}

static really_inline
m512 prep_conf_fat_teddy_m1(const m512 *lo_mask, const m512 *dup_mask,
                            const u8 *ptr, const u64a *r_msk_base_lo,
                            const u64a *r_msk_base_hi, u32 *c_0, u32 *c_16) {
    PREP_FAT_SHUF_MASK;
    return or512(FAT_SHIFT_OR_M1, r_msk);
}
#define PREP_CONF_FAT_FN(val, n) \
    prep_conf_fat_teddy_m##n(&lo_mask, dup_mask, sl_msk, val)

static really_inline
m512 prep_conf_fat_teddy_m2(const m512 *lo_mask, const m512 *dup_mask,
                            const u8 *ptr, const u64a *r_msk_base_lo,
                            const u64a *r_msk_base_hi, u32 *c_0, u32 *c_16) {
    PREP_FAT_SHUF_MASK;
    return or512(FAT_SHIFT_OR_M2, r_msk);
}
#define FAT_TEDDY_VBMI_SL1_POS 15
#define FAT_TEDDY_VBMI_SL2_POS 14
#define FAT_TEDDY_VBMI_SL3_POS 13

static really_inline
m512 prep_conf_fat_teddy_m3(const m512 *lo_mask, const m512 *dup_mask,
                            const u8 *ptr, const u64a *r_msk_base_lo,
                            const u64a *r_msk_base_hi, u32 *c_0, u32 *c_16) {
    PREP_FAT_SHUF_MASK;
    return or512(FAT_SHIFT_OR_M3, r_msk);
}
#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M1

static really_inline
m512 prep_conf_fat_teddy_m4(const m512 *lo_mask, const m512 *dup_mask,
                            const u8 *ptr, const u64a *r_msk_base_lo,
                            const u64a *r_msk_base_hi, u32 *c_0, u32 *c_16) {
    PREP_FAT_SHUF_MASK;
    return or512(FAT_SHIFT_OR_M4, r_msk);
}
#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M2 \
    FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M1 \
    sl_msk[0] = loadu512(p_sh_mask_arr + FAT_TEDDY_VBMI_SL1_POS);

#define PREP_CONF_FAT_FN_NO_REINFORCEMENT(val, n) \
    prep_conf_fat_teddy_no_reinforcement_m##n(&lo_mask, dup_mask, val)
#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M3 \
    FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M2 \
    sl_msk[1] = loadu512(p_sh_mask_arr + FAT_TEDDY_VBMI_SL2_POS);

#define PREP_CONF_FAT_FN(ptr, n) \
    prep_conf_fat_teddy_m##n(&lo_mask, dup_mask, ptr, \
                             r_msk_base_lo, r_msk_base_hi, &c_0, &c_16)
#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M4 \
    FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M3 \
    sl_msk[2] = loadu512(p_sh_mask_arr + FAT_TEDDY_VBMI_SL3_POS);

/*
 * In FAT teddy, it needs 2 bytes to represent result of each position,

@@ -355,31 +382,15 @@ m512 prep_conf_fat_teddy_m4(const m512 *lo_mask, const m512 *dup_mask,
 * then do pshufb_m512(AABB, XYXY).
 */

#define DUP_FAT_MASK(a) mask_set2x256(set2x256(swap128in256(a)), 0xC3, a)
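Following the comment above: DUP_FAT_MASK rearranges a 256-bit nibble mask [A|B] (A for buckets 0..7, B for buckets 8..15, 16 bytes each) into the 512-bit pattern [A|A|B|B], so a single pshufb_m512 against the duplicated input [X|Y|X|Y] covers both bucket groups. A byte-level sketch of that rearrangement, assuming mask_set2x256 acts as a 256-bit broadcast of a merged under the 64-bit-lane mask 0xC3 (an assumption about the helper, not its documented contract):

#include <stdio.h>
#include <string.h>

int main(void) {
    char a[32], swapped[64], dup[64];
    memset(a, 'A', 16);      /* low 128-bit half: mask A */
    memset(a + 16, 'B', 16); /* high 128-bit half: mask B */

    /* set2x256(swap128in256(a)) == [B|A|B|A] */
    for (int h = 0; h < 2; h++) {
        memcpy(swapped + 32 * h, a + 16, 16);
        memcpy(swapped + 32 * h + 16, a, 16);
    }
    /* merge with the broadcast [A|B|A|B] under 64-bit lane mask 0xC3
     * (lanes 0,1 and 6,7 come from the broadcast) -> [A|A|B|B] */
    memcpy(dup, swapped, 64);
    memcpy(dup, a, 16);           /* lanes 0,1 */
    memcpy(dup + 48, a + 16, 16); /* lanes 6,7 */

    printf("%.16s %.16s %.16s %.16s\n", dup, dup + 16, dup + 32, dup + 48);
    /* prints the AABB layout: sixteen 'A's twice, then sixteen 'B's twice */
    return 0;
}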

#define PREPARE_FAT_MASKS_1 \
    dup_mask[0] = DUP_FAT_MASK(maskBase[0]); \
    dup_mask[1] = DUP_FAT_MASK(maskBase[1]);

#define PREPARE_FAT_MASKS_2 \
    PREPARE_FAT_MASKS_1 \
    dup_mask[2] = DUP_FAT_MASK(maskBase[2]); \
    dup_mask[3] = DUP_FAT_MASK(maskBase[3]);

#define PREPARE_FAT_MASKS_3 \
    PREPARE_FAT_MASKS_2 \
    dup_mask[4] = DUP_FAT_MASK(maskBase[4]); \
    dup_mask[5] = DUP_FAT_MASK(maskBase[5]);

#define PREPARE_FAT_MASKS_4 \
    PREPARE_FAT_MASKS_3 \
    dup_mask[6] = DUP_FAT_MASK(maskBase[6]); \
    dup_mask[7] = DUP_FAT_MASK(maskBase[7]);

#define PREPARE_FAT_MASKS(n) \
    m512 lo_mask = set64x8(0xf); \
    m512 dup_mask[n * 2]; \
    PREPARE_FAT_MASKS_##n
    m512 sl_msk[n - 1]; \
    FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M##n

#define FAT_TEDDY_VBMI_CONF_MASK_HEAD (0xffffffffULL >> n_sh)
#define FAT_TEDDY_VBMI_CONF_MASK_FULL ((0xffffffffULL << n_sh) & 0xffffffffULL)
#define FAT_TEDDY_VBMI_CONF_MASK_VAR(n) (0xffffffffULL >> (32 - n) << overlap)
#define FAT_TEDDY_VBMI_LOAD_MASK_PATCH (0xffffffffULL >> (32 - n_sh))
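These masks encode the overlap handling: with n_msk masks, consecutive 32-byte blocks overlap by n_sh = n_msk - 1 bytes, so the low n_sh confirm slots of a block would repeat work already done for the previous block and get masked off (HEAD handles the first block, which has no preceding history). A quick standalone check of the arithmetic, printing the values for n_msk = 1..4:

#include <stdio.h>

int main(void) {
    for (unsigned n_msk = 1; n_msk <= 4; n_msk++) {
        unsigned n_sh = n_msk - 1;
        unsigned long long head  = 0xffffffffULL >> n_sh;
        unsigned long long full  = (0xffffffffULL << n_sh) & 0xffffffffULL;
        unsigned long long patch = 0xffffffffULL >> (32 - n_sh);
        /* e.g. n_msk=3: head=3fffffff full=fffffffc patch=3 */
        printf("n_msk=%u n_sh=%u head=%08llx full=%08llx patch=%llx\n",
               n_msk, n_sh, head, full, patch);
    }
    return 0;
}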

#define FDR_EXEC_FAT_TEDDY(fdr, a, control, n_msk, conf_fn) \
do { \
@@ -389,67 +400,53 @@ do { \
    const u8 *tryFloodDetect = a->firstFloodDetect; \
    u32 last_match = ones_u32; \
    const struct Teddy *teddy = (const struct Teddy *)fdr; \
    const size_t iterBytes = 64; \
    const size_t iterBytes = 32; \
    u32 n_sh = n_msk - 1; \
    const size_t loopBytes = 32 - n_sh; \
    DEBUG_PRINTF("params: buf %p len %zu start_offset %zu\n", \
                 a->buf, a->len, a->start_offset); \
    \
    const m256 *maskBase = getMaskBase_fat(teddy); \
    const m512 *dup_mask = getDupMaskBase(teddy, n_msk); \
    PREPARE_FAT_MASKS(n_msk); \
    const u32 *confBase = getConfBase(teddy); \
    \
    const u64a *r_msk_base_lo = getReinforcedMaskBase_fat(teddy, n_msk); \
    const u64a *r_msk_base_hi = r_msk_base_lo + (N_CHARS + 1); \
    u32 c_0 = 0x100; \
    u32 c_16 = 0x100; \
    const u8 *mainStart = ROUNDUP_PTR(ptr, 32); \
    DEBUG_PRINTF("derive: ptr: %p mainstart %p\n", ptr, mainStart); \
    if (ptr < mainStart) { \
        ptr = mainStart - 32; \
        m512 p_mask; \
        m512 val_0 = vectoredLoad2x256(&p_mask, ptr, a->start_offset, \
                                       a->buf, buf_end, \
                                       a->buf_history, a->len_history, n_msk); \
        m512 r_0 = PREP_CONF_FAT_FN_NO_REINFORCEMENT(val_0, n_msk); \
        r_0 = or512(r_0, p_mask); \
        CONFIRM_FAT_TEDDY(r_0, 16, 0, VECTORING, conf_fn); \
        ptr += 32; \
    u64a k = FAT_TEDDY_VBMI_CONF_MASK_FULL; \
    m512 p_mask = set_mask_m512(~((k << 32) | k)); \
    u32 overlap = 0; \
    u64a patch = 0; \
    if (likely(ptr + loopBytes <= buf_end)) { \
        u64a k0 = FAT_TEDDY_VBMI_CONF_MASK_HEAD; \
        m512 p_mask0 = set_mask_m512(~((k0 << 32) | k0)); \
        m512 r_0 = PREP_CONF_FAT_FN(set2x256(loadu256(ptr)), n_msk); \
        r_0 = or512(r_0, p_mask0); \
        CONFIRM_FAT_TEDDY(r_0, 16, 0, VECTORING, ptr, conf_fn); \
        ptr += loopBytes; \
        overlap = n_sh; \
        patch = FAT_TEDDY_VBMI_LOAD_MASK_PATCH; \
    } \
    \
    if (ptr + 32 <= buf_end) { \
        m512 r_0 = PREP_CONF_FAT_FN(ptr, n_msk); \
        CONFIRM_FAT_TEDDY(r_0, 16, 0, VECTORING, conf_fn); \
        ptr += 32; \
    } \
    \
    for (; ptr + iterBytes <= buf_end; ptr += iterBytes) { \
        __builtin_prefetch(ptr + (iterBytes * 4)); \
    for (; ptr + loopBytes <= buf_end; ptr += loopBytes) { \
        CHECK_FLOOD; \
        m512 r_0 = PREP_CONF_FAT_FN(ptr, n_msk); \
        CONFIRM_FAT_TEDDY(r_0, 16, 0, NOT_CAUTIOUS, conf_fn); \
        m512 r_1 = PREP_CONF_FAT_FN(ptr + 32, n_msk); \
        CONFIRM_FAT_TEDDY(r_1, 16, 32, NOT_CAUTIOUS, conf_fn); \
    } \
    \
    if (ptr + 32 <= buf_end) { \
        m512 r_0 = PREP_CONF_FAT_FN(ptr, n_msk); \
        CONFIRM_FAT_TEDDY(r_0, 16, 0, NOT_CAUTIOUS, conf_fn); \
        ptr += 32; \
    } \
    \
    assert(ptr + 32 > buf_end); \
    if (ptr < buf_end) { \
        m512 p_mask; \
        m512 val_0 = vectoredLoad2x256(&p_mask, ptr, 0, ptr, buf_end, \
                                       a->buf_history, a->len_history, n_msk); \
        m512 r_0 = PREP_CONF_FAT_FN_NO_REINFORCEMENT(val_0, n_msk); \
        m512 r_0 = PREP_CONF_FAT_FN(set2x256(loadu256(ptr - n_sh)), n_msk); \
        r_0 = or512(r_0, p_mask); \
        CONFIRM_FAT_TEDDY(r_0, 16, 0, VECTORING, conf_fn); \
        CONFIRM_FAT_TEDDY(r_0, 16, 0, NOT_CAUTIOUS, ptr - n_sh, conf_fn); \
    } \
    \
    assert(ptr + loopBytes > buf_end); \
    if (ptr < buf_end) { \
        u32 left = (u32)(buf_end - ptr); \
        u64a k1 = FAT_TEDDY_VBMI_CONF_MASK_VAR(left); \
        m512 p_mask1 = set_mask_m512(~((k1 << 32) | k1)); \
        m512 val_0 = set2x256(loadu_maskz_m256(k1 | patch, ptr - overlap)); \
        m512 r_0 = PREP_CONF_FAT_FN(val_0, n_msk); \
        r_0 = or512(r_0, p_mask1); \
        CONFIRM_FAT_TEDDY(r_0, 16, 0, VECTORING, ptr - overlap, conf_fn); \
    } \
    \
    return HWLM_SUCCESS; \
} while(0)

#else // HAVE_AVX512
#else // !HAVE_AVX512VBMI, AVX2 normal fat teddy

#ifdef ARCH_64_BIT
#define CONFIRM_FAT_TEDDY(var, bucket, offset, reason, conf_fn) \
@@ -659,7 +656,7 @@ do { \
    return HWLM_SUCCESS; \
} while(0)

#endif // HAVE_AVX512
#endif // HAVE_AVX512VBMI

hwlm_error_t fdr_exec_fat_teddy_msks1(const struct FDR *fdr,
                                      const struct FDR_Runtime_Args *a,

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015-2017, Intel Corporation
 * Copyright (c) 2015-2020, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:

@@ -353,6 +353,89 @@ void fillReinforcedMsk(u8 *rmsk, u16 c, u32 j, u8 bmsk) {
    }
}

static
void fillDupNibbleMasks(const map<BucketIndex,
                                  vector<LiteralIndex>> &bucketToLits,
                        const vector<hwlmLiteral> &lits,
                        u32 numMasks, size_t maskLen,
                        u8 *baseMsk) {
    u32 maskWidth = 2;
    memset(baseMsk, 0xff, maskLen);

    for (const auto &b2l : bucketToLits) {
        const u32 &bucket_id = b2l.first;
        const vector<LiteralIndex> &ids = b2l.second;
        const u8 bmsk = 1U << (bucket_id % 8);

        for (const LiteralIndex &lit_id : ids) {
            const hwlmLiteral &l = lits[lit_id];
            DEBUG_PRINTF("putting lit %u into bucket %u\n", lit_id, bucket_id);
            const u32 sz = verify_u32(l.s.size());

            // fill in masks
            for (u32 j = 0; j < numMasks; j++) {
                const u32 msk_id_lo = j * 2 * maskWidth + (bucket_id / 8);
                const u32 msk_id_hi = (j * 2 + 1) * maskWidth + (bucket_id / 8);
                const u32 lo_base0 = msk_id_lo * 32;
                const u32 lo_base1 = msk_id_lo * 32 + 16;
                const u32 hi_base0 = msk_id_hi * 32;
                const u32 hi_base1 = msk_id_hi * 32 + 16;

                // if we don't have a char at this position, fill in i
                // locations in these masks with '1'
                if (j >= sz) {
                    for (u32 n = 0; n < 16; n++) {
                        baseMsk[lo_base0 + n] &= ~bmsk;
                        baseMsk[lo_base1 + n] &= ~bmsk;
                        baseMsk[hi_base0 + n] &= ~bmsk;
                        baseMsk[hi_base1 + n] &= ~bmsk;
                    }
                } else {
                    u8 c = l.s[sz - 1 - j];
                    // if we do have a char at this position
                    const u32 hiShift = 4;
                    u32 n_hi = (c >> hiShift) & 0xf;
                    u32 n_lo = c & 0xf;

                    if (j < l.msk.size() && l.msk[l.msk.size() - 1 - j]) {
                        u8 m = l.msk[l.msk.size() - 1 - j];
                        u8 m_hi = (m >> hiShift) & 0xf;
                        u8 m_lo = m & 0xf;
                        u8 cmp = l.cmp[l.msk.size() - 1 - j];
                        u8 cmp_lo = cmp & 0xf;
                        u8 cmp_hi = (cmp >> hiShift) & 0xf;

                        for (u8 cm = 0; cm < 0x10; cm++) {
                            if ((cm & m_lo) == (cmp_lo & m_lo)) {
                                baseMsk[lo_base0 + cm] &= ~bmsk;
                                baseMsk[lo_base1 + cm] &= ~bmsk;
                            }
                            if ((cm & m_hi) == (cmp_hi & m_hi)) {
                                baseMsk[hi_base0 + cm] &= ~bmsk;
                                baseMsk[hi_base1 + cm] &= ~bmsk;
                            }
                        }
                    } else {
                        if (l.nocase && ourisalpha(c)) {
                            u32 cmHalfClear = (0xdf >> hiShift) & 0xf;
                            u32 cmHalfSet = (0x20 >> hiShift) & 0xf;
                            baseMsk[hi_base0 + (n_hi & cmHalfClear)] &= ~bmsk;
                            baseMsk[hi_base1 + (n_hi & cmHalfClear)] &= ~bmsk;
                            baseMsk[hi_base0 + (n_hi | cmHalfSet)] &= ~bmsk;
                            baseMsk[hi_base1 + (n_hi | cmHalfSet)] &= ~bmsk;
                        } else {
                            baseMsk[hi_base0 + n_hi] &= ~bmsk;
                            baseMsk[hi_base1 + n_hi] &= ~bmsk;
                        }
                        baseMsk[lo_base0 + n_lo] &= ~bmsk;
                        baseMsk[lo_base1 + n_lo] &= ~bmsk;
                    }
                }
            }
        }
    }
}
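In the nocase branch above, only the high nibble differs between the two cases of an ASCII letter (bit 5, 0x20, sits in the high nibble; the low nibble is shared), which is why the code clears the bucket bit at both n_hi & (0xdf >> 4) and n_hi | (0x20 >> 4) while touching the low-nibble mask once. A quick standalone check of that property (toupper/tolower stand in for the library's case helpers):

#include <stdio.h>
#include <ctype.h>

int main(void) {
    unsigned cmHalfClear = (0xdf >> 4) & 0xf; /* 0xd: clears bit 5 */
    unsigned cmHalfSet   = (0x20 >> 4) & 0xf; /* 0x2: sets bit 5   */
    for (unsigned char c = 'a'; c <= 'c'; c++) {
        unsigned n_hi = (c >> 4) & 0xf;
        unsigned upper_hi = ((unsigned char)toupper(c) >> 4) & 0xf;
        unsigned lower_hi = ((unsigned char)tolower(c) >> 4) & 0xf;
        /* the two high-nibble indices touched by the nocase branch
         * are exactly the upper- and lower-case high nibbles */
        printf("'%c': clear idx %x, set idx %x (upper hi=%x, lower hi=%x)\n",
               c, n_hi & cmHalfClear, n_hi | cmHalfSet, upper_hi, lower_hi);
    }
    return 0;
}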

static
void fillNibbleMasks(const map<BucketIndex,
                              vector<LiteralIndex>> &bucketToLits,

@@ -479,14 +562,17 @@ bytecode_ptr<FDR> TeddyCompiler::build() {

    size_t headerSize = sizeof(Teddy);
    size_t maskLen = eng.numMasks * 16 * 2 * maskWidth;
    size_t reinforcedMaskLen = RTABLE_SIZE * maskWidth;
    size_t reinforcedDupMaskLen = RTABLE_SIZE * maskWidth;
    if (maskWidth == 2) { // dup nibble mask table in Fat Teddy
        reinforcedDupMaskLen = maskLen * 2;
    }

    auto floodTable = setupFDRFloodControl(lits, eng, grey);
    auto confirmTable = setupFullConfs(lits, eng, bucketToLits, make_small);

    // Note: we place each major structure here on a cacheline boundary.
    size_t size = ROUNDUP_CL(headerSize) + ROUNDUP_CL(maskLen) +
                  ROUNDUP_CL(reinforcedMaskLen) +
                  ROUNDUP_CL(reinforcedDupMaskLen) +
                  ROUNDUP_CL(confirmTable.size()) + floodTable.size();

    auto fdr = make_zeroed_bytecode_ptr<FDR>(size, 64);

@@ -502,7 +588,7 @@ bytecode_ptr<FDR> TeddyCompiler::build() {

    // Write confirm structures.
    u8 *ptr = teddy_base + ROUNDUP_CL(headerSize) + ROUNDUP_CL(maskLen) +
              ROUNDUP_CL(reinforcedMaskLen);
              ROUNDUP_CL(reinforcedDupMaskLen);
    assert(ISALIGNED_CL(ptr));
    teddy->confOffset = verify_u32(ptr - teddy_base);
    memcpy(ptr, confirmTable.get(), confirmTable.size());

@@ -519,9 +605,16 @@ bytecode_ptr<FDR> TeddyCompiler::build() {
    fillNibbleMasks(bucketToLits, lits, eng.numMasks, maskWidth, maskLen,
                    baseMsk);

    if (maskWidth == 1) { // reinforcement table in Teddy
        // Write reinforcement masks.
        u8 *reinforcedMsk = baseMsk + ROUNDUP_CL(maskLen);
        fillReinforcedTable(bucketToLits, lits, reinforcedMsk, maskWidth);
    } else { // dup nibble mask table in Fat Teddy
        assert(maskWidth == 2);
        u8 *dupMsk = baseMsk + ROUNDUP_CL(maskLen);
        fillDupNibbleMasks(bucketToLits, lits, eng.numMasks,
                           reinforcedDupMaskLen, dupMsk);
    }

    return fdr;
}

@@ -45,6 +45,16 @@ extern const u8 ALIGN_DIRECTIVE p_mask_arr[17][32];
extern const u8 ALIGN_AVX_DIRECTIVE p_mask_arr256[33][64];
#endif

#if defined(HAVE_AVX512VBMI)
static const u8 ALIGN_DIRECTIVE p_sh_mask_arr[80] = {
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
    0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
};
#endif

#ifdef ARCH_64_BIT
#define TEDDY_CONF_TYPE u64a
#define TEDDY_FIND_AND_CLEAR_LSB(conf) findAndClearLSB_64(conf)

@@ -1219,6 +1219,11 @@ static really_inline
m512 set_mask_m512(__mmask64 k) {
    return _mm512_movm_epi8(k);
}

static really_inline
m256 loadu_maskz_m256(__mmask32 k, const void *ptr) {
    return _mm256_maskz_loadu_epi8(k, ptr);
}
#endif
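loadu_maskz_m256 is what the new tail handling leans on: lanes whose mask bit is clear are zeroed rather than read, so the final partial block can be loaded without running past buf_end. A hedged usage sketch (assumes an AVX512VL+BW target, e.g. built with -mavx512bw -mavx512vl):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    unsigned char buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    __mmask32 k = (1u << 8) - 1; /* only the 8 valid bytes */
    /* masked-out lanes are zeroed and not accessed, so this does not
     * fault even though buf is shorter than 32 bytes */
    __m256i v = _mm256_maskz_loadu_epi8(k, buf);
    unsigned char out[32];
    _mm256_storeu_si256((__m256i *)out, v);
    printf("%d %d ... %d %d\n", out[0], out[7], out[8], out[31]);
    return 0;
}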

// packed unaligned store of first N bytes