diff --git a/src/fdr/teddy.c b/src/fdr/teddy.c
index 6898b6d4..3e46a0d6 100644
--- a/src/fdr/teddy.c
+++ b/src/fdr/teddy.c
@@ -468,7 +468,7 @@ do { \
     *c_16 = *(ptr + 15); \
     *c_32 = *(ptr + 31); \
     *c_48 = *(ptr + 47); \
-    m512 r_msk = set512_64(0ULL, r_msk_base[*c_48], 0ULL, r_msk_base[*c_32],\
+    m512 r_msk = set8x64(0ULL, r_msk_base[*c_48], 0ULL, r_msk_base[*c_32],\
                            0ULL, r_msk_base[*c_16], 0ULL, r_msk_base[*c_0]);\
     *c_0 = *(ptr + 63)
diff --git a/src/fdr/teddy_avx2.c b/src/fdr/teddy_avx2.c
index 9bde3036..e17e7872 100644
--- a/src/fdr/teddy_avx2.c
+++ b/src/fdr/teddy_avx2.c
@@ -383,7 +383,7 @@ m512 prep_conf_fat_teddy_m4(const m512 *lo_mask, const m512 *dup_mask,
  */
 #define PREPARE_FAT_MASKS(n) \
-    m512 lo_mask = set64x8(0xf); \
+    m512 lo_mask = set1_64x8(0xf); \
     m512 sl_msk[n - 1]; \
     FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M##n
diff --git a/src/hwlm/noodle_engine_avx512.c b/src/hwlm/noodle_engine_avx512.c
index 8cac1b15..1a925fbf 100644
--- a/src/hwlm/noodle_engine_avx512.c
+++ b/src/hwlm/noodle_engine_avx512.c
@@ -31,12 +31,12 @@ static really_inline
 m512 getMask(u8 c, bool noCase) {
     u8 k = caseClear8(c, noCase);
-    return set64x8(k);
+    return set1_64x8(k);
 }
 
 static really_inline
 m512 getCaseMask(void) {
-    return set64x8(CASE_CLEAR);
+    return set1_64x8(CASE_CLEAR);
 }
 
 // The short scan routine. It is used both to scan data up to an
diff --git a/src/nfa/limex_exceptional.h b/src/nfa/limex_exceptional.h
index 6c7335f1..c9de3aed 100644
--- a/src/nfa/limex_exceptional.h
+++ b/src/nfa/limex_exceptional.h
@@ -47,7 +47,7 @@
 #define AND_STATE JOIN(and_, STATE_T)
 #define EQ_STATE(a, b) (!JOIN(noteq_, STATE_T)((a), (b)))
 #define OR_STATE JOIN(or_, STATE_T)
-#define EXPAND_STATE JOIN(expand_, STATE_T)
+#define EXPAND_STATE JOIN(broadcast_, STATE_T)
 #define SHUFFLE_BYTE_STATE JOIN(shuffle_byte_, STATE_T)
 #define TESTBIT_STATE JOIN(testbit_, STATE_T)
 #define EXCEPTION_T JOIN(struct NFAException, SIZE)
diff --git a/src/nfa/mcsheng.c b/src/nfa/mcsheng.c
index a656d4c5..c52bf31c 100644
--- a/src/nfa/mcsheng.c
+++ b/src/nfa/mcsheng.c
@@ -1490,7 +1490,7 @@ u32 doSheng64(const struct mcsheng64 *m, const u8 **c_inout, const u8 *soft_c_en
     assert(s_in); /* should not already be dead */
     assert(soft_c_end <= hard_c_end);
     DEBUG_PRINTF("s_in = %u (adjusted %u)\n", s_in, s_in - 1);
-    m512 s = set64x8(s_in - 1);
+    m512 s = set1_64x8(s_in - 1);
     const u8 *c = *c_inout;
     const u8 *c_end = hard_c_end - SHENG_CHUNK + 1;
     if (!do_accel) {
@@ -1509,8 +1509,8 @@ u32 doSheng64(const struct mcsheng64 *m, const u8 **c_inout, const u8 *soft_c_en
 
 #if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
     u32 sheng_stop_limit_x4 = sheng_stop_limit * 0x01010101;
-    m512 simd_stop_limit = set16x32(sheng_stop_limit_x4);
-    m512 accel_delta = set64x8(sheng_limit - sheng_stop_limit);
+    m512 simd_stop_limit = set1_16x32(sheng_stop_limit_x4);
+    m512 accel_delta = set1_64x8(sheng_limit - sheng_stop_limit);
     DEBUG_PRINTF("end %hhu, accel %hu --> limit %hhu\n", sheng_limit,
                  m->sheng_accel_limit, sheng_stop_limit);
 #endif
diff --git a/src/nfa/sheng_impl.h b/src/nfa/sheng_impl.h
index 17f929ab..1fa5c831 100644
--- a/src/nfa/sheng_impl.h
+++ b/src/nfa/sheng_impl.h
@@ -114,7 +114,7 @@ char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
     }
     DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start));
 
-    m512 cur_state = set64x8(*state);
+    m512 cur_state = set1_64x8(*state);
     const m512 *masks = s->succ_masks;
 
     while (likely(cur_buf != end)) {
@@ -175,7 +175,7 @@ char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
     }
     DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start));
 
-    m512 cur_state = set64x8(*state);
+    m512 cur_state = set1_64x8(*state);
     const m512 *masks = s->succ_masks;
 
     while (likely(cur_buf != end)) {
diff --git a/src/nfa/sheng_impl4.h b/src/nfa/sheng_impl4.h
index a2c325fd..e5d3468f 100644
--- a/src/nfa/sheng_impl4.h
+++ b/src/nfa/sheng_impl4.h
@@ -320,7 +320,7 @@ char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
         return MO_CONTINUE_MATCHING;
     }
 
-    m512 cur_state = set64x8(*state);
+    m512 cur_state = set1_64x8(*state);
     const m512 *masks = s->succ_masks;
 
     while (likely(end - cur_buf >= 4)) {
@@ -542,7 +542,7 @@ char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
         return MO_CONTINUE_MATCHING;
     }
 
-    m512 cur_state = set64x8(*state);
+    m512 cur_state = set1_64x8(*state);
     const m512 *masks = s->succ_masks;
 
     while (likely(end - cur_buf >= 4)) {
diff --git a/src/nfa/shufti.c b/src/nfa/shufti.c
index f1f2befc..4f7cae2e 100644
--- a/src/nfa/shufti.c
+++ b/src/nfa/shufti.c
@@ -829,10 +829,10 @@ const u8 *shuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
     DEBUG_PRINTF("shufti %p len %zu\n", buf, buf_end - buf);
     DEBUG_PRINTF("b %s\n", buf);
 
-    const m512 low4bits = set64x8(0xf);
+    const m512 low4bits = set1_64x8(0xf);
     const m512 zeroes = zeroes512();
-    const m512 wide_mask_lo = set4x128(mask_lo);
-    const m512 wide_mask_hi = set4x128(mask_hi);
+    const m512 wide_mask_lo = set1_4x128(mask_lo);
+    const m512 wide_mask_hi = set1_4x128(mask_hi);
     const u8 *rv;
 
     // small cases.
@@ -941,10 +941,10 @@ const u8 *rshuftiExec(m128 mask_lo, m128 mask_hi, const u8 *buf,
     assert(buf && buf_end);
     assert(buf < buf_end);
 
-    const m512 low4bits = set64x8(0xf);
+    const m512 low4bits = set1_64x8(0xf);
     const m512 zeroes = zeroes512();
-    const m512 wide_mask_lo = set4x128(mask_lo);
-    const m512 wide_mask_hi = set4x128(mask_hi);
+    const m512 wide_mask_lo = set1_4x128(mask_lo);
+    const m512 wide_mask_hi = set1_4x128(mask_hi);
     const u8 *rv;
 
     if (buf_end - buf < 64) {
@@ -1051,11 +1051,11 @@ const u8 *shuftiDoubleExec(m128 mask1_lo, m128 mask1_hi,
     DEBUG_PRINTF("buf %p len %zu\n", buf, buf_end - buf);
 
     const m512 ones = ones512();
-    const m512 low4bits = set64x8(0xf);
-    const m512 wide_mask1_lo = set4x128(mask1_lo);
-    const m512 wide_mask1_hi = set4x128(mask1_hi);
-    const m512 wide_mask2_lo = set4x128(mask2_lo);
-    const m512 wide_mask2_hi = set4x128(mask2_hi);
+    const m512 low4bits = set1_64x8(0xf);
+    const m512 wide_mask1_lo = set1_4x128(mask1_lo);
+    const m512 wide_mask1_hi = set1_4x128(mask1_hi);
+    const m512 wide_mask2_lo = set1_4x128(mask2_lo);
+    const m512 wide_mask2_hi = set1_4x128(mask2_hi);
     const u8 *rv;
 
     if (buf_end - buf <= 64) {
diff --git a/src/nfa/truffle.c b/src/nfa/truffle.c
index 37af13ad..eff1d95e 100644
--- a/src/nfa/truffle.c
+++ b/src/nfa/truffle.c
@@ -452,8 +452,8 @@ const u8 *firstMatch(const u8 *buf, u64a z) {
 static really_inline
 u64a block(m512 shuf_mask_lo_highclear, m512 shuf_mask_lo_highset, m512 v) {
 
-    m512 highconst = set64x8(0x80);
-    m512 shuf_mask_hi = set8x64(0x8040201008040201);
+    m512 highconst = set1_64x8(0x80);
+    m512 shuf_mask_hi = set1_8x64(0x8040201008040201);
 
     // and now do the real work
     m512 shuf1 = pshufb_m512(shuf_mask_lo_highclear, v);
@@ -501,8 +501,8 @@ const u8 *revBlock(m512 shuf_mask_lo_highclear, m512 shuf_mask_lo_highset,
 const u8 *truffleExec(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highset,
                       const u8 *buf, const u8 *buf_end) {
     DEBUG_PRINTF("len %zu\n", buf_end - buf);
-    const m512 wide_clear = set4x128(shuf_mask_lo_highclear);
-    const m512 wide_set = set4x128(shuf_mask_lo_highset);
+    const m512 wide_clear = set1_4x128(shuf_mask_lo_highclear);
+    const m512 wide_set = set1_4x128(shuf_mask_lo_highset);
 
     assert(buf && buf_end);
     assert(buf < buf_end);
@@ -563,8 +563,8 @@ const u8 *truffleRevMini(m512 shuf_mask_lo_highclear, m512 shuf_mask_lo_highset,
 
 const u8 *rtruffleExec(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highset,
                        const u8 *buf, const u8 *buf_end) {
-    const m512 wide_clear = set4x128(shuf_mask_lo_highclear);
-    const m512 wide_set = set4x128(shuf_mask_lo_highset);
+    const m512 wide_clear = set1_4x128(shuf_mask_lo_highclear);
+    const m512 wide_set = set1_4x128(shuf_mask_lo_highset);
 
     assert(buf && buf_end);
     assert(buf < buf_end);
     const u8 *rv;
diff --git a/src/nfa/vermicelli_sse.h b/src/nfa/vermicelli_sse.h
index dc56a5f1..12001f4f 100644
--- a/src/nfa/vermicelli_sse.h
+++ b/src/nfa/vermicelli_sse.h
@@ -424,7 +424,7 @@ const u8 *vermMiniNocase(m512 chars, const u8 *buf, const u8 *buf_end,
     uintptr_t len = buf_end - buf;
     __mmask64 mask = (~0ULL) >> (64 - len);
     m512 data = loadu_maskz_m512(mask, buf);
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
     m512 v = and512(casemask, data);
     u64a z = eq512mask(chars, v);
 
@@ -461,7 +461,7 @@ static really_inline
 const u8 *vermSearchAlignedNocase(m512 chars, const u8 *buf,
                                   const u8 *buf_end, char negate) {
     assert((size_t)buf % 64 == 0);
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
 
     for (; buf + 63 < buf_end; buf += 64) {
         m512 data = load512(buf);
@@ -494,7 +494,7 @@ const u8 *vermUnalign(m512 chars, const u8 *buf, char negate) {
 // returns NULL if not found
 static really_inline
 const u8 *vermUnalignNocase(m512 chars, const u8 *buf, char negate) {
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
     m512 data = loadu512(buf); // unaligned
     u64a z = eq512mask(chars, and512(casemask, data));
     if (negate) {
@@ -529,7 +529,7 @@ const u8 *dvermMiniNocase(m512 chars1, m512 chars2, const u8 *buf,
     uintptr_t len = buf_end - buf;
     __mmask64 mask = (~0ULL) >> (64 - len);
     m512 data = loadu_maskz_m512(mask, buf);
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
     m512 v = and512(casemask, data);
 
     u64a z = eq512mask(chars1, v) & (eq512mask(chars2, v) >> 1);
@@ -583,7 +583,7 @@ static really_inline
 const u8 *dvermSearchAlignedNocase(m512 chars1, m512 chars2, u8 c1, u8 c2,
                                    const u8 *buf, const u8 *buf_end) {
     assert((size_t)buf % 64 == 0);
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
 
     for (; buf + 64 < buf_end; buf += 64) {
         m512 data = load512(buf);
@@ -643,7 +643,7 @@ const u8 *dvermPrecondition(m512 chars1, m512 chars2, const u8 *buf) {
 static really_inline
 const u8 *dvermPreconditionNocase(m512 chars1, m512 chars2, const u8 *buf) {
     /* due to laziness, nonalphas and nocase having interesting behaviour */
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
     m512 data = loadu512(buf); // unaligned
     m512 v = and512(casemask, data);
     u64a z = eq512mask(chars1, v) & (eq512mask(chars2, v) >> 1);
@@ -703,7 +703,7 @@ const u8 *rvermMiniNocase(m512 chars, const u8 *buf, const u8 *buf_end,
     uintptr_t len = buf_end - buf;
     __mmask64 mask = (~0ULL) >> (64 - len);
     m512 data = loadu_maskz_m512(mask, buf);
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
     m512 v = and512(casemask, data);
     u64a z = eq512mask(chars, v);
 
@@ -739,7 +739,7 @@ static really_inline
 const u8 *rvermSearchAlignedNocase(m512 chars, const u8 *buf,
                                    const u8 *buf_end, char negate) {
     assert((size_t)buf_end % 64 == 0);
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
 
     for (; buf + 63 < buf_end; buf_end -= 64) {
         m512 data = load512(buf_end - 64);
@@ -771,7 +771,7 @@ const u8 *rvermUnalign(m512 chars, const u8 *buf, char negate) {
 // returns NULL if not found
 static really_inline
 const u8 *rvermUnalignNocase(m512 chars, const u8 *buf, char negate) {
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
     m512 data = loadu512(buf); // unaligned
     u64a z = eq512mask(chars, and512(casemask, data));
     if (negate) {
@@ -805,7 +805,7 @@ const u8 *rdvermMiniNocase(m512 chars1, m512 chars2, const u8 *buf,
     uintptr_t len = buf_end - buf;
     __mmask64 mask = (~0ULL) >> (64 - len);
     m512 data = loadu_maskz_m512(mask, buf);
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
     m512 v = and512(casemask, data);
 
     u64a z = eq512mask(chars2, v) & (eq512mask(chars1, v) << 1);
@@ -839,7 +839,7 @@ static really_inline
 const u8 *rdvermSearchAlignedNocase(m512 chars1, m512 chars2, u8 c1, u8 c2,
                                     const u8 *buf, const u8 *buf_end) {
     assert((size_t)buf_end % 64 == 0);
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
 
     for (; buf + 64 < buf_end; buf_end -= 64) {
         m512 data = load512(buf_end - 64);
@@ -874,7 +874,7 @@ const u8 *rdvermPrecondition(m512 chars1, m512 chars2, const u8 *buf) {
 static really_inline
 const u8 *rdvermPreconditionNocase(m512 chars1, m512 chars2, const u8 *buf) {
     // due to laziness, nonalphas and nocase having interesting behaviour
-    m512 casemask = set64x8(CASE_CLEAR);
+    m512 casemask = set1_64x8(CASE_CLEAR);
     m512 data = loadu512(buf);
     m512 v = and512(casemask, data);
     u64a z = eq512mask(chars2, v) & (eq512mask(chars1, v) << 1);
diff --git a/src/rose/validate_shufti.h b/src/rose/validate_shufti.h
index 88427027..1ee7fa0a 100644
--- a/src/rose/validate_shufti.h
+++ b/src/rose/validate_shufti.h
@@ -180,7 +180,7 @@ static really_inline
 int validateShuftiMask64x8(const m512 data, const m512 hi_mask,
                            const m512 lo_mask, const m512 and_mask,
                            const u64a neg_mask, const u64a valid_data_mask) {
-    m512 low4bits = set64x8(0xf);
+    m512 low4bits = set1_64x8(0xf);
     m512 c_lo = pshufb_m512(lo_mask, and512(data, low4bits));
     m512 c_hi = pshufb_m512(hi_mask,
                             rshift64_m512(andnot512(low4bits, data), 4));
@@ -210,7 +210,7 @@ int validateShuftiMask64x16(const m512 data,
                             const m512 lo_mask_1, const m512 lo_mask_2,
                             const m512 and_mask_hi, const m512 and_mask_lo,
                             const u64a neg_mask, const u64a valid_data_mask) {
-    m512 low4bits = set64x8(0xf);
+    m512 low4bits = set1_64x8(0xf);
     m512 data_lo = and512(data, low4bits);
     m512 data_hi = and512(rshift64_m512(data, 4), low4bits);
     m512 c_lo_1 = pshufb_m512(lo_mask_1, data_lo);
diff --git a/src/util/state_compress.c b/src/util/state_compress.c
index 66cd4daf..2040ffa1 100644
--- a/src/util/state_compress.c
+++ b/src/util/state_compress.c
@@ -592,7 +592,7 @@ m512 loadcompressed512_64bit(const void *ptr, m512 mvec) {
                   expand64(v[6], m[6]), expand64(v[7], m[7]) };
 
 #if defined(HAVE_AVX512)
-    m512 xvec = set512_64(x[7], x[6], x[5], x[4],
+    m512 xvec = set8x64(x[7], x[6], x[5], x[4],
                         x[3], x[2], x[1], x[0]);
 #elif defined(HAVE_AVX2)
     m512 xvec = { .lo = set4x64(x[3], x[2], x[1], x[0]),
diff --git a/unit/internal/simd_utils.cpp b/unit/internal/simd_utils.cpp
index bc1426b1..da9bb62a 100644
--- a/unit/internal/simd_utils.cpp
+++ b/unit/internal/simd_utils.cpp
@@ -697,7 +697,7 @@ TEST(SimdUtilsTest, set32x8) {
     char cmp[sizeof(m256)];
 
     for (unsigned i = 0; i < 256; i++) {
-        m256 simd = set32x8(i);
+        m256 simd = set1_32x8(i);
         memset(cmp, i, sizeof(simd));
         ASSERT_EQ(0, memcmp(cmp, &simd, sizeof(simd)));
     }
@@ -707,9 +707,9 @@ TEST(SimdUtilsTest, set2x128) {
     char cmp[sizeof(m256)];
 
     for (unsigned i = 0; i < 256; i++) {
-        m128 x = set16x8(i);
-        m256 y = set32x8(i);
-        m256 z = set2x128(x);
+        m128 x = set1_16x8(i);
+        m256 y = set1_32x8(i);
+        m256 z = set1_2x128(x);
         memset(cmp, i, sizeof(z));
         ASSERT_EQ(0, memcmp(cmp, &z, sizeof(z)));
         ASSERT_EQ(0, memcmp(&y, &z, sizeof(z)));
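
Note on the naming scheme (reviewer aid, not part of the patch): the new names
follow the Intel intrinsics convention, where a "set1_" prefix broadcasts one
scalar into every lane and a bare "setNxM" fills N lanes of M bits from N
separate arguments. This frees the plain set8x64() name for the eight-argument
form, which is why the teddy.c and state_compress.c hunks above turn
set512_64() into set8x64(). A minimal sketch of the two shapes using raw
AVX-512F intrinsics (compile with -mavx512f); the helper names splat_byte_512()
and fill_qwords_512() are hypothetical, chosen only to mirror the wrappers:

    #include <immintrin.h>

    /* Shape of set1_64x8(c): broadcast one byte into all 64 byte lanes
     * of a 512-bit vector. */
    static inline __m512i splat_byte_512(char c) {
        return _mm512_set1_epi8(c);
    }

    /* Shape of set8x64(e7..e0): fill the eight 64-bit lanes from eight
     * distinct arguments, most-significant lane first. */
    static inline __m512i fill_qwords_512(long long e7, long long e6,
                                          long long e5, long long e4,
                                          long long e3, long long e2,
                                          long long e1, long long e0) {
        return _mm512_set_epi64(e7, e6, e5, e4, e3, e2, e1, e0);
    }

Spelling the broadcast variants "set1_" makes the splat semantics obvious at
the call site and removes the ambiguity that previously let a one-argument
name like set64x8 sit next to the eight-argument set512_64.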