From d9d39d48c5a36c65201d10d494a4707a74146c77 Mon Sep 17 00:00:00 2001 From: apostolos Date: Mon, 1 Nov 2021 10:05:25 +0200 Subject: [PATCH] prints commants and formating fixes --- src/nfa/ppc64el/truffle.hpp | 2 +- src/nfa/truffle_simd.hpp | 1 - src/util/arch/ppc64el/simd_utils.h | 37 ++++---------------- src/util/supervector/arch/ppc64el/impl.cpp | 39 +++------------------- unit/internal/shuffle.cpp | 2 +- unit/internal/simd_utils.cpp | 12 +++---- 6 files changed, 19 insertions(+), 74 deletions(-) diff --git a/src/nfa/ppc64el/truffle.hpp b/src/nfa/ppc64el/truffle.hpp index 92333261..7dc711f4 100644 --- a/src/nfa/ppc64el/truffle.hpp +++ b/src/nfa/ppc64el/truffle.hpp @@ -58,5 +58,5 @@ const SuperVector blockSingleMask(SuperVector shuf_mask_lo_highclear, Supe SuperVector res = (shuf1 | shuf2) & shuf3; res.print8("(shuf1 | shuf2) & shuf3"); - return !res.eq(SuperVector::Zeroes()); + return res.eq(SuperVector::Zeroes()); } diff --git a/src/nfa/truffle_simd.hpp b/src/nfa/truffle_simd.hpp index b3a82266..51b9ee68 100644 --- a/src/nfa/truffle_simd.hpp +++ b/src/nfa/truffle_simd.hpp @@ -57,7 +57,6 @@ template static really_inline const u8 *fwdBlock(SuperVector shuf_mask_lo_highclear, SuperVector shuf_mask_lo_highset, SuperVector chars, const u8 *buf) { SuperVector res = blockSingleMask(shuf_mask_lo_highclear, shuf_mask_lo_highset, chars); - return firstMatch(buf, res); } diff --git a/src/util/arch/ppc64el/simd_utils.h b/src/util/arch/ppc64el/simd_utils.h index 6e93651e..d27832d4 100644 --- a/src/util/arch/ppc64el/simd_utils.h +++ b/src/util/arch/ppc64el/simd_utils.h @@ -202,43 +202,24 @@ static really_inline m128 eq64_m128(m128 a, m128 b) { static really_inline u32 movemask128(m128 a) { - //printf("input vector:"); - //for (int i=3; i>=0; i--) {printf("%04x, ", a[i]);} - //printf("\n"); uint8x16_t s1 = vec_sr((uint8x16_t)a, vec_splat_u8(7)); - //printf("s1:"); - //for (int i=15; i>=0; i--) {printf("%02x, ", s1[i]);} - //printf("\n"); + uint16x8_t ss = 
vec_sr((uint16x8_t)s1, vec_splat_u16(7)); uint16x8_t res_and = vec_and((uint16x8_t)s1, vec_splats((uint16_t)0xff)); uint16x8_t s2 = vec_or((uint16x8_t)ss, res_and); - //printf("s2:"); - //for (int i=7; i>=0; i--) {printf("%04x, ", s2[i]);} - //printf("\n"); - + uint32x4_t ss2 = vec_sr((uint32x4_t)s2, vec_splat_u32(14)); uint32x4_t res_and2 = vec_and((uint32x4_t)s2, vec_splats((uint32_t)0xff)); uint32x4_t s3 = vec_or((uint32x4_t)ss2, res_and2); - //printf("s3:"); - //for (int i=3; i>=0; i--) {printf("%08x, ", s3[i]);} - //printf("\n"); - + uint64x2_t ss3 = vec_sr((uint64x2_t)s3, (uint64x2_t)vec_splats(28)); uint64x2_t res_and3 = vec_and((uint64x2_t)s3, vec_splats((uint64_t)0xff)); uint64x2_t s4 = vec_or((uint64x2_t)ss3, res_and3); - //printf("s4:"); - //for (int i=1; i>=0; i--) {printf("%016llx, ", s4[i]);} - //printf("\n"); - + uint64x2_t ss4 = vec_sld((uint64x2_t)vec_splats(0), s4, 9); uint64x2_t res_and4 = vec_and((uint64x2_t)s4, vec_splats((uint64_t)0xff)); uint64x2_t s5 = vec_or((uint64x2_t)ss4, res_and4); - //printf("s5:"); - //for (int i=1; i>=0; i--) {printf("%016llx, ", s5[i]);} - //printf("\n"); - - - //printf("%lld and %lld\n", s5[0],s5[1]); + return s5[0]; } @@ -305,10 +286,6 @@ switch (imm) { } static really_inline m128 low64from128(const m128 in) { - //int64x2_t v = vec_perm((int64x2_t)in, (int64x2_t)vec_splats((uint64_t)0), (uint8x16_t)vec_splat_u8(1)); - //printf("v:"); - //for (int i=1; i>=0; i++) {printf("%016llx",v[i]);} - //printf("\n"); return (m128) vec_perm((int64x2_t)in, (int64x2_t)vec_splats((uint64_t)0), (uint8x16_t)vec_splat_u8(1)); } @@ -340,7 +317,7 @@ static really_inline m128 andnot128(m128 a, m128 b) { // aligned load static really_inline m128 load128(const void *ptr) { assert(ISALIGNED_N(ptr, alignof(m128))); - return (m128) vec_xl(0, (const int64_t*)ptr); + return (m128) vec_xl(0, (const int32_t*)ptr); } // aligned store @@ -351,7 +328,7 @@ static really_inline void store128(void *ptr, m128 a) { // unaligned load static 
really_inline m128 loadu128(const void *ptr) { - return (m128) vec_xl(0, (const int64_t*)ptr); + return (m128) vec_xl(0, (const int32_t*)ptr); } // unaligned store diff --git a/src/util/supervector/arch/ppc64el/impl.cpp b/src/util/supervector/arch/ppc64el/impl.cpp index ce975cec..acdb89d4 100644 --- a/src/util/supervector/arch/ppc64el/impl.cpp +++ b/src/util/supervector/arch/ppc64el/impl.cpp @@ -218,22 +218,11 @@ template <> really_inline typename SuperVector<16>::movemask_type SuperVector<16>::movemask(void)const { uint8x16_t s1 = vec_sr((uint8x16_t)u.v128[0], vec_splat_u8(7)); - //printf("s1:"); - //for(int i=15; i>=0; i--) {printf("%02x, ",s1[i]);} - //printf("\n"); + uint16x8_t ss = vec_sr((uint16x8_t)s1, vec_splat_u16(7)); - //printf("ss:"); - //for(int i=7; i>=0; i--) {printf("%04x, ",ss[i]);} - //printf("\n"); uint16x8_t res_and = vec_and((uint16x8_t)s1, vec_splats((uint16_t)0xff)); - //printf("res_and:"); - //for(int i=7; i>=0; i--) {printf("%04x, ",res_and[i]);} - //printf("\n"); uint16x8_t s2 = vec_or((uint16x8_t)ss, res_and); - //printf("s2:"); - //for(int i=7; i>=0; i--) {printf("%04x, ",s2[i]);} - //printf("\n"); - + uint32x4_t ss2 = vec_sr((uint32x4_t)s2 , vec_splat_u32(14)); uint32x4_t res_and2 = vec_and((uint32x4_t)s2, vec_splats((uint32_t)0xff)); uint32x4_t s3 = vec_or((uint32x4_t)ss2, res_and2); @@ -246,9 +235,6 @@ really_inline typename SuperVector<16>::movemask_type SuperVector<16>::movemask( uint64x2_t res_and4 = vec_and((uint64x2_t)s4, vec_splats((uint64_t)0xff)); uint64x2_t s5 = vec_or((uint64x2_t)ss4, res_and4); - //printf("s5:"); - //for(int i=1; i>=0; i--) {printf("%016llx, ",s5[i]);} - //printf("\n"); return s5[0]; } @@ -264,7 +250,6 @@ template really_inline SuperVector<16> SuperVector<16>::vshl_8_imm() const { return { (m128) vec_sl((int8x16_t)u.v128[0], vec_splats((uint8_t)N)) }; - //return {(m128)vshlq_n_s8(u.v128[0], N)}; } template <> @@ -272,7 +257,6 @@ template really_inline SuperVector<16> SuperVector<16>::vshl_16_imm() const { 
return { (m128) vec_sl((int16x8_t)u.v128[0], vec_splats((uint16_t)N)) }; - //return {(m128)vshlq_n_s16(u.v128[0], N)}; } template <> @@ -280,8 +264,6 @@ template really_inline SuperVector<16> SuperVector<16>::vshl_32_imm() const { return { (m128) vec_sl((int32x4_t)u.v128[0], vec_splats((uint32_t)N)) }; - //return {(m128)vshlq_n_s32(u.v128[0], N)}; - } template <> @@ -289,7 +271,6 @@ template really_inline SuperVector<16> SuperVector<16>::vshl_64_imm() const { return { (m128) vec_sl((int64x2_t)u.v128[0], vec_splats((uint64_t)N)) }; - //return {(m128)vshlq_n_s64(u.v128[0], N)}; } template <> @@ -297,7 +278,6 @@ template really_inline SuperVector<16> SuperVector<16>::vshl_128_imm() const { return { (m128) vec_sld((int8x16_t)u.v128[0], (int8x16_t)vec_splat_s8(0), N)}; - //return {vextq_s8(vdupq_n_u8(0), (int16x8_t)u.v128[0], 16 - N)}; } template <> @@ -312,7 +292,6 @@ template really_inline SuperVector<16> SuperVector<16>::vshr_8_imm() const { return { (m128) vec_sr((int8x16_t)u.v128[0], vec_splats((uint8_t)N)) }; - //return {(m128)vshrq_n_s8(u.v128[0], N)}; } template <> @@ -320,7 +299,6 @@ template really_inline SuperVector<16> SuperVector<16>::vshr_16_imm() const { return { (m128) vec_sr((int16x8_t)u.v128[0], vec_splats((uint16_t)N)) }; - //return {(m128)vshrq_n_s16(u.v128[0], N)}; } template <> @@ -328,7 +306,6 @@ template really_inline SuperVector<16> SuperVector<16>::vshr_32_imm() const { return { (m128) vec_sr((int32x4_t)u.v128[0], vec_splats((uint32_t)N)) }; - //return {(m128)vshrq_n_s32(u.v128[0], N)}; } template <> @@ -336,7 +313,6 @@ template really_inline SuperVector<16> SuperVector<16>::vshr_64_imm() const { return { (m128) vec_sr((int64x2_t)u.v128[0], vec_splats((uint64_t)N)) }; - //return {(m128)vshrq_n_s64(u.v128[0], N)}; } template <> @@ -344,7 +320,6 @@ template really_inline SuperVector<16> SuperVector<16>::vshr_128_imm() const { return { (m128) vec_sld((int8x16_t)vec_splat_s8(0), (int8x16_t)u.v128[0], 16 - N) }; - //return 
{vextq_s8((int16x8_t)u.v128[0], vdupq_n_u8(0), N)}; } template <> @@ -377,7 +352,6 @@ really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) const if (N == 16) return Zeroes(); SuperVector result; Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sl((int8x16_t)u.v128[0], vec_splats((uint8_t)n))}; }); - //Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshlq_n_s8(u.v128[0], n)}; }); return result; } @@ -388,7 +362,6 @@ really_inline SuperVector<16> SuperVector<16>::vshl_16 (uint8_t const UNUSED N) if (N == 16) return Zeroes(); SuperVector result; Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sl((int16x8_t)u.v128[0], vec_splats((uint16_t)n))}; }); - //Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshlq_n_s16(u.v128[0], n)}; }); return result; } @@ -399,7 +372,6 @@ really_inline SuperVector<16> SuperVector<16>::vshl_32 (uint8_t const N) const if (N == 16) return Zeroes(); SuperVector result; Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sl((int32x4_t)u.v128[0], vec_splats((uint32_t)n))}; }); - //Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshlq_n_s32(u.v128[0], n)}; }); return result; } @@ -436,7 +408,6 @@ really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) const if (N == 16) return Zeroes(); SuperVector result; Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sr((int8x16_t)u.v128[0], vec_splats((uint8_t)n))}; }); - //Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshrq_n_s8(u.v128[0], n)}; }); return 
result; } @@ -447,7 +418,6 @@ really_inline SuperVector<16> SuperVector<16>::vshr_16 (uint8_t const N) const if (N == 16) return Zeroes(); SuperVector result; Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sr((int16x8_t)u.v128[0], vec_splats((uint16_t)n))}; }); - //Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshrq_n_s16(u.v128[0], n)}; }); return result; } @@ -458,7 +428,6 @@ really_inline SuperVector<16> SuperVector<16>::vshr_32 (uint8_t const N) const if (N == 16) return Zeroes(); SuperVector result; Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sr((int32x4_t)u.v128[0], vec_splats((uint32_t)n))}; }); - //Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshrq_n_s32(u.v128[0], n)}; }); return result; } @@ -616,8 +585,8 @@ template<> really_inline SuperVector<16> SuperVector<16>::pshufb(SuperVector<16> b) { /* On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf. - In NEON, if >=16, then the result is zero, otherwise it is that lane. - btranslated is the version that is converted from Intel to NEON. */ + In NEON or PPC, if >=16, then the result is zero, otherwise it is that lane. + btranslated is the version that is converted from Intel to PPC. 
*/ SuperVector<16> btranslated = b & SuperVector<16>::dup_s8(0x8f); return pshufb(btranslated); } diff --git a/unit/internal/shuffle.cpp b/unit/internal/shuffle.cpp index 038c6193..f1a03d5a 100644 --- a/unit/internal/shuffle.cpp +++ b/unit/internal/shuffle.cpp @@ -187,7 +187,7 @@ TEST(Shuffle, PackedExtract128_1) { // shuffle a single 1 bit to the front m128 permute, compare; build_pshufb_masks_onebit(i, &permute, &compare); - EXPECT_EQ(1U, packedExtract128(setbit(i), permute, compare)); + EXPECT_EQ(1U, packedExtract128(setbit(i), permute, compare)); EXPECT_EQ(1U, packedExtract128(ones128(), permute, compare)); // we should get zero out of these cases EXPECT_EQ(0U, packedExtract128(zeroes128(), permute, compare)); diff --git a/unit/internal/simd_utils.cpp b/unit/internal/simd_utils.cpp index 1f16adcd..884f2d0a 100644 --- a/unit/internal/simd_utils.cpp +++ b/unit/internal/simd_utils.cpp @@ -852,11 +852,11 @@ TEST(SimdUtilsTest, pshufb_m128) { vec2[i]=i + (rand() % 100 + 0); } - /* On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf. - In NEON or PPC, if >=16, then the result is zero, otherwise it is that lane. - Thus bellow we have to check that case to NEON or PPC. */ + // On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf. + // In NEON or PPC, if >=16, then the result is zero, otherwise it is that lane. + // Thus below we have to check that case for NEON or PPC. - /*Insure that vec3 has at least 1 or more 0x80 elements*/ + //Ensure that vec3 has at least 1 or more 0x80 elements u8 vec3[16] = {0}; vec3[15] = 0x80; @@ -874,7 +874,7 @@ TEST(SimdUtilsTest, pshufb_m128) { printf("\n"); */ - /*Test Special Case*/ + //Test Special Case m128 v1 = loadu128(vec); m128 v2 = loadu128(vec3); m128 vres = pshufb_m128(v1, v2); @@ -890,7 +890,7 @@ TEST(SimdUtilsTest, pshufb_m128) { } } - /*Test Other Cases*/ + //Test Other Cases v1 = loadu128(vec); v2 = loadu128(vec2); vres = pshufb_m128(v1, v2);