prints comments and formatting fixes

This commit is contained in:
apostolos 2021-11-01 10:05:25 +02:00
parent 3f17750a27
commit d9d39d48c5
6 changed files with 19 additions and 74 deletions

View File

@ -58,5 +58,5 @@ const SuperVector<S> blockSingleMask(SuperVector<S> shuf_mask_lo_highclear, Supe
SuperVector<S> res = (shuf1 | shuf2) & shuf3;
res.print8("(shuf1 | shuf2) & shuf3");
return !res.eq(SuperVector<S>::Zeroes());
return res.eq(SuperVector<S>::Zeroes());
}

View File

@ -57,7 +57,6 @@ template <uint16_t S>
static really_inline
const u8 *fwdBlock(SuperVector<S> shuf_mask_lo_highclear, SuperVector<S> shuf_mask_lo_highset, SuperVector<S> chars, const u8 *buf) {
SuperVector<S> res = blockSingleMask(shuf_mask_lo_highclear, shuf_mask_lo_highset, chars);
return firstMatch<S>(buf, res);
}

View File

@ -202,43 +202,24 @@ static really_inline m128 eq64_m128(m128 a, m128 b) {
// Packs the top (sign) bit of each of the 16 bytes of 'a' into the low 16
// bits of the returned u32 — the PPC64/Altivec analogue of x86's
// _mm_movemask_epi8. Works by a log-style shift-or reduction: at each stage
// the per-lane bit groups of adjacent half-lanes are OR-ed together into
// the wider lane, doubling the number of contiguous mask bits per lane.
static really_inline u32 movemask128(m128 a) {
//printf("input vector:");
//for (int i=3; i>=0; i--) {printf("%04x, ", a[i]);}
//printf("\n");
// Stage 0: shift each byte right by 7 so every byte holds 0 or 1
// (its former sign bit in bit position 0).
uint8x16_t s1 = vec_sr((uint8x16_t)a, vec_splat_u8(7));
//printf("s1:");
//for (int i=15; i>=0; i--) {printf("%02x, ", s1[i]);}
//printf("\n");
// Stage 1: within each 16-bit lane, move the high byte's bit down next to
// the low byte's bit (>>7 within u16), keep the low byte via the 0xff mask,
// and OR — now 2 mask bits per 16-bit lane.
uint16x8_t ss = vec_sr((uint16x8_t)s1, vec_splat_u16(7));
uint16x8_t res_and = vec_and((uint16x8_t)s1, vec_splats((uint16_t)0xff));
uint16x8_t s2 = vec_or((uint16x8_t)ss, res_and);
//printf("s2:");
//for (int i=7; i>=0; i--) {printf("%04x, ", s2[i]);}
//printf("\n");
// Stage 2: same trick at 32-bit width (>>14 brings the upper pair of bits
// adjacent to the lower pair) — 4 mask bits per 32-bit lane.
uint32x4_t ss2 = vec_sr((uint32x4_t)s2, vec_splat_u32(14));
uint32x4_t res_and2 = vec_and((uint32x4_t)s2, vec_splats((uint32_t)0xff));
uint32x4_t s3 = vec_or((uint32x4_t)ss2, res_and2);
//printf("s3:");
//for (int i=3; i>=0; i--) {printf("%08x, ", s3[i]);}
//printf("\n");
// Stage 3: 64-bit width (>>28) — 8 mask bits in the low byte of each
// 64-bit lane.
uint64x2_t ss3 = vec_sr((uint64x2_t)s3, (uint64x2_t)vec_splats(28));
uint64x2_t res_and3 = vec_and((uint64x2_t)s3, vec_splats((uint64_t)0xff));
uint64x2_t s4 = vec_or((uint64x2_t)ss3, res_and3);
//printf("s4:");
//for (int i=1; i>=0; i--) {printf("%016llx, ", s4[i]);}
//printf("\n");
// Stage 4: vec_sld with a zero vector byte-shifts the other lane's mask
// byte into position so the final OR yields all 16 bits in element 0.
// NOTE(review): the shift count 9 depends on vec_sld's big-endian byte
// ordering semantics — verify against the Power vector intrinsics
// reference before touching it.
uint64x2_t ss4 = vec_sld((uint64x2_t)vec_splats(0), s4, 9);
uint64x2_t res_and4 = vec_and((uint64x2_t)s4, vec_splats((uint64_t)0xff));
uint64x2_t s5 = vec_or((uint64x2_t)ss4, res_and4);
//printf("s5:");
//for (int i=1; i>=0; i--) {printf("%016llx, ", s5[i]);}
//printf("\n");
//printf("%lld and %lld\n", s5[0],s5[1]);
// Low 64-bit element now holds the complete 16-bit movemask.
return s5[0];
}
@ -305,10 +286,6 @@ switch (imm) {
}
// Builds a vector whose 64-bit elements are taken from 'in' permuted with a
// zero vector, selected by the pattern vec_splat_u8(1).
// NOTE(review): the name suggests "low 64 bits of 'in', upper half zeroed",
// but the exact lane selected depends on vec_perm's big-endian byte indexing
// with the splat(1) control — confirm against the Power vec_perm reference
// and the callers before relying on that reading.
static really_inline m128 low64from128(const m128 in) {
//int64x2_t v = vec_perm((int64x2_t)in, (int64x2_t)vec_splats((uint64_t)0), (uint8x16_t)vec_splat_u8(1));
//printf("v:");
//for (int i=1; i>=0; i++) {printf("%016llx",v[i]);}
//printf("\n");
return (m128) vec_perm((int64x2_t)in, (int64x2_t)vec_splats((uint64_t)0), (uint8x16_t)vec_splat_u8(1));
}
@ -340,7 +317,7 @@ static really_inline m128 andnot128(m128 a, m128 b) {
// Aligned 128-bit load: caller must guarantee 'ptr' is 16-byte aligned
// (enforced by the assert in debug builds).
static really_inline m128 load128(const void *ptr) {
assert(ISALIGNED_N(ptr, alignof(m128)));
// vec_xl with an int32_t element pointer loads the full 128 bits and
// avoids the 64-bit-element vec_xl overload, which is not available on
// all supported Power targets. (The listing showed a stale duplicate
// int64_t return — unreachable dead code — which is removed here.)
return (m128) vec_xl(0, (const int32_t*)ptr);
}
// aligned store
@ -351,7 +328,7 @@ static really_inline void store128(void *ptr, m128 a) {
// Unaligned 128-bit load: vec_xl handles arbitrary alignment.
static really_inline m128 loadu128(const void *ptr) {
// int32_t element type matches load128 and sidesteps the 64-bit-element
// vec_xl overload missing on some Power targets. (The listing showed a
// stale duplicate int64_t return — unreachable dead code — removed here.)
return (m128) vec_xl(0, (const int32_t*)ptr);
}
// unaligned store

View File

@ -218,22 +218,11 @@ template <>
really_inline typename SuperVector<16>::movemask_type SuperVector<16>::movemask(void)const
{
uint8x16_t s1 = vec_sr((uint8x16_t)u.v128[0], vec_splat_u8(7));
//printf("s1:");
//for(int i=15; i>=0; i--) {printf("%02x, ",s1[i]);}
//printf("\n");
uint16x8_t ss = vec_sr((uint16x8_t)s1, vec_splat_u16(7));
//printf("ss:");
//for(int i=7; i>=0; i--) {printf("%04x, ",ss[i]);}
//printf("\n");
uint16x8_t res_and = vec_and((uint16x8_t)s1, vec_splats((uint16_t)0xff));
//printf("res_and:");
//for(int i=7; i>=0; i--) {printf("%04x, ",res_and[i]);}
//printf("\n");
uint16x8_t s2 = vec_or((uint16x8_t)ss, res_and);
//printf("s2:");
//for(int i=7; i>=0; i--) {printf("%04x, ",s2[i]);}
//printf("\n");
uint32x4_t ss2 = vec_sr((uint32x4_t)s2 , vec_splat_u32(14));
uint32x4_t res_and2 = vec_and((uint32x4_t)s2, vec_splats((uint32_t)0xff));
uint32x4_t s3 = vec_or((uint32x4_t)ss2, res_and2);
@ -246,9 +235,6 @@ really_inline typename SuperVector<16>::movemask_type SuperVector<16>::movemask(
uint64x2_t res_and4 = vec_and((uint64x2_t)s4, vec_splats((uint64_t)0xff));
uint64x2_t s5 = vec_or((uint64x2_t)ss4, res_and4);
//printf("s5:");
//for(int i=1; i>=0; i--) {printf("%016llx, ",s5[i]);}
//printf("\n");
return s5[0];
}
@ -264,7 +250,6 @@ template<uint8_t N>
really_inline SuperVector<16> SuperVector<16>::vshl_8_imm() const
{
return { (m128) vec_sl((int8x16_t)u.v128[0], vec_splats((uint8_t)N)) };
//return {(m128)vshlq_n_s8(u.v128[0], N)};
}
template <>
@ -272,7 +257,6 @@ template<uint8_t N>
really_inline SuperVector<16> SuperVector<16>::vshl_16_imm() const
{
return { (m128) vec_sl((int16x8_t)u.v128[0], vec_splats((uint16_t)N)) };
//return {(m128)vshlq_n_s16(u.v128[0], N)};
}
template <>
@ -280,8 +264,6 @@ template<uint8_t N>
really_inline SuperVector<16> SuperVector<16>::vshl_32_imm() const
{
return { (m128) vec_sl((int32x4_t)u.v128[0], vec_splats((uint32_t)N)) };
//return {(m128)vshlq_n_s32(u.v128[0], N)};
}
template <>
@ -289,7 +271,6 @@ template<uint8_t N>
really_inline SuperVector<16> SuperVector<16>::vshl_64_imm() const
{
return { (m128) vec_sl((int64x2_t)u.v128[0], vec_splats((uint64_t)N)) };
//return {(m128)vshlq_n_s64(u.v128[0], N)};
}
template <>
@ -297,7 +278,6 @@ template<uint8_t N>
really_inline SuperVector<16> SuperVector<16>::vshl_128_imm() const
{
return { (m128) vec_sld((int8x16_t)u.v128[0], (int8x16_t)vec_splat_s8(0), N)};
//return {vextq_s8(vdupq_n_u8(0), (int16x8_t)u.v128[0], 16 - N)};
}
template <>
@ -312,7 +292,6 @@ template<uint8_t N>
really_inline SuperVector<16> SuperVector<16>::vshr_8_imm() const
{
return { (m128) vec_sr((int8x16_t)u.v128[0], vec_splats((uint8_t)N)) };
//return {(m128)vshrq_n_s8(u.v128[0], N)};
}
template <>
@ -320,7 +299,6 @@ template<uint8_t N>
really_inline SuperVector<16> SuperVector<16>::vshr_16_imm() const
{
return { (m128) vec_sr((int16x8_t)u.v128[0], vec_splats((uint16_t)N)) };
//return {(m128)vshrq_n_s16(u.v128[0], N)};
}
template <>
@ -328,7 +306,6 @@ template<uint8_t N>
really_inline SuperVector<16> SuperVector<16>::vshr_32_imm() const
{
return { (m128) vec_sr((int32x4_t)u.v128[0], vec_splats((uint32_t)N)) };
//return {(m128)vshrq_n_s32(u.v128[0], N)};
}
template <>
@ -336,7 +313,6 @@ template<uint8_t N>
really_inline SuperVector<16> SuperVector<16>::vshr_64_imm() const
{
return { (m128) vec_sr((int64x2_t)u.v128[0], vec_splats((uint64_t)N)) };
//return {(m128)vshrq_n_s64(u.v128[0], N)};
}
template <>
@ -344,7 +320,6 @@ template<uint8_t N>
really_inline SuperVector<16> SuperVector<16>::vshr_128_imm() const
{
return { (m128) vec_sld((int8x16_t)vec_splat_s8(0), (int8x16_t)u.v128[0], 16 - N) };
//return {vextq_s8((int16x8_t)u.v128[0], vdupq_n_u8(0), N)};
}
template <>
@ -377,7 +352,6 @@ really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) const
if (N == 16) return Zeroes();
SuperVector result;
Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sl((int8x16_t)u.v128[0], vec_splats((uint8_t)n))}; });
//Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshlq_n_s8(u.v128[0], n)}; });
return result;
}
@ -388,7 +362,6 @@ really_inline SuperVector<16> SuperVector<16>::vshl_16 (uint8_t const UNUSED N)
if (N == 16) return Zeroes();
SuperVector result;
Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sl((int16x8_t)u.v128[0], vec_splats((uint16_t)n))}; });
//Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshlq_n_s16(u.v128[0], n)}; });
return result;
}
@ -399,7 +372,6 @@ really_inline SuperVector<16> SuperVector<16>::vshl_32 (uint8_t const N) const
if (N == 16) return Zeroes();
SuperVector result;
Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sl((int32x4_t)u.v128[0], vec_splats((uint32_t)n))}; });
//Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshlq_n_s32(u.v128[0], n)}; });
return result;
}
@ -436,7 +408,6 @@ really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) const
if (N == 16) return Zeroes();
SuperVector result;
Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sr((int8x16_t)u.v128[0], vec_splats((uint8_t)n))}; });
//Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshrq_n_s8(u.v128[0], n)}; });
return result;
}
@ -447,7 +418,6 @@ really_inline SuperVector<16> SuperVector<16>::vshr_16 (uint8_t const N) const
if (N == 16) return Zeroes();
SuperVector result;
Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sr((int16x8_t)u.v128[0], vec_splats((uint16_t)n))}; });
//Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshrq_n_s16(u.v128[0], n)}; });
return result;
}
@ -458,7 +428,6 @@ really_inline SuperVector<16> SuperVector<16>::vshr_32 (uint8_t const N) const
if (N == 16) return Zeroes();
SuperVector result;
Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128) vec_sr((int32x4_t)u.v128[0], vec_splats((uint32_t)n))}; });
//Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {(m128)vshrq_n_s32(u.v128[0], n)}; });
return result;
}
@ -616,8 +585,8 @@ template<>
really_inline SuperVector<16> SuperVector<16>::pshufb<true>(SuperVector<16> b)
{
/* On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf.
In NEON, if >=16, then the result is zero, otherwise it is that lane.
btranslated is the version that is converted from Intel to NEON. */
In NEON or PPC, if >=16, then the result is zero, otherwise it is that lane.
btranslated is the version that is converted from Intel to PPC. */
SuperVector<16> btranslated = b & SuperVector<16>::dup_s8(0x8f);
return pshufb<false>(btranslated);
}

View File

@ -187,7 +187,7 @@ TEST(Shuffle, PackedExtract128_1) {
// shuffle a single 1 bit to the front
m128 permute, compare;
build_pshufb_masks_onebit(i, &permute, &compare);
EXPECT_EQ(1U, packedExtract128(setbit<m128>(i), permute, compare));
EXPECT_EQ(1U, packedExtract128(setbit<m128>(i), permute, compare));
EXPECT_EQ(1U, packedExtract128(ones128(), permute, compare));
// we should get zero out of these cases
EXPECT_EQ(0U, packedExtract128(zeroes128(), permute, compare));

View File

@ -852,11 +852,11 @@ TEST(SimdUtilsTest, pshufb_m128) {
vec2[i]=i + (rand() % 100 + 0);
}
/* On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf.
In NEON or PPC, if >=16, then the result is zero, otherwise it is that lane.
Thus below we have to check that case for NEON or PPC. */
// On Intel, if bit 0x80 is set, then result is zero, otherwise which the lane it is &0xf.
// In NEON or PPC, if >=16, then the result is zero, otherwise it is that lane.
// Thus below we have to check that case for NEON or PPC.
/*Ensure that vec3 has at least 1 or more 0x80 elements*/
//Ensure that vec3 has at least 1 or more 0x80 elements
u8 vec3[16] = {0};
vec3[15] = 0x80;
@ -874,7 +874,7 @@ TEST(SimdUtilsTest, pshufb_m128) {
printf("\n");
*/
/*Test Special Case*/
//Test Special Case
m128 v1 = loadu128(vec);
m128 v2 = loadu128(vec3);
m128 vres = pshufb_m128(v1, v2);
@ -890,7 +890,7 @@ TEST(SimdUtilsTest, pshufb_m128) {
}
}
/*Test Other Cases*/
//Test Other Cases
v1 = loadu128(vec);
v2 = loadu128(vec2);
vres = pshufb_m128(v1, v2);