Add truffle SVE implementation

Signed-off-by: Yoan Picchi <yoan.picchi@arm.com>
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2015-2017, Intel Corporation
  * Copyright (c) 2020-2023, VectorCamp PC
+ * Copyright (c) 2023, Arm Limited
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -41,9 +42,14 @@
 #include "util/supervector/supervector.hpp"
 #include "util/match.hpp"
 
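+// blockSingleMask returns a per-byte match mask for 'chars' against the
+// truffle shuffle masks; non-zero lanes mark matching bytes. It is declared
+// here and defined in the per-arch headers included below.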
+#ifdef HAVE_SVE
+static really_inline
+svuint8_t blockSingleMask(svuint8_t shuf_mask_lo_highclear, svuint8_t shuf_mask_lo_highset, svuint8_t chars);
+#else
 template <uint16_t S>
 static really_inline
 const SuperVector<S> blockSingleMask(SuperVector<S> shuf_mask_lo_highclear, SuperVector<S> shuf_mask_lo_highset, SuperVector<S> chars);
+#endif //HAVE_SVE
 
 #if defined(VS_SIMDE_BACKEND)
 #include "x86/truffle.hpp"
@@ -57,6 +63,162 @@ const SuperVector<S> blockSingleMask(SuperVector<S> shuf_mask_lo_highclear, Supe
 #endif
 #endif
 
+#ifdef HAVE_SVE
+
+const u8 *truffleExecSVE(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highset,
+                         const u8 *buf, const u8 *buf_end);
+
+const u8 *rtruffleExecSVE(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highset,
+                          const u8 *buf, const u8 *buf_end);
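+
+// Scan one vector's worth of input. Returns a pointer to the first (forward)
+// or last (reverse) byte in the block that matches the truffle masks, or
+// NULL if the block contains no match.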
+static really_inline
+const u8 *scanBlock(svuint8_t shuf_mask_lo_highclear, svuint8_t shuf_mask_lo_highset, svuint8_t chars, const u8 *buf, bool forward) {
+
+    const size_t vector_size_int_8 = svcntb();
+
+    const svuint8_t result_mask = blockSingleMask(shuf_mask_lo_highclear, shuf_mask_lo_highset, chars);
+    uint64_t index;
+    if (forward) {
+        index = first_non_zero(vector_size_int_8, result_mask);
+    } else {
+        index = last_non_zero(vector_size_int_8, result_mask);
+    }
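+
+    // first_non_zero/last_non_zero yield an index past the vector end when
+    // no lane is set, so an in-range index means a match at buf + index.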
+    if (index < vector_size_int_8) {
+        return buf + index;
+    } else {
+        return NULL;
+    }
+}
+
+really_inline
+const u8 *truffleExecSVE(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highset, const u8 *buf, const u8 *buf_end) {
+    const int vect_size_int8 = svcntb();
+    // Activate only 16 lanes to read the m128 buffers
+    const svbool_t lane_pred_16 = svwhilelt_b8(0, 16);
+    assert(buf && buf_end);
+    assert(buf < buf_end);
+    DEBUG_PRINTF("truffle %p len %zu\n", buf, buf_end - buf);
+    DEBUG_PRINTF("b %s\n", buf);
+
+    svuint8_t wide_shuf_mask_lo_highclear = svld1(lane_pred_16, (uint8_t*) &shuf_mask_lo_highclear);
+    svuint8_t wide_shuf_mask_lo_highset = svld1(lane_pred_16, (uint8_t*) &shuf_mask_lo_highset);
+
+    const u8 *work_buffer = buf;
+    const u8 *ret_val;
+
+    DEBUG_PRINTF("start %p end %p \n", work_buffer, buf_end);
+    assert(work_buffer < buf_end);
+
+    __builtin_prefetch(work_buffer + 16*64);
+
+    if (work_buffer + vect_size_int8 <= buf_end) {
+        // Reach vector aligned boundaries
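+        // Scan the unaligned head once, then continue from the next
+        // vector-aligned address so the main loop can use aligned loads.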
+        DEBUG_PRINTF("until aligned %p \n", ROUNDUP_PTR(work_buffer, vect_size_int8));
+        if (!ISALIGNED_N(work_buffer, vect_size_int8)) {
+            svuint8_t chars = svld1(svptrue_b8(), work_buffer);
+            const u8 *aligned_buffer = ROUNDUP_PTR(work_buffer, vect_size_int8);
+            ret_val = scanBlock(wide_shuf_mask_lo_highclear, wide_shuf_mask_lo_highset, chars, work_buffer, true);
+            if (ret_val && ret_val < aligned_buffer) return ret_val;
+            work_buffer = aligned_buffer;
+        }
+
+        while (work_buffer + vect_size_int8 <= buf_end) {
+            __builtin_prefetch(work_buffer + 16*64);
+            DEBUG_PRINTF("work_buffer %p \n", work_buffer);
+            svuint8_t chars = svld1(svptrue_b8(), work_buffer);
+            ret_val = scanBlock(wide_shuf_mask_lo_highclear, wide_shuf_mask_lo_highset, chars, work_buffer, true);
+            if (ret_val) return ret_val;
+            work_buffer += vect_size_int8;
+        }
+    }
+
+    DEBUG_PRINTF("work_buffer %p e %p \n", work_buffer, buf_end);
+    // finish off tail
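+    // Either load the whole short buffer under a partial predicate, or
+    // re-scan the last full vector ending at buf_end; bytes already scanned
+    // above cannot match, so only new positions can be returned.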
+
+    if (work_buffer != buf_end) {
+        svuint8_t chars;
+        const u8 *end_buf;
+        if (buf_end - buf < vect_size_int8) {
+            const svbool_t remaining_lanes = svwhilelt_b8(0ll, buf_end - buf);
+            chars = svld1(remaining_lanes, buf);
+            end_buf = buf;
+        } else {
+            chars = svld1(svptrue_b8(), buf_end - vect_size_int8);
+            end_buf = buf_end - vect_size_int8;
+        }
+        ret_val = scanBlock(wide_shuf_mask_lo_highclear, wide_shuf_mask_lo_highset, chars, end_buf, true);
+        DEBUG_PRINTF("ret_val %p \n", ret_val);
+        if (ret_val && ret_val < buf_end) return ret_val;
+    }
+
+    return buf_end;
+}
+
+really_inline
+const u8 *rtruffleExecSVE(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highset, const u8 *buf, const u8 *buf_end) {
+    const int vect_size_int8 = svcntb();
+    // Activate only 16 lanes to read the m128 buffers
+    const svbool_t lane_pred_16 = svwhilelt_b8(0, 16);
+    assert(buf && buf_end);
+    assert(buf < buf_end);
+    DEBUG_PRINTF("rtruffle %p len %zu\n", buf, buf_end - buf);
+    DEBUG_PRINTF("b %s\n", buf);
+
+    svuint8_t wide_shuf_mask_lo_highclear = svld1(lane_pred_16, (uint8_t*) &shuf_mask_lo_highclear);
+    svuint8_t wide_shuf_mask_lo_highset = svld1(lane_pred_16, (uint8_t*) &shuf_mask_lo_highset);
+
+    const u8 *work_buffer = buf_end;
+    const u8 *ret_val;
+
+    DEBUG_PRINTF("start %p end %p \n", buf, work_buffer);
+    assert(work_buffer > buf);
+
+    __builtin_prefetch(work_buffer - 16*64);
+
+    if (work_buffer - vect_size_int8 >= buf) {
+        // Reach vector aligned boundaries
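+        // Scan the unaligned tail once, then continue downward from the
+        // previous vector-aligned address so the main loop uses aligned loads.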
+        DEBUG_PRINTF("until aligned %p \n", ROUNDDOWN_PTR(work_buffer, vect_size_int8));
+        if (!ISALIGNED_N(work_buffer, vect_size_int8)) {
+            svuint8_t chars = svld1(svptrue_b8(), work_buffer - vect_size_int8);
+            const u8 *aligned_buffer = ROUNDDOWN_PTR(work_buffer, vect_size_int8);
+            ret_val = scanBlock(wide_shuf_mask_lo_highclear, wide_shuf_mask_lo_highset, chars, work_buffer - vect_size_int8, false);
+            DEBUG_PRINTF("ret_val %p \n", ret_val);
+            if (ret_val >= aligned_buffer) return ret_val;
+            work_buffer = aligned_buffer;
+        }
+
+        while (work_buffer - vect_size_int8 >= buf) {
+            DEBUG_PRINTF("aligned %p \n", work_buffer);
+            // On large packet buffers, this prefetch appears to get us about 2%.
+            __builtin_prefetch(work_buffer - 16*64);
+
+            work_buffer -= vect_size_int8;
+            svuint8_t chars = svld1(svptrue_b8(), work_buffer);
+            ret_val = scanBlock(wide_shuf_mask_lo_highclear, wide_shuf_mask_lo_highset, chars, work_buffer, false);
+            if (ret_val) return ret_val;
+        }
+    }
+
+    DEBUG_PRINTF("tail work_buffer %p e %p \n", buf, work_buffer);
+    // finish off head
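+    // The remaining head is shorter than one vector. Load from buf (with a
+    // partial predicate if the whole buffer is short) and scan in reverse;
+    // positions at or beyond work_buffer were already scanned above.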
+
+    if (work_buffer != buf) {
+        svuint8_t chars;
+        if (buf_end - buf < vect_size_int8) {
+            const svbool_t remaining_lanes = svwhilelt_b8(0ll, buf_end - buf);
+            chars = svld1(remaining_lanes, buf);
+        } else {
+            chars = svld1(svptrue_b8(), buf);
+        }
+        ret_val = scanBlock(wide_shuf_mask_lo_highclear, wide_shuf_mask_lo_highset, chars, buf, false);
+        DEBUG_PRINTF("ret_val %p \n", ret_val);
+        if (ret_val && ret_val < buf_end) return ret_val;
+    }
+
+    return buf - 1;
+}
+#else
 template <uint16_t S>
 static really_inline
 const u8 *fwdBlock(SuperVector<S> shuf_mask_lo_highclear, SuperVector<S> shuf_mask_lo_highset, SuperVector<S> chars, const u8 *buf) {
@@ -77,13 +239,7 @@ const u8 *truffleExecReal(m128 &shuf_mask_lo_highclear, m128 shuf_mask_lo_highse
     const u8 *d = buf;
     const u8 *rv;
 
-    DEBUG_PRINTF("start %p end %p \n", d, buf_end);
-    assert(d < buf_end);
-
-    __builtin_prefetch(d + 64);
-    __builtin_prefetch(d + 2*64);
-    __builtin_prefetch(d + 3*64);
-    __builtin_prefetch(d + 4*64);
+    __builtin_prefetch(d + 16*64);
     DEBUG_PRINTF("start %p end %p \n", d, buf_end);
     assert(d < buf_end);
     if (d + S <= buf_end) {
@@ -98,7 +254,7 @@ const u8 *truffleExecReal(m128 &shuf_mask_lo_highclear, m128 shuf_mask_lo_highse
         }
 
         while (d + S <= buf_end) {
-            __builtin_prefetch(d + 64);
+            __builtin_prefetch(d + 16*64);
             DEBUG_PRINTF("d %p \n", d);
             SuperVector<S> chars = SuperVector<S>::load(d);
             rv = fwdBlock(wide_shuf_mask_lo_highclear, wide_shuf_mask_lo_highset, chars, d);
@@ -149,10 +305,7 @@ const u8 *rtruffleExecReal(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highse
     const u8 *d = buf_end;
     const u8 *rv;
 
-    __builtin_prefetch(d - 64);
-    __builtin_prefetch(d - 2*64);
-    __builtin_prefetch(d - 3*64);
-    __builtin_prefetch(d - 4*64);
+    __builtin_prefetch(d - 16*64);
     DEBUG_PRINTF("start %p end %p \n", buf, d);
     assert(d > buf);
     if (d - S >= buf) {
@@ -170,7 +323,7 @@ const u8 *rtruffleExecReal(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highse
         while (d - S >= buf) {
             DEBUG_PRINTF("aligned %p \n", d);
             // On large packet buffers, this prefetch appears to get us about 2%.
-            __builtin_prefetch(d - 64);
+            __builtin_prefetch(d - 16*64);
 
             d -= S;
             SuperVector<S> chars = SuperVector<S>::load(d);
@@ -196,3 +349,4 @@ const u8 *rtruffleExecReal(m128 shuf_mask_lo_highclear, m128 shuf_mask_lo_highse
 
     return buf - 1;
 }
+#endif //HAVE_SVE