From bd7423f4f0a02c226951760268b131043d93ee95 Mon Sep 17 00:00:00 2001 From: Brad Larsen Date: Wed, 6 Mar 2024 16:32:12 -0500 Subject: [PATCH 01/19] Add CMake options for more build granularity This adds three new CMake options, all defaulting to true, making it possible to opt out of building parts of Vectorscan that are not essential for deployment of the matching runtime. These new options: - `BUILD_UNIT`: control whether the `unit` directory is included - `BUILD_DOC`: control whether the `doc` directory is included - `BUILD_TOOLS`: control whether the `tools` directory is included --- CMakeLists.txt | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d7e07a9a..c6952f41 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1221,11 +1221,17 @@ if (NOT BUILD_STATIC_LIBS) endif () add_subdirectory(util) -add_subdirectory(unit) -if (EXISTS ${CMAKE_SOURCE_DIR}/tools/CMakeLists.txt) +option(BUILD_UNIT "Build Hyperscan unit tests (default TRUE)" TRUE) +if(BUILD_UNIT) + add_subdirectory(unit) +endif() + +option(BUILD_TOOLS "Build Hyperscan tools (default TRUE)" TRUE) +if(EXISTS ${CMAKE_SOURCE_DIR}/tools/CMakeLists.txt AND BUILD_TOOLS) add_subdirectory(tools) endif() + if (EXISTS ${CMAKE_SOURCE_DIR}/chimera/CMakeLists.txt AND BUILD_CHIMERA) add_subdirectory(chimera) endif() @@ -1240,4 +1246,7 @@ if(BUILD_BENCHMARKS) add_subdirectory(benchmarks) endif() -add_subdirectory(doc/dev-reference) +option(BUILD_DOC "Build the Hyperscan documentation (default TRUE)" TRUE) +if(BUILD_DOC) + add_subdirectory(doc/dev-reference) +endif() From d9a75dc3b96b4e1bf08253dd95f81663ba49acde Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Thu, 15 Feb 2024 14:39:42 -0600 Subject: [PATCH 02/19] documentation: Add cmake option to build man pages Man pages tend to be preferred in some circles, so let's add an option to build the Vectorscan documentation that way. Signed-off-by: Jeremy Linton --- doc/dev-reference/CMakeLists.txt | 11 +++++++++++ doc/dev-reference/conf.py.in | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/doc/dev-reference/CMakeLists.txt b/doc/dev-reference/CMakeLists.txt index 449589f6..6f48e2e4 100644 --- a/doc/dev-reference/CMakeLists.txt +++ b/doc/dev-reference/CMakeLists.txt @@ -19,6 +19,7 @@ else() set(SPHINX_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build") set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees") set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/html") +set(SPHINX_MAN_DIR "${CMAKE_CURRENT_BINARY_DIR}/man") configure_file("${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in" "${CMAKE_CURRENT_BINARY_DIR}/conf.py" @ONLY) @@ -32,4 +33,14 @@ add_custom_target(dev-reference "${SPHINX_HTML_DIR}" DEPENDS dev-reference-doxygen COMMENT "Building HTML dev reference with Sphinx") + +add_custom_target(dev-reference-man + ${SPHINX_BUILD} + -b man + -c "${CMAKE_CURRENT_BINARY_DIR}" + -d "${SPHINX_CACHE_DIR}" + "${CMAKE_CURRENT_SOURCE_DIR}" + "${SPHINX_MAN_DIR}" + DEPENDS dev-reference-doxygen + COMMENT "Building man page reference with Sphinx") endif() diff --git a/doc/dev-reference/conf.py.in b/doc/dev-reference/conf.py.in index d0ef371b..ad97f088 100644 --- a/doc/dev-reference/conf.py.in +++ b/doc/dev-reference/conf.py.in @@ -233,7 +233,7 @@ latex_documents = [ # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'hyperscan', u'Hyperscan Documentation', - [u'Intel Corporation'], 1) + [u'Intel Corporation'], 7) ] # If true, show URL addresses after external links.
From 2d23d24b678f39c92a9bc8b41af241b3701b73f1 Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Thu, 15 Feb 2024 14:51:11 -0600 Subject: [PATCH 03/19] documentation: Update project name and copyright The project name in the documentation should probably be updated to reflect that this is Vectorscan. Update the copyright too. Signed-off-by: Jeremy Linton --- doc/dev-reference/conf.py.in | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/dev-reference/conf.py.in b/doc/dev-reference/conf.py.in index ad97f088..298a54b1 100644 --- a/doc/dev-reference/conf.py.in +++ b/doc/dev-reference/conf.py.in @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Hyperscan documentation build configuration file, created by +# Vectorscan documentation build configuration file, created by # sphinx-quickstart on Tue Sep 29 15:59:19 2015. # # This file is execfile()d with the current directory set to its @@ -43,8 +43,8 @@ source_suffix = '.rst' master_doc = 'index' # General information about the project. -project = u'Hyperscan' -copyright = u'2015-2018, Intel Corporation' +project = u'Vectorscan' +copyright = u'2015-2020, Intel Corporation; 2020-2024, VectorCamp; and other contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -202,7 +202,7 @@ latex_elements = { # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - ('index', 'Hyperscan.tex', u'Hyperscan Documentation', + ('index', 'Hyperscan.tex', u'Vectorscan Documentation', u'Intel Corporation', 'manual'), ] @@ -232,7 +232,7 @@ latex_documents = [ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - ('index', 'hyperscan', u'Hyperscan Documentation', + ('index', 'vectorscan', u'Vectorscan Documentation', [u'Intel Corporation'], 7) ] @@ -246,8 +246,8 @@ man_pages = [ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'Hyperscan', u'Hyperscan Documentation', - u'Intel Corporation', 'Hyperscan', 'High-performance regular expression matcher.', + ('index', 'Vectorscan', u'Vectorscan Documentation', + u'Intel Corporation; VectorCamp', 'Vectorscan', 'High-performance regular expression matcher.', 'Miscellaneous'), ] From 943f198ebf641c7511b12b8b3bb7ead8a6681228 Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Thu, 15 Feb 2024 15:13:20 -0600 Subject: [PATCH 04/19] documentation: Replace project name with Vectorscan and general updates The generated documentation continues to refer to Hyperscan despite the project now being Vectorscan. Let's replace many of the Hyperscan references with Vectorscan. At the same time, let's resync the documentation here with the Vectorscan README. This updates the supported platforms/compilers and build options.
Signed-off-by: Jeremy Linton --- doc/dev-reference/chimera.rst | 22 ++-- doc/dev-reference/compilation.rst | 92 +++++++-------- doc/dev-reference/getting_started.rst | 159 +++++++++++++++++--------- doc/dev-reference/index.rst | 2 +- doc/dev-reference/intro.rst | 22 ++-- doc/dev-reference/performance.rst | 22 ++-- doc/dev-reference/preface.rst | 18 +-- doc/dev-reference/runtime.rst | 24 ++-- doc/dev-reference/serialization.rst | 20 ++-- doc/dev-reference/tools.rst | 44 +++---- 10 files changed, 239 insertions(+), 186 deletions(-) diff --git a/doc/dev-reference/chimera.rst b/doc/dev-reference/chimera.rst index d35b116f..cb8c84c4 100644 --- a/doc/dev-reference/chimera.rst +++ b/doc/dev-reference/chimera.rst @@ -11,10 +11,10 @@ Introduction ************ Chimera is a software regular expression matching engine that is a hybrid of -Hyperscan and PCRE. The design goals of Chimera are to fully support PCRE -syntax as well as to take advantage of the high performance nature of Hyperscan. +Vectorscan and PCRE. The design goals of Chimera are to fully support PCRE +syntax as well as to take advantage of the high performance nature of Vectorscan. -Chimera inherits the design guideline of Hyperscan with C APIs for compilation +Chimera inherits the design guideline of Vectorscan with C APIs for compilation and scanning. The Chimera API itself is composed of two major components: @@ -65,13 +65,13 @@ For a given database, Chimera provides several guarantees: .. note:: Chimera is designed to have the same matching behavior as PCRE, including greedy/ungreedy, capturing, etc. Chimera reports both **start offset** and **end offset** for each match like PCRE. Different - from the fashion of reporting all matches in Hyperscan, Chimera only reports + from the fashion of reporting all matches in Vectorscan, Chimera only reports non-overlapping matches. For example, the pattern :regexp:`/foofoo/` will match ``foofoofoofoo`` at offsets (0, 6) and (6, 12). -.. note:: Since Chimera is a hybrid of Hyperscan and PCRE in order to support +.. note:: Since Chimera is a hybrid of Vectorscan and PCRE in order to support full PCRE syntax, there will be extra performance overhead compared to - Hyperscan-only solution. Please always use Hyperscan for better performance + a Vectorscan-only solution. Please always use Vectorscan for better performance unless you must need full PCRE syntax support. See :ref:`chruntime` for more details @@ -83,12 +83,12 @@ Requirements The PCRE library (http://pcre.org/) version 8.41 is required for Chimera. .. note:: Since Chimera needs to reference PCRE internal function, please place PCRE source - directory under Hyperscan root directory in order to build Chimera. + directory under Vectorscan root directory in order to build Chimera. -Beside this, both hardware and software requirements of Chimera are the same to Hyperscan. +Besides this, both hardware and software requirements of Chimera are the same as for Vectorscan. See :ref:`hardware` and :ref:`software` for more details. -.. note:: Building Hyperscan will automatically generate Chimera library. +.. note:: Building Vectorscan will automatically generate Chimera library. Currently only static library is supported for Chimera, so please use static build type when configure CMake build options. @@ -119,7 +119,7 @@ databases: Compilation allows the Chimera library to analyze the given pattern(s) and pre-determine how to scan for these patterns in an optimized fashion using -Hyperscan and PCRE. +Vectorscan and PCRE.
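To make the Chimera compile-and-scan flow concrete, here is a minimal sketch in C. It is an illustration, not part of this patch: it assumes the ``ch.h`` API surface documented in this reference (``ch_compile_multi``, ``ch_alloc_scratch``, ``ch_scan``), and the patterns and IDs are arbitrary examples. Note the backreference in the second pattern, which Chimera can handle because matching falls back to PCRE where needed: ::

    #include <stdio.h>
    #include <ch.h>

    /* Signature as documented for ch_match_event_handler: Chimera reports
     * both start and end offsets, plus capture groups under CH_MODE_GROUPS. */
    static ch_callback_t on_match(unsigned int id, unsigned long long from,
                                  unsigned long long to, unsigned int flags,
                                  unsigned int size,
                                  const ch_capture_t *captured, void *ctx) {
        printf("pattern %u matched at [%llu, %llu)\n", id, from, to);
        return CH_CALLBACK_CONTINUE;
    }

    int main(void) {
        const char *patterns[] = { "foo(bar|baz)", "(\\d+)\\s+\\1" };
        unsigned int flags[] = { CH_FLAG_CASELESS, 0 };
        unsigned int ids[] = { 1, 2 };

        ch_database_t *db = NULL;
        ch_compile_error_t *err = NULL;
        if (ch_compile_multi(patterns, flags, ids, 2, CH_MODE_NOGROUPS,
                             NULL, &db, &err) != CH_SUCCESS) {
            fprintf(stderr, "compile failed: %s\n", err->message);
            ch_free_compile_error(err);
            return 1;
        }

        ch_scratch_t *scratch = NULL;
        if (ch_alloc_scratch(db, &scratch) != CH_SUCCESS) {
            ch_free_database(db);
            return 1;
        }

        /* Scan a block; the error-event handler is left NULL here. */
        const char data[] = "xxfooBARxx 123 123";
        ch_scan(db, data, sizeof(data) - 1, 0, scratch, on_match, NULL, NULL);

        ch_free_scratch(scratch);
        ch_free_database(db);
        return 0;
    }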
=============== Pattern Support @@ -134,7 +134,7 @@ Semantics ========= Chimera supports the exact same semantics of PCRE library. Moreover, it supports -multiple simultaneous pattern matching like Hyperscan and the multiple matches +multiple simultaneous pattern matching like Vectorscan and the multiple matches will be reported in order by end offset. .. _chruntime: diff --git a/doc/dev-reference/compilation.rst b/doc/dev-reference/compilation.rst index 6f5541ec..a0ae8c8b 100644 --- a/doc/dev-reference/compilation.rst +++ b/doc/dev-reference/compilation.rst @@ -9,7 +9,7 @@ Compiling Patterns Building a Database ******************* -The Hyperscan compiler API accepts regular expressions and converts them into a +The Vectorscan compiler API accepts regular expressions and converts them into a compiled pattern database that can then be used to scan data. The API provides three functions that compile regular expressions into @@ -24,7 +24,7 @@ databases: #. :c:func:`hs_compile_ext_multi`: compiles an array of expressions as above, but allows :ref:`extparam` to be specified for each expression. -Compilation allows the Hyperscan library to analyze the given pattern(s) and +Compilation allows the Vectorscan library to analyze the given pattern(s) and pre-determine how to scan for these patterns in an optimized fashion that would be far too expensive to compute at run-time. @@ -48,10 +48,10 @@ To compile patterns to be used in streaming mode, the ``mode`` parameter of block mode requires the use of :c:member:`HS_MODE_BLOCK` and vectored mode requires the use of :c:member:`HS_MODE_VECTORED`. A pattern database compiled for one mode (streaming, block or vectored) can only be used in that mode. The -version of Hyperscan used to produce a compiled pattern database must match the -version of Hyperscan used to scan with it. +version of Vectorscan used to produce a compiled pattern database must match the +version of Vectorscan used to scan with it. -Hyperscan provides support for targeting a database at a particular CPU +Vectorscan provides support for targeting a database at a particular CPU platform; see :ref:`instr_specialization` for details. ===================== @@ -75,14 +75,14 @@ characters exist in regular grammar like ``[``, ``]``, ``(``, ``)``, ``{``, While in pure literal case, all these meta characters lost extra meanings expect for that they are just common ASCII codes. -Hyperscan is initially designed to process common regular expressions. It is +Vectorscan is initially designed to process common regular expressions. It is hence embedded with a complex parser to do comprehensive regular grammar interpretation. Particularly, the identification of above meta characters is the basic step for the interpretation of far more complex regular grammars. However in real cases, patterns may not always be regular expressions. They could just be pure literals. Problem will come if the pure literals contain -regular meta characters. Supposing fed directly into traditional Hyperscan +regular meta characters. Supposing fed directly into traditional Vectorscan compile API, all these meta characters will be interpreted in predefined ways, which is unnecessary and the result is totally out of expectation. 
To avoid such misunderstanding by traditional API, users have to preprocess these literal patterns by converting the meta characters into some other formats: either by adding a backslash ``\`` before certain meta characters, or by converting all the characters into a hexadecimal representation. -In ``v5.2.0``, Hyperscan introduces 2 new compile APIs for pure literal patterns: +In ``v5.2.0``, Vectorscan introduces 2 new compile APIs for pure literal patterns: #. :c:func:`hs_compile_lit`: compiles a single pure literal into a pattern database. @@ -106,7 +106,7 @@ content directly into these APIs without worrying about writing regular meta characters in their patterns. No preprocessing work is needed any more. For new APIs, the ``length`` of each literal pattern is a newly added parameter. -Hyperscan needs to locate the end position of the input expression via clearly +Vectorscan needs to locate the end position of the input expression via clearly knowing each literal's length, not by simply identifying character ``\0`` of a string. @@ -127,19 +127,19 @@ Supported flags: :c:member:`HS_FLAG_CASELESS`, :c:member:`HS_FLAG_SINGLEMATCH`, Pattern Support *************** -Hyperscan supports the pattern syntax used by the PCRE library ("libpcre"), +Vectorscan supports the pattern syntax used by the PCRE library ("libpcre"), described at <http://www.pcre.org/>. However, not all constructs available in libpcre are supported. The use of unsupported constructs will result in compilation errors. -The version of PCRE used to validate Hyperscan's interpretation of this syntax +The version of PCRE used to validate Vectorscan's interpretation of this syntax is 8.41 or above. ==================== Supported Constructs ==================== -The following regex constructs are supported by Hyperscan: +The following regex constructs are supported by Vectorscan: * Literal characters and strings, with all libpcre quoting and character escapes. @@ -177,7 +177,7 @@ The following regex constructs are supported by Hyperscan: :c:member:`HS_FLAG_SINGLEMATCH` flag is on for that pattern. * Lazy modifiers (:regexp:`?` appended to another quantifier, e.g. - :regexp:`\\w+?`) are supported but ignored (as Hyperscan reports all + :regexp:`\\w+?`) are supported but ignored (as Vectorscan reports all matches). * Parenthesization, including the named and unnamed capturing and @@ -219,15 +219,15 @@ The following regex constructs are supported by Hyperscan: .. note:: At this time, not all patterns can be successfully compiled with the :c:member:`HS_FLAG_SOM_LEFTMOST` flag, which enables per-pattern support for :ref:`som`. The patterns that support this flag are a subset of patterns that - can be successfully compiled with Hyperscan; notably, many bounded repeat - forms that can be compiled with Hyperscan without the Start of Match flag + can be successfully compiled with Vectorscan; notably, many bounded repeat + forms that can be compiled with Vectorscan without the Start of Match flag enabled cannot be compiled with the flag enabled. ====================== Unsupported Constructs ====================== -The following regex constructs are not supported by Hyperscan: +The following regex constructs are not supported by Vectorscan: * Backreferences and capturing sub-expressions. * Arbitrary zero-width assertions. @@ -246,32 +246,32 @@ The following regex constructs are not supported by Hyperscan: Semantics ********* -While Hyperscan follows libpcre syntax, it provides different semantics.
The +While Vectorscan follows libpcre syntax, it provides different semantics. The major departures from libpcre semantics are motivated by the requirements of streaming and multiple simultaneous pattern matching. The major departures from libpcre semantics are: -#. **Multiple pattern matching**: Hyperscan allows matches to be reported for +#. **Multiple pattern matching**: Vectorscan allows matches to be reported for several patterns simultaneously. This is not equivalent to separating the patterns by :regexp:`|` in libpcre, which evaluates alternations left-to-right. -#. **Lack of ordering**: the multiple matches that Hyperscan produces are not +#. **Lack of ordering**: the multiple matches that Vectorscan produces are not guaranteed to be ordered, although they will always fall within the bounds of the current scan. -#. **End offsets only**: Hyperscan's default behaviour is only to report the end +#. **End offsets only**: Vectorscan's default behaviour is only to report the end offset of a match. Reporting of the start offset can be enabled with per-expression flags at pattern compile time. See :ref:`som` for details. #. **"All matches" reported**: scanning :regexp:`/foo.*bar/` against - ``fooxyzbarbar`` will return two matches from Hyperscan -- at the points + ``fooxyzbarbar`` will return two matches from Vectorscan -- at the points corresponding to the ends of ``fooxyzbar`` and ``fooxyzbarbar``. In contrast, libpcre semantics by default would report only one match at ``fooxyzbarbar`` (greedy semantics) or, if non-greedy semantics were switched on, one match at ``fooxyzbar``. This means that switching between greedy and non-greedy - semantics is a no-op in Hyperscan. + semantics is a no-op in Vectorscan. To support libpcre quantifier semantics while accurately reporting streaming matches at the time they occur is impossible. For example, consider the pattern @@ -299,7 +299,7 @@ as in block 3 -- which would constitute a better match for the pattern. Start of Match ============== -In standard operation, Hyperscan will only provide the end offset of a match +In standard operation, Vectorscan will only provide the end offset of a match when the match callback is called. If the :c:member:`HS_FLAG_SOM_LEFTMOST` flag is specified for a particular pattern, then the same set of matches is returned, but each match will also provide the leftmost possible start offset @@ -308,7 +308,7 @@ corresponding to its end offset. Using the SOM flag entails a number of trade-offs and limitations: * Reduced pattern support: For many patterns, tracking SOM is complex and can - result in Hyperscan failing to compile a pattern with a "Pattern too + result in Vectorscan failing to compile a pattern with a "Pattern too large" error, even if the pattern is supported in normal operation. * Increased stream state: At scan time, state space is required to track potential SOM offsets, and this must be stored in persistent stream state in @@ -316,20 +316,20 @@ Using the SOM flag entails a number of trade-offs and limitations: required to match a pattern. * Performance overhead: Similarly, there is generally a performance cost associated with tracking SOM. -* Incompatible features: Some other Hyperscan pattern flags (such as +* Incompatible features: Some other Vectorscan pattern flags (such as :c:member:`HS_FLAG_SINGLEMATCH` and :c:member:`HS_FLAG_PREFILTER`) can not be used in combination with SOM. Specifying them together with :c:member:`HS_FLAG_SOM_LEFTMOST` will result in a compilation error. 
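The start-of-match behaviour is easiest to see in code. Below is a minimal sketch, an author's illustration using only the compile and scan calls documented here (``hs_compile`` with :c:member:`HS_FLAG_SOM_LEFTMOST`, ``hs_alloc_scratch``, ``hs_scan``); the pattern is an arbitrary example: ::

    #include <stdio.h>
    #include <hs.h>

    /* With HS_FLAG_SOM_LEFTMOST, 'from' carries the leftmost possible
     * start offset for each reported end offset. */
    static int on_match(unsigned int id, unsigned long long from,
                        unsigned long long to, unsigned int flags, void *ctx) {
        printf("pattern %u matched at [%llu, %llu)\n", id, from, to);
        return 0; /* continue scanning */
    }

    int main(void) {
        hs_database_t *db = NULL;
        hs_compile_error_t *err = NULL;
        if (hs_compile("foo.*bar", HS_FLAG_SOM_LEFTMOST, HS_MODE_BLOCK,
                       NULL, &db, &err) != HS_SUCCESS) {
            fprintf(stderr, "compile failed: %s\n", err->message);
            hs_free_compile_error(err);
            return 1;
        }

        hs_scratch_t *scratch = NULL;
        hs_alloc_scratch(db, &scratch);

        const char data[] = "xxfooxyzbarxx";
        hs_scan(db, data, sizeof(data) - 1, 0, scratch, on_match, NULL);

        hs_free_scratch(scratch);
        hs_free_database(db);
        return 0;
    }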
In streaming mode, the amount of precision delivered by SOM can be controlled -with the SOM horizon flags. These instruct Hyperscan to deliver accurate SOM +with the SOM horizon flags. These instruct Vectorscan to deliver accurate SOM information within a certain distance of the end offset, and return a special start offset of :c:member:`HS_OFFSET_PAST_HORIZON` otherwise. Specifying a small or medium SOM horizon will usually reduce the stream state required for a given database. .. note:: In streaming mode, the start offset returned for a match may refer to - a point in the stream *before* the current block being scanned. Hyperscan + a point in the stream *before* the current block being scanned. Vectorscan provides no facility for accessing earlier blocks; if the calling application needs to inspect historical data, then it must store it itself. @@ -341,7 +341,7 @@ Extended Parameters In some circumstances, more control over the matching behaviour of a pattern is required than can be specified easily using regular expression syntax. For -these scenarios, Hyperscan provides the :c:func:`hs_compile_ext_multi` function +these scenarios, Vectorscan provides the :c:func:`hs_compile_ext_multi` function that allows a set of "extended parameters" to be set on a per-pattern basis. Extended parameters are specified using an :c:type:`hs_expr_ext_t` structure, @@ -383,18 +383,18 @@ section. Prefiltering Mode ================= -Hyperscan provides a per-pattern flag, :c:member:`HS_FLAG_PREFILTER`, which can -be used to implement a prefilter for a pattern than Hyperscan would not +Vectorscan provides a per-pattern flag, :c:member:`HS_FLAG_PREFILTER`, which can +be used to implement a prefilter for a pattern that Vectorscan would not ordinarily support. -This flag instructs Hyperscan to compile an "approximate" version of this -pattern for use in a prefiltering application, even if Hyperscan does not +This flag instructs Vectorscan to compile an "approximate" version of this +pattern for use in a prefiltering application, even if Vectorscan does not support the pattern in normal operation. The set of matches returned when this flag is used is guaranteed to be a superset of the matches specified by the non-prefiltering expression. -If the pattern contains pattern constructs not supported by Hyperscan (such as +If the pattern contains pattern constructs not supported by Vectorscan (such as zero-width assertions, back-references or conditional references) these constructs will be replaced internally with broader constructs that may match more often. @@ -404,7 +404,7 @@ back-reference :regexp:`\\1`. In prefiltering mode, this pattern might be approximated by having its back-reference replaced with its referent, forming :regexp:`/\\w+ again \\w+/`. -Furthermore, in prefiltering mode Hyperscan may simplify a pattern that would +Furthermore, in prefiltering mode Vectorscan may simplify a pattern that would otherwise return a "Pattern too large" error at compile time, or for performance reasons (subject to the matching guarantee above). @@ -422,22 +422,22 @@ matches for the pattern. Instruction Set Specialization ****************************** -Hyperscan is able to make use of several modern instruction set features found +Vectorscan is able to make use of several modern instruction set features found on x86 processors to provide improvements in scanning performance.
Some of these features are selected when the library is built; for example, -Hyperscan will use the native ``POPCNT`` instruction on processors where it is +Vectorscan will use the native ``POPCNT`` instruction on processors where it is available and the library has been optimized for the host architecture. -.. note:: By default, the Hyperscan runtime is built with the ``-march=native`` +.. note:: By default, the Vectorscan runtime is built with the ``-march=native`` compiler flag and (where possible) will make use of all instructions known by the host's C compiler. -To use some instruction set features, however, Hyperscan must build a +To use some instruction set features, however, Vectorscan must build a specialized database to support them. This means that the target platform must be specified at pattern compile time. -The Hyperscan compiler API functions all accept an optional +The Vectorscan compiler API functions all accept an optional :c:type:`hs_platform_info_t` argument, which describes the target platform for the database to be built. If this argument is NULL, the database will be targeted at the current host platform. @@ -467,7 +467,7 @@ See :ref:`api_constants` for the full list of CPU tuning and feature flags. Approximate matching ******************** -Hyperscan provides an experimental approximate matching mode, which will match +Vectorscan provides an experimental approximate matching mode, which will match patterns within a given edit distance. The exact matching behavior is defined as follows: @@ -492,7 +492,7 @@ follows: Here are a few examples of approximate matching: -* Pattern :regexp:`/foo/` can match ``foo`` when using regular Hyperscan +* Pattern :regexp:`/foo/` can match ``foo`` when using regular Vectorscan matching behavior. With approximate matching within edit distance 2, the pattern will produce matches when scanned against ``foo``, ``foooo``, ``f00``, ``f``, and anything else that lies within edit distance 2 of matching corpora @@ -513,7 +513,7 @@ matching support. Here they are, in a nutshell: * Reduced pattern support: * For many patterns, approximate matching is complex and can result in - Hyperscan failing to compile a pattern with a "Pattern too large" error, + Vectorscan failing to compile a pattern with a "Pattern too large" error, even if the pattern is supported in normal operation. * Additionally, some patterns cannot be approximately matched because they reduce to so-called "vacuous" patterns (patterns that match everything). For @@ -548,7 +548,7 @@ Logical Combinations ******************** For situations when a user requires behaviour that depends on the presence or -absence of matches from groups of patterns, Hyperscan provides support for the +absence of matches from groups of patterns, Vectorscan provides support for the logical combination of patterns in a given pattern set, with three operators: ``NOT``, ``AND`` and ``OR``. @@ -561,7 +561,7 @@ offset is *true* if the expression it refers to is *false* at this offset. For example, ``NOT 101`` means that expression 101 has not yet matched at this offset. -A logical combination is passed to Hyperscan at compile time as an expression. +A logical combination is passed to Vectorscan at compile time as an expression. This combination expression will raise matches at every offset where one of its sub-expressions matches and the logical value of the whole expression is *true*. @@ -603,7 +603,7 @@ In a logical combination expression: * Whitespace is ignored. 
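As a sketch of what a combination pattern set might look like in code (an author's illustration; the IDs and patterns are arbitrary, and the compilation details are described in the text that follows): ::

    #include <stdio.h>
    #include <hs.h>

    static int on_match(unsigned int id, unsigned long long from,
                        unsigned long long to, unsigned int flags, void *ctx) {
        printf("expression %u matched ending at %llu\n", id, to);
        return 0;
    }

    int main(void) {
        /* Sub-expressions 101 and 102, plus combination 1001, which is
         * true at offsets where 101 has matched and 102 has not. The
         * sub-expressions still report their own matches because the
         * HS_FLAG_QUIET flag is not set on them here. */
        const char *patterns[] = { "abc", "def", "101 & !102" };
        unsigned int flags[] = { 0, 0, HS_FLAG_COMBINATION };
        unsigned int ids[] = { 101, 102, 1001 };

        hs_database_t *db = NULL;
        hs_compile_error_t *err = NULL;
        if (hs_compile_multi(patterns, flags, ids, 3, HS_MODE_BLOCK,
                             NULL, &db, &err) != HS_SUCCESS) {
            fprintf(stderr, "compile failed: %s\n", err->message);
            hs_free_compile_error(err);
            return 1;
        }

        hs_scratch_t *scratch = NULL;
        hs_alloc_scratch(db, &scratch);

        const char data[] = "xxabcxx";
        hs_scan(db, data, sizeof(data) - 1, 0, scratch, on_match, NULL);

        hs_free_scratch(scratch);
        hs_free_database(db);
        return 0;
    }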
To use a logical combination expression, it must be passed to one of the -Hyperscan compile functions (:c:func:`hs_compile_multi`, +Vectorscan compile functions (:c:func:`hs_compile_multi`, :c:func:`hs_compile_ext_multi`) along with the :c:member:`HS_FLAG_COMBINATION` flag, which identifies the pattern as a logical combination expression. The patterns referred to in the logical combination expression must be compiled together in @@ -613,7 +613,7 @@ When an expression has the :c:member:`HS_FLAG_COMBINATION` flag set, it ignores all other flags except the :c:member:`HS_FLAG_SINGLEMATCH` flag and the :c:member:`HS_FLAG_QUIET` flag. -Hyperscan will accept logical combination expressions at compile time that +Vectorscan will accept logical combination expressions at compile time that evaluate to *true* when no patterns have matched, and report the match for combination at end of data if no patterns have matched; for example: :: diff --git a/doc/dev-reference/getting_started.rst b/doc/dev-reference/getting_started.rst index aaff15ba..57d78211 100644 --- a/doc/dev-reference/getting_started.rst +++ b/doc/dev-reference/getting_started.rst @@ -7,43 +7,41 @@ Getting Started Very Quick Start **************** -#. Clone Hyperscan :: +#. Clone Vectorscan :: - cd - git clone git://github.com/intel/hyperscan + cd + git clone https://github.com/VectorCamp/vectorscan -#. Configure Hyperscan +#. Configure Vectorscan Ensure that you have the correct :ref:`dependencies ` present, and then: :: - cd + cd mkdir cd - cmake [-G ] [options] + cmake [-G ] [options] Known working generators: * ``Unix Makefiles`` --- make-compatible makefiles (default on Linux/FreeBSD/Mac OS X) * ``Ninja`` --- `Ninja `_ build files. - * ``Visual Studio 15 2017`` --- Visual Studio projects - Generators that might work include: + Unsupported generators that might work include: * ``Xcode`` --- OS X Xcode projects. -#. Build Hyperscan +#. Build Vectorscan Depending on the generator used: * ``cmake --build .`` --- will build everything * ``make -j`` --- use makefiles in parallel * ``ninja`` --- use Ninja build - * ``MsBuild.exe`` --- use Visual Studio MsBuild * etc. -#. Check Hyperscan +#. Check Vectorscan - Run the Hyperscan unit tests: :: + Run the Vectorscan unit tests: :: bin/unit-hyperscan @@ -55,20 +53,23 @@ Requirements Hardware ======== -Hyperscan will run on x86 processors in 64-bit (Intel\ |reg| 64 Architecture) and -32-bit (IA-32 Architecture) modes. +Vectorscan will run on x86 processors in 64-bit (Intel\ |reg| 64 Architecture) and +32-bit (IA-32 Architecture) modes as well as Arm v8.0+ aarch64, and POWER 8+ ppc64le +machines. Hyperscan is a high performance software library that takes advantage of recent -Intel architecture advances. At a minimum, support for Supplemental Streaming -SIMD Extensions 3 (SSSE3) is required, which should be available on any modern -x86 processor. +architecture advances. -Additionally, Hyperscan can make use of: +Additionally, Vectorscan can make use of: * Intel Streaming SIMD Extensions 4.2 (SSE4.2) * the POPCNT instruction * Bit Manipulation Instructions (BMI, BMI2) * Intel Advanced Vector Extensions 2 (Intel AVX2) + * Arm NEON + * Arm SVE and SVE2 + * Arm SVE2 BITPERM + * IBM Power8/Power9 VSX if present. @@ -79,40 +80,34 @@ These can be determined at library compile time, see :ref:`target_arch`. 
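Where a library build targets newer instructions than a deployment host provides, the documented :c:func:`hs_valid_platform` call can act as a guard before any compilation or scanning is attempted. A minimal sketch: ::

    #include <stdio.h>
    #include <hs.h>

    /* hs_valid_platform() returns HS_SUCCESS when the current CPU supports
     * the features this build of the library requires, and an error
     * otherwise. */
    int main(void) {
        if (hs_valid_platform() != HS_SUCCESS) {
            fprintf(stderr, "this CPU lacks the features required by "
                            "this build of the library\n");
            return 1;
        }
        printf("platform supported\n");
        return 0;
    }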
Software ======== -As a software library, Hyperscan doesn't impose any particular runtime -software requirements, however to build the Hyperscan library we require a -modern C and C++ compiler -- in particular, Hyperscan requires C99 and C++11 +As a software library, Vectorscan doesn't impose any particular runtime +software requirements, however to build the Vectorscan library we require a +modern C and C++ compiler -- in particular, Vectorscan requires C99 and C++17 compiler support. The supported compilers are: - * GCC, v4.8.1 or higher - * Clang, v3.4 or higher (with libstdc++ or libc++) - * Intel C++ Compiler v15 or higher - * Visual C++ 2017 Build Tools + * GCC, v9 or higher + * Clang, v5 or higher (with libstdc++ or libc++) -Examples of operating systems that Hyperscan is known to work on include: +Examples of operating systems that Vectorscan is known to work on include: Linux: -* Ubuntu 14.04 LTS or newer +* Ubuntu 20.04 LTS or newer * RedHat/CentOS 7 or newer +* Fedora 38 or newer +* Debian 10 FreeBSD: * 10.0 or newer -Windows: - -* 8 or newer - Mac OS X: * 10.8 or newer, using XCode/Clang -Hyperscan *may* compile and run on other platforms, but there is no guarantee. -We currently have experimental support for Windows using Intel C++ Compiler -or Visual Studio 2017. +Vectorscan *may* compile and run on other platforms, but there is no guarantee. -In addition, the following software is required for compiling the Hyperscan library: +In addition, the following software is required for compiling the Vectorscan library: ======================================================= =========== ====================================== Dependency Version Notes @@ -132,20 +127,20 @@ Ragel, you may use Cygwin to build it from source. Boost Headers ------------- -Compiling Hyperscan depends on a recent version of the Boost C++ header +Compiling Vectorscan depends on a recent version of the Boost C++ header library. If the Boost libraries are installed on the build machine in the usual paths, CMake will find them. If the Boost libraries are not installed, the location of the Boost source tree can be specified during the CMake configuration step using the ``BOOST_ROOT`` variable (described below). Another alternative is to put a copy of (or a symlink to) the boost -subdirectory in ``/include/boost``. +subdirectory in ``/include/boost``. For example: for the Boost-1.59.0 release: :: - ln -s boost_1_59_0/boost /include/boost + ln -s boost_1_59_0/boost /include/boost -As Hyperscan uses the header-only parts of Boost, it is not necessary to +As Vectorscan uses the header-only parts of Boost, it is not necessary to compile the Boost libraries. CMake Configuration @@ -168,11 +163,12 @@ Common options for CMake include: | | Valid options are Debug, Release, RelWithDebInfo, | | | and MinSizeRel. Default is RelWithDebInfo. | +------------------------+----------------------------------------------------+ -| BUILD_SHARED_LIBS | Build Hyperscan as a shared library instead of | +| BUILD_SHARED_LIBS | Build Vectorscan as a shared library instead of | | | the default static library. | +| | Default: Off | +------------------------+----------------------------------------------------+ -| BUILD_STATIC_AND_SHARED| Build both static and shared Hyperscan libs. | -| | Default off. | +| BUILD_STATIC_LIBS | Build Vectorscan as a static library. | +| | Default: On | +------------------------+----------------------------------------------------+ | BOOST_ROOT | Location of Boost source tree. 
| +------------------------+----------------------------------------------------+ | FAT_RUNTIME | Build the :ref:`fat runtime`. Default | | | true on Linux, not available elsewhere. | +| | Default: Off | +------------------------+----------------------------------------------------+ +| USE_CPU_NATIVE | Native CPU detection is off by default, however it | +| | is possible to build a performance-oriented non-fat| +| | library tuned to your CPU. | +| | Default: Off | +------------------------+----------------------------------------------------+ +| SANITIZE | Use libasan sanitizer to detect possible bugs. | +| | Valid options are address, memory and undefined. | +------------------------+----------------------------------------------------+ +| SIMDE_BACKEND | Enable SIMDe backend. If this is chosen all native | +| | (SSE/AVX/AVX512/Neon/SVE/VSX) backends will be | +| | disabled and a SIMDe SSE4.2 emulation backend will | +| | be enabled. This will enable Vectorscan to build | +| | and run on architectures without SIMD. | +| | Default: Off | +------------------------+----------------------------------------------------+ +| SIMDE_NATIVE | Enable SIMDe native emulation of x86 SSE4.2 | +| | intrinsics on the building platform. That is, | +| | SSE4.2 intrinsics will be emulated using Neon on | +| | an Arm platform, or VSX on a Power platform, etc. | +| | Default: Off | ++------------------------+----------------------------------------------------+ + +X86 platform specific options include: + ++------------------------+----------------------------------------------------+ +| Variable | Description | ++========================+====================================================+ +| BUILD_AVX2 | Enable code for AVX2. | ++------------------------+----------------------------------------------------+ +| BUILD_AVX512 | Enable code for AVX512. Implies BUILD_AVX2. | ++------------------------+----------------------------------------------------+ +| BUILD_AVX512VBMI | Enable code for AVX512 with VBMI extension. Implies| +| | BUILD_AVX512. | ++------------------------+----------------------------------------------------+ + +Arm platform specific options include: + ++------------------------+----------------------------------------------------+ +| Variable | Description | ++========================+====================================================+ +| BUILD_SVE | Enable code for SVE, like on AWS Graviton3 CPUs. | +| | Not much code is ported just for SVE, but enabling | +| | SVE code production does improve code generation, | +| | see Benchmarks. | ++------------------------+----------------------------------------------------+ +| BUILD_SVE2 | Enable code for SVE2, implies BUILD_SVE. Most | +| | non-Neon code is written for SVE2. | ++------------------------+----------------------------------------------------+ +| BUILD_SVE2_BITPERM | Enable code for SVE2_BITPERM hardware feature, | +| | implies BUILD_SVE2. | +------------------------+----------------------------------------------------+ For example, to generate a ``Debug`` build: :: cd - cmake -DCMAKE_BUILD_TYPE=Debug + cmake -DCMAKE_BUILD_TYPE=Debug Build Type ---------- CMake determines a number of features for a build based on the Build Type. -Hyperscan defaults to ``RelWithDebInfo``, i.e. "release with debugging +Vectorscan defaults to ``RelWithDebInfo``, i.e.
"release with debugging information". This is a performance optimized build without runtime assertions but with debug symbols enabled. @@ -201,7 +249,7 @@ The other types of builds are: * ``Release``: as above, but without debug symbols * ``MinSizeRel``: a stripped release build - * ``Debug``: used when developing Hyperscan. Includes runtime assertions + * ``Debug``: used when developing Vectorscan. Includes runtime assertions (which has a large impact on runtime performance), and will also enable some other build features like building internal unit tests. @@ -211,7 +259,7 @@ The other types of builds are: Target Architecture ------------------- -Unless using the :ref:`fat runtime`, by default Hyperscan will be +Unless using the :ref:`fat runtime`, by default Vectorscan will be compiled to target the instruction set of the processor of the machine that being used for compilation. This is done via the use of ``-march=native``. The result of this means that a library built on one machine may not work on a @@ -223,7 +271,7 @@ CMake, or ``CMAKE_C_FLAGS`` and ``CMAKE_CXX_FLAGS`` on the CMake command line. F example, to set the instruction subsets up to ``SSE4.2`` using GCC 4.8: :: cmake -DCMAKE_C_FLAGS="-march=corei7" \ - -DCMAKE_CXX_FLAGS="-march=corei7" + -DCMAKE_CXX_FLAGS="-march=corei7" For more information, refer to :ref:`instr_specialization`. @@ -232,17 +280,17 @@ For more information, refer to :ref:`instr_specialization`. Fat Runtime ----------- -A feature introduced in Hyperscan v4.4 is the ability for the Hyperscan +A feature introduced in Hyperscan v4.4 is the ability for the Vectorscan library to dispatch the most appropriate runtime code for the host processor. -This feature is called the "fat runtime", as a single Hyperscan library +This feature is called the "fat runtime", as a single Vectorscan library contains multiple copies of the runtime code for different instruction sets. .. note:: The fat runtime feature is only available on Linux. Release builds of - Hyperscan will default to having the fat runtime enabled where supported. + Vectorscan will default to having the fat runtime enabled where supported. -When building the library with the fat runtime, the Hyperscan runtime code +When building the library with the fat runtime, the Vectorscan runtime code will be compiled multiple times for these different instruction sets, and these compiled objects are combined into one library. There are no changes to how user applications are built against this library. @@ -254,11 +302,11 @@ resolved so that the right version of each API function is used. There is no impact on function call performance, as this check and resolution is performed by the ELF loader once when the binary is loaded. -If the Hyperscan library is used on x86 systems without ``SSSE3``, the runtime +If the Vectorscan library is used on x86 systems without ``SSSE4.2``, the runtime API functions will resolve to functions that return :c:member:`HS_ARCH_ERROR` instead of potentially executing illegal instructions. The API function :c:func:`hs_valid_platform` can be used by application writers to determine if -the current platform is supported by Hyperscan. +the current platform is supported by Vectorscan. As of this release, the variants of the runtime that are built, and the CPU capability that is required, are the following: @@ -299,6 +347,11 @@ capability that is required, are the following: cmake -DBUILD_AVX512VBMI=on <...> + Vectorscan add support for Arm processors and SVE, SV2 and SVE2_BITPERM. 
+ For example: :: + + cmake -DBUILD_SVE=ON -DBUILD_SVE2=ON -DBUILD_SVE2_BITPERM=ON <...> + As the fat runtime requires compiler, libc, and binutils support, at this time it will only be enabled for Linux builds where the compiler supports the `indirect function "ifunc" function attribute diff --git a/doc/dev-reference/index.rst b/doc/dev-reference/index.rst index b5d6a54b..4046a298 100644 --- a/doc/dev-reference/index.rst +++ b/doc/dev-reference/index.rst @@ -1,5 +1,5 @@ -############################################### -Hyperscan |version| Developer's Reference Guide -############################################### +################################################ +Vectorscan |version| Developer's Reference Guide +################################################ ------- diff --git a/doc/dev-reference/intro.rst b/doc/dev-reference/intro.rst index 58879aef..71538eb0 100644 --- a/doc/dev-reference/intro.rst +++ b/doc/dev-reference/intro.rst @@ -5,11 +5,11 @@ Introduction ############ -Hyperscan is a software regular expression matching engine designed with +Vectorscan is a software regular expression matching engine designed with high performance and flexibility in mind. It is implemented as a library that exposes a straightforward C API. -The Hyperscan API itself is composed of two major components: +The Vectorscan API itself is composed of two major components: *********** Compilation *********** These functions take a group of regular expressions, along with identifiers and option flags, and compile them into an immutable database that can be used by -the Hyperscan scanning API. This compilation process performs considerable +the Vectorscan scanning API. This compilation process performs considerable analysis and optimization work in order to build a database that will match the given expressions efficiently. @@ -36,8 +36,8 @@ See :ref:`compilation` for more detail. Scanning ******** -Once a Hyperscan database has been created, it can be used to scan data in -memory. Hyperscan provides several scanning modes, depending on whether the +Once a Vectorscan database has been created, it can be used to scan data in +memory. Vectorscan provides several scanning modes, depending on whether the data to be scanned is available as a single contiguous block, whether it is distributed amongst several blocks in memory at the same time, or whether it is to be scanned as a sequence of blocks in a stream. Matches are delivered to the application via a user-supplied callback function that is called synchronously for each match. -For a given database, Hyperscan provides several guarantees: +For a given database, Vectorscan provides several guarantees: * No memory allocations occur at runtime with the exception of two fixed-size allocations, both of which should be done ahead of time for performance reasons: - **Scratch space**: temporary memory used for internal data at scan time. Structured to allow the currently active Hyperscan operation to be... call. - **Stream state**: in streaming mode only, some state space is required to store data that persists between scan calls for each stream. This allows - Hyperscan to track matches that span multiple blocks of data. + Vectorscan to track matches that span multiple blocks of data. * The sizes of the scratch space and stream state (in streaming mode) required for a given database are fixed and determined at database compile time. This time, and these structures can be pre-allocated if required for performance reasons.
-* Any pattern that has successfully been compiled by the Hyperscan compiler can +* Any pattern that has successfully been compiled by the Vectorscan compiler can be scanned against any input. There are no internal resource limits or other limitations at runtime that could cause a scan call to return an error. @@ -74,12 +74,12 @@ See :ref:`runtime` for more detail. Tools ***** -Some utilities for testing and benchmarking Hyperscan are included with the +Some utilities for testing and benchmarking Vectorscan are included with the library. See :ref:`tools` for more information. ************ Example Code ************ -Some simple example code demonstrating the use of the Hyperscan API is -available in the ``examples/`` subdirectory of the Hyperscan distribution. +Some simple example code demonstrating the use of the Vectorscan API is +available in the ``examples/`` subdirectory of the Vectorscan distribution. diff --git a/doc/dev-reference/performance.rst b/doc/dev-reference/performance.rst index 23781bd6..12074ea3 100644 --- a/doc/dev-reference/performance.rst +++ b/doc/dev-reference/performance.rst @@ -4,7 +4,7 @@ Performance Considerations ########################## -Hyperscan supports a wide range of patterns in all three scanning modes. It is +Vectorscan supports a wide range of patterns in all three scanning modes. It is capable of extremely high levels of performance, but certain patterns can reduce performance markedly. @@ -25,7 +25,7 @@ For example, caseless matching of :regexp:`/abc/` can be written as: * :regexp:`/(?i)abc(?-i)/` * :regexp:`/abc/i` -Hyperscan is capable of handling all these constructs. Unless there is a +Vectorscan is capable of handling all these constructs. Unless there is a specific reason otherwise, do not rewrite patterns from one form to another. As another example, matching of :regexp:`/foo(bar|baz)(frotz)?/` can be @@ -41,24 +41,24 @@ Library usage .. tip:: Do not hand-optimize library usage. -The Hyperscan library is capable of dealing with small writes, unusually large +The Vectorscan library is capable of dealing with small writes, unusually large and small pattern sets, etc. Unless there is a specific performance problem -with some usage of the library, it is best to use Hyperscan in a simple and +with some usage of the library, it is best to use Vectorscan in a simple and direct fashion. For example, it is unlikely for there to be much benefit in buffering input to the library into larger blocks unless streaming writes are tiny (say, 1-2 bytes at a time). -Unlike many other pattern matching products, Hyperscan will run faster with +Unlike many other pattern matching products, Vectorscan will run faster with small numbers of patterns and slower with large numbers of patterns in a smooth fashion (as opposed to, typically, running at a moderate speed up to some fixed limit then either breaking or running half as fast). -Hyperscan also provides high-throughput matching with a single thread of -control per core; if a database runs at 3.0 Gbps in Hyperscan it means that a +Vectorscan also provides high-throughput matching with a single thread of +control per core; if a database runs at 3.0 Gbps in Vectorscan it means that a 3000-bit block of data will be scanned in 1 microsecond in a single thread of control, not that it is required to scan 22 3000-bit blocks of data in 22 microseconds. Thus, it is not usually necessary to buffer data to supply -Hyperscan with available parallelism. +Vectorscan with available parallelism. 
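In code, the simple and direct usage recommended above amounts to compiling once, allocating scratch once, and scanning each block as it arrives. A sketch using only the documented block-mode API (the pattern and data blocks are arbitrary examples): ::

    #include <stdio.h>
    #include <string.h>
    #include <hs.h>

    static int on_match(unsigned int id, unsigned long long from,
                        unsigned long long to, unsigned int flags, void *ctx) {
        ++*(unsigned long *)ctx; /* just count matches */
        return 0;
    }

    int main(void) {
        hs_database_t *db = NULL;
        hs_compile_error_t *err = NULL;
        if (hs_compile("\\bfoo\\d+", HS_FLAG_CASELESS, HS_MODE_BLOCK,
                       NULL, &db, &err) != HS_SUCCESS) {
            fprintf(stderr, "compile failed: %s\n", err->message);
            hs_free_compile_error(err);
            return 1;
        }

        /* Compile once, allocate scratch once, then scan each block as
         * it arrives -- no buffering or batching needed. */
        hs_scratch_t *scratch = NULL;
        hs_alloc_scratch(db, &scratch);

        const char *blocks[] = { "foo1 bar", "no match here", "FOO42" };
        unsigned long matches = 0;
        for (int i = 0; i < 3; ++i) {
            hs_scan(db, blocks[i], (unsigned int)strlen(blocks[i]), 0,
                    scratch, on_match, &matches);
        }
        printf("%lu matches\n", matches);

        hs_free_scratch(scratch);
        hs_free_database(db);
        return 0;
    }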
******************** Block-based matching @@ -72,7 +72,7 @@ accumulated before processing, it should be scanned in block rather than in streaming mode. Unnecessary use of streaming mode reduces the number of optimizations that can -be applied in Hyperscan and may make some patterns run slower. +be applied in Vectorscan and may make some patterns run slower. If there is a mixture of 'block' and 'streaming' mode patterns, these should be scanned in separate databases except in the case that the streaming patterns @@ -107,7 +107,7 @@ Allocate scratch ahead of time Scratch allocation is not necessarily a cheap operation. Since it is the first time (after compilation or deserialization) that a pattern database is used, -Hyperscan performs some validation checks inside :c:func:`hs_alloc_scratch` and +Vectorscan performs some validation checks inside :c:func:`hs_alloc_scratch` and must also allocate memory. Therefore, it is important to ensure that :c:func:`hs_alloc_scratch` is not @@ -329,7 +329,7 @@ Consequently, :regexp:`/foo.*bar/L` with a check on start of match values after the callback is considerably more expensive and general than :regexp:`/foo.{300}bar/`. -Similarly, the :c:member:`hs_expr_ext::min_length` extended parameter can be +Similarly, the :cpp:member:`hs_expr_ext::min_length` extended parameter can be used to specify a lower bound on the length of the matches for a pattern. Using this facility may be more lightweight in some circumstances than using the SOM flag and post-confirming match length in the calling application. diff --git a/doc/dev-reference/preface.rst b/doc/dev-reference/preface.rst index 68373b7f..5739690f 100644 --- a/doc/dev-reference/preface.rst +++ b/doc/dev-reference/preface.rst @@ -6,35 +6,35 @@ Preface Overview ******** -Hyperscan is a regular expression engine designed to offer high performance, the +Vectorscan is a regular expression engine designed to offer high performance, the ability to match multiple expressions simultaneously and flexibility in scanning operation. Patterns are provided to a compilation interface which generates an immutable pattern database. The scan interface then can be used to scan a target data buffer for the given patterns, returning any matching results from that data -buffer. Hyperscan also provides a streaming mode, in which matches that span +buffer. Vectorscan also provides a streaming mode, in which matches that span several blocks in a stream are detected. -This document is designed to facilitate code-level integration of the Hyperscan +This document is designed to facilitate code-level integration of the Vectorscan library with existing or new applications. -:ref:`intro` is a short overview of the Hyperscan library, with more detail on -the Hyperscan API provided in the subsequent sections: :ref:`compilation` and +:ref:`intro` is a short overview of the Vectorscan library, with more detail on +the Vectorscan API provided in the subsequent sections: :ref:`compilation` and :ref:`runtime`. :ref:`perf` provides details on various factors which may impact the -performance of a Hyperscan integration. +performance of a Vectorscan integration. :ref:`api_constants` and :ref:`api_files` provides a detailed summary of the -Hyperscan Application Programming Interface (API). +Vectorscan Application Programming Interface (API). ******** Audience ******** -This guide is aimed at developers interested in integrating Hyperscan into an -application. 
For information on building the Hyperscan library, see the Quick +This guide is aimed at developers interested in integrating Vectorscan into an +application. For information on building the Vectorscan library, see the Quick Start Guide. *********** diff --git a/doc/dev-reference/runtime.rst b/doc/dev-reference/runtime.rst index 396521c9..249fd235 100644 --- a/doc/dev-reference/runtime.rst +++ b/doc/dev-reference/runtime.rst @@ -4,7 +4,7 @@ Scanning for Patterns ##################### -Hyperscan provides three different scanning modes, each with its own scan +Vectorscan provides three different scanning modes, each with its own scan function beginning with ``hs_scan``. In addition, streaming mode has a number of other API functions for managing stream state. @@ -33,8 +33,8 @@ See :c:type:`match_event_handler` for more information. Streaming Mode ************** -The core of the Hyperscan streaming runtime API consists of functions to open, -scan, and close Hyperscan data streams: +The core of the Vectorscan streaming runtime API consists of functions to open, +scan, and close Vectorscan data streams: * :c:func:`hs_open_stream`: allocates and initializes a new stream for scanning. @@ -57,14 +57,14 @@ will return immediately with :c:member:`HS_SCAN_TERMINATED`. The caller must still call :c:func:`hs_close_stream` to complete the clean-up process for that stream. -Streams exist in the Hyperscan library so that pattern matching state can be +Streams exist in the Vectorscan library so that pattern matching state can be maintained across multiple blocks of target data -- without maintaining this state, it would not be possible to detect patterns that span these blocks of data. This, however, does come at the cost of requiring an amount of storage per-stream (the size of this storage is fixed at compile time), and a slight performance penalty in some cases to manage the state. -While Hyperscan does always support a strict ordering of multiple matches, +While Vectorscan does always support a strict ordering of multiple matches, streaming matches will not be delivered at offsets before the current stream write, with the exception of zero-width asserts, where constructs such as :regexp:`\\b` and :regexp:`$` can cause a match on the final character of a @@ -76,7 +76,7 @@ Stream Management ================= In addition to :c:func:`hs_open_stream`, :c:func:`hs_scan_stream`, and -:c:func:`hs_close_stream`, the Hyperscan API provides a number of other +:c:func:`hs_close_stream`, the Vectorscan API provides a number of other functions for the management of streams: * :c:func:`hs_reset_stream`: resets a stream to its initial state; this is @@ -98,10 +98,10 @@ A stream object is allocated as a fixed size region of memory which has been sized to ensure that no memory allocations are required during scan operations. When the system is under memory pressure, it may be useful to reduce the memory consumed by streams that are not expected to be used soon. The -Hyperscan API provides calls for translating a stream to and from a compressed +Vectorscan API provides calls for translating a stream to and from a compressed representation for this purpose. The compressed representation differs from the full stream object as it does not reserve space for components which are not -required given the current stream state. The Hyperscan API functions for this +required given the current stream state. 
The Vectorscan API functions for this functionality are: * :c:func:`hs_compress_stream`: fills the provided buffer with a compressed @@ -157,7 +157,7 @@ scanned in block mode. Scratch Space ************* -While scanning data, Hyperscan needs a small amount of temporary memory to store +While scanning data, Vectorscan needs a small amount of temporary memory to store on-the-fly internal data. This amount is unfortunately too large to fit on the stack, particularly for embedded applications, and allocating memory dynamically is too expensive, so a pre-allocated "scratch" space must be provided to the @@ -170,7 +170,7 @@ databases, only a single scratch region is necessary: in this case, calling will ensure that the scratch space is large enough to support scanning against any of the given databases. -While the Hyperscan library is re-entrant, the use of scratch spaces is not. +While the Vectorscan library is re-entrant, the use of scratch spaces is not. For example, if by design it is deemed necessary to run recursive or nested scanning (say, from the match callback function), then an additional scratch space is required for that context. @@ -219,11 +219,11 @@ For example: Custom Allocators ***************** -By default, structures used by Hyperscan at runtime (scratch space, stream +By default, structures used by Vectorscan at runtime (scratch space, stream state, etc) are allocated with the default system allocators, usually ``malloc()`` and ``free()``. -The Hyperscan API provides a facility for changing this behaviour to support +The Vectorscan API provides a facility for changing this behaviour to support applications that use custom memory allocators. These functions are: diff --git a/doc/dev-reference/serialization.rst b/doc/dev-reference/serialization.rst index 4f884c75..5950e607 100644 --- a/doc/dev-reference/serialization.rst +++ b/doc/dev-reference/serialization.rst @@ -4,7 +4,7 @@ Serialization ############# -For some applications, compiling Hyperscan pattern databases immediately prior +For some applications, compiling Vectorscan pattern databases immediately prior to use is not an appropriate design. Some users may wish to: * Compile pattern databases on a different host; @@ -14,9 +14,9 @@ to use is not an appropriate design. Some users may wish to: * Control the region of memory in which the compiled database is located. -Hyperscan pattern databases are not completely flat in memory: they contain +Vectorscan pattern databases are not completely flat in memory: they contain pointers and have specific alignment requirements. Therefore, they cannot be -copied (or otherwise relocated) directly. To enable these use cases, Hyperscan +copied (or otherwise relocated) directly. To enable these use cases, Vectorscan provides functionality for serializing and deserializing compiled pattern databases. @@ -40,10 +40,10 @@ The API provides the following functions: returns a string containing information about the database. This call is analogous to :c:func:`hs_database_info`. -.. note:: Hyperscan performs both version and platform compatibility checks +.. note:: Vectorscan performs both version and platform compatibility checks upon deserialization. The :c:func:`hs_deserialize_database` and :c:func:`hs_deserialize_database_at` functions will only permit the - deserialization of databases compiled with (a) the same version of Hyperscan + deserialization of databases compiled with (a) the same version of Vectorscan and (b) platform features supported by the current host platform. 
See :ref:`instr_specialization` for more information on platform specialization. @@ -51,17 +51,17 @@ The API provides the following functions: The Runtime Library =================== -The main Hyperscan library (``libhs``) contains both the compiler and runtime -portions of the library. This means that in order to support the Hyperscan +The main Vectorscan library (``libhs``) contains both the compiler and runtime +portions of the library. This means that in order to support the Vectorscan compiler, which is written in C++, it requires C++ linkage and has a dependency on the C++ standard library. Many embedded applications require only the scanning ("runtime") portion of the -Hyperscan library. In these cases, pattern compilation generally takes place on +Vectorscan library. In these cases, pattern compilation generally takes place on another host, and serialized pattern databases are delivered to the application for use. To support these applications without requiring the C++ dependency, a -runtime-only version of the Hyperscan library, called ``libhs_runtime``, is also +runtime-only version of the Vectorscan library, called ``libhs_runtime``, is also distributed. This library does not depend on the C++ standard library and -provides all Hyperscan functions other that those used to compile databases. +provides all Vectorscan functions other that those used to compile databases. diff --git a/doc/dev-reference/tools.rst b/doc/dev-reference/tools.rst index e0465fc6..f6d51515 100644 --- a/doc/dev-reference/tools.rst +++ b/doc/dev-reference/tools.rst @@ -4,14 +4,14 @@ Tools ##### -This section describes the set of utilities included with the Hyperscan library. +This section describes the set of utilities included with the Vectorscan library. ******************** Quick Check: hscheck ******************** -The ``hscheck`` tool allows the user to quickly check whether Hyperscan supports -a group of patterns. If a pattern is rejected by Hyperscan's compiler, the +The ``hscheck`` tool allows the user to quickly check whether Vectorscan supports +a group of patterns. If a pattern is rejected by Vectorscan's compiler, the compile error is provided on standard output. For example, given the following three patterns (the last of which contains a @@ -34,7 +34,7 @@ syntax error) in a file called ``/tmp/test``:: Benchmarker: hsbench ******************** -The ``hsbench`` tool provides an easy way to measure Hyperscan's performance +The ``hsbench`` tool provides an easy way to measure Vectorscan's performance for a particular set of patterns and corpus of data to be scanned. Patterns are supplied in the format described below in @@ -44,7 +44,7 @@ easy control of how a corpus is broken into blocks and streams. .. note:: A group of Python scripts for constructing corpora databases from various input types, such as PCAP network traffic captures or text files, can - be found in the Hyperscan source tree in ``tools/hsbench/scripts``. + be found in the Vectorscan source tree in ``tools/hsbench/scripts``. Running hsbench =============== @@ -56,7 +56,7 @@ produce output like this:: $ hsbench -e /tmp/patterns -c /tmp/corpus.db Signatures: /tmp/patterns - Hyperscan info: Version: 4.3.1 Features: AVX2 Mode: STREAM + Vectorscan info: Version: 5.4.11 Features: AVX2 Mode: STREAM Expression count: 200 Bytecode size: 342,540 bytes Database CRC: 0x6cd6b67c @@ -77,7 +77,7 @@ takes to perform all twenty scans. 
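To make the arithmetic concrete (with invented numbers): scanning a 2 MB corpus twenty times in a total of 0.04 seconds would be reported as 20 x 2 MB / 0.04 s = 1,000 MB/s, or roughly 8,000 Mbit/sec of scan throughput.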
The number of repeats can be changed with the ``-n`` argument, and the results of each scan will be displayed if the ``--per-scan`` argument is specified. -To benchmark Hyperscan on more than one core, you can supply a list of cores +To benchmark Vectorscan on more than one core, you can supply a list of cores with the ``-T`` argument, which will instruct ``hsbench`` to start one benchmark thread per core given and compute the throughput from the time taken to complete all of them. @@ -91,17 +91,17 @@ Correctness Testing: hscollider ******************************* The ``hscollider`` tool, or Pattern Collider, provides a way to verify -Hyperscan's matching behaviour. It does this by compiling and scanning patterns +Vectorscan's matching behaviour. It does this by compiling and scanning patterns (either singly or in groups) against known corpora and comparing the results against another engine (the "ground truth"). Two sources of ground truth for comparison are available: * The PCRE library (http://pcre.org/). - * An NFA simulation run on Hyperscan's compile-time graph representation. This + * An NFA simulation run on Vectorscan's compile-time graph representation. This is used if PCRE cannot support the pattern or if PCRE execution fails due to a resource limit. -Much of Hyperscan's testing infrastructure is built on ``hscollider``, and the +Much of Vectorscan's testing infrastructure is built on ``hscollider``, and the tool is designed to take advantage of multiple cores and provide considerable flexibility in controlling the test. These options are described in the help (``hscollider -h``) and include: @@ -116,11 +116,11 @@ flexibility in controlling the test. These options are described in the help Using hscollider to debug a pattern =================================== -One common use-case for ``hscollider`` is to determine whether Hyperscan will +One common use-case for ``hscollider`` is to determine whether Vectorscan will match a pattern in the expected location, and whether this accords with PCRE's behaviour for the same case. -Here is an example. We put our pattern in a file in Hyperscan's pattern +Here is an example. We put our pattern in a file in Vectorscan's pattern format:: $ cat /tmp/pat @@ -172,7 +172,7 @@ individual matches are displayed in the output:: Total elapsed time: 0.00522815 secs. -We can see from this output that both PCRE and Hyperscan find matches ending at +We can see from this output that both PCRE and Vectorscan find matches ending at offset 33 and 45, and so ``hscollider`` considers this test case to have passed. @@ -180,13 +180,13 @@ passed. corpus alignment 0, and ``-T 1`` instructs us to only use one thread.) .. note:: In default operation, PCRE produces only one match for a scan, unlike - Hyperscan's automata semantics. The ``hscollider`` tool uses libpcre's - "callout" functionality to match Hyperscan's semantics. + Vectorscan's automata semantics. The ``hscollider`` tool uses libpcre's + "callout" functionality to match Vectorscan's semantics. Running a larger scan test ========================== -A set of patterns for testing purposes are distributed with Hyperscan, and these +A set of patterns for testing purposes are distributed with Vectorscan, and these can be tested via ``hscollider`` on an in-tree build. 
Two CMake targets are provided to do this easily:
@@ -202,10 +202,10 @@ Debugging: hsdump
*****************

When built in debug mode (using the CMake directive ``CMAKE_BUILD_TYPE`` set to
-``Debug``), Hyperscan includes support for dumping information about its
+``Debug``), Vectorscan includes support for dumping information about its
internals during pattern compilation with the ``hsdump`` tool.

-This information is mostly of use to Hyperscan developers familiar with the
+This information is mostly of use to Vectorscan developers familiar with the
library's internal structure, but can be used to diagnose issues with patterns
and provide more information in bug reports.

@@ -215,7 +215,7 @@ and provide more information in bug reports.
Pattern Format
**************

-All of the Hyperscan tools accept patterns in the same format, read from plain
+All of the Vectorscan tools accept patterns in the same format, read from plain
text files with one pattern per line. Each line looks like this:

* ``<integer id>:/<regular expression>/<flags>``

For example::

    3:/^.{10,20}hatstand/m

The integer ID is the value that will be reported when a match is found by
-Hyperscan and must be unique.
+Vectorscan and must be unique.

The pattern itself is a regular expression in PCRE syntax; see
:ref:`compilation` for more information on supported features.

-The flags are single characters that map to Hyperscan flags as follows:
+The flags are single characters that map to Vectorscan flags as follows:

========= ================================= ===========
Character API Flag Description
========= ================================= ===========

@@ -256,7 +256,7 @@ between braces, separated by commas. For example::

    1:/hatstand.*teakettle/s{min_offset=50,max_offset=100}

-All Hyperscan tools will accept a pattern file (or a directory containing
+All Vectorscan tools will accept a pattern file (or a directory containing
pattern files) with the ``-e`` argument. If no further arguments constraining
the pattern set are given, all patterns in those files are used.

From 0c57b6c89490303757aca3ba2d0515f7f8752765 Mon Sep 17 00:00:00 2001
From: Jeremy Linton
Date: Tue, 20 Feb 2024 13:48:05 -0600
Subject: [PATCH 05/19] pkgconfig: Correct library description

Correct the description in the pkgconfig file, but leave the name alone
as we want to remain compatible with projects utilizing hyperscan.

Signed-off-by: Jeremy Linton
---
 libhs.pc.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libhs.pc.in b/libhs.pc.in
index 3ad2b90c..d1e3ffb0 100644
--- a/libhs.pc.in
+++ b/libhs.pc.in
@@ -4,7 +4,7 @@ libdir=@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@
 includedir=@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_INCLUDEDIR@
 
 Name: libhs
-Description: Intel(R) Hyperscan Library
+Description: A portable fork of the high-performance regular expression matching library
 Version: @HS_VERSION@
 Libs: -L${libdir} -lhs
 Cflags: -I${includedir}/hs

From 6bbd4821f0ad090a68d6e3dfffbc3dc9ad5d4da1 Mon Sep 17 00:00:00 2001
From: Jeremy Linton
Date: Tue, 20 Feb 2024 15:01:40 -0600
Subject: [PATCH 06/19] hsbench: Update test program output

While fixing the documentation, it was noticed that the hsbench output
was still referring to the project as Hyperscan. Let's correct it.
Signed-off-by: Jeremy Linton --- tools/hsbench/engine_hyperscan.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/hsbench/engine_hyperscan.cpp b/tools/hsbench/engine_hyperscan.cpp index 95461de5..f3de35ef 100644 --- a/tools/hsbench/engine_hyperscan.cpp +++ b/tools/hsbench/engine_hyperscan.cpp @@ -248,7 +248,7 @@ void EngineHyperscan::printStats() const { printf("Signature set: %s\n", compile_stats.sigs_name.c_str()); } printf("Signatures: %s\n", compile_stats.signatures.c_str()); - printf("Hyperscan info: %s\n", compile_stats.db_info.c_str()); + printf("Vectorscan info: %s\n", compile_stats.db_info.c_str()); printf("Expression count: %'zu\n", compile_stats.expressionCount); printf("Bytecode size: %'zu bytes\n", compile_stats.compiledSize); printf("Database CRC: 0x%x\n", compile_stats.crc32); From f9e254ab415a22ae59bab86f235784cfbf2572d4 Mon Sep 17 00:00:00 2001 From: Yoan Picchi Date: Thu, 15 Feb 2024 13:51:19 +0000 Subject: [PATCH 07/19] Enable sheng32/64 for SVE Signed-off-by: Yoan Picchi --- src/nfa/sheng.c | 8 +- src/nfa/sheng.h | 8 +- src/nfa/sheng_defs.h | 70 +++---- src/nfa/sheng_impl.h | 127 ++++++++++++ src/nfa/sheng_impl4.h | 428 +++++++++++++++++++++++++++++++++++++++ src/nfa/shengcompile.cpp | 14 ++ 6 files changed, 612 insertions(+), 43 deletions(-) diff --git a/src/nfa/sheng.c b/src/nfa/sheng.c index 3f36e218..922e8f80 100644 --- a/src/nfa/sheng.c +++ b/src/nfa/sheng.c @@ -154,7 +154,7 @@ char fireReports(const struct sheng *sh, NfaCallback cb, void *ctxt, return MO_CONTINUE_MATCHING; /* continue execution */ } -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) // Sheng32 static really_inline const struct sheng32 *get_sheng32(const struct NFA *n) { @@ -351,7 +351,7 @@ char fireReports64(const struct sheng64 *sh, NfaCallback cb, void *ctxt, } return MO_CONTINUE_MATCHING; /* continue execution */ } -#endif // end of HAVE_AVX512VBMI +#endif // end of HAVE_AVX512VBMI || HAVE_SVE /* include Sheng function definitions */ #include "sheng_defs.h" @@ -871,7 +871,7 @@ char nfaExecSheng_expandState(UNUSED const struct NFA *nfa, void *dest, return 0; } -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) // Sheng32 static really_inline char runSheng32Cb(const struct sheng32 *sh, NfaCallback cb, void *ctxt, @@ -1874,4 +1874,4 @@ char nfaExecSheng64_expandState(UNUSED const struct NFA *nfa, void *dest, *(u8 *)dest = *(const u8 *)src; return 0; } -#endif // end of HAVE_AVX512VBMI +#endif // end of HAVE_AVX512VBMI || HAVE_SVE diff --git a/src/nfa/sheng.h b/src/nfa/sheng.h index 7b90e303..212bd3a4 100644 --- a/src/nfa/sheng.h +++ b/src/nfa/sheng.h @@ -58,7 +58,7 @@ char nfaExecSheng_reportCurrent(const struct NFA *n, struct mq *q); char nfaExecSheng_B(const struct NFA *n, u64a offset, const u8 *buffer, size_t length, NfaCallback cb, void *context); -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define nfaExecSheng32_B_Reverse NFA_API_NO_IMPL #define nfaExecSheng32_zombie_status NFA_API_ZOMBIE_NO_IMPL @@ -106,8 +106,7 @@ char nfaExecSheng64_reportCurrent(const struct NFA *n, struct mq *q); char nfaExecSheng64_B(const struct NFA *n, u64a offset, const u8 *buffer, size_t length, NfaCallback cb, void *context); - -#else // !HAVE_AVX512VBMI +#else // !HAVE_AVX512VBMI && !HAVE_SVE #define nfaExecSheng32_B_Reverse NFA_API_NO_IMPL #define nfaExecSheng32_zombie_status NFA_API_ZOMBIE_NO_IMPL @@ -138,6 +137,7 @@ char nfaExecSheng64_B(const struct NFA *n, u64a offset, const u8 *buffer, 
#define nfaExecSheng64_testEOD NFA_API_NO_IMPL #define nfaExecSheng64_reportCurrent NFA_API_NO_IMPL #define nfaExecSheng64_B NFA_API_NO_IMPL -#endif // end of HAVE_AVX512VBMI +#endif // end of HAVE_AVX512VBMI || defined(HAVE_SVE) + #endif /* SHENG_H_ */ diff --git a/src/nfa/sheng_defs.h b/src/nfa/sheng_defs.h index 390af752..886af28e 100644 --- a/src/nfa/sheng_defs.h +++ b/src/nfa/sheng_defs.h @@ -52,7 +52,7 @@ u8 hasInterestingStates(const u8 a, const u8 b, const u8 c, const u8 d) { return (a | b | c | d) & (SHENG_STATE_FLAG_MASK); } -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) static really_inline u8 isDeadState32(const u8 a) { return a & SHENG32_STATE_DEAD; @@ -108,7 +108,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_cod #define DEAD_FUNC isDeadState #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_cod #define DEAD_FUNC32 isDeadState32 #define ACCEPT_FUNC32 isAcceptState32 @@ -121,7 +121,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -135,7 +135,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_co #define DEAD_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_co #define DEAD_FUNC32 dummyFunc #define ACCEPT_FUNC32 isAcceptState32 @@ -148,7 +148,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -162,7 +162,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_samd #define DEAD_FUNC isDeadState #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_samd #define DEAD_FUNC32 isDeadState32 #define ACCEPT_FUNC32 isAcceptState32 @@ -175,7 +175,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -189,7 +189,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_sam #define DEAD_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_sam #define DEAD_FUNC32 dummyFunc #define ACCEPT_FUNC32 isAcceptState32 @@ -202,7 +202,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -216,7 +216,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_nmd #define DEAD_FUNC isDeadState #define ACCEPT_FUNC dummyFunc -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_nmd #define DEAD_FUNC32 isDeadState32 #define ACCEPT_FUNC32 dummyFunc @@ -229,7 +229,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL 
#undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -243,7 +243,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_nm #define DEAD_FUNC dummyFunc #define ACCEPT_FUNC dummyFunc -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_nm #define DEAD_FUNC32 dummyFunc #define ACCEPT_FUNC32 dummyFunc @@ -256,7 +256,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -277,7 +277,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC isAccelState #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_coda #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 isDeadState32 @@ -296,7 +296,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -316,7 +316,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_cod #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 isDeadState32 @@ -339,7 +339,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -363,7 +363,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC isAccelState #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_coa #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 dummyFunc @@ -382,7 +382,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -402,7 +402,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_co #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 dummyFunc @@ -425,7 +425,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -449,7 +449,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC isAccelState #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_samda #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 isDeadState32 @@ -468,7 +468,7 @@ u8 
dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -488,7 +488,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_samd #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 isDeadState32 @@ -511,7 +511,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -535,7 +535,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC isAccelState #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_sama #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 dummyFunc @@ -554,7 +554,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -574,7 +574,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_sam #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 dummyFunc @@ -597,7 +597,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -623,7 +623,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC isAccelState #define ACCEPT_FUNC dummyFunc -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_nmda #define INTERESTING_FUNC32 dummyFunc4 #define INNER_DEAD_FUNC32 dummyFunc @@ -642,7 +642,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -662,7 +662,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC dummyFunc -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_nmd #define INTERESTING_FUNC32 dummyFunc4 #define INNER_DEAD_FUNC32 dummyFunc @@ -685,7 +685,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -712,7 +712,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC dummyFunc -#if 
defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #define SHENG32_IMPL sheng32_4_nm #define INTERESTING_FUNC32 dummyFunc4 #define INNER_DEAD_FUNC32 dummyFunc @@ -735,7 +735,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) +#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 diff --git a/src/nfa/sheng_impl.h b/src/nfa/sheng_impl.h index 1fa5c831..9634fa65 100644 --- a/src/nfa/sheng_impl.h +++ b/src/nfa/sheng_impl.h @@ -96,6 +96,133 @@ char SHENG_IMPL(u8 *state, NfaCallback cb, void *ctxt, const struct sheng *s, return MO_CONTINUE_MATCHING; } +#if defined(HAVE_SVE) + +static really_inline +char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt, + const struct sheng32 *s, + u8 *const cached_accept_state, + ReportID *const cached_accept_id, + u8 single, u64a base_offset, const u8 *buf, const u8 *start, + const u8 *end, const u8 **scan_end) { + DEBUG_PRINTF("Starting DFA execution in state %u\n", + *state & SHENG32_STATE_MASK); + const u8 *cur_buf = start; + if (DEAD_FUNC32(*state)) { + DEBUG_PRINTF("Dead on arrival\n"); + *scan_end = end; + return MO_CONTINUE_MATCHING; + } + DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start)); + + const svbool_t lane_pred_32 = svwhilelt_b8(0, 32); + svuint8_t cur_state = svld1(lane_pred_32, state); + const m512 *masks = s->succ_masks; + + while (likely(cur_buf != end)) { + const u8 c = *cur_buf; + svuint8_t succ_mask = svld1(lane_pred_32, (const u8*)(masks + c)); + cur_state = svtbl(cur_state, succ_mask); + const u8 tmp = svlastb(lane_pred_32, cur_state); + + DEBUG_PRINTF("c: %02hhx '%c'\n", c, ourisprint(c) ? c : '?'); + DEBUG_PRINTF("s: %u (flag: %u)\n", tmp & SHENG32_STATE_MASK, + tmp & SHENG32_STATE_FLAG_MASK); + + if (unlikely(ACCEPT_FUNC32(tmp))) { + DEBUG_PRINTF("Accept state %u reached\n", tmp & SHENG32_STATE_MASK); + u64a match_offset = base_offset + (cur_buf - buf) + 1; + DEBUG_PRINTF("Match @ %llu\n", match_offset); + if (STOP_AT_MATCH) { + DEBUG_PRINTF("Stopping at match @ %lli\n", + (u64a)(cur_buf - start)); + *state = tmp; + *scan_end = cur_buf; + return MO_MATCHES_PENDING; + } + if (single) { + if (fireSingleReport(cb, ctxt, s->report, match_offset) == + MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } else { + if (fireReports32(s, cb, ctxt, tmp, match_offset, + cached_accept_state, cached_accept_id, + 0) == MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } + } + cur_buf++; + } + *state = svlastb(lane_pred_32, cur_state); + *scan_end = cur_buf; + return MO_CONTINUE_MATCHING; +} + +static really_inline +char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt, + const struct sheng64 *s, + u8 *const cached_accept_state, + ReportID *const cached_accept_id, + u8 single, u64a base_offset, const u8 *buf, const u8 *start, + const u8 *end, const u8 **scan_end) { + DEBUG_PRINTF("Starting DFA execution in state %u\n", + *state & SHENG64_STATE_MASK); + const u8 *cur_buf = start; + if (DEAD_FUNC64(*state)) { + DEBUG_PRINTF("Dead on arrival\n"); + *scan_end = end; + return MO_CONTINUE_MATCHING; + } + DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start)); + + const svbool_t lane_pred_64 = svwhilelt_b8(0, 64); + svuint8_t cur_state = svld1(lane_pred_64, state); + const m512 *masks = s->succ_masks; + + while (likely(cur_buf != end)) { + const u8 c = *cur_buf; + svuint8_t succ_mask = svld1(lane_pred_64, (const u8*)(masks + c)); + cur_state = svtbl(cur_state, 
succ_mask); + const u8 tmp = svlastb(lane_pred_64, cur_state); + + DEBUG_PRINTF("c: %02hhx '%c'\n", c, ourisprint(c) ? c : '?'); + DEBUG_PRINTF("s: %u (flag: %u)\n", tmp & SHENG64_STATE_MASK, + tmp & SHENG64_STATE_FLAG_MASK); + + if (unlikely(ACCEPT_FUNC64(tmp))) { + DEBUG_PRINTF("Accept state %u reached\n", tmp & SHENG64_STATE_MASK); + u64a match_offset = base_offset + (cur_buf - buf) + 1; + DEBUG_PRINTF("Match @ %llu\n", match_offset); + if (STOP_AT_MATCH) { + DEBUG_PRINTF("Stopping at match @ %lli\n", + (u64a)(cur_buf - start)); + *state = tmp; + *scan_end = cur_buf; + return MO_MATCHES_PENDING; + } + if (single) { + if (fireSingleReport(cb, ctxt, s->report, match_offset) == + MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } else { + if (fireReports64(s, cb, ctxt, tmp, match_offset, + cached_accept_state, cached_accept_id, + 0) == MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } + } + cur_buf++; + } + *state = svlastb(lane_pred_64, cur_state); + *scan_end = cur_buf; + return MO_CONTINUE_MATCHING; +} +#endif + #if defined(HAVE_AVX512VBMI) static really_inline char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt, diff --git a/src/nfa/sheng_impl4.h b/src/nfa/sheng_impl4.h index e5d3468f..10ad4ea0 100644 --- a/src/nfa/sheng_impl4.h +++ b/src/nfa/sheng_impl4.h @@ -283,6 +283,434 @@ char SHENG_IMPL(u8 *state, NfaCallback cb, void *ctxt, const struct sheng *s, return MO_CONTINUE_MATCHING; } +#if defined(HAVE_SVE) +static really_inline +char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt, + const struct sheng32 *s, + u8 *const cached_accept_state, + ReportID *const cached_accept_id, + u8 single, u64a base_offset, const u8 *buf, const u8 *start, + const u8 *end, const u8 **scan_end) { + DEBUG_PRINTF("Starting DFAx4 execution in state %u\n", + *state & SHENG32_STATE_MASK); + const u8 *cur_buf = start; + const u8 *min_accel_dist = start; + base_offset++; + DEBUG_PRINTF("Scanning %llu bytes\n", (u64a)(end - start)); + + if (INNER_ACCEL_FUNC32(*state) || OUTER_ACCEL_FUNC32(*state)) { + DEBUG_PRINTF("Accel state reached @ 0\n"); + const union AccelAux *aaux = + get_accel32(s, *state & SHENG32_STATE_MASK); + const u8 *new_offset = run_accel(aaux, cur_buf, end); + if (new_offset < cur_buf + BAD_ACCEL_DIST) { + min_accel_dist = new_offset + BIG_ACCEL_PENALTY; + } else { + min_accel_dist = new_offset + SMALL_ACCEL_PENALTY; + } + DEBUG_PRINTF("Next accel chance: %llu\n", + (u64a)(min_accel_dist - start)); + DEBUG_PRINTF("Accel scanned %zu bytes\n", new_offset - cur_buf); + cur_buf = new_offset; + DEBUG_PRINTF("New offset: %lli\n", (s64a)(cur_buf - start)); + } + if (INNER_DEAD_FUNC32(*state) || OUTER_DEAD_FUNC32(*state)) { + DEBUG_PRINTF("Dead on arrival\n"); + *scan_end = end; + return MO_CONTINUE_MATCHING; + } + + const svbool_t lane_pred_32 = svwhilelt_b8(0, 32); + svuint8_t cur_state = svld1(lane_pred_32, state); + const m512 *masks = s->succ_masks; + + while (likely(end - cur_buf >= 4)) { + const u8 *b1 = cur_buf; + const u8 *b2 = cur_buf + 1; + const u8 *b3 = cur_buf + 2; + const u8 *b4 = cur_buf + 3; + const u8 c1 = *b1; + const u8 c2 = *b2; + const u8 c3 = *b3; + const u8 c4 = *b4; + svuint8_t succ_mask1 = svld1(lane_pred_32, (const u8*)(masks+c1)); + cur_state = svtbl(cur_state, succ_mask1); + const u8 a1 = svlastb(lane_pred_32, cur_state); + + svuint8_t succ_mask2 = svld1(lane_pred_32, (const u8*)(masks+c2)); + cur_state = svtbl(cur_state, succ_mask2); + const u8 a2 = svlastb(lane_pred_32, cur_state); + + svuint8_t succ_mask3 = svld1(lane_pred_32, (const u8*)(masks+c3)); + 
cur_state = svtbl(cur_state, succ_mask3); + const u8 a3 = svlastb(lane_pred_32, cur_state); + + svuint8_t succ_mask4 = svld1(lane_pred_32, (const u8*)(masks+c4)); + cur_state = svtbl(cur_state, succ_mask4); + const u8 a4 = svlastb(lane_pred_32, cur_state); + + DEBUG_PRINTF("c: %02hhx '%c'\n", c1, ourisprint(c1) ? c1 : '?'); + DEBUG_PRINTF("s: %u (flag: %u)\n", a1 & SHENG32_STATE_MASK, + a1 & SHENG32_STATE_FLAG_MASK); + + DEBUG_PRINTF("c: %02hhx '%c'\n", c2, ourisprint(c2) ? c2 : '?'); + DEBUG_PRINTF("s: %u (flag: %u)\n", a2 & SHENG32_STATE_MASK, + a2 & SHENG32_STATE_FLAG_MASK); + + DEBUG_PRINTF("c: %02hhx '%c'\n", c3, ourisprint(c3) ? c3 : '?'); + DEBUG_PRINTF("s: %u (flag: %u)\n", a3 & SHENG32_STATE_MASK, + a3 & SHENG32_STATE_FLAG_MASK); + + DEBUG_PRINTF("c: %02hhx '%c'\n", c4, ourisprint(c4) ? c4 : '?'); + DEBUG_PRINTF("s: %u (flag: %u)\n", a4 & SHENG32_STATE_MASK, + a4 & SHENG32_STATE_FLAG_MASK); + + if (unlikely(INTERESTING_FUNC32(a1, a2, a3, a4))) { + if (ACCEPT_FUNC32(a1)) { + u64a match_offset = base_offset + b1 - buf; + DEBUG_PRINTF("Accept state %u reached\n", + a1 & SHENG32_STATE_MASK); + DEBUG_PRINTF("Match @ %llu\n", match_offset); + if (STOP_AT_MATCH) { + DEBUG_PRINTF("Stopping at match @ %lli\n", + (s64a)(b1 - start)); + *scan_end = b1; + *state = a1; + return MO_MATCHES_PENDING; + } + if (single) { + if (fireSingleReport(cb, ctxt, s->report, match_offset) == + MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } else { + if (fireReports32(s, cb, ctxt, a1, match_offset, + cached_accept_state, cached_accept_id, + 0) == MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } + } + if (ACCEPT_FUNC32(a2)) { + u64a match_offset = base_offset + b2 - buf; + DEBUG_PRINTF("Accept state %u reached\n", + a2 & SHENG32_STATE_MASK); + DEBUG_PRINTF("Match @ %llu\n", match_offset); + if (STOP_AT_MATCH) { + DEBUG_PRINTF("Stopping at match @ %lli\n", + (s64a)(b2 - start)); + *scan_end = b2; + *state = a2; + return MO_MATCHES_PENDING; + } + if (single) { + if (fireSingleReport(cb, ctxt, s->report, match_offset) == + MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } else { + if (fireReports32(s, cb, ctxt, a2, match_offset, + cached_accept_state, cached_accept_id, + 0) == MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } + } + if (ACCEPT_FUNC32(a3)) { + u64a match_offset = base_offset + b3 - buf; + DEBUG_PRINTF("Accept state %u reached\n", + a3 & SHENG32_STATE_MASK); + DEBUG_PRINTF("Match @ %llu\n", match_offset); + if (STOP_AT_MATCH) { + DEBUG_PRINTF("Stopping at match @ %lli\n", + (s64a)(b3 - start)); + *scan_end = b3; + *state = a3; + return MO_MATCHES_PENDING; + } + if (single) { + if (fireSingleReport(cb, ctxt, s->report, match_offset) == + MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } else { + if (fireReports32(s, cb, ctxt, a3, match_offset, + cached_accept_state, cached_accept_id, + 0) == MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } + } + if (ACCEPT_FUNC32(a4)) { + u64a match_offset = base_offset + b4 - buf; + DEBUG_PRINTF("Accept state %u reached\n", + a4 & SHENG32_STATE_MASK); + DEBUG_PRINTF("Match @ %llu\n", match_offset); + if (STOP_AT_MATCH) { + DEBUG_PRINTF("Stopping at match @ %lli\n", + (s64a)(b4 - start)); + *scan_end = b4; + *state = a4; + return MO_MATCHES_PENDING; + } + if (single) { + if (fireSingleReport(cb, ctxt, s->report, match_offset) == + MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } else { + if (fireReports32(s, cb, ctxt, a4, match_offset, + cached_accept_state, cached_accept_id, + 0) == MO_HALT_MATCHING) { + return 
MO_HALT_MATCHING; + } + } + } + if (INNER_DEAD_FUNC32(a4)) { + DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(b4 - buf)); + *scan_end = end; + *state = a4; + return MO_CONTINUE_MATCHING; + } + if (cur_buf > min_accel_dist && INNER_ACCEL_FUNC32(a4)) { + DEBUG_PRINTF("Accel state reached @ %lli\n", (s64a)(b4 - buf)); + const union AccelAux *aaux = + get_accel32(s, a4 & SHENG32_STATE_MASK); + const u8 *new_offset = run_accel(aaux, cur_buf + 4, end); + if (new_offset < cur_buf + 4 + BAD_ACCEL_DIST) { + min_accel_dist = new_offset + BIG_ACCEL_PENALTY; + } else { + min_accel_dist = new_offset + SMALL_ACCEL_PENALTY; + } + DEBUG_PRINTF("Next accel chance: %llu\n", + (u64a)(min_accel_dist - start)); + DEBUG_PRINTF("Accel scanned %llu bytes\n", + (u64a)(new_offset - cur_buf - 4)); + cur_buf = new_offset; + DEBUG_PRINTF("New offset: %llu\n", (u64a)(cur_buf - buf)); + continue; + } + } + if (OUTER_DEAD_FUNC32(a4)) { + DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(cur_buf - buf)); + *scan_end = end; + *state = a4; + return MO_CONTINUE_MATCHING; + }; + if (cur_buf > min_accel_dist && OUTER_ACCEL_FUNC32(a4)) { + DEBUG_PRINTF("Accel state reached @ %lli\n", (s64a)(b4 - buf)); + const union AccelAux *aaux = + get_accel32(s, a4 & SHENG32_STATE_MASK); + const u8 *new_offset = run_accel(aaux, cur_buf + 4, end); + if (new_offset < cur_buf + 4 + BAD_ACCEL_DIST) { + min_accel_dist = new_offset + BIG_ACCEL_PENALTY; + } else { + min_accel_dist = new_offset + SMALL_ACCEL_PENALTY; + } + DEBUG_PRINTF("Next accel chance: %llu\n", + (u64a)(min_accel_dist - start)); + DEBUG_PRINTF("Accel scanned %llu bytes\n", + (u64a)(new_offset - cur_buf - 4)); + cur_buf = new_offset; + DEBUG_PRINTF("New offset: %llu\n", (u64a)(cur_buf - buf)); + continue; + }; + cur_buf += 4; + } + *state = svlastb(lane_pred_32, cur_state); + *scan_end = cur_buf; + return MO_CONTINUE_MATCHING; +} + +#if !defined(NO_SHENG64_IMPL) +static really_inline +char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt, + const struct sheng64 *s, + u8 *const cached_accept_state, + ReportID *const cached_accept_id, + u8 single, u64a base_offset, const u8 *buf, const u8 *start, + const u8 *end, const u8 **scan_end) { + DEBUG_PRINTF("Starting DFAx4 execution in state %u\n", + *state & SHENG64_STATE_MASK); + const u8 *cur_buf = start; + base_offset++; + DEBUG_PRINTF("Scanning %llu bytes\n", (u64a)(end - start)); + + if (INNER_DEAD_FUNC64(*state) || OUTER_DEAD_FUNC64(*state)) { + DEBUG_PRINTF("Dead on arrival\n"); + *scan_end = end; + return MO_CONTINUE_MATCHING; + } + + const svbool_t lane_pred_64 = svwhilelt_b8(0, 64); + svuint8_t cur_state = svld1(lane_pred_64, state); + const m512 *masks = s->succ_masks; + + while (likely(end - cur_buf >= 4)) { + const u8 *b1 = cur_buf; + const u8 *b2 = cur_buf + 1; + const u8 *b3 = cur_buf + 2; + const u8 *b4 = cur_buf + 3; + const u8 c1 = *b1; + const u8 c2 = *b2; + const u8 c3 = *b3; + const u8 c4 = *b4; + + svuint8_t succ_mask1 = svld1(lane_pred_64, (const u8*)(masks+c1)); + cur_state = svtbl(cur_state, succ_mask1); + const u8 a1 = svlastb(lane_pred_64, cur_state); + + svuint8_t succ_mask2 = svld1(lane_pred_64, (const u8*)(masks+c2)); + cur_state = svtbl(cur_state, succ_mask2); + const u8 a2 = svlastb(lane_pred_64, cur_state); + + svuint8_t succ_mask3 = svld1(lane_pred_64, (const u8*)(masks+c3)); + cur_state = svtbl(cur_state, succ_mask3); + const u8 a3 = svlastb(lane_pred_64, cur_state); + + svuint8_t succ_mask4 = svld1(lane_pred_64, (const u8*)(masks+c4)); + cur_state = svtbl(cur_state, succ_mask4); + const u8 a4 = 
svlastb(lane_pred_64, cur_state); + + DEBUG_PRINTF("c: %02hhx '%c'\n", c1, ourisprint(c1) ? c1 : '?'); + DEBUG_PRINTF("s: %u (flag: %u)\n", a1 & SHENG64_STATE_MASK, + a1 & SHENG64_STATE_FLAG_MASK); + + DEBUG_PRINTF("c: %02hhx '%c'\n", c2, ourisprint(c2) ? c2 : '?'); + DEBUG_PRINTF("s: %u (flag: %u)\n", a2 & SHENG64_STATE_MASK, + a2 & SHENG64_STATE_FLAG_MASK); + + DEBUG_PRINTF("c: %02hhx '%c'\n", c3, ourisprint(c3) ? c3 : '?'); + DEBUG_PRINTF("s: %u (flag: %u)\n", a3 & SHENG64_STATE_MASK, + a3 & SHENG64_STATE_FLAG_MASK); + + DEBUG_PRINTF("c: %02hhx '%c'\n", c4, ourisprint(c4) ? c4 : '?'); + DEBUG_PRINTF("s: %u (flag: %u)\n", a4 & SHENG64_STATE_MASK, + a4 & SHENG64_STATE_FLAG_MASK); + + if (unlikely(INTERESTING_FUNC64(a1, a2, a3, a4))) { + if (ACCEPT_FUNC64(a1)) { + u64a match_offset = base_offset + b1 - buf; + DEBUG_PRINTF("Accept state %u reached\n", + a1 & SHENG64_STATE_MASK); + DEBUG_PRINTF("Match @ %llu\n", match_offset); + if (STOP_AT_MATCH) { + DEBUG_PRINTF("Stopping at match @ %lli\n", + (s64a)(b1 - start)); + *scan_end = b1; + *state = a1; + return MO_MATCHES_PENDING; + } + if (single) { + if (fireSingleReport(cb, ctxt, s->report, match_offset) == + MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } else { + if (fireReports64(s, cb, ctxt, a1, match_offset, + cached_accept_state, cached_accept_id, + 0) == MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } + } + if (ACCEPT_FUNC64(a2)) { + u64a match_offset = base_offset + b2 - buf; + DEBUG_PRINTF("Accept state %u reached\n", + a2 & SHENG64_STATE_MASK); + DEBUG_PRINTF("Match @ %llu\n", match_offset); + if (STOP_AT_MATCH) { + DEBUG_PRINTF("Stopping at match @ %lli\n", + (s64a)(b2 - start)); + *scan_end = b2; + *state = a2; + return MO_MATCHES_PENDING; + } + if (single) { + if (fireSingleReport(cb, ctxt, s->report, match_offset) == + MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } else { + if (fireReports64(s, cb, ctxt, a2, match_offset, + cached_accept_state, cached_accept_id, + 0) == MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } + } + if (ACCEPT_FUNC64(a3)) { + u64a match_offset = base_offset + b3 - buf; + DEBUG_PRINTF("Accept state %u reached\n", + a3 & SHENG64_STATE_MASK); + DEBUG_PRINTF("Match @ %llu\n", match_offset); + if (STOP_AT_MATCH) { + DEBUG_PRINTF("Stopping at match @ %lli\n", + (s64a)(b3 - start)); + *scan_end = b3; + *state = a3; + return MO_MATCHES_PENDING; + } + if (single) { + if (fireSingleReport(cb, ctxt, s->report, match_offset) == + MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } else { + if (fireReports64(s, cb, ctxt, a3, match_offset, + cached_accept_state, cached_accept_id, + 0) == MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } + } + if (ACCEPT_FUNC64(a4)) { + u64a match_offset = base_offset + b4 - buf; + DEBUG_PRINTF("Accept state %u reached\n", + a4 & SHENG64_STATE_MASK); + DEBUG_PRINTF("Match @ %llu\n", match_offset); + if (STOP_AT_MATCH) { + DEBUG_PRINTF("Stopping at match @ %lli\n", + (s64a)(b4 - start)); + *scan_end = b4; + *state = a4; + return MO_MATCHES_PENDING; + } + if (single) { + if (fireSingleReport(cb, ctxt, s->report, match_offset) == + MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } else { + if (fireReports64(s, cb, ctxt, a4, match_offset, + cached_accept_state, cached_accept_id, + 0) == MO_HALT_MATCHING) { + return MO_HALT_MATCHING; + } + } + } + if (INNER_DEAD_FUNC64(a4)) { + DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(b4 - buf)); + *scan_end = end; + *state = a4; + return MO_CONTINUE_MATCHING; + } + } + if (OUTER_DEAD_FUNC64(a4)) { + 
DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(cur_buf - buf)); + *scan_end = end; + *state = a4; + return MO_CONTINUE_MATCHING; + } + cur_buf += 4; + } + *state = svlastb(lane_pred_64, cur_state); + *scan_end = cur_buf; + return MO_CONTINUE_MATCHING; +} +#endif +#endif + #if defined(HAVE_AVX512VBMI) static really_inline char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt, diff --git a/src/nfa/shengcompile.cpp b/src/nfa/shengcompile.cpp index 055e1971..0f93e139 100644 --- a/src/nfa/shengcompile.cpp +++ b/src/nfa/shengcompile.cpp @@ -730,10 +730,17 @@ bytecode_ptr sheng32Compile(raw_dfa &raw, const CompileContext &cc, return nullptr; } +#ifdef HAVE_SVE + if (svcntb()<32) { + DEBUG_PRINTF("Sheng32 failed, SVE width is too small!\n"); + return nullptr; + } +#else if (!cc.target_info.has_avx512vbmi()) { DEBUG_PRINTF("Sheng32 failed, no HS_CPU_FEATURES_AVX512VBMI!\n"); return nullptr; } +#endif sheng_build_strat strat(raw, rm, only_accel_init); dfa_info info(strat); @@ -762,10 +769,17 @@ bytecode_ptr sheng64Compile(raw_dfa &raw, const CompileContext &cc, return nullptr; } +#ifdef HAVE_SVE + if (svcntb()<64) { + DEBUG_PRINTF("Sheng64 failed, SVE width is too small!\n"); + return nullptr; + } +#else if (!cc.target_info.has_avx512vbmi()) { DEBUG_PRINTF("Sheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n"); return nullptr; } +#endif sheng_build_strat strat(raw, rm, only_accel_init); dfa_info info(strat); From f5412b3509082a3278fd95a3bb0247916d4c0823 Mon Sep 17 00:00:00 2001 From: Konstantinos Margaritis Date: Tue, 19 Mar 2024 11:40:23 +0200 Subject: [PATCH 08/19] Revert "RFC Enable sheng32/64 for SVE" --- src/nfa/sheng.c | 8 +- src/nfa/sheng.h | 8 +- src/nfa/sheng_defs.h | 70 +++---- src/nfa/sheng_impl.h | 127 ------------ src/nfa/sheng_impl4.h | 428 --------------------------------------- src/nfa/shengcompile.cpp | 14 -- 6 files changed, 43 insertions(+), 612 deletions(-) diff --git a/src/nfa/sheng.c b/src/nfa/sheng.c index 922e8f80..3f36e218 100644 --- a/src/nfa/sheng.c +++ b/src/nfa/sheng.c @@ -154,7 +154,7 @@ char fireReports(const struct sheng *sh, NfaCallback cb, void *ctxt, return MO_CONTINUE_MATCHING; /* continue execution */ } -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) // Sheng32 static really_inline const struct sheng32 *get_sheng32(const struct NFA *n) { @@ -351,7 +351,7 @@ char fireReports64(const struct sheng64 *sh, NfaCallback cb, void *ctxt, } return MO_CONTINUE_MATCHING; /* continue execution */ } -#endif // end of HAVE_AVX512VBMI || HAVE_SVE +#endif // end of HAVE_AVX512VBMI /* include Sheng function definitions */ #include "sheng_defs.h" @@ -871,7 +871,7 @@ char nfaExecSheng_expandState(UNUSED const struct NFA *nfa, void *dest, return 0; } -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) // Sheng32 static really_inline char runSheng32Cb(const struct sheng32 *sh, NfaCallback cb, void *ctxt, @@ -1874,4 +1874,4 @@ char nfaExecSheng64_expandState(UNUSED const struct NFA *nfa, void *dest, *(u8 *)dest = *(const u8 *)src; return 0; } -#endif // end of HAVE_AVX512VBMI || HAVE_SVE +#endif // end of HAVE_AVX512VBMI diff --git a/src/nfa/sheng.h b/src/nfa/sheng.h index 212bd3a4..7b90e303 100644 --- a/src/nfa/sheng.h +++ b/src/nfa/sheng.h @@ -58,7 +58,7 @@ char nfaExecSheng_reportCurrent(const struct NFA *n, struct mq *q); char nfaExecSheng_B(const struct NFA *n, u64a offset, const u8 *buffer, size_t length, NfaCallback cb, void *context); -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) 
#define nfaExecSheng32_B_Reverse NFA_API_NO_IMPL #define nfaExecSheng32_zombie_status NFA_API_ZOMBIE_NO_IMPL @@ -106,7 +106,8 @@ char nfaExecSheng64_reportCurrent(const struct NFA *n, struct mq *q); char nfaExecSheng64_B(const struct NFA *n, u64a offset, const u8 *buffer, size_t length, NfaCallback cb, void *context); -#else // !HAVE_AVX512VBMI && !HAVE_SVE + +#else // !HAVE_AVX512VBMI #define nfaExecSheng32_B_Reverse NFA_API_NO_IMPL #define nfaExecSheng32_zombie_status NFA_API_ZOMBIE_NO_IMPL @@ -137,7 +138,6 @@ char nfaExecSheng64_B(const struct NFA *n, u64a offset, const u8 *buffer, #define nfaExecSheng64_testEOD NFA_API_NO_IMPL #define nfaExecSheng64_reportCurrent NFA_API_NO_IMPL #define nfaExecSheng64_B NFA_API_NO_IMPL -#endif // end of HAVE_AVX512VBMI || defined(HAVE_SVE) - +#endif // end of HAVE_AVX512VBMI #endif /* SHENG_H_ */ diff --git a/src/nfa/sheng_defs.h b/src/nfa/sheng_defs.h index 886af28e..390af752 100644 --- a/src/nfa/sheng_defs.h +++ b/src/nfa/sheng_defs.h @@ -52,7 +52,7 @@ u8 hasInterestingStates(const u8 a, const u8 b, const u8 c, const u8 d) { return (a | b | c | d) & (SHENG_STATE_FLAG_MASK); } -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) static really_inline u8 isDeadState32(const u8 a) { return a & SHENG32_STATE_DEAD; @@ -108,7 +108,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_cod #define DEAD_FUNC isDeadState #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_cod #define DEAD_FUNC32 isDeadState32 #define ACCEPT_FUNC32 isAcceptState32 @@ -121,7 +121,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -135,7 +135,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_co #define DEAD_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_co #define DEAD_FUNC32 dummyFunc #define ACCEPT_FUNC32 isAcceptState32 @@ -148,7 +148,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -162,7 +162,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_samd #define DEAD_FUNC isDeadState #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_samd #define DEAD_FUNC32 isDeadState32 #define ACCEPT_FUNC32 isAcceptState32 @@ -175,7 +175,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -189,7 +189,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_sam #define DEAD_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_sam #define DEAD_FUNC32 dummyFunc #define ACCEPT_FUNC32 isAcceptState32 @@ -202,7 +202,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) 
#undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -216,7 +216,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_nmd #define DEAD_FUNC isDeadState #define ACCEPT_FUNC dummyFunc -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_nmd #define DEAD_FUNC32 isDeadState32 #define ACCEPT_FUNC32 dummyFunc @@ -229,7 +229,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -243,7 +243,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define SHENG_IMPL sheng_nm #define DEAD_FUNC dummyFunc #define ACCEPT_FUNC dummyFunc -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_nm #define DEAD_FUNC32 dummyFunc #define ACCEPT_FUNC32 dummyFunc @@ -256,7 +256,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef SHENG_IMPL #undef DEAD_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef DEAD_FUNC32 #undef ACCEPT_FUNC32 @@ -277,7 +277,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC isAccelState #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_coda #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 isDeadState32 @@ -296,7 +296,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -316,7 +316,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_cod #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 isDeadState32 @@ -339,7 +339,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -363,7 +363,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC isAccelState #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_coa #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 dummyFunc @@ -382,7 +382,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -402,7 +402,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_co #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 dummyFunc @@ -425,7 +425,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef 
OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -449,7 +449,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC isAccelState #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_samda #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 isDeadState32 @@ -468,7 +468,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -488,7 +488,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_samd #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 isDeadState32 @@ -511,7 +511,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -535,7 +535,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC isAccelState #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_sama #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 dummyFunc @@ -554,7 +554,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -574,7 +574,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC isAcceptState -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_sam #define INTERESTING_FUNC32 hasInterestingStates32 #define INNER_DEAD_FUNC32 dummyFunc @@ -597,7 +597,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -623,7 +623,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC isAccelState #define ACCEPT_FUNC dummyFunc -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_nmda #define INTERESTING_FUNC32 dummyFunc4 #define INNER_DEAD_FUNC32 dummyFunc @@ -642,7 +642,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -662,7 +662,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC dummyFunc -#if defined(HAVE_AVX512VBMI) || 
defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_nmd #define INTERESTING_FUNC32 dummyFunc4 #define INNER_DEAD_FUNC32 dummyFunc @@ -685,7 +685,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 @@ -712,7 +712,7 @@ u8 dummyFunc(UNUSED const u8 a) { #define INNER_ACCEL_FUNC dummyFunc #define OUTER_ACCEL_FUNC dummyFunc #define ACCEPT_FUNC dummyFunc -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #define SHENG32_IMPL sheng32_4_nm #define INTERESTING_FUNC32 dummyFunc4 #define INNER_DEAD_FUNC32 dummyFunc @@ -735,7 +735,7 @@ u8 dummyFunc(UNUSED const u8 a) { #undef INNER_ACCEL_FUNC #undef OUTER_ACCEL_FUNC #undef ACCEPT_FUNC -#if defined(HAVE_AVX512VBMI) || defined(HAVE_SVE) +#if defined(HAVE_AVX512VBMI) #undef SHENG32_IMPL #undef INTERESTING_FUNC32 #undef INNER_DEAD_FUNC32 diff --git a/src/nfa/sheng_impl.h b/src/nfa/sheng_impl.h index 9634fa65..1fa5c831 100644 --- a/src/nfa/sheng_impl.h +++ b/src/nfa/sheng_impl.h @@ -96,133 +96,6 @@ char SHENG_IMPL(u8 *state, NfaCallback cb, void *ctxt, const struct sheng *s, return MO_CONTINUE_MATCHING; } -#if defined(HAVE_SVE) - -static really_inline -char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt, - const struct sheng32 *s, - u8 *const cached_accept_state, - ReportID *const cached_accept_id, - u8 single, u64a base_offset, const u8 *buf, const u8 *start, - const u8 *end, const u8 **scan_end) { - DEBUG_PRINTF("Starting DFA execution in state %u\n", - *state & SHENG32_STATE_MASK); - const u8 *cur_buf = start; - if (DEAD_FUNC32(*state)) { - DEBUG_PRINTF("Dead on arrival\n"); - *scan_end = end; - return MO_CONTINUE_MATCHING; - } - DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start)); - - const svbool_t lane_pred_32 = svwhilelt_b8(0, 32); - svuint8_t cur_state = svld1(lane_pred_32, state); - const m512 *masks = s->succ_masks; - - while (likely(cur_buf != end)) { - const u8 c = *cur_buf; - svuint8_t succ_mask = svld1(lane_pred_32, (const u8*)(masks + c)); - cur_state = svtbl(cur_state, succ_mask); - const u8 tmp = svlastb(lane_pred_32, cur_state); - - DEBUG_PRINTF("c: %02hhx '%c'\n", c, ourisprint(c) ? 
c : '?'); - DEBUG_PRINTF("s: %u (flag: %u)\n", tmp & SHENG32_STATE_MASK, - tmp & SHENG32_STATE_FLAG_MASK); - - if (unlikely(ACCEPT_FUNC32(tmp))) { - DEBUG_PRINTF("Accept state %u reached\n", tmp & SHENG32_STATE_MASK); - u64a match_offset = base_offset + (cur_buf - buf) + 1; - DEBUG_PRINTF("Match @ %llu\n", match_offset); - if (STOP_AT_MATCH) { - DEBUG_PRINTF("Stopping at match @ %lli\n", - (u64a)(cur_buf - start)); - *state = tmp; - *scan_end = cur_buf; - return MO_MATCHES_PENDING; - } - if (single) { - if (fireSingleReport(cb, ctxt, s->report, match_offset) == - MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } else { - if (fireReports32(s, cb, ctxt, tmp, match_offset, - cached_accept_state, cached_accept_id, - 0) == MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } - } - cur_buf++; - } - *state = svlastb(lane_pred_32, cur_state); - *scan_end = cur_buf; - return MO_CONTINUE_MATCHING; -} - -static really_inline -char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt, - const struct sheng64 *s, - u8 *const cached_accept_state, - ReportID *const cached_accept_id, - u8 single, u64a base_offset, const u8 *buf, const u8 *start, - const u8 *end, const u8 **scan_end) { - DEBUG_PRINTF("Starting DFA execution in state %u\n", - *state & SHENG64_STATE_MASK); - const u8 *cur_buf = start; - if (DEAD_FUNC64(*state)) { - DEBUG_PRINTF("Dead on arrival\n"); - *scan_end = end; - return MO_CONTINUE_MATCHING; - } - DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start)); - - const svbool_t lane_pred_64 = svwhilelt_b8(0, 64); - svuint8_t cur_state = svld1(lane_pred_64, state); - const m512 *masks = s->succ_masks; - - while (likely(cur_buf != end)) { - const u8 c = *cur_buf; - svuint8_t succ_mask = svld1(lane_pred_64, (const u8*)(masks + c)); - cur_state = svtbl(cur_state, succ_mask); - const u8 tmp = svlastb(lane_pred_64, cur_state); - - DEBUG_PRINTF("c: %02hhx '%c'\n", c, ourisprint(c) ? 
c : '?'); - DEBUG_PRINTF("s: %u (flag: %u)\n", tmp & SHENG64_STATE_MASK, - tmp & SHENG64_STATE_FLAG_MASK); - - if (unlikely(ACCEPT_FUNC64(tmp))) { - DEBUG_PRINTF("Accept state %u reached\n", tmp & SHENG64_STATE_MASK); - u64a match_offset = base_offset + (cur_buf - buf) + 1; - DEBUG_PRINTF("Match @ %llu\n", match_offset); - if (STOP_AT_MATCH) { - DEBUG_PRINTF("Stopping at match @ %lli\n", - (u64a)(cur_buf - start)); - *state = tmp; - *scan_end = cur_buf; - return MO_MATCHES_PENDING; - } - if (single) { - if (fireSingleReport(cb, ctxt, s->report, match_offset) == - MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } else { - if (fireReports64(s, cb, ctxt, tmp, match_offset, - cached_accept_state, cached_accept_id, - 0) == MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } - } - cur_buf++; - } - *state = svlastb(lane_pred_64, cur_state); - *scan_end = cur_buf; - return MO_CONTINUE_MATCHING; -} -#endif - #if defined(HAVE_AVX512VBMI) static really_inline char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt, diff --git a/src/nfa/sheng_impl4.h b/src/nfa/sheng_impl4.h index 10ad4ea0..e5d3468f 100644 --- a/src/nfa/sheng_impl4.h +++ b/src/nfa/sheng_impl4.h @@ -283,434 +283,6 @@ char SHENG_IMPL(u8 *state, NfaCallback cb, void *ctxt, const struct sheng *s, return MO_CONTINUE_MATCHING; } -#if defined(HAVE_SVE) -static really_inline -char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt, - const struct sheng32 *s, - u8 *const cached_accept_state, - ReportID *const cached_accept_id, - u8 single, u64a base_offset, const u8 *buf, const u8 *start, - const u8 *end, const u8 **scan_end) { - DEBUG_PRINTF("Starting DFAx4 execution in state %u\n", - *state & SHENG32_STATE_MASK); - const u8 *cur_buf = start; - const u8 *min_accel_dist = start; - base_offset++; - DEBUG_PRINTF("Scanning %llu bytes\n", (u64a)(end - start)); - - if (INNER_ACCEL_FUNC32(*state) || OUTER_ACCEL_FUNC32(*state)) { - DEBUG_PRINTF("Accel state reached @ 0\n"); - const union AccelAux *aaux = - get_accel32(s, *state & SHENG32_STATE_MASK); - const u8 *new_offset = run_accel(aaux, cur_buf, end); - if (new_offset < cur_buf + BAD_ACCEL_DIST) { - min_accel_dist = new_offset + BIG_ACCEL_PENALTY; - } else { - min_accel_dist = new_offset + SMALL_ACCEL_PENALTY; - } - DEBUG_PRINTF("Next accel chance: %llu\n", - (u64a)(min_accel_dist - start)); - DEBUG_PRINTF("Accel scanned %zu bytes\n", new_offset - cur_buf); - cur_buf = new_offset; - DEBUG_PRINTF("New offset: %lli\n", (s64a)(cur_buf - start)); - } - if (INNER_DEAD_FUNC32(*state) || OUTER_DEAD_FUNC32(*state)) { - DEBUG_PRINTF("Dead on arrival\n"); - *scan_end = end; - return MO_CONTINUE_MATCHING; - } - - const svbool_t lane_pred_32 = svwhilelt_b8(0, 32); - svuint8_t cur_state = svld1(lane_pred_32, state); - const m512 *masks = s->succ_masks; - - while (likely(end - cur_buf >= 4)) { - const u8 *b1 = cur_buf; - const u8 *b2 = cur_buf + 1; - const u8 *b3 = cur_buf + 2; - const u8 *b4 = cur_buf + 3; - const u8 c1 = *b1; - const u8 c2 = *b2; - const u8 c3 = *b3; - const u8 c4 = *b4; - svuint8_t succ_mask1 = svld1(lane_pred_32, (const u8*)(masks+c1)); - cur_state = svtbl(cur_state, succ_mask1); - const u8 a1 = svlastb(lane_pred_32, cur_state); - - svuint8_t succ_mask2 = svld1(lane_pred_32, (const u8*)(masks+c2)); - cur_state = svtbl(cur_state, succ_mask2); - const u8 a2 = svlastb(lane_pred_32, cur_state); - - svuint8_t succ_mask3 = svld1(lane_pred_32, (const u8*)(masks+c3)); - cur_state = svtbl(cur_state, succ_mask3); - const u8 a3 = svlastb(lane_pred_32, cur_state); - - svuint8_t succ_mask4 = 
svld1(lane_pred_32, (const u8*)(masks+c4)); - cur_state = svtbl(cur_state, succ_mask4); - const u8 a4 = svlastb(lane_pred_32, cur_state); - - DEBUG_PRINTF("c: %02hhx '%c'\n", c1, ourisprint(c1) ? c1 : '?'); - DEBUG_PRINTF("s: %u (flag: %u)\n", a1 & SHENG32_STATE_MASK, - a1 & SHENG32_STATE_FLAG_MASK); - - DEBUG_PRINTF("c: %02hhx '%c'\n", c2, ourisprint(c2) ? c2 : '?'); - DEBUG_PRINTF("s: %u (flag: %u)\n", a2 & SHENG32_STATE_MASK, - a2 & SHENG32_STATE_FLAG_MASK); - - DEBUG_PRINTF("c: %02hhx '%c'\n", c3, ourisprint(c3) ? c3 : '?'); - DEBUG_PRINTF("s: %u (flag: %u)\n", a3 & SHENG32_STATE_MASK, - a3 & SHENG32_STATE_FLAG_MASK); - - DEBUG_PRINTF("c: %02hhx '%c'\n", c4, ourisprint(c4) ? c4 : '?'); - DEBUG_PRINTF("s: %u (flag: %u)\n", a4 & SHENG32_STATE_MASK, - a4 & SHENG32_STATE_FLAG_MASK); - - if (unlikely(INTERESTING_FUNC32(a1, a2, a3, a4))) { - if (ACCEPT_FUNC32(a1)) { - u64a match_offset = base_offset + b1 - buf; - DEBUG_PRINTF("Accept state %u reached\n", - a1 & SHENG32_STATE_MASK); - DEBUG_PRINTF("Match @ %llu\n", match_offset); - if (STOP_AT_MATCH) { - DEBUG_PRINTF("Stopping at match @ %lli\n", - (s64a)(b1 - start)); - *scan_end = b1; - *state = a1; - return MO_MATCHES_PENDING; - } - if (single) { - if (fireSingleReport(cb, ctxt, s->report, match_offset) == - MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } else { - if (fireReports32(s, cb, ctxt, a1, match_offset, - cached_accept_state, cached_accept_id, - 0) == MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } - } - if (ACCEPT_FUNC32(a2)) { - u64a match_offset = base_offset + b2 - buf; - DEBUG_PRINTF("Accept state %u reached\n", - a2 & SHENG32_STATE_MASK); - DEBUG_PRINTF("Match @ %llu\n", match_offset); - if (STOP_AT_MATCH) { - DEBUG_PRINTF("Stopping at match @ %lli\n", - (s64a)(b2 - start)); - *scan_end = b2; - *state = a2; - return MO_MATCHES_PENDING; - } - if (single) { - if (fireSingleReport(cb, ctxt, s->report, match_offset) == - MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } else { - if (fireReports32(s, cb, ctxt, a2, match_offset, - cached_accept_state, cached_accept_id, - 0) == MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } - } - if (ACCEPT_FUNC32(a3)) { - u64a match_offset = base_offset + b3 - buf; - DEBUG_PRINTF("Accept state %u reached\n", - a3 & SHENG32_STATE_MASK); - DEBUG_PRINTF("Match @ %llu\n", match_offset); - if (STOP_AT_MATCH) { - DEBUG_PRINTF("Stopping at match @ %lli\n", - (s64a)(b3 - start)); - *scan_end = b3; - *state = a3; - return MO_MATCHES_PENDING; - } - if (single) { - if (fireSingleReport(cb, ctxt, s->report, match_offset) == - MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } else { - if (fireReports32(s, cb, ctxt, a3, match_offset, - cached_accept_state, cached_accept_id, - 0) == MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } - } - if (ACCEPT_FUNC32(a4)) { - u64a match_offset = base_offset + b4 - buf; - DEBUG_PRINTF("Accept state %u reached\n", - a4 & SHENG32_STATE_MASK); - DEBUG_PRINTF("Match @ %llu\n", match_offset); - if (STOP_AT_MATCH) { - DEBUG_PRINTF("Stopping at match @ %lli\n", - (s64a)(b4 - start)); - *scan_end = b4; - *state = a4; - return MO_MATCHES_PENDING; - } - if (single) { - if (fireSingleReport(cb, ctxt, s->report, match_offset) == - MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } else { - if (fireReports32(s, cb, ctxt, a4, match_offset, - cached_accept_state, cached_accept_id, - 0) == MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } - } - if (INNER_DEAD_FUNC32(a4)) { - DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(b4 - buf)); - 
*scan_end = end; - *state = a4; - return MO_CONTINUE_MATCHING; - } - if (cur_buf > min_accel_dist && INNER_ACCEL_FUNC32(a4)) { - DEBUG_PRINTF("Accel state reached @ %lli\n", (s64a)(b4 - buf)); - const union AccelAux *aaux = - get_accel32(s, a4 & SHENG32_STATE_MASK); - const u8 *new_offset = run_accel(aaux, cur_buf + 4, end); - if (new_offset < cur_buf + 4 + BAD_ACCEL_DIST) { - min_accel_dist = new_offset + BIG_ACCEL_PENALTY; - } else { - min_accel_dist = new_offset + SMALL_ACCEL_PENALTY; - } - DEBUG_PRINTF("Next accel chance: %llu\n", - (u64a)(min_accel_dist - start)); - DEBUG_PRINTF("Accel scanned %llu bytes\n", - (u64a)(new_offset - cur_buf - 4)); - cur_buf = new_offset; - DEBUG_PRINTF("New offset: %llu\n", (u64a)(cur_buf - buf)); - continue; - } - } - if (OUTER_DEAD_FUNC32(a4)) { - DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(cur_buf - buf)); - *scan_end = end; - *state = a4; - return MO_CONTINUE_MATCHING; - }; - if (cur_buf > min_accel_dist && OUTER_ACCEL_FUNC32(a4)) { - DEBUG_PRINTF("Accel state reached @ %lli\n", (s64a)(b4 - buf)); - const union AccelAux *aaux = - get_accel32(s, a4 & SHENG32_STATE_MASK); - const u8 *new_offset = run_accel(aaux, cur_buf + 4, end); - if (new_offset < cur_buf + 4 + BAD_ACCEL_DIST) { - min_accel_dist = new_offset + BIG_ACCEL_PENALTY; - } else { - min_accel_dist = new_offset + SMALL_ACCEL_PENALTY; - } - DEBUG_PRINTF("Next accel chance: %llu\n", - (u64a)(min_accel_dist - start)); - DEBUG_PRINTF("Accel scanned %llu bytes\n", - (u64a)(new_offset - cur_buf - 4)); - cur_buf = new_offset; - DEBUG_PRINTF("New offset: %llu\n", (u64a)(cur_buf - buf)); - continue; - }; - cur_buf += 4; - } - *state = svlastb(lane_pred_32, cur_state); - *scan_end = cur_buf; - return MO_CONTINUE_MATCHING; -} - -#if !defined(NO_SHENG64_IMPL) -static really_inline -char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt, - const struct sheng64 *s, - u8 *const cached_accept_state, - ReportID *const cached_accept_id, - u8 single, u64a base_offset, const u8 *buf, const u8 *start, - const u8 *end, const u8 **scan_end) { - DEBUG_PRINTF("Starting DFAx4 execution in state %u\n", - *state & SHENG64_STATE_MASK); - const u8 *cur_buf = start; - base_offset++; - DEBUG_PRINTF("Scanning %llu bytes\n", (u64a)(end - start)); - - if (INNER_DEAD_FUNC64(*state) || OUTER_DEAD_FUNC64(*state)) { - DEBUG_PRINTF("Dead on arrival\n"); - *scan_end = end; - return MO_CONTINUE_MATCHING; - } - - const svbool_t lane_pred_64 = svwhilelt_b8(0, 64); - svuint8_t cur_state = svld1(lane_pred_64, state); - const m512 *masks = s->succ_masks; - - while (likely(end - cur_buf >= 4)) { - const u8 *b1 = cur_buf; - const u8 *b2 = cur_buf + 1; - const u8 *b3 = cur_buf + 2; - const u8 *b4 = cur_buf + 3; - const u8 c1 = *b1; - const u8 c2 = *b2; - const u8 c3 = *b3; - const u8 c4 = *b4; - - svuint8_t succ_mask1 = svld1(lane_pred_64, (const u8*)(masks+c1)); - cur_state = svtbl(cur_state, succ_mask1); - const u8 a1 = svlastb(lane_pred_64, cur_state); - - svuint8_t succ_mask2 = svld1(lane_pred_64, (const u8*)(masks+c2)); - cur_state = svtbl(cur_state, succ_mask2); - const u8 a2 = svlastb(lane_pred_64, cur_state); - - svuint8_t succ_mask3 = svld1(lane_pred_64, (const u8*)(masks+c3)); - cur_state = svtbl(cur_state, succ_mask3); - const u8 a3 = svlastb(lane_pred_64, cur_state); - - svuint8_t succ_mask4 = svld1(lane_pred_64, (const u8*)(masks+c4)); - cur_state = svtbl(cur_state, succ_mask4); - const u8 a4 = svlastb(lane_pred_64, cur_state); - - DEBUG_PRINTF("c: %02hhx '%c'\n", c1, ourisprint(c1) ? 
c1 : '?'); - DEBUG_PRINTF("s: %u (flag: %u)\n", a1 & SHENG64_STATE_MASK, - a1 & SHENG64_STATE_FLAG_MASK); - - DEBUG_PRINTF("c: %02hhx '%c'\n", c2, ourisprint(c2) ? c2 : '?'); - DEBUG_PRINTF("s: %u (flag: %u)\n", a2 & SHENG64_STATE_MASK, - a2 & SHENG64_STATE_FLAG_MASK); - - DEBUG_PRINTF("c: %02hhx '%c'\n", c3, ourisprint(c3) ? c3 : '?'); - DEBUG_PRINTF("s: %u (flag: %u)\n", a3 & SHENG64_STATE_MASK, - a3 & SHENG64_STATE_FLAG_MASK); - - DEBUG_PRINTF("c: %02hhx '%c'\n", c4, ourisprint(c4) ? c4 : '?'); - DEBUG_PRINTF("s: %u (flag: %u)\n", a4 & SHENG64_STATE_MASK, - a4 & SHENG64_STATE_FLAG_MASK); - - if (unlikely(INTERESTING_FUNC64(a1, a2, a3, a4))) { - if (ACCEPT_FUNC64(a1)) { - u64a match_offset = base_offset + b1 - buf; - DEBUG_PRINTF("Accept state %u reached\n", - a1 & SHENG64_STATE_MASK); - DEBUG_PRINTF("Match @ %llu\n", match_offset); - if (STOP_AT_MATCH) { - DEBUG_PRINTF("Stopping at match @ %lli\n", - (s64a)(b1 - start)); - *scan_end = b1; - *state = a1; - return MO_MATCHES_PENDING; - } - if (single) { - if (fireSingleReport(cb, ctxt, s->report, match_offset) == - MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } else { - if (fireReports64(s, cb, ctxt, a1, match_offset, - cached_accept_state, cached_accept_id, - 0) == MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } - } - if (ACCEPT_FUNC64(a2)) { - u64a match_offset = base_offset + b2 - buf; - DEBUG_PRINTF("Accept state %u reached\n", - a2 & SHENG64_STATE_MASK); - DEBUG_PRINTF("Match @ %llu\n", match_offset); - if (STOP_AT_MATCH) { - DEBUG_PRINTF("Stopping at match @ %lli\n", - (s64a)(b2 - start)); - *scan_end = b2; - *state = a2; - return MO_MATCHES_PENDING; - } - if (single) { - if (fireSingleReport(cb, ctxt, s->report, match_offset) == - MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } else { - if (fireReports64(s, cb, ctxt, a2, match_offset, - cached_accept_state, cached_accept_id, - 0) == MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } - } - if (ACCEPT_FUNC64(a3)) { - u64a match_offset = base_offset + b3 - buf; - DEBUG_PRINTF("Accept state %u reached\n", - a3 & SHENG64_STATE_MASK); - DEBUG_PRINTF("Match @ %llu\n", match_offset); - if (STOP_AT_MATCH) { - DEBUG_PRINTF("Stopping at match @ %lli\n", - (s64a)(b3 - start)); - *scan_end = b3; - *state = a3; - return MO_MATCHES_PENDING; - } - if (single) { - if (fireSingleReport(cb, ctxt, s->report, match_offset) == - MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } else { - if (fireReports64(s, cb, ctxt, a3, match_offset, - cached_accept_state, cached_accept_id, - 0) == MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } - } - if (ACCEPT_FUNC64(a4)) { - u64a match_offset = base_offset + b4 - buf; - DEBUG_PRINTF("Accept state %u reached\n", - a4 & SHENG64_STATE_MASK); - DEBUG_PRINTF("Match @ %llu\n", match_offset); - if (STOP_AT_MATCH) { - DEBUG_PRINTF("Stopping at match @ %lli\n", - (s64a)(b4 - start)); - *scan_end = b4; - *state = a4; - return MO_MATCHES_PENDING; - } - if (single) { - if (fireSingleReport(cb, ctxt, s->report, match_offset) == - MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } else { - if (fireReports64(s, cb, ctxt, a4, match_offset, - cached_accept_state, cached_accept_id, - 0) == MO_HALT_MATCHING) { - return MO_HALT_MATCHING; - } - } - } - if (INNER_DEAD_FUNC64(a4)) { - DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(b4 - buf)); - *scan_end = end; - *state = a4; - return MO_CONTINUE_MATCHING; - } - } - if (OUTER_DEAD_FUNC64(a4)) { - DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(cur_buf - buf)); - *scan_end = end; - *state = 
a4; - return MO_CONTINUE_MATCHING; - } - cur_buf += 4; - } - *state = svlastb(lane_pred_64, cur_state); - *scan_end = cur_buf; - return MO_CONTINUE_MATCHING; -} -#endif -#endif - #if defined(HAVE_AVX512VBMI) static really_inline char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt, diff --git a/src/nfa/shengcompile.cpp b/src/nfa/shengcompile.cpp index 0f93e139..055e1971 100644 --- a/src/nfa/shengcompile.cpp +++ b/src/nfa/shengcompile.cpp @@ -730,17 +730,10 @@ bytecode_ptr sheng32Compile(raw_dfa &raw, const CompileContext &cc, return nullptr; } -#ifdef HAVE_SVE - if (svcntb()<32) { - DEBUG_PRINTF("Sheng32 failed, SVE width is too small!\n"); - return nullptr; - } -#else if (!cc.target_info.has_avx512vbmi()) { DEBUG_PRINTF("Sheng32 failed, no HS_CPU_FEATURES_AVX512VBMI!\n"); return nullptr; } -#endif sheng_build_strat strat(raw, rm, only_accel_init); dfa_info info(strat); @@ -769,17 +762,10 @@ bytecode_ptr sheng64Compile(raw_dfa &raw, const CompileContext &cc, return nullptr; } -#ifdef HAVE_SVE - if (svcntb()<64) { - DEBUG_PRINTF("Sheng64 failed, SVE width is too small!\n"); - return nullptr; - } -#else if (!cc.target_info.has_avx512vbmi()) { DEBUG_PRINTF("Sheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n"); return nullptr; } -#endif sheng_build_strat strat(raw, rm, only_accel_init); dfa_info info(strat); From 50a62a17ffbfdbf4a9a30010bec61ebc271d95f8 Mon Sep 17 00:00:00 2001 From: gtsoul-tech Date: Mon, 1 Apr 2024 16:05:13 +0300 Subject: [PATCH 09/19] changed color output to csv output --- benchmarks/benchmarks.cpp | 168 ++++++++++++++++++++++---------------- 1 file changed, 97 insertions(+), 71 deletions(-) diff --git a/benchmarks/benchmarks.cpp b/benchmarks/benchmarks.cpp index 91cab3f8..c6e453ef 100644 --- a/benchmarks/benchmarks.cpp +++ b/benchmarks/benchmarks.cpp @@ -26,32 +26,30 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ -#include #include +#include #include #include -#include -#include #include +#include +#include #include "benchmarks.hpp" -#define MAX_LOOPS 1000000000 -#define MAX_MATCHES 5 -#define N 8 +#define MAX_LOOPS 1000000000 +#define MAX_MATCHES 5 +#define N 8 struct hlmMatchEntry { size_t to; u32 id; - hlmMatchEntry(size_t end, u32 identifier) : - to(end), id(identifier) {} + hlmMatchEntry(size_t end, u32 identifier) : to(end), id(identifier) {} }; std::vector ctxt; -static -hwlmcb_rv_t hlmSimpleCallback(size_t to, u32 id, - UNUSED struct hs_scratch *scratch) { +static hwlmcb_rv_t hlmSimpleCallback(size_t to, u32 id, + UNUSED struct hs_scratch *scratch) { DEBUG_PRINTF("match @%zu = %u\n", to, id); ctxt.push_back(hlmMatchEntry(to, id)); @@ -59,10 +57,12 @@ hwlmcb_rv_t hlmSimpleCallback(size_t to, u32 id, return HWLM_CONTINUE_MATCHING; } -template -static void run_benchmarks(int size, int loops, int max_matches, bool is_reverse, MicroBenchmark &bench, InitFunc &&init, BenchFunc &&func) { +template +static void run_benchmarks(int size, int loops, int max_matches, + bool is_reverse, MicroBenchmark &bench, + InitFunc &&init, BenchFunc &&func) { init(bench); - double total_sec = 0.0; + double total_sec = 0.0; u64a total_size = 0; double bw = 0.0; double avg_bw = 0.0; @@ -70,29 +70,31 @@ static void run_benchmarks(int size, int loops, int max_matches, bool is_reverse double avg_time = 0.0; if (max_matches) { int pos = 0; - for(int j = 0; j < max_matches - 1; j++) { + for (int j = 0; j < max_matches - 1; j++) { bench.buf[pos] = 'b'; - pos = (j+1) *size / max_matches ; + pos = (j + 1) * size / max_matches; bench.buf[pos] = 'a'; u64a actual_size = 0; auto start = std::chrono::steady_clock::now(); - for(int i = 0; i < loops; i++) { + for (int i = 0; i < loops; i++) { const u8 *res = func(bench); - if (is_reverse) - actual_size += bench.buf.data() + size - res; - else - actual_size += res - bench.buf.data(); + if (is_reverse) + actual_size += bench.buf.data() + size - res; + else + actual_size += res - bench.buf.data(); } auto end = std::chrono::steady_clock::now(); - double dt = std::chrono::duration_cast(end - start).count(); + double dt = std::chrono::duration_cast( + end - start) + .count(); total_sec += dt; /*convert microseconds to seconds*/ /*calculate bandwidth*/ - bw = (actual_size / dt) * 1000000.0 / 1048576.0; - /*std::cout << "act_size = " << act_size << std::endl; - std::cout << "dt = " << dt << std::endl; - std::cout << "bw = " << bw << std::endl;*/ - avg_bw += bw; + bw = (actual_size / dt) * 1000000.0 / 1048576.0; + /*std::cout << "act_size = " << act_size << std::endl; + std::cout << "dt = " << dt << std::endl; + std::cout << "bw = " << bw << std::endl;*/ + avg_bw += bw; /*convert to MB/s*/ max_bw = std::max(bw, max_bw); /*calculate average time*/ @@ -100,18 +102,28 @@ static void run_benchmarks(int size, int loops, int max_matches, bool is_reverse } avg_time /= max_matches; avg_bw /= max_matches; - total_sec /= 1000000.0; + total_sec /= 1000000.0; /*convert average time to us*/ - printf(KMAG "%s: %u matches, %u * %u iterations," KBLU " total elapsed time =" RST " %.3f s, " - KBLU "average time per call =" RST " %.3f μs," KBLU " max bandwidth = " RST " %.3f MB/s," KBLU " average bandwidth =" RST " %.3f MB/s \n", - bench.label, max_matches, size ,loops, total_sec, avg_time, max_bw, avg_bw); + /* Keeping the color output + printf(KMAG "%s: %u matches, %u * %u iterations," KBLU + " total elapsed time =" RST " %.3f s, " KBLU + "average time per call =" RST " %.3f μs," KBLU + " max bandwidth = 
" RST " %.3f MB/s," KBLU + " average bandwidth =" RST " %.3f MB/s \n", + bench.label, max_matches, size, loops, total_sec, avg_time, + max_bw, avg_bw); + */ + printf("%s,%u,%u,%u,%.3f,%.3f,%.3f,%.3f\n", bench.label, max_matches, + size, loops, total_sec, avg_time, max_bw, avg_bw); } else { auto start = std::chrono::steady_clock::now(); for (int i = 0; i < loops; i++) { const u8 *res = func(bench); } auto end = std::chrono::steady_clock::now(); - total_sec += std::chrono::duration_cast(end - start).count(); + total_sec += + std::chrono::duration_cast(end - start) + .count(); /*calculate transferred size*/ total_size = size * loops; /*calculate average time*/ @@ -122,117 +134,131 @@ static void run_benchmarks(int size, int loops, int max_matches, bool is_reverse max_bw = total_size / total_sec; /*convert to MB/s*/ max_bw /= 1048576.0; - printf(KMAG "%s: no matches, %u * %u iterations," KBLU " total elapsed time =" RST " %.3f s, " - KBLU "average time per call =" RST " %.3f μs ," KBLU " bandwidth = " RST " %.3f MB/s \n", - bench.label, size ,loops, total_sec, avg_time, max_bw ); + /*Keeping the color output + printf(KMAG "%s: no matches, %u * %u iterations," KBLU " total elapsed + time =" RST " %.3f s, " KBLU "average time per call =" RST " %.3f μs ," + KBLU " bandwidth = " RST " %.3f MB/s \n", bench.label, size ,loops, + total_sec, avg_time, max_bw ); + */ + printf("%s,0,%u,%u,%.3f,%.3f,%.3f,0\n", bench.label, size, loops, + total_sec, avg_time, max_bw); } } -int main(){ +int main() { int matches[] = {0, MAX_MATCHES}; std::vector sizes; - for (size_t i = 0; i < N; i++) sizes.push_back(16000 << i*2); - const char charset[] = "aAaAaAaAAAaaaaAAAAaaaaAAAAAAaaaAAaaa"; - + for (size_t i = 0; i < N; i++) + sizes.push_back(16000 << i * 2); + const char charset[] = "aAaAaAaAAAaaaaAAAAaaaaAAAAAAaaaAAaaa"; + printf("Bench Label, max_matches, size,loops, total_sec, avg_time, " + "max_bw, avg_bw\n"); for (int m = 0; m < 2; m++) { for (size_t i = 0; i < std::size(sizes); i++) { MicroBenchmark bench("Shufti", sizes[i]); - run_benchmarks(sizes[i], MAX_LOOPS / sizes[i], matches[m], false, bench, + run_benchmarks( + sizes[i], MAX_LOOPS / sizes[i], matches[m], false, bench, [&](MicroBenchmark &b) { b.chars.set('a'); ue2::shuftiBuildMasks(b.chars, (u8 *)&b.lo, (u8 *)&b.hi); memset(b.buf.data(), 'b', b.size); }, [&](MicroBenchmark &b) { - return shuftiExec(b.lo, b.hi, b.buf.data(), b.buf.data() + b.size); - } - ); + return shuftiExec(b.lo, b.hi, b.buf.data(), + b.buf.data() + b.size); + }); } for (size_t i = 0; i < std::size(sizes); i++) { MicroBenchmark bench("Reverse Shufti", sizes[i]); - run_benchmarks(sizes[i], MAX_LOOPS / sizes[i], matches[m], true, bench, + run_benchmarks( + sizes[i], MAX_LOOPS / sizes[i], matches[m], true, bench, [&](MicroBenchmark &b) { b.chars.set('a'); ue2::shuftiBuildMasks(b.chars, (u8 *)&b.lo, (u8 *)&b.hi); memset(b.buf.data(), 'b', b.size); }, [&](MicroBenchmark &b) { - return rshuftiExec(b.lo, b.hi, b.buf.data(), b.buf.data() + b.size); - } - ); + return rshuftiExec(b.lo, b.hi, b.buf.data(), + b.buf.data() + b.size); + }); } for (size_t i = 0; i < std::size(sizes); i++) { MicroBenchmark bench("Truffle", sizes[i]); - run_benchmarks(sizes[i], MAX_LOOPS / sizes[i], matches[m], false, bench, + run_benchmarks( + sizes[i], MAX_LOOPS / sizes[i], matches[m], false, bench, [&](MicroBenchmark &b) { b.chars.set('a'); ue2::truffleBuildMasks(b.chars, (u8 *)&b.lo, (u8 *)&b.hi); memset(b.buf.data(), 'b', b.size); }, [&](MicroBenchmark &b) { - return truffleExec(b.lo, b.hi, b.buf.data(), 
b.buf.data() + b.size); - } - ); + return truffleExec(b.lo, b.hi, b.buf.data(), + b.buf.data() + b.size); + }); } for (size_t i = 0; i < std::size(sizes); i++) { MicroBenchmark bench("Reverse Truffle", sizes[i]); - run_benchmarks(sizes[i], MAX_LOOPS / sizes[i], matches[m], true, bench, + run_benchmarks( + sizes[i], MAX_LOOPS / sizes[i], matches[m], true, bench, [&](MicroBenchmark &b) { b.chars.set('a'); ue2::truffleBuildMasks(b.chars, (u8 *)&b.lo, (u8 *)&b.hi); memset(b.buf.data(), 'b', b.size); }, [&](MicroBenchmark &b) { - return rtruffleExec(b.lo, b.hi, b.buf.data(), b.buf.data() + b.size); - } - ); + return rtruffleExec(b.lo, b.hi, b.buf.data(), + b.buf.data() + b.size); + }); } for (size_t i = 0; i < std::size(sizes); i++) { MicroBenchmark bench("Vermicelli", sizes[i]); - run_benchmarks(sizes[i], MAX_LOOPS / sizes[i], matches[m], false, bench, + run_benchmarks( + sizes[i], MAX_LOOPS / sizes[i], matches[m], false, bench, [&](MicroBenchmark &b) { b.chars.set('a'); ue2::truffleBuildMasks(b.chars, (u8 *)&b.lo, (u8 *)&b.hi); memset(b.buf.data(), 'b', b.size); }, [&](MicroBenchmark &b) { - return vermicelliExec('a', 'b', b.buf.data(), b.buf.data() + b.size); - } - ); + return vermicelliExec('a', 'b', b.buf.data(), + b.buf.data() + b.size); + }); } for (size_t i = 0; i < std::size(sizes); i++) { MicroBenchmark bench("Reverse Vermicelli", sizes[i]); - run_benchmarks(sizes[i], MAX_LOOPS / sizes[i], matches[m], true, bench, + run_benchmarks( + sizes[i], MAX_LOOPS / sizes[i], matches[m], true, bench, [&](MicroBenchmark &b) { b.chars.set('a'); ue2::truffleBuildMasks(b.chars, (u8 *)&b.lo, (u8 *)&b.hi); memset(b.buf.data(), 'b', b.size); }, [&](MicroBenchmark &b) { - return rvermicelliExec('a', 'b', b.buf.data(), b.buf.data() + b.size); - } - ); + return rvermicelliExec('a', 'b', b.buf.data(), + b.buf.data() + b.size); + }); } for (size_t i = 0; i < std::size(sizes); i++) { - //we imitate the noodle unit tests + // we imitate the noodle unit tests std::string str; const size_t char_len = 5; str.resize(char_len + 1); - for (size_t j=0; j < char_len; j++) { - srand (time(NULL)); - int key = rand() % + 36 ; + for (size_t j = 0; j < char_len; j++) { + srand(time(NULL)); + int key = rand() % +36; str[char_len] = charset[key]; str[char_len + 1] = '\0'; } MicroBenchmark bench("Noodle", sizes[i]); - run_benchmarks(sizes[i], MAX_LOOPS / sizes[i], matches[m], false, bench, + run_benchmarks( + sizes[i], MAX_LOOPS / sizes[i], matches[m], false, bench, [&](MicroBenchmark &b) { ctxt.clear(); memset(b.buf.data(), 'a', b.size); @@ -242,10 +268,10 @@ int main(){ assert(b.nt != nullptr); }, [&](MicroBenchmark &b) { - noodExec(b.nt.get(), b.buf.data(), b.size, 0, hlmSimpleCallback, &b.scratch); + noodExec(b.nt.get(), b.buf.data(), b.size, 0, + hlmSimpleCallback, &b.scratch); return b.buf.data() + b.size; - } - ); + }); } } From b5a29155e4d4dee44f94ec44622bf8431e676ce4 Mon Sep 17 00:00:00 2001 From: gtsoul-tech Date: Tue, 2 Apr 2024 11:28:00 +0300 Subject: [PATCH 10/19] removed color output code --- benchmarks/benchmarks.cpp | 15 ------------- benchmarks/benchmarks.hpp | 46 +++++++++++++++------------------------ 2 files changed, 17 insertions(+), 44 deletions(-) diff --git a/benchmarks/benchmarks.cpp b/benchmarks/benchmarks.cpp index c6e453ef..14cccc10 100644 --- a/benchmarks/benchmarks.cpp +++ b/benchmarks/benchmarks.cpp @@ -104,15 +104,6 @@ static void run_benchmarks(int size, int loops, int max_matches, avg_bw /= max_matches; total_sec /= 1000000.0; /*convert average time to us*/ - /* Keeping the color output - 
printf(KMAG "%s: %u matches, %u * %u iterations," KBLU - " total elapsed time =" RST " %.3f s, " KBLU - "average time per call =" RST " %.3f μs," KBLU - " max bandwidth = " RST " %.3f MB/s," KBLU - " average bandwidth =" RST " %.3f MB/s \n", - bench.label, max_matches, size, loops, total_sec, avg_time, - max_bw, avg_bw); - */ printf("%s,%u,%u,%u,%.3f,%.3f,%.3f,%.3f\n", bench.label, max_matches, size, loops, total_sec, avg_time, max_bw, avg_bw); } else { @@ -134,12 +125,6 @@ static void run_benchmarks(int size, int loops, int max_matches, max_bw = total_size / total_sec; /*convert to MB/s*/ max_bw /= 1048576.0; - /*Keeping the color output - printf(KMAG "%s: no matches, %u * %u iterations," KBLU " total elapsed - time =" RST " %.3f s, " KBLU "average time per call =" RST " %.3f μs ," - KBLU " bandwidth = " RST " %.3f MB/s \n", bench.label, size ,loops, - total_sec, avg_time, max_bw ); - */ printf("%s,0,%u,%u,%.3f,%.3f,%.3f,0\n", bench.label, size, loops, total_sec, avg_time, max_bw); } diff --git a/benchmarks/benchmarks.hpp b/benchmarks/benchmarks.hpp index 974d2234..13f66fa5 100644 --- a/benchmarks/benchmarks.hpp +++ b/benchmarks/benchmarks.hpp @@ -26,44 +26,32 @@ * POSSIBILITY OF SUCH DAMAGE. */ +#include "hwlm/hwlm_literal.h" +#include "hwlm/noodle_build.h" +#include "hwlm/noodle_engine.h" +#include "hwlm/noodle_internal.h" #include "nfa/shufti.h" #include "nfa/shufticompile.h" #include "nfa/truffle.h" #include "nfa/trufflecompile.h" #include "nfa/vermicelli.hpp" -#include "hwlm/noodle_build.h" -#include "hwlm/noodle_engine.h" -#include "hwlm/noodle_internal.h" -#include "hwlm/hwlm_literal.h" -#include "util/bytecode_ptr.h" #include "scratch.h" +#include "util/bytecode_ptr.h" -/*define colour control characters*/ -#define RST "\x1B[0m" -#define KRED "\x1B[31m" -#define KGRN "\x1B[32m" -#define KYEL "\x1B[33m" -#define KBLU "\x1B[34m" -#define KMAG "\x1B[35m" -#define KCYN "\x1B[36m" -#define KWHT "\x1B[37m" - -class MicroBenchmark -{ +class MicroBenchmark { public: - char const *label; - size_t size; + char const *label; + size_t size; - // Shufti/Truffle - m128 lo, hi; - ue2::CharReach chars; - std::vector buf; + // Shufti/Truffle + m128 lo, hi; + ue2::CharReach chars; + std::vector buf; - // Noodle - struct hs_scratch scratch; - ue2::bytecode_ptr nt; + // Noodle + struct hs_scratch scratch; + ue2::bytecode_ptr nt; - MicroBenchmark(char const *label_, size_t size_) - :label(label_), size(size_), buf(size_) { - }; + MicroBenchmark(char const *label_, size_t size_) + : label(label_), size(size_), buf(size_){}; }; From 62a275e5764efeb8202af18e80174f210b9a3993 Mon Sep 17 00:00:00 2001 From: gtsoul-tech Date: Tue, 2 Apr 2024 13:32:51 +0300 Subject: [PATCH 11/19] change first column name csv --- benchmarks/benchmarks.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmarks.cpp b/benchmarks/benchmarks.cpp index 14cccc10..fd7aed47 100644 --- a/benchmarks/benchmarks.cpp +++ b/benchmarks/benchmarks.cpp @@ -136,7 +136,7 @@ int main() { for (size_t i = 0; i < N; i++) sizes.push_back(16000 << i * 2); const char charset[] = "aAaAaAaAAAaaaaAAAAaaaaAAAAAAaaaAAaaa"; - printf("Bench Label, max_matches, size,loops, total_sec, avg_time, " + printf("Matcher, max_matches, size,loops, total_sec, avg_time, " "max_bw, avg_bw\n"); for (int m = 0; m < 2; m++) { for (size_t i = 0; i < std::size(sizes); i++) { From 3670e52c873e5631871030e3559111ea0d3529a3 Mon Sep 17 00:00:00 2001 From: gtsoul-tech Date: Tue, 2 Apr 2024 14:56:27 +0300 Subject: [PATCH 12/19] output tabulated and csv 
--- benchmarks/benchmarks.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/benchmarks/benchmarks.cpp b/benchmarks/benchmarks.cpp index fd7aed47..b6106ed4 100644 --- a/benchmarks/benchmarks.cpp +++ b/benchmarks/benchmarks.cpp @@ -104,8 +104,9 @@ static void run_benchmarks(int size, int loops, int max_matches, avg_bw /= max_matches; total_sec /= 1000000.0; /*convert average time to us*/ - printf("%s,%u,%u,%u,%.3f,%.3f,%.3f,%.3f\n", bench.label, max_matches, - size, loops, total_sec, avg_time, max_bw, avg_bw); + printf("%-18s, %-12u, %-10u, %-6u, %-10.3f, %-9.3f, %-8.3f, %-7.3f\n", + bench.label, max_matches, size, loops, total_sec, avg_time, + max_bw, avg_bw); } else { auto start = std::chrono::steady_clock::now(); for (int i = 0; i < loops; i++) { @@ -125,8 +126,8 @@ static void run_benchmarks(int size, int loops, int max_matches, max_bw = total_size / total_sec; /*convert to MB/s*/ max_bw /= 1048576.0; - printf("%s,0,%u,%u,%.3f,%.3f,%.3f,0\n", bench.label, size, loops, - total_sec, avg_time, max_bw); + printf("%-18s, %-12s, %-10u, %-6u, %-10.3f, %-9.3f, %-8.3f, %-7s\n", + bench.label, "0", size, loops, total_sec, avg_time, max_bw, "0"); } } @@ -136,8 +137,9 @@ int main() { for (size_t i = 0; i < N; i++) sizes.push_back(16000 << i * 2); const char charset[] = "aAaAaAaAAAaaaaAAAAaaaaAAAAAAaaaAAaaa"; - printf("Matcher, max_matches, size,loops, total_sec, avg_time, " - "max_bw, avg_bw\n"); + printf("%-18s, %-12s, %-10s, %-6s, %-10s, %-9s, %-8s, %-7s\n", "Matcher", + "max_matches", "size", "loops", "total_sec", "avg_time", "max_bw", + "avg_bw"); for (int m = 0; m < 2; m++) { for (size_t i = 0; i < std::size(sizes); i++) { MicroBenchmark bench("Shufti", sizes[i]); From 3b37add4d87f223cc40e04d331e96beeb98f2d30 Mon Sep 17 00:00:00 2001 From: "G.E." 
Date: Wed, 17 Apr 2024 11:33:00 +0300 Subject: [PATCH 13/19] the rpath hack is only needed on arm --- cmake/osdetection.cmake | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/cmake/osdetection.cmake b/cmake/osdetection.cmake index 3369447a..8bfbd3bd 100644 --- a/cmake/osdetection.cmake +++ b/cmake/osdetection.cmake @@ -4,12 +4,14 @@ endif(CMAKE_SYSTEM_NAME MATCHES "Linux") if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") set(FREEBSD true) - set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) - #FIXME: find a nicer and more general way of doing this - if(CMAKE_C_COMPILER MATCHES "/usr/local/bin/gcc12") - set(CMAKE_BUILD_RPATH "/usr/local/lib/gcc12") - elseif(CMAKE_C_COMPILER MATCHES "/usr/local/bin/gcc13") - set(CMAKE_BUILD_RPATH "/usr/local/lib/gcc13") + if(ARCH_AARCH64) + set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) + #FIXME: find a nicer and more general way of doing this + if(CMAKE_C_COMPILER MATCHES "/usr/local/bin/gcc12") + set(CMAKE_BUILD_RPATH "/usr/local/lib/gcc12") + elseif(CMAKE_C_COMPILER MATCHES "/usr/local/bin/gcc13") + set(CMAKE_BUILD_RPATH "/usr/local/lib/gcc13") + endif() endif() endif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") From f2db0cdf01560cc8a6531989a23a3e1e4ead522a Mon Sep 17 00:00:00 2001 From: gtsoul-tech Date: Wed, 17 Apr 2024 13:33:48 +0300 Subject: [PATCH 14/19] gcc-14 compilation fix Closes:#245 --- src/util/supervector/arch/x86/impl.cpp | 1735 ++++++++++++++---------- 1 file changed, 987 insertions(+), 748 deletions(-) diff --git a/src/util/supervector/arch/x86/impl.cpp b/src/util/supervector/arch/x86/impl.cpp index b8a75c95..e0e9d966 100644 --- a/src/util/supervector/arch/x86/impl.cpp +++ b/src/util/supervector/arch/x86/impl.cpp @@ -35,170 +35,155 @@ #include "ue2common.h" #include "util/arch.h" -#include "util/unaligned.h" #include "util/supervector/supervector.hpp" +#include "util/unaligned.h" // 128-bit SSE implementation -#if !(!defined(RELEASE_BUILD) && defined(FAT_RUNTIME) && (defined(HAVE_AVX2) || defined(HAVE_AVX512))) && defined(HAVE_SIMD_128_BITS) +#if !(!defined(RELEASE_BUILD) && defined(FAT_RUNTIME) && \ + (defined(HAVE_AVX2) || defined(HAVE_AVX512))) && \ + defined(HAVE_SIMD_128_BITS) -template<> -really_inline SuperVector<16>::SuperVector(SuperVector const &other) -{ +template <> +really_inline SuperVector<16>::SuperVector(SuperVector const &other) { u.v128[0] = other.u.v128[0]; } -template<> -really_inline SuperVector<16>::SuperVector(typename base_type::type const v) -{ +template <> +really_inline SuperVector<16>::SuperVector(typename base_type::type const v) { u.v128[0] = v; }; -template<> -template<> -really_inline SuperVector<16>::SuperVector(int8_t const other) -{ +template <> +template <> +really_inline SuperVector<16>::SuperVector(int8_t const other) { u.v128[0] = _mm_set1_epi8(other); } -template<> -template<> -really_inline SuperVector<16>::SuperVector(uint8_t const other) -{ +template <> +template <> +really_inline SuperVector<16>::SuperVector(uint8_t const other) { u.v128[0] = _mm_set1_epi8(static_cast(other)); } -template<> -template<> -really_inline SuperVector<16>::SuperVector(int16_t const other) -{ +template <> +template <> +really_inline SuperVector<16>::SuperVector(int16_t const other) { u.v128[0] = _mm_set1_epi16(other); } -template<> -template<> -really_inline SuperVector<16>::SuperVector(uint16_t const other) -{ +template <> +template <> +really_inline SuperVector<16>::SuperVector(uint16_t const other) { u.v128[0] = _mm_set1_epi16(static_cast(other)); } -template<> -template<> -really_inline 
SuperVector<16>::SuperVector(int32_t const other) -{ +template <> +template <> +really_inline SuperVector<16>::SuperVector(int32_t const other) { u.v128[0] = _mm_set1_epi32(other); } -template<> -template<> -really_inline SuperVector<16>::SuperVector(uint32_t const other) -{ +template <> +template <> +really_inline SuperVector<16>::SuperVector(uint32_t const other) { u.v128[0] = _mm_set1_epi32(static_cast(other)); } -template<> -template<> -really_inline SuperVector<16>::SuperVector(int64_t const other) -{ +template <> +template <> +really_inline SuperVector<16>::SuperVector(int64_t const other) { u.v128[0] = _mm_set1_epi64x(other); } -template<> -template<> -really_inline SuperVector<16>::SuperVector(uint64_t const other) -{ +template <> +template <> +really_inline SuperVector<16>::SuperVector(uint64_t const other) { u.v128[0] = _mm_set1_epi64x(static_cast(other)); } // Constants -template<> -really_inline SuperVector<16> SuperVector<16>::Ones() -{ +template <> really_inline SuperVector<16> SuperVector<16>::Ones() { return {_mm_set1_epi8(0xFF)}; } -template<> -really_inline SuperVector<16> SuperVector<16>::Zeroes(void) -{ +template <> really_inline SuperVector<16> SuperVector<16>::Zeroes(void) { return {_mm_set1_epi8(0)}; } // Methods template <> -really_inline void SuperVector<16>::operator=(SuperVector<16> const &other) -{ +really_inline void SuperVector<16>::operator=(SuperVector<16> const &other) { u.v128[0] = other.u.v128[0]; } template <> -really_inline SuperVector<16> SuperVector<16>::operator&(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> +SuperVector<16>::operator&(SuperVector<16> const &b) const { return {_mm_and_si128(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> SuperVector<16>::operator|(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> +SuperVector<16>::operator|(SuperVector<16> const &b) const { return {_mm_or_si128(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> SuperVector<16>::operator^(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> +SuperVector<16>::operator^(SuperVector<16> const &b) const { return {_mm_xor_si128(u.v128[0], b.u.v128[0])}; } -template <> -really_inline SuperVector<16> SuperVector<16>::operator!() const -{ +template <> really_inline SuperVector<16> SuperVector<16>::operator!() const { return {_mm_xor_si128(u.v128[0], u.v128[0])}; } template <> -really_inline SuperVector<16> SuperVector<16>::opandnot(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> +SuperVector<16>::opandnot(SuperVector<16> const &b) const { return {_mm_andnot_si128(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> SuperVector<16>::operator==(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> +SuperVector<16>::operator==(SuperVector<16> const &b) const { return {_mm_cmpeq_epi8(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> SuperVector<16>::operator!=(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> +SuperVector<16>::operator!=(SuperVector<16> const &b) const { return !(*this == b); } template <> -really_inline SuperVector<16> SuperVector<16>::operator>(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> +SuperVector<16>::operator>(SuperVector<16> const &b) const { return {_mm_cmpgt_epi8(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> SuperVector<16>::operator<(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> 
+SuperVector<16>::operator<(SuperVector<16> const &b) const { return {_mm_cmplt_epi8(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> SuperVector<16>::operator>=(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> +SuperVector<16>::operator>=(SuperVector<16> const &b) const { return !(*this < b); } template <> -really_inline SuperVector<16> SuperVector<16>::operator<=(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> +SuperVector<16>::operator<=(SuperVector<16> const &b) const { return !(*this > b); } template <> -really_inline SuperVector<16> SuperVector<16>::eq(SuperVector<16> const &b) const -{ +really_inline SuperVector<16> +SuperVector<16>::eq(SuperVector<16> const &b) const { return (*this == b); } @@ -232,37 +217,32 @@ SuperVector<16>::iteration_mask( // } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshl_16_imm() const -{ +template +really_inline SuperVector<16> SuperVector<16>::vshl_16_imm() const { return {_mm_slli_epi16(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshl_32_imm() const -{ +template +really_inline SuperVector<16> SuperVector<16>::vshl_32_imm() const { return {_mm_slli_epi32(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshl_64_imm() const -{ +template +really_inline SuperVector<16> SuperVector<16>::vshl_64_imm() const { return {_mm_slli_epi64(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshl_128_imm() const -{ +template +really_inline SuperVector<16> SuperVector<16>::vshl_128_imm() const { return {_mm_slli_si128(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshl_imm() const -{ +template +really_inline SuperVector<16> SuperVector<16>::vshl_imm() const { return vshl_128_imm(); } @@ -274,37 +254,32 @@ really_inline SuperVector<16> SuperVector<16>::vshl_imm() const // } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshr_16_imm() const -{ +template +really_inline SuperVector<16> SuperVector<16>::vshr_16_imm() const { return {_mm_srli_epi16(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshr_32_imm() const -{ +template +really_inline SuperVector<16> SuperVector<16>::vshr_32_imm() const { return {_mm_srli_epi32(u.v128[0], N)}; } - + template <> -template -really_inline SuperVector<16> SuperVector<16>::vshr_64_imm() const -{ +template +really_inline SuperVector<16> SuperVector<16>::vshr_64_imm() const { return {_mm_srli_epi64(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshr_128_imm() const -{ +template +really_inline SuperVector<16> SuperVector<16>::vshr_128_imm() const { return {_mm_srli_si128(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshr_imm() const -{ +template +really_inline SuperVector<16> SuperVector<16>::vshr_imm() const { return vshr_128_imm(); } @@ -322,156 +297,196 @@ template SuperVector<16> SuperVector<16>::vshr_128_imm<4>() const; #endif // template <> -// really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) const +// really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) +// const // { -// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return {_mm_slli_epi8(v->u.v128[0], i)}; }); -// if (N == 16) return Zeroes(); +// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return +// {_mm_slli_epi8(v->u.v128[0], i)}; 
}); if (N == 16) return Zeroes(); // } template <> -really_inline SuperVector<16> SuperVector<16>::vshl_16 (uint8_t const N) const -{ +really_inline SuperVector<16> SuperVector<16>::vshl_16(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_slli_epi16(u.v128[0], N)}; } #endif - if (N == 0) return *this; - if (N == 16) return Zeroes(); + if (N == 0) + return *this; + if (N == 16) + return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_slli_epi16(v->u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm_slli_epi16(v->u.v128[0], n)}; + }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshl_32 (uint8_t const N) const -{ +really_inline SuperVector<16> SuperVector<16>::vshl_32(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_slli_epi32(u.v128[0], N)}; } #endif - if (N == 0) return *this; - if (N == 16) return Zeroes(); + if (N == 0) + return *this; + if (N == 16) + return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_slli_epi32(v->u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm_slli_epi32(v->u.v128[0], n)}; + }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshl_64 (uint8_t const N) const -{ +really_inline SuperVector<16> SuperVector<16>::vshl_64(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_slli_epi64(u.v128[0], N)}; } #endif - if (N == 0) return *this; - if (N == 16) return Zeroes(); + if (N == 0) + return *this; + if (N == 16) + return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_slli_epi64(v->u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm_slli_epi64(v->u.v128[0], n)}; + }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshl_128(uint8_t const N) const -{ +really_inline SuperVector<16> SuperVector<16>::vshl_128(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) && !defined(VS_SIMDE_BACKEND) if (__builtin_constant_p(N)) { return {_mm_slli_si128(u.v128[0], N)}; } #endif - if (N == 0) return *this; - if (N == 16) return Zeroes(); + if (N == 0) + return *this; + if (N == 16) + return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_slli_si128(v->u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm_slli_si128(v->u.v128[0], n)}; + }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshl(uint8_t const N) const -{ +really_inline SuperVector<16> SuperVector<16>::vshl(uint8_t const N) const { return vshl_128(N); } // template <> -// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) const +// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) +// const // { // SuperVector<16> result; -// Unroller<0, 15>::iterator([&,v=this](uint8_t 
const i) { if (N == i) result = {_mm_srli_epi8(v->u.v128[0], i)}; }); -// if (N == 16) result = Zeroes(); -// return result; +// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) +// result = {_mm_srli_epi8(v->u.v128[0], i)}; }); if (N == 16) result = +// Zeroes(); return result; // } template <> -really_inline SuperVector<16> SuperVector<16>::vshr_16 (uint8_t const N) const -{ +really_inline SuperVector<16> SuperVector<16>::vshr_16(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_srli_epi16(u.v128[0], N)}; } #endif - if (N == 0) return *this; - if (N == 16) return Zeroes(); + if (N == 0) + return *this; + if (N == 16) + return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_srli_epi16(v->u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm_srli_epi16(v->u.v128[0], n)}; + }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshr_32 (uint8_t const N) const -{ +really_inline SuperVector<16> SuperVector<16>::vshr_32(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_srli_epi32(u.v128[0], N)}; } #endif - if (N == 0) return *this; - if (N == 16) return Zeroes(); + if (N == 0) + return *this; + if (N == 16) + return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_srli_epi32(v->u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm_srli_epi32(v->u.v128[0], n)}; + }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshr_64 (uint8_t const N) const -{ +really_inline SuperVector<16> SuperVector<16>::vshr_64(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_srli_epi64(u.v128[0], N)}; } #endif - if (N == 0) return *this; - if (N == 16) return Zeroes(); + if (N == 0) + return *this; + if (N == 16) + return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_srli_epi64(v->u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm_srli_epi64(v->u.v128[0], n)}; + }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshr_128(uint8_t const N) const -{ +really_inline SuperVector<16> SuperVector<16>::vshr_128(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) && !defined(VS_SIMDE_BACKEND) if (__builtin_constant_p(N)) { return {_mm_srli_si128(u.v128[0], N)}; } #endif - if (N == 0) return *this; - if (N == 16) return Zeroes(); + if (N == 0) + return *this; + if (N == 16) + return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_srli_si128(v->u.v128[0], n)}; }); + Unroller<1, 16>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm_srli_si128(v->u.v128[0], n)}; + }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshr(uint8_t const N) const -{ +really_inline SuperVector<16> SuperVector<16>::vshr(uint8_t const N) const { return vshr_128(N); } 
template <> -really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const -{ +really_inline SuperVector<16> +SuperVector<16>::operator>>(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) && !defined(VS_SIMDE_BACKEND) if (__builtin_constant_p(N)) { return {_mm_srli_si128(u.v128[0], N)}; @@ -481,8 +496,8 @@ really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const } template <> -really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const -{ +really_inline SuperVector<16> +SuperVector<16>::operator<<(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) && !defined(VS_SIMDE_BACKEND) if (__builtin_constant_p(N)) { return {_mm_slli_si128(u.v128[0], N)}; @@ -491,45 +506,45 @@ really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const return vshl_128(N); } -template<> -really_inline SuperVector<16> SuperVector<16>::Ones_vshr(uint8_t const N) -{ - if (N == 0) return Ones(); - else return Ones().vshr_128(N); -} - -template<> -really_inline SuperVector<16> SuperVector<16>::Ones_vshl(uint8_t const N) -{ - if (N == 0) return Ones(); - else return Ones().vshr_128(N); +template <> +really_inline SuperVector<16> SuperVector<16>::Ones_vshr(uint8_t const N) { + if (N == 0) + return Ones(); + else + return Ones().vshr_128(N); } template <> -really_inline SuperVector<16> SuperVector<16>::loadu(void const *ptr) -{ +really_inline SuperVector<16> SuperVector<16>::Ones_vshl(uint8_t const N) { + if (N == 0) + return Ones(); + else + return Ones().vshr_128(N); +} + +template <> +really_inline SuperVector<16> SuperVector<16>::loadu(void const *ptr) { return _mm_loadu_si128((const m128 *)ptr); } template <> -really_inline SuperVector<16> SuperVector<16>::load(void const *ptr) -{ +really_inline SuperVector<16> SuperVector<16>::load(void const *ptr) { assert(ISALIGNED_N(ptr, alignof(SuperVector::size))); ptr = vectorscan_assume_aligned(ptr, SuperVector::size); return _mm_load_si128((const m128 *)ptr); } template <> -really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, uint8_t const len) -{ - SuperVector mask = Ones_vshr(16 -len); +really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, + uint8_t const len) { + SuperVector mask = Ones_vshr(16 - len); SuperVector v = _mm_loadu_si128((const m128 *)ptr); return mask & v; } -template<> -really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset) -{ +template <> +really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, + int8_t offset) { #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(offset)) { if (offset == 16) { @@ -539,224 +554,239 @@ really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, in } } #endif - switch(offset) { - case 0: return other; break; - case 1: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 1)}; break; - case 2: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 2)}; break; - case 3: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 3)}; break; - case 4: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 4)}; break; - case 5: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 5)}; break; - case 6: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 6)}; break; - case 7: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 7)}; break; - case 8: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 8)}; break; - case 9: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 9)}; break; - case 10: return 
{_mm_alignr_epi8(u.v128[0], other.u.v128[0], 10)}; break; - case 11: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 11)}; break; - case 12: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 12)}; break; - case 13: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 13)}; break; - case 14: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 14)}; break; - case 15: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 15)}; break; - default: break; + switch (offset) { + case 0: + return other; + break; + case 1: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 1)}; + break; + case 2: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 2)}; + break; + case 3: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 3)}; + break; + case 4: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 4)}; + break; + case 5: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 5)}; + break; + case 6: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 6)}; + break; + case 7: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 7)}; + break; + case 8: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 8)}; + break; + case 9: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 9)}; + break; + case 10: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 10)}; + break; + case 11: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 11)}; + break; + case 12: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 12)}; + break; + case 13: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 13)}; + break; + case 14: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 14)}; + break; + case 15: + return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 15)}; + break; + default: + break; } return *this; } -template<> -template<> -really_inline SuperVector<16> SuperVector<16>::pshufb(SuperVector<16> b) -{ +template <> +template <> +really_inline SuperVector<16> SuperVector<16>::pshufb(SuperVector<16> b) { return {_mm_shuffle_epi8(u.v128[0], b.u.v128[0])}; } -template<> -really_inline SuperVector<16> SuperVector<16>::pshufb_maskz(SuperVector<16> b, uint8_t const len) -{ - SuperVector mask = Ones_vshr(16 -len); +template <> +really_inline SuperVector<16> SuperVector<16>::pshufb_maskz(SuperVector<16> b, + uint8_t const len) { + SuperVector mask = Ones_vshr(16 - len); return mask & pshufb(b); } #endif // !defined(FAT_RUNTIME) && !defined(HAVE_AVX2) // 256-bit AVX2 implementation -#if !(!defined(RELEASE_BUILD) && defined(FAT_RUNTIME) && defined(HAVE_AVX512)) && defined(HAVE_AVX2) +#if !(!defined(RELEASE_BUILD) && defined(FAT_RUNTIME) && \ + defined(HAVE_AVX512)) && \ + defined(HAVE_AVX2) -template<> -really_inline SuperVector<32>::SuperVector(SuperVector const &other) -{ +template <> +really_inline SuperVector<32>::SuperVector(SuperVector const &other) { u.v256[0] = other.u.v256[0]; } -template<> -really_inline SuperVector<32>::SuperVector(typename base_type::type const v) -{ +template <> +really_inline SuperVector<32>::SuperVector(typename base_type::type const v) { u.v256[0] = v; }; -template<> -template<> -really_inline SuperVector<32>::SuperVector(m128 const v) -{ +template <> +template <> +really_inline SuperVector<32>::SuperVector(m128 const v) { u.v256[0] = _mm256_broadcastsi128_si256(v); }; -template<> -really_inline SuperVector<32>::SuperVector(m128 const lo, m128 const hi) -{ +template <> +really_inline SuperVector<32>::SuperVector(m128 const lo, m128 const hi) { u.v128[0] = lo; u.v128[1] = hi; }; -template<> -really_inline SuperVector<32>::SuperVector(SuperVector<16> const lo, 
SuperVector<16> const hi) -{ +template <> +really_inline SuperVector<32>::SuperVector(SuperVector<16> const lo, + SuperVector<16> const hi) { u.v128[0] = lo.u.v128[0]; u.v128[1] = hi.u.v128[0]; }; -template<> -template<> -really_inline SuperVector<32>::SuperVector(int8_t const other) -{ +template <> +template <> +really_inline SuperVector<32>::SuperVector(int8_t const other) { u.v256[0] = _mm256_set1_epi8(other); } -template<> -template<> -really_inline SuperVector<32>::SuperVector(uint8_t const other) -{ +template <> +template <> +really_inline SuperVector<32>::SuperVector(uint8_t const other) { u.v256[0] = _mm256_set1_epi8(static_cast(other)); } -template<> -template<> -really_inline SuperVector<32>::SuperVector(int16_t const other) -{ +template <> +template <> +really_inline SuperVector<32>::SuperVector(int16_t const other) { u.v256[0] = _mm256_set1_epi16(other); } -template<> -template<> -really_inline SuperVector<32>::SuperVector(uint16_t const other) -{ +template <> +template <> +really_inline SuperVector<32>::SuperVector(uint16_t const other) { u.v256[0] = _mm256_set1_epi16(static_cast(other)); } -template<> -template<> -really_inline SuperVector<32>::SuperVector(int32_t const other) -{ +template <> +template <> +really_inline SuperVector<32>::SuperVector(int32_t const other) { u.v256[0] = _mm256_set1_epi32(other); } -template<> -template<> -really_inline SuperVector<32>::SuperVector(uint32_t const other) -{ +template <> +template <> +really_inline SuperVector<32>::SuperVector(uint32_t const other) { u.v256[0] = _mm256_set1_epi32(static_cast(other)); } -template<> -template<> -really_inline SuperVector<32>::SuperVector(int64_t const other) -{ +template <> +template <> +really_inline SuperVector<32>::SuperVector(int64_t const other) { u.v256[0] = _mm256_set1_epi64x(other); } -template<> -template<> -really_inline SuperVector<32>::SuperVector(uint64_t const other) -{ +template <> +template <> +really_inline SuperVector<32>::SuperVector(uint64_t const other) { u.v256[0] = _mm256_set1_epi64x(static_cast(other)); } // Constants -template<> -really_inline SuperVector<32> SuperVector<32>::Ones(void) -{ +template <> really_inline SuperVector<32> SuperVector<32>::Ones(void) { return {_mm256_set1_epi8(0xFF)}; } -template<> -really_inline SuperVector<32> SuperVector<32>::Zeroes(void) -{ +template <> really_inline SuperVector<32> SuperVector<32>::Zeroes(void) { return {_mm256_set1_epi8(0)}; } template <> -really_inline void SuperVector<32>::operator=(SuperVector<32> const &other) -{ +really_inline void SuperVector<32>::operator=(SuperVector<32> const &other) { u.v256[0] = other.u.v256[0]; } template <> -really_inline SuperVector<32> SuperVector<32>::operator&(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::operator&(SuperVector<32> const &b) const { return {_mm256_and_si256(u.v256[0], b.u.v256[0])}; } template <> -really_inline SuperVector<32> SuperVector<32>::operator|(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::operator|(SuperVector<32> const &b) const { return {_mm256_or_si256(u.v256[0], b.u.v256[0])}; } template <> -really_inline SuperVector<32> SuperVector<32>::operator^(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::operator^(SuperVector<32> const &b) const { return {_mm256_xor_si256(u.v256[0], b.u.v256[0])}; } -template <> -really_inline SuperVector<32> SuperVector<32>::operator!() const -{ +template <> really_inline SuperVector<32> SuperVector<32>::operator!() const { return 
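// The AVX2 comparison operators below follow the same scheme as the SSE ones:
// _mm256_cmpeq_epi8 / _mm256_cmpgt_epi8 produce a per-byte mask (0xFF where
// the predicate holds, 0x00 elsewhere), and the remaining relations are
// derived from those two rather than issued as separate instructions:
//
//     a != b   ->  !(a == b)
//     a <  b   ->   (b > a)
//     a >= b   ->  !(a < b)
//     a <= b   ->  !(a > b)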
{_mm256_xor_si256(u.v256[0], u.v256[0])}; } template <> -really_inline SuperVector<32> SuperVector<32>::opandnot(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::opandnot(SuperVector<32> const &b) const { return {_mm256_andnot_si256(u.v256[0], b.u.v256[0])}; } template <> -really_inline SuperVector<32> SuperVector<32>::operator==(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::operator==(SuperVector<32> const &b) const { return {_mm256_cmpeq_epi8(u.v256[0], b.u.v256[0])}; } template <> -really_inline SuperVector<32> SuperVector<32>::operator!=(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::operator!=(SuperVector<32> const &b) const { return !(*this == b); } template <> -really_inline SuperVector<32> SuperVector<32>::operator>(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::operator>(SuperVector<32> const &b) const { return {_mm256_cmpgt_epi8(u.v256[0], b.u.v256[0])}; } template <> -really_inline SuperVector<32> SuperVector<32>::operator<(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::operator<(SuperVector<32> const &b) const { return (b > *this); } template <> -really_inline SuperVector<32> SuperVector<32>::operator>=(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::operator>=(SuperVector<32> const &b) const { return !(*this < b); } template <> -really_inline SuperVector<32> SuperVector<32>::operator<=(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::operator<=(SuperVector<32> const &b) const { return !(*this > b); } template <> -really_inline SuperVector<32> SuperVector<32>::eq(SuperVector<32> const &b) const -{ +really_inline SuperVector<32> +SuperVector<32>::eq(SuperVector<32> const &b) const { return (*this == b); } @@ -790,51 +820,56 @@ SuperVector<32>::iteration_mask( // } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_16_imm() const -{ +template +really_inline SuperVector<32> SuperVector<32>::vshl_16_imm() const { return {_mm256_slli_epi16(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_32_imm() const -{ +template +really_inline SuperVector<32> SuperVector<32>::vshl_32_imm() const { return {_mm256_slli_epi32(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_64_imm() const -{ +template +really_inline SuperVector<32> SuperVector<32>::vshl_64_imm() const { return {_mm256_slli_epi64(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_128_imm() const -{ +template +really_inline SuperVector<32> SuperVector<32>::vshl_128_imm() const { return {_mm256_slli_si256(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_256_imm() const -{ - if (N == 0) return *this; - if (N == 16) return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0))}; - if (N == 32) return Zeroes(); +template +really_inline SuperVector<32> SuperVector<32>::vshl_256_imm() const { + if (N == 0) + return *this; + if (N == 16) + return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(0, 0, 2, 0))}; + if (N == 32) + return Zeroes(); if (N < 16) { - return {_mm256_alignr_epi8(u.v256[0], _mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), 16 - N)}; + return {_mm256_alignr_epi8( + u.v256[0], + _mm256_permute2x128_si256(u.v256[0], 
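// Background for vshl_256_imm / vshr_256_imm here: the AVX2 byte-shift and
// alignr instructions operate on each 128-bit lane independently, so a shift
// across the full 256 bits is emulated in two steps -- swing the lanes with
// _mm256_permute2x128_si256, then splice with alignr (N < 16) or finish with
// a per-lane byte shift (N > 16). Rough shape for a left shift by N < 16
// bytes:
//
//     swapped = permute2x128(v, v, _MM_SHUFFLE(0, 0, 2, 0)); // low lane zeroed,
//                                                            // high lane = v.lo
//     result  = alignr(v, swapped, 16 - N);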
u.v256[0], + _MM_SHUFFLE(0, 0, 2, 0)), + 16 - N)}; } else { - return {_mm256_slli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), N - 16)}; + return {_mm256_slli_si256( + _mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(0, 0, 2, 0)), + N - 16)}; } } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_imm() const -{ +template +really_inline SuperVector<32> SuperVector<32>::vshl_imm() const { return vshl_256_imm(); } @@ -846,51 +881,56 @@ really_inline SuperVector<32> SuperVector<32>::vshl_imm() const // } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_16_imm() const -{ +template +really_inline SuperVector<32> SuperVector<32>::vshr_16_imm() const { return {_mm256_srli_epi16(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_32_imm() const -{ +template +really_inline SuperVector<32> SuperVector<32>::vshr_32_imm() const { return {_mm256_srli_epi32(u.v256[0], N)}; } - + template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_64_imm() const -{ +template +really_inline SuperVector<32> SuperVector<32>::vshr_64_imm() const { return {_mm256_srli_epi64(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_128_imm() const -{ +template +really_inline SuperVector<32> SuperVector<32>::vshr_128_imm() const { return {_mm256_srli_si256(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_256_imm() const -{ - if (N == 0) return *this; - if (N == 16) return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1))}; - if (N == 32) return Zeroes(); +template +really_inline SuperVector<32> SuperVector<32>::vshr_256_imm() const { + if (N == 0) + return *this; + if (N == 16) + return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(2, 0, 0, 1))}; + if (N == 32) + return Zeroes(); if (N < 16) { - return {_mm256_alignr_epi8(u.v256[0], _mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), 16 - N)}; + return {_mm256_alignr_epi8( + u.v256[0], + _mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(0, 0, 2, 0)), + 16 - N)}; } else { - return {_mm256_srli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), N - 16)}; + return {_mm256_srli_si256( + _mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(2, 0, 0, 1)), + N - 16)}; } } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_imm() const -{ +template +really_inline SuperVector<32> SuperVector<32>::vshr_imm() const { return vshr_256_imm(); } @@ -910,161 +950,233 @@ template SuperVector<32> SuperVector<32>::vshr_imm<1>() const; #endif // template <> -// really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) const +// really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) +// const // { -// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return {_mm256_slli_epi8(v->u.v256[0], i)}; }); -// if (N == 16) return Zeroes(); +// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return +// {_mm256_slli_epi8(v->u.v256[0], i)}; }); if (N == 16) return Zeroes(); // } template <> -really_inline SuperVector<32> SuperVector<32>::vshl_16 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 32) return Zeroes(); +really_inline SuperVector<32> SuperVector<32>::vshl_16(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 32) + return Zeroes(); SuperVector result; - 
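// About the Unroller pattern used throughout this region: the granular shift
// intrinsics (_mm256_slli_epi16 and friends) are specified to take their
// count as an 8-bit immediate, so a runtime N is handled by expanding a
// compile-time loop in which each step compares N against a constexpr
// candidate and calls the intrinsic with that constant. The idiom as used
// here:
//
//     Unroller<1, 32>::iterator([&, v = this](auto const i) {
//         constexpr uint8_t n = i.value;       // compile-time constant
//         if (N == n)                          // runtime dispatch
//             result = {_mm256_slli_epi16(v->u.v256[0], n)};
//     });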
Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_slli_epi16(v->u.v256[0], n)}; }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshl_32 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 32) return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_slli_epi32(v->u.v256[0], n)}; }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshl_64 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 32) return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_slli_epi64(v->u.v256[0], n)}; }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshl_128(uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 32) return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_slli_si256(v->u.v256[0], n)}; }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshl_256(uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 16) return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0))}; - if (N == 32) return Zeroes(); - SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { + Unroller<1, 32>::iterator([&, v = this](auto const i) { constexpr uint8_t n = i.value; - if (N == n) result = {_mm256_alignr_epi8(u.v256[0], _mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), 16 - n)};; - }); - Unroller<17, 32>::iterator([&,v=this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) result = {_mm256_slli_si256(_mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), n - 16)}; + if (N == n) + result = {_mm256_slli_epi16(v->u.v256[0], n)}; }); return result; } template <> -really_inline SuperVector<32> SuperVector<32>::vshl(uint8_t const N) const -{ +really_inline SuperVector<32> SuperVector<32>::vshl_32(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 32) + return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm256_slli_epi32(v->u.v256[0], n)}; + }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshl_64(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 32) + return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm256_slli_epi64(v->u.v256[0], n)}; + }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshl_128(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 32) + return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm256_slli_si256(v->u.v256[0], n)}; + }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshl_256(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 16) + return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(0, 0, 2, 0))}; + if (N == 32) + return Zeroes(); + SuperVector result; + Unroller<1, 
16>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm256_alignr_epi8( + u.v256[0], + _mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], + _MM_SHUFFLE(0, 0, 2, 0)), + 16 - n)}; + ; + }); + Unroller<17, 32>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm256_slli_si256( + _mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], + _MM_SHUFFLE(0, 0, 2, 0)), + n - 16)}; + }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshl(uint8_t const N) const { return vshl_256(N); } // template <> -// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) const +// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) +// const // { // SuperVector<16> result; -// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) result = {_mm_srli_epi8(v->u.v128[0], i)}; }); -// if (N == 16) result = Zeroes(); -// return result; +// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) +// result = {_mm_srli_epi8(v->u.v128[0], i)}; }); if (N == 16) result = +// Zeroes(); return result; // } template <> -really_inline SuperVector<32> SuperVector<32>::vshr_16 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 32) return Zeroes(); +really_inline SuperVector<32> SuperVector<32>::vshr_16(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 32) + return Zeroes(); SuperVector result; - Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_srli_epi16(v->u.v256[0], n)}; }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshr_32 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 32) return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_srli_epi32(v->u.v256[0], n)}; }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshr_64 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 32) return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_srli_epi64(v->u.v256[0], n)}; }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshr_128(uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 32) return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_srli_si256(v->u.v256[0], n)}; }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshr_256(uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 16) return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1))}; - if (N == 32) return Zeroes(); - SuperVector result; - Unroller<1, 16>::iterator([&,v=this](auto const i) { + Unroller<1, 32>::iterator([&, v = this](auto const i) { constexpr uint8_t n = i.value; - if (N == n) result = {_mm256_alignr_epi8(_mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), v->u.v256[0], n)}; - }); - Unroller<17, 32>::iterator([&,v=this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) result = {_mm256_srli_si256(_mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), n - 16)}; + if (N == n) + result = 
{_mm256_srli_epi16(v->u.v256[0], n)}; }); return result; } template <> -really_inline SuperVector<32> SuperVector<32>::vshr(uint8_t const N) const -{ +really_inline SuperVector<32> SuperVector<32>::vshr_32(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 32) + return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm256_srli_epi32(v->u.v256[0], n)}; + }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshr_64(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 32) + return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm256_srli_epi64(v->u.v256[0], n)}; + }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshr_128(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 32) + return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm256_srli_si256(v->u.v256[0], n)}; + }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshr_256(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 16) + return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(2, 0, 0, 1))}; + if (N == 32) + return Zeroes(); + SuperVector result; + Unroller<1, 16>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm256_alignr_epi8( + _mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], + _MM_SHUFFLE(2, 0, 0, 1)), + v->u.v256[0], n)}; + }); + Unroller<17, 32>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm256_srli_si256( + _mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], + _MM_SHUFFLE(2, 0, 0, 1)), + n - 16)}; + }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshr(uint8_t const N) const { return vshr_256(N); } template <> -really_inline SuperVector<32> SuperVector<32>::operator>>(uint8_t const N) const -{ +really_inline SuperVector<32> +SuperVector<32>::operator>>(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { - // As found here: https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx + // As found here: + // https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx if (N < 16) { - return {_mm256_alignr_epi8(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), u.v256[0], N)}; + return {_mm256_alignr_epi8( + _mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(2, 0, 0, 1)), + u.v256[0], N)}; } else if (N == 16) { - return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1))}; + return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(2, 0, 0, 1))}; } else { - return {_mm256_srli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), N - 16)}; + return {_mm256_srli_si256( + _mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(2, 0, 0, 1)), + N - 16)}; } } #endif @@ -1072,37 +1184,46 @@ really_inline SuperVector<32> SuperVector<32>::operator>>(uint8_t const N) const } template <> -really_inline SuperVector<32> SuperVector<32>::operator<<(uint8_t const N) const -{ +really_inline SuperVector<32> 
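// operator>> above and operator<< below share a fast path: when
// __builtin_constant_p(N) proves the count is a compile-time constant, the
// shift is emitted directly from the cited stackoverflow recipe
// (permute2x128 plus alignr / slli / srli); otherwise it falls back to the
// vshr_256 / vshl_256 runtime dispatch. Illustrative effect at a call site:
//
//     v >> 3;    // N constant: constant-folded short instruction sequence
//     v >> n;    // n runtime:  falls through to vshr_256(n)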
+SuperVector<32>::operator<<(uint8_t const N) const { #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { - // As found here: https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx + // As found here: + // https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx if (N < 16) { - return {_mm256_alignr_epi8(u.v256[0], _mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), 16 - N)}; + return {_mm256_alignr_epi8( + u.v256[0], + _mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(0, 0, 2, 0)), + 16 - N)}; } else if (N == 16) { - return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0))}; + return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(0, 0, 2, 0))}; } else { - return {_mm256_slli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), N - 16)}; + return {_mm256_slli_si256( + _mm256_permute2x128_si256(u.v256[0], u.v256[0], + _MM_SHUFFLE(0, 0, 2, 0)), + N - 16)}; } } #endif return vshl_256(N); } -template<> -really_inline SuperVector<32> SuperVector<32>::Ones_vshr(uint8_t const N) -{ - if (N == 0) return Ones(); +template <> +really_inline SuperVector<32> SuperVector<32>::Ones_vshr(uint8_t const N) { + if (N == 0) + return Ones(); if (N >= 16) return {SuperVector<16>::Ones_vshr(N - 16), SuperVector<16>::Zeroes()}; else return {SuperVector<16>::Ones(), SuperVector<16>::Ones_vshr(N)}; } -template<> -really_inline SuperVector<32> SuperVector<32>::Ones_vshl(uint8_t const N) -{ - if (N == 0) return Ones(); +template <> +really_inline SuperVector<32> SuperVector<32>::Ones_vshl(uint8_t const N) { + if (N == 0) + return Ones(); if (N >= 16) return {SuperVector<16>::Zeroes(), SuperVector<16>::Ones_vshl(N - 16)}; else @@ -1110,30 +1231,29 @@ really_inline SuperVector<32> SuperVector<32>::Ones_vshl(uint8_t const N) } template <> -really_inline SuperVector<32> SuperVector<32>::loadu(void const *ptr) -{ +really_inline SuperVector<32> SuperVector<32>::loadu(void const *ptr) { return {_mm256_loadu_si256((const m256 *)ptr)}; } template <> -really_inline SuperVector<32> SuperVector<32>::load(void const *ptr) -{ +really_inline SuperVector<32> SuperVector<32>::load(void const *ptr) { assert(ISALIGNED_N(ptr, alignof(SuperVector::size))); ptr = vectorscan_assume_aligned(ptr, SuperVector::size); return {_mm256_load_si256((const m256 *)ptr)}; } template <> -really_inline SuperVector<32> SuperVector<32>::loadu_maskz(void const *ptr, uint8_t const len) -{ +really_inline SuperVector<32> SuperVector<32>::loadu_maskz(void const *ptr, + uint8_t const len) { #ifdef HAVE_AVX512 u32 mask = (~0ULL) >> (32 - len); - SuperVector<32> v = _mm256_mask_loadu_epi8(Zeroes().u.v256[0], mask, (const m256 *)ptr); + SuperVector<32> v = + _mm256_mask_loadu_epi8(Zeroes().u.v256[0], mask, (const m256 *)ptr); v.print8("v"); return v; #else DEBUG_PRINTF("len = %d", len); - SuperVector<32> mask = Ones_vshr(32 -len); + SuperVector<32> mask = Ones_vshr(32 - len); mask.print8("mask"); (Ones() >> (32 - len)).print8("mask"); SuperVector<32> v = _mm256_loadu_si256((const m256 *)ptr); @@ -1142,10 +1262,11 @@ really_inline SuperVector<32> SuperVector<32>::loadu_maskz(void const *ptr, uint #endif } -template<> -really_inline SuperVector<32> SuperVector<32>::alignr(SuperVector<32> &other, int8_t offset) -{ -#if defined(HAVE__BUILTIN_CONSTANT_P) && !(defined(__GNUC__) && (__GNUC__ == 13)) +template <> +really_inline SuperVector<32> SuperVector<32>::alignr(SuperVector<32> &other, + int8_t 
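// Note the guard this hunk extends: the __builtin_constant_p shortcut in
// alignr() is now compiled out on GCC 13 and 14 (previously only 13),
// apparently to sidestep trouble with this construct on those compiler
// versions; the switch-based path below is used there instead.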
offset) { +#if defined(HAVE__BUILTIN_CONSTANT_P) && \ + !(defined(__GNUC__) && ((__GNUC__ == 13) || (__GNUC__ == 14))) if (__builtin_constant_p(offset)) { if (offset == 16) { return *this; @@ -1154,262 +1275,359 @@ really_inline SuperVector<32> SuperVector<32>::alignr(SuperVector<32> &other, in } } #endif - // As found here: https://stackoverflow.com/questions/8517970/mm-alignr-epi8-palignr-equivalent-in-avx2#8637458 - switch (offset){ - case 0 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 0), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 0)); break; - case 1 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 1), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 1)); break; - case 2 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 2), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 2)); break; - case 3 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 3), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 3)); break; - case 4 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 4), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 4)); break; - case 5 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 5), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 5)); break; - case 6 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 6), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 6)); break; - case 7 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 7), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 7)); break; - case 8 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 8), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 8)); break; - case 9 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 9), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 9)); break; - case 10 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 10), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 10)); break; - case 11 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 11), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 11)); break; - case 12 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 12), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 12)); break; - case 13 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 13), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 13)); break; - case 14 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 14), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 14)); break; - case 15 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 15), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 15)); break; - case 16 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 0), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 0)); break; - case 17 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 1), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 1)); break; - case 18 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 2), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 2)); break; - case 19 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 3), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 3)); break; - case 20 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 4), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 4)); break; - case 21 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 5), _mm_alignr_epi8(u.v128[0], 
other.u.v128[1], 5)); break; - case 22 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 6), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 6)); break; - case 23 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 7), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 7)); break; - case 24 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 8), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 8)); break; - case 25 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 9), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 9)); break; - case 26 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 10), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 10)); break; - case 27 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 11), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 11)); break; - case 28 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 12), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 12)); break; - case 29 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 13), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 13)); break; - case 30 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 14), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 14)); break; - case 31 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 15), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 15)); break; - default: break; + // As found here: + // https://stackoverflow.com/questions/8517970/mm-alignr-epi8-palignr-equivalent-in-avx2#8637458 + switch (offset) { + case 0: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 0), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 0)); + break; + case 1: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 1), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 1)); + break; + case 2: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 2), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 2)); + break; + case 3: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 3), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 3)); + break; + case 4: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 4), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 4)); + break; + case 5: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 5), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 5)); + break; + case 6: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 6), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 6)); + break; + case 7: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 7), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 7)); + break; + case 8: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 8), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 8)); + break; + case 9: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 9), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 9)); + break; + case 10: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 10), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 10)); + break; + case 11: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 11), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 11)); + break; + case 12: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 12), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 12)); + break; + case 13: + 
return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 13), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 13)); + break; + case 14: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 14), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 14)); + break; + case 15: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 15), + _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 15)); + break; + case 16: + return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 0), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 0)); + break; + case 17: + return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 1), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 1)); + break; + case 18: + return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 2), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 2)); + break; + case 19: + return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 3), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 3)); + break; + case 20: + return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 4), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 4)); + break; + case 21: + return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 5), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 5)); + break; + case 22: + return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 6), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 6)); + break; + case 23: + return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 7), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 7)); + break; + case 24: + return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 8), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 8)); + break; + case 25: + return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 9), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 9)); + break; + case 26: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[1], u.v128[0], 10), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 10)); + break; + case 27: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[1], u.v128[0], 11), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 11)); + break; + case 28: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[1], u.v128[0], 12), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 12)); + break; + case 29: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[1], u.v128[0], 13), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 13)); + break; + case 30: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[1], u.v128[0], 14), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 14)); + break; + case 31: + return _mm256_set_m128i( + _mm_alignr_epi8(u.v128[1], u.v128[0], 15), + _mm_alignr_epi8(u.v128[0], other.u.v128[1], 15)); + break; + default: + break; } return *this; } -template<> -template<> -really_inline SuperVector<32> SuperVector<32>::pshufb(SuperVector<32> b) -{ +template <> +template <> +really_inline SuperVector<32> SuperVector<32>::pshufb(SuperVector<32> b) { return {_mm256_shuffle_epi8(u.v256[0], b.u.v256[0])}; } -template<> -really_inline SuperVector<32> SuperVector<32>::pshufb_maskz(SuperVector<32> b, uint8_t const len) -{ - SuperVector<32> mask = Ones_vshr(32 -len); +template <> +really_inline SuperVector<32> SuperVector<32>::pshufb_maskz(SuperVector<32> b, + uint8_t const len) { + SuperVector<32> mask = Ones_vshr(32 - len); return mask & pshufb(b); } #endif // HAVE_AVX2 - // 512-bit AVX512 implementation #if defined(HAVE_AVX512) -template<> -really_inline SuperVector<64>::SuperVector(SuperVector const &o) -{ +template <> really_inline 
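// The 512-bit implementation that begins here mirrors the 16- and 32-byte
// versions. Worth noting: the converting constructors broadcast narrower
// vectors across the register (_mm512_broadcast_i64x4 replicates an m256
// twice, _mm512_broadcast_i32x4 replicates an m128 four times), so e.g.
//
//     SuperVector<64> v(some_m128);   // four copies of the 128-bit value
//
// rather than a zero-extended load.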
SuperVector<64>::SuperVector(SuperVector const &o) { u.v512[0] = o.u.v512[0]; } -template<> -really_inline SuperVector<64>::SuperVector(typename base_type::type const v) -{ +template <> +really_inline SuperVector<64>::SuperVector(typename base_type::type const v) { u.v512[0] = v; }; -template<> -template<> -really_inline SuperVector<64>::SuperVector(m256 const v) -{ +template <> +template <> +really_inline SuperVector<64>::SuperVector(m256 const v) { u.v512[0] = _mm512_broadcast_i64x4(v); }; -template<> -really_inline SuperVector<64>::SuperVector(m256 const lo, m256 const hi) -{ +template <> +really_inline SuperVector<64>::SuperVector(m256 const lo, m256 const hi) { u.v256[0] = lo; u.v256[1] = hi; }; -template<> -really_inline SuperVector<64>::SuperVector(SuperVector<32> const lo, SuperVector<32> const hi) -{ +template <> +really_inline SuperVector<64>::SuperVector(SuperVector<32> const lo, + SuperVector<32> const hi) { u.v256[0] = lo.u.v256[0]; u.v256[1] = hi.u.v256[0]; }; -template<> -template<> -really_inline SuperVector<64>::SuperVector(m128 const v) -{ +template <> +template <> +really_inline SuperVector<64>::SuperVector(m128 const v) { u.v512[0] = _mm512_broadcast_i32x4(v); }; -template<> -template<> -really_inline SuperVector<64>::SuperVector(int8_t const o) -{ +template <> +template <> +really_inline SuperVector<64>::SuperVector(int8_t const o) { u.v512[0] = _mm512_set1_epi8(o); } -template<> -template<> -really_inline SuperVector<64>::SuperVector(uint8_t const o) -{ +template <> +template <> +really_inline SuperVector<64>::SuperVector(uint8_t const o) { u.v512[0] = _mm512_set1_epi8(static_cast(o)); } -template<> -template<> -really_inline SuperVector<64>::SuperVector(int16_t const o) -{ +template <> +template <> +really_inline SuperVector<64>::SuperVector(int16_t const o) { u.v512[0] = _mm512_set1_epi16(o); } -template<> -template<> -really_inline SuperVector<64>::SuperVector(uint16_t const o) -{ +template <> +template <> +really_inline SuperVector<64>::SuperVector(uint16_t const o) { u.v512[0] = _mm512_set1_epi16(static_cast(o)); } -template<> -template<> -really_inline SuperVector<64>::SuperVector(int32_t const o) -{ +template <> +template <> +really_inline SuperVector<64>::SuperVector(int32_t const o) { u.v512[0] = _mm512_set1_epi32(o); } -template<> -template<> -really_inline SuperVector<64>::SuperVector(uint32_t const o) -{ +template <> +template <> +really_inline SuperVector<64>::SuperVector(uint32_t const o) { u.v512[0] = _mm512_set1_epi32(static_cast(o)); } -template<> -template<> -really_inline SuperVector<64>::SuperVector(int64_t const o) -{ +template <> +template <> +really_inline SuperVector<64>::SuperVector(int64_t const o) { u.v512[0] = _mm512_set1_epi64(o); } -template<> -template<> -really_inline SuperVector<64>::SuperVector(uint64_t const o) -{ +template <> +template <> +really_inline SuperVector<64>::SuperVector(uint64_t const o) { u.v512[0] = _mm512_set1_epi64(static_cast(o)); } // Constants -template<> -really_inline SuperVector<64> SuperVector<64>::Ones(void) -{ +template <> really_inline SuperVector<64> SuperVector<64>::Ones(void) { return {_mm512_set1_epi8(0xFF)}; } -template<> -really_inline SuperVector<64> SuperVector<64>::Zeroes(void) -{ +template <> really_inline SuperVector<64> SuperVector<64>::Zeroes(void) { return {_mm512_set1_epi8(0)}; } // Methods template <> -really_inline void SuperVector<64>::operator=(SuperVector<64> const &o) -{ +really_inline void SuperVector<64>::operator=(SuperVector<64> const &o) { u.v512[0] = o.u.v512[0]; } template <> 
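// The comparison operators below differ from the 128/256-bit versions in one
// mechanical way: AVX-512 byte compares return a 64-bit k-mask (one bit per
// lane) instead of a vector, so the result is widened back to the usual
// 0xFF/0x00 byte mask to keep the SuperVector API uniform:
//
//     __mmask64 k = _mm512_cmpeq_epi8_mask(a, b);  // 1 bit per byte lane
//     __m512i   m = _mm512_movm_epi8(k);           // bit -> 0xFF / 0x00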
-really_inline SuperVector<64> SuperVector<64>::operator&(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator&(SuperVector<64> const &b) const { return {_mm512_and_si512(u.v512[0], b.u.v512[0])}; } template <> -really_inline SuperVector<64> SuperVector<64>::operator|(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator|(SuperVector<64> const &b) const { return {_mm512_or_si512(u.v512[0], b.u.v512[0])}; } template <> -really_inline SuperVector<64> SuperVector<64>::operator^(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator^(SuperVector<64> const &b) const { return {_mm512_xor_si512(u.v512[0], b.u.v512[0])}; } -template <> -really_inline SuperVector<64> SuperVector<64>::operator!() const -{ +template <> really_inline SuperVector<64> SuperVector<64>::operator!() const { return {_mm512_xor_si512(u.v512[0], u.v512[0])}; } template <> -really_inline SuperVector<64> SuperVector<64>::opandnot(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::opandnot(SuperVector<64> const &b) const { return {_mm512_andnot_si512(u.v512[0], b.u.v512[0])}; } template <> -really_inline SuperVector<64> SuperVector<64>::operator==(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator==(SuperVector<64> const &b) const { SuperVector<64>::comparemask_type mask = _mm512_cmpeq_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> SuperVector<64>::operator!=(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator!=(SuperVector<64> const &b) const { SuperVector<64>::comparemask_type mask = _mm512_cmpneq_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> SuperVector<64>::operator>(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator>(SuperVector<64> const &b) const { SuperVector<64>::comparemask_type mask = _mm512_cmpgt_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> SuperVector<64>::operator<(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator<(SuperVector<64> const &b) const { SuperVector<64>::comparemask_type mask = _mm512_cmplt_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> SuperVector<64>::operator>=(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator>=(SuperVector<64> const &b) const { SuperVector<64>::comparemask_type mask = _mm512_cmpge_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> SuperVector<64>::operator<=(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator<=(SuperVector<64> const &b) const { SuperVector<64>::comparemask_type mask = _mm512_cmple_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> SuperVector<64>::eq(SuperVector<64> const &b) const -{ +really_inline SuperVector<64> +SuperVector<64>::eq(SuperVector<64> const &b) const { return (*this == b); } @@ -1445,51 +1663,44 @@ SuperVector<64>::iteration_mask( // } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_16_imm() const -{ +template +really_inline SuperVector<64> 
SuperVector<64>::vshl_16_imm() const { return {_mm512_slli_epi16(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_32_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshl_32_imm() const { return {_mm512_slli_epi32(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_64_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshl_64_imm() const { return {_mm512_slli_epi64(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_128_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshl_128_imm() const { return {_mm512_bslli_epi128(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_256_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshl_256_imm() const { return {}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_512_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshl_512_imm() const { return {}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshl_imm() const { return vshl_512_imm(); } @@ -1501,51 +1712,44 @@ really_inline SuperVector<64> SuperVector<64>::vshl_imm() const // } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_16_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshr_16_imm() const { return {_mm512_srli_epi16(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_32_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshr_32_imm() const { return {_mm512_srli_epi32(u.v512[0], N)}; } - + template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_64_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshr_64_imm() const { return {_mm512_srli_epi64(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_128_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshr_128_imm() const { return {_mm512_bsrli_epi128(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_256_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshr_256_imm() const { return {}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_512_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshr_512_imm() const { return {}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_imm() const -{ +template +really_inline SuperVector<64> SuperVector<64>::vshr_imm() const { return vshr_512_imm(); } @@ -1563,150 +1767,186 @@ template SuperVector<64> SuperVector<64>::vshr_128_imm<4>() const; #endif // template <> -// really_inline SuperVector<64> SuperVector<64>::vshl_8 (uint8_t const N) const +// really_inline SuperVector<64> SuperVector<64>::vshl_8 (uint8_t const N) +// const // { -// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return {_mm_slli_epi8(v->u.v128[0], i)}; }); -// if (N == 16) return Zeroes(); +// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return +// {_mm_slli_epi8(v->u.v128[0], i)}; }); if (N == 16) return Zeroes(); // } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_16 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 
64) return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshl_16(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 64) + return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_slli_epi16(v->u.v512[0], n)}; }); + Unroller<1, 64>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm512_slli_epi16(v->u.v512[0], n)}; + }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_32 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 64) return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshl_32(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 64) + return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_slli_epi32(v->u.v512[0], n)}; }); + Unroller<1, 64>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm512_slli_epi32(v->u.v512[0], n)}; + }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_64 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 64) return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshl_64(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 64) + return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_slli_epi64(v->u.v512[0], n)}; }); + Unroller<1, 64>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm512_slli_epi64(v->u.v512[0], n)}; + }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_128(uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 64) return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshl_128(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 64) + return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_bslli_epi128(v->u.v512[0], n)}; }); + Unroller<1, 64>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm512_bslli_epi128(v->u.v512[0], n)}; + }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_256(uint8_t const N) const -{ +really_inline SuperVector<64> SuperVector<64>::vshl_256(uint8_t const N) const { return vshl_128(N); } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_512(uint8_t const N) const -{ +really_inline SuperVector<64> SuperVector<64>::vshl_512(uint8_t const N) const { return vshl_128(N); } template <> -really_inline SuperVector<64> SuperVector<64>::vshl(uint8_t const N) const -{ +really_inline SuperVector<64> SuperVector<64>::vshl(uint8_t const N) const { return vshl_512(N); } // template <> -// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) const +// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) +// const // { // SuperVector<16> result; -// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) result = {_mm_srli_epi8(v->u.v128[0], i)}; }); -// if (N == 16) result = Zeroes(); -// return result; +// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) +// result = {_mm_srli_epi8(v->u.v128[0], i)}; 
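// (The vshl_8 / vshr_8 variants around here stay commented out at every
// width for a concrete reason: x86 SIMD has no byte-granularity shift --
// there is no _mm_slli_epi8 / _mm_srli_epi8 intrinsic -- so the referenced
// calls are hypothetical. The narrowest supported element shift is 16-bit.)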
}); if (N == 16) result = +// Zeroes(); return result; // } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_16 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 64) return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshr_16(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 64) + return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_srli_epi16(v->u.v512[0], n)}; }); + Unroller<1, 64>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm512_srli_epi16(v->u.v512[0], n)}; + }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_32 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 64) return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshr_32(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 64) + return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_srli_epi32(v->u.v512[0], n)}; }); + Unroller<1, 64>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm512_srli_epi32(v->u.v512[0], n)}; + }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_64 (uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 16) return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshr_64(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 16) + return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_srli_epi64(v->u.v512[0], n)}; }); + Unroller<1, 64>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm512_srli_epi64(v->u.v512[0], n)}; + }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_128(uint8_t const N) const -{ - if (N == 0) return *this; - if (N == 64) return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshr_128(uint8_t const N) const { + if (N == 0) + return *this; + if (N == 64) + return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_bsrli_epi128(v->u.v512[0], n)}; }); + Unroller<1, 64>::iterator([&, v = this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) + result = {_mm512_bsrli_epi128(v->u.v512[0], n)}; + }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_256(uint8_t const N) const -{ +really_inline SuperVector<64> SuperVector<64>::vshr_256(uint8_t const N) const { return vshr_128(N); } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_512(uint8_t const N) const -{ +really_inline SuperVector<64> SuperVector<64>::vshr_512(uint8_t const N) const { return vshr_128(N); } template <> -really_inline SuperVector<64> SuperVector<64>::vshr(uint8_t const N) const -{ +really_inline SuperVector<64> SuperVector<64>::vshr(uint8_t const N) const { return vshr_512(N); } -template<> -really_inline SuperVector<64> SuperVector<64>::Ones_vshr(uint8_t const N) -{ - if (N == 0) return Ones(); +template <> +really_inline SuperVector<64> SuperVector<64>::Ones_vshr(uint8_t const N) { + if (N == 0) + return Ones(); if (N >= 32) return {SuperVector<32>::Ones_vshr(N - 32), 
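// Ones_vshr / Ones_vshl compose recursively from the half-width vectors via
// the (lo, hi) constructor; these all-ones shifts are what synthesize the
// "first len bytes" masks used by the non-AVX512 loadu_maskz / pshufb_maskz
// paths. Shape of the right-shift case:
//
//     Ones_vshr(N), N >= 32:  (lo = Ones_vshr<32>(N - 32), hi = Zeroes<32>())
//     Ones_vshr(N), N <  32:  (lo = Ones<32>(),            hi = Ones_vshr<32>(N))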
SuperVector<32>::Zeroes()}; else return {SuperVector<32>::Ones(), SuperVector<32>::Ones_vshr(N)}; } -template<> -really_inline SuperVector<64> SuperVector<64>::Ones_vshl(uint8_t const N) -{ - if (N == 0) return Ones(); +template <> +really_inline SuperVector<64> SuperVector<64>::Ones_vshl(uint8_t const N) { + if (N == 0) + return Ones(); if (N >= 32) return {SuperVector<32>::Zeroes(), SuperVector<32>::Ones_vshl(N - 32)}; else @@ -1714,8 +1954,8 @@ really_inline SuperVector<64> SuperVector<64>::Ones_vshl(uint8_t const N) } template <> -really_inline SuperVector<64> SuperVector<64>::operator>>(uint8_t const N) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator>>(uint8_t const N) const { if (N == 0) { return *this; } else if (N < 32) { @@ -1737,8 +1977,8 @@ really_inline SuperVector<64> SuperVector<64>::operator>>(uint8_t const N) const } template <> -really_inline SuperVector<64> SuperVector<64>::operator<<(uint8_t const N) const -{ +really_inline SuperVector<64> +SuperVector<64>::operator<<(uint8_t const N) const { if (N == 0) { return *this; } else if (N < 32) { @@ -1760,48 +2000,47 @@ really_inline SuperVector<64> SuperVector<64>::operator<<(uint8_t const N) const } template <> -really_inline SuperVector<64> SuperVector<64>::loadu(void const *ptr) -{ +really_inline SuperVector<64> SuperVector<64>::loadu(void const *ptr) { return {_mm512_loadu_si512((const m512 *)ptr)}; } template <> -really_inline SuperVector<64> SuperVector<64>::load(void const *ptr) -{ +really_inline SuperVector<64> SuperVector<64>::load(void const *ptr) { assert(ISALIGNED_N(ptr, alignof(SuperVector::size))); ptr = vectorscan_assume_aligned(ptr, SuperVector::size); return {_mm512_load_si512((const m512 *)ptr)}; } template <> -really_inline SuperVector<64> SuperVector<64>::loadu_maskz(void const *ptr, uint8_t const len) -{ +really_inline SuperVector<64> SuperVector<64>::loadu_maskz(void const *ptr, + uint8_t const len) { u64a mask = (~0ULL) >> (64 - len); DEBUG_PRINTF("mask = %016llx\n", mask); - SuperVector<64> v = _mm512_mask_loadu_epi8(Zeroes().u.v512[0], mask, (const m512 *)ptr); + SuperVector<64> v = + _mm512_mask_loadu_epi8(Zeroes().u.v512[0], mask, (const m512 *)ptr); v.print8("v"); return v; } -template<> -template<> -really_inline SuperVector<64> SuperVector<64>::pshufb(SuperVector<64> b) -{ +template <> +template <> +really_inline SuperVector<64> SuperVector<64>::pshufb(SuperVector<64> b) { return {_mm512_shuffle_epi8(u.v512[0], b.u.v512[0])}; } -template<> -really_inline SuperVector<64> SuperVector<64>::pshufb_maskz(SuperVector<64> b, uint8_t const len) -{ +template <> +really_inline SuperVector<64> SuperVector<64>::pshufb_maskz(SuperVector<64> b, + uint8_t const len) { u64a mask = (~0ULL) >> (64 - len); DEBUG_PRINTF("mask = %016llx\n", mask); return {_mm512_maskz_shuffle_epi8(mask, u.v512[0], b.u.v512[0])}; } -template<> -really_inline SuperVector<64> SuperVector<64>::alignr(SuperVector<64> &l, int8_t offset) -{ -#if defined(HAVE__BUILTIN_CONSTANT_P) +template <> +really_inline SuperVector<64> SuperVector<64>::alignr(SuperVector<64> &l, + int8_t offset) { +#if defined(HAVE__BUILTIN_CONSTANT_P) && \ + !(defined(__GNUC__) && (__GNUC__ == 14)) if (__builtin_constant_p(offset)) { if (offset == 16) { return *this; @@ -1810,21 +2049,21 @@ really_inline SuperVector<64> SuperVector<64>::alignr(SuperVector<64> &l, int8_t } } #endif - if(offset == 0) { + if (offset == 0) { return *this; - } else if (offset < 32){ + } else if (offset < 32) { SuperVector<32> lo256 = u.v256[0]; SuperVector<32> hi256 = 
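// alignr() for the 512-bit vector (here) never uses a 512-bit alignr
// instruction: it splits both operands into 256-bit halves and forms the
// result from two half-width alignr calls, choosing the pair of halves by
// whether the offset crosses the 32-byte boundary -- carry1 becomes the low
// half and carry2 the high half of the result.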
u.v256[1]; SuperVector<32> o_lo256 = l.u.v256[0]; - SuperVector<32> carry1 = hi256.alignr(lo256,offset); - SuperVector<32> carry2 = o_lo256.alignr(hi256,offset); + SuperVector<32> carry1 = hi256.alignr(lo256, offset); + SuperVector<32> carry2 = o_lo256.alignr(hi256, offset); return SuperVector(carry1, carry2); - } else if (offset <= 64){ + } else if (offset <= 64) { SuperVector<32> hi256 = u.v256[1]; SuperVector<32> o_lo256 = l.u.v256[0]; SuperVector<32> o_hi256 = l.u.v256[1]; SuperVector<32> carry1 = o_lo256.alignr(hi256, offset - 32); - SuperVector<32> carry2 = o_hi256.alignr(o_lo256,offset -32); + SuperVector<32> carry2 = o_hi256.alignr(o_lo256, offset - 32); return SuperVector(carry1, carry2); } else { return *this; From 51ac3a2287a47f59396df6ee34b04c99ac1ced3e Mon Sep 17 00:00:00 2001 From: gtsoul-tech Date: Wed, 17 Apr 2024 13:55:42 +0300 Subject: [PATCH 15/19] clang-format revert --- src/util/supervector/arch/x86/impl.cpp | 1735 ++++++++++-------------- 1 file changed, 748 insertions(+), 987 deletions(-) diff --git a/src/util/supervector/arch/x86/impl.cpp b/src/util/supervector/arch/x86/impl.cpp index e0e9d966..d83f6792 100644 --- a/src/util/supervector/arch/x86/impl.cpp +++ b/src/util/supervector/arch/x86/impl.cpp @@ -35,155 +35,170 @@ #include "ue2common.h" #include "util/arch.h" -#include "util/supervector/supervector.hpp" #include "util/unaligned.h" +#include "util/supervector/supervector.hpp" // 128-bit SSE implementation -#if !(!defined(RELEASE_BUILD) && defined(FAT_RUNTIME) && \ - (defined(HAVE_AVX2) || defined(HAVE_AVX512))) && \ - defined(HAVE_SIMD_128_BITS) +#if !(!defined(RELEASE_BUILD) && defined(FAT_RUNTIME) && (defined(HAVE_AVX2) || defined(HAVE_AVX512))) && defined(HAVE_SIMD_128_BITS) -template <> -really_inline SuperVector<16>::SuperVector(SuperVector const &other) { +template<> +really_inline SuperVector<16>::SuperVector(SuperVector const &other) +{ u.v128[0] = other.u.v128[0]; } -template <> -really_inline SuperVector<16>::SuperVector(typename base_type::type const v) { +template<> +really_inline SuperVector<16>::SuperVector(typename base_type::type const v) +{ u.v128[0] = v; }; -template <> -template <> -really_inline SuperVector<16>::SuperVector(int8_t const other) { +template<> +template<> +really_inline SuperVector<16>::SuperVector(int8_t const other) +{ u.v128[0] = _mm_set1_epi8(other); } -template <> -template <> -really_inline SuperVector<16>::SuperVector(uint8_t const other) { +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint8_t const other) +{ u.v128[0] = _mm_set1_epi8(static_cast(other)); } -template <> -template <> -really_inline SuperVector<16>::SuperVector(int16_t const other) { +template<> +template<> +really_inline SuperVector<16>::SuperVector(int16_t const other) +{ u.v128[0] = _mm_set1_epi16(other); } -template <> -template <> -really_inline SuperVector<16>::SuperVector(uint16_t const other) { +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint16_t const other) +{ u.v128[0] = _mm_set1_epi16(static_cast(other)); } -template <> -template <> -really_inline SuperVector<16>::SuperVector(int32_t const other) { +template<> +template<> +really_inline SuperVector<16>::SuperVector(int32_t const other) +{ u.v128[0] = _mm_set1_epi32(other); } -template <> -template <> -really_inline SuperVector<16>::SuperVector(uint32_t const other) { +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint32_t const other) +{ u.v128[0] = _mm_set1_epi32(static_cast(other)); } -template <> -template <> -really_inline 
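// The scalar constructors being restored around here are splats: each one
// broadcasts its argument to every lane via the matching _mm_set1_*
// intrinsic. Illustrative use:
//
//     SuperVector<16> spaces(uint8_t{0x20});  // sixteen 0x20 bytes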
SuperVector<16>::SuperVector(int64_t const other) { +template<> +template<> +really_inline SuperVector<16>::SuperVector(int64_t const other) +{ u.v128[0] = _mm_set1_epi64x(other); } -template <> -template <> -really_inline SuperVector<16>::SuperVector(uint64_t const other) { +template<> +template<> +really_inline SuperVector<16>::SuperVector(uint64_t const other) +{ u.v128[0] = _mm_set1_epi64x(static_cast(other)); } // Constants -template <> really_inline SuperVector<16> SuperVector<16>::Ones() { +template<> +really_inline SuperVector<16> SuperVector<16>::Ones() +{ return {_mm_set1_epi8(0xFF)}; } -template <> really_inline SuperVector<16> SuperVector<16>::Zeroes(void) { +template<> +really_inline SuperVector<16> SuperVector<16>::Zeroes(void) +{ return {_mm_set1_epi8(0)}; } // Methods template <> -really_inline void SuperVector<16>::operator=(SuperVector<16> const &other) { +really_inline void SuperVector<16>::operator=(SuperVector<16> const &other) +{ u.v128[0] = other.u.v128[0]; } template <> -really_inline SuperVector<16> -SuperVector<16>::operator&(SuperVector<16> const &b) const { +really_inline SuperVector<16> SuperVector<16>::operator&(SuperVector<16> const &b) const +{ return {_mm_and_si128(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> -SuperVector<16>::operator|(SuperVector<16> const &b) const { +really_inline SuperVector<16> SuperVector<16>::operator|(SuperVector<16> const &b) const +{ return {_mm_or_si128(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> -SuperVector<16>::operator^(SuperVector<16> const &b) const { +really_inline SuperVector<16> SuperVector<16>::operator^(SuperVector<16> const &b) const +{ return {_mm_xor_si128(u.v128[0], b.u.v128[0])}; } -template <> really_inline SuperVector<16> SuperVector<16>::operator!() const { +template <> +really_inline SuperVector<16> SuperVector<16>::operator!() const +{ return {_mm_xor_si128(u.v128[0], u.v128[0])}; } template <> -really_inline SuperVector<16> -SuperVector<16>::opandnot(SuperVector<16> const &b) const { +really_inline SuperVector<16> SuperVector<16>::opandnot(SuperVector<16> const &b) const +{ return {_mm_andnot_si128(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> -SuperVector<16>::operator==(SuperVector<16> const &b) const { +really_inline SuperVector<16> SuperVector<16>::operator==(SuperVector<16> const &b) const +{ return {_mm_cmpeq_epi8(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> -SuperVector<16>::operator!=(SuperVector<16> const &b) const { +really_inline SuperVector<16> SuperVector<16>::operator!=(SuperVector<16> const &b) const +{ return !(*this == b); } template <> -really_inline SuperVector<16> -SuperVector<16>::operator>(SuperVector<16> const &b) const { +really_inline SuperVector<16> SuperVector<16>::operator>(SuperVector<16> const &b) const +{ return {_mm_cmpgt_epi8(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> -SuperVector<16>::operator<(SuperVector<16> const &b) const { +really_inline SuperVector<16> SuperVector<16>::operator<(SuperVector<16> const &b) const +{ return {_mm_cmplt_epi8(u.v128[0], b.u.v128[0])}; } template <> -really_inline SuperVector<16> -SuperVector<16>::operator>=(SuperVector<16> const &b) const { +really_inline SuperVector<16> SuperVector<16>::operator>=(SuperVector<16> const &b) const +{ return !(*this < b); } template <> -really_inline SuperVector<16> -SuperVector<16>::operator<=(SuperVector<16> const &b) const { +really_inline SuperVector<16> 
SuperVector<16>::operator<=(SuperVector<16> const &b) const +{ return !(*this > b); } template <> -really_inline SuperVector<16> -SuperVector<16>::eq(SuperVector<16> const &b) const { +really_inline SuperVector<16> SuperVector<16>::eq(SuperVector<16> const &b) const +{ return (*this == b); } @@ -217,32 +232,37 @@ SuperVector<16>::iteration_mask( // } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshl_16_imm() const { +template +really_inline SuperVector<16> SuperVector<16>::vshl_16_imm() const +{ return {_mm_slli_epi16(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshl_32_imm() const { +template +really_inline SuperVector<16> SuperVector<16>::vshl_32_imm() const +{ return {_mm_slli_epi32(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshl_64_imm() const { +template +really_inline SuperVector<16> SuperVector<16>::vshl_64_imm() const +{ return {_mm_slli_epi64(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshl_128_imm() const { +template +really_inline SuperVector<16> SuperVector<16>::vshl_128_imm() const +{ return {_mm_slli_si128(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshl_imm() const { +template +really_inline SuperVector<16> SuperVector<16>::vshl_imm() const +{ return vshl_128_imm(); } @@ -254,32 +274,37 @@ really_inline SuperVector<16> SuperVector<16>::vshl_imm() const { // } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshr_16_imm() const { +template +really_inline SuperVector<16> SuperVector<16>::vshr_16_imm() const +{ return {_mm_srli_epi16(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshr_32_imm() const { +template +really_inline SuperVector<16> SuperVector<16>::vshr_32_imm() const +{ return {_mm_srli_epi32(u.v128[0], N)}; } - + template <> -template -really_inline SuperVector<16> SuperVector<16>::vshr_64_imm() const { +template +really_inline SuperVector<16> SuperVector<16>::vshr_64_imm() const +{ return {_mm_srli_epi64(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshr_128_imm() const { +template +really_inline SuperVector<16> SuperVector<16>::vshr_128_imm() const +{ return {_mm_srli_si128(u.v128[0], N)}; } template <> -template -really_inline SuperVector<16> SuperVector<16>::vshr_imm() const { +template +really_inline SuperVector<16> SuperVector<16>::vshr_imm() const +{ return vshr_128_imm(); } @@ -297,196 +322,156 @@ template SuperVector<16> SuperVector<16>::vshr_128_imm<4>() const; #endif // template <> -// really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) -// const +// really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) const // { -// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return -// {_mm_slli_epi8(v->u.v128[0], i)}; }); if (N == 16) return Zeroes(); +// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return {_mm_slli_epi8(v->u.v128[0], i)}; }); +// if (N == 16) return Zeroes(); // } template <> -really_inline SuperVector<16> SuperVector<16>::vshl_16(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::vshl_16 (uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_slli_epi16(u.v128[0], N)}; } #endif - if (N == 0) - return *this; - if (N == 16) - return Zeroes(); + if (N == 0) return *this; + if (N == 16) return Zeroes(); SuperVector 
result; - Unroller<1, 16>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm_slli_epi16(v->u.v128[0], n)}; - }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_slli_epi16(v->u.v128[0], n)}; }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshl_32(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::vshl_32 (uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_slli_epi32(u.v128[0], N)}; } #endif - if (N == 0) - return *this; - if (N == 16) - return Zeroes(); + if (N == 0) return *this; + if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm_slli_epi32(v->u.v128[0], n)}; - }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_slli_epi32(v->u.v128[0], n)}; }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshl_64(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::vshl_64 (uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_slli_epi64(u.v128[0], N)}; } #endif - if (N == 0) - return *this; - if (N == 16) - return Zeroes(); + if (N == 0) return *this; + if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm_slli_epi64(v->u.v128[0], n)}; - }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_slli_epi64(v->u.v128[0], n)}; }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshl_128(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::vshl_128(uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) && !defined(VS_SIMDE_BACKEND) if (__builtin_constant_p(N)) { return {_mm_slli_si128(u.v128[0], N)}; } #endif - if (N == 0) - return *this; - if (N == 16) - return Zeroes(); + if (N == 0) return *this; + if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm_slli_si128(v->u.v128[0], n)}; - }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_slli_si128(v->u.v128[0], n)}; }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshl(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::vshl(uint8_t const N) const +{ return vshl_128(N); } // template <> -// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) -// const +// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) const // { // SuperVector<16> result; -// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) -// result = {_mm_srli_epi8(v->u.v128[0], i)}; }); if (N == 16) result = -// Zeroes(); return result; +// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) result = {_mm_srli_epi8(v->u.v128[0], i)}; }); +// if (N == 16) result = Zeroes(); +// return result; // } template <> -really_inline SuperVector<16> SuperVector<16>::vshr_16(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::vshr_16 (uint8_t const 
N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_srli_epi16(u.v128[0], N)}; } #endif - if (N == 0) - return *this; - if (N == 16) - return Zeroes(); + if (N == 0) return *this; + if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm_srli_epi16(v->u.v128[0], n)}; - }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_srli_epi16(v->u.v128[0], n)}; }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshr_32(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::vshr_32 (uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_srli_epi32(u.v128[0], N)}; } #endif - if (N == 0) - return *this; - if (N == 16) - return Zeroes(); + if (N == 0) return *this; + if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm_srli_epi32(v->u.v128[0], n)}; - }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_srli_epi32(v->u.v128[0], n)}; }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshr_64(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::vshr_64 (uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { return {_mm_srli_epi64(u.v128[0], N)}; } #endif - if (N == 0) - return *this; - if (N == 16) - return Zeroes(); + if (N == 0) return *this; + if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm_srli_epi64(v->u.v128[0], n)}; - }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_srli_epi64(v->u.v128[0], n)}; }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshr_128(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::vshr_128(uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) && !defined(VS_SIMDE_BACKEND) if (__builtin_constant_p(N)) { return {_mm_srli_si128(u.v128[0], N)}; } #endif - if (N == 0) - return *this; - if (N == 16) - return Zeroes(); + if (N == 0) return *this; + if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 16>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm_srli_si128(v->u.v128[0], n)}; - }); + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm_srli_si128(v->u.v128[0], n)}; }); return result; } template <> -really_inline SuperVector<16> SuperVector<16>::vshr(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::vshr(uint8_t const N) const +{ return vshr_128(N); } template <> -really_inline SuperVector<16> -SuperVector<16>::operator>>(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::operator>>(uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) && !defined(VS_SIMDE_BACKEND) if (__builtin_constant_p(N)) { return {_mm_srli_si128(u.v128[0], N)}; @@ -496,8 +481,8 @@ SuperVector<16>::operator>>(uint8_t const N) const { } template <> -really_inline SuperVector<16> 
-SuperVector<16>::operator<<(uint8_t const N) const { +really_inline SuperVector<16> SuperVector<16>::operator<<(uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) && !defined(VS_SIMDE_BACKEND) if (__builtin_constant_p(N)) { return {_mm_slli_si128(u.v128[0], N)}; @@ -506,45 +491,45 @@ SuperVector<16>::operator<<(uint8_t const N) const { return vshl_128(N); } -template <> -really_inline SuperVector<16> SuperVector<16>::Ones_vshr(uint8_t const N) { - if (N == 0) - return Ones(); - else - return Ones().vshr_128(N); +template<> +really_inline SuperVector<16> SuperVector<16>::Ones_vshr(uint8_t const N) +{ + if (N == 0) return Ones(); + else return Ones().vshr_128(N); +} + +template<> +really_inline SuperVector<16> SuperVector<16>::Ones_vshl(uint8_t const N) +{ + if (N == 0) return Ones(); + else return Ones().vshr_128(N); } template <> -really_inline SuperVector<16> SuperVector<16>::Ones_vshl(uint8_t const N) { - if (N == 0) - return Ones(); - else - return Ones().vshr_128(N); -} - -template <> -really_inline SuperVector<16> SuperVector<16>::loadu(void const *ptr) { +really_inline SuperVector<16> SuperVector<16>::loadu(void const *ptr) +{ return _mm_loadu_si128((const m128 *)ptr); } template <> -really_inline SuperVector<16> SuperVector<16>::load(void const *ptr) { +really_inline SuperVector<16> SuperVector<16>::load(void const *ptr) +{ assert(ISALIGNED_N(ptr, alignof(SuperVector::size))); ptr = vectorscan_assume_aligned(ptr, SuperVector::size); return _mm_load_si128((const m128 *)ptr); } template <> -really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, - uint8_t const len) { - SuperVector mask = Ones_vshr(16 - len); +really_inline SuperVector<16> SuperVector<16>::loadu_maskz(void const *ptr, uint8_t const len) +{ + SuperVector mask = Ones_vshr(16 -len); SuperVector v = _mm_loadu_si128((const m128 *)ptr); return mask & v; } -template <> -really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, - int8_t offset) { +template<> +really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, int8_t offset) +{ #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(offset)) { if (offset == 16) { @@ -554,239 +539,224 @@ really_inline SuperVector<16> SuperVector<16>::alignr(SuperVector<16> &other, } } #endif - switch (offset) { - case 0: - return other; - break; - case 1: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 1)}; - break; - case 2: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 2)}; - break; - case 3: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 3)}; - break; - case 4: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 4)}; - break; - case 5: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 5)}; - break; - case 6: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 6)}; - break; - case 7: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 7)}; - break; - case 8: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 8)}; - break; - case 9: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 9)}; - break; - case 10: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 10)}; - break; - case 11: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 11)}; - break; - case 12: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 12)}; - break; - case 13: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 13)}; - break; - case 14: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 14)}; - break; - case 15: - return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 15)}; - break; - 
default: - break; + switch(offset) { + case 0: return other; break; + case 1: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 1)}; break; + case 2: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 2)}; break; + case 3: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 3)}; break; + case 4: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 4)}; break; + case 5: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 5)}; break; + case 6: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 6)}; break; + case 7: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 7)}; break; + case 8: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 8)}; break; + case 9: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 9)}; break; + case 10: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 10)}; break; + case 11: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 11)}; break; + case 12: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 12)}; break; + case 13: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 13)}; break; + case 14: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 14)}; break; + case 15: return {_mm_alignr_epi8(u.v128[0], other.u.v128[0], 15)}; break; + default: break; } return *this; } -template <> -template <> -really_inline SuperVector<16> SuperVector<16>::pshufb(SuperVector<16> b) { +template<> +template<> +really_inline SuperVector<16> SuperVector<16>::pshufb(SuperVector<16> b) +{ return {_mm_shuffle_epi8(u.v128[0], b.u.v128[0])}; } -template <> -really_inline SuperVector<16> SuperVector<16>::pshufb_maskz(SuperVector<16> b, - uint8_t const len) { - SuperVector mask = Ones_vshr(16 - len); +template<> +really_inline SuperVector<16> SuperVector<16>::pshufb_maskz(SuperVector<16> b, uint8_t const len) +{ + SuperVector mask = Ones_vshr(16 -len); return mask & pshufb(b); } #endif // !defined(FAT_RUNTIME) && !defined(HAVE_AVX2) // 256-bit AVX2 implementation -#if !(!defined(RELEASE_BUILD) && defined(FAT_RUNTIME) && \ - defined(HAVE_AVX512)) && \ - defined(HAVE_AVX2) +#if !(!defined(RELEASE_BUILD) && defined(FAT_RUNTIME) && defined(HAVE_AVX512)) && defined(HAVE_AVX2) -template <> -really_inline SuperVector<32>::SuperVector(SuperVector const &other) { +template<> +really_inline SuperVector<32>::SuperVector(SuperVector const &other) +{ u.v256[0] = other.u.v256[0]; } -template <> -really_inline SuperVector<32>::SuperVector(typename base_type::type const v) { +template<> +really_inline SuperVector<32>::SuperVector(typename base_type::type const v) +{ u.v256[0] = v; }; -template <> -template <> -really_inline SuperVector<32>::SuperVector(m128 const v) { +template<> +template<> +really_inline SuperVector<32>::SuperVector(m128 const v) +{ u.v256[0] = _mm256_broadcastsi128_si256(v); }; -template <> -really_inline SuperVector<32>::SuperVector(m128 const lo, m128 const hi) { +template<> +really_inline SuperVector<32>::SuperVector(m128 const lo, m128 const hi) +{ u.v128[0] = lo; u.v128[1] = hi; }; -template <> -really_inline SuperVector<32>::SuperVector(SuperVector<16> const lo, - SuperVector<16> const hi) { +template<> +really_inline SuperVector<32>::SuperVector(SuperVector<16> const lo, SuperVector<16> const hi) +{ u.v128[0] = lo.u.v128[0]; u.v128[1] = hi.u.v128[0]; }; -template <> -template <> -really_inline SuperVector<32>::SuperVector(int8_t const other) { +template<> +template<> +really_inline SuperVector<32>::SuperVector(int8_t const other) +{ u.v256[0] = _mm256_set1_epi8(other); } -template <> -template <> -really_inline SuperVector<32>::SuperVector(uint8_t const other) { 
+template<> +template<> +really_inline SuperVector<32>::SuperVector(uint8_t const other) +{ u.v256[0] = _mm256_set1_epi8(static_cast(other)); } -template <> -template <> -really_inline SuperVector<32>::SuperVector(int16_t const other) { +template<> +template<> +really_inline SuperVector<32>::SuperVector(int16_t const other) +{ u.v256[0] = _mm256_set1_epi16(other); } -template <> -template <> -really_inline SuperVector<32>::SuperVector(uint16_t const other) { +template<> +template<> +really_inline SuperVector<32>::SuperVector(uint16_t const other) +{ u.v256[0] = _mm256_set1_epi16(static_cast(other)); } -template <> -template <> -really_inline SuperVector<32>::SuperVector(int32_t const other) { +template<> +template<> +really_inline SuperVector<32>::SuperVector(int32_t const other) +{ u.v256[0] = _mm256_set1_epi32(other); } -template <> -template <> -really_inline SuperVector<32>::SuperVector(uint32_t const other) { +template<> +template<> +really_inline SuperVector<32>::SuperVector(uint32_t const other) +{ u.v256[0] = _mm256_set1_epi32(static_cast(other)); } -template <> -template <> -really_inline SuperVector<32>::SuperVector(int64_t const other) { +template<> +template<> +really_inline SuperVector<32>::SuperVector(int64_t const other) +{ u.v256[0] = _mm256_set1_epi64x(other); } -template <> -template <> -really_inline SuperVector<32>::SuperVector(uint64_t const other) { +template<> +template<> +really_inline SuperVector<32>::SuperVector(uint64_t const other) +{ u.v256[0] = _mm256_set1_epi64x(static_cast(other)); } // Constants -template <> really_inline SuperVector<32> SuperVector<32>::Ones(void) { +template<> +really_inline SuperVector<32> SuperVector<32>::Ones(void) +{ return {_mm256_set1_epi8(0xFF)}; } -template <> really_inline SuperVector<32> SuperVector<32>::Zeroes(void) { +template<> +really_inline SuperVector<32> SuperVector<32>::Zeroes(void) +{ return {_mm256_set1_epi8(0)}; } template <> -really_inline void SuperVector<32>::operator=(SuperVector<32> const &other) { +really_inline void SuperVector<32>::operator=(SuperVector<32> const &other) +{ u.v256[0] = other.u.v256[0]; } template <> -really_inline SuperVector<32> -SuperVector<32>::operator&(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::operator&(SuperVector<32> const &b) const +{ return {_mm256_and_si256(u.v256[0], b.u.v256[0])}; } template <> -really_inline SuperVector<32> -SuperVector<32>::operator|(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::operator|(SuperVector<32> const &b) const +{ return {_mm256_or_si256(u.v256[0], b.u.v256[0])}; } template <> -really_inline SuperVector<32> -SuperVector<32>::operator^(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::operator^(SuperVector<32> const &b) const +{ return {_mm256_xor_si256(u.v256[0], b.u.v256[0])}; } -template <> really_inline SuperVector<32> SuperVector<32>::operator!() const { +template <> +really_inline SuperVector<32> SuperVector<32>::operator!() const +{ return {_mm256_xor_si256(u.v256[0], u.v256[0])}; } template <> -really_inline SuperVector<32> -SuperVector<32>::opandnot(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::opandnot(SuperVector<32> const &b) const +{ return {_mm256_andnot_si256(u.v256[0], b.u.v256[0])}; } template <> -really_inline SuperVector<32> -SuperVector<32>::operator==(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::operator==(SuperVector<32> const &b) const +{ return 
{_mm256_cmpeq_epi8(u.v256[0], b.u.v256[0])}; } template <> -really_inline SuperVector<32> -SuperVector<32>::operator!=(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::operator!=(SuperVector<32> const &b) const +{ return !(*this == b); } template <> -really_inline SuperVector<32> -SuperVector<32>::operator>(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::operator>(SuperVector<32> const &b) const +{ return {_mm256_cmpgt_epi8(u.v256[0], b.u.v256[0])}; } template <> -really_inline SuperVector<32> -SuperVector<32>::operator<(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::operator<(SuperVector<32> const &b) const +{ return (b > *this); } template <> -really_inline SuperVector<32> -SuperVector<32>::operator>=(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::operator>=(SuperVector<32> const &b) const +{ return !(*this < b); } template <> -really_inline SuperVector<32> -SuperVector<32>::operator<=(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::operator<=(SuperVector<32> const &b) const +{ return !(*this > b); } template <> -really_inline SuperVector<32> -SuperVector<32>::eq(SuperVector<32> const &b) const { +really_inline SuperVector<32> SuperVector<32>::eq(SuperVector<32> const &b) const +{ return (*this == b); } @@ -820,56 +790,51 @@ SuperVector<32>::iteration_mask( // } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_16_imm() const { +template +really_inline SuperVector<32> SuperVector<32>::vshl_16_imm() const +{ return {_mm256_slli_epi16(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_32_imm() const { +template +really_inline SuperVector<32> SuperVector<32>::vshl_32_imm() const +{ return {_mm256_slli_epi32(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_64_imm() const { +template +really_inline SuperVector<32> SuperVector<32>::vshl_64_imm() const +{ return {_mm256_slli_epi64(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_128_imm() const { +template +really_inline SuperVector<32> SuperVector<32>::vshl_128_imm() const +{ return {_mm256_slli_si256(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_256_imm() const { - if (N == 0) - return *this; - if (N == 16) - return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(0, 0, 2, 0))}; - if (N == 32) - return Zeroes(); +template +really_inline SuperVector<32> SuperVector<32>::vshl_256_imm() const +{ + if (N == 0) return *this; + if (N == 16) return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0))}; + if (N == 32) return Zeroes(); if (N < 16) { - return {_mm256_alignr_epi8( - u.v256[0], - _mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(0, 0, 2, 0)), - 16 - N)}; + return {_mm256_alignr_epi8(u.v256[0], _mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), 16 - N)}; } else { - return {_mm256_slli_si256( - _mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(0, 0, 2, 0)), - N - 16)}; + return {_mm256_slli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), N - 16)}; } } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshl_imm() const { +template +really_inline SuperVector<32> SuperVector<32>::vshl_imm() const +{ return vshl_256_imm(); } @@ -881,56 +846,51 @@ 
really_inline SuperVector<32> SuperVector<32>::vshl_imm() const { // } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_16_imm() const { +template +really_inline SuperVector<32> SuperVector<32>::vshr_16_imm() const +{ return {_mm256_srli_epi16(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_32_imm() const { +template +really_inline SuperVector<32> SuperVector<32>::vshr_32_imm() const +{ return {_mm256_srli_epi32(u.v256[0], N)}; } - + template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_64_imm() const { +template +really_inline SuperVector<32> SuperVector<32>::vshr_64_imm() const +{ return {_mm256_srli_epi64(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_128_imm() const { +template +really_inline SuperVector<32> SuperVector<32>::vshr_128_imm() const +{ return {_mm256_srli_si256(u.v256[0], N)}; } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_256_imm() const { - if (N == 0) - return *this; - if (N == 16) - return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(2, 0, 0, 1))}; - if (N == 32) - return Zeroes(); +template +really_inline SuperVector<32> SuperVector<32>::vshr_256_imm() const +{ + if (N == 0) return *this; + if (N == 16) return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1))}; + if (N == 32) return Zeroes(); if (N < 16) { - return {_mm256_alignr_epi8( - u.v256[0], - _mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(0, 0, 2, 0)), - 16 - N)}; + return {_mm256_alignr_epi8(u.v256[0], _mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), 16 - N)}; } else { - return {_mm256_srli_si256( - _mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(2, 0, 0, 1)), - N - 16)}; + return {_mm256_srli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), N - 16)}; } } template <> -template -really_inline SuperVector<32> SuperVector<32>::vshr_imm() const { +template +really_inline SuperVector<32> SuperVector<32>::vshr_imm() const +{ return vshr_256_imm(); } @@ -950,233 +910,161 @@ template SuperVector<32> SuperVector<32>::vshr_imm<1>() const; #endif // template <> -// really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) -// const +// really_inline SuperVector<16> SuperVector<16>::vshl_8 (uint8_t const N) const // { -// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return -// {_mm256_slli_epi8(v->u.v256[0], i)}; }); if (N == 16) return Zeroes(); +// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return {_mm256_slli_epi8(v->u.v256[0], i)}; }); +// if (N == 16) return Zeroes(); // } template <> -really_inline SuperVector<32> SuperVector<32>::vshl_16(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 32) - return Zeroes(); +really_inline SuperVector<32> SuperVector<32>::vshl_16 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 32) return Zeroes(); SuperVector result; - Unroller<1, 32>::iterator([&, v = this](auto const i) { + Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_slli_epi16(v->u.v256[0], n)}; }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshl_32 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 32) return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) 
result = {_mm256_slli_epi32(v->u.v256[0], n)}; }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshl_64 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 32) return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_slli_epi64(v->u.v256[0], n)}; }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshl_128(uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 32) return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_slli_si256(v->u.v256[0], n)}; }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshl_256(uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 16) return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0))}; + if (N == 32) return Zeroes(); + SuperVector result; + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_slli_epi16(v->u.v256[0], n)}; + if (N == n) result = {_mm256_alignr_epi8(u.v256[0], _mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), 16 - n)};; + }); + Unroller<17, 32>::iterator([&,v=this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) result = {_mm256_slli_si256(_mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), n - 16)}; }); return result; } template <> -really_inline SuperVector<32> SuperVector<32>::vshl_32(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 32) - return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_slli_epi32(v->u.v256[0], n)}; - }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshl_64(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 32) - return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_slli_epi64(v->u.v256[0], n)}; - }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshl_128(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 32) - return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_slli_si256(v->u.v256[0], n)}; - }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshl_256(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 16) - return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(0, 0, 2, 0))}; - if (N == 32) - return Zeroes(); - SuperVector result; - Unroller<1, 16>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_alignr_epi8( - u.v256[0], - _mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], - _MM_SHUFFLE(0, 0, 2, 0)), - 16 - n)}; - ; - }); - Unroller<17, 32>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_slli_si256( - _mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], - _MM_SHUFFLE(0, 0, 2, 0)), - n - 16)}; - }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshl(uint8_t const 
N) const { +really_inline SuperVector<32> SuperVector<32>::vshl(uint8_t const N) const +{ return vshl_256(N); } // template <> -// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) -// const +// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) const // { // SuperVector<16> result; -// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) -// result = {_mm_srli_epi8(v->u.v128[0], i)}; }); if (N == 16) result = -// Zeroes(); return result; +// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) result = {_mm_srli_epi8(v->u.v128[0], i)}; }); +// if (N == 16) result = Zeroes(); +// return result; // } template <> -really_inline SuperVector<32> SuperVector<32>::vshr_16(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 32) - return Zeroes(); +really_inline SuperVector<32> SuperVector<32>::vshr_16 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 32) return Zeroes(); SuperVector result; - Unroller<1, 32>::iterator([&, v = this](auto const i) { + Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_srli_epi16(v->u.v256[0], n)}; }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshr_32 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 32) return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_srli_epi32(v->u.v256[0], n)}; }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshr_64 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 32) return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_srli_epi64(v->u.v256[0], n)}; }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshr_128(uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 32) return Zeroes(); + SuperVector result; + Unroller<1, 32>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm256_srli_si256(v->u.v256[0], n)}; }); + return result; +} + +template <> +really_inline SuperVector<32> SuperVector<32>::vshr_256(uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 16) return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1))}; + if (N == 32) return Zeroes(); + SuperVector result; + Unroller<1, 16>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_srli_epi16(v->u.v256[0], n)}; + if (N == n) result = {_mm256_alignr_epi8(_mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), v->u.v256[0], n)}; + }); + Unroller<17, 32>::iterator([&,v=this](auto const i) { + constexpr uint8_t n = i.value; + if (N == n) result = {_mm256_srli_si256(_mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), n - 16)}; }); return result; } template <> -really_inline SuperVector<32> SuperVector<32>::vshr_32(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 32) - return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_srli_epi32(v->u.v256[0], n)}; - }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshr_64(uint8_t const N) const { - if (N 
== 0) - return *this; - if (N == 32) - return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_srli_epi64(v->u.v256[0], n)}; - }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshr_128(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 32) - return Zeroes(); - SuperVector result; - Unroller<1, 32>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_srli_si256(v->u.v256[0], n)}; - }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshr_256(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 16) - return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(2, 0, 0, 1))}; - if (N == 32) - return Zeroes(); - SuperVector result; - Unroller<1, 16>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_alignr_epi8( - _mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], - _MM_SHUFFLE(2, 0, 0, 1)), - v->u.v256[0], n)}; - }); - Unroller<17, 32>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm256_srli_si256( - _mm256_permute2x128_si256(v->u.v256[0], v->u.v256[0], - _MM_SHUFFLE(2, 0, 0, 1)), - n - 16)}; - }); - return result; -} - -template <> -really_inline SuperVector<32> SuperVector<32>::vshr(uint8_t const N) const { +really_inline SuperVector<32> SuperVector<32>::vshr(uint8_t const N) const +{ return vshr_256(N); } template <> -really_inline SuperVector<32> -SuperVector<32>::operator>>(uint8_t const N) const { +really_inline SuperVector<32> SuperVector<32>::operator>>(uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { - // As found here: - // https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx + // As found here: https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx if (N < 16) { - return {_mm256_alignr_epi8( - _mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(2, 0, 0, 1)), - u.v256[0], N)}; + return {_mm256_alignr_epi8(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), u.v256[0], N)}; } else if (N == 16) { - return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(2, 0, 0, 1))}; + return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1))}; } else { - return {_mm256_srli_si256( - _mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(2, 0, 0, 1)), - N - 16)}; + return {_mm256_srli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(2, 0, 0, 1)), N - 16)}; } } #endif @@ -1184,46 +1072,37 @@ SuperVector<32>::operator>>(uint8_t const N) const { } template <> -really_inline SuperVector<32> -SuperVector<32>::operator<<(uint8_t const N) const { +really_inline SuperVector<32> SuperVector<32>::operator<<(uint8_t const N) const +{ #if defined(HAVE__BUILTIN_CONSTANT_P) if (__builtin_constant_p(N)) { - // As found here: - // https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx + // As found here: https://stackoverflow.com/questions/25248766/emulating-shifts-on-32-bytes-with-avx if (N < 16) { - return {_mm256_alignr_epi8( - u.v256[0], - _mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(0, 0, 2, 0)), - 16 - N)}; + return {_mm256_alignr_epi8(u.v256[0], _mm256_permute2x128_si256(u.v256[0], u.v256[0], 
_MM_SHUFFLE(0, 0, 2, 0)), 16 - N)}; } else if (N == 16) { - return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(0, 0, 2, 0))}; + return {_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0))}; } else { - return {_mm256_slli_si256( - _mm256_permute2x128_si256(u.v256[0], u.v256[0], - _MM_SHUFFLE(0, 0, 2, 0)), - N - 16)}; + return {_mm256_slli_si256(_mm256_permute2x128_si256(u.v256[0], u.v256[0], _MM_SHUFFLE(0, 0, 2, 0)), N - 16)}; } } #endif return vshl_256(N); } -template <> -really_inline SuperVector<32> SuperVector<32>::Ones_vshr(uint8_t const N) { - if (N == 0) - return Ones(); +template<> +really_inline SuperVector<32> SuperVector<32>::Ones_vshr(uint8_t const N) +{ + if (N == 0) return Ones(); if (N >= 16) return {SuperVector<16>::Ones_vshr(N - 16), SuperVector<16>::Zeroes()}; else return {SuperVector<16>::Ones(), SuperVector<16>::Ones_vshr(N)}; } -template <> -really_inline SuperVector<32> SuperVector<32>::Ones_vshl(uint8_t const N) { - if (N == 0) - return Ones(); +template<> +really_inline SuperVector<32> SuperVector<32>::Ones_vshl(uint8_t const N) +{ + if (N == 0) return Ones(); if (N >= 16) return {SuperVector<16>::Zeroes(), SuperVector<16>::Ones_vshl(N - 16)}; else @@ -1231,29 +1110,30 @@ really_inline SuperVector<32> SuperVector<32>::Ones_vshl(uint8_t const N) { } template <> -really_inline SuperVector<32> SuperVector<32>::loadu(void const *ptr) { +really_inline SuperVector<32> SuperVector<32>::loadu(void const *ptr) +{ return {_mm256_loadu_si256((const m256 *)ptr)}; } template <> -really_inline SuperVector<32> SuperVector<32>::load(void const *ptr) { +really_inline SuperVector<32> SuperVector<32>::load(void const *ptr) +{ assert(ISALIGNED_N(ptr, alignof(SuperVector::size))); ptr = vectorscan_assume_aligned(ptr, SuperVector::size); return {_mm256_load_si256((const m256 *)ptr)}; } template <> -really_inline SuperVector<32> SuperVector<32>::loadu_maskz(void const *ptr, - uint8_t const len) { +really_inline SuperVector<32> SuperVector<32>::loadu_maskz(void const *ptr, uint8_t const len) +{ #ifdef HAVE_AVX512 u32 mask = (~0ULL) >> (32 - len); - SuperVector<32> v = - _mm256_mask_loadu_epi8(Zeroes().u.v256[0], mask, (const m256 *)ptr); + SuperVector<32> v = _mm256_mask_loadu_epi8(Zeroes().u.v256[0], mask, (const m256 *)ptr); v.print8("v"); return v; #else DEBUG_PRINTF("len = %d", len); - SuperVector<32> mask = Ones_vshr(32 - len); + SuperVector<32> mask = Ones_vshr(32 -len); mask.print8("mask"); (Ones() >> (32 - len)).print8("mask"); SuperVector<32> v = _mm256_loadu_si256((const m256 *)ptr); @@ -1262,11 +1142,10 @@ really_inline SuperVector<32> SuperVector<32>::loadu_maskz(void const *ptr, #endif } -template <> -really_inline SuperVector<32> SuperVector<32>::alignr(SuperVector<32> &other, - int8_t offset) { -#if defined(HAVE__BUILTIN_CONSTANT_P) && \ - !(defined(__GNUC__) && ((__GNUC__ == 13) || (__GNUC__ == 14))) +template<> +really_inline SuperVector<32> SuperVector<32>::alignr(SuperVector<32> &other, int8_t offset) +{ +#if defined(HAVE__BUILTIN_CONSTANT_P) && !(defined(__GNUC__) && ((__GNUC__ == 13) || (__GNUC__ == 14))) if (__builtin_constant_p(offset)) { if (offset == 16) { return *this; @@ -1275,359 +1154,262 @@ really_inline SuperVector<32> SuperVector<32>::alignr(SuperVector<32> &other, } } #endif - // As found here: - // https://stackoverflow.com/questions/8517970/mm-alignr-epi8-palignr-equivalent-in-avx2#8637458 - switch (offset) { - case 0: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 0), - 
_mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 0)); - break; - case 1: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 1), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 1)); - break; - case 2: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 2), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 2)); - break; - case 3: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 3), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 3)); - break; - case 4: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 4), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 4)); - break; - case 5: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 5), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 5)); - break; - case 6: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 6), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 6)); - break; - case 7: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 7), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 7)); - break; - case 8: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 8), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 8)); - break; - case 9: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 9), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 9)); - break; - case 10: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 10), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 10)); - break; - case 11: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 11), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 11)); - break; - case 12: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 12), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 12)); - break; - case 13: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 13), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 13)); - break; - case 14: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 14), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 14)); - break; - case 15: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 15), - _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 15)); - break; - case 16: - return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 0), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 0)); - break; - case 17: - return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 1), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 1)); - break; - case 18: - return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 2), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 2)); - break; - case 19: - return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 3), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 3)); - break; - case 20: - return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 4), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 4)); - break; - case 21: - return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 5), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 5)); - break; - case 22: - return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 6), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 6)); - break; - case 23: - return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 7), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 7)); - break; - case 24: - return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], 
u.v128[0], 8), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 8)); - break; - case 25: - return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 9), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 9)); - break; - case 26: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[1], u.v128[0], 10), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 10)); - break; - case 27: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[1], u.v128[0], 11), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 11)); - break; - case 28: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[1], u.v128[0], 12), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 12)); - break; - case 29: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[1], u.v128[0], 13), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 13)); - break; - case 30: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[1], u.v128[0], 14), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 14)); - break; - case 31: - return _mm256_set_m128i( - _mm_alignr_epi8(u.v128[1], u.v128[0], 15), - _mm_alignr_epi8(u.v128[0], other.u.v128[1], 15)); - break; - default: - break; + // As found here: https://stackoverflow.com/questions/8517970/mm-alignr-epi8-palignr-equivalent-in-avx2#8637458 + switch (offset){ + case 0 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 0), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 0)); break; + case 1 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 1), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 1)); break; + case 2 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 2), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 2)); break; + case 3 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 3), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 3)); break; + case 4 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 4), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 4)); break; + case 5 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 5), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 5)); break; + case 6 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 6), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 6)); break; + case 7 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 7), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 7)); break; + case 8 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 8), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 8)); break; + case 9 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 9), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 9)); break; + case 10 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 10), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 10)); break; + case 11 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 11), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 11)); break; + case 12 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 12), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 12)); break; + case 13 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 13), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 13)); break; + case 14 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 14), _mm_alignr_epi8(other.u.v128[1], other.u.v128[0], 14)); break; + case 15 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[0], other.u.v128[1], 15), _mm_alignr_epi8(other.u.v128[1], 
other.u.v128[0], 15)); break; + case 16 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 0), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 0)); break; + case 17 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 1), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 1)); break; + case 18 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 2), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 2)); break; + case 19 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 3), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 3)); break; + case 20 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 4), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 4)); break; + case 21 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 5), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 5)); break; + case 22 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 6), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 6)); break; + case 23 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 7), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 7)); break; + case 24 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 8), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 8)); break; + case 25 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 9), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 9)); break; + case 26 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 10), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 10)); break; + case 27 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 11), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 11)); break; + case 28 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 12), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 12)); break; + case 29 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 13), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 13)); break; + case 30 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 14), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 14)); break; + case 31 : return _mm256_set_m128i(_mm_alignr_epi8(u.v128[1], u.v128[0], 15), _mm_alignr_epi8(u.v128[0], other.u.v128[1], 15)); break; + default: break; } return *this; } -template <> -template <> -really_inline SuperVector<32> SuperVector<32>::pshufb(SuperVector<32> b) { +template<> +template<> +really_inline SuperVector<32> SuperVector<32>::pshufb(SuperVector<32> b) +{ return {_mm256_shuffle_epi8(u.v256[0], b.u.v256[0])}; } -template <> -really_inline SuperVector<32> SuperVector<32>::pshufb_maskz(SuperVector<32> b, - uint8_t const len) { - SuperVector<32> mask = Ones_vshr(32 - len); +template<> +really_inline SuperVector<32> SuperVector<32>::pshufb_maskz(SuperVector<32> b, uint8_t const len) +{ + SuperVector<32> mask = Ones_vshr(32 -len); return mask & pshufb(b); } #endif // HAVE_AVX2 + // 512-bit AVX512 implementation #if defined(HAVE_AVX512) -template <> really_inline SuperVector<64>::SuperVector(SuperVector const &o) { +template<> +really_inline SuperVector<64>::SuperVector(SuperVector const &o) +{ u.v512[0] = o.u.v512[0]; } -template <> -really_inline SuperVector<64>::SuperVector(typename base_type::type const v) { +template<> +really_inline SuperVector<64>::SuperVector(typename base_type::type const v) +{ u.v512[0] = v; }; -template <> -template <> -really_inline SuperVector<64>::SuperVector(m256 const v) { +template<> +template<> +really_inline SuperVector<64>::SuperVector(m256 const v) +{ u.v512[0] = _mm512_broadcast_i64x4(v); }; -template <> -really_inline 
SuperVector<64>::SuperVector(m256 const lo, m256 const hi) { +template<> +really_inline SuperVector<64>::SuperVector(m256 const lo, m256 const hi) +{ u.v256[0] = lo; u.v256[1] = hi; }; -template <> -really_inline SuperVector<64>::SuperVector(SuperVector<32> const lo, - SuperVector<32> const hi) { +template<> +really_inline SuperVector<64>::SuperVector(SuperVector<32> const lo, SuperVector<32> const hi) +{ u.v256[0] = lo.u.v256[0]; u.v256[1] = hi.u.v256[0]; }; -template <> -template <> -really_inline SuperVector<64>::SuperVector(m128 const v) { +template<> +template<> +really_inline SuperVector<64>::SuperVector(m128 const v) +{ u.v512[0] = _mm512_broadcast_i32x4(v); }; -template <> -template <> -really_inline SuperVector<64>::SuperVector(int8_t const o) { +template<> +template<> +really_inline SuperVector<64>::SuperVector(int8_t const o) +{ u.v512[0] = _mm512_set1_epi8(o); } -template <> -template <> -really_inline SuperVector<64>::SuperVector(uint8_t const o) { +template<> +template<> +really_inline SuperVector<64>::SuperVector(uint8_t const o) +{ u.v512[0] = _mm512_set1_epi8(static_cast(o)); } -template <> -template <> -really_inline SuperVector<64>::SuperVector(int16_t const o) { +template<> +template<> +really_inline SuperVector<64>::SuperVector(int16_t const o) +{ u.v512[0] = _mm512_set1_epi16(o); } -template <> -template <> -really_inline SuperVector<64>::SuperVector(uint16_t const o) { +template<> +template<> +really_inline SuperVector<64>::SuperVector(uint16_t const o) +{ u.v512[0] = _mm512_set1_epi16(static_cast(o)); } -template <> -template <> -really_inline SuperVector<64>::SuperVector(int32_t const o) { +template<> +template<> +really_inline SuperVector<64>::SuperVector(int32_t const o) +{ u.v512[0] = _mm512_set1_epi32(o); } -template <> -template <> -really_inline SuperVector<64>::SuperVector(uint32_t const o) { +template<> +template<> +really_inline SuperVector<64>::SuperVector(uint32_t const o) +{ u.v512[0] = _mm512_set1_epi32(static_cast(o)); } -template <> -template <> -really_inline SuperVector<64>::SuperVector(int64_t const o) { +template<> +template<> +really_inline SuperVector<64>::SuperVector(int64_t const o) +{ u.v512[0] = _mm512_set1_epi64(o); } -template <> -template <> -really_inline SuperVector<64>::SuperVector(uint64_t const o) { +template<> +template<> +really_inline SuperVector<64>::SuperVector(uint64_t const o) +{ u.v512[0] = _mm512_set1_epi64(static_cast(o)); } // Constants -template <> really_inline SuperVector<64> SuperVector<64>::Ones(void) { +template<> +really_inline SuperVector<64> SuperVector<64>::Ones(void) +{ return {_mm512_set1_epi8(0xFF)}; } -template <> really_inline SuperVector<64> SuperVector<64>::Zeroes(void) { +template<> +really_inline SuperVector<64> SuperVector<64>::Zeroes(void) +{ return {_mm512_set1_epi8(0)}; } // Methods template <> -really_inline void SuperVector<64>::operator=(SuperVector<64> const &o) { +really_inline void SuperVector<64>::operator=(SuperVector<64> const &o) +{ u.v512[0] = o.u.v512[0]; } template <> -really_inline SuperVector<64> -SuperVector<64>::operator&(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::operator&(SuperVector<64> const &b) const +{ return {_mm512_and_si512(u.v512[0], b.u.v512[0])}; } template <> -really_inline SuperVector<64> -SuperVector<64>::operator|(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::operator|(SuperVector<64> const &b) const +{ return {_mm512_or_si512(u.v512[0], b.u.v512[0])}; } template <> -really_inline 
SuperVector<64> -SuperVector<64>::operator^(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::operator^(SuperVector<64> const &b) const +{ return {_mm512_xor_si512(u.v512[0], b.u.v512[0])}; } -template <> really_inline SuperVector<64> SuperVector<64>::operator!() const { +template <> +really_inline SuperVector<64> SuperVector<64>::operator!() const +{ return {_mm512_xor_si512(u.v512[0], u.v512[0])}; } template <> -really_inline SuperVector<64> -SuperVector<64>::opandnot(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::opandnot(SuperVector<64> const &b) const +{ return {_mm512_andnot_si512(u.v512[0], b.u.v512[0])}; } template <> -really_inline SuperVector<64> -SuperVector<64>::operator==(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::operator==(SuperVector<64> const &b) const +{ SuperVector<64>::comparemask_type mask = _mm512_cmpeq_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> -SuperVector<64>::operator!=(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::operator!=(SuperVector<64> const &b) const +{ SuperVector<64>::comparemask_type mask = _mm512_cmpneq_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> -SuperVector<64>::operator>(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::operator>(SuperVector<64> const &b) const +{ SuperVector<64>::comparemask_type mask = _mm512_cmpgt_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> -SuperVector<64>::operator<(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::operator<(SuperVector<64> const &b) const +{ SuperVector<64>::comparemask_type mask = _mm512_cmplt_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> -SuperVector<64>::operator>=(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::operator>=(SuperVector<64> const &b) const +{ SuperVector<64>::comparemask_type mask = _mm512_cmpge_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> -SuperVector<64>::operator<=(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::operator<=(SuperVector<64> const &b) const +{ SuperVector<64>::comparemask_type mask = _mm512_cmple_epi8_mask(u.v512[0], b.u.v512[0]); return {_mm512_movm_epi8(mask)}; } template <> -really_inline SuperVector<64> -SuperVector<64>::eq(SuperVector<64> const &b) const { +really_inline SuperVector<64> SuperVector<64>::eq(SuperVector<64> const &b) const +{ return (*this == b); } @@ -1663,44 +1445,51 @@ SuperVector<64>::iteration_mask( // } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_16_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshl_16_imm() const +{ return {_mm512_slli_epi16(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_32_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshl_32_imm() const +{ return {_mm512_slli_epi32(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_64_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshl_64_imm() const +{ return {_mm512_slli_epi64(u.v512[0], N)}; } 
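[Editorial note on the hunk above: the SuperVector<64> comparison operators follow a different shape from the narrower specializations because AVX-512 byte compares return a 64-bit mask register (one bit per lane) rather than a vector of 0x00/0xFF bytes, so _mm512_movm_epi8 is used to widen the mask back to the byte-vector form the rest of the API expects. A minimal standalone sketch of that round trip follows; it is illustrative code, not part of the patch, and assumes an AVX-512BW target.]

```
// Sketch of the mask-register comparison pattern used by the
// SuperVector<64> operators above (illustrative, not Vectorscan code).
// AVX-512 byte compares yield a __mmask64 instead of a vector, so
// _mm512_movm_epi8() expands the mask back to 0xFF/0x00 per byte.
// Requires AVX-512BW, e.g.: g++ -mavx512bw sketch.cpp
#include <immintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
    alignas(64) uint8_t a[64], b[64];
    for (int i = 0; i < 64; ++i) {
        a[i] = static_cast<uint8_t>(i);
        b[i] = static_cast<uint8_t>(i);
    }
    b[5] = 0xAA; // force a single mismatching lane

    __m512i va = _mm512_load_si512(a);
    __m512i vb = _mm512_load_si512(b);

    __mmask64 eq = _mm512_cmpeq_epi8_mask(va, vb); // 1 bit per equal byte
    __m512i vec  = _mm512_movm_epi8(eq);           // 0xFF where equal, 0x00 where not
    (void)vec;

    printf("eq mask = %016llx\n", (unsigned long long)eq); // bit 5 is clear
    return 0;
}
```

[The extra movm step is only needed at 64 bytes; the SSE/AVX2 specializations get a byte vector straight from their compare intrinsics, which is why their operators have no mask conversion.]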
template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_128_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshl_128_imm() const +{ return {_mm512_bslli_epi128(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_256_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshl_256_imm() const +{ return {}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_512_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshl_512_imm() const +{ return {}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshl_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshl_imm() const +{ return vshl_512_imm(); } @@ -1712,44 +1501,51 @@ really_inline SuperVector<64> SuperVector<64>::vshl_imm() const { // } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_16_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshr_16_imm() const +{ return {_mm512_srli_epi16(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_32_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshr_32_imm() const +{ return {_mm512_srli_epi32(u.v512[0], N)}; } - + template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_64_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshr_64_imm() const +{ return {_mm512_srli_epi64(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_128_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshr_128_imm() const +{ return {_mm512_bsrli_epi128(u.v512[0], N)}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_256_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshr_256_imm() const +{ return {}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_512_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshr_512_imm() const +{ return {}; } template <> -template -really_inline SuperVector<64> SuperVector<64>::vshr_imm() const { +template +really_inline SuperVector<64> SuperVector<64>::vshr_imm() const +{ return vshr_512_imm(); } @@ -1767,186 +1563,150 @@ template SuperVector<64> SuperVector<64>::vshr_128_imm<4>() const; #endif // template <> -// really_inline SuperVector<64> SuperVector<64>::vshl_8 (uint8_t const N) -// const +// really_inline SuperVector<64> SuperVector<64>::vshl_8 (uint8_t const N) const // { -// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return -// {_mm_slli_epi8(v->u.v128[0], i)}; }); if (N == 16) return Zeroes(); +// Unroller<0, 15>::iterator([&,v=this](int i) { if (N == i) return {_mm_slli_epi8(v->u.v128[0], i)}; }); +// if (N == 16) return Zeroes(); // } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_16(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 64) - return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshl_16 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 64) return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm512_slli_epi16(v->u.v512[0], n)}; - }); + Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_slli_epi16(v->u.v512[0], n)}; }); return result; } 
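[Editorial note on vshl_16 above: the runtime-count shifts rely on Vectorscan's Unroller helper to turn a variable shift count into a compile-time immediate — every candidate count is instantiated at compile time and the single branch matching N runs the intrinsic with a genuine constant. Below is a minimal sketch of the same idiom rebuilt with std::index_sequence; for_each_count and shl16_var are assumed, illustrative names, not the patch's API, and the sketch covers only 16-bit-lane left shifts.]

```
// Illustrative reconstruction of the unroll-and-dispatch idiom behind
// vshl_16() above, using std::index_sequence instead of Vectorscan's
// Unroller (for_each_count/shl16_var are made-up names). The shift
// intrinsic wants a compile-time count, so all candidates are stamped
// out at compile time and only the branch matching the runtime N fires.
// Requires C++17 and AVX-512BW, e.g.: g++ -std=c++17 -mavx512bw sketch.cpp
#include <immintrin.h>
#include <cstddef>
#include <cstdint>
#include <utility>

template <typename F, std::size_t... Is>
static inline void for_each_count(F &&f, std::index_sequence<Is...>) {
    // invoke f once per candidate count 1..sizeof...(Is)
    (f(std::integral_constant<int, static_cast<int>(Is) + 1>{}), ...);
}

static inline __m512i shl16_var(__m512i v, uint8_t N) {
    if (N == 0) return v;
    __m512i result = _mm512_setzero_si512(); // counts >= 16 clear every lane
    for_each_count([&](auto i) {
        constexpr int n = i.value;           // a true constant in each branch
        if (N == n) result = _mm512_slli_epi16(v, n);
    }, std::make_index_sequence<15>{});      // candidate counts 1..15
    return result;
}
```

[Branchy as it looks, only one compare-and-shift pair is live per call, and a compiler is free to lower the chain to a table dispatch; the 32-, 64- and 128-bit-lane variants in the surrounding hunks follow the same shape with only the intrinsic changing.]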
template <> -really_inline SuperVector<64> SuperVector<64>::vshl_32(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 64) - return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshl_32 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 64) return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm512_slli_epi32(v->u.v512[0], n)}; - }); + Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_slli_epi32(v->u.v512[0], n)}; }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_64(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 64) - return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshl_64 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 64) return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm512_slli_epi64(v->u.v512[0], n)}; - }); + Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_slli_epi64(v->u.v512[0], n)}; }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_128(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 64) - return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshl_128(uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 64) return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm512_bslli_epi128(v->u.v512[0], n)}; - }); + Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_bslli_epi128(v->u.v512[0], n)}; }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_256(uint8_t const N) const { +really_inline SuperVector<64> SuperVector<64>::vshl_256(uint8_t const N) const +{ return vshl_128(N); } template <> -really_inline SuperVector<64> SuperVector<64>::vshl_512(uint8_t const N) const { +really_inline SuperVector<64> SuperVector<64>::vshl_512(uint8_t const N) const +{ return vshl_128(N); } template <> -really_inline SuperVector<64> SuperVector<64>::vshl(uint8_t const N) const { +really_inline SuperVector<64> SuperVector<64>::vshl(uint8_t const N) const +{ return vshl_512(N); } // template <> -// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) -// const +// really_inline SuperVector<16> SuperVector<16>::vshr_8 (uint8_t const N) const // { // SuperVector<16> result; -// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) -// result = {_mm_srli_epi8(v->u.v128[0], i)}; }); if (N == 16) result = -// Zeroes(); return result; +// Unroller<0, 15>::iterator([&,v=this](uint8_t const i) { if (N == i) result = {_mm_srli_epi8(v->u.v128[0], i)}; }); +// if (N == 16) result = Zeroes(); +// return result; // } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_16(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 64) - return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshr_16 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 64) return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = 
{_mm512_srli_epi16(v->u.v512[0], n)}; - }); + Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_srli_epi16(v->u.v512[0], n)}; }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_32(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 64) - return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshr_32 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 64) return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm512_srli_epi32(v->u.v512[0], n)}; - }); + Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_srli_epi32(v->u.v512[0], n)}; }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_64(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 16) - return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshr_64 (uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 16) return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm512_srli_epi64(v->u.v512[0], n)}; - }); + Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_srli_epi64(v->u.v512[0], n)}; }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_128(uint8_t const N) const { - if (N == 0) - return *this; - if (N == 64) - return Zeroes(); +really_inline SuperVector<64> SuperVector<64>::vshr_128(uint8_t const N) const +{ + if (N == 0) return *this; + if (N == 64) return Zeroes(); SuperVector result; - Unroller<1, 64>::iterator([&, v = this](auto const i) { - constexpr uint8_t n = i.value; - if (N == n) - result = {_mm512_bsrli_epi128(v->u.v512[0], n)}; - }); + Unroller<1, 64>::iterator([&,v=this](auto const i) { constexpr uint8_t n = i.value; if (N == n) result = {_mm512_bsrli_epi128(v->u.v512[0], n)}; }); return result; } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_256(uint8_t const N) const { +really_inline SuperVector<64> SuperVector<64>::vshr_256(uint8_t const N) const +{ return vshr_128(N); } template <> -really_inline SuperVector<64> SuperVector<64>::vshr_512(uint8_t const N) const { +really_inline SuperVector<64> SuperVector<64>::vshr_512(uint8_t const N) const +{ return vshr_128(N); } template <> -really_inline SuperVector<64> SuperVector<64>::vshr(uint8_t const N) const { +really_inline SuperVector<64> SuperVector<64>::vshr(uint8_t const N) const +{ return vshr_512(N); } -template <> -really_inline SuperVector<64> SuperVector<64>::Ones_vshr(uint8_t const N) { - if (N == 0) - return Ones(); +template<> +really_inline SuperVector<64> SuperVector<64>::Ones_vshr(uint8_t const N) +{ + if (N == 0) return Ones(); if (N >= 32) return {SuperVector<32>::Ones_vshr(N - 32), SuperVector<32>::Zeroes()}; else return {SuperVector<32>::Ones(), SuperVector<32>::Ones_vshr(N)}; } -template <> -really_inline SuperVector<64> SuperVector<64>::Ones_vshl(uint8_t const N) { - if (N == 0) - return Ones(); +template<> +really_inline SuperVector<64> SuperVector<64>::Ones_vshl(uint8_t const N) +{ + if (N == 0) return Ones(); if (N >= 32) return {SuperVector<32>::Zeroes(), SuperVector<32>::Ones_vshl(N - 32)}; else @@ -1954,8 +1714,8 @@ really_inline SuperVector<64> 
SuperVector<64>::Ones_vshl(uint8_t const N) { } template <> -really_inline SuperVector<64> -SuperVector<64>::operator>>(uint8_t const N) const { +really_inline SuperVector<64> SuperVector<64>::operator>>(uint8_t const N) const +{ if (N == 0) { return *this; } else if (N < 32) { @@ -1977,8 +1737,8 @@ SuperVector<64>::operator>>(uint8_t const N) const { } template <> -really_inline SuperVector<64> -SuperVector<64>::operator<<(uint8_t const N) const { +really_inline SuperVector<64> SuperVector<64>::operator<<(uint8_t const N) const +{ if (N == 0) { return *this; } else if (N < 32) { @@ -2000,47 +1760,48 @@ SuperVector<64>::operator<<(uint8_t const N) const { } template <> -really_inline SuperVector<64> SuperVector<64>::loadu(void const *ptr) { +really_inline SuperVector<64> SuperVector<64>::loadu(void const *ptr) +{ return {_mm512_loadu_si512((const m512 *)ptr)}; } template <> -really_inline SuperVector<64> SuperVector<64>::load(void const *ptr) { +really_inline SuperVector<64> SuperVector<64>::load(void const *ptr) +{ assert(ISALIGNED_N(ptr, alignof(SuperVector::size))); ptr = vectorscan_assume_aligned(ptr, SuperVector::size); return {_mm512_load_si512((const m512 *)ptr)}; } template <> -really_inline SuperVector<64> SuperVector<64>::loadu_maskz(void const *ptr, - uint8_t const len) { +really_inline SuperVector<64> SuperVector<64>::loadu_maskz(void const *ptr, uint8_t const len) +{ u64a mask = (~0ULL) >> (64 - len); DEBUG_PRINTF("mask = %016llx\n", mask); - SuperVector<64> v = - _mm512_mask_loadu_epi8(Zeroes().u.v512[0], mask, (const m512 *)ptr); + SuperVector<64> v = _mm512_mask_loadu_epi8(Zeroes().u.v512[0], mask, (const m512 *)ptr); v.print8("v"); return v; } -template <> -template <> -really_inline SuperVector<64> SuperVector<64>::pshufb(SuperVector<64> b) { +template<> +template<> +really_inline SuperVector<64> SuperVector<64>::pshufb(SuperVector<64> b) +{ return {_mm512_shuffle_epi8(u.v512[0], b.u.v512[0])}; } -template <> -really_inline SuperVector<64> SuperVector<64>::pshufb_maskz(SuperVector<64> b, - uint8_t const len) { +template<> +really_inline SuperVector<64> SuperVector<64>::pshufb_maskz(SuperVector<64> b, uint8_t const len) +{ u64a mask = (~0ULL) >> (64 - len); DEBUG_PRINTF("mask = %016llx\n", mask); return {_mm512_maskz_shuffle_epi8(mask, u.v512[0], b.u.v512[0])}; } -template <> -really_inline SuperVector<64> SuperVector<64>::alignr(SuperVector<64> &l, - int8_t offset) { -#if defined(HAVE__BUILTIN_CONSTANT_P) && \ - !(defined(__GNUC__) && (__GNUC__ == 14)) +template<> +really_inline SuperVector<64> SuperVector<64>::alignr(SuperVector<64> &l, int8_t offset) +{ +#if defined(HAVE__BUILTIN_CONSTANT_P) && !(defined(__GNUC__) && (__GNUC__ == 14)) if (__builtin_constant_p(offset)) { if (offset == 16) { return *this; @@ -2049,21 +1810,21 @@ really_inline SuperVector<64> SuperVector<64>::alignr(SuperVector<64> &l, } } #endif - if (offset == 0) { + if(offset == 0) { return *this; - } else if (offset < 32) { + } else if (offset < 32){ SuperVector<32> lo256 = u.v256[0]; SuperVector<32> hi256 = u.v256[1]; SuperVector<32> o_lo256 = l.u.v256[0]; - SuperVector<32> carry1 = hi256.alignr(lo256, offset); - SuperVector<32> carry2 = o_lo256.alignr(hi256, offset); + SuperVector<32> carry1 = hi256.alignr(lo256,offset); + SuperVector<32> carry2 = o_lo256.alignr(hi256,offset); return SuperVector(carry1, carry2); - } else if (offset <= 64) { + } else if (offset <= 64){ SuperVector<32> hi256 = u.v256[1]; SuperVector<32> o_lo256 = l.u.v256[0]; SuperVector<32> o_hi256 = l.u.v256[1]; SuperVector<32> 
carry1 = o_lo256.alignr(hi256, offset - 32); - SuperVector<32> carry2 = o_hi256.alignr(o_lo256, offset - 32); + SuperVector<32> carry2 = o_hi256.alignr(o_lo256,offset -32); return SuperVector(carry1, carry2); } else { return *this; From 1e614dc86145a3163970efc9b9a11aa3eda88bee Mon Sep 17 00:00:00 2001 From: "G.E." Date: Wed, 17 Apr 2024 15:40:52 +0300 Subject: [PATCH 16/19] enable the rpath hack on all gcc13, and on arm/gcc12 --- cmake/osdetection.cmake | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/cmake/osdetection.cmake b/cmake/osdetection.cmake index 8bfbd3bd..2cef0b94 100644 --- a/cmake/osdetection.cmake +++ b/cmake/osdetection.cmake @@ -4,14 +4,12 @@ endif(CMAKE_SYSTEM_NAME MATCHES "Linux") if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") set(FREEBSD true) - if(ARCH_AARCH64) - set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) - #FIXME: find a nicer and more general way of doing this - if(CMAKE_C_COMPILER MATCHES "/usr/local/bin/gcc12") - set(CMAKE_BUILD_RPATH "/usr/local/lib/gcc12") - elseif(CMAKE_C_COMPILER MATCHES "/usr/local/bin/gcc13") - set(CMAKE_BUILD_RPATH "/usr/local/lib/gcc13") - endif() + set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) + #FIXME: find a nicer and more general way of doing this + if(CMAKE_C_COMPILER MATCHES "/usr/local/bin/gcc13") + set(CMAKE_BUILD_RPATH "/usr/local/lib/gcc13") + elseif(ARCH_AARCH64 AND (CMAKE_C_COMPILER MATCHES "/usr/local/bin/gcc12")) + set(CMAKE_BUILD_RPATH "/usr/local/lib/gcc12") endif() endif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") From cdc0d47cde9e350e510cbe3b56b1cac767cefdad Mon Sep 17 00:00:00 2001 From: Konstantinos Margaritis Date: Wed, 17 Apr 2024 17:23:11 +0300 Subject: [PATCH 17/19] Update SIMDe --- simde | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/simde b/simde index aae22459..416091eb 160000 --- a/simde +++ b/simde @@ -1 +1 @@ -Subproject commit aae22459fa284e9fc2b7d4b8e4571afa0418125f +Subproject commit 416091ebdb9e901b29d026633e73167d6353a0b0 From 50fdcaf35733f30ff045d2dac88909c65f612a00 Mon Sep 17 00:00:00 2001 From: "G.E." Date: Wed, 17 Apr 2024 23:03:09 +0300 Subject: [PATCH 18/19] readme edit --- README.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/README.md b/README.md index 2e68d2e6..d5f0892c 100644 --- a/README.md +++ b/README.md @@ -146,6 +146,7 @@ export CXX="/usr/pkg/gcc12/bin/g++" ``` In FreeBSD similarly, you might want to install a different compiler. +If you want to use gcc, it is recommended to use gcc12. You will also, as in NetBSD, need to install cmake, sqlite, boost and ragel packages. Using the example of gcc12 from pkg: installing the desired compiler: @@ -175,12 +176,6 @@ export CXX="/usr/local/bin/g++12" Then continue with the build as below. -A note about running in FreeBSD: if you built a dynamically linked binary -with an alternative compiler, the libraries specific to the compiler that -built the binary will probably not be found and the base distro libraries -in /lib will be found instead. Adjust LD_LIBRARY_PATH appropriately. For -example, with gcc12 installed from pkg, one would want to use -```export LD_LIBRARY_PATH=/usr/local/lib/gcc12/``` ## Configure & build From acbef47c74d842c60284e05e634dbdb6c27e9650 Mon Sep 17 00:00:00 2001 From: "G.E." 
Date: Thu, 18 Apr 2024 16:16:06 +0300 Subject: [PATCH 19/19] tiny change to readme --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index d5f0892c..483b2cad 100644 --- a/README.md +++ b/README.md @@ -165,7 +165,6 @@ the environment variables to point to this compiler: export CC="/usr/local/bin/gcc" export CXX="/usr/local/bin/g++" ``` - A further note in FreeBSD, on the PowerPC and ARM platforms, the gcc12 package installs to a slightly different name, on FreeBSD/ppc, gcc12 will be found using: