Adding bitmatchers

Anatoly Burakov 2015-12-09 11:11:49 +00:00 committed by Matthew Barr
parent 68f6849687
commit 77ff826bbf
9 changed files with 1165 additions and 0 deletions

CMakeLists.txt (modified)

@@ -437,6 +437,13 @@ set (hs_exec_SRCS
src/nfa/mpv.h
src/nfa/mpv.c
src/nfa/mpv_internal.h
src/nfa/multiaccel_common.h
src/nfa/multiaccel_doubleshift.h
src/nfa/multiaccel_doubleshiftgrab.h
src/nfa/multiaccel_long.h
src/nfa/multiaccel_longgrab.h
src/nfa/multiaccel_shift.h
src/nfa/multiaccel_shiftgrab.h
src/nfa/nfa_api.h
src/nfa/nfa_api_dispatch.c
src/nfa/nfa_internal.h

src/nfa/multiaccel_common.h (new file, 265 lines added)

@@ -0,0 +1,265 @@
/*
* Copyright (c) 2015, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Intel Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MULTIACCEL_COMMON_H_
#define MULTIACCEL_COMMON_H_
#include "config.h"
#include "ue2common.h"
#include "util/join.h"
#include "util/bitutils.h"
/*
 * When composing shifts for a match of length n, the shift distances must add
 * up to n - 1 in total.
 */
#define VARISHIFT(src, dst, len) \
do { \
(dst) &= (src) >> (len); \
} while (0)
#define STATIC_SHIFT1(x) \
do { \
(x) &= (x) >> 1; \
} while (0)
#define STATIC_SHIFT2(x) \
do { \
(x) &= (x) >> 2;\
} while (0)
#define STATIC_SHIFT4(x) \
do { \
(x) &= (x) >> 4; \
} while (0)
#define STATIC_SHIFT8(x) \
do { \
(x) &= (x) >> 8; \
} while (0)
#define SHIFT1(x) \
do {} while (0)
#define SHIFT2(x) \
do { \
STATIC_SHIFT1(x); \
} while (0)
#define SHIFT3(x) \
do { \
STATIC_SHIFT1(x); \
STATIC_SHIFT1(x); \
} while (0)
#define SHIFT4(x) \
do { \
STATIC_SHIFT1(x); \
STATIC_SHIFT2(x); \
} while (0)
#define SHIFT5(x) \
do { \
SHIFT4(x); \
STATIC_SHIFT1(x); \
} while (0)
#define SHIFT6(x) \
do { \
SHIFT4(x); \
STATIC_SHIFT2(x); \
} while (0)
#define SHIFT7(x) \
do { \
SHIFT4(x); \
STATIC_SHIFT1(x); \
STATIC_SHIFT2(x); \
} while (0)
#define SHIFT8(x) \
do { \
SHIFT4(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT9(x) \
do { \
SHIFT8(x); \
STATIC_SHIFT1(x); \
} while (0)
#define SHIFT10(x) \
do { \
SHIFT8(x); \
STATIC_SHIFT2(x); \
} while (0)
#define SHIFT11(x) \
do { \
SHIFT8(x); \
STATIC_SHIFT1(x); \
STATIC_SHIFT2(x); \
} while (0)
#define SHIFT12(x) \
do { \
SHIFT8(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT13(x) \
do { \
SHIFT8(x); \
STATIC_SHIFT1(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT14(x) \
do { \
SHIFT8(x); \
STATIC_SHIFT2(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT15(x) \
do { \
SHIFT8(x); \
STATIC_SHIFT1(x); \
STATIC_SHIFT2(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT16(x) \
do { \
SHIFT8(x); \
STATIC_SHIFT8(x); \
} while (0)
#define SHIFT17(x) \
do { \
SHIFT16(x); \
STATIC_SHIFT1(x); \
} while (0)
#define SHIFT18(x) \
do { \
SHIFT16(x); \
STATIC_SHIFT2(x); \
} while (0)
#define SHIFT19(x) \
do { \
SHIFT16(x); \
STATIC_SHIFT1(x); \
STATIC_SHIFT2(x); \
} while (0)
#define SHIFT20(x) \
do { \
SHIFT16(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT21(x) \
do { \
SHIFT16(x); \
STATIC_SHIFT1(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT22(x) \
do { \
SHIFT16(x); \
STATIC_SHIFT2(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT23(x) \
do { \
SHIFT16(x); \
STATIC_SHIFT1(x); \
STATIC_SHIFT2(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT24(x) \
do { \
SHIFT16(x); \
STATIC_SHIFT8(x); \
} while (0)
#define SHIFT25(x) \
do { \
SHIFT24(x); \
STATIC_SHIFT1(x); \
} while (0)
#define SHIFT26(x) \
do { \
SHIFT24(x); \
STATIC_SHIFT2(x); \
} while (0)
#define SHIFT27(x) \
do { \
SHIFT24(x); \
STATIC_SHIFT1(x); \
STATIC_SHIFT2(x); \
} while (0)
#define SHIFT28(x) \
do { \
SHIFT24(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT29(x) \
do { \
SHIFT24(x); \
STATIC_SHIFT1(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT30(x) \
do { \
SHIFT24(x); \
STATIC_SHIFT2(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT31(x) \
do { \
SHIFT24(x); \
STATIC_SHIFT1(x); \
STATIC_SHIFT2(x); \
STATIC_SHIFT4(x); \
} while (0)
#define SHIFT32(x) \
do { \
SHIFT24(x); \
STATIC_SHIFT8(x); \
} while (0)
/*
 * This function is used by the 32-bit multiaccel matchers. 32-bit matchers
 * operate on a 32-bit integer where the low 16 bits hold the movemask result
 * and the high 16 bits are "don't care" values, so the function is never
 * expected to return an offset of 16 or more.
 */
static really_inline
const u8 *match32(const u8 *buf, const u32 z) {
if (unlikely(z != 0)) {
u32 pos = ctz32(z);
assert(pos < 16);
return buf + pos;
}
return NULL;
}
/*
 * This function is used by the 64-bit multiaccel matchers. 64-bit matchers
 * operate on a 64-bit integer where the low 32 bits hold the movemask result
 * and the high 32 bits are "don't care" values, so the function is never
 * expected to return an offset of 32 or more.
 */
static really_inline
const u8 *match64(const u8 *buf, const u64a z) {
if (unlikely(z != 0)) {
u32 pos = ctz64(z);
assert(pos < 32);
return buf + pos;
}
return NULL;
}
#endif /* MULTIACCEL_COMMON_H_ */
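
To make the two pieces above concrete, here is a standalone sketch (not part of the commit) that applies the SHIFT4 ladder by hand and then does the match32-style trailing-zero count. Plain stdint types replace the ue2common.h ones, and the GCC/Clang builtin __builtin_ctz stands in for ctz32 from util/bitutils.h.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* bits 3..6 form a run of four, bits 8..9 a run of two */
    uint32_t z = 0x378;

    /* SHIFT4(z) is STATIC_SHIFT1 then STATIC_SHIFT2: total shift distance 3 */
    z &= z >> 1;
    z &= z >> 2;

    /* only bit 3 survives: the lowest bit of the length-4 run */
    assert(z == 0x8);

    /* match32-style epilogue: trailing-zero count gives the buffer offset */
    if (z != 0) {
        printf("first run of four starts at offset %u\n",
               (unsigned)__builtin_ctz(z)); /* prints 3 */
    }
    return 0;
}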

src/nfa/multiaccel_doubleshift.h (new file, 149 lines added)

@@ -0,0 +1,149 @@
/*
 * Copyright (c) 2015, Intel Corporation.
 * BSD 3-clause license header identical to the one in multiaccel_common.h above.
 */
#ifndef MULTIACCEL_DOUBLESHIFT_H_
#define MULTIACCEL_DOUBLESHIFT_H_
#include "multiaccel_common.h"
#define DOUBLESHIFT_MATCH(len, match_t, match_sz) \
static really_inline \
const u8 * JOIN4(doubleshiftMatch_, match_sz, _, len)(const u8 *buf, match_t z, u32 len2) {\
if (unlikely(z)) { \
match_t tmp = z; \
z |= (((match_t)1 << (len)) - 1) << (match_sz / 2); \
tmp |= (((match_t)1 << (len + len2)) - 1) << (match_sz / 2); \
VARISHIFT(z, z, len); \
VARISHIFT(tmp, tmp, len2); \
VARISHIFT(tmp, z, len); \
return JOIN(match, match_sz)(buf, z); \
} \
return NULL; \
}
#define DOUBLESHIFT_MATCH_32_DEF(n) \
DOUBLESHIFT_MATCH(n, u32, 32)
#define DOUBLESHIFT_MATCH_64_DEF(n) \
DOUBLESHIFT_MATCH(n, u64a, 64)
#define DOUBLESHIFT_MATCH_DEF(n) \
DOUBLESHIFT_MATCH_32_DEF(n) \
DOUBLESHIFT_MATCH_64_DEF(n)
DOUBLESHIFT_MATCH_DEF(1)
DOUBLESHIFT_MATCH_DEF(2)
DOUBLESHIFT_MATCH_DEF(3)
DOUBLESHIFT_MATCH_DEF(4)
DOUBLESHIFT_MATCH_DEF(5)
DOUBLESHIFT_MATCH_DEF(6)
DOUBLESHIFT_MATCH_DEF(7)
DOUBLESHIFT_MATCH_DEF(8)
DOUBLESHIFT_MATCH_DEF(9)
DOUBLESHIFT_MATCH_DEF(10)
DOUBLESHIFT_MATCH_DEF(11)
DOUBLESHIFT_MATCH_DEF(12)
DOUBLESHIFT_MATCH_DEF(13)
DOUBLESHIFT_MATCH_DEF(14)
DOUBLESHIFT_MATCH_DEF(15)
DOUBLESHIFT_MATCH_64_DEF(16)
DOUBLESHIFT_MATCH_64_DEF(17)
DOUBLESHIFT_MATCH_64_DEF(18)
DOUBLESHIFT_MATCH_64_DEF(19)
DOUBLESHIFT_MATCH_64_DEF(20)
DOUBLESHIFT_MATCH_64_DEF(21)
DOUBLESHIFT_MATCH_64_DEF(22)
DOUBLESHIFT_MATCH_64_DEF(23)
DOUBLESHIFT_MATCH_64_DEF(24)
DOUBLESHIFT_MATCH_64_DEF(25)
DOUBLESHIFT_MATCH_64_DEF(26)
DOUBLESHIFT_MATCH_64_DEF(27)
DOUBLESHIFT_MATCH_64_DEF(28)
DOUBLESHIFT_MATCH_64_DEF(29)
DOUBLESHIFT_MATCH_64_DEF(30)
DOUBLESHIFT_MATCH_64_DEF(31)
static
const UNUSED u8 * (*doubleshift_match_funcs_32[])(const u8 *buf, u32 z, u32 len2) =
{
// skip the first
0,
&doubleshiftMatch_32_1,
&doubleshiftMatch_32_2,
&doubleshiftMatch_32_3,
&doubleshiftMatch_32_4,
&doubleshiftMatch_32_5,
&doubleshiftMatch_32_6,
&doubleshiftMatch_32_7,
&doubleshiftMatch_32_8,
&doubleshiftMatch_32_9,
&doubleshiftMatch_32_10,
&doubleshiftMatch_32_11,
&doubleshiftMatch_32_12,
&doubleshiftMatch_32_13,
&doubleshiftMatch_32_14,
&doubleshiftMatch_32_15,
};
static
const UNUSED u8 * (*doubleshift_match_funcs_64[])(const u8 *buf, u64a z, u32 len2) =
{
// skip the first
0,
&doubleshiftMatch_64_1,
&doubleshiftMatch_64_2,
&doubleshiftMatch_64_3,
&doubleshiftMatch_64_4,
&doubleshiftMatch_64_5,
&doubleshiftMatch_64_6,
&doubleshiftMatch_64_7,
&doubleshiftMatch_64_8,
&doubleshiftMatch_64_9,
&doubleshiftMatch_64_10,
&doubleshiftMatch_64_11,
&doubleshiftMatch_64_12,
&doubleshiftMatch_64_13,
&doubleshiftMatch_64_14,
&doubleshiftMatch_64_15,
&doubleshiftMatch_64_16,
&doubleshiftMatch_64_17,
&doubleshiftMatch_64_18,
&doubleshiftMatch_64_19,
&doubleshiftMatch_64_20,
&doubleshiftMatch_64_21,
&doubleshiftMatch_64_22,
&doubleshiftMatch_64_23,
&doubleshiftMatch_64_24,
&doubleshiftMatch_64_25,
&doubleshiftMatch_64_26,
&doubleshiftMatch_64_27,
&doubleshiftMatch_64_28,
&doubleshiftMatch_64_29,
&doubleshiftMatch_64_30,
&doubleshiftMatch_64_31,
};
#endif /* MULTIACCEL_DOUBLESHIFT_H_ */
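
As far as I can tell from the three VARISHIFTs above, a bit at offset i survives only if mask bits i, i + len and i + len + len2 are all set, with the padding ORed into the high half standing in for mask bits at or past bit 16. A standalone check of that reading (not part of the commit), with the cast hoisted as in the macro above:

#include <assert.h>
#include <stdint.h>

/* doubleshiftMatch_32_<len> body written out as a function (len, len2 as args) */
static uint32_t doubleshift_mask(uint32_t z, uint32_t len, uint32_t len2) {
    uint32_t tmp = z;
    z   |= (((uint32_t)1 << len) - 1) << 16;          /* pad for the first offset */
    tmp |= (((uint32_t)1 << (len + len2)) - 1) << 16; /* pad for both offsets     */
    z   &= z >> len;     /* VARISHIFT(z, z, len)      */
    tmp &= tmp >> len2;  /* VARISHIFT(tmp, tmp, len2) */
    z   &= tmp >> len;   /* VARISHIFT(tmp, z, len)    */
    return z;
}

int main(void) {
    /* bits 1, 3 (= 1 + 2) and 6 (= 1 + 2 + 3) set: offset 1 survives */
    assert(doubleshift_mask((1u << 1) | (1u << 3) | (1u << 6), 2, 3) & (1u << 1));

    /* drop the middle bit and nothing survives in the low 16 bits */
    assert((doubleshift_mask((1u << 1) | (1u << 6), 2, 3) & 0xFFFF) == 0);
    return 0;
}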

src/nfa/multiaccel_doubleshiftgrab.h (new file, 152 lines added)

@@ -0,0 +1,152 @@
/*
 * Copyright (c) 2015, Intel Corporation.
 * BSD 3-clause license header identical to the one in multiaccel_common.h above.
 */
#ifndef MULTIACCEL_DOUBLESHIFTGRAB_H_
#define MULTIACCEL_DOUBLESHIFTGRAB_H_
#include "multiaccel_common.h"
#define DOUBLESHIFTGRAB_MATCH(len, match_t, match_sz) \
static really_inline \
const u8 * JOIN4(doubleshiftgrabMatch_, match_sz, _, len)(const u8 *buf, match_t z, u32 len2) {\
if (unlikely(z)) { \
match_t neg = ~z; \
match_t tmp = z; \
z |= (((match_t)1 << (len)) - 1) << (match_sz / 2); \
tmp |= (((match_t)1 << (len + len2)) - 1) << (match_sz / 2); \
neg |= (((match_t)1 << (len)) - 1) << (match_sz / 2); \
VARISHIFT(z, z, len); \
VARISHIFT(tmp, tmp, len2); \
VARISHIFT(neg, z, 1); \
VARISHIFT(tmp, z, len); \
return JOIN(match, match_sz)(buf, z); \
} \
return NULL; \
}
#define DOUBLESHIFTGRAB_MATCH_32_DEF(n) \
DOUBLESHIFTGRAB_MATCH(n, u32, 32)
#define DOUBLESHIFTGRAB_MATCH_64_DEF(n) \
DOUBLESHIFTGRAB_MATCH(n, u64a, 64)
#define DOUBLESHIFTGRAB_MATCH_DEF(n) \
DOUBLESHIFTGRAB_MATCH_32_DEF(n) \
DOUBLESHIFTGRAB_MATCH_64_DEF(n)
DOUBLESHIFTGRAB_MATCH_DEF(1)
DOUBLESHIFTGRAB_MATCH_DEF(2)
DOUBLESHIFTGRAB_MATCH_DEF(3)
DOUBLESHIFTGRAB_MATCH_DEF(4)
DOUBLESHIFTGRAB_MATCH_DEF(5)
DOUBLESHIFTGRAB_MATCH_DEF(6)
DOUBLESHIFTGRAB_MATCH_DEF(7)
DOUBLESHIFTGRAB_MATCH_DEF(8)
DOUBLESHIFTGRAB_MATCH_DEF(9)
DOUBLESHIFTGRAB_MATCH_DEF(10)
DOUBLESHIFTGRAB_MATCH_DEF(11)
DOUBLESHIFTGRAB_MATCH_DEF(12)
DOUBLESHIFTGRAB_MATCH_DEF(13)
DOUBLESHIFTGRAB_MATCH_DEF(14)
DOUBLESHIFTGRAB_MATCH_DEF(15)
DOUBLESHIFTGRAB_MATCH_64_DEF(16)
DOUBLESHIFTGRAB_MATCH_64_DEF(17)
DOUBLESHIFTGRAB_MATCH_64_DEF(18)
DOUBLESHIFTGRAB_MATCH_64_DEF(19)
DOUBLESHIFTGRAB_MATCH_64_DEF(20)
DOUBLESHIFTGRAB_MATCH_64_DEF(21)
DOUBLESHIFTGRAB_MATCH_64_DEF(22)
DOUBLESHIFTGRAB_MATCH_64_DEF(23)
DOUBLESHIFTGRAB_MATCH_64_DEF(24)
DOUBLESHIFTGRAB_MATCH_64_DEF(25)
DOUBLESHIFTGRAB_MATCH_64_DEF(26)
DOUBLESHIFTGRAB_MATCH_64_DEF(27)
DOUBLESHIFTGRAB_MATCH_64_DEF(28)
DOUBLESHIFTGRAB_MATCH_64_DEF(29)
DOUBLESHIFTGRAB_MATCH_64_DEF(30)
DOUBLESHIFTGRAB_MATCH_64_DEF(31)
static
const UNUSED u8 * (*doubleshiftgrab_match_funcs_32[])(const u8 *buf, u32 z, u32 len2) =
{
// skip the first
0,
&doubleshiftgrabMatch_32_1,
&doubleshiftgrabMatch_32_2,
&doubleshiftgrabMatch_32_3,
&doubleshiftgrabMatch_32_4,
&doubleshiftgrabMatch_32_5,
&doubleshiftgrabMatch_32_6,
&doubleshiftgrabMatch_32_7,
&doubleshiftgrabMatch_32_8,
&doubleshiftgrabMatch_32_9,
&doubleshiftgrabMatch_32_10,
&doubleshiftgrabMatch_32_11,
&doubleshiftgrabMatch_32_12,
&doubleshiftgrabMatch_32_13,
&doubleshiftgrabMatch_32_14,
&doubleshiftgrabMatch_32_15,
};
static
const UNUSED u8 * (*doubleshiftgrab_match_funcs_64[])(const u8 *buf, u64a z, u32 len2) =
{
// skip the first
0,
&doubleshiftgrabMatch_64_1,
&doubleshiftgrabMatch_64_2,
&doubleshiftgrabMatch_64_3,
&doubleshiftgrabMatch_64_4,
&doubleshiftgrabMatch_64_5,
&doubleshiftgrabMatch_64_6,
&doubleshiftgrabMatch_64_7,
&doubleshiftgrabMatch_64_8,
&doubleshiftgrabMatch_64_9,
&doubleshiftgrabMatch_64_10,
&doubleshiftgrabMatch_64_11,
&doubleshiftgrabMatch_64_12,
&doubleshiftgrabMatch_64_13,
&doubleshiftgrabMatch_64_14,
&doubleshiftgrabMatch_64_15,
&doubleshiftgrabMatch_64_16,
&doubleshiftgrabMatch_64_17,
&doubleshiftgrabMatch_64_18,
&doubleshiftgrabMatch_64_19,
&doubleshiftgrabMatch_64_20,
&doubleshiftgrabMatch_64_21,
&doubleshiftgrabMatch_64_22,
&doubleshiftgrabMatch_64_23,
&doubleshiftgrabMatch_64_24,
&doubleshiftgrabMatch_64_25,
&doubleshiftgrabMatch_64_26,
&doubleshiftgrabMatch_64_27,
&doubleshiftgrabMatch_64_28,
&doubleshiftgrabMatch_64_29,
&doubleshiftgrabMatch_64_30,
&doubleshiftgrabMatch_64_31,
};
#endif /* MULTIACCEL_DOUBLESHIFTGRAB_H_ */

src/nfa/multiaccel_long.h (new file, 145 lines added)

@@ -0,0 +1,145 @@
/*
 * Copyright (c) 2015, Intel Corporation.
 * BSD 3-clause license header identical to the one in multiaccel_common.h above.
 */
#ifndef MULTIACCEL_LONG_H_
#define MULTIACCEL_LONG_H_
#include "multiaccel_common.h"
#define LONG_MATCH(len, match_t, match_sz) \
static really_inline \
const u8 * JOIN4(longMatch_, match_sz, _, len)(const u8 *buf, match_t z) { \
if (unlikely(z)) { \
z |= (((match_t)1 << (len - 1)) - 1) << (match_sz / 2); \
JOIN(SHIFT, len)(z); \
return JOIN(match, match_sz)(buf, z); \
} \
return NULL; \
}
#define LONG_MATCH_32_DEF(n) \
LONG_MATCH(n, u32, 32)
#define LONG_MATCH_64_DEF(n) \
LONG_MATCH(n, u64a, 64)
#define LONG_MATCH_DEF(n) \
LONG_MATCH_32_DEF(n) \
LONG_MATCH_64_DEF(n)
LONG_MATCH_DEF(1)
LONG_MATCH_DEF(2)
LONG_MATCH_DEF(3)
LONG_MATCH_DEF(4)
LONG_MATCH_DEF(5)
LONG_MATCH_DEF(6)
LONG_MATCH_DEF(7)
LONG_MATCH_DEF(8)
LONG_MATCH_DEF(9)
LONG_MATCH_DEF(10)
LONG_MATCH_DEF(11)
LONG_MATCH_DEF(12)
LONG_MATCH_DEF(13)
LONG_MATCH_DEF(14)
LONG_MATCH_DEF(15)
LONG_MATCH_64_DEF(16)
LONG_MATCH_64_DEF(17)
LONG_MATCH_64_DEF(18)
LONG_MATCH_64_DEF(19)
LONG_MATCH_64_DEF(20)
LONG_MATCH_64_DEF(21)
LONG_MATCH_64_DEF(22)
LONG_MATCH_64_DEF(23)
LONG_MATCH_64_DEF(24)
LONG_MATCH_64_DEF(25)
LONG_MATCH_64_DEF(26)
LONG_MATCH_64_DEF(27)
LONG_MATCH_64_DEF(28)
LONG_MATCH_64_DEF(29)
LONG_MATCH_64_DEF(30)
LONG_MATCH_64_DEF(31)
static
const UNUSED u8 *(*long_match_funcs_32[])(const u8 *buf, u32 z) =
{
// skip the first
0,
&longMatch_32_1,
&longMatch_32_2,
&longMatch_32_3,
&longMatch_32_4,
&longMatch_32_5,
&longMatch_32_6,
&longMatch_32_7,
&longMatch_32_8,
&longMatch_32_9,
&longMatch_32_10,
&longMatch_32_11,
&longMatch_32_12,
&longMatch_32_13,
&longMatch_32_14,
&longMatch_32_15,
};
static
const UNUSED u8 *(*long_match_funcs_64[])(const u8 *buf, u64a z) =
{
// skip the first
0,
&longMatch_64_1,
&longMatch_64_2,
&longMatch_64_3,
&longMatch_64_4,
&longMatch_64_5,
&longMatch_64_6,
&longMatch_64_7,
&longMatch_64_8,
&longMatch_64_9,
&longMatch_64_10,
&longMatch_64_11,
&longMatch_64_12,
&longMatch_64_13,
&longMatch_64_14,
&longMatch_64_15,
&longMatch_64_16,
&longMatch_64_17,
&longMatch_64_18,
&longMatch_64_19,
&longMatch_64_20,
&longMatch_64_21,
&longMatch_64_22,
&longMatch_64_23,
&longMatch_64_24,
&longMatch_64_25,
&longMatch_64_26,
&longMatch_64_27,
&longMatch_64_28,
&longMatch_64_29,
&longMatch_64_30,
&longMatch_64_31,
};
#endif /* MULTIACCEL_LONG_H_ */
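
Unlike the single VARISHIFT used by the plain shift matcher, longMatch runs the full SHIFT ladder, so a surviving bit marks the start of a run of len consecutive set mask bits; the (1 << (len - 1)) - 1 padding above bit 15 appears to let a run that reaches the end of the 16-bit movemask still count. A standalone check of that reading (not part of the commit), spelling out longMatch_32_4:

#include <assert.h>
#include <stdint.h>

/* longMatch_32_4 body written out: SHIFT4 is STATIC_SHIFT1 then STATIC_SHIFT2 */
static uint32_t long_mask_4(uint32_t z) {
    z |= (((uint32_t)1 << 3) - 1) << 16; /* pad so a run may spill past bit 15 */
    z &= z >> 1;                         /* STATIC_SHIFT1 */
    z &= z >> 2;                         /* STATIC_SHIFT2 */
    return z;
}

int main(void) {
    /* a run of four set bits starting at offset 5 survives as bit 5 */
    assert(long_mask_4(0xFu << 5) & (1u << 5));

    /* two set bits right at the end of the 16-bit block also match, because
       the padding above bit 15 completes the run */
    assert(long_mask_4(0x3u << 14) & (1u << 14));
    return 0;
}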

src/nfa/multiaccel_longgrab.h (new file, 148 lines added)

@@ -0,0 +1,148 @@
/*
 * Copyright (c) 2015, Intel Corporation.
 * BSD 3-clause license header identical to the one in multiaccel_common.h above.
 */
#ifndef MULTIACCEL_LONGGRAB_H_
#define MULTIACCEL_LONGGRAB_H_
#include "multiaccel_common.h"
#define LONGGRAB_MATCH(len, match_t, match_sz) \
static really_inline \
const u8 * JOIN4(longgrabMatch_, match_sz, _, len)(const u8 *buf, match_t z) { \
if (unlikely(z)) { \
match_t tmp = ~z; \
tmp |= (((match_t)1 << (len)) - 1) << (match_sz / 2); \
z |= (((match_t)1 << (len - 1)) - 1) << (match_sz / 2); \
JOIN(SHIFT, len)(z); \
VARISHIFT(tmp, z, len); \
return JOIN(match, match_sz)(buf, z); \
} \
return NULL; \
}
#define LONGGRAB_MATCH_32_DEF(n) \
LONGGRAB_MATCH(n, u32, 32)
#define LONGGRAB_MATCH_64_DEF(n) \
LONGGRAB_MATCH(n, u64a, 64)
#define LONGGRAB_MATCH_DEF(n) \
LONGGRAB_MATCH_32_DEF(n) \
LONGGRAB_MATCH_64_DEF(n)
LONGGRAB_MATCH_DEF(1)
LONGGRAB_MATCH_DEF(2)
LONGGRAB_MATCH_DEF(3)
LONGGRAB_MATCH_DEF(4)
LONGGRAB_MATCH_DEF(5)
LONGGRAB_MATCH_DEF(6)
LONGGRAB_MATCH_DEF(7)
LONGGRAB_MATCH_DEF(8)
LONGGRAB_MATCH_DEF(9)
LONGGRAB_MATCH_DEF(10)
LONGGRAB_MATCH_DEF(11)
LONGGRAB_MATCH_DEF(12)
LONGGRAB_MATCH_DEF(13)
LONGGRAB_MATCH_DEF(14)
LONGGRAB_MATCH_DEF(15)
LONGGRAB_MATCH_64_DEF(16)
LONGGRAB_MATCH_64_DEF(17)
LONGGRAB_MATCH_64_DEF(18)
LONGGRAB_MATCH_64_DEF(19)
LONGGRAB_MATCH_64_DEF(20)
LONGGRAB_MATCH_64_DEF(21)
LONGGRAB_MATCH_64_DEF(22)
LONGGRAB_MATCH_64_DEF(23)
LONGGRAB_MATCH_64_DEF(24)
LONGGRAB_MATCH_64_DEF(25)
LONGGRAB_MATCH_64_DEF(26)
LONGGRAB_MATCH_64_DEF(27)
LONGGRAB_MATCH_64_DEF(28)
LONGGRAB_MATCH_64_DEF(29)
LONGGRAB_MATCH_64_DEF(30)
LONGGRAB_MATCH_64_DEF(31)
static
const UNUSED u8 *(*longgrab_match_funcs_32[])(const u8 *buf, u32 z) =
{
// skip the first
0,
&longgrabMatch_32_1,
&longgrabMatch_32_2,
&longgrabMatch_32_3,
&longgrabMatch_32_4,
&longgrabMatch_32_5,
&longgrabMatch_32_6,
&longgrabMatch_32_7,
&longgrabMatch_32_8,
&longgrabMatch_32_9,
&longgrabMatch_32_10,
&longgrabMatch_32_11,
&longgrabMatch_32_12,
&longgrabMatch_32_13,
&longgrabMatch_32_14,
&longgrabMatch_32_15,
};
static
const UNUSED u8 *(*longgrab_match_funcs_64[])(const u8 *buf, u64a z) =
{
// skip the first
0,
&longgrabMatch_64_1,
&longgrabMatch_64_2,
&longgrabMatch_64_3,
&longgrabMatch_64_4,
&longgrabMatch_64_5,
&longgrabMatch_64_6,
&longgrabMatch_64_7,
&longgrabMatch_64_8,
&longgrabMatch_64_9,
&longgrabMatch_64_10,
&longgrabMatch_64_11,
&longgrabMatch_64_12,
&longgrabMatch_64_13,
&longgrabMatch_64_14,
&longgrabMatch_64_15,
&longgrabMatch_64_16,
&longgrabMatch_64_17,
&longgrabMatch_64_18,
&longgrabMatch_64_19,
&longgrabMatch_64_20,
&longgrabMatch_64_21,
&longgrabMatch_64_22,
&longgrabMatch_64_23,
&longgrabMatch_64_24,
&longgrabMatch_64_25,
&longgrabMatch_64_26,
&longgrabMatch_64_27,
&longgrabMatch_64_28,
&longgrabMatch_64_29,
&longgrabMatch_64_30,
&longgrabMatch_64_31,
};
#endif /* MULTIACCEL_LONGGRAB_H_ */

src/nfa/multiaccel_shift.h (new file, 145 lines added)

@@ -0,0 +1,145 @@
/*
 * Copyright (c) 2015, Intel Corporation.
 * BSD 3-clause license header identical to the one in multiaccel_common.h above.
 */
#ifndef MULTIACCEL_SHIFT_H_
#define MULTIACCEL_SHIFT_H_
#include "multiaccel_common.h"
#define SHIFT_MATCH(len, match_t, match_sz) \
static really_inline \
const u8 * JOIN4(shiftMatch_, match_sz, _, len)(const u8 *buf, match_t z) {\
if (unlikely(z)) { \
z |= (((match_t)1 << (len)) - 1) << (match_sz / 2); \
VARISHIFT(z, z, len); \
return JOIN(match, match_sz)(buf, z); \
} \
return NULL; \
}
#define SHIFT_MATCH_32_DEF(n) \
SHIFT_MATCH(n, u32, 32)
#define SHIFT_MATCH_64_DEF(n) \
SHIFT_MATCH(n, u64a, 64)
#define SHIFT_MATCH_DEF(n) \
SHIFT_MATCH_32_DEF(n) \
SHIFT_MATCH_64_DEF(n)
SHIFT_MATCH_DEF(1)
SHIFT_MATCH_DEF(2)
SHIFT_MATCH_DEF(3)
SHIFT_MATCH_DEF(4)
SHIFT_MATCH_DEF(5)
SHIFT_MATCH_DEF(6)
SHIFT_MATCH_DEF(7)
SHIFT_MATCH_DEF(8)
SHIFT_MATCH_DEF(9)
SHIFT_MATCH_DEF(10)
SHIFT_MATCH_DEF(11)
SHIFT_MATCH_DEF(12)
SHIFT_MATCH_DEF(13)
SHIFT_MATCH_DEF(14)
SHIFT_MATCH_DEF(15)
SHIFT_MATCH_64_DEF(16)
SHIFT_MATCH_64_DEF(17)
SHIFT_MATCH_64_DEF(18)
SHIFT_MATCH_64_DEF(19)
SHIFT_MATCH_64_DEF(20)
SHIFT_MATCH_64_DEF(21)
SHIFT_MATCH_64_DEF(22)
SHIFT_MATCH_64_DEF(23)
SHIFT_MATCH_64_DEF(24)
SHIFT_MATCH_64_DEF(25)
SHIFT_MATCH_64_DEF(26)
SHIFT_MATCH_64_DEF(27)
SHIFT_MATCH_64_DEF(28)
SHIFT_MATCH_64_DEF(29)
SHIFT_MATCH_64_DEF(30)
SHIFT_MATCH_64_DEF(31)
static
const UNUSED u8 * (*shift_match_funcs_32[])(const u8 *buf, u32 z) =
{
// skip the first
0,
&shiftMatch_32_1,
&shiftMatch_32_2,
&shiftMatch_32_3,
&shiftMatch_32_4,
&shiftMatch_32_5,
&shiftMatch_32_6,
&shiftMatch_32_7,
&shiftMatch_32_8,
&shiftMatch_32_9,
&shiftMatch_32_10,
&shiftMatch_32_11,
&shiftMatch_32_12,
&shiftMatch_32_13,
&shiftMatch_32_14,
&shiftMatch_32_15,
};
static
const UNUSED u8 * (*shift_match_funcs_64[])(const u8 *buf, u64a z) =
{
// skip the first
0,
&shiftMatch_64_1,
&shiftMatch_64_2,
&shiftMatch_64_3,
&shiftMatch_64_4,
&shiftMatch_64_5,
&shiftMatch_64_6,
&shiftMatch_64_7,
&shiftMatch_64_8,
&shiftMatch_64_9,
&shiftMatch_64_10,
&shiftMatch_64_11,
&shiftMatch_64_12,
&shiftMatch_64_13,
&shiftMatch_64_14,
&shiftMatch_64_15,
&shiftMatch_64_16,
&shiftMatch_64_17,
&shiftMatch_64_18,
&shiftMatch_64_19,
&shiftMatch_64_20,
&shiftMatch_64_21,
&shiftMatch_64_22,
&shiftMatch_64_23,
&shiftMatch_64_24,
&shiftMatch_64_25,
&shiftMatch_64_26,
&shiftMatch_64_27,
&shiftMatch_64_28,
&shiftMatch_64_29,
&shiftMatch_64_30,
&shiftMatch_64_31,
};
#endif /* MULTIACCEL_SHIFT_H_ */
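
With only one VARISHIFT, a surviving bit i here needs mask bits i and i + len to both be set (bits at or above 16 are supplied by the padding). In the library the mask would come from a movemask, presumably dispatched through shift_match_funcs_32[len]. A standalone sketch of that reading (not part of the commit):

#include <assert.h>
#include <stdint.h>

/* shiftMatch_32_<len> body written out as a function (len as an argument) */
static uint32_t shift_mask(uint32_t z, uint32_t len) {
    z |= (((uint32_t)1 << len) - 1) << 16; /* bits past the block count as set */
    z &= z >> len;                         /* VARISHIFT(z, z, len)             */
    return z;
}

int main(void) {
    /* bits 2 and 6 set, len = 4: offset 2 survives */
    assert(shift_mask((1u << 2) | (1u << 6), 4) & (1u << 2));

    /* bit 2 alone does not survive for len = 4 */
    assert((shift_mask(1u << 2, 4) & 0xFFFF) == 0);
    return 0;
}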

src/nfa/multiaccel_shiftgrab.h (new file, 148 lines added)

@@ -0,0 +1,148 @@
/*
 * Copyright (c) 2015, Intel Corporation.
 * BSD 3-clause license header identical to the one in multiaccel_common.h above.
 */
#ifndef MULTIACCEL_SHIFTGRAB_H_
#define MULTIACCEL_SHIFTGRAB_H_
#include "multiaccel_common.h"
#define SHIFTGRAB_MATCH(len, match_t, match_sz) \
static really_inline \
const u8 * JOIN4(shiftgrabMatch_, match_sz, _, len)(const u8 *buf, match_t z) {\
if (unlikely(z)) { \
match_t tmp = ~z; \
z |= (((match_t)1 << (len)) - 1) << (match_sz / 2); \
tmp |= (((match_t)1 << (len)) - 1) << (match_sz / 2); \
VARISHIFT(z, z, len); \
VARISHIFT(tmp, z, 1); \
return JOIN(match, match_sz)(buf, z); \
} \
return NULL; \
}
#define SHIFTGRAB_MATCH_32_DEF(n) \
SHIFTGRAB_MATCH(n, u32, 32)
#define SHIFTGRAB_MATCH_64_DEF(n) \
SHIFTGRAB_MATCH(n, u64a, 64)
#define SHIFTGRAB_MATCH_DEF(n) \
SHIFTGRAB_MATCH_32_DEF(n) \
SHIFTGRAB_MATCH_64_DEF(n)
SHIFTGRAB_MATCH_DEF(1)
SHIFTGRAB_MATCH_DEF(2)
SHIFTGRAB_MATCH_DEF(3)
SHIFTGRAB_MATCH_DEF(4)
SHIFTGRAB_MATCH_DEF(5)
SHIFTGRAB_MATCH_DEF(6)
SHIFTGRAB_MATCH_DEF(7)
SHIFTGRAB_MATCH_DEF(8)
SHIFTGRAB_MATCH_DEF(9)
SHIFTGRAB_MATCH_DEF(10)
SHIFTGRAB_MATCH_DEF(11)
SHIFTGRAB_MATCH_DEF(12)
SHIFTGRAB_MATCH_DEF(13)
SHIFTGRAB_MATCH_DEF(14)
SHIFTGRAB_MATCH_DEF(15)
SHIFTGRAB_MATCH_64_DEF(16)
SHIFTGRAB_MATCH_64_DEF(17)
SHIFTGRAB_MATCH_64_DEF(18)
SHIFTGRAB_MATCH_64_DEF(19)
SHIFTGRAB_MATCH_64_DEF(20)
SHIFTGRAB_MATCH_64_DEF(21)
SHIFTGRAB_MATCH_64_DEF(22)
SHIFTGRAB_MATCH_64_DEF(23)
SHIFTGRAB_MATCH_64_DEF(24)
SHIFTGRAB_MATCH_64_DEF(25)
SHIFTGRAB_MATCH_64_DEF(26)
SHIFTGRAB_MATCH_64_DEF(27)
SHIFTGRAB_MATCH_64_DEF(28)
SHIFTGRAB_MATCH_64_DEF(29)
SHIFTGRAB_MATCH_64_DEF(30)
SHIFTGRAB_MATCH_64_DEF(31)
static
const UNUSED u8 * (*shiftgrab_match_funcs_32[])(const u8 *buf, u32 z) =
{
// skip the first
0,
&shiftgrabMatch_32_1,
&shiftgrabMatch_32_2,
&shiftgrabMatch_32_3,
&shiftgrabMatch_32_4,
&shiftgrabMatch_32_5,
&shiftgrabMatch_32_6,
&shiftgrabMatch_32_7,
&shiftgrabMatch_32_8,
&shiftgrabMatch_32_9,
&shiftgrabMatch_32_10,
&shiftgrabMatch_32_11,
&shiftgrabMatch_32_12,
&shiftgrabMatch_32_13,
&shiftgrabMatch_32_14,
&shiftgrabMatch_32_15,
};
static
const UNUSED u8 * (*shiftgrab_match_funcs_64[])(const u8 *buf, u64a z) =
{
// skip the first
0,
&shiftgrabMatch_64_1,
&shiftgrabMatch_64_2,
&shiftgrabMatch_64_3,
&shiftgrabMatch_64_4,
&shiftgrabMatch_64_5,
&shiftgrabMatch_64_6,
&shiftgrabMatch_64_7,
&shiftgrabMatch_64_8,
&shiftgrabMatch_64_9,
&shiftgrabMatch_64_10,
&shiftgrabMatch_64_11,
&shiftgrabMatch_64_12,
&shiftgrabMatch_64_13,
&shiftgrabMatch_64_14,
&shiftgrabMatch_64_15,
&shiftgrabMatch_64_16,
&shiftgrabMatch_64_17,
&shiftgrabMatch_64_18,
&shiftgrabMatch_64_19,
&shiftgrabMatch_64_20,
&shiftgrabMatch_64_21,
&shiftgrabMatch_64_22,
&shiftgrabMatch_64_23,
&shiftgrabMatch_64_24,
&shiftgrabMatch_64_25,
&shiftgrabMatch_64_26,
&shiftgrabMatch_64_27,
&shiftgrabMatch_64_28,
&shiftgrabMatch_64_29,
&shiftgrabMatch_64_30,
&shiftgrabMatch_64_31,
};
#endif /* MULTIACCEL_SHIFTGRAB_H_ */
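
The grab variants add a negated copy of the mask: after the usual VARISHIFT, the result is also ANDed with that negation shifted down by one, so a candidate offset additionally needs the following mask bit to be clear. A standalone check of that reading (not part of the commit), spelling out the 32-bit body:

#include <assert.h>
#include <stdint.h>

/* shiftgrabMatch_32_<len> body written out as a function (len as an argument) */
static uint32_t shiftgrab_mask(uint32_t z, uint32_t len) {
    uint32_t neg = ~z;
    z   |= (((uint32_t)1 << len) - 1) << 16;
    neg |= (((uint32_t)1 << len) - 1) << 16;
    z &= z >> len;  /* VARISHIFT(z, z, len): bits i and i + len both set  */
    z &= neg >> 1;  /* VARISHIFT(neg, z, 1): and bit i + 1 clear ("grab") */
    return z;
}

int main(void) {
    /* bits 2 and 5 set, bit 3 clear, len = 3: offset 2 survives */
    assert(shiftgrab_mask((1u << 2) | (1u << 5), 3) & (1u << 2));

    /* setting bit 3 as well removes the match at offset 2 */
    assert((shiftgrab_mask((1u << 2) | (1u << 3) | (1u << 5), 3) & (1u << 2)) == 0);
    return 0;
}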

src/util/join.h (modified)

@@ -31,4 +31,10 @@
#define JOIN(x, y) JOIN_AGAIN(x, y)
#define JOIN_AGAIN(x, y) x ## y
#define JOIN3(x, y, z) JOIN_AGAIN3(x, y, z)
#define JOIN_AGAIN3(x, y, z) x ## y ## z
#define JOIN4(w, x, y, z) JOIN_AGAIN4(w, x, y, z)
#define JOIN_AGAIN4(w, x, y, z) w ## x ## y ## z
#endif
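
For completeness, a standalone illustration (not part of the commit) of how the new JOIN4 macro pastes together the per-width, per-length matcher names used in the tables above. The stand-in function is purely hypothetical; only the macro definitions are taken from the diff.

#include <stdio.h>

#define JOIN4(w, x, y, z) JOIN_AGAIN4(w, x, y, z)
#define JOIN_AGAIN4(w, x, y, z) w ## x ## y ## z

/* hypothetical stand-in for a generated matcher, just to show the pasted name */
static const char *JOIN4(shiftMatch_, 32, _, 4)(void) {
    return "called shiftMatch_32_4";
}

int main(void) {
    puts(shiftMatch_32_4()); /* the pasted token is an ordinary identifier */
    return 0;
}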