From 5dfa150635ee02959a58fcf7ee391986c7991c84 Mon Sep 17 00:00:00 2001 From: Daniel-Eisenberg <59121493+Daniel-Eisenberg@users.noreply.github.com> Date: Tue, 13 Jan 2026 17:17:18 +0200 Subject: [PATCH] Jan 06 2026 dev (#56) * sync code * sync code * sync code * sync code * sync code * sync code --------- Co-authored-by: Daniel Eisenberg Co-authored-by: Ned Wright --- .gitignore | 2 + CMakeLists.txt | 7 + attachments/envoy/1.31/config.go | 3 + attachments/envoy/1.31/filter.go | 5 +- attachments/envoy/1.31/go.mod | 40 +- attachments/envoy/1.31/go.sum | 4 +- attachments/envoy/1.31/utils.go | 3 + attachments/envoy/1.32/config.go | 5 +- attachments/envoy/1.32/filter.go | 5 +- attachments/envoy/1.32/go.mod | 42 +- attachments/envoy/1.32/go.sum | 4 +- attachments/envoy/1.32/utils.go | 3 + attachments/envoy/1.33/config.go | 5 +- attachments/envoy/1.33/filter.go | 5 +- attachments/envoy/1.33/go.mod | 42 +- attachments/envoy/1.33/go.sum | 4 +- attachments/envoy/1.33/utils.go | 3 + attachments/envoy/1.34/build_template | 2 - attachments/envoy/1.34/config.go | 51 +- attachments/envoy/1.34/filter.go | 5 +- attachments/envoy/1.34/go.mod | 16 +- attachments/envoy/1.34/go.sum | 4 +- attachments/envoy/1.34/utils.go | 3 + attachments/envoy/1.35/CMakeLists.txt | 33 + attachments/envoy/1.35/build_template | 13 + attachments/envoy/1.35/config.go | 285 +++ attachments/envoy/1.35/filter.go | 498 ++++ attachments/envoy/1.35/go.mod | 20 + attachments/envoy/1.35/go.sum | 23 + attachments/envoy/1.35/utils.go | 111 + attachments/envoy/1.36/CMakeLists.txt | 33 + attachments/envoy/1.36/build_template | 13 + attachments/envoy/1.36/config.go | 285 +++ attachments/envoy/1.36/filter.go | 498 ++++ attachments/envoy/1.36/go.mod | 20 + attachments/envoy/1.36/go.sum | 23 + attachments/envoy/1.36/utils.go | 111 + attachments/envoy/CMakeLists.txt | 2 + attachments/nano_attachment/nano_attachment.c | 10 +- .../nano_attachment/nano_attachment_io.c | 2 +- .../nano_attachment/nano_attachment_io.h | 4 +- 
.../nano_attachment_sender_thread.c | 4 +- .../nano_attachment_util.cc | 6 + .../nano_attachment/nano_compression.c | 18 +- .../nano_attachment/nano_compression.h | 20 +- .../nginx_attachment_util.cc | 34 +- .../ngx_module/async/ngx_cp_async_body.c | 148 ++ .../ngx_module/async/ngx_cp_async_body.h | 30 + .../ngx_module/async/ngx_cp_async_core.c | 2117 +++++++++++++++++ .../ngx_module/async/ngx_cp_async_core.h | 169 ++ .../async/ngx_cp_async_ctx_validation.c | 202 ++ .../async/ngx_cp_async_ctx_validation.h | 33 + .../ngx_module/async/ngx_cp_async_headers.c | 263 ++ .../ngx_module/async/ngx_cp_async_headers.h | 11 + .../ngx_module/async/ngx_cp_async_sender.c | 605 +++++ .../ngx_module/async/ngx_cp_async_sender.h | 37 + .../ngx_module/async/ngx_cp_async_types.h | 29 + .../nginx/ngx_module/ngx_cp_compression.c | 80 +- .../nginx/ngx_module/ngx_cp_compression.h | 6 +- .../nginx/ngx_module/ngx_cp_custom_response.c | 231 +- .../nginx/ngx_module/ngx_cp_custom_response.h | 12 +- .../nginx/ngx_module/ngx_cp_failing_state.h | 30 +- .../nginx/ngx_module/ngx_cp_hook_threads.c | 18 +- .../nginx/ngx_module/ngx_cp_hook_threads.h | 3 +- attachments/nginx/ngx_module/ngx_cp_hooks.c | 488 +++- attachments/nginx/ngx_module/ngx_cp_hooks.h | 34 +- .../nginx/ngx_module/ngx_cp_http_parser.h | 6 + .../nginx/ngx_module/ngx_cp_initializer.c | 112 +- attachments/nginx/ngx_module/ngx_cp_io.c | 113 +- attachments/nginx/ngx_module/ngx_cp_io.h | 56 +- attachments/nginx/ngx_module/ngx_cp_metric.c | 10 +- attachments/nginx/ngx_module/ngx_cp_metric.h | 32 +- .../nginx/ngx_module/ngx_cp_static_content.c | 2 +- .../nginx/ngx_module/ngx_cp_static_content.h | 4 +- attachments/nginx/ngx_module/ngx_cp_utils.c | 197 +- attachments/nginx/ngx_module/ngx_cp_utils.h | 88 +- .../ngx_http_cp_attachment_module.c | 113 +- .../ngx_http_cp_attachment_module.h | 8 + cmake/FindBrotli.cmake | 225 ++ .../http_configuration/http_configuration.cc | 24 +- core/compression/CMakeLists.txt | 4 + 
core/compression/compression_utils.cc | 241 +- core/include/attachments/nano_attachment.h | 40 +- .../attachments/nano_attachment_common.h | 191 +- .../attachments/nginx_attachment_common.h | 282 +-- .../attachments/nginx_attachment_util.h | 9 +- core/shmem_ipc/shmem_ipc.c | 41 + core/shmem_ipc_2/shmem_ipc.c | 29 + docker/Dockerfile | 1 + nodes/nginx_attachment/CMakeLists.txt | 6 + .../install-nginx-attachment.sh | 1 + 91 files changed, 7906 insertions(+), 804 deletions(-) create mode 100755 attachments/envoy/1.35/CMakeLists.txt create mode 100755 attachments/envoy/1.35/build_template create mode 100755 attachments/envoy/1.35/config.go create mode 100755 attachments/envoy/1.35/filter.go create mode 100644 attachments/envoy/1.35/go.mod create mode 100755 attachments/envoy/1.35/go.sum create mode 100755 attachments/envoy/1.35/utils.go create mode 100755 attachments/envoy/1.36/CMakeLists.txt create mode 100755 attachments/envoy/1.36/build_template create mode 100755 attachments/envoy/1.36/config.go create mode 100755 attachments/envoy/1.36/filter.go create mode 100644 attachments/envoy/1.36/go.mod create mode 100755 attachments/envoy/1.36/go.sum create mode 100755 attachments/envoy/1.36/utils.go create mode 100755 attachments/nginx/ngx_module/async/ngx_cp_async_body.c create mode 100755 attachments/nginx/ngx_module/async/ngx_cp_async_body.h create mode 100755 attachments/nginx/ngx_module/async/ngx_cp_async_core.c create mode 100755 attachments/nginx/ngx_module/async/ngx_cp_async_core.h create mode 100755 attachments/nginx/ngx_module/async/ngx_cp_async_ctx_validation.c create mode 100755 attachments/nginx/ngx_module/async/ngx_cp_async_ctx_validation.h create mode 100755 attachments/nginx/ngx_module/async/ngx_cp_async_headers.c create mode 100755 attachments/nginx/ngx_module/async/ngx_cp_async_headers.h create mode 100755 attachments/nginx/ngx_module/async/ngx_cp_async_sender.c create mode 100755 attachments/nginx/ngx_module/async/ngx_cp_async_sender.h create mode 
100755 attachments/nginx/ngx_module/async/ngx_cp_async_types.h create mode 100755 cmake/FindBrotli.cmake diff --git a/.gitignore b/.gitignore index 57d18ea..5c5464a 100644 --- a/.gitignore +++ b/.gitignore @@ -42,3 +42,5 @@ **/*.cbp **/CMakeScripts **/compile_commands.json + +!go.mod diff --git a/CMakeLists.txt b/CMakeLists.txt index 745f969..ecdcb61 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,6 +5,13 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wno-terminate") set(CMAKE_CXX_STANDARD 11) +list(APPEND CMAKE_MODULE_PATH + "${CMAKE_CURRENT_SOURCE_DIR}/cmake" +) + +find_package(PkgConfig REQUIRED) +find_package(Brotli REQUIRED MODULE) + include_directories(external) include_directories(core/include/attachments) diff --git a/attachments/envoy/1.31/config.go b/attachments/envoy/1.31/config.go index 3c290ba..a591b19 100755 --- a/attachments/envoy/1.31/config.go +++ b/attachments/envoy/1.31/config.go @@ -21,6 +21,9 @@ import ( ) /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include unsigned long get_thread_id() { diff --git a/attachments/envoy/1.31/filter.go b/attachments/envoy/1.31/filter.go index 6c9d306..a6c9454 100755 --- a/attachments/envoy/1.31/filter.go +++ b/attachments/envoy/1.31/filter.go @@ -1,6 +1,9 @@ package main /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include unsigned long get_thread_id_2() { @@ -257,7 +260,7 @@ func (f *filter) sendBody(buffer api.BufferInstance, is_req bool) C.AttachmentVe } - http_chunks_array := C.HttpBody{ + http_chunks_array := C.NanoHttpBody{ data: f.request_structs.http_body_data, bodies_count: C.size_t(num_of_buffers), } diff --git a/attachments/envoy/1.31/go.mod b/attachments/envoy/1.31/go.mod index 6ae9151..9af1282 100755 --- a/attachments/envoy/1.31/go.mod +++ b/attachments/envoy/1.31/go.mod @@ -1,20 +1,20 @@ -module gitlab.ngen.checkpoint.com/Ngen/agent-core/attachments/envoy - -// the version should >= 1.18 -go 1.20 - -// NOTICE: these lines could be generated automatically by "go mod tidy" 
-require ( - github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa - github.com/envoyproxy/envoy v1.31.0 - google.golang.org/protobuf v1.34.2 -) - -require github.com/go-chi/chi/v5 v5.1.0 - -require ( - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect -) +module gitlab.ngen.checkpoint.com/Ngen/agent-core/attachments/envoy + +// the version should >= 1.18 +go 1.20 + +// NOTICE: these lines could be generated automatically by "go mod tidy" +require ( + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa + github.com/envoyproxy/envoy v1.31.6 + google.golang.org/protobuf v1.34.2 +) + +require github.com/go-chi/chi/v5 v5.1.0 + +require ( + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect +) diff --git a/attachments/envoy/1.31/go.sum b/attachments/envoy/1.31/go.sum index 45bf562..eb911fa 100755 --- a/attachments/envoy/1.31/go.sum +++ b/attachments/envoy/1.31/go.sum @@ -1,7 +1,7 @@ github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= -github.com/envoyproxy/envoy v1.31.0 h1:NsTo+medzu0bMffXAjl+zKaViLOShKuIZWQnKKYq0/4= -github.com/envoyproxy/envoy v1.31.0/go.mod h1:ujBFxE543X8OePZG+FbeR9LnpBxTLu64IAU7A20EB9A= +github.com/envoyproxy/envoy v1.31.6 h1:jllNbzLILnq1/p8i0vBujOzOn0KEuDg265aqESAC4Vo= +github.com/envoyproxy/envoy v1.31.6/go.mod h1:ujBFxE543X8OePZG+FbeR9LnpBxTLu64IAU7A20EB9A= github.com/envoyproxy/protoc-gen-validate 
v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw= diff --git a/attachments/envoy/1.31/utils.go b/attachments/envoy/1.31/utils.go index 0d0c90e..a1aa323 100755 --- a/attachments/envoy/1.31/utils.go +++ b/attachments/envoy/1.31/utils.go @@ -1,6 +1,9 @@ package main /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include #include "nano_attachment_common.h" #include "nano_attachment.h" diff --git a/attachments/envoy/1.32/config.go b/attachments/envoy/1.32/config.go index 3c290ba..aac0ea6 100755 --- a/attachments/envoy/1.32/config.go +++ b/attachments/envoy/1.32/config.go @@ -21,6 +21,9 @@ import ( ) /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include unsigned long get_thread_id() { @@ -279,4 +282,4 @@ func ConfigFactory(c interface{}, callbacks api.FilterCallbackHandler) api.Strea } } -func main() {} +func main() {} \ No newline at end of file diff --git a/attachments/envoy/1.32/filter.go b/attachments/envoy/1.32/filter.go index 1a47336..6a12de8 100755 --- a/attachments/envoy/1.32/filter.go +++ b/attachments/envoy/1.32/filter.go @@ -1,6 +1,9 @@ package main /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include unsigned long get_thread_id_2() { @@ -257,7 +260,7 @@ func (f *filter) sendBody(buffer api.BufferInstance, is_req bool) C.AttachmentVe } - http_chunks_array := C.HttpBody{ + http_chunks_array := C.NanoHttpBody{ data: f.request_structs.http_body_data, bodies_count: C.size_t(num_of_buffers), } diff --git a/attachments/envoy/1.32/go.mod b/attachments/envoy/1.32/go.mod index c5350ff..1f2b7ad 100755 --- a/attachments/envoy/1.32/go.mod +++ b/attachments/envoy/1.32/go.mod @@ -1,22 +1,20 @@ -module gitlab.ngen.checkpoint.com/Ngen/agent-core/attachments/envoy - -// the version should >= 1.18 -go 1.22 - -toolchain go1.22.5 - -// NOTICE: these lines could be 
generated automatically by "go mod tidy" -require ( - github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa - github.com/envoyproxy/envoy v1.32.1 - google.golang.org/protobuf v1.35.1 -) - -require github.com/go-chi/chi/v5 v5.1.0 - -require ( - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect -) +module gitlab.ngen.checkpoint.com/Ngen/agent-core/attachments/envoy + +// the version should >= 1.18 +go 1.24 + +// NOTICE: these lines could be generated automatically by "go mod tidy" +require ( + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa + github.com/envoyproxy/envoy v1.32.4 + google.golang.org/protobuf v1.35.1 +) + +require github.com/go-chi/chi/v5 v5.1.0 + +require ( + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect +) diff --git a/attachments/envoy/1.32/go.sum b/attachments/envoy/1.32/go.sum index 0386434..4a6b21d 100755 --- a/attachments/envoy/1.32/go.sum +++ b/attachments/envoy/1.32/go.sum @@ -1,7 +1,7 @@ github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= -github.com/envoyproxy/envoy v1.32.1 h1:+HeajIC+S9PH3mjY/bVqJabjprqxA7h6pSQ+Ie1Ziww= -github.com/envoyproxy/envoy v1.32.1/go.mod h1:KGS+IUehDX1mSIdqodPTWskKOo7bZMLLy3GHxvOKcJk= +github.com/envoyproxy/envoy v1.32.4 h1:W57m0OqWcXAcgtn4yLsmYX1rmRNfA2hHfZMlanO2faM= +github.com/envoyproxy/envoy v1.32.4/go.mod h1:KGS+IUehDX1mSIdqodPTWskKOo7bZMLLy3GHxvOKcJk= 
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw= diff --git a/attachments/envoy/1.32/utils.go b/attachments/envoy/1.32/utils.go index 0d0c90e..a1aa323 100755 --- a/attachments/envoy/1.32/utils.go +++ b/attachments/envoy/1.32/utils.go @@ -1,6 +1,9 @@ package main /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include #include "nano_attachment_common.h" #include "nano_attachment.h" diff --git a/attachments/envoy/1.33/config.go b/attachments/envoy/1.33/config.go index 3c290ba..aac0ea6 100755 --- a/attachments/envoy/1.33/config.go +++ b/attachments/envoy/1.33/config.go @@ -21,6 +21,9 @@ import ( ) /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include unsigned long get_thread_id() { @@ -279,4 +282,4 @@ func ConfigFactory(c interface{}, callbacks api.FilterCallbackHandler) api.Strea } } -func main() {} +func main() {} \ No newline at end of file diff --git a/attachments/envoy/1.33/filter.go b/attachments/envoy/1.33/filter.go index 1a47336..6a12de8 100755 --- a/attachments/envoy/1.33/filter.go +++ b/attachments/envoy/1.33/filter.go @@ -1,6 +1,9 @@ package main /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include unsigned long get_thread_id_2() { @@ -257,7 +260,7 @@ func (f *filter) sendBody(buffer api.BufferInstance, is_req bool) C.AttachmentVe } - http_chunks_array := C.HttpBody{ + http_chunks_array := C.NanoHttpBody{ data: f.request_structs.http_body_data, bodies_count: C.size_t(num_of_buffers), } diff --git a/attachments/envoy/1.33/go.mod b/attachments/envoy/1.33/go.mod index 0f67560..192e802 100755 --- a/attachments/envoy/1.33/go.mod +++ b/attachments/envoy/1.33/go.mod @@ -1,22 +1,20 @@ -module gitlab.ngen.checkpoint.com/Ngen/agent-core/attachments/envoy - -// the version should >= 1.18 -go 1.22 - -toolchain go1.22.5 
- -// NOTICE: these lines could be generated automatically by "go mod tidy" -require ( - github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa - github.com/envoyproxy/envoy v1.33.0 - google.golang.org/protobuf v1.36.1 -) - -require github.com/go-chi/chi/v5 v5.1.0 - -require ( - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect -) +module gitlab.ngen.checkpoint.com/Ngen/agent-core/attachments/envoy + +// the version should >= 1.18 +go 1.24 + +// NOTICE: these lines could be generated automatically by "go mod tidy" +require ( + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa + github.com/envoyproxy/envoy v1.33.1 + google.golang.org/protobuf v1.36.1 +) + +require github.com/go-chi/chi/v5 v5.1.0 + +require ( + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect +) diff --git a/attachments/envoy/1.33/go.sum b/attachments/envoy/1.33/go.sum index 2089dac..edc620a 100755 --- a/attachments/envoy/1.33/go.sum +++ b/attachments/envoy/1.33/go.sum @@ -1,7 +1,7 @@ github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= -github.com/envoyproxy/envoy v1.33.0 h1:6YYKae/owrJ29psB4ELUpXTtbjaiNSKOX36yZ4ROU2Y= -github.com/envoyproxy/envoy v1.33.0/go.mod h1:faFqv1XeNGX/ph6Zto5Culdcpk4Klxp730Q6XhWarV4= +github.com/envoyproxy/envoy v1.33.1 h1:SV/mRUHOWOtoP6ecaE4hjz77nRC263ljH5SYqxOLcD0= +github.com/envoyproxy/envoy v1.33.1/go.mod 
h1:faFqv1XeNGX/ph6Zto5Culdcpk4Klxp730Q6XhWarV4= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw= diff --git a/attachments/envoy/1.33/utils.go b/attachments/envoy/1.33/utils.go index 0d0c90e..a1aa323 100755 --- a/attachments/envoy/1.33/utils.go +++ b/attachments/envoy/1.33/utils.go @@ -1,6 +1,9 @@ package main /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include #include "nano_attachment_common.h" #include "nano_attachment.h" diff --git a/attachments/envoy/1.34/build_template b/attachments/envoy/1.34/build_template index ebbcf97..d54bb36 100755 --- a/attachments/envoy/1.34/build_template +++ b/attachments/envoy/1.34/build_template @@ -11,5 +11,3 @@ cd $ENVOY_ATTACHMENT_DIR # Run the go build command CGO_CFLAGS="-I@ATTACHMENTS_INCLUDE_DIR@ -I@NANO_ATTACHMENT_INCLUDE_DIR@" go build -o ${ENVOY_ATTACHMENT_DIR}/libenvoy_attachment.so -buildmode=c-shared -ldflags="-extldflags '-L${SHMEM_LIBRARY_DIR} -L${NANO_ATTACHMENT_LIBRARY_DIR} -L${NANO_ATTACHMENT_UTIL_LIBRARY_DIR} ${LIBRARIES}'" - - diff --git a/attachments/envoy/1.34/config.go b/attachments/envoy/1.34/config.go index a0db7ba..5a428fa 100755 --- a/attachments/envoy/1.34/config.go +++ b/attachments/envoy/1.34/config.go @@ -21,6 +21,9 @@ import ( ) /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include unsigned long get_thread_id() { @@ -89,9 +92,9 @@ func getEnvoyConcurrency() int { } func configurationServer() { - r := chi.NewRouter() + r := chi.NewRouter() - r.Get("/load-config", func(w http.ResponseWriter, r *http.Request) { + r.Get("/load-config", func(w http.ResponseWriter, r *http.Request) { mutex.Lock() defer mutex.Unlock() worker_ids := make([]int, 0) @@ -135,7 +138,7 @@ func configurationServer() { workers_reload_status[strconv.Itoa(worker_id)] = "Reload 
Configuraiton Succeded" } - response, err := json.Marshal(workers_reload_status) + response, err := json.Marshal(workers_reload_status) if err != nil { api.LogWarnf("Error while sending reponse about reload configuration. Err: %s", err.Error()) response = []byte(`{"error": "Internal Error"}`) @@ -145,11 +148,11 @@ func configurationServer() { w.WriteHeader(http.StatusInternalServerError) } - w.Header().Set("Content-Type", "application/json") - w.Write(response) - }) + w.Header().Set("Content-Type", "application/json") + w.Write(response) + }) - http.ListenAndServe(":8119", r) + http.ListenAndServe(":8119", r) } func init() { @@ -158,9 +161,9 @@ func init() { go configurationServer() } -type config struct {} +type config struct{} -type parser struct {} +type parser struct{} func sendKeepAlive() { for { @@ -175,14 +178,14 @@ func sendKeepAlive() { } func (p *parser) initFilterStructs() *filterRequestStructs { - return &filterRequestStructs { - http_start_data: (*C.HttpRequestFilterData)(C.malloc(C.sizeof_HttpRequestFilterData)), - http_meta_data: (*C.HttpMetaData)(C.malloc(C.sizeof_HttpMetaData)), - http_headers: (*C.HttpHeaders)(C.malloc(C.sizeof_HttpHeaders)), - http_headers_data: (*C.HttpHeaderData)(C.malloc(10000 * C.sizeof_HttpHeaderData)), - http_res_headers: (*C.ResHttpHeaders)(C.malloc(C.sizeof_ResHttpHeaders)), - http_body_data: (*C.nano_str_t)(C.malloc(10000 * C.sizeof_nano_str_t)), - attachment_data: (*C.AttachmentData)(C.malloc(C.sizeof_AttachmentData)), + return &filterRequestStructs{ + http_start_data: (*C.HttpRequestFilterData)(C.malloc(C.sizeof_HttpRequestFilterData)), + http_meta_data: (*C.HttpMetaData)(C.malloc(C.sizeof_HttpMetaData)), + http_headers: (*C.HttpHeaders)(C.malloc(C.sizeof_HttpHeaders)), + http_headers_data: (*C.HttpHeaderData)(C.malloc(10000 * C.sizeof_HttpHeaderData)), + http_res_headers: (*C.ResHttpHeaders)(C.malloc(C.sizeof_ResHttpHeaders)), + http_body_data: (*C.nano_str_t)(C.malloc(10000 * C.sizeof_nano_str_t)), + attachment_data: 
(*C.AttachmentData)(C.malloc(C.sizeof_AttachmentData)), } } @@ -223,7 +226,7 @@ func (p *parser) Parse(any *anypb.Any, callbacks api.ConfigCallbackHandler) (int //mutex.Unlock() } - go func (){ + go func() { sendKeepAlive() }() @@ -270,13 +273,13 @@ func ConfigFactory(c interface{}, callbacks api.FilterCallbackHandler) api.Strea session_data := C.InitSessionData((*C.NanoAttachment)(attachment_ptr), C.SessionID(session_id)) return &filter{ - callbacks: callbacks, - config: conf, - session_id: session_id, - cp_attachment: attachment_ptr, - session_data: session_data, + callbacks: callbacks, + config: conf, + session_id: session_id, + cp_attachment: attachment_ptr, + session_data: session_data, request_structs: attachment_to_filter_request_structs[worker_id], } } -func main() {} \ No newline at end of file +func main() {} diff --git a/attachments/envoy/1.34/filter.go b/attachments/envoy/1.34/filter.go index 1a47336..6a12de8 100755 --- a/attachments/envoy/1.34/filter.go +++ b/attachments/envoy/1.34/filter.go @@ -1,6 +1,9 @@ package main /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include unsigned long get_thread_id_2() { @@ -257,7 +260,7 @@ func (f *filter) sendBody(buffer api.BufferInstance, is_req bool) C.AttachmentVe } - http_chunks_array := C.HttpBody{ + http_chunks_array := C.NanoHttpBody{ data: f.request_structs.http_body_data, bodies_count: C.size_t(num_of_buffers), } diff --git a/attachments/envoy/1.34/go.mod b/attachments/envoy/1.34/go.mod index 56170f0..5cee392 100644 --- a/attachments/envoy/1.34/go.mod +++ b/attachments/envoy/1.34/go.mod @@ -3,20 +3,18 @@ module gitlab.ngen.checkpoint.com/Ngen/agent-core/attachments/envoy // the version should >= 1.18 go 1.24 -toolchain go1.24.2 - // NOTICE: these lines could be generated automatically by "go mod tidy" require ( - github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa - github.com/envoyproxy/envoy v1.34.4 - google.golang.org/protobuf v1.36.6 + github.com/cncf/xds/go 
v0.0.0-20231128003011-0fa0005c9caa + github.com/envoyproxy/envoy v1.34.5 + google.golang.org/protobuf v1.36.6 ) require github.com/go-chi/chi/v5 v5.1.0 require ( - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect ) diff --git a/attachments/envoy/1.34/go.sum b/attachments/envoy/1.34/go.sum index 93a51f4..30b4522 100755 --- a/attachments/envoy/1.34/go.sum +++ b/attachments/envoy/1.34/go.sum @@ -1,7 +1,7 @@ github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= -github.com/envoyproxy/envoy v1.34.4 h1:C7icH8oLgy7Fx4A5AaljRrNBoLO7xeyN4XEJhYSlL1U= -github.com/envoyproxy/envoy v1.34.4/go.mod h1:A/vRPuqivdZBAr0NfT3sccV8KtY07B2PyvILAdV0qCU= +github.com/envoyproxy/envoy v1.34.5 h1:qG8j0jSapGLJDo63BgRSGGcq/Y2lq6HyM+YymJR9/Kc= +github.com/envoyproxy/envoy v1.34.5/go.mod h1:A/vRPuqivdZBAr0NfT3sccV8KtY07B2PyvILAdV0qCU= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw= diff --git a/attachments/envoy/1.34/utils.go b/attachments/envoy/1.34/utils.go index 0d0c90e..a1aa323 100755 --- a/attachments/envoy/1.34/utils.go +++ b/attachments/envoy/1.34/utils.go @@ -1,6 +1,9 @@ 
package main /* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include #include "nano_attachment_common.h" #include "nano_attachment.h" diff --git a/attachments/envoy/1.35/CMakeLists.txt b/attachments/envoy/1.35/CMakeLists.txt new file mode 100755 index 0000000..832a6de --- /dev/null +++ b/attachments/envoy/1.35/CMakeLists.txt @@ -0,0 +1,33 @@ +if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND ATTACHMENT_TYPE STREQUAL "envoy") + set(ATTACHMENTS_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/core/include/attachments) + set(NANO_ATTACHMENT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/attachments/nano_attachment) + set(SHMEM_LIBRARY_DIR ${CMAKE_BINARY_DIR}/core/shmem_ipc_2) + set(NANO_ATTACHMENT_LIBRARY_DIR ${CMAKE_BINARY_DIR}/attachments/nano_attachment) + set(NANO_ATTACHMENT_UTIL_LIBRARY_DIR ${CMAKE_BINARY_DIR}/attachments/nano_attachment/nano_attachment_util) + set(LIBRARIES "-lnano_attachment -lnano_attachment_util -lshmem_ipc_2") + set(ENVOY_ATTACHMENT_DIR ${CMAKE_CURRENT_SOURCE_DIR}) + + get_filename_component(CURRENT_DIR ${CMAKE_CURRENT_SOURCE_DIR} NAME) + + # Configure the build.sh script from the template + configure_file( + ${PROJECT_SOURCE_DIR}/attachments/envoy/${CURRENT_DIR}/build_template + ${CMAKE_BINARY_DIR}/attachments/envoy/${CURRENT_DIR}/build.sh + @ONLY + ) + + # Define a custom command to run the bash script + add_custom_target( + envoy_attachment${CURRENT_DIR} ALL + COMMAND chmod +x ${CMAKE_BINARY_DIR}/attachments/envoy/${CURRENT_DIR}/build.sh + COMMAND ${CMAKE_BINARY_DIR}/attachments/envoy/${CURRENT_DIR}/build.sh + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/attachments/envoy + COMMENT "Building envoy attachment ${CURRENT_DIR}" + ) + + add_dependencies(envoy_attachment${CURRENT_DIR} shmem_ipc_2 nano_attachment nano_attachment_util) + + install(FILES libenvoy_attachment.so DESTINATION ${CMAKE_BINARY_DIR}/attachments/envoy/${CURRENT_DIR} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ) + install(FILES libenvoy_attachment.so DESTINATION 
envoy/${CURRENT_DIR} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ) +endif() + diff --git a/attachments/envoy/1.35/build_template b/attachments/envoy/1.35/build_template new file mode 100755 index 0000000..d54bb36 --- /dev/null +++ b/attachments/envoy/1.35/build_template @@ -0,0 +1,13 @@ +#!/bin/bash + +# Set environment variables +SHMEM_LIBRARY_DIR="@SHMEM_LIBRARY_DIR@" +NANO_ATTACHMENT_LIBRARY_DIR="@NANO_ATTACHMENT_LIBRARY_DIR@" +NANO_ATTACHMENT_UTIL_LIBRARY_DIR="@NANO_ATTACHMENT_UTIL_LIBRARY_DIR@" +LIBRARIES="@LIBRARIES@" +ENVOY_ATTACHMENT_DIR="@ENVOY_ATTACHMENT_DIR@" + +cd $ENVOY_ATTACHMENT_DIR + +# Run the go build command +CGO_CFLAGS="-I@ATTACHMENTS_INCLUDE_DIR@ -I@NANO_ATTACHMENT_INCLUDE_DIR@" go build -o ${ENVOY_ATTACHMENT_DIR}/libenvoy_attachment.so -buildmode=c-shared -ldflags="-extldflags '-L${SHMEM_LIBRARY_DIR} -L${NANO_ATTACHMENT_LIBRARY_DIR} -L${NANO_ATTACHMENT_UTIL_LIBRARY_DIR} ${LIBRARIES}'" diff --git a/attachments/envoy/1.35/config.go b/attachments/envoy/1.35/config.go new file mode 100755 index 0000000..5a428fa --- /dev/null +++ b/attachments/envoy/1.35/config.go @@ -0,0 +1,285 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/go-chi/chi/v5" + + xds "github.com/cncf/xds/go/xds/type/v3" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/envoyproxy/envoy/contrib/golang/common/go/api" + envoyHttp "github.com/envoyproxy/envoy/contrib/golang/filters/http/source/go/pkg/http" +) + +/* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include + +unsigned long get_thread_id() { + return (unsigned long)pthread_self(); +} + +#include "nano_attachment_common.h" +#include "nano_initializer.h" +#include "nano_attachment.h" +*/ +import "C" + +const Name = "cp_nano_filter" +const admin_api_server_info = "http://127.0.0.1:%s/server_info" +const keep_alive_interval = 10 * time.Second + +var filter_id 
atomic.Int64 +var attachments_map map[int]*nano_attachment = nil +var thread_to_attachment_mapping map[int]int = nil +var attachment_to_thread_mapping map[int]int = nil +var attachment_to_filter_request_structs map[int]*filterRequestStructs = nil +var mutex sync.Mutex +var last_keep_alive time.Time + +type nano_attachment C.struct_NanoAttachment + +// EnvoyServerInfo represents the structure of the JSON response from /server_info +type EnvoyServerInfo struct { + Concurrency int `json:"concurrency"` +} + +func getEnvoyConcurrency() int { + concurrency_method := getEnv("CONCURRENCY_CALC", "numOfCores") + + if concurrency_method == "numOfCores" { + api.LogWarnf("using number of CPU cores") + return runtime.NumCPU() + } + + var conc_number string + + switch concurrency_method { + case "istioCpuLimit": + conc_number = getEnv("ISTIO_CPU_LIMIT", "-1") + api.LogWarnf("using istioCpuLimit, conc_number %s", conc_number) + case "custom": + conc_number = getEnv("CONCURRENCY_NUMBER", "-1") + api.LogWarnf("using custom concurrency number, conc_number %s", conc_number) + default: + api.LogWarnf("unknown concurrency method %s, using number of CPU cores", concurrency_method) + return runtime.NumCPU() + } + + if conc_number == "-1" { + api.LogWarnf("concurrency number is not set as an env variable, using number of CPU cores") + return runtime.NumCPU() + } + + conc_num, err := strconv.Atoi(conc_number) + if err != nil || conc_num <= 0 { + api.LogWarnf("error converting concurrency number %s, using number of CPU cores", conc_number) + return runtime.NumCPU() + } + + return conc_num +} + +func configurationServer() { + r := chi.NewRouter() + + r.Get("/load-config", func(w http.ResponseWriter, r *http.Request) { + mutex.Lock() + defer mutex.Unlock() + worker_ids := make([]int, 0) + workersParam := r.URL.Query().Get("workers") + num_of_workers := len(attachments_map) // concurrency + if workersParam == "" { + for i := 0; i < num_of_workers; i++ { + worker_ids = append(worker_ids, i) + } 
+ } else { + workers := strings.Split(workersParam, ",") + for _, worker := range workers { + worker_id, err := strconv.Atoi(worker) + + if worker_id >= num_of_workers { + api.LogWarnf( + "Can not load configuration of invalid worker ID %d. worker ID should be lower than: %d", + worker_id, + num_of_workers) + } + + if err != nil || worker_id >= num_of_workers { + w.WriteHeader(http.StatusBadRequest) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(fmt.Sprintf(`{"error": "invalid worker ID: %s"}`, worker))) + return + } + worker_ids = append(worker_ids, worker_id) + } + } + + workers_reload_status := make(map[string]string, len(worker_ids)) + res := C.NANO_OK + for _, worker_id := range worker_ids { + worker_reload_res := C.RestartAttachmentConfiguration((*C.NanoAttachment)(attachments_map[worker_id])) + if worker_reload_res == C.NANO_ERROR { + res = C.NANO_ERROR + workers_reload_status[strconv.Itoa(worker_id)] = "Reload Configuraiton Failed" + continue + } + workers_reload_status[strconv.Itoa(worker_id)] = "Reload Configuraiton Succeded" + } + + response, err := json.Marshal(workers_reload_status) + if err != nil { + api.LogWarnf("Error while sending reponse about reload configuration. 
Err: %s", err.Error()) + response = []byte(`{"error": "Internal Error"}`) + } + + if res == C.NANO_ERROR || err != nil { + w.WriteHeader(http.StatusInternalServerError) + } + + w.Header().Set("Content-Type", "application/json") + w.Write(response) + }) + + http.ListenAndServe(":8119", r) +} + +func init() { + last_keep_alive = time.Time{} + envoyHttp.RegisterHttpFilterFactoryAndConfigParser(Name, ConfigFactory, &parser{}) + go configurationServer() +} + +type config struct{} + +type parser struct{} + +func sendKeepAlive() { + for { + attachment_ptr := (*C.NanoAttachment)(attachments_map[0]) + if attachment_ptr == nil { + return + } + + C.SendKeepAlive(attachment_ptr) + time.Sleep(30 * time.Second) + } +} + +func (p *parser) initFilterStructs() *filterRequestStructs { + return &filterRequestStructs{ + http_start_data: (*C.HttpRequestFilterData)(C.malloc(C.sizeof_HttpRequestFilterData)), + http_meta_data: (*C.HttpMetaData)(C.malloc(C.sizeof_HttpMetaData)), + http_headers: (*C.HttpHeaders)(C.malloc(C.sizeof_HttpHeaders)), + http_headers_data: (*C.HttpHeaderData)(C.malloc(10000 * C.sizeof_HttpHeaderData)), + http_res_headers: (*C.ResHttpHeaders)(C.malloc(C.sizeof_ResHttpHeaders)), + http_body_data: (*C.nano_str_t)(C.malloc(10000 * C.sizeof_nano_str_t)), + attachment_data: (*C.AttachmentData)(C.malloc(C.sizeof_AttachmentData)), + } +} + +// Parse the filter configuration. 
We can call the ConfigCallbackHandler to control the filter's +// behavior +func (p *parser) Parse(any *anypb.Any, callbacks api.ConfigCallbackHandler) (interface{}, error) { + conf := &config{} + + if attachments_map != nil { + api.LogInfof("Waf Configuration already loaded") + return conf, nil + } + + num_of_workers := getEnvoyConcurrency() + + configStruct := &xds.TypedStruct{} + if err := any.UnmarshalTo(configStruct); err != nil { + return nil, err + } + + attachments_map = make(map[int]*nano_attachment) + attachment_to_filter_request_structs = make(map[int]*filterRequestStructs) + attachment_to_thread_mapping = make(map[int]int, 0) + thread_to_attachment_mapping = make(map[int]int, 0) + api.LogInfof("Number of worker threds: %d", num_of_workers) + for worker_id := 0; worker_id < num_of_workers; worker_id++ { + + attachment := C.InitNanoAttachment(C.uint8_t(0), C.int(worker_id), C.int(num_of_workers), C.int(C.fileno(C.stdout))) + for attachment == nil { + api.LogWarnf("attachment is nill going to sleep for two seconds and retry") + time.Sleep(2 * time.Second) + attachment = C.InitNanoAttachment(C.uint8_t(0), C.int(worker_id), C.int(num_of_workers), C.int(C.fileno(C.stdout))) + } + + //mutex.Lock() + attachments_map[worker_id] = (*nano_attachment)(attachment) + attachment_to_filter_request_structs[worker_id] = p.initFilterStructs() + //mutex.Unlock() + } + + go func() { + sendKeepAlive() + }() + + return conf, nil +} + +// Merge configuration from the inherited parent configuration +func (p *parser) Merge(parent interface{}, child interface{}) interface{} { + parentConfig := parent.(*config) + + // copy one, do not update parentConfig directly. 
+ newConfig := *parentConfig + return &newConfig +} + +func ConfigFactory(c interface{}, callbacks api.FilterCallbackHandler) api.StreamFilter { + conf, ok := c.(*config) + if !ok { + panic("unexpected config type") + } + + worker_thread_id := int(C.get_thread_id()) + api.LogDebugf("worker_thread_id: %d", worker_thread_id) + if _, ok := thread_to_attachment_mapping[int(worker_thread_id)]; !ok { + api.LogDebugf("need to add new thread to the map") + map_size := len(attachment_to_thread_mapping) + if map_size < len(attachments_map) { + attachment_to_thread_mapping[map_size] = worker_thread_id + thread_to_attachment_mapping[worker_thread_id] = map_size + api.LogDebugf("len(attachment_to_thread_mapping): %d", len(attachment_to_thread_mapping)) + api.LogDebugf("thread_to_attachment_mapping: %v", thread_to_attachment_mapping) + api.LogDebugf("attachment_to_thread_mapping: %v", attachment_to_thread_mapping) + } else { + panic("unexpected thread id") + } + } + + worker_id := thread_to_attachment_mapping[int(worker_thread_id)] + api.LogDebugf("worker_id: %d", worker_id) + + filter_id.Add(1) + session_id := filter_id.Load() + attachment_ptr := attachments_map[worker_id] + session_data := C.InitSessionData((*C.NanoAttachment)(attachment_ptr), C.SessionID(session_id)) + + return &filter{ + callbacks: callbacks, + config: conf, + session_id: session_id, + cp_attachment: attachment_ptr, + session_data: session_data, + request_structs: attachment_to_filter_request_structs[worker_id], + } +} + +func main() {} diff --git a/attachments/envoy/1.35/filter.go b/attachments/envoy/1.35/filter.go new file mode 100755 index 0000000..6a12de8 --- /dev/null +++ b/attachments/envoy/1.35/filter.go @@ -0,0 +1,498 @@ +package main + +/* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include + +unsigned long get_thread_id_2() { + return (unsigned long)pthread_self(); +} + +#include +#include +#include "nano_attachment_common.h" +#include "nano_attachment.h" + +HttpHeaderData* 
createHttpHeaderDataArray(int size) { + return (HttpHeaderData*)malloc(size * sizeof(HttpHeaderData)); +} + +HttpMetaData* createHttpMetaData() { + return (HttpMetaData*)malloc(sizeof(HttpMetaData)); +} + +void setHeaderElement(HttpHeaderData* arr, int index, nano_str_t key, nano_str_t value) { + if (arr == NULL) { + return; + } + + arr[index].key = key; + arr[index].value = value; +} +*/ +import "C" +import ( + "github.com/envoyproxy/envoy/contrib/golang/common/go/api" + + "strconv" + "strings" + "unsafe" +) + +func convertBlockPageToString(block_page C.BlockPageData) string { + block_page_size := block_page.title_prefix.len + + block_page.title.len + + block_page.body_prefix.len + + block_page.body.len + + block_page.uuid_prefix.len + + block_page.uuid.len + + block_page.uuid_suffix.len + + block_page_bytes := make([]byte, block_page_size) + + location := 0 + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.title_prefix.data), + C.size_t(block_page.title_prefix.len), + location) + + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.title.data), + C.size_t(block_page.title.len), + location) + + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.body_prefix.data), + C.size_t(block_page.body_prefix.len), + location) + + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.body.data), + C.size_t(block_page.body.len), + location) + + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.uuid_prefix.data), + C.size_t(block_page.uuid_prefix.len), + location) + + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.uuid.data), + C.size_t(block_page.uuid.len), + location) + + copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.uuid_suffix.data), + C.size_t(block_page.uuid_suffix.len), + location) + + return string(block_page_bytes) +} + +// The callbacks in the filter, like `DecodeHeaders`, can be implemented on demand. 
+// Because api.PassThroughStreamFilter provides a default implementation. +type filter struct { + api.PassThroughStreamFilter + + callbacks api.FilterCallbackHandler + path string + config *config + session_id int64 + session_data *C.HttpSessionData + cp_attachment *nano_attachment + request_structs *filterRequestStructs + body_buffer_chunk int +} + +type filterRequestStructs struct { + http_start_data *C.HttpRequestFilterData + http_meta_data *C.HttpMetaData + http_headers *C.HttpHeaders + http_headers_data *C.HttpHeaderData + http_res_headers *C.ResHttpHeaders + http_body_data *C.nano_str_t + attachment_data *C.AttachmentData +} + +func (f *filterRequestStructs) ZeroInitialize() { + if f.http_start_data != nil { + C.memset(unsafe.Pointer(f.http_start_data), 0, C.size_t(unsafe.Sizeof(*f.http_start_data))) + } + if f.http_meta_data != nil { + C.memset(unsafe.Pointer(f.http_meta_data), 0, C.size_t(unsafe.Sizeof(*f.http_meta_data))) + } + if f.http_headers != nil { + C.memset(unsafe.Pointer(f.http_headers), 0, C.size_t(unsafe.Sizeof(*f.http_headers))) + } + if f.http_headers_data != nil { + C.memset(unsafe.Pointer(f.http_headers_data), 0, C.size_t(unsafe.Sizeof(*f.http_headers_data))) + } + if f.attachment_data != nil { + C.memset(unsafe.Pointer(f.attachment_data), 0, C.size_t(unsafe.Sizeof(*f.attachment_data))) + } +} + +func (f *filter) isSessionFinalized() bool { + return C.IsSessionFinalized((*C.NanoAttachment)(f.cp_attachment), (*C.HttpSessionData)(f.session_data)) == 1 +} + +func (f *filter) sendData(data unsafe.Pointer, chunkType C.HttpChunkType) C.AttachmentVerdictResponse { + + attachment_data := f.request_structs.attachment_data + attachment_data.session_id = C.uint32_t(f.session_id) + attachment_data.chunk_type = chunkType // Adjust type as needed + attachment_data.session_data = f.session_data // Ensure `f.session_data` is compatible + attachment_data.data = C.DataBuffer(data) // Ensure `data` is compatible with `C.DataBuffer` + + return 
C.SendDataNanoAttachment((*C.NanoAttachment)(f.cp_attachment), attachment_data) +} + +func (f *filter) handleCustomResponse(verdict_response *C.AttachmentVerdictResponse) api.StatusType { + if verdict_response.web_response_data.web_response_type == C.RESPONSE_CODE_ONLY { + response_code := C.GetResponseCode((*C.AttachmentVerdictResponse)(verdict_response)) + return f.sendLocalReplyInternal(int(response_code), "", nil) + } + + if verdict_response.web_response_data.web_response_type == C.CUSTOM_WEB_RESPONSE { + headers := map[string][]string{ + "Content-Type": []string{"text/html"}, + } + block_page_parts := C.GetBlockPage( + (*C.NanoAttachment)(f.cp_attachment), + (*C.HttpSessionData)(f.session_data), + (*C.AttachmentVerdictResponse)(verdict_response)) + return f.sendLocalReplyInternal(int(block_page_parts.response_code), convertBlockPageToString(block_page_parts), headers) + } + + redirect_data := C.GetRedirectPage( + (*C.NanoAttachment)(f.cp_attachment), + (*C.HttpSessionData)(f.session_data), + (*C.AttachmentVerdictResponse)(verdict_response)) + redirect_location := redirect_data.redirect_location + + redirect_location_slice := unsafe.Slice((*byte)(unsafe.Pointer(redirect_location.data)), redirect_location.len) + headers := map[string][]string{ + "Location": []string{string(redirect_location_slice)}, + } + + return f.sendLocalReplyInternal(307, "", headers) +} + +func (f *filter) finalizeRequest(verdict_response *C.AttachmentVerdictResponse) api.StatusType { + if C.AttachmentVerdict(verdict_response.verdict) == C.ATTACHMENT_VERDICT_DROP { + return f.handleCustomResponse(verdict_response) + } + + return api.Continue +} + +func (f *filter) handleHeaders(header api.HeaderMap) { + const envoy_headers_prefix = "x-envoy" + i := 0 + header.Range(func(key, value string) bool { + if i > 10000 { + return true + } + + api.LogInfof("inserting headers: key %s, value %s", key, value) + + if strings.HasPrefix(key, envoy_headers_prefix) || + key == "x-request-id" || + key == 
":method" || + key == ":path" || + key == ":scheme" || + key == "x-forwarded-proto" { + return true + } + + if key == ":authority" { + key = "Host" + } + + key_nano_str := createNanoStrWithoutCopy(key) + value_nano_str := createNanoStrWithoutCopy(value) + C.setHeaderElement((*C.HttpHeaderData)(f.request_structs.http_headers_data), C.int(i), key_nano_str, value_nano_str) + i++ + return true + }) + + http_headers := f.request_structs.http_headers + http_headers.data = f.request_structs.http_headers_data + http_headers.headers_count = C.size_t(i) +} + +func (f *filter) sendBody(buffer api.BufferInstance, is_req bool) C.AttachmentVerdictResponse { + chunk_type := C.HTTP_REQUEST_BODY + if !is_req { + chunk_type = C.HTTP_RESPONSE_BODY + } + + data := buffer.Bytes() + data_len := len(data) + buffer_size := 8 * 1024 + + num_of_buffers := ((data_len - 1) / buffer_size) + 1 + + // TO DO: FIX THIS ASAP + if num_of_buffers > 10000 { + num_of_buffers = 10000 + } + + + for i := 0; i < num_of_buffers; i++ { + nanoStrPtr := (*C.nano_str_t)(unsafe.Pointer(uintptr(unsafe.Pointer(f.request_structs.http_body_data)) + uintptr(i)*unsafe.Sizeof(*f.request_structs.http_body_data))) + nanoStrPtr.data = (*C.uchar)(unsafe.Pointer(&data[i * buffer_size])) + + if i + 1 == num_of_buffers { + nanoStrPtr.len = C.size_t(data_len - (i * buffer_size)) + } else { + nanoStrPtr.len = C.size_t(buffer_size) + } + + } + + http_chunks_array := C.NanoHttpBody{ + data: f.request_structs.http_body_data, + bodies_count: C.size_t(num_of_buffers), + } + + api.LogInfof("sending body data: %+v", http_chunks_array) + return f.sendData(unsafe.Pointer(&http_chunks_array), C.HttpChunkType(chunk_type)) + +} + +func (f *filter) sendStartTransaction(start_transaction_data *C.HttpRequestFilterData) C.AttachmentVerdictResponse { + return f.sendData(unsafe.Pointer(&start_transaction_data), C.HTTP_REQUEST_FILTER) +} + +func (f *filter) handleStartTransaction(header api.RequestHeaderMap) { + stream_info := 
f.callbacks.StreamInfo() + + ip_location := 0 + port_location := 1 + + listening_address := stream_info.DownstreamLocalAddress() + listening_address_arr := strings.Split(listening_address, ":") + listening_port, _ := strconv.Atoi(listening_address_arr[port_location]) + + client_address := stream_info.DownstreamRemoteAddress() + client_addr_arr := strings.Split(client_address, ":") + client_port, _ := strconv.Atoi(client_addr_arr[port_location]) + + host := strings.Split(header.Host(), ":")[0] + + protocol, _ := stream_info.Protocol() + + // init start transaction struct + meta_data := f.request_structs.http_meta_data + meta_data.http_protocol = createNanoStr(protocol) + meta_data.method_name = createNanoStr(header.Method()) + meta_data.host = createNanoStr(host) + meta_data.listening_ip = createNanoStr(listening_address_arr[ip_location]) + meta_data.listening_port = C.uint16_t(listening_port) + meta_data.uri = createNanoStr(header.Path()) + meta_data.client_ip = createNanoStr(client_addr_arr[ip_location]) + meta_data.client_port = C.uint16_t(client_port) +} + +func (f *filter) sendLocalReplyInternal(ret_code int, custom_response string, headers map[string][]string) api.StatusType { + f.callbacks.DecoderFilterCallbacks().SendLocalReply(ret_code, custom_response, headers, 0, "") + return api.LocalReply +} + +func (f *filter) endInspectionPart(chunk_type C.HttpChunkType) api.StatusType { + api.LogInfof("Ending inspection for current chunk") + res := f.sendData(nil, chunk_type) + + if C.AttachmentVerdict(res.verdict) != C.ATTACHMENT_VERDICT_INSPECT { + api.LogInfof("got final verict: %v", res.verdict) + return f.finalizeRequest(&res) + } + + return api.Continue +} + +// Callbacks which are called in request path +// The endStream is true if the request doesn't have body +func (f *filter) DecodeHeaders(header api.RequestHeaderMap, endStream bool) api.StatusType { + ret := api.Continue + + defer RecoverPanic(&ret) + + if f.isSessionFinalized() { + api.LogInfof("session 
has already been inspected, no need for further inspection") + return api.Continue + } + + f.handleStartTransaction(header) + f.handleHeaders(header) + + http_start_data := f.request_structs.http_start_data + http_start_data.meta_data = f.request_structs.http_meta_data + http_start_data.req_headers = f.request_structs.http_headers + http_start_data.contains_body = C.bool(!endStream) + + res := f.sendData(unsafe.Pointer(http_start_data), C.HTTP_REQUEST_FILTER) + if C.AttachmentVerdict(res.verdict) != C.ATTACHMENT_VERDICT_INSPECT { + api.LogInfof("got final verict: %v", res.verdict) + return f.finalizeRequest(&res) + } + + return ret +} + +// DecodeData might be called multiple times during handling the request body. +// The endStream is true when handling the last piece of the body. +func (f *filter) DecodeData(buffer api.BufferInstance, endStream bool) api.StatusType { + ret := api.Continue + + defer RecoverPanic(&ret) + + if f.isSessionFinalized() { + return api.Continue + } + + if endStream && buffer.Len() == 0 { + return f.endInspectionPart(C.HttpChunkType(C.HTTP_REQUEST_END)) + } + + if buffer.Len() == 0 { + return ret + } + + res := f.sendBody(buffer, true) + if C.AttachmentVerdict(res.verdict) != C.ATTACHMENT_VERDICT_INSPECT { + api.LogInfof("got final verict: %v", res.verdict) + return f.finalizeRequest(&res) + } + + if endStream { + return f.endInspectionPart(C.HttpChunkType(C.HTTP_REQUEST_END)) + } + + return ret +} + +// Callbacks which are called in response path +// The endStream is true if the response doesn't have body +func (f *filter) EncodeHeaders(header api.ResponseHeaderMap, endStream bool) api.StatusType { + ret := api.Continue + + defer RecoverPanic(&ret) + + if f.isSessionFinalized() { + return api.Continue + } + + const content_length_key = "content-length" + const status_code_key = ":status" + + + content_length_str, _ := header.Get(content_length_key) + status_code_str, _ := header.Get(status_code_key) + content_length, _ := 
strconv.Atoi(content_length_str) + status_code, _ := strconv.Atoi(status_code_str) + + f.handleHeaders(header) + res_http_headers := f.request_structs.http_res_headers + res_http_headers.headers = f.request_structs.http_headers + res_http_headers.content_length = C.uint64_t(content_length) + res_http_headers.response_code = C.uint16_t(status_code) + + res := f.sendData(unsafe.Pointer(res_http_headers), C.HTTP_RESPONSE_HEADER) + if C.AttachmentVerdict(res.verdict) != C.ATTACHMENT_VERDICT_INSPECT { + api.LogInfof("got final verict: %v", res.verdict) + return f.finalizeRequest(&res) + } + + if endStream { + return f.endInspectionPart(C.HttpChunkType(C.HTTP_RESPONSE_END)) + } + + return ret +} + +func injectBodyChunk( + curr_modification *C.struct_NanoHttpModificationList, + body_buffer_chunk int, + buffer *api.BufferInstance) { + for curr_modification != nil { + if (int(curr_modification.modification.orig_buff_index) == body_buffer_chunk) { + mod := curr_modification.modification // type: HttpInjectData + modifications := C.GoString(curr_modification.modification_buffer) + new_buffer:= insertAtPosition((*buffer).String(), modifications, int(mod.injection_pos)) + (*buffer).SetString(new_buffer) + } + curr_modification = curr_modification.next + } +} + +// EncodeData might be called multiple times during handling the response body. +// The endStream is true when handling the last piece of the body. 
+func (f *filter) EncodeData(buffer api.BufferInstance, endStream bool) api.StatusType { + ret := api.Continue + + defer RecoverPanic(&ret) + + if f.isSessionFinalized() { + return api.Continue + } + + if endStream && buffer.Len() == 0 { + return f.endInspectionPart(C.HttpChunkType(C.HTTP_RESPONSE_END)) + } + + if buffer.Len() == 0 { + return ret + } + + res := f.sendBody(buffer, false) + injectBodyChunk(res.modifications, f.body_buffer_chunk, &buffer) + f.body_buffer_chunk++ + if C.AttachmentVerdict(res.verdict) != C.ATTACHMENT_VERDICT_INSPECT { + api.LogInfof("got final verict: %v", res.verdict) + return f.finalizeRequest(&res) + } + + if endStream { + return f.endInspectionPart(C.HttpChunkType(C.HTTP_RESPONSE_END)) + } + + return ret +} + +// ____________NOT IMPLEMENTED AT THE MOMENT____________ +func (f *filter) DecodeTrailers(trailers api.RequestTrailerMap) api.StatusType { + // support suspending & resuming the filter in a background goroutine + return api.Continue +} + +func (f *filter) EncodeTrailers(trailers api.ResponseTrailerMap) api.StatusType { + return api.Continue +} + +// OnLog is called when the HTTP stream is ended on HTTP Connection Manager filter. 
+func (f *filter) OnLog(api.RequestHeaderMap, api.RequestTrailerMap, api.ResponseHeaderMap, api.ResponseTrailerMap) {} + +// OnLogDownstreamStart is called when HTTP Connection Manager filter receives a new HTTP request +// (required the corresponding access log type is enabled) +func (f *filter) OnLogDownstreamStart(api.RequestHeaderMap) {} + +// OnLogDownstreamPeriodic is called on any HTTP Connection Manager periodic log record +// (required the corresponding access log type is enabled) +func (f *filter) OnLogDownstreamPeriodic(api.RequestHeaderMap, api.RequestTrailerMap, api.ResponseHeaderMap, api.ResponseTrailerMap) {} + +func (f *filter) OnDestroy(reason api.DestroyReason) { + freeHttpMetaDataFields(f.request_structs.http_meta_data) + f.request_structs.ZeroInitialize() + C.FiniSessionData((*C.NanoAttachment)(f.cp_attachment), f.session_data) +} diff --git a/attachments/envoy/1.35/go.mod b/attachments/envoy/1.35/go.mod new file mode 100644 index 0000000..308bf7a --- /dev/null +++ b/attachments/envoy/1.35/go.mod @@ -0,0 +1,20 @@ +module gitlab.ngen.checkpoint.com/Ngen/agent-core/attachments/envoy + +// the version should >= 1.18 +go 1.24 + +// NOTICE: these lines could be generated automatically by "go mod tidy" +require ( + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa + github.com/envoyproxy/envoy v1.35.6 + google.golang.org/protobuf v1.36.6 +) + +require github.com/go-chi/chi/v5 v5.1.0 + +require ( + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect +) diff --git a/attachments/envoy/1.35/go.sum b/attachments/envoy/1.35/go.sum new file mode 100755 index 0000000..46938dd --- /dev/null +++ b/attachments/envoy/1.35/go.sum @@ -0,0 +1,23 @@ +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa 
h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/envoyproxy/envoy v1.35.6 h1:t28s0D/Zb5TSwbDmrAbEaL1iIFY2Is4yjctKe/TwYNU= +github.com/envoyproxy/envoy v1.35.6/go.mod h1:A/vRPuqivdZBAr0NfT3sccV8KtY07B2PyvILAdV0qCU= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw= +github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= +google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= diff --git a/attachments/envoy/1.35/utils.go b/attachments/envoy/1.35/utils.go new file mode 100755 index 0000000..a1aa323 --- /dev/null +++ b/attachments/envoy/1.35/utils.go @@ -0,0 +1,111 @@ +package main + +/* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include "nano_attachment_common.h" +#include "nano_attachment.h" +#include +*/ +import "C" +import ( + "github.com/envoyproxy/envoy/contrib/golang/common/go/api" + + "reflect" + "unsafe" + "os" + "runtime" + "strconv" +) +func getEnv(key, defaultValue string) string { + value, exists := os.LookupEnv(key) + if !exists { + return defaultValue + } + return value +} + +var INSERT_POS_ERR_MSG = "Got invalid insertion position, will not insert." + +func copyToSlice(dest []byte, src unsafe.Pointer, size C.size_t, location int) int { + C.memcpy(unsafe.Pointer(&dest[location]), src, size) + return location + int(size) +} + +func newNanoStr(data []byte) *C.nano_str_t { + nanoStr := (*C.nano_str_t)(C.malloc(C.size_t(unsafe.Sizeof(C.nano_str_t{})))) + if nanoStr == nil { + panic("failed to allocate memory for nano_str_t struct") + } + + nanoStr.len = C.size_t(len(data)) + return nanoStr +} + +func insertAtPosition(buff string, injection string, pos int) string { + if pos < 0 || pos > len(buff) { + api.LogDebugf( + INSERT_POS_ERR_MSG + + " Position: " + + strconv.Itoa(pos) + + ", buffer's lenght: " + + strconv.Itoa(len(buff))) + return buff + } + return_buff := buff[:pos] + injection + buff[pos:] + return return_buff +} + +func createNanoStr(str string) C.nano_str_t { + c_str := C.CString(str) + nanoStr := C.nano_str_t{ + len: C.size_t(len(str)), + data: (*C.uchar)(unsafe.Pointer(c_str)), + } + + return nanoStr +} + +func createNanoStrWithoutCopy(str string) C.nano_str_t { + nanoStr := 
C.nano_str_t{ + len: C.size_t(len(str)), + data: (*C.uchar)(unsafe.Pointer((*(*reflect.StringHeader)(unsafe.Pointer(&str))).Data)), + } + + return nanoStr +} + +func freeNanoStr(str *C.nano_str_t) { + C.free(unsafe.Pointer(str.data)) +} + +func freeHttpMetaDataFields(meta_data *C.HttpMetaData) { + freeNanoStr(&(*meta_data).http_protocol) + freeNanoStr(&(*meta_data).method_name) + freeNanoStr(&(*meta_data).host) + freeNanoStr(&(*meta_data).listening_ip) + freeNanoStr(&(*meta_data).uri) + freeNanoStr(&(*meta_data).client_ip) +} + +func freeHeaders(header_arr *C.HttpHeaderData, header_slice []C.HttpHeaderData) { + C.free(unsafe.Pointer(header_arr)) + + for _, header := range header_slice { + freeNanoStr(&(header.key)) + freeNanoStr(&(header.value)) + } +} + +func RecoverPanic(ret *api.StatusType) { + if e := recover(); e != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + api.LogErrorf("http: panic serving: %v\n%s", e, buf) + + *ret = api.Continue + } +} diff --git a/attachments/envoy/1.36/CMakeLists.txt b/attachments/envoy/1.36/CMakeLists.txt new file mode 100755 index 0000000..832a6de --- /dev/null +++ b/attachments/envoy/1.36/CMakeLists.txt @@ -0,0 +1,33 @@ +if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND ATTACHMENT_TYPE STREQUAL "envoy") + set(ATTACHMENTS_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/core/include/attachments) + set(NANO_ATTACHMENT_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/attachments/nano_attachment) + set(SHMEM_LIBRARY_DIR ${CMAKE_BINARY_DIR}/core/shmem_ipc_2) + set(NANO_ATTACHMENT_LIBRARY_DIR ${CMAKE_BINARY_DIR}/attachments/nano_attachment) + set(NANO_ATTACHMENT_UTIL_LIBRARY_DIR ${CMAKE_BINARY_DIR}/attachments/nano_attachment/nano_attachment_util) + set(LIBRARIES "-lnano_attachment -lnano_attachment_util -lshmem_ipc_2") + set(ENVOY_ATTACHMENT_DIR ${CMAKE_CURRENT_SOURCE_DIR}) + + get_filename_component(CURRENT_DIR ${CMAKE_CURRENT_SOURCE_DIR} NAME) + + # Configure the build.sh script from the template + 
configure_file( + ${PROJECT_SOURCE_DIR}/attachments/envoy/${CURRENT_DIR}/build_template + ${CMAKE_BINARY_DIR}/attachments/envoy/${CURRENT_DIR}/build.sh + @ONLY + ) + + # Define a custom command to run the bash script + add_custom_target( + envoy_attachment${CURRENT_DIR} ALL + COMMAND chmod +x ${CMAKE_BINARY_DIR}/attachments/envoy/${CURRENT_DIR}/build.sh + COMMAND ${CMAKE_BINARY_DIR}/attachments/envoy/${CURRENT_DIR}/build.sh + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/attachments/envoy + COMMENT "Building envoy attachment ${CURRENT_DIR}" + ) + + add_dependencies(envoy_attachment${CURRENT_DIR} shmem_ipc_2 nano_attachment nano_attachment_util) + + install(FILES libenvoy_attachment.so DESTINATION ${CMAKE_BINARY_DIR}/attachments/envoy/${CURRENT_DIR} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ) + install(FILES libenvoy_attachment.so DESTINATION envoy/${CURRENT_DIR} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ) +endif() + diff --git a/attachments/envoy/1.36/build_template b/attachments/envoy/1.36/build_template new file mode 100755 index 0000000..d54bb36 --- /dev/null +++ b/attachments/envoy/1.36/build_template @@ -0,0 +1,13 @@ +#!/bin/bash + +# Set environment variables +SHMEM_LIBRARY_DIR="@SHMEM_LIBRARY_DIR@" +NANO_ATTACHMENT_LIBRARY_DIR="@NANO_ATTACHMENT_LIBRARY_DIR@" +NANO_ATTACHMENT_UTIL_LIBRARY_DIR="@NANO_ATTACHMENT_UTIL_LIBRARY_DIR@" +LIBRARIES="@LIBRARIES@" +ENVOY_ATTACHMENT_DIR="@ENVOY_ATTACHMENT_DIR@" + +cd $ENVOY_ATTACHMENT_DIR + +# Run the go build command +CGO_CFLAGS="-I@ATTACHMENTS_INCLUDE_DIR@ -I@NANO_ATTACHMENT_INCLUDE_DIR@" go build -o ${ENVOY_ATTACHMENT_DIR}/libenvoy_attachment.so -buildmode=c-shared -ldflags="-extldflags '-L${SHMEM_LIBRARY_DIR} -L${NANO_ATTACHMENT_LIBRARY_DIR} -L${NANO_ATTACHMENT_UTIL_LIBRARY_DIR} ${LIBRARIES}'" diff --git a/attachments/envoy/1.36/config.go b/attachments/envoy/1.36/config.go new file mode 100755 index 0000000..5a428fa --- /dev/null +++ 
b/attachments/envoy/1.36/config.go @@ -0,0 +1,285 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/go-chi/chi/v5" + + xds "github.com/cncf/xds/go/xds/type/v3" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/envoyproxy/envoy/contrib/golang/common/go/api" + envoyHttp "github.com/envoyproxy/envoy/contrib/golang/filters/http/source/go/pkg/http" +) + +/* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include + +unsigned long get_thread_id() { + return (unsigned long)pthread_self(); +} + +#include "nano_attachment_common.h" +#include "nano_initializer.h" +#include "nano_attachment.h" +*/ +import "C" + +const Name = "cp_nano_filter" +const admin_api_server_info = "http://127.0.0.1:%s/server_info" +const keep_alive_interval = 10 * time.Second + +var filter_id atomic.Int64 +var attachments_map map[int]*nano_attachment = nil +var thread_to_attachment_mapping map[int]int = nil +var attachment_to_thread_mapping map[int]int = nil +var attachment_to_filter_request_structs map[int]*filterRequestStructs = nil +var mutex sync.Mutex +var last_keep_alive time.Time + +type nano_attachment C.struct_NanoAttachment + +// EnvoyServerInfo represents the structure of the JSON response from /server_info +type EnvoyServerInfo struct { + Concurrency int `json:"concurrency"` +} + +func getEnvoyConcurrency() int { + concurrency_method := getEnv("CONCURRENCY_CALC", "numOfCores") + + if concurrency_method == "numOfCores" { + api.LogWarnf("using number of CPU cores") + return runtime.NumCPU() + } + + var conc_number string + + switch concurrency_method { + case "istioCpuLimit": + conc_number = getEnv("ISTIO_CPU_LIMIT", "-1") + api.LogWarnf("using istioCpuLimit, conc_number %s", conc_number) + case "custom": + conc_number = getEnv("CONCURRENCY_NUMBER", "-1") + api.LogWarnf("using custom concurrency number, conc_number %s", conc_number) + default: + api.LogWarnf("unknown 
concurrency method %s, using number of CPU cores", concurrency_method) + return runtime.NumCPU() + } + + if conc_number == "-1" { + api.LogWarnf("concurrency number is not set as an env variable, using number of CPU cores") + return runtime.NumCPU() + } + + conc_num, err := strconv.Atoi(conc_number) + if err != nil || conc_num <= 0 { + api.LogWarnf("error converting concurrency number %s, using number of CPU cores", conc_number) + return runtime.NumCPU() + } + + return conc_num +} + +func configurationServer() { + r := chi.NewRouter() + + r.Get("/load-config", func(w http.ResponseWriter, r *http.Request) { + mutex.Lock() + defer mutex.Unlock() + worker_ids := make([]int, 0) + workersParam := r.URL.Query().Get("workers") + num_of_workers := len(attachments_map) // concurrency + if workersParam == "" { + for i := 0; i < num_of_workers; i++ { + worker_ids = append(worker_ids, i) + } + } else { + workers := strings.Split(workersParam, ",") + for _, worker := range workers { + worker_id, err := strconv.Atoi(worker) + + if worker_id >= num_of_workers { + api.LogWarnf( + "Can not load configuration of invalid worker ID %d. 
worker ID should be lower than: %d",
+					worker_id,
+					num_of_workers)
+			}
+
+			if err != nil || worker_id >= num_of_workers {
+				w.WriteHeader(http.StatusBadRequest)
+				w.Header().Set("Content-Type", "application/json")
+				w.Write([]byte(fmt.Sprintf(`{"error": "invalid worker ID: %s"}`, worker)))
+				return
+			}
+			worker_ids = append(worker_ids, worker_id)
+		}
+	}
+
+	workers_reload_status := make(map[string]string, len(worker_ids))
+	res := C.NANO_OK
+	for _, worker_id := range worker_ids {
+		worker_reload_res := C.RestartAttachmentConfiguration((*C.NanoAttachment)(attachments_map[worker_id]))
+		if worker_reload_res == C.NANO_ERROR {
+			res = C.NANO_ERROR
+			workers_reload_status[strconv.Itoa(worker_id)] = "Reload Configuration Failed"
+			continue
+		}
+		workers_reload_status[strconv.Itoa(worker_id)] = "Reload Configuration Succeeded"
+	}
+
+	response, err := json.Marshal(workers_reload_status)
+	if err != nil {
+		api.LogWarnf("Error while sending response about reload configuration. Err: %s", err.Error())
+		response = []byte(`{"error": "Internal Error"}`)
+	}
+
+	if res == C.NANO_ERROR || err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(response)
+	})
+
+	http.ListenAndServe(":8119", r)
+}
+
+func init() {
+	last_keep_alive = time.Time{}
+	envoyHttp.RegisterHttpFilterFactoryAndConfigParser(Name, ConfigFactory, &parser{})
+	go configurationServer()
+}
+
+type config struct{}
+
+type parser struct{}
+
+func sendKeepAlive() {
+	for {
+		attachment_ptr := (*C.NanoAttachment)(attachments_map[0])
+		if attachment_ptr == nil {
+			return
+		}
+
+		C.SendKeepAlive(attachment_ptr)
+		time.Sleep(30 * time.Second)
+	}
+}
+
+func (p *parser) initFilterStructs() *filterRequestStructs {
+	return &filterRequestStructs{
+		http_start_data:   (*C.HttpRequestFilterData)(C.malloc(C.sizeof_HttpRequestFilterData)),
+		http_meta_data:    (*C.HttpMetaData)(C.malloc(C.sizeof_HttpMetaData)),
+		http_headers: 
(*C.HttpHeaders)(C.malloc(C.sizeof_HttpHeaders)),
+		http_headers_data: (*C.HttpHeaderData)(C.malloc(10000 * C.sizeof_HttpHeaderData)),
+		http_res_headers:  (*C.ResHttpHeaders)(C.malloc(C.sizeof_ResHttpHeaders)),
+		http_body_data:    (*C.nano_str_t)(C.malloc(10000 * C.sizeof_nano_str_t)),
+		attachment_data:   (*C.AttachmentData)(C.malloc(C.sizeof_AttachmentData)),
+	}
+}
+
+// Parse the filter configuration. We can call the ConfigCallbackHandler to control the filter's
+// behavior
+func (p *parser) Parse(any *anypb.Any, callbacks api.ConfigCallbackHandler) (interface{}, error) {
+	conf := &config{}
+
+	if attachments_map != nil {
+		api.LogInfof("Waf Configuration already loaded")
+		return conf, nil
+	}
+
+	num_of_workers := getEnvoyConcurrency()
+
+	configStruct := &xds.TypedStruct{}
+	if err := any.UnmarshalTo(configStruct); err != nil {
+		return nil, err
+	}
+
+	attachments_map = make(map[int]*nano_attachment)
+	attachment_to_filter_request_structs = make(map[int]*filterRequestStructs)
+	attachment_to_thread_mapping = make(map[int]int, 0)
+	thread_to_attachment_mapping = make(map[int]int, 0)
+	api.LogInfof("Number of worker threads: %d", num_of_workers)
+	for worker_id := 0; worker_id < num_of_workers; worker_id++ {
+
+		attachment := C.InitNanoAttachment(C.uint8_t(0), C.int(worker_id), C.int(num_of_workers), C.int(C.fileno(C.stdout)))
+		for attachment == nil {
+			api.LogWarnf("attachment is nil, going to sleep for two seconds and retry")
+			time.Sleep(2 * time.Second)
+			attachment = C.InitNanoAttachment(C.uint8_t(0), C.int(worker_id), C.int(num_of_workers), C.int(C.fileno(C.stdout)))
+		}
+
+		//mutex.Lock()
+		attachments_map[worker_id] = (*nano_attachment)(attachment)
+		attachment_to_filter_request_structs[worker_id] = p.initFilterStructs()
+		//mutex.Unlock()
+	}
+
+	go func() {
+		sendKeepAlive()
+	}()
+
+	return conf, nil
+}
+
+// Merge configuration from the inherited parent configuration
+func (p *parser) Merge(parent interface{}, child interface{}) interface{} {
+	
parentConfig := parent.(*config) + + // copy one, do not update parentConfig directly. + newConfig := *parentConfig + return &newConfig +} + +func ConfigFactory(c interface{}, callbacks api.FilterCallbackHandler) api.StreamFilter { + conf, ok := c.(*config) + if !ok { + panic("unexpected config type") + } + + worker_thread_id := int(C.get_thread_id()) + api.LogDebugf("worker_thread_id: %d", worker_thread_id) + if _, ok := thread_to_attachment_mapping[int(worker_thread_id)]; !ok { + api.LogDebugf("need to add new thread to the map") + map_size := len(attachment_to_thread_mapping) + if map_size < len(attachments_map) { + attachment_to_thread_mapping[map_size] = worker_thread_id + thread_to_attachment_mapping[worker_thread_id] = map_size + api.LogDebugf("len(attachment_to_thread_mapping): %d", len(attachment_to_thread_mapping)) + api.LogDebugf("thread_to_attachment_mapping: %v", thread_to_attachment_mapping) + api.LogDebugf("attachment_to_thread_mapping: %v", attachment_to_thread_mapping) + } else { + panic("unexpected thread id") + } + } + + worker_id := thread_to_attachment_mapping[int(worker_thread_id)] + api.LogDebugf("worker_id: %d", worker_id) + + filter_id.Add(1) + session_id := filter_id.Load() + attachment_ptr := attachments_map[worker_id] + session_data := C.InitSessionData((*C.NanoAttachment)(attachment_ptr), C.SessionID(session_id)) + + return &filter{ + callbacks: callbacks, + config: conf, + session_id: session_id, + cp_attachment: attachment_ptr, + session_data: session_data, + request_structs: attachment_to_filter_request_structs[worker_id], + } +} + +func main() {} diff --git a/attachments/envoy/1.36/filter.go b/attachments/envoy/1.36/filter.go new file mode 100755 index 0000000..6a12de8 --- /dev/null +++ b/attachments/envoy/1.36/filter.go @@ -0,0 +1,498 @@ +package main + +/* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include + +unsigned long get_thread_id_2() { + return (unsigned long)pthread_self(); +} + +#include +#include +#include 
"nano_attachment_common.h" +#include "nano_attachment.h" + +HttpHeaderData* createHttpHeaderDataArray(int size) { + return (HttpHeaderData*)malloc(size * sizeof(HttpHeaderData)); +} + +HttpMetaData* createHttpMetaData() { + return (HttpMetaData*)malloc(sizeof(HttpMetaData)); +} + +void setHeaderElement(HttpHeaderData* arr, int index, nano_str_t key, nano_str_t value) { + if (arr == NULL) { + return; + } + + arr[index].key = key; + arr[index].value = value; +} +*/ +import "C" +import ( + "github.com/envoyproxy/envoy/contrib/golang/common/go/api" + + "strconv" + "strings" + "unsafe" +) + +func convertBlockPageToString(block_page C.BlockPageData) string { + block_page_size := block_page.title_prefix.len + + block_page.title.len + + block_page.body_prefix.len + + block_page.body.len + + block_page.uuid_prefix.len + + block_page.uuid.len + + block_page.uuid_suffix.len + + block_page_bytes := make([]byte, block_page_size) + + location := 0 + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.title_prefix.data), + C.size_t(block_page.title_prefix.len), + location) + + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.title.data), + C.size_t(block_page.title.len), + location) + + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.body_prefix.data), + C.size_t(block_page.body_prefix.len), + location) + + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.body.data), + C.size_t(block_page.body.len), + location) + + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.uuid_prefix.data), + C.size_t(block_page.uuid_prefix.len), + location) + + location = copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.uuid.data), + C.size_t(block_page.uuid.len), + location) + + copyToSlice( + block_page_bytes, + unsafe.Pointer(block_page.uuid_suffix.data), + C.size_t(block_page.uuid_suffix.len), + location) + + return string(block_page_bytes) +} + +// The callbacks in the filter, 
like `DecodeHeaders`, can be implemented on demand. +// Because api.PassThroughStreamFilter provides a default implementation. +type filter struct { + api.PassThroughStreamFilter + + callbacks api.FilterCallbackHandler + path string + config *config + session_id int64 + session_data *C.HttpSessionData + cp_attachment *nano_attachment + request_structs *filterRequestStructs + body_buffer_chunk int +} + +type filterRequestStructs struct { + http_start_data *C.HttpRequestFilterData + http_meta_data *C.HttpMetaData + http_headers *C.HttpHeaders + http_headers_data *C.HttpHeaderData + http_res_headers *C.ResHttpHeaders + http_body_data *C.nano_str_t + attachment_data *C.AttachmentData +} + +func (f *filterRequestStructs) ZeroInitialize() { + if f.http_start_data != nil { + C.memset(unsafe.Pointer(f.http_start_data), 0, C.size_t(unsafe.Sizeof(*f.http_start_data))) + } + if f.http_meta_data != nil { + C.memset(unsafe.Pointer(f.http_meta_data), 0, C.size_t(unsafe.Sizeof(*f.http_meta_data))) + } + if f.http_headers != nil { + C.memset(unsafe.Pointer(f.http_headers), 0, C.size_t(unsafe.Sizeof(*f.http_headers))) + } + if f.http_headers_data != nil { + C.memset(unsafe.Pointer(f.http_headers_data), 0, C.size_t(unsafe.Sizeof(*f.http_headers_data))) + } + if f.attachment_data != nil { + C.memset(unsafe.Pointer(f.attachment_data), 0, C.size_t(unsafe.Sizeof(*f.attachment_data))) + } +} + +func (f *filter) isSessionFinalized() bool { + return C.IsSessionFinalized((*C.NanoAttachment)(f.cp_attachment), (*C.HttpSessionData)(f.session_data)) == 1 +} + +func (f *filter) sendData(data unsafe.Pointer, chunkType C.HttpChunkType) C.AttachmentVerdictResponse { + + attachment_data := f.request_structs.attachment_data + attachment_data.session_id = C.uint32_t(f.session_id) + attachment_data.chunk_type = chunkType // Adjust type as needed + attachment_data.session_data = f.session_data // Ensure `f.session_data` is compatible + attachment_data.data = C.DataBuffer(data) // Ensure `data` is 
compatible with `C.DataBuffer` + + return C.SendDataNanoAttachment((*C.NanoAttachment)(f.cp_attachment), attachment_data) +} + +func (f *filter) handleCustomResponse(verdict_response *C.AttachmentVerdictResponse) api.StatusType { + if verdict_response.web_response_data.web_response_type == C.RESPONSE_CODE_ONLY { + response_code := C.GetResponseCode((*C.AttachmentVerdictResponse)(verdict_response)) + return f.sendLocalReplyInternal(int(response_code), "", nil) + } + + if verdict_response.web_response_data.web_response_type == C.CUSTOM_WEB_RESPONSE { + headers := map[string][]string{ + "Content-Type": []string{"text/html"}, + } + block_page_parts := C.GetBlockPage( + (*C.NanoAttachment)(f.cp_attachment), + (*C.HttpSessionData)(f.session_data), + (*C.AttachmentVerdictResponse)(verdict_response)) + return f.sendLocalReplyInternal(int(block_page_parts.response_code), convertBlockPageToString(block_page_parts), headers) + } + + redirect_data := C.GetRedirectPage( + (*C.NanoAttachment)(f.cp_attachment), + (*C.HttpSessionData)(f.session_data), + (*C.AttachmentVerdictResponse)(verdict_response)) + redirect_location := redirect_data.redirect_location + + redirect_location_slice := unsafe.Slice((*byte)(unsafe.Pointer(redirect_location.data)), redirect_location.len) + headers := map[string][]string{ + "Location": []string{string(redirect_location_slice)}, + } + + return f.sendLocalReplyInternal(307, "", headers) +} + +func (f *filter) finalizeRequest(verdict_response *C.AttachmentVerdictResponse) api.StatusType { + if C.AttachmentVerdict(verdict_response.verdict) == C.ATTACHMENT_VERDICT_DROP { + return f.handleCustomResponse(verdict_response) + } + + return api.Continue +} + +func (f *filter) handleHeaders(header api.HeaderMap) { + const envoy_headers_prefix = "x-envoy" + i := 0 + header.Range(func(key, value string) bool { + if i > 10000 { + return true + } + + api.LogInfof("inserting headers: key %s, value %s", key, value) + + if strings.HasPrefix(key, envoy_headers_prefix) 
|| + key == "x-request-id" || + key == ":method" || + key == ":path" || + key == ":scheme" || + key == "x-forwarded-proto" { + return true + } + + if key == ":authority" { + key = "Host" + } + + key_nano_str := createNanoStrWithoutCopy(key) + value_nano_str := createNanoStrWithoutCopy(value) + C.setHeaderElement((*C.HttpHeaderData)(f.request_structs.http_headers_data), C.int(i), key_nano_str, value_nano_str) + i++ + return true + }) + + http_headers := f.request_structs.http_headers + http_headers.data = f.request_structs.http_headers_data + http_headers.headers_count = C.size_t(i) +} + +func (f *filter) sendBody(buffer api.BufferInstance, is_req bool) C.AttachmentVerdictResponse { + chunk_type := C.HTTP_REQUEST_BODY + if !is_req { + chunk_type = C.HTTP_RESPONSE_BODY + } + + data := buffer.Bytes() + data_len := len(data) + buffer_size := 8 * 1024 + + num_of_buffers := ((data_len - 1) / buffer_size) + 1 + + // TO DO: FIX THIS ASAP + if num_of_buffers > 10000 { + num_of_buffers = 10000 + } + + + for i := 0; i < num_of_buffers; i++ { + nanoStrPtr := (*C.nano_str_t)(unsafe.Pointer(uintptr(unsafe.Pointer(f.request_structs.http_body_data)) + uintptr(i)*unsafe.Sizeof(*f.request_structs.http_body_data))) + nanoStrPtr.data = (*C.uchar)(unsafe.Pointer(&data[i * buffer_size])) + + if i + 1 == num_of_buffers { + nanoStrPtr.len = C.size_t(data_len - (i * buffer_size)) + } else { + nanoStrPtr.len = C.size_t(buffer_size) + } + + } + + http_chunks_array := C.NanoHttpBody{ + data: f.request_structs.http_body_data, + bodies_count: C.size_t(num_of_buffers), + } + + api.LogInfof("sending body data: %+v", http_chunks_array) + return f.sendData(unsafe.Pointer(&http_chunks_array), C.HttpChunkType(chunk_type)) + +} + +func (f *filter) sendStartTransaction(start_transaction_data *C.HttpRequestFilterData) C.AttachmentVerdictResponse { + return f.sendData(unsafe.Pointer(&start_transaction_data), C.HTTP_REQUEST_FILTER) +} + +func (f *filter) handleStartTransaction(header api.RequestHeaderMap) 
{ + stream_info := f.callbacks.StreamInfo() + + ip_location := 0 + port_location := 1 + + listening_address := stream_info.DownstreamLocalAddress() + listening_address_arr := strings.Split(listening_address, ":") + listening_port, _ := strconv.Atoi(listening_address_arr[port_location]) + + client_address := stream_info.DownstreamRemoteAddress() + client_addr_arr := strings.Split(client_address, ":") + client_port, _ := strconv.Atoi(client_addr_arr[port_location]) + + host := strings.Split(header.Host(), ":")[0] + + protocol, _ := stream_info.Protocol() + + // init start transaction struct + meta_data := f.request_structs.http_meta_data + meta_data.http_protocol = createNanoStr(protocol) + meta_data.method_name = createNanoStr(header.Method()) + meta_data.host = createNanoStr(host) + meta_data.listening_ip = createNanoStr(listening_address_arr[ip_location]) + meta_data.listening_port = C.uint16_t(listening_port) + meta_data.uri = createNanoStr(header.Path()) + meta_data.client_ip = createNanoStr(client_addr_arr[ip_location]) + meta_data.client_port = C.uint16_t(client_port) +} + +func (f *filter) sendLocalReplyInternal(ret_code int, custom_response string, headers map[string][]string) api.StatusType { + f.callbacks.DecoderFilterCallbacks().SendLocalReply(ret_code, custom_response, headers, 0, "") + return api.LocalReply +} + +func (f *filter) endInspectionPart(chunk_type C.HttpChunkType) api.StatusType { + api.LogInfof("Ending inspection for current chunk") + res := f.sendData(nil, chunk_type) + + if C.AttachmentVerdict(res.verdict) != C.ATTACHMENT_VERDICT_INSPECT { + api.LogInfof("got final verict: %v", res.verdict) + return f.finalizeRequest(&res) + } + + return api.Continue +} + +// Callbacks which are called in request path +// The endStream is true if the request doesn't have body +func (f *filter) DecodeHeaders(header api.RequestHeaderMap, endStream bool) api.StatusType { + ret := api.Continue + + defer RecoverPanic(&ret) + + if f.isSessionFinalized() { + 
api.LogInfof("session has already been inspected, no need for further inspection") + return api.Continue + } + + f.handleStartTransaction(header) + f.handleHeaders(header) + + http_start_data := f.request_structs.http_start_data + http_start_data.meta_data = f.request_structs.http_meta_data + http_start_data.req_headers = f.request_structs.http_headers + http_start_data.contains_body = C.bool(!endStream) + + res := f.sendData(unsafe.Pointer(http_start_data), C.HTTP_REQUEST_FILTER) + if C.AttachmentVerdict(res.verdict) != C.ATTACHMENT_VERDICT_INSPECT { + api.LogInfof("got final verict: %v", res.verdict) + return f.finalizeRequest(&res) + } + + return ret +} + +// DecodeData might be called multiple times during handling the request body. +// The endStream is true when handling the last piece of the body. +func (f *filter) DecodeData(buffer api.BufferInstance, endStream bool) api.StatusType { + ret := api.Continue + + defer RecoverPanic(&ret) + + if f.isSessionFinalized() { + return api.Continue + } + + if endStream && buffer.Len() == 0 { + return f.endInspectionPart(C.HttpChunkType(C.HTTP_REQUEST_END)) + } + + if buffer.Len() == 0 { + return ret + } + + res := f.sendBody(buffer, true) + if C.AttachmentVerdict(res.verdict) != C.ATTACHMENT_VERDICT_INSPECT { + api.LogInfof("got final verict: %v", res.verdict) + return f.finalizeRequest(&res) + } + + if endStream { + return f.endInspectionPart(C.HttpChunkType(C.HTTP_REQUEST_END)) + } + + return ret +} + +// Callbacks which are called in response path +// The endStream is true if the response doesn't have body +func (f *filter) EncodeHeaders(header api.ResponseHeaderMap, endStream bool) api.StatusType { + ret := api.Continue + + defer RecoverPanic(&ret) + + if f.isSessionFinalized() { + return api.Continue + } + + const content_length_key = "content-length" + const status_code_key = ":status" + + + content_length_str, _ := header.Get(content_length_key) + status_code_str, _ := header.Get(status_code_key) + 
content_length, _ := strconv.Atoi(content_length_str) + status_code, _ := strconv.Atoi(status_code_str) + + f.handleHeaders(header) + res_http_headers := f.request_structs.http_res_headers + res_http_headers.headers = f.request_structs.http_headers + res_http_headers.content_length = C.uint64_t(content_length) + res_http_headers.response_code = C.uint16_t(status_code) + + res := f.sendData(unsafe.Pointer(res_http_headers), C.HTTP_RESPONSE_HEADER) + if C.AttachmentVerdict(res.verdict) != C.ATTACHMENT_VERDICT_INSPECT { + api.LogInfof("got final verict: %v", res.verdict) + return f.finalizeRequest(&res) + } + + if endStream { + return f.endInspectionPart(C.HttpChunkType(C.HTTP_RESPONSE_END)) + } + + return ret +} + +func injectBodyChunk( + curr_modification *C.struct_NanoHttpModificationList, + body_buffer_chunk int, + buffer *api.BufferInstance) { + for curr_modification != nil { + if (int(curr_modification.modification.orig_buff_index) == body_buffer_chunk) { + mod := curr_modification.modification // type: HttpInjectData + modifications := C.GoString(curr_modification.modification_buffer) + new_buffer:= insertAtPosition((*buffer).String(), modifications, int(mod.injection_pos)) + (*buffer).SetString(new_buffer) + } + curr_modification = curr_modification.next + } +} + +// EncodeData might be called multiple times during handling the response body. +// The endStream is true when handling the last piece of the body. 
+func (f *filter) EncodeData(buffer api.BufferInstance, endStream bool) api.StatusType { + ret := api.Continue + + defer RecoverPanic(&ret) + + if f.isSessionFinalized() { + return api.Continue + } + + if endStream && buffer.Len() == 0 { + return f.endInspectionPart(C.HttpChunkType(C.HTTP_RESPONSE_END)) + } + + if buffer.Len() == 0 { + return ret + } + + res := f.sendBody(buffer, false) + injectBodyChunk(res.modifications, f.body_buffer_chunk, &buffer) + f.body_buffer_chunk++ + if C.AttachmentVerdict(res.verdict) != C.ATTACHMENT_VERDICT_INSPECT { + api.LogInfof("got final verict: %v", res.verdict) + return f.finalizeRequest(&res) + } + + if endStream { + return f.endInspectionPart(C.HttpChunkType(C.HTTP_RESPONSE_END)) + } + + return ret +} + +// ____________NOT IMPLEMENTED AT THE MOMENT____________ +func (f *filter) DecodeTrailers(trailers api.RequestTrailerMap) api.StatusType { + // support suspending & resuming the filter in a background goroutine + return api.Continue +} + +func (f *filter) EncodeTrailers(trailers api.ResponseTrailerMap) api.StatusType { + return api.Continue +} + +// OnLog is called when the HTTP stream is ended on HTTP Connection Manager filter. 
+func (f *filter) OnLog(api.RequestHeaderMap, api.RequestTrailerMap, api.ResponseHeaderMap, api.ResponseTrailerMap) {} + +// OnLogDownstreamStart is called when HTTP Connection Manager filter receives a new HTTP request +// (required the corresponding access log type is enabled) +func (f *filter) OnLogDownstreamStart(api.RequestHeaderMap) {} + +// OnLogDownstreamPeriodic is called on any HTTP Connection Manager periodic log record +// (required the corresponding access log type is enabled) +func (f *filter) OnLogDownstreamPeriodic(api.RequestHeaderMap, api.RequestTrailerMap, api.ResponseHeaderMap, api.ResponseTrailerMap) {} + +func (f *filter) OnDestroy(reason api.DestroyReason) { + freeHttpMetaDataFields(f.request_structs.http_meta_data) + f.request_structs.ZeroInitialize() + C.FiniSessionData((*C.NanoAttachment)(f.cp_attachment), f.session_data) +} diff --git a/attachments/envoy/1.36/go.mod b/attachments/envoy/1.36/go.mod new file mode 100644 index 0000000..b7016e7 --- /dev/null +++ b/attachments/envoy/1.36/go.mod @@ -0,0 +1,20 @@ +module gitlab.ngen.checkpoint.com/Ngen/agent-core/attachments/envoy + +// the version should >= 1.18 +go 1.24 + +// NOTICE: these lines could be generated automatically by "go mod tidy" +require ( + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa + github.com/envoyproxy/envoy v1.36.2 + google.golang.org/protobuf v1.36.10 +) + +require github.com/go-chi/chi/v5 v5.1.0 + +require ( + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect +) diff --git a/attachments/envoy/1.36/go.sum b/attachments/envoy/1.36/go.sum new file mode 100755 index 0000000..da959fb --- /dev/null +++ b/attachments/envoy/1.36/go.sum @@ -0,0 +1,23 @@ +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa 
h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/envoyproxy/envoy v1.36.2 h1:87+0C7ZCbGJdDfD9sVsvqGLVvGk2OIKuxzzm9oNpvDM= +github.com/envoyproxy/envoy v1.36.2/go.mod h1:mgMEye9tOlNiUTG+iYhQYgCzQcX46MMS0Jo6bVwRt1U= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw= +github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= +google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= diff --git a/attachments/envoy/1.36/utils.go b/attachments/envoy/1.36/utils.go new file mode 100755 index 0000000..a1aa323 --- /dev/null +++ b/attachments/envoy/1.36/utils.go @@ -0,0 +1,111 @@ +package main + +/* +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include "nano_attachment_common.h" +#include "nano_attachment.h" +#include +*/ +import "C" +import ( + "github.com/envoyproxy/envoy/contrib/golang/common/go/api" + + "reflect" + "unsafe" + "os" + "runtime" + "strconv" +) +func getEnv(key, defaultValue string) string { + value, exists := os.LookupEnv(key) + if !exists { + return defaultValue + } + return value +} + +var INSERT_POS_ERR_MSG = "Got invalid insertion position, will not insert." + +func copyToSlice(dest []byte, src unsafe.Pointer, size C.size_t, location int) int { + C.memcpy(unsafe.Pointer(&dest[location]), src, size) + return location + int(size) +} + +func newNanoStr(data []byte) *C.nano_str_t { + nanoStr := (*C.nano_str_t)(C.malloc(C.size_t(unsafe.Sizeof(C.nano_str_t{})))) + if nanoStr == nil { + panic("failed to allocate memory for nano_str_t struct") + } + + nanoStr.len = C.size_t(len(data)) + return nanoStr +} + +func insertAtPosition(buff string, injection string, pos int) string { + if pos < 0 || pos > len(buff) { + api.LogDebugf( + INSERT_POS_ERR_MSG + + " Position: " + + strconv.Itoa(pos) + + ", buffer's lenght: " + + strconv.Itoa(len(buff))) + return buff + } + return_buff := buff[:pos] + injection + buff[pos:] + return return_buff +} + +func createNanoStr(str string) C.nano_str_t { + c_str := C.CString(str) + nanoStr := C.nano_str_t{ + len: C.size_t(len(str)), + data: (*C.uchar)(unsafe.Pointer(c_str)), + } + + return nanoStr +} + +func createNanoStrWithoutCopy(str string) C.nano_str_t { + nanoStr := 
C.nano_str_t{ + len: C.size_t(len(str)), + data: (*C.uchar)(unsafe.Pointer((*(*reflect.StringHeader)(unsafe.Pointer(&str))).Data)), + } + + return nanoStr +} + +func freeNanoStr(str *C.nano_str_t) { + C.free(unsafe.Pointer(str.data)) +} + +func freeHttpMetaDataFields(meta_data *C.HttpMetaData) { + freeNanoStr(&(*meta_data).http_protocol) + freeNanoStr(&(*meta_data).method_name) + freeNanoStr(&(*meta_data).host) + freeNanoStr(&(*meta_data).listening_ip) + freeNanoStr(&(*meta_data).uri) + freeNanoStr(&(*meta_data).client_ip) +} + +func freeHeaders(header_arr *C.HttpHeaderData, header_slice []C.HttpHeaderData) { + C.free(unsafe.Pointer(header_arr)) + + for _, header := range header_slice { + freeNanoStr(&(header.key)) + freeNanoStr(&(header.value)) + } +} + +func RecoverPanic(ret *api.StatusType) { + if e := recover(); e != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + api.LogErrorf("http: panic serving: %v\n%s", e, buf) + + *ret = api.Continue + } +} diff --git a/attachments/envoy/CMakeLists.txt b/attachments/envoy/CMakeLists.txt index d0f9274..4b58462 100755 --- a/attachments/envoy/CMakeLists.txt +++ b/attachments/envoy/CMakeLists.txt @@ -2,3 +2,5 @@ add_subdirectory(1.31) add_subdirectory(1.32) add_subdirectory(1.33) add_subdirectory(1.34) +add_subdirectory(1.35) +add_subdirectory(1.36) diff --git a/attachments/nano_attachment/nano_attachment.c b/attachments/nano_attachment/nano_attachment.c index dc4b240..7aa6746 100755 --- a/attachments/nano_attachment/nano_attachment.c +++ b/attachments/nano_attachment/nano_attachment.c @@ -606,21 +606,21 @@ FreeAttachmentResponseContent( return; } -HttpBody * -compressBody(NanoAttachment *attachment, HttpSessionData *session_data, HttpBody *bodies) +NanoHttpBody * +compressBody(NanoAttachment *attachment, HttpSessionData *session_data, NanoHttpBody *bodies) { return nano_compress_body(attachment, bodies, session_data); } -HttpBody * -decompressBody(NanoAttachment 
*attachment, HttpSessionData *session_data, HttpBody *bodies) +NanoHttpBody * +decompressBody(NanoAttachment *attachment, HttpSessionData *session_data, NanoHttpBody *bodies) { return nano_decompress_body(attachment, bodies, session_data); } void -freeCompressedBody(NanoAttachment *attachment, HttpSessionData *session_data, HttpBody *bodies) +freeCompressedBody(NanoAttachment *attachment, HttpSessionData *session_data, NanoHttpBody *bodies) { nano_free_compressed_body(attachment, bodies, session_data); } diff --git a/attachments/nano_attachment/nano_attachment_io.c b/attachments/nano_attachment/nano_attachment_io.c index c9bac47..57ac7bb 100755 --- a/attachments/nano_attachment/nano_attachment_io.c +++ b/attachments/nano_attachment/nano_attachment_io.c @@ -1560,7 +1560,7 @@ nano_header_sender( void nano_body_sender( NanoAttachment *attachment, - HttpBody *bodies, + NanoHttpBody *bodies, HttpEventThreadCtx *ctx, AttachmentDataType body_type, uint32_t cur_request_id, diff --git a/attachments/nano_attachment/nano_attachment_io.h b/attachments/nano_attachment/nano_attachment_io.h index 21972d4..dab5d9a 100644 --- a/attachments/nano_attachment/nano_attachment_io.h +++ b/attachments/nano_attachment/nano_attachment_io.h @@ -168,7 +168,7 @@ nano_header_sender( /// of messages sent. /// /// @param attachment Pointer to a NanoAttachment struct representing the attachment/module. -/// @param bodies Pointer to an HttpBody struct containing the HTTP request/response body data. +/// @param bodies Pointer to an NanoHttpBody struct containing the HTTP request/response body data. /// @param ctx Pointer to an HttpEventThreadCtx struct representing the HTTP event thread context. /// @param body_type Enum value indicating whether the body is a request or response body. /// @param cur_request_id Current request ID for logging and tracking purposes. 
@@ -177,7 +177,7 @@ nano_header_sender( void nano_body_sender( NanoAttachment *attachment, - HttpBody *bodies, + NanoHttpBody *bodies, HttpEventThreadCtx *ctx, AttachmentDataType body_type, uint32_t cur_request_id, diff --git a/attachments/nano_attachment/nano_attachment_sender_thread.c b/attachments/nano_attachment/nano_attachment_sender_thread.c index 42c4a0d..fb2a43e 100644 --- a/attachments/nano_attachment/nano_attachment_sender_thread.c +++ b/attachments/nano_attachment/nano_attachment_sender_thread.c @@ -229,7 +229,7 @@ void * SendRequestBodyThread(void *_ctx) { HttpEventThreadCtx *ctx = (HttpEventThreadCtx *)_ctx; - HttpBody *bodies = (HttpBody*)ctx->data->data; + NanoHttpBody *bodies = (NanoHttpBody*)ctx->data->data; NanoAttachment *attachment = ctx->attachment; HttpSessionData *session_data_p = ctx->session_data_p; @@ -249,7 +249,7 @@ void * SendResponseBodyThread(void *_ctx) { HttpEventThreadCtx *ctx = (HttpEventThreadCtx *)_ctx; - HttpBody *bodies = (HttpBody*)ctx->data->data; + NanoHttpBody *bodies = (NanoHttpBody*)ctx->data->data; NanoAttachment *attachment = ctx->attachment; HttpSessionData *session_data_p = ctx->session_data_p; diff --git a/attachments/nano_attachment/nano_attachment_util/nano_attachment_util.cc b/attachments/nano_attachment/nano_attachment_util/nano_attachment_util.cc index 4cf5d9e..759e609 100644 --- a/attachments/nano_attachment/nano_attachment_util/nano_attachment_util.cc +++ b/attachments/nano_attachment/nano_attachment_util/nano_attachment_util.cc @@ -261,3 +261,9 @@ isSkipSource(c_str ip_str) return 0; } + +unsigned int +isPairedAffinityEnabled() +{ + return conf_data.getNumericalValue("is_paired_affinity_enabled"); +} diff --git a/attachments/nano_attachment/nano_compression.c b/attachments/nano_attachment/nano_compression.c index 75a35b3..08edff7 100755 --- a/attachments/nano_attachment/nano_compression.c +++ b/attachments/nano_attachment/nano_compression.c @@ -7,15 +7,15 @@ #include "compression_utils.h" #include 
"nano_utils.h" -HttpBody * +NanoHttpBody * nano_compress_body( NanoAttachment *attachment, - HttpBody *bodies, + NanoHttpBody *bodies, HttpSessionData *session_data_p ) { CompressionResult compression_result; - HttpBody *compressed_body; + NanoHttpBody *compressed_body; size_t i; if (session_data_p->response_data.compression_type == NO_COMPRESSION) { @@ -32,7 +32,7 @@ nano_compress_body( session_data_p->response_data.compression_stream = initCompressionStream(); } - compressed_body = malloc(sizeof(HttpBody)); + compressed_body = malloc(sizeof(NanoHttpBody)); if (compressed_body == NULL) { return NULL; } @@ -59,15 +59,15 @@ nano_compress_body( return compressed_body; } -HttpBody * +NanoHttpBody * nano_decompress_body( NanoAttachment *attachment, - HttpBody *bodies, + NanoHttpBody *bodies, HttpSessionData *session_data_p ) { DecompressionResult decompression_result; - HttpBody *decompressed_body; + NanoHttpBody *decompressed_body; size_t i; if (session_data_p->response_data.compression_type == NO_COMPRESSION) { @@ -84,7 +84,7 @@ nano_decompress_body( session_data_p->response_data.decompression_stream = initCompressionStream(); } - decompressed_body = malloc(sizeof(HttpBody)); + decompressed_body = malloc(sizeof(NanoHttpBody)); if (decompressed_body == NULL) { return NULL; } @@ -112,7 +112,7 @@ nano_decompress_body( void nano_free_compressed_body( NanoAttachment *attachment, - HttpBody *bodies, + NanoHttpBody *bodies, HttpSessionData *session_data_p ) { diff --git a/attachments/nano_attachment/nano_compression.h b/attachments/nano_attachment/nano_compression.h index 5108b35..d083fd4 100755 --- a/attachments/nano_attachment/nano_compression.h +++ b/attachments/nano_attachment/nano_compression.h @@ -6,39 +6,39 @@ /// @brief Compresses the given HTTP body using the specified compression type in the session data. /// /// @param attachment Pointer to the NanoAttachment structure. -/// @param bodies Pointer to the HttpBody structure containing the data to be compressed. 
+/// @param bodies Pointer to the NanoHttpBody structure containing the data to be compressed. /// @param session_data_p Pointer to the HttpSessionData structure containing session-specific data. /// -/// @return Pointer to a new HttpBody structure containing the compressed data, +/// @return Pointer to a new NanoHttpBody structure containing the compressed data, /// or NULL if compression is not needed or fails. -HttpBody *nano_compress_body( +NanoHttpBody *nano_compress_body( NanoAttachment *attachment, - HttpBody *bodies, + NanoHttpBody *bodies, HttpSessionData *session_data_p ); /// @brief Decompresses the given HTTP body using the specified compression type in the session data. /// /// @param attachment Pointer to the NanoAttachment structure. -/// @param bodies Pointer to the HttpBody structure containing the data to be decompressed. +/// @param bodies Pointer to the NanoHttpBody structure containing the data to be decompressed. /// @param session_data_p Pointer to the HttpSessionData structure containing session-specific data. /// -/// @return Pointer to a new HttpBody structure containing the decompressed data, +/// @return Pointer to a new NanoHttpBody structure containing the decompressed data, /// or NULL if decompression is not needed or fails. -HttpBody *nano_decompress_body( +NanoHttpBody *nano_decompress_body( NanoAttachment *attachment, - HttpBody *bodies, + NanoHttpBody *bodies, HttpSessionData *session_data_p ); /// @brief Frees the memory allocated for the compressed HTTP body. /// /// @param attachment Pointer to the NanoAttachment structure. -/// @param bodies Pointer to the HttpBody structure containing the compressed data to be freed. +/// @param bodies Pointer to the NanoHttpBody structure containing the compressed data to be freed. /// @param session_data_p Pointer to the HttpSessionData structure containing session-specific data. 
void nano_free_compressed_body( NanoAttachment *attachment, - HttpBody *bodies, + NanoHttpBody *bodies, HttpSessionData *session_data_p ); diff --git a/attachments/nginx/nginx_attachment_util/nginx_attachment_util.cc b/attachments/nginx/nginx_attachment_util/nginx_attachment_util.cc index 85cd5b6..71313a9 100644 --- a/attachments/nginx/nginx_attachment_util/nginx_attachment_util.cc +++ b/attachments/nginx/nginx_attachment_util/nginx_attachment_util.cc @@ -28,10 +28,10 @@ initAttachmentConfig(c_str conf_file) return conf_data.init(conf_file); } -ngx_http_inspection_mode_e +NanoHttpInspectionMode getInspectionMode() { - return static_cast(conf_data.getNumericalValue("nginx_inspection_mode")); + return static_cast(conf_data.getNumericalValue("nginx_inspection_mode")); } unsigned int @@ -191,6 +191,24 @@ getRemoveResServerHeader() return conf_data.getNumericalValue("remove_server_header"); } +unsigned int +getDecompressionPoolSize() +{ + return conf_data.getNumericalValue("decompression_pool_size"); +} + +unsigned int +getRecompressionPoolSize() +{ + return conf_data.getNumericalValue("recompression_pool_size"); +} + +unsigned int +getIsBrotliInspectionEnabled() +{ + return conf_data.getNumericalValue("is_brotli_inspection_enabled"); +} + int isIPAddress(c_str ip_str) { @@ -285,3 +303,15 @@ isSkipSource(c_str ip_str) return 0; } + +unsigned int +isPairedAffinityEnabled() +{ + return conf_data.getNumericalValue("is_paired_affinity_enabled") != 0; +} + +unsigned int +isAsyncModeEnabled() +{ + return conf_data.getNumericalValue("is_async_mode_enabled") != 0; +} diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_body.c b/attachments/nginx/ngx_module/async/ngx_cp_async_body.c new file mode 100755 index 0000000..6c4ea0c --- /dev/null +++ b/attachments/nginx/ngx_module/async/ngx_cp_async_body.c @@ -0,0 +1,148 @@ +#include "ngx_cp_async_body.h" + +#include +#include +#include +#include +#include +#include + +#include "ngx_cp_async_core.h" +#include 
"ngx_cp_async_ctx_validation.h" +#include "ngx_cp_async_sender.h" +#include "../ngx_cp_hooks.h" +#include "../ngx_cp_initializer.h" +#include "../ngx_cp_utils.h" + +ngx_int_t +ngx_http_cp_req_body_filter_async(ngx_http_request_t *r, ngx_chain_t *in) +{ + ngx_http_cp_session_data *sd = recover_cp_session_data(r); + ngx_http_cp_async_ctx_t *ctx; + + write_dbg(DBG_LEVEL_DEBUG, "=== ASYNC REQUEST BODY FILTER START ==="); + + print_buffer_chain(in, "outgoing", 32, DBG_LEVEL_TRACE); + if (!sd->initial_async_mode || (sd->initial_async_mode && !is_async_mode_enabled)) { + write_dbg(DBG_LEVEL_WARNING, "Async mode not initialized or changed - passing through"); + return ngx_http_next_request_body_filter(r, in); + } + + if (!isIpcReady() || !sd->async_processing_needed) { + write_dbg(DBG_LEVEL_DEBUG, "No async processing needed - passing through"); + return ngx_http_next_request_body_filter(r, in); + } + + ctx = ngx_cp_async_find_ctx(sd->session_id); + if (!ctx) { + write_dbg(DBG_LEVEL_DEBUG, "No async ctx; pass-through session %d", sd->session_id); + return ngx_http_next_request_body_filter(r, in); + } + + if (sd->verdict != TRAFFIC_VERDICT_INSPECT) { + write_dbg(DBG_LEVEL_DEBUG, "Request already inspected; applying verdict for session %d", sd->session_id); + SAFE_DESTROY_CTX(ctx); + return sd->verdict == TRAFFIC_VERDICT_ACCEPT ? 
ngx_http_next_request_body_filter(r, in) : NGX_HTTP_FORBIDDEN; + } + + if (ngx_cp_async_ctx_get_flow_error_safe(ctx)) { + write_dbg(DBG_LEVEL_DEBUG, "Flow error detected for session %d", ngx_cp_async_ctx_get_session_id_safe(ctx)); + SAFE_DESTROY_CTX(ctx); + return ngx_http_next_request_body_filter(r, in); + } + + write_dbg(DBG_LEVEL_DEBUG, "Found async context for session %d - processing body", sd->session_id); + + if (!ctx->body_phase_started){ + r->request_body->filter_need_buffering = 1; + r->request_body_no_buffering = 0; + ctx->body_phase_started = 1; + } + + if (ngx_cp_async_ctx_get_released_safe(ctx) && ngx_cp_async_ctx_get_queue_head_safe(ctx)) { + write_dbg(DBG_LEVEL_DEBUG, "Agent released; forwarding queued body for session %d", ngx_cp_async_ctx_get_session_id_safe(ctx)); + ngx_int_t rc = ngx_http_next_request_body_filter(r, ctx->queue_head); + queue_free(r, ctx); + ctx->queue_head = ctx->queue_tail = NULL; + SAFE_DESTROY_CTX(ctx); + return rc; + } + + if (!ctx->req_seq && ctx->queue_head && !in) { + write_dbg(DBG_LEVEL_DEBUG, "Forwarding queued body (no new data) for session %d", ctx->session_id); + ngx_int_t rc = ngx_http_next_request_body_filter(r, ctx->queue_head); + queue_free(r, ctx); + ctx->queue_head = ctx->queue_tail = NULL; + return rc; + } + + if (in) { + write_dbg(DBG_LEVEL_DEBUG, "New body chunk received for session %d", ctx->session_id); + + if (chain_add_copy(r, ctx, in) != NGX_OK) { + write_dbg(DBG_LEVEL_ERROR, "Queue copy failed; session %d", ctx->session_id); + ctx->session_data->async_processing_needed = 0; + queue_free(r, ctx); + ctx->queue_head = ctx->queue_tail = NULL; + SAFE_DESTROY_CTX(ctx); + return NGX_ERROR; + } + + for (ngx_chain_t *cl = in; cl; cl = cl->next) { + ngx_buf_t *b = cl->buf; + if (b == NULL) continue; + + ngx_uint_t nmsgs = 0; + ngx_int_t rc = NGX_OK; + if (ngx_cp_async_send_single_body_chunk_nonblocking(ctx, cl, &nmsgs) != NGX_OK) { + write_dbg(DBG_LEVEL_DEBUG, "IPC send failed; fail-safe pass-through session %d", 
ctx->session_id); + if (ctx->queue_head) { + rc = ngx_http_next_request_body_filter(r, ctx->queue_head); + ctx->session_data->async_processing_needed = 0; + queue_free(r, ctx); + ctx->queue_head = ctx->queue_tail = NULL; + } + SAFE_DESTROY_CTX(ctx); + return rc; + } + ctx->req_seq += nmsgs; + sd->remaining_messages_to_reply += nmsgs; + + if (b->last_buf) { + if (ngx_cp_async_send_end_transaction_nonblocking(ctx, &nmsgs) != NGX_OK) { + write_dbg(DBG_LEVEL_DEBUG, "IPC send failed; fail-safe pass-through session %d", ctx->session_id); + if (ctx->queue_head) { + rc = ngx_http_next_request_body_filter(r, ctx->queue_head); + queue_free(r, ctx); + ctx->queue_head = ctx->queue_tail = NULL; + } + SAFE_DESTROY_CTX(ctx); + return rc; + } + ctx->req_seen_last = 1; + ctx->req_seq += nmsgs; + sd->remaining_messages_to_reply += nmsgs; + write_dbg(DBG_LEVEL_DEBUG, "Seen last body chunk for session %d", ctx->session_id); + } + } + + write_dbg(DBG_LEVEL_DEBUG, "Queued body chunk and waiting for verdict; session %d", ctx->session_id); + if (ctx->req_seen_last == 1) { + write_dbg(DBG_LEVEL_DEBUG, "Last chunk sent; waiting for release; session %d", ctx->session_id); + ngx_cp_async_start_deadline_timer(ctx, ngx_max(req_max_proccessing_ms_time, async_body_stage_timeout)); + ctx->waiting = 1; + + if (!ctx->request_ref_incremented && r->http_version == NGX_HTTP_VERSION_20) { + r->main->count++; + ctx->request_ref_incremented = 1; + write_dbg(DBG_LEVEL_DEBUG, "Incremented request main reference count for HTTP/2 session %d", ctx->session_id); + } + + return NGX_DONE; + } + return ngx_http_next_request_body_filter(r, NULL); + } + + write_dbg(DBG_LEVEL_DEBUG, "No new input; pass-through session %d", ctx->session_id); + return ngx_http_next_request_body_filter(r, in); +} diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_body.h b/attachments/nginx/ngx_module/async/ngx_cp_async_body.h new file mode 100755 index 0000000..a729563 --- /dev/null +++ 
b/attachments/nginx/ngx_module/async/ngx_cp_async_body.h @@ -0,0 +1,30 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// @file ngx_cp_async_body.h +/// +/// Body async filter processing for Check Point Nano Agent NGINX module. +/// + +#ifndef __NGX_CP_ASYNC_BODY_H__ +#define __NGX_CP_ASYNC_BODY_H__ + +#include +#include +#include +#include + +ngx_int_t ngx_http_cp_req_body_filter_async(ngx_http_request_t *r, ngx_chain_t *in); + +#endif // __NGX_CP_ASYNC_BODY_H__ diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_core.c b/attachments/nginx/ngx_module/async/ngx_cp_async_core.c new file mode 100755 index 0000000..a270b3e --- /dev/null +++ b/attachments/nginx/ngx_module/async/ngx_cp_async_core.c @@ -0,0 +1,2117 @@ +#include "ngx_cp_async_core.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ngx_cp_async_types.h" +#include "ngx_cp_async_ctx_validation.h" +#include "ngx_cp_async_sender.h" +#include "../ngx_cp_hooks.h" +#include "../ngx_cp_utils.h" +#include "../ngx_cp_initializer.h" +#include "../ngx_cp_failing_state.h" +#include "../ngx_cp_metric.h" +#include "../ngx_cp_io.h" +#include "../ngx_cp_static_content.h" +#include "../ngx_http_cp_attachment_module.h" +#include "../ngx_cp_thread.h" + +#define CP_ASYNC_CTX_BUCKETS 2048 ///< Hash table buckets for better distribution + +static 
ngx_http_cp_async_ctx_t *ctx_buckets[CP_ASYNC_CTX_BUCKETS] = {NULL}; +ngx_uint_t context_count = 0; + +static ngx_uint_t pending_inspection_chunks = 0; +static ngx_event_t backpressure_drain_event; +static ngx_int_t backpressure_event_initialized = 0; + +// Event-driven IPC infrastructure +static ngx_connection_t *ipc_verdict_conn = NULL; // Connection obtained via ngx_get_connection + +// Global epoll instance for backpressure handling (created once, reused) +static int g_backpressure_epoll_fd = -1; +static int g_backpressure_registered_socket = -1; ///< Track which socket is registered + +ngx_int_t is_initialized = 0; +ngx_uint_t async_backpressure_threshold = 10; +ngx_msec_t async_header_timeout_ms = 1000; // Default 1s for headers/meta_data/end_transaction +ngx_msec_t async_body_stage_timeout = 5000; // Default 5s for body stage +ngx_msec_t async_wait_verdict_timeout_ms = 50; // Default 50ms for wait verdict polling +ngx_msec_t async_signal_timeout_ms = 10; // Default 10ms for service signal timeout +ngx_msec_t async_context_cleanup_timeout_ms = 300000; // Default 5 minutes for context cleanup + +// forward declaration +static ngx_int_t ngx_cp_async_handle_wait_verdict(ngx_http_cp_async_ctx_t *ctx, const char *stage_name); +static void ngx_cp_async_verdict_event_handler(ngx_event_t *ev); +static void ngx_cp_async_transition_after_wait(ngx_http_cp_async_ctx_t *ctx); +static void cp_async_posted_resume(ngx_http_request_t *r); +static void cp_async_resume_event_handler(ngx_event_t *ev); +static void cp_async_post_request(ngx_http_cp_async_ctx_t *ctx); +static void ngx_cp_async_context_cleanup_handler(ngx_event_t *ev); +static void ngx_cp_async_backpressure_drain_handler(ngx_event_t *ev); +static void ngx_cp_async_cancel_deadline_timer(ngx_http_cp_async_ctx_t *ctx); +static ngx_int_t drain_ipc_queue(ngx_uint_t *verdicts_drained); +static ngx_int_t is_verdict_final(ServiceVerdict verdict); +static ngx_int_t 
ngx_cp_async_apply_verdict_for_stage(ngx_http_cp_async_ctx_t *ctx); +static ngx_int_t ngx_cp_async_apply_verdict(ngx_http_cp_async_ctx_t *ctx, HttpReplyFromService *reply_p); +static ssize_t drain_comm_socket_fully(int sock); +static ngx_int_t is_verdict_drop_or_custom(ServiceVerdict verdict); + +/// +/// @brief Increment the global pending inspection chunks counter +/// @param[in] session_id Session ID for logging purposes +/// @param[in] chunk_type Description of chunk type being sent +/// +void +ngx_cp_async_increment_pending_chunks(uint32_t session_id, const char *chunk_type) +{ + pending_inspection_chunks++; + write_dbg(DBG_LEVEL_DEBUG, "ASYNC_CHUNKS: Incremented pending chunks to %d for session %d (%s)", + pending_inspection_chunks, session_id, chunk_type); + + if ( + pending_inspection_chunks >= async_backpressure_threshold && + backpressure_event_initialized && + !backpressure_drain_event.posted && + nano_service_ipc != NULL + ) { + write_dbg( + DBG_LEVEL_DEBUG, + "ASYNC_BACKPRESSURE: Threshold %d reached (%d pending chunks) - posting drain event", + async_backpressure_threshold, + pending_inspection_chunks + ); + ngx_post_event(&backpressure_drain_event, &ngx_posted_events); + } +} + +/// +/// @brief Decrement the global pending inspection chunks counter +/// @param[in] session_id Session ID for logging purposes +/// @param[in] verdict_type Description of verdict type being processed +/// +void +ngx_cp_async_decrement_pending_chunks(uint32_t session_id, const char *verdict_type) +{ + if (pending_inspection_chunks > 0) { + pending_inspection_chunks--; + write_dbg( + DBG_LEVEL_DEBUG, + "ASYNC_CHUNKS: Decremented pending chunks to %d for session %d (%s)", + pending_inspection_chunks, + session_id, + verdict_type + ); + } else { + write_dbg( + DBG_LEVEL_DEBUG, + "ASYNC_CHUNKS: Attempted to decrement pending chunks below zero for session %d (%s)", + session_id, + verdict_type + ); + } +} + +/// +/// @brief Post backpressure drain event if conditions are met 
+/// +void +ngx_cp_async_post_backpressure_drain_event(void) +{ + if (backpressure_event_initialized && !backpressure_drain_event.posted && nano_service_ipc != NULL) { + ngx_post_event(&backpressure_drain_event, &ngx_posted_events); + } +} + +/// +/// @brief Reset the global pending inspection chunks counter +/// +static inline void +ngx_cp_async_reset_pending_chunks(void) +{ + if (pending_inspection_chunks > 0) { + write_dbg( + DBG_LEVEL_INFO, + "ASYNC_CHUNKS: Resetting pending chunks from %d to 0 (IPC disconnect/reset)", + pending_inspection_chunks + ); + + if (backpressure_event_initialized && backpressure_drain_event.posted) { + ngx_delete_posted_event(&backpressure_drain_event); + write_dbg(DBG_LEVEL_INFO, "ASYNC_BACKPRESSURE: Cancelled pending drain event due to reset"); + } + } + pending_inspection_chunks = 0; +} + +// Simplified macro for stage transitions +#define ASYNC_STAGE_TRANSITION(ctx, new_stage) do { \ + if ((new_stage) == NGX_CP_ASYNC_STAGE_COMPLETE) { \ + if (ctx->deadline_event.timer_set) { \ + ngx_del_timer(&ctx->deadline_event); \ + write_dbg(DBG_LEVEL_DEBUG, "Auto-cancelled deadline timer during transition to COMPLETE for session %d", ctx->session_id); \ + } \ + if (ctx->cleanup_event.timer_set) { \ + ngx_del_timer(&ctx->cleanup_event); \ + write_dbg(DBG_LEVEL_DEBUG, "Auto-cancelled cleanup timer during transition to COMPLETE for session %d", ctx->session_id); \ + } \ + } \ + ctx->stage = new_stage; \ +} while(0) + +/// +/// @brief Hash function for session IDs using Knuth's multiplicative method +/// @param[in] session_id Session ID to hash +/// @return Hash bucket index +/// +static ngx_uint_t +cp_async_ctx_hash(uint32_t session_id) +{ + // Knuth's multiplicative hash with golden ratio constant + static const uint32_t KNUTH_CONSTANT = 2654435761U; // (sqrt(5) - 1) / 2 * 2^32 + return (session_id * KNUTH_CONSTANT) % CP_ASYNC_CTX_BUCKETS; +} + +/// +/// @brief Initialize async timeout configuration from environment variables +/// +static void 
ngx_cp_async_init_timeout_config(void) { + char *env_value; + + env_value = getenv("CP_ASYNC_HEADER_TIMEOUT_MS"); + if (env_value != NULL) { + ngx_uint_t timeout = (ngx_uint_t)atoi(env_value); + if (timeout >= 100 && timeout <= 30000) { // 100ms to 30s range + async_header_timeout_ms = (ngx_msec_t)timeout; + write_dbg(DBG_LEVEL_INFO, "Async header timeout set to %dms from environment", async_header_timeout_ms); + } else { + write_dbg(DBG_LEVEL_WARNING, "Invalid async header timeout %d from environment, using default %dms", timeout, async_header_timeout_ms); + } + } + + env_value = getenv("CP_ASYNC_WAIT_VERDICT_TIMEOUT_MS"); + if (env_value != NULL) { + ngx_uint_t timeout = (ngx_uint_t)atoi(env_value); + if (timeout >= 50 && timeout <= 10000) { // 50ms to 10s range + async_wait_verdict_timeout_ms = (ngx_msec_t)timeout; + write_dbg(DBG_LEVEL_INFO, "Async wait verdict timeout set to %dms from environment", async_wait_verdict_timeout_ms); + } else { + write_dbg(DBG_LEVEL_WARNING, "Invalid async wait verdict timeout %d from environment, using default %dms", timeout, async_wait_verdict_timeout_ms); + } + } + + env_value = getenv("CP_ASYNC_BODY_STAGE_TIMEOUT_MS"); + if (env_value != NULL) { + ngx_uint_t timeout = (ngx_uint_t)atoi(env_value); + if (timeout >= 1000 && timeout <= 600000) { // 1s to 10min range + async_body_stage_timeout = (ngx_msec_t)timeout; + write_dbg(DBG_LEVEL_INFO, "Async body stage timeout set to %dms from environment", async_body_stage_timeout); + } else { + write_dbg(DBG_LEVEL_WARNING, "Invalid async body stage timeout %d from environment, using default %dms", timeout, async_body_stage_timeout); + } + } + + env_value = getenv("CP_ASYNC_CONTEXT_CLEANUP_TIMEOUT_MS"); + if (env_value != NULL) { + ngx_uint_t timeout = (ngx_uint_t)atoi(env_value); + if (timeout >= 10000 && timeout <= 300000) { + async_context_cleanup_timeout_ms = (ngx_msec_t)timeout; + write_dbg(DBG_LEVEL_INFO, "Async context cleanup timeout set to %dms from environment", 
async_context_cleanup_timeout_ms); + } else { + write_dbg(DBG_LEVEL_WARNING, "Invalid async context cleanup timeout %d from environment, using default %dms", timeout, async_context_cleanup_timeout_ms); + } + } + + env_value = getenv("CP_ASYNC_SIGNAL_TIMEOUT_MS"); + if (env_value != NULL) { + ngx_uint_t timeout = (ngx_uint_t)atoi(env_value); + if (timeout >= 1 && timeout <= 1000) { // 1ms to 1s range + async_signal_timeout_ms = (ngx_msec_t)timeout; + write_dbg(DBG_LEVEL_INFO, "Async signal timeout set to %dms from environment", async_signal_timeout_ms); + } else { + write_dbg(DBG_LEVEL_WARNING, "Invalid async signal timeout %d from environment, using default %dms", timeout, async_signal_timeout_ms); + } + } + + write_dbg( + DBG_LEVEL_INFO, + "Async timeout config: header=%dms, wait_verdict=%dms, first_wait_verdict=%dms, signal=%dms, context_cleanup=%dms", + async_header_timeout_ms, + async_wait_verdict_timeout_ms, + req_max_proccessing_ms_time, + async_signal_timeout_ms, + async_context_cleanup_timeout_ms + ); +} + +ngx_int_t +ngx_cp_async_setup_verdict_event_handler(void) +{ + if (comm_socket < 0) { + write_dbg(DBG_LEVEL_ERROR, "Cannot set up verdict event handler - comm_socket still not available after re-initialization"); + return NGX_ERROR; + } + + // If there is a stale managed connection, free it before recreating + if (ipc_verdict_conn) { + write_dbg(DBG_LEVEL_INFO, "Cleaning up stale verdict event handler (fd: %d)", ipc_verdict_conn->fd); + if (ipc_verdict_conn->read && ipc_verdict_conn->read->active) { + ngx_del_event(ipc_verdict_conn->read, NGX_READ_EVENT, 0); + } + ngx_free_connection(ipc_verdict_conn); + ipc_verdict_conn = NULL; + } + + write_dbg(DBG_LEVEL_DEBUG, "Setting up verdict event handler for comm_socket=%d", comm_socket); + + if (ngx_nonblocking(comm_socket) != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "Failed to set comm_socket nonblocking"); + return NGX_ERROR; + } + + ngx_connection_t *c = ngx_get_connection(comm_socket, ngx_cycle ? 
ngx_cycle->log : NULL); + if (c == NULL) { + write_dbg(DBG_LEVEL_ERROR, "Failed to get NGINX connection for verdict socket %d", comm_socket); + return NGX_ERROR; + } + + // Initialize the read event on this connection + ngx_event_t *rev = c->read; + rev->handler = ngx_cp_async_verdict_event_handler; + rev->data = c; + rev->log = ngx_cycle ? ngx_cycle->log : NULL; + + if (ngx_add_event(rev, NGX_READ_EVENT, 0) != NGX_OK) { + write_dbg(DBG_LEVEL_ERROR, "Failed to add verdict event to NGINX event system"); + ngx_free_connection(c); + return NGX_ERROR; + } + + ipc_verdict_conn = c; + write_dbg(DBG_LEVEL_INFO, "Added verdict notification socket %d to NGINX event system", comm_socket); + return NGX_OK; +} + +void +disable_ipc_verdict_event_handler() +{ + if (ipc_verdict_conn) { + if (ipc_verdict_conn->read && ipc_verdict_conn->read->active) { + ngx_del_event(ipc_verdict_conn->read, NGX_READ_EVENT, 0); + write_dbg(DBG_LEVEL_INFO, "Disabled verdict event handler for socket %d", ipc_verdict_conn->fd); + } + ngx_free_connection(ipc_verdict_conn); + ipc_verdict_conn = NULL; + } + ngx_cp_async_reset_pending_chunks(); +} + +void +enable_ipc_verdict_event_handler() +{ + ngx_cp_async_setup_verdict_event_handler(); + ngx_cp_async_reset_pending_chunks(); +} + +ngx_int_t +ngx_cp_async_init() +{ + if (is_initialized) { + write_dbg(DBG_LEVEL_INFO, "Async system already initialized - skipping re-initialization"); + return NGX_OK; + } + is_initialized = 1; + ngx_uint_t i; + + write_dbg(DBG_LEVEL_INFO, "Initializing event-driven async system"); + + ngx_cp_async_init_timeout_config(); + + for (i = 0; i < CP_ASYNC_CTX_BUCKETS; i++) { + ctx_buckets[i] = NULL; + } + context_count = 0; + + ngx_cp_async_reset_pending_chunks(); + + // Initialize backpressure drain event + if (!backpressure_event_initialized) { + ngx_memzero(&backpressure_drain_event, sizeof(ngx_event_t)); + backpressure_drain_event.handler = ngx_cp_async_backpressure_drain_handler; + backpressure_drain_event.data = NULL; + 
backpressure_drain_event.log = NULL; + backpressure_event_initialized = 1; + write_dbg(DBG_LEVEL_INFO, "ASYNC_BACKPRESSURE: Initialized drain event with threshold %d", async_backpressure_threshold); + } + + // Initialize global epoll instance for backpressure handling (created once, reused) + if (g_backpressure_epoll_fd < 0) { + g_backpressure_epoll_fd = epoll_create1(EPOLL_CLOEXEC); + if (g_backpressure_epoll_fd < 0) { + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: Failed to create global epoll fd: %s", strerror(errno)); + } else { + write_dbg(DBG_LEVEL_INFO, "ASYNC_BACKPRESSURE: Created global epoll fd %d", g_backpressure_epoll_fd); + } + g_backpressure_registered_socket = -1; + } + + // Ensure no stale managed connection remains from previous cycles + if (ipc_verdict_conn) { + if (ipc_verdict_conn->read && ipc_verdict_conn->read->active) { + ngx_del_event(ipc_verdict_conn->read, NGX_READ_EVENT, 0); + } + ngx_free_connection(ipc_verdict_conn); + ipc_verdict_conn = NULL; + } + + write_dbg(DBG_LEVEL_DEBUG, "ASYNC INIT: comm_socket=%d during initialization", comm_socket); + if (comm_socket >= 0) { + write_dbg(DBG_LEVEL_DEBUG, "Setting up comm socket %d for async notifications during init", comm_socket); + if (ngx_cp_async_setup_verdict_event_handler() == NGX_OK) { + write_dbg(DBG_LEVEL_DEBUG, "ASYNC INIT SUCCESS: Verdict event handler set up during initialization"); + } else { + write_dbg(DBG_LEVEL_WARNING, "ASYNC INIT WARNING: Failed to set up verdict event handler during initialization"); + } + } else { + write_dbg(DBG_LEVEL_WARNING, "Communication socket not available during async init - will set up later"); + } + + write_dbg(DBG_LEVEL_INFO, "Event-driven async system initialized successfully"); + return NGX_OK; +} + +void +ngx_cp_async_cleanup() +{ + ngx_uint_t i; + ngx_http_cp_async_ctx_t *ctx, *next; + + write_dbg(DBG_LEVEL_DEBUG, "Cleaning up event-driven async system"); + + if (ipc_verdict_conn) { + if (ipc_verdict_conn->read && 
ipc_verdict_conn->read->active) { + ngx_del_event(ipc_verdict_conn->read, NGX_READ_EVENT, 0); + } + ngx_free_connection(ipc_verdict_conn); + ipc_verdict_conn = NULL; + write_dbg(DBG_LEVEL_DEBUG, "Removed verdict event/connection from event system"); + } + + for (i = 0; i < CP_ASYNC_CTX_BUCKETS; i++) { + ctx = ctx_buckets[i]; + while (ctx != NULL) { + next = ctx->map_next; + + if (ngx_cp_async_ctx_is_valid(ctx)) { + write_dbg( + DBG_LEVEL_INFO, + "Releasing pending connection for session %d in stage %d during cleanup", + ctx->session_id, + ctx->stage + ); + + ctx->session_data->verdict = fail_mode_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + ctx->session_data->remaining_messages_to_reply = 0; + ctx->session_data->async_processing_needed = 0; + ctx->header_declined = 1; + ctx->req_seq = 0; + + if (ngx_cp_async_ctx_get_stage_safe(ctx) != NGX_CP_ASYNC_STAGE_BODY) { + ctx->flow_error = 1; + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + } + + write_dbg( + DBG_LEVEL_INFO, + "Releasing session %d with verdict %d (fail_mode=%s)", + ctx->session_id, + ctx->session_data->verdict, + fail_mode_verdict == NGX_OK ? 
"open" : "closed" + ); + + ngx_cp_async_event_handler(&ctx->agent_event); + ctx->flow_error = 1; + } else { + write_dbg(DBG_LEVEL_DEBUG, "Cleaning up async context for session %d", ctx->session_id); + SAFE_DESTROY_CTX(ctx); + } + ctx = next; + } + } + context_count = 0; + + ngx_cp_async_reset_pending_chunks(); + + if (backpressure_event_initialized) { + if (backpressure_drain_event.posted) { + ngx_delete_posted_event(&backpressure_drain_event); + } + backpressure_event_initialized = 0; + write_dbg(DBG_LEVEL_DEBUG, "ASYNC_BACKPRESSURE: Cleaned up drain event"); + } + + // Clean up global epoll instance + if (g_backpressure_epoll_fd >= 0) { + close(g_backpressure_epoll_fd); + g_backpressure_epoll_fd = -1; + g_backpressure_registered_socket = -1; + write_dbg(DBG_LEVEL_DEBUG, "ASYNC_BACKPRESSURE: Closed global epoll fd"); + } + + is_initialized = 0; + write_dbg(DBG_LEVEL_DEBUG, "Event-driven async system cleanup complete"); +} + +ngx_http_cp_async_ctx_t * +ngx_cp_async_create_ctx(ngx_http_request_t *request, ngx_http_cp_session_data *session_data) +{ + ngx_http_cp_async_ctx_t *ctx; + ngx_pool_cleanup_t *cln; + + if (request == NULL || session_data == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Invalid parameters for async context creation"); + return NULL; + } + + write_dbg(DBG_LEVEL_DEBUG, "Creating simplified async context for session %d", session_data->session_id); + + ctx = ngx_pcalloc(request->pool, sizeof(ngx_http_cp_async_ctx_t)); + if (ctx == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to allocate async context"); + return NULL; + } + + // Initialize context with minimal fields + ctx->request = request; + ctx->session_data = session_data; + ctx->session_id = session_data->session_id; + ctx->stage = NGX_CP_ASYNC_STAGE_INIT; + ctx->modifications = NULL; + ctx->map_next = NULL; // Initialize hash chain pointer + + // Initialize unbuffered body processing fields + ctx->req_seen_last = 0; + ctx->req_seq = 0; + ctx->waiting = 0; + ctx->released = 0; + 
ctx->body_phase_started = 0; + ctx->queue_head = NULL; + ctx->queue_tail = NULL; + + ctx->meta_data_sent = 0; + ctx->headers_sent = 0; + ctx->end_transaction_sent = 0; + ctx->header_declined = 0; + + ctx->waf_tag.data = NULL; + ctx->waf_tag.len = 0; + ctx->first_wait_verdict_encountered = 0; + ctx->flow_error = 0; + ctx->request_ref_incremented = 0; + + ngx_memzero(&ctx->agent_event, sizeof(ngx_event_t)); + ctx->agent_event.handler = ngx_cp_async_event_handler; + ctx->agent_event.data = ctx; + ctx->agent_event.log = request->connection->log; + + ngx_memzero(&ctx->cleanup_event, sizeof(ngx_event_t)); + ctx->cleanup_event.handler = ngx_cp_async_context_cleanup_handler; + ctx->cleanup_event.data = ctx; + ctx->cleanup_event.log = request->connection->log; + + ngx_memzero(&ctx->resume_event, sizeof(ngx_event_t)); + ctx->resume_event.handler = cp_async_resume_event_handler; + ctx->resume_event.data = ctx; + ctx->resume_event.log = request->connection->log; + + cln = ngx_pool_cleanup_add(request->pool, 0); + if (cln == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to add cleanup handler for async context"); + return NULL; + } + + cln->handler = (ngx_pool_cleanup_pt) ngx_cp_async_destroy_ctx; + cln->data = ctx; + + clock_gettime(CLOCK_REALTIME, &ctx->start_time); + + ngx_add_timer(&ctx->cleanup_event, async_context_cleanup_timeout_ms); + + write_dbg(DBG_LEVEL_DEBUG, "Simplified async context created successfully for session %d with %dms cleanup timeout", + session_data->session_id, async_context_cleanup_timeout_ms); + + write_dbg(DBG_LEVEL_DEBUG, "Context cleanup timer started for session %d - will trigger in %dms", + session_data->session_id, async_context_cleanup_timeout_ms); + + return ctx; +} + +void +ngx_cp_async_destroy_ctx(ngx_http_cp_async_ctx_t *ctx) +{ + uint32_t session_id; + + if (ctx == NULL) { + return; + } + + session_id = ctx->session_id; + + write_dbg(DBG_LEVEL_DEBUG, "Destroying async context for session %d", session_id); + + // Nullify all event data 
references first + ngx_cp_async_nullify_ctx_refs(ctx); + + if (ctx->agent_event.timer_set) { + ngx_del_timer(&ctx->agent_event); + } + + if (ctx->cleanup_event.timer_set) { + ngx_del_timer(&ctx->cleanup_event); + } + if (ctx->resume_event.timer_set) { + ngx_del_timer(&ctx->resume_event); + } + if (ctx->agent_event.posted) { + ngx_delete_posted_event(&ctx->agent_event); + } + if (ctx->cleanup_event.posted) { + ngx_delete_posted_event(&ctx->cleanup_event); + } + if (ctx->resume_event.posted) { + ngx_delete_posted_event(&ctx->resume_event); + } + + ngx_cp_async_cancel_deadline_timer(ctx); + ngx_cp_async_remove_ctx(ctx); + + write_dbg(DBG_LEVEL_DEBUG, "Simplified async context destroyed for session %d", session_id); +} + +ngx_http_cp_async_ctx_t * +ngx_cp_async_find_ctx(uint32_t session_id) +{ + ngx_uint_t bucket = cp_async_ctx_hash(session_id); + ngx_http_cp_async_ctx_t *ctx = ctx_buckets[bucket]; + + while (ctx != NULL) { + if (ctx->session_id == session_id) { + return ctx; + } + ctx = ctx->map_next; + } + + return NULL; +} + +ngx_int_t +ngx_cp_async_add_ctx(ngx_http_cp_async_ctx_t *ctx) +{ + ngx_uint_t bucket; + + if (ctx == NULL) { + return NGX_ERROR; + } + + bucket = cp_async_ctx_hash(ctx->session_id); + + ctx->map_next = ctx_buckets[bucket]; + + ctx_buckets[bucket] = ctx; + context_count++; + + write_dbg(DBG_LEVEL_DEBUG, "Added async context to hash bucket %d for session %d", bucket, ctx->session_id); + return NGX_OK; +} + +void +ngx_cp_async_remove_ctx(ngx_http_cp_async_ctx_t *ctx) +{ + ngx_uint_t bucket; + ngx_http_cp_async_ctx_t *current, *prev; + + if (ctx == NULL) { + return; + } + + bucket = cp_async_ctx_hash(ctx->session_id); + current = ctx_buckets[bucket]; + prev = NULL; + + while (current != NULL) { + if (current == ctx) { + if (prev == NULL) { + ctx_buckets[bucket] = current->map_next; + } else { + prev->map_next = current->map_next; + } + context_count--; + write_dbg(DBG_LEVEL_DEBUG, "Removed async context from hash bucket %d for session %d", bucket, 
ctx->session_id); + return; + } + prev = current; + current = current->map_next; + } +} + +/// +/// @brief Generic wait verdict handler for all wait stages +/// @param[in] ctx Async context +/// @param[in] stage_name Stage name for logging +/// @return NGX_OK, NGX_AGAIN, NGX_HTTP_FORBIDDEN, or NGX_ERROR +/// +static ngx_int_t +ngx_cp_async_handle_wait_verdict(ngx_http_cp_async_ctx_t *ctx, const char *stage_name) +{ + ngx_uint_t num_messages_sent = 0; + ngx_int_t rc; + uint32_t session_id; + ngx_http_cp_session_data *session_data; + + session_id = ngx_cp_async_ctx_get_session_id_safe(ctx); + if (session_id == 0) { + write_dbg(DBG_LEVEL_ERROR, "Handle wait verdict: invalid session ID for %s", stage_name); + return NGX_ERROR; + } + + // Check for flow error before processing + if (ngx_cp_async_ctx_get_flow_error_safe(ctx)) { + write_dbg(DBG_LEVEL_ERROR, "Flow error flag set, skipping wait verdict for %s", stage_name); + return NGX_ERROR; + } + + session_data = ngx_cp_async_ctx_get_session_data_safe(ctx); + if (session_data == NULL) { + write_dbg(DBG_LEVEL_ERROR, "Handle wait verdict: invalid session data for %s session %d", stage_name, session_id); + return NGX_ERROR; + } + + write_dbg( + DBG_LEVEL_DEBUG, + "Polling for %s verdict update for session %d", + stage_name, + session_id + ); + + if (session_data->verdict != TRAFFIC_VERDICT_DELAYED) { + write_dbg( + DBG_LEVEL_DEBUG, + "%s verdict resolved to %d for session %d - processing immediately", + stage_name, + session_data->verdict, + session_id + ); + + ngx_cp_async_transition_after_wait(ctx); + ngx_cp_async_event_handler(&ctx->agent_event); + return NGX_AGAIN; + } + + write_dbg( + DBG_LEVEL_DEBUG, + "%s verdict still WAIT for session %d - continuing to wait", + stage_name, + session_id + ); + + rc = ngx_cp_async_wait_signal_sender(ctx, &num_messages_sent); + if (rc != NGX_OK && rc != NGX_HTTP_REQUEST_TIME_OUT) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send %s wait signal for session %d", stage_name, session_id); 
+ ctx->flow_error = 1; + session_data->verdict = fail_mode_hold_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + updateMetricField(HOLD_THREAD_TIMEOUT, 1); + session_data->async_processing_needed = 0; + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + ngx_cp_async_event_handler(&ctx->agent_event); + return NGX_DECLINED; + } + + ngx_add_timer(&ctx->agent_event, async_wait_verdict_timeout_ms); + + write_dbg( + DBG_LEVEL_DEBUG, + "Scheduled next %s verdict check in %dms for session %d", + stage_name, + async_wait_verdict_timeout_ms, + session_id + ); + + return NGX_AGAIN; +} + +/// +/// @brief Helper function to transition to next stage after wait verdict resolves +/// @param[in] ctx Async context +/// +static void +ngx_cp_async_transition_after_wait(ngx_http_cp_async_ctx_t *ctx) +{ + uint32_t session_id; + ngx_http_cp_session_data *session_data; + ngx_cp_async_stage_t stage; + + session_id = ngx_cp_async_ctx_get_session_id_safe(ctx); + if (session_id == 0) { + write_dbg(DBG_LEVEL_ERROR, "Transition after wait: invalid session ID"); + return; + } + + if (ngx_cp_async_ctx_get_flow_error_safe(ctx)) { + write_dbg(DBG_LEVEL_ERROR, "Flow error flag set, skipping transition after wait"); + return; + } + + session_data = ngx_cp_async_ctx_get_session_data_safe(ctx); + if (session_data == NULL) { + write_dbg(DBG_LEVEL_ERROR, "Transition after wait: invalid session data for session %d", session_id); + return; + } + + if (session_data->verdict == TRAFFIC_VERDICT_DELAYED) { + write_dbg(DBG_LEVEL_DEBUG, "Still in WAIT verdict - no stage transition for session %d", session_id); + return; + } + + stage = ngx_cp_async_ctx_get_stage_safe(ctx); + switch (stage) { + case NGX_CP_ASYNC_STAGE_WAIT_HEADER_VERDICT: + if (is_verdict_drop_or_custom(session_data->verdict)) { + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + } else { + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_END_TRANSACTION); + } + break; + + case 
NGX_CP_ASYNC_STAGE_WAIT_END_VERDICT: + if (ctx->body_phase_started) { + write_dbg(DBG_LEVEL_DEBUG, "Transitioning to BODY stage for session %d", session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_BODY); + } else { + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + } + break; + + case NGX_CP_ASYNC_STAGE_WAIT_BODY_VERDICT: + if (is_verdict_drop_or_custom(session_data->verdict)) { + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + } else { + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_BODY); + } + break; + default: + write_dbg(DBG_LEVEL_WARNING, "Unknown wait stage %d for session %d", stage, session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + break; + } + + write_dbg( + DBG_LEVEL_DEBUG, + "Transitioned to stage %d after wait verdict for session %d", + ngx_cp_async_ctx_get_stage_safe(ctx), + session_id + ); +} + +void +ngx_cp_async_event_handler(ngx_event_t *ev) +{ + ngx_http_cp_async_ctx_t *ctx; + ngx_int_t rc; + uint32_t session_id; + + ctx = (ngx_http_cp_async_ctx_t *) ev->data; + + // Validate context before any access + if (!ngx_cp_async_ctx_is_valid(ctx)) { + write_dbg(DBG_LEVEL_WARNING, "Event handler called with invalid/destroyed context - ignoring"); + return; + } + + session_id = ngx_cp_async_ctx_get_session_id_safe(ctx); + if (session_id == 0) { + write_dbg(DBG_LEVEL_WARNING, "Event handler: invalid session ID - ignoring"); + return; + } + + write_dbg( + DBG_LEVEL_DEBUG, + "=== EVENT HANDLER: SESSION %u, STAGE %d (load: %d) ===", + session_id, + (int)ngx_cp_async_ctx_get_stage_safe(ctx), + context_count + ); + + set_current_session_id(session_id); + rc = ngx_cp_async_continue_processing(ctx); + write_dbg(DBG_LEVEL_DEBUG, "Continue processing returned: %d for session %d", rc, session_id); + + if (rc != NGX_AGAIN) { + write_dbg(DBG_LEVEL_DEBUG, "Async processing complete for session %d with rc: %d - finalizing request", session_id, rc); + + ngx_http_request_t *request = 
ngx_cp_async_ctx_get_request_safe(ctx); + if (request == NULL) { + write_dbg(DBG_LEVEL_DEBUG, "Event handler: invalid request for session %d - cannot finalize", session_id); + return; + } + + ngx_http_cp_session_data *session_data = ngx_cp_async_ctx_get_session_data_safe(ctx); + if (session_data == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Event handler: invalid session data for session %d - cannot finalize", session_id); + return; + } + + if (is_verdict_drop_or_custom(session_data->verdict)) { + write_dbg( + DBG_LEVEL_DEBUG, + "Request BLOCKED - finalizing with status %d for session %d", + NGX_HTTP_FORBIDDEN, + session_id + ); + request->keepalive = 0; + SAFE_DESTROY_CTX(ctx); + ctx = NULL; + ngx_http_cp_finalize_rejected_request(request, 0); + } else if (rc == NGX_DECLINED) { + ngx_cp_async_cancel_deadline_timer(ctx); + ngx_int_t is_waiting = ctx->waiting; + if (ngx_cp_async_ctx_get_stage_safe(ctx) == NGX_CP_ASYNC_STAGE_COMPLETE) { + write_dbg(DBG_LEVEL_DEBUG, "Cleaning up async context for session %d", session_id); + SAFE_DESTROY_CTX(ctx); + ctx = NULL; + } + write_dbg(DBG_LEVEL_DEBUG, "Resuming session %d", session_id); + if (is_waiting) { + if (ctx) { + ctx->waiting = 0; + } + + ngx_http_core_run_phases(request); + } + return; + } + } else { + write_dbg(DBG_LEVEL_DEBUG, "Processing yielded - waiting for next event for session %d", session_id); + } + + write_dbg(DBG_LEVEL_DEBUG, "=== EVENT HANDLER COMPLETE FOR SESSION %d ===", session_id); +} + +/// +/// @brief Deadline timeout handler - triggers fail-safe when stage deadline is exceeded +/// @param[in] ev Deadline timeout event +/// +static void +ngx_cp_async_deadline_handler(ngx_event_t *ev) +{ + ngx_http_cp_async_ctx_t *ctx; + const char *stage_name; + ngx_int_t verdict_to_apply; + ngx_int_t fail_mode_to_use; + uint32_t session_id; + ngx_cp_async_stage_t stage; + + ctx = (ngx_http_cp_async_ctx_t *) ev->data; + + if (!ngx_cp_async_ctx_is_valid(ctx)) { + write_dbg(DBG_LEVEL_WARNING, "Deadline timer fired for 
invalid/destroyed context - ignoring stale timer"); + return; + } + + session_id = ngx_cp_async_ctx_get_session_id_safe(ctx); + stage = ngx_cp_async_ctx_get_stage_safe(ctx); + + if (stage == NGX_CP_ASYNC_STAGE_COMPLETE) { + write_dbg(DBG_LEVEL_WARNING, "Deadline timer fired for session %d already in COMPLETE stage - ignoring stale timer", session_id); + return; + } + + if (ngx_cp_async_ctx_get_flow_error_safe(ctx)) { + write_dbg(DBG_LEVEL_WARNING, "Deadline timer fired for session %d with flow error flag set - ignoring", session_id); + return; + } + + switch (stage) { + case NGX_CP_ASYNC_STAGE_META_DATA: + stage_name = "META_DATA"; + fail_mode_to_use = fail_mode_verdict; + break; + case NGX_CP_ASYNC_STAGE_HEADERS: + case NGX_CP_ASYNC_STAGE_WAIT_HEADER_VERDICT: + stage_name = "HEADERS"; + fail_mode_to_use = (stage == NGX_CP_ASYNC_STAGE_WAIT_HEADER_VERDICT) ? + fail_mode_hold_verdict + : fail_mode_verdict; + break; + case NGX_CP_ASYNC_STAGE_END_TRANSACTION: + case NGX_CP_ASYNC_STAGE_WAIT_END_VERDICT: + stage_name = "END_TRANSACTION"; + fail_mode_to_use = (stage == NGX_CP_ASYNC_STAGE_WAIT_END_VERDICT) ? + fail_mode_hold_verdict + : fail_mode_verdict; + break; + case NGX_CP_ASYNC_STAGE_BODY: + stage_name = "BODY"; + fail_mode_to_use = fail_mode_verdict; + break; + case NGX_CP_ASYNC_STAGE_WAIT_BODY_VERDICT: + stage_name = "BODY"; + fail_mode_to_use = fail_mode_hold_verdict; + break; + default: + stage_name = "UNKNOWN"; + fail_mode_to_use = fail_mode_verdict; + break; + } + + verdict_to_apply = fail_mode_to_use == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + + write_dbg( + DBG_LEVEL_WARNING, + "DEADLINE EXCEEDED: %s stage timeout for session %d - applying fail-safe verdict %d (fail_mode=%s)", + stage_name, + session_id, + verdict_to_apply, + (fail_mode_to_use == fail_mode_hold_verdict) ? 
"hold" : "regular" + ); + + ngx_http_cp_session_data *session_data = ngx_cp_async_ctx_get_session_data_safe(ctx); + if (session_data != NULL) { + session_data->verdict = verdict_to_apply; + session_data->remaining_messages_to_reply = 0; + session_data->async_processing_needed = 0; + } + + ctx->header_declined = 1; + updateMetricField(REQ_HEADER_THREAD_TIMEOUT, 1); + + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + write_dbg( + DBG_LEVEL_DEBUG, + "Deadline timeout: forcing completion for session %d with fail-safe verdict %d", + session_id, + verdict_to_apply + ); + + ngx_cp_async_event_handler(&ctx->agent_event); +} + +/// +/// @brief Start deadline timer for current stage with specified timeout +/// @param[in] ctx Async context +/// @param[in] timeout_ms Timeout in milliseconds +/// @return NGX_OK on success, NGX_ERROR on failure +/// +ngx_int_t +ngx_cp_async_start_deadline_timer(ngx_http_cp_async_ctx_t *ctx, ngx_msec_t timeout_ms) +{ + if (ctx->deadline_event.timer_set) { + ngx_del_timer(&ctx->deadline_event); + } + + if (ctx->deadline_event.handler == NULL) { + ctx->deadline_event.handler = ngx_cp_async_deadline_handler; + ctx->deadline_event.data = ctx; + ctx->deadline_event.log = ctx->request->connection->log; + } + + ngx_add_timer(&ctx->deadline_event, timeout_ms); + + write_dbg( + DBG_LEVEL_DEBUG, "Started deadline timer: %dms for session %d stage %d", + timeout_ms, + ctx->session_id, ctx->stage + ); + + return NGX_OK; +} + +/// +/// @brief Cancel deadline timer +/// @param[in] ctx Async context +/// +static void +ngx_cp_async_cancel_deadline_timer(ngx_http_cp_async_ctx_t *ctx) +{ + if (ctx->deadline_event.timer_set) { + ngx_del_timer(&ctx->deadline_event); + write_dbg(DBG_LEVEL_DEBUG, "Cancelled deadline timer for session %d", ctx->session_id); + } +} + +/// +/// @brief Context cleanup timeout handler - automatically destroys stale contexts +/// @details Automatically destroys contexts that remain active beyond the cleanup timeout +/// to 
prevent memory leaks and dangling pointers +/// @param[in] ev Cleanup timeout event +/// +static void +ngx_cp_async_context_cleanup_handler(ngx_event_t *ev) +{ + ngx_http_cp_async_ctx_t *ctx; + struct timespec current_time; + ngx_uint_t age_seconds; + uint32_t session_id; + ngx_cp_async_stage_t stage; + + ctx = (ngx_http_cp_async_ctx_t *) ev->data; + + if (!ngx_cp_async_ctx_is_valid(ctx)) { + write_dbg(DBG_LEVEL_WARNING, "Context cleanup timer fired for invalid/destroyed context - ignoring stale timer"); + return; + } + + session_id = ngx_cp_async_ctx_get_session_id_safe(ctx); + stage = ngx_cp_async_ctx_get_stage_safe(ctx); + + if (stage == NGX_CP_ASYNC_STAGE_COMPLETE) { + write_dbg(DBG_LEVEL_DEBUG, "Context cleanup timer fired for session %d already in COMPLETE stage - allowing normal cleanup", session_id); + return; + } + + clock_gettime(CLOCK_REALTIME, &current_time); + age_seconds = (ngx_uint_t)(current_time.tv_sec - ctx->start_time.tv_sec); + + write_dbg( + DBG_LEVEL_WARNING, + "CONTEXT CLEANUP TIMEOUT: Session %d has been active for %d seconds (stage: %d) - forcing cleanup to prevent memory leak", + session_id, + age_seconds, + stage + ); + + ctx->flow_error = 1; + + ngx_http_cp_session_data *session_data = ngx_cp_async_ctx_get_session_data_safe(ctx); + if (session_data != NULL) { + session_data->verdict = fail_mode_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + session_data->remaining_messages_to_reply = 0; + session_data->async_processing_needed = 0; + } + + ctx->header_declined = 1; + + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + + write_dbg( + DBG_LEVEL_WARNING, + "Context cleanup timeout: forcing completion for session %d after %d seconds with fail-safe verdict %d", + session_id, + age_seconds, + session_data ? 
(int)session_data->verdict : -1 + ); + + // Trigger final cleanup through event handler + ngx_cp_async_event_handler(&ctx->agent_event); +} + +static void +ngx_cp_async_verdict_event_handler(ngx_event_t *ev) +{ + ngx_uint_t verdicts_processed = 0; + ssize_t socket_bytes_drained; + (void) ev; // Unused parameter + + if (!is_async_mode_enabled) { + write_dbg(DBG_LEVEL_INFO, "VERDICT_EVENT: Async mode disabled - cleaning up verdict event handler"); + if (ipc_verdict_conn) { + write_dbg(DBG_LEVEL_INFO, "Cleaning up stale verdict event handler (fd: %d)", ipc_verdict_conn->fd); + if (ipc_verdict_conn->read && ipc_verdict_conn->read->active) { + ngx_del_event(ipc_verdict_conn->read, NGX_READ_EVENT, 0); + } + ngx_free_connection(ipc_verdict_conn); + ipc_verdict_conn = NULL; + } + return; + } + + // Fully drain the socket (EPOLLET-correct: must drain until EAGAIN) + // Socket is just a "doorbell" - actual data is in shared memory + socket_bytes_drained = drain_comm_socket_fully(comm_socket); + if (socket_bytes_drained == -1) { + write_dbg(DBG_LEVEL_ERROR, "VERDICT_EVENT: Agent disconnected - cleaning up verdict event handler"); + disable_ipc_verdict_event_handler(); + ngx_cp_async_cleanup(); + return; + } + + write_dbg( + DBG_LEVEL_DEBUG, + "VERDICT_EVENT: Socket notification received, drained %zd bytes - processing IPC queue", + socket_bytes_drained + ); + + if (drain_ipc_queue(&verdicts_processed) != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "VERDICT_EVENT: IPC queue drain failed"); + return; + } + + write_dbg( + DBG_LEVEL_DEBUG, + "=== VERDICT EVENT HANDLER COMPLETE: processed %d verdicts, pending chunks: %d ===", + verdicts_processed, + pending_inspection_chunks + ); +} + +/// +/// @brief Drain IPC queue aggressively in tight loop +/// @param[out] verdicts_drained Counter for drained verdicts +/// @return NGX_OK on success +/// +static ngx_int_t +drain_ipc_queue(ngx_uint_t *verdicts_drained) +{ + ngx_http_cp_async_ctx_t *ctx; + HttpReplyFromService *reply_p; + const char 
*reply_data; + uint16_t reply_size; + ngx_int_t res; + + // Tight loop - drain all available IPC data in one go + while (nano_service_ipc && isDataAvailable(nano_service_ipc)) { + res = receiveData(nano_service_ipc, &reply_size, &reply_data); + if (res < 0 || reply_data == NULL) { + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: Failed to receive verdict data from IPC"); + return NGX_ERROR; + } + + reply_p = (HttpReplyFromService *)reply_data; + (*verdicts_drained)++; + + if (reply_p->verdict == TRAFFIC_VERDICT_RECONF) { + write_dbg(DBG_LEVEL_WARNING, "Received reconf verdict"); + popData(nano_service_ipc); + reset_attachment_config(); + continue; + } + + ctx = ngx_cp_async_find_ctx(reply_p->session_id); + if (ctx == NULL) { + popData(nano_service_ipc); + ngx_cp_async_decrement_pending_chunks(reply_p->session_id, "backpressure_orphaned"); + continue; + } + + if (!ngx_cp_async_ctx_is_valid(ctx)) { + popData(nano_service_ipc); + ngx_cp_async_decrement_pending_chunks(reply_p->session_id, "backpressure_invalid_ctx"); + continue; + } + + if (ngx_cp_async_ctx_get_flow_error_safe(ctx)) { + popData(nano_service_ipc); + ngx_cp_async_decrement_pending_chunks(reply_p->session_id, "backpressure_flow_error"); + continue; + } + + res = ngx_cp_async_apply_verdict(ctx, reply_p); + if (popData(nano_service_ipc) != 0) { + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: Failed to pop verdict data from IPC"); + return NGX_ERROR; + } + ngx_cp_async_decrement_pending_chunks(reply_p->session_id, "backpressure_drained"); + + if (res != NGX_OK) { + write_dbg(DBG_LEVEL_DEBUG, "ASYNC_BACKPRESSURE: Failed to apply verdict for session %d", reply_p->session_id); + } + } + + return NGX_OK; +} + +/// +/// @brief Drain comm socket completely (non-blocking, edge-triggered correct) +/// @param[in] sock Socket to drain +/// @return Number of bytes drained, or -1 if socket disconnected +/// +static ssize_t +drain_comm_socket_fully(int sock) +{ + static char drain_buf[4096]; + ssize_t 
total_drained = 0; + ssize_t n; + + while ((n = read(sock, drain_buf, sizeof(drain_buf))) > 0) { + total_drained += n; + } + + // Check for disconnect (EOF) + if (n == 0) { + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: Socket disconnected (EOF) - agent connection lost"); + return -1; // Signal disconnect to caller + } + + // n < 0: either EAGAIN (expected) or error + if (n < 0 && errno != EAGAIN && errno != EWOULDBLOCK) { + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: Socket drain error: %s", strerror(errno)); + } + + return total_drained; +} + +/// +/// @brief Backpressure drain event handler - actively waits for and processes all pending verdicts +/// @details Optimized with: +/// - Reusable global epoll fd (no create/close overhead) +/// - Non-blocking socket with full drain (EPOLLET-correct) +/// - Minimal epoll_ctl calls (only when socket changes) +/// - Aggressive IPC queue draining in tight loops +/// - Large socket drain buffer (4KB) +/// @param[in] ev Posted event (unused) +/// +static void +ngx_cp_async_backpressure_drain_handler(ngx_event_t *ev) +{ + ngx_uint_t verdicts_drained = 0; + ngx_uint_t initial_pending_chunks = pending_inspection_chunks; + struct epoll_event epoll_event; + struct epoll_event events[1]; + int epoll_result; + ngx_uint_t iterations = 0; + const int epoll_timeout_ms = 50; + const ngx_uint_t max_drain_iterations = 100; + + (void)ev; // Unused parameter + + write_dbg( + DBG_LEVEL_DEBUG, + "ASYNC_BACKPRESSURE: Starting drain handler with %d pending chunks", + pending_inspection_chunks + ); + + if (nano_service_ipc == NULL) { + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: IPC not available - aborting drain"); + return; + } + + if (comm_socket < 0) { + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: comm_socket invalid - aborting drain"); + return; + } + + // Use global epoll instance (already created in init) + if (g_backpressure_epoll_fd < 0) { + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: Global epoll fd not 
initialized - falling back to immediate drain"); + goto drain_immediate; + } + + // Only call epoll_ctl when the socket changes (avoids repeated syscalls) + if (g_backpressure_registered_socket != comm_socket) { + // Remove old socket if any + if (g_backpressure_registered_socket >= 0) { + epoll_ctl(g_backpressure_epoll_fd, EPOLL_CTL_DEL, g_backpressure_registered_socket, NULL); + write_dbg(DBG_LEVEL_DEBUG, "ASYNC_BACKPRESSURE: Removed old socket %d from epoll", g_backpressure_registered_socket); + } + + // Add new socket + epoll_event.events = EPOLLIN | EPOLLET; + epoll_event.data.fd = comm_socket; + if (epoll_ctl(g_backpressure_epoll_fd, EPOLL_CTL_ADD, comm_socket, &epoll_event) < 0) { + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: Failed to add comm_socket to epoll: %s", strerror(errno)); + g_backpressure_registered_socket = -1; + goto drain_immediate; + } + + g_backpressure_registered_socket = comm_socket; + write_dbg(DBG_LEVEL_DEBUG, "ASYNC_BACKPRESSURE: Registered socket %d with epoll fd %d", comm_socket, g_backpressure_epoll_fd); + } + + while (pending_inspection_chunks > 0 || iterations < max_drain_iterations) { + iterations++; + + if (drain_ipc_queue(&verdicts_drained) != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: IPC drain failed at iteration %d", iterations); + break; + } + + if (pending_inspection_chunks == 0) { + write_dbg(DBG_LEVEL_DEBUG, "ASYNC_BACKPRESSURE: All pending chunks processed after %d iterations", iterations); + break; + } + + write_dbg( + DBG_LEVEL_DEBUG, + "ASYNC_BACKPRESSURE: Waiting for more verdicts (iteration %d, pending: %d)...", + iterations, + pending_inspection_chunks + ); + + epoll_result = epoll_wait(g_backpressure_epoll_fd, events, 1, epoll_timeout_ms); + if (epoll_result < 0) { + if (errno == EINTR) { + write_dbg(DBG_LEVEL_DEBUG, "ASYNC_BACKPRESSURE: epoll_wait interrupted, continuing"); + continue; + } + write_dbg(DBG_LEVEL_WARNING, "ASYNC_BACKPRESSURE: epoll_wait failed: %s", strerror(errno)); + 
break; + } else if (epoll_result == 0) { + write_dbg( + DBG_LEVEL_DEBUG, + "ASYNC_BACKPRESSURE: epoll_wait timeout after %dms (iteration %d, pending: %d)", + epoll_timeout_ms, + iterations, + pending_inspection_chunks + ); + break; + } else { + ssize_t drained_bytes = drain_comm_socket_fully(comm_socket); + if (drained_bytes == -1) { + write_dbg( + DBG_LEVEL_ERROR, + "ASYNC_BACKPRESSURE: Agent disconnected during backpressure drain (iteration %d) - aborting", + iterations + ); + disable_ipc_verdict_event_handler(); + ngx_cp_async_cleanup(); + return; + } + write_dbg( + DBG_LEVEL_DEBUG, + "ASYNC_BACKPRESSURE: Socket notification received, drained %zd bytes (iteration %d)", + drained_bytes, + iterations + ); + } + } + + write_dbg( + DBG_LEVEL_DEBUG, + "ASYNC_BACKPRESSURE: Drain complete after %d iterations - processed %d verdicts, pending chunks: %d -> %d", + iterations, + verdicts_drained, + initial_pending_chunks, + pending_inspection_chunks + ); + return; + +drain_immediate: + write_dbg(DBG_LEVEL_DEBUG, "ASYNC_BACKPRESSURE: Immediate drain mode (no epoll)"); + drain_ipc_queue(&verdicts_drained); + + write_dbg( + DBG_LEVEL_WARNING, + "ASYNC_BACKPRESSURE: Immediate drain complete - processed %d verdicts, pending chunks: %d -> %d", + verdicts_drained, + initial_pending_chunks, + pending_inspection_chunks + ); +} + +ngx_int_t +ngx_cp_async_start_agent_communication(ngx_http_cp_async_ctx_t *ctx) +{ + ngx_int_t rc; + + write_dbg(DBG_LEVEL_DEBUG, "=== STARTING AGENT COMMUNICATION FOR SESSION %d ===", ctx->session_id); + + // Registration is now done synchronously in the handler, so go directly to meta data stage + write_dbg(DBG_LEVEL_DEBUG, "Registration completed synchronously - proceeding to meta data stage for session %d", ctx->session_id); + ctx->stage = NGX_CP_ASYNC_STAGE_META_DATA; + + if (handle_shmem_corruption() == NGX_ERROR) { + write_dbg(DBG_LEVEL_WARNING, "Shared memory corrupted for session %d", ctx->session_id); + ctx->flow_error = 1; + 
ctx->session_data->verdict = fail_mode_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + return fail_mode_verdict == NGX_OK ? NGX_DECLINED : fail_mode_verdict; + } + + rc = ngx_cp_async_continue_processing(ctx); + + write_dbg(DBG_LEVEL_DEBUG, "=== AGENT COMMUNICATION START COMPLETE - RC: %d ===", rc); + return rc; +} + +void +queue_free(ngx_http_request_t *r, ngx_http_cp_async_ctx_t *ctx) +{ + ngx_chain_t *cl = ctx->queue_head, *ln; + while (cl) { + ln = cl; + cl = cl->next; + ngx_free_chain(r->pool, ln); + } + ctx->queue_head = ctx->queue_tail = NULL; +} + +ngx_int_t +chain_add_copy(ngx_http_request_t *request, ngx_http_cp_async_ctx_t *ctx, ngx_chain_t *in) +{ + if (in == NULL) return NGX_OK; + + ngx_chain_t *cl, **ll = (ctx->queue_tail ? &ctx->queue_tail->next : &ctx->queue_head); + + /* copy links (NOT buffers) */ + if (ngx_chain_add_copy(request->pool, ll, in) != NGX_OK) { + return NGX_ERROR; + } + + if (ctx->queue_tail == NULL) { + for (cl = ctx->queue_head; cl && cl->next; cl = cl->next) { /* find tail */ + /* no-op */ + } + ctx->queue_tail = cl ? 
cl : ctx->queue_head; + } else { + for (cl = ctx->queue_tail; cl && cl->next; cl = cl->next) { + /* no-op */ + } + ctx->queue_tail = cl; + } + + return NGX_OK; +} + +// Event handler for resume processing +static void +cp_async_resume_event_handler(ngx_event_t *ev) +{ + ngx_http_cp_async_ctx_t *ctx = (ngx_http_cp_async_ctx_t *) ev->data; + ngx_http_request_t *r; + uint32_t session_id; + + if (!ngx_cp_async_ctx_is_valid(ctx)) { + write_dbg(DBG_LEVEL_WARNING, "Resume event handler called with invalid/destroyed context"); + return; + } + + session_id = ngx_cp_async_ctx_get_session_id_safe(ctx); + r = ngx_cp_async_ctx_get_request_safe(ctx); + + if (r == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Resume event handler: no request for session %d", session_id); + return; + } + + write_dbg(DBG_LEVEL_DEBUG, "Resume event handler for session %d", session_id); + + cp_async_posted_resume(r); +} + +// Called in event loop to resume the filter after agent signals "released" +static void +cp_async_posted_resume(ngx_http_request_t *r) +{ + ngx_http_cp_session_data *sd = recover_cp_session_data(r); + ngx_http_cp_async_ctx_t *ctx; + + if (sd == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Posted resume: no session data"); + return; + } + + ctx = ngx_cp_async_find_ctx(sd->session_id); + if (ctx == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Posted resume: no async context for session %d", sd->session_id); + return; + } + + write_dbg(DBG_LEVEL_DEBUG, "Posted resume for session %d", ctx->session_id); + + if (ctx->queue_head) { + write_dbg(DBG_LEVEL_DEBUG, "Posted resume: forwarding queued body for session %d", ctx->session_id); + ngx_int_t rc = ngx_http_next_request_body_filter(r, ctx->queue_head); + if (rc == NGX_AGAIN) { + write_dbg(DBG_LEVEL_WARNING, "Downstream busy during resume for session %d", ctx->session_id); + ngx_add_timer(&ctx->resume_event, 50); + return; + } else { + queue_free(r, ctx); + ctx->queue_head = ctx->queue_tail = NULL; + } + } + + if (ctx->released && ctx->waiting) { + 
write_dbg(DBG_LEVEL_DEBUG, "Posted resume: resuming session %d complete", ctx->session_id); + ctx->waiting = 0; + SAFE_DESTROY_CTX(ctx); + ngx_http_core_run_phases(r); + return; + } + + ngx_post_event(r->connection->read, &ngx_posted_events); +} + +static void +cp_async_post_request(ngx_http_cp_async_ctx_t *ctx) +{ + write_dbg(DBG_LEVEL_DEBUG, "Posting event for session %d", ctx->session_id); + ngx_post_event(&ctx->resume_event, &ngx_posted_events); +} + +/// @brief Checks if the given verdict is a final verdict (accept/drop/custom_response) +/// @param[in] verdict The verdict to check +/// @return 1 if verdict is final, 0 otherwise +static ngx_int_t +is_verdict_final(ServiceVerdict verdict) +{ + return (verdict == TRAFFIC_VERDICT_ACCEPT || + verdict == TRAFFIC_VERDICT_DROP || + verdict == TRAFFIC_VERDICT_CUSTOM_RESPONSE); +} + +/// +/// @brief Free modification list +/// @param[in] ctx Async context +/// +static void +ngx_cp_async_free_modification_list(ngx_http_cp_async_ctx_t *ctx) +{ + ngx_http_request_t *request; + ngx_http_cp_modification_list *current_modification; + + if (ctx == NULL || ctx->modifications == NULL) { + return; + } + + request = ngx_cp_async_ctx_get_request_safe(ctx); + if (request == NULL) { + return; + } + + while (ctx->modifications) { + current_modification = ctx->modifications; + ctx->modifications = ctx->modifications->next; + ngx_pfree(request->pool, current_modification->modification.data); + ngx_pfree(request->pool, current_modification); + } +} + +/// +/// @brief Check if verdict should be treated as drop (includes custom response) +/// @param[in] verdict The verdict to check +/// @return 1 if verdict should be treated as drop, 0 otherwise +/// +static ngx_int_t +is_verdict_drop_or_custom(ServiceVerdict verdict) +{ + return (verdict == TRAFFIC_VERDICT_DROP || verdict == TRAFFIC_VERDICT_CUSTOM_RESPONSE); +} + +static ngx_int_t +ngx_cp_async_apply_verdict_for_stage(ngx_http_cp_async_ctx_t *ctx) +{ + uint32_t session_id; + 
ngx_http_cp_session_data *session_data; + ngx_cp_async_stage_t stage; + + session_id = ngx_cp_async_ctx_get_session_id_safe(ctx); + session_data = ngx_cp_async_ctx_get_session_data_safe(ctx); + stage = ngx_cp_async_ctx_get_stage_safe(ctx); + + if (session_id == 0 || session_data == NULL || stage == NGX_CP_ASYNC_STAGE_ERROR) { + write_dbg(DBG_LEVEL_WARNING, "Apply verdict for stage: invalid context data"); + return NGX_ERROR; + } + + write_dbg( + DBG_LEVEL_DEBUG, + "Applying verdict %d for stage %d, session %d", + session_data->verdict, + stage, + session_id + ); + + switch (stage) { + case NGX_CP_ASYNC_STAGE_META_DATA: + write_dbg(DBG_LEVEL_DEBUG, "Meta data verdict processing for session %d", session_id); + + if (is_verdict_drop_or_custom(session_data->verdict)) { + write_dbg(DBG_LEVEL_DEBUG, "Meta data verdict is DROP/CUSTOM_RESPONSE - proceeding directly to completion for session %d", session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + return NGX_OK; + } else if (session_data->verdict == TRAFFIC_VERDICT_DELAYED) { + write_dbg(DBG_LEVEL_DEBUG, "Meta data verdict is WAIT - transitioning to wait meta verdict stage for session %d", session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_WAIT_META_VERDICT); + return NGX_OK; + } else { + write_dbg( + DBG_LEVEL_DEBUG, + "Meta data verdict %d - continuing to headers for session %d", + session_data->verdict, + session_id + ); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_HEADERS); + return NGX_OK; + } + break; + + case NGX_CP_ASYNC_STAGE_HEADERS: + write_dbg(DBG_LEVEL_DEBUG, "Headers verdict processing for session %d", ctx->session_id); + + if (is_verdict_drop_or_custom(ctx->session_data->verdict)) { + write_dbg(DBG_LEVEL_DEBUG, "Header verdict is DROP/CUSTOM_RESPONSE - proceeding directly to completion for session %d", ctx->session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + return NGX_OK; + } else if (ctx->session_data->verdict == TRAFFIC_VERDICT_DELAYED) { + 
write_dbg(DBG_LEVEL_DEBUG, "Header verdict is WAIT - transitioning to wait header verdict stage for session %d", ctx->session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_WAIT_HEADER_VERDICT); + return NGX_OK; + } else { + write_dbg(DBG_LEVEL_DEBUG, "Header verdict is INSPECT - checking for body for session %d", ctx->session_id); + if (does_contain_body(&(ctx->request->headers_in))) { + write_dbg(DBG_LEVEL_DEBUG, "Request has body - transitioning to body stage for session %d", ctx->session_id); + ctx->header_declined = 1; + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_BODY); + ngx_cp_async_cancel_deadline_timer(ctx); + if (ctx->waiting) { + ctx->waiting = 0; + ngx_http_core_run_phases(ctx->request); + } + return NGX_DECLINED; + } else { + write_dbg(DBG_LEVEL_DEBUG, "No body in request - proceeding to end transaction for session %d", ctx->session_id); + ctx->session_data->async_processing_needed = 0; + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_END_TRANSACTION); + return NGX_OK; + } + } + break; + + case NGX_CP_ASYNC_STAGE_END_TRANSACTION: + write_dbg(DBG_LEVEL_DEBUG, "End transaction verdict processing for session %d", ctx->session_id); + + if (is_verdict_drop_or_custom(ctx->session_data->verdict)) { + write_dbg(DBG_LEVEL_DEBUG, "End transaction verdict is DROP/CUSTOM_RESPONSE - proceeding directly to completion for session %d", ctx->session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + return NGX_OK; + } else if (ctx->session_data->verdict == TRAFFIC_VERDICT_DELAYED) { + write_dbg(DBG_LEVEL_DEBUG, "End transaction verdict is WAIT - transitioning to wait end verdict stage for session %d", ctx->session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_WAIT_END_VERDICT); + + // Use configurable timeout for waiting on delayed verdicts + ngx_add_timer(&ctx->agent_event, async_wait_verdict_timeout_ms); + return NGX_AGAIN; + } else { + write_dbg( + DBG_LEVEL_DEBUG, + "End transaction verdict received (%d) - proceeding to completion 
for session %d", + ctx->session_data->verdict, + ctx->session_id + ); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + ctx->header_declined = 1; + ctx->session_data->async_processing_needed = 0; + return NGX_OK; + } + break; + + case NGX_CP_ASYNC_STAGE_BODY: + write_dbg(DBG_LEVEL_DEBUG, "Body verdict processing for session %d", ctx->session_id); + + if (is_verdict_drop_or_custom(ctx->session_data->verdict)) { + write_dbg(DBG_LEVEL_DEBUG, "Body verdict is DROP/CUSTOM_RESPONSE - proceeding to completion for session %d", ctx->session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + return NGX_OK; + } else if (ctx->session_data->verdict == TRAFFIC_VERDICT_DELAYED) { + write_dbg(DBG_LEVEL_DEBUG, "Body verdict is WAIT - transitioning to wait body verdict stage for session %d", ctx->session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_WAIT_BODY_VERDICT); + ngx_add_timer(&ctx->agent_event, async_wait_verdict_timeout_ms); + return NGX_AGAIN; + } else if (ctx->session_data->was_request_fully_inspected) { + write_dbg(DBG_LEVEL_DEBUG, "Body verdict is ACCEPT (final) - proceeding directly to completion for session %d", ctx->session_id); + ctx->req_seq = 0; + return NGX_OK; + } else { + write_dbg( + DBG_LEVEL_DEBUG, + "Body verdict received (%d) - continuing body processing for session %d", + ctx->session_data->verdict + ,ctx->session_id + ); + return NGX_OK; + } + break; + + case NGX_CP_ASYNC_STAGE_WAIT_META_VERDICT: + case NGX_CP_ASYNC_STAGE_WAIT_HEADER_VERDICT: + case NGX_CP_ASYNC_STAGE_WAIT_BODY_VERDICT: + case NGX_CP_ASYNC_STAGE_WAIT_END_VERDICT: + write_dbg(DBG_LEVEL_DEBUG, "Still waiting for verdict in stage %d for session %d", ctx->stage, ctx->session_id); + ngx_add_timer(&ctx->agent_event, async_wait_verdict_timeout_ms); + return NGX_AGAIN; + + default: + write_dbg( + DBG_LEVEL_ERROR, + "No stage-specific verdict processing for stage %d, session %d", + ctx->stage, + ctx->session_id + ); + return NGX_OK; + } + + return NGX_OK; +} + 
+static ngx_int_t +ngx_cp_async_apply_verdict(ngx_http_cp_async_ctx_t *ctx, HttpReplyFromService *reply_p) +{ + ngx_int_t rc = NGX_OK; + uint32_t session_id; + ngx_http_cp_session_data *session_data; + + if (!ngx_cp_async_ctx_is_valid(ctx)) { + write_dbg(DBG_LEVEL_WARNING, "Apply verdict called with invalid/destroyed context for session %d - ignoring", reply_p->session_id); + return NGX_ERROR; + } + + session_id = ngx_cp_async_ctx_get_session_id_safe(ctx); + session_data = ngx_cp_async_ctx_get_session_data_safe(ctx); + + if (session_data == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Apply verdict: invalid session data for session %d", session_id); + return NGX_ERROR; + } + + write_dbg(DBG_LEVEL_DEBUG, "Applying verdict %d for session %d", reply_p->verdict, session_id); + + session_data->verdict = (ServiceVerdict)reply_p->verdict; + + if (is_verdict_final((ServiceVerdict)reply_p->verdict)) { + session_data->was_request_fully_inspected = 1; + write_dbg( + DBG_LEVEL_DEBUG, + "Final verdict %d received for session %d - marking request as fully inspected", + reply_p->verdict, + session_id + ); + } + + if (reply_p->verdict == TRAFFIC_VERDICT_DELAYED && !ctx->first_wait_verdict_encountered) { + ctx->first_wait_verdict_encountered = 1; + write_dbg( + DBG_LEVEL_DEBUG, + "First wait verdict encountered for session %d - cancelling current deadline timer and starting new one with %dms timeout", + session_id, + req_max_proccessing_ms_time + ); + ngx_cp_async_cancel_deadline_timer(ctx); + ngx_cp_async_start_deadline_timer(ctx, ngx_max(req_max_proccessing_ms_time, async_body_stage_timeout)); + } + + if (reply_p->verdict == LIMIT_RESPONSE_HEADERS) { + write_dbg(DBG_LEVEL_DEBUG, "Received limit response headers verdict for session %d", session_id); + return NGX_OK; + } + + if (is_verdict_drop_or_custom(reply_p->verdict) && reply_p->modification_count > 0) { + if (reply_p->verdict == TRAFFIC_VERDICT_DROP) { + write_dbg(DBG_LEVEL_DEBUG, "Applying custom web response for session %d", 
session_id); + handle_custom_web_response(reply_p->modify_data->web_response_data); + } else if (reply_p->verdict == TRAFFIC_VERDICT_CUSTOM_RESPONSE) { + write_dbg(DBG_LEVEL_DEBUG, "Applying custom JSON response for session %d", session_id); + handle_custom_json_response(reply_p->modify_data->json_response_data); + } + + ngx_cp_async_free_modification_list(ctx); + session_data->remaining_messages_to_reply = 0; + } + + if (reply_p->verdict == TRAFFIC_VERDICT_INJECT && reply_p->modification_count > 0) { + write_dbg( + DBG_LEVEL_DEBUG, "Processing %d modifications for session %d", + reply_p->modification_count, + session_id + ); + + HttpInjectData *current_inject_data = reply_p->modify_data->inject_data; + uint8_t modification_count = reply_p->modification_count; + ngx_http_cp_modification_list *new_modification = NULL; + ngx_http_cp_modification_list *current_modification = NULL; + unsigned int modification_index; + + ngx_http_request_t *request = ngx_cp_async_ctx_get_request_safe(ctx); + if (request == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Apply verdict: invalid request for session %d - cannot process modifications", session_id); + return NGX_ERROR; + } + + for (modification_index = 0; modification_index < modification_count; modification_index++) { + new_modification = create_modification_node(current_inject_data, request); + if (new_modification == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to create modification node for session %d", session_id); + while (ctx->modifications) { + current_modification = ctx->modifications; + ctx->modifications = ctx->modifications->next; + ngx_pfree(request->pool, current_modification->modification_buffer); + ngx_pfree(request->pool, current_modification); + } + rc = NGX_ERROR; + break; + } + + if (ctx->modifications == NULL) { + ctx->modifications = new_modification; + current_modification = ctx->modifications; + } else { + current_modification->next = new_modification; + current_modification = current_modification->next; + 
} + + current_inject_data = (HttpInjectData *)( + (char *)current_inject_data + + sizeof(HttpInjectData) + + current_inject_data->injection_size + ); + } + + if (rc != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "Failed to parse modifications for session %d", session_id); + } else { + write_dbg( + DBG_LEVEL_DEBUG, + "Successfully parsed %d modifications for session %d", + modification_count, + session_id + ); + } + } + + if (session_data->remaining_messages_to_reply > 0) { + session_data->remaining_messages_to_reply--; + write_dbg( + DBG_LEVEL_DEBUG, + "Verdict received - remaining messages for session %d: %d", + session_id, + session_data->remaining_messages_to_reply + ); + } + + if (ctx->req_seq > 0) { + ctx->req_seq--; + } + + if ( + session_data->remaining_messages_to_reply == 0 + || (session_data->was_request_fully_inspected && ngx_cp_async_ctx_get_stage_safe(ctx) == NGX_CP_ASYNC_STAGE_BODY) + ) { + ngx_int_t apply_verdict_rc = ngx_cp_async_apply_verdict_for_stage(ctx); + if (apply_verdict_rc == NGX_OK) { + ngx_cp_async_event_handler(&ctx->agent_event); + } + } + + return rc; +} + +ngx_int_t +ngx_cp_async_continue_processing(ngx_http_cp_async_ctx_t *ctx) +{ + ngx_int_t rc = NGX_OK; + ngx_uint_t num_messages_sent = 0; + + if (ctx->flow_error) { + write_dbg(DBG_LEVEL_WARNING, "Flow error flag set, skipping processing"); + ctx->session_data->verdict = fail_mode_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + return fail_mode_verdict == NGX_OK ? 
NGX_DECLINED : fail_mode_verdict; + } + + write_dbg( + DBG_LEVEL_DEBUG, + "DEBUG: Processing stage %d - remaining_messages_to_reply=%d", + ctx->stage, + ctx->session_data->remaining_messages_to_reply + ); + + // Set debug context to this session + set_current_session_id(ctx->session_id); + + switch (ctx->stage) { + case NGX_CP_ASYNC_STAGE_META_DATA: + write_dbg(DBG_LEVEL_DEBUG, "*** STAGE: META_DATA for session %d ***", ctx->session_id); + + if (!ctx->meta_data_sent) { + write_dbg(DBG_LEVEL_DEBUG, "Calling async meta data sender for session %d", ctx->session_id); + rc = ngx_cp_async_send_meta_data_nonblocking(ctx, &num_messages_sent); + write_dbg( + DBG_LEVEL_DEBUG, "Async meta data sender returned: %d, messages sent: %d for session %d", + rc, + num_messages_sent, + ctx->session_id + ); + + if (rc == INSPECTION_IRRELEVANT) { + write_dbg(DBG_LEVEL_DEBUG, "Request irrelevant for session %d - finishing with IRRELEVANT verdict", ctx->session_id); + ctx->session_data->verdict = TRAFFIC_VERDICT_IRRELEVANT; + return NGX_DECLINED; + } + + if (rc != NGX_OK) { + write_dbg(DBG_LEVEL_DEBUG, "Failed to send meta data for session %d - rc: %d", ctx->session_id, rc); + ctx->flow_error = 1; + ctx->session_data->verdict = fail_mode_verdict == NGX_OK ? 
TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + goto process_complete_stage; + } + + write_dbg(DBG_LEVEL_DEBUG, "Meta data sent - expecting verdict for session %d", ctx->session_id); + ctx->session_data->remaining_messages_to_reply += num_messages_sent; + ctx->meta_data_sent = 1; + return NGX_AGAIN; + } + break; + + case NGX_CP_ASYNC_STAGE_HEADERS: + write_dbg(DBG_LEVEL_DEBUG, "*** STAGE: HEADERS for session %d ***", ctx->session_id); + + if (!ctx->headers_sent) { + num_messages_sent = 0; + write_dbg(DBG_LEVEL_DEBUG, "Calling async header sender for session %d", ctx->session_id); + rc = ngx_cp_async_send_headers_nonblocking(ctx, &num_messages_sent); + write_dbg( + DBG_LEVEL_DEBUG, + "Async header sender returned: %d, messages sent: %d for session %d", + rc, + num_messages_sent, + ctx->session_id + ); + + if (rc != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send headers for session %d - rc: %d", ctx->session_id, rc); + ctx->flow_error = 1; + ctx->session_data->verdict = fail_mode_verdict == NGX_OK ? 
TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + goto process_complete_stage; + } + + write_dbg(DBG_LEVEL_DEBUG, "Headers sent - expecting verdict for session %d", ctx->session_id); + ctx->session_data->remaining_messages_to_reply += num_messages_sent; + ctx->headers_sent = 1; + + write_dbg(DBG_LEVEL_DEBUG, "Awaiting header verdict - processing will be handled by verdict event handler for session %d", ctx->session_id); + return NGX_AGAIN; + } + break; + + case NGX_CP_ASYNC_STAGE_END_TRANSACTION: + write_dbg(DBG_LEVEL_DEBUG, "*** STAGE: END_TRANSACTION for session %d ***", ctx->session_id); + if (!ctx->end_transaction_sent) { + write_dbg(DBG_LEVEL_DEBUG, "Calling async end transaction sender for session %d", ctx->session_id); + rc = ngx_cp_async_send_end_transaction_nonblocking(ctx, &num_messages_sent); + + write_dbg( + DBG_LEVEL_DEBUG, "Async end transaction sender returned: %d, messages sent: %d for session %d", + rc, + num_messages_sent, + ctx->session_id + ); + + if (rc != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "End transaction sender failed for session %d", ctx->session_id); + ctx->flow_error = 1; + ctx->session_data->verdict = fail_mode_verdict == NGX_OK ? 
TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + goto process_complete_stage; + } + + write_dbg(DBG_LEVEL_DEBUG, "End transaction sent - expecting verdict for session %d", ctx->session_id); + ctx->session_data->remaining_messages_to_reply += num_messages_sent; + ctx->end_transaction_sent = 1; + return NGX_AGAIN; + } + break; + + case NGX_CP_ASYNC_STAGE_BODY: + write_dbg(DBG_LEVEL_DEBUG, "*** STAGE: BODY for session %d ***", ctx->session_id); + if (is_verdict_drop_or_custom(ctx->session_data->verdict)) { + write_dbg(DBG_LEVEL_DEBUG, "Final verdict is DROP - proceeding to complete stage for session %d", ctx->session_id); + ASYNC_STAGE_TRANSITION(ctx, NGX_CP_ASYNC_STAGE_COMPLETE); + goto process_complete_stage; + } + + if (ctx->req_seq > 0) { + write_dbg(DBG_LEVEL_DEBUG, "Body chunks sent - waiting for verdict for session %d", ctx->session_id); + return NGX_AGAIN; + } + + if (ctx->req_seq == 0 && ctx->end_transaction_sent == 1 && ctx->req_seen_last == 1) { + write_dbg(DBG_LEVEL_DEBUG, "All body chunks processed - finalizing request for session %d", ctx->session_id); + ctx->released = 1; + } + + cp_async_post_request(ctx); + return NGX_AGAIN; + break; + + case NGX_CP_ASYNC_STAGE_WAIT_HEADER_VERDICT: + write_dbg(DBG_LEVEL_DEBUG, "*** STAGE: WAIT_HEADER_VERDICT for session %d ***", ctx->session_id); + return ngx_cp_async_handle_wait_verdict(ctx, "header"); + + case NGX_CP_ASYNC_STAGE_WAIT_END_VERDICT: + write_dbg(DBG_LEVEL_DEBUG, "*** STAGE: WAIT_END_VERDICT for session %d ***", ctx->session_id); + return ngx_cp_async_handle_wait_verdict(ctx, "end"); + + case NGX_CP_ASYNC_STAGE_WAIT_BODY_VERDICT: + write_dbg(DBG_LEVEL_DEBUG, "*** STAGE: WAIT_BODY_VERDICT for session %d ***", ctx->session_id); + return ngx_cp_async_handle_wait_verdict(ctx, "body"); + +process_complete_stage: + case NGX_CP_ASYNC_STAGE_COMPLETE: + write_dbg(DBG_LEVEL_DEBUG, "*** STAGE: COMPLETE for session %d ***", ctx->session_id); + + ngx_cp_async_cancel_deadline_timer(ctx); + + write_dbg(DBG_LEVEL_DEBUG, 
"Calculating processing time for session %d", ctx->session_id); + calcProcessingTime(ctx->session_data, &ctx->start_time, 1); + + write_dbg(DBG_LEVEL_DEBUG, "Finalizing request headers hook for session %d", ctx->session_id); + rc = ngx_http_cp_finalize_request_headers_hook( + ctx->request, + ctx->session_data, + ctx->modifications, + NGX_OK + ); + write_dbg(DBG_LEVEL_DEBUG, "Finalize headers hook returned: %d for session %d", rc, ctx->session_id); + + if (ctx->agent_event.timer_set) { + write_dbg(DBG_LEVEL_DEBUG, "Canceling pending timer for session %d", ctx->session_id); + ngx_del_timer(&ctx->agent_event); + } + + if ( + ctx->session_data->verdict == TRAFFIC_VERDICT_ACCEPT + || ctx->session_data->verdict == TRAFFIC_VERDICT_INSPECT + || ctx->session_data->verdict == TRAFFIC_VERDICT_IRRELEVANT + ) { + write_dbg( + DBG_LEVEL_DEBUG, + "Request ALLOWED (verdict: %d) - continuing to proxy pass for session %d", + ctx->session_data->verdict, + ctx->session_id + ); + return NGX_DECLINED; + } + + write_dbg(DBG_LEVEL_DEBUG, "Final verdict for session %d: %d", ctx->session_id, ctx->session_data->verdict); + if (is_verdict_drop_or_custom(ctx->session_data->verdict)) { + write_dbg(DBG_LEVEL_DEBUG, "Request BLOCKED - rejecting request for session %d", ctx->session_id); + return NGX_HTTP_FORBIDDEN; + } else { + write_dbg( + DBG_LEVEL_WARNING, + "Unknown verdict %d - using fail-safe mode for session %d", + ctx->session_data->verdict, + ctx->session_id + ); + SAFE_DESTROY_CTX(ctx); + ngx_int_t fail_safe_rc = fail_mode_verdict == NGX_OK ? 
NGX_DECLINED : NGX_HTTP_FORBIDDEN; + return fail_safe_rc; + } + + write_dbg(DBG_LEVEL_DEBUG, "=== ASYNC PROCESSING COMPLETE FOR SESSION %d ===", ctx->session_id); + break; + + case NGX_CP_ASYNC_STAGE_ERROR: + default: + write_dbg(DBG_LEVEL_WARNING, "*** STAGE: ERROR/UNKNOWN (%d) for session %d ***", ctx->stage, ctx->session_id); + return NGX_ERROR; + } + + write_dbg(DBG_LEVEL_DEBUG, "=== CONTINUE PROCESSING END - RETURNING NGX_AGAIN ==="); + return NGX_AGAIN; +} diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_core.h b/attachments/nginx/ngx_module/async/ngx_cp_async_core.h new file mode 100755 index 0000000..951a9d8 --- /dev/null +++ b/attachments/nginx/ngx_module/async/ngx_cp_async_core.h @@ -0,0 +1,169 @@ +#ifndef __NGX_CP_ASYNC_CORE_H__ +#define __NGX_CP_ASYNC_CORE_H__ + +#include +#include +#include +#include + +#include "../ngx_cp_hook_threads.h" +#include "nano_attachment_common.h" + +extern ngx_module_t ngx_http_cp_attachment_module; ///< CP Attachment module +extern ngx_http_request_body_filter_pt ngx_http_next_request_body_filter; ///< NGINX request body filter. 
+extern ngx_uint_t async_backpressure_threshold; +extern ngx_msec_t async_header_timeout_ms; // Default 3s for headers/meta_data/end_transaction +extern ngx_msec_t async_body_stage_timeout; // Default 5s for body stage +extern ngx_msec_t async_wait_verdict_timeout_ms; // Default 50ms for wait verdict polling +extern ngx_msec_t async_first_wait_verdict_timeout_ms; // Default 10s for first wait verdict deadline timer +extern ngx_msec_t async_signal_timeout_ms; // Default 10ms for service signal timeout +extern ngx_msec_t async_context_cleanup_timeout_ms; // Default 5 minutes for context cleanup + +/// @struct ngx_http_cp_async_ctx +/// @brief Simplified async context for handling non-blocking agent communication +typedef struct ngx_http_cp_async_ctx { + ngx_http_request_t *request; ///< Original request + ngx_http_cp_session_data *session_data; ///< Session data + uint32_t session_id; ///< Session ID for this context + ngx_int_t stage; ///< Current processing stage + ngx_event_t agent_event; ///< Event for agent communication + ngx_event_t deadline_event; ///< Deadline timeout event for current stage + ngx_event_t cleanup_event; ///< Context cleanup timeout event + ngx_event_t resume_event; ///< Event to resume request processing + struct timespec start_time; ///< Processing start time + ngx_str_t waf_tag; ///< WAF tag for this request + ngx_http_cp_modification_list *modifications; ///< Modifications data + unsigned waiting:1; ///< Flag to indicate waiting for verdict + unsigned body_phase_started:1; ///< Flag to indicate if body phase started + unsigned released:1; ///< Flag to indicate if request is released + unsigned req_seen_last:1; ///< Flag to indicate if last chunk seen + ngx_uint_t req_seq; ///< Request body chunk sequence number + ngx_chain_t *queue_head; ///< Saved chains to forward later + ngx_chain_t *queue_tail; ///< Tail of saved chains + ngx_uint_t meta_data_sent; ///< Flag to track if meta data was sent + ngx_uint_t headers_sent; ///< Flag to track 
if headers were sent + ngx_uint_t end_transaction_sent; ///< Flag to track if end transaction was sent + ngx_uint_t header_declined; ///< Flag to track if headers were declined + unsigned first_wait_verdict_encountered:1; ///< Flag to track if first wait verdict was encountered + struct ngx_http_cp_async_ctx *map_next; ///< Next context in hash bucket chain + unsigned flow_error:1; ///< Flag to indicate flow error/failure/abort occurred + unsigned request_ref_incremented:1; ///< Flag to track if request reference count was incremented + struct timespec request_start_time; ///< Current stage start time +} ngx_http_cp_async_ctx_t; + + +/// +/// @brief Initialize the async connection management system +/// @return NGX_OK on success, NGX_ERROR on failure +/// +ngx_int_t ngx_cp_async_init(); + +/// +/// @brief Cleanup the async connection management system +/// +void ngx_cp_async_cleanup(); + +/// +/// @brief Create and initialize async context +/// @param[in] request NGINX request +/// @param[in] session_data Session data +/// @return Async context pointer or NULL on failure +/// +ngx_http_cp_async_ctx_t *ngx_cp_async_create_ctx(ngx_http_request_t *request, ngx_http_cp_session_data *session_data); + +/// +/// @brief Destroy async context +/// @param[in] ctx Async context to destroy +/// +void ngx_cp_async_destroy_ctx(ngx_http_cp_async_ctx_t *ctx); + +/// +/// @brief Find async context by session ID +/// @param[in] session_id Session ID to find +/// @return Async context pointer or NULL if not found +/// +ngx_http_cp_async_ctx_t *ngx_cp_async_find_ctx(uint32_t session_id); + +/// +/// @brief Add async context to connection map +/// @param[in] ctx Async context to add +/// @return NGX_OK on success, NGX_ERROR on failure +/// +ngx_int_t ngx_cp_async_add_ctx(ngx_http_cp_async_ctx_t *ctx); + +/// +/// @brief Remove async context from connection map +/// @param[in] ctx Async context to remove +/// +void ngx_cp_async_remove_ctx(ngx_http_cp_async_ctx_t *ctx); + +/// +/// 
@brief Main async event handler +/// @param[in] ev Event that triggered the handler +/// +void ngx_cp_async_event_handler(ngx_event_t *ev); + +/// +/// @brief Start async agent communication +/// @param[in] ctx Async context +/// @return NGX_OK on success, NGX_ERROR on failure +/// +ngx_int_t ngx_cp_async_start_agent_communication(ngx_http_cp_async_ctx_t *ctx); + +/// +/// @brief Continue processing to next stage +/// @param[in] ctx Async context +/// @return NGX_OK, NGX_AGAIN, NGX_HTTP_FORBIDDEN, or NGX_ERROR +/// +ngx_int_t ngx_cp_async_continue_processing(ngx_http_cp_async_ctx_t *ctx); + +/// +/// @brief Start deadline timer for current stage +/// @param[in] ctx Async context +/// @param[in] timeout_ms Timeout in milliseconds +/// @return NGX_OK on success, NGX_ERROR on failure +/// +ngx_int_t ngx_cp_async_start_deadline_timer(ngx_http_cp_async_ctx_t *ctx, ngx_msec_t timeout_ms); + +/// +/// @brief Disable IPC verdict event handler and free connection +/// +void disable_ipc_verdict_event_handler(void); + +/// +/// @brief Enable IPC verdict event handler and setup connection +/// +void enable_ipc_verdict_event_handler(void); + +/// +/// @brief Setup IPC verdict event handler +/// @return NGX_OK on success, NGX_ERROR on failure +/// +ngx_int_t ngx_cp_async_setup_verdict_event_handler(void); + +/// +/// @brief Add chain of buffers to async context queue +/// @param[in] request NGINX request +/// @param[in] ctx Async context +/// @param[in] in Chain of buffers to add +/// @return NGX_OK on success, NGX_ERROR on failure +/// +ngx_int_t chain_add_copy(ngx_http_request_t *request, ngx_http_cp_async_ctx_t *ctx, ngx_chain_t *in); + +/// +/// @brief Free queued chains in async context +/// @param[in] r NGINX request +/// @param[in] ctx Async context +/// +void queue_free(ngx_http_request_t *r, ngx_http_cp_async_ctx_t *ctx); + +void ngx_cp_async_increment_pending_chunks(uint32_t session_id, const char *chunk_type); + +void ngx_cp_async_decrement_pending_chunks(uint32_t 
session_id, const char *verdict_type); + +/// +/// @brief Post backpressure drain event if conditions are met +/// +void ngx_cp_async_post_backpressure_drain_event(void); + +#endif // __NGX_CP_ASYNC_CORE_H__ diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_ctx_validation.c b/attachments/nginx/ngx_module/async/ngx_cp_async_ctx_validation.c new file mode 100755 index 0000000..5053797 --- /dev/null +++ b/attachments/nginx/ngx_module/async/ngx_cp_async_ctx_validation.c @@ -0,0 +1,202 @@ +#include "ngx_cp_async_ctx_validation.h" + +#include "ngx_cp_async_core.h" +#include "../ngx_cp_utils.h" + +/// +/// @brief Check if a context pointer is valid and not destroyed +/// @param[in] ctx Context to validate +/// @return 1 if valid, 0 if invalid/destroyed +/// +ngx_int_t +ngx_cp_async_ctx_is_valid(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ctx) { + return 0; + } + + if (!ctx->session_id) { + write_dbg(DBG_LEVEL_WARNING, "Context validation failed: invalid session_id 0"); + return 0; + } + + if (!ctx->request) { + write_dbg(DBG_LEVEL_WARNING, "Context validation failed: NULL request for session %d", ctx->session_id); + return 0; + } + + if (!ctx->session_data) { + write_dbg(DBG_LEVEL_WARNING, "Context validation failed: NULL session_data for session %d", ctx->session_id); + return 0; + } + + return 1; +} + +/// +/// @brief Safely get session ID from context +/// @param[in] ctx Context to get session ID from +/// @return Session ID or 0 if invalid +/// +uint32_t +ngx_cp_async_ctx_get_session_id_safe(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ngx_cp_async_ctx_is_valid(ctx)) { + return 0; + } + return ctx->session_id; +} + +/// +/// @brief Safely get request from context +/// @param[in] ctx Context to get request from +/// @return Request pointer or NULL if invalid +/// +ngx_http_request_t * +ngx_cp_async_ctx_get_request_safe(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ngx_cp_async_ctx_is_valid(ctx)) { + return NULL; + } + return ctx->request; +} + +/// +/// @brief Safely get 
session data from context +/// @param[in] ctx Context to get session data from +/// @return Session data pointer or NULL if invalid +/// +ngx_http_cp_session_data * +ngx_cp_async_ctx_get_session_data_safe(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ngx_cp_async_ctx_is_valid(ctx)) { + return NULL; + } + return ctx->session_data; +} + +/// +/// @brief Safely get stage from context +/// @param[in] ctx Context to get stage from +/// @return Stage or NGX_CP_ASYNC_STAGE_ERROR if invalid +/// +ngx_cp_async_stage_t +ngx_cp_async_ctx_get_stage_safe(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ngx_cp_async_ctx_is_valid(ctx)) { + return NGX_CP_ASYNC_STAGE_ERROR; + } + return ctx->stage; +} + +/// +/// @brief Safely get flow error flag from context +/// @param[in] ctx Context to get flow error from +/// @return Flow error flag or 1 (error) if invalid +/// +ngx_int_t +ngx_cp_async_ctx_get_flow_error_safe(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ngx_cp_async_ctx_is_valid(ctx)) { + return 1; // Assume error if context is invalid + } + return ctx->flow_error; +} + +/// +/// @brief Safely get header declined flag from context +/// @param[in] ctx Context to get header declined from +/// @return Header declined flag or 0 if invalid +/// +ngx_int_t +ngx_cp_async_ctx_get_header_declined_safe(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ngx_cp_async_ctx_is_valid(ctx)) { + return 0; + } + return ctx->header_declined; +} + +/// +/// @brief Safely get request sequence from context +/// @param[in] ctx Context to get req_seq from +/// @return Request sequence or 0 if invalid +/// +ngx_uint_t +ngx_cp_async_ctx_get_req_seq_safe(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ngx_cp_async_ctx_is_valid(ctx)) { + return 0; + } + return ctx->req_seq; +} + +/// +/// @brief Safely get waiting flag from context +/// @param[in] ctx Context to get waiting from +/// @return Waiting flag or 0 if invalid +/// +ngx_int_t +ngx_cp_async_ctx_get_waiting_safe(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ngx_cp_async_ctx_is_valid(ctx)) { 
+ return 0; + } + return ctx->waiting; +} + +/// +/// @brief Safely get released flag from context +/// @param[in] ctx Context to get released from +/// @return Released flag or 0 if invalid +/// +ngx_int_t +ngx_cp_async_ctx_get_released_safe(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ngx_cp_async_ctx_is_valid(ctx)) { + return 0; + } + return ctx->released; +} + +/// +/// @brief Safely get queue head from context +/// @param[in] ctx Context to get queue_head from +/// @return Queue head or NULL if invalid +/// +ngx_chain_t * +ngx_cp_async_ctx_get_queue_head_safe(ngx_http_cp_async_ctx_t *ctx) +{ + if (!ngx_cp_async_ctx_is_valid(ctx)) { + return NULL; + } + return ctx->queue_head; +} + +/// +/// @brief Nullify all references to a context in event handlers +/// @param[in] ctx Context being destroyed +/// +void +ngx_cp_async_nullify_ctx_refs(ngx_http_cp_async_ctx_t *ctx) +{ + if (ctx == NULL) { + return; + } + + // Clear event data pointers to prevent dangling references + if (ctx->agent_event.data == ctx) { + ctx->agent_event.data = NULL; + } + if (ctx->cleanup_event.data == ctx) { + ctx->cleanup_event.data = NULL; + } + if (ctx->resume_event.data == ctx) { + ctx->resume_event.data = NULL; + } + if (ctx->deadline_event.data == ctx) { + ctx->deadline_event.data = NULL; + } + + write_dbg(DBG_LEVEL_DEBUG, "Nullified context references for session %d", ctx->session_id); +} diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_ctx_validation.h b/attachments/nginx/ngx_module/async/ngx_cp_async_ctx_validation.h new file mode 100755 index 0000000..f086184 --- /dev/null +++ b/attachments/nginx/ngx_module/async/ngx_cp_async_ctx_validation.h @@ -0,0 +1,33 @@ +#ifndef __NGX_CP_ASYNC_CTX_VALIDATION_H__ +#define __NGX_CP_ASYNC_CTX_VALIDATION_H__ + +#include "ngx_cp_async_types.h" + +// Context validation functions +ngx_int_t ngx_cp_async_ctx_is_valid(ngx_http_cp_async_ctx_t *ctx); +uint32_t ngx_cp_async_ctx_get_session_id_safe(ngx_http_cp_async_ctx_t *ctx); +ngx_http_request_t 
*ngx_cp_async_ctx_get_request_safe(ngx_http_cp_async_ctx_t *ctx); +ngx_http_cp_session_data *ngx_cp_async_ctx_get_session_data_safe(ngx_http_cp_async_ctx_t *ctx); +ngx_cp_async_stage_t ngx_cp_async_ctx_get_stage_safe(ngx_http_cp_async_ctx_t *ctx); +ngx_int_t ngx_cp_async_ctx_get_flow_error_safe(ngx_http_cp_async_ctx_t *ctx); +ngx_int_t ngx_cp_async_ctx_get_header_declined_safe(ngx_http_cp_async_ctx_t *ctx); +ngx_uint_t ngx_cp_async_ctx_get_req_seq_safe(ngx_http_cp_async_ctx_t *ctx); +ngx_int_t ngx_cp_async_ctx_get_waiting_safe(ngx_http_cp_async_ctx_t *ctx); +ngx_int_t ngx_cp_async_ctx_get_released_safe(ngx_http_cp_async_ctx_t *ctx); +ngx_chain_t *ngx_cp_async_ctx_get_queue_head_safe(ngx_http_cp_async_ctx_t *ctx); + +// Context nullification function +void ngx_cp_async_nullify_ctx_refs(ngx_http_cp_async_ctx_t *ctx); + +// Forward declaration for find function +ngx_http_cp_async_ctx_t *ngx_cp_async_find_ctx(uint32_t session_id); + +// Macro for safe context destruction with null assignment +#define SAFE_DESTROY_CTX(ctx_ptr) do { \ + if (ctx_ptr) { \ + ngx_cp_async_destroy_ctx(ctx_ptr); \ + ctx_ptr = NULL; \ + } \ +} while(0) + +#endif // __NGX_CP_ASYNC_CTX_VALIDATION_H__ diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_headers.c b/attachments/nginx/ngx_module/async/ngx_cp_async_headers.c new file mode 100755 index 0000000..addce04 --- /dev/null +++ b/attachments/nginx/ngx_module/async/ngx_cp_async_headers.c @@ -0,0 +1,263 @@ +#include "ngx_cp_async_headers.h" + +#include +#include +#include +#include +#include +#include + +#include "ngx_cp_async_core.h" +#include "ngx_cp_async_ctx_validation.h" +#include "ngx_cp_async_sender.h" +#include "../ngx_cp_hooks.h" +#include "../ngx_cp_initializer.h" +#include "../ngx_http_cp_attachment_module.h" +#include "../ngx_cp_utils.h" +#include "../ngx_cp_failing_state.h" +#include "../ngx_cp_metric.h" +#include "../ngx_cp_thread.h" +#include "../ngx_cp_static_content.h" + +extern ngx_int_t is_initialized; + +ngx_int_t 
+ngx_http_cp_req_header_handler_async(ngx_http_request_t *request) +{ + ngx_http_cp_session_data *session_data_p; + ngx_http_cp_async_ctx_t *ctx; + ngx_int_t handle_static_resource_result; + ServiceVerdict sessions_per_minute_verdict; + ngx_cp_attachment_conf_t *conf; + ngx_int_t final_res; + struct timespec hook_time_begin; + + static int is_failure_state_initialized = 0; + static int is_metric_data_initialized = 0; + + write_dbg(DBG_LEVEL_DEBUG, "=== ASYNC REQUEST HEADER HANDLER START ==="); + + clock_gettime(CLOCK_REALTIME, &hook_time_begin); + if (is_async_mode_enabled && !is_initialized) { + ngx_cp_async_init(); + } + + if (is_failure_state_initialized == 0) { + write_dbg(DBG_LEVEL_ERROR, "Initializing failure state (first time)"); + reset_transparent_mode(); + is_failure_state_initialized = 1; + } + + if (is_metric_data_initialized == 0) { + write_dbg(DBG_LEVEL_ERROR, "Initializing metric data (first time)"); + reset_metric_data(); + is_metric_data_initialized = 1; + } + + set_current_session_id(0); + reset_dbg_ctx(); + + if (is_in_transparent_mode()) { + write_dbg(DBG_LEVEL_DEBUG, "In transparent mode - updating metrics and returning"); + updateMetricField(TRANSPARENTS_COUNT, 1); + return fail_mode_verdict == NGX_OK ? 
NGX_DECLINED : NGX_ERROR; + } + + if (is_ngx_cp_attachment_disabled(request)) { + write_dbg(DBG_LEVEL_DEBUG, "Ignoring inspection of request on a disabled location"); + return NGX_DECLINED; + } + + conf = ngx_http_get_module_loc_conf(request, ngx_http_cp_attachment_module); + if (conf == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to get module configuration"); + return NGX_DECLINED; + } + + session_data_p = ngx_http_get_module_ctx(request, ngx_http_cp_attachment_module); + if (session_data_p == NULL) { + write_dbg(DBG_LEVEL_DEBUG, "No existing session data - initializing new session"); + session_data_p = init_cp_session_data(request); + if (session_data_p == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to initialize session data"); + return NGX_DECLINED; + } + } + + set_current_session_id(session_data_p->session_id); + write_dbg(DBG_LEVEL_DEBUG, "Async request header filter handling session ID: %d", session_data_p->session_id); + session_data_p->initial_async_mode = 1; + if (!is_async_mode_enabled) { + write_dbg(DBG_LEVEL_WARNING, "Async mode is not enabled for request"); + return NGX_DECLINED; + } + + sessions_per_minute_verdict = enforce_sessions_rate(); + if (sessions_per_minute_verdict != TRAFFIC_VERDICT_INSPECT) { + session_data_p->verdict = sessions_per_minute_verdict; + return sessions_per_minute_verdict == TRAFFIC_VERDICT_ACCEPT ? 
NGX_DECLINED : NGX_ERROR; + } + + // Do immediate blocking registration (same as sync version) + if (!get_already_registered() || !isIpcReady()) { + struct ngx_http_cp_event_thread_ctx_t ctx; + int res; + + init_thread_ctx(&ctx, request, session_data_p, NULL); + ctx.waf_tag = conf->waf_tag; + + if (is_registration_timeout_reached()) { + write_dbg(DBG_LEVEL_DEBUG, "spawn ngx_http_cp_registration_thread"); + reset_registration_timeout(); + res = ngx_cp_run_in_thread_timeout( + ngx_http_cp_registration_thread, + (void *)&ctx, + ngx_max(registration_thread_timeout_msec, 200), + "ngx_http_cp_registration_thread" + ); + } else { + res = 0; + write_dbg(DBG_LEVEL_DEBUG, "Attachment registration has recently started, wait for timeout"); + } + + if (!res) { + // failed to execute thread task, or it timed out + session_data_p->verdict = fail_mode_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + write_dbg( + DBG_LEVEL_DEBUG, + "registraton thread failed, returning default fail mode verdict. Session id: %d, verdict: %s", + session_data_p->session_id, + session_data_p->verdict == TRAFFIC_VERDICT_ACCEPT ? "accept" : "drop" + ); + updateMetricField(REG_THREAD_TIMEOUT, 1); + + return fail_mode_verdict == NGX_OK ? NGX_DECLINED : fail_mode_verdict; + } + write_dbg( + DBG_LEVEL_DEBUG, + "finished ngx_http_cp_registration_thread successfully. return=%d res=%d", + ctx.should_return, + ctx.res + ); + if (ctx.should_return) { + session_data_p->verdict = TRAFFIC_VERDICT_ACCEPT; + return ctx.res == NGX_OK ? NGX_DECLINED : ctx.res; + } + + if (ngx_cp_async_setup_verdict_event_handler() != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "Failed to set up verdict event handler for session %d", session_data_p->session_id); + return fail_mode_verdict == NGX_OK ? NGX_DECLINED : fail_mode_verdict; + } + } + + set_already_registered(1); + reset_registration_timeout_duration(); + + if (handle_shmem_corruption() == NGX_ERROR) { + session_data_p->verdict = fail_mode_verdict == NGX_OK ? 
TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; + write_dbg( + DBG_LEVEL_DEBUG, + "Shared memory is corrupted, returning default fail mode verdict. Session id: %d, verdict: %s", + session_data_p->session_id, + session_data_p->verdict == TRAFFIC_VERDICT_ACCEPT ? "accept" : "drop" + ); + return fail_mode_verdict == NGX_OK ? NGX_DECLINED : fail_mode_verdict; + } + + ctx = ngx_cp_async_find_ctx(session_data_p->session_id); + if (ctx != NULL) { + write_dbg( + DBG_LEVEL_DEBUG, + "Found existing async context for session %d - stage: %d, header_declined: %d", + session_data_p->session_id, + ctx->stage, ctx->header_declined + ); + + if (ctx->header_declined) { + write_dbg( + DBG_LEVEL_DEBUG, + "Header already declined for body processing - returning NGX_DECLINED again for session %d", + session_data_p->session_id + ); + return NGX_DECLINED; + } + + return ngx_cp_async_continue_processing(ctx); + } + + if ( + session_data_p->async_processing_needed == 0 + && (session_data_p->verdict != TRAFFIC_VERDICT_INSPECT || session_data_p->was_request_fully_inspected) + ) { + write_dbg(DBG_LEVEL_DEBUG, "Async processing already completed for session %d - allowing to pass through", session_data_p->session_id); + SAFE_DESTROY_CTX(ctx); + return NGX_DECLINED; + } + + + handle_static_resource_result = handle_static_resource_request( + session_data_p->session_id, + &session_data_p->verdict, + request + ); + + if (handle_static_resource_result != NOT_A_STATIC_RESOURCE) { + write_dbg(DBG_LEVEL_DEBUG, "Static resource handled - result: %d", handle_static_resource_result); + return handle_static_resource_result; + } + + ctx = ngx_cp_async_create_ctx(request, session_data_p); + if (ctx == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to create async context - allowing request to continue"); + return NGX_DECLINED; + } + + ctx->waf_tag.data = conf->waf_tag.data; + ctx->waf_tag.len = conf->waf_tag.len; + + ngx_cp_async_add_ctx(ctx); + final_res = ngx_cp_async_start_agent_communication(ctx); + + 
session_data_p->async_processing_needed = 1; + + write_dbg( + DBG_LEVEL_DEBUG, + "Async processing started with result: %d for session %d", + final_res, + session_data_p->session_id + ); + + if (final_res == NGX_AGAIN) { + write_dbg( + DBG_LEVEL_DEBUG, + "Async processing in progress - HOLDING REQUEST until verdict received for session %d", + session_data_p->session_id + ); + ngx_cp_async_start_deadline_timer(ctx, ngx_max(req_header_thread_timeout_msec, async_header_timeout_ms)); + ctx->waiting = 1; + + if (!ctx->request_ref_incremented && ctx->request->http_version == NGX_HTTP_VERSION_20) { + ctx->request->main->count++; + ctx->request_ref_incremented = 1; + write_dbg(DBG_LEVEL_DEBUG, "Incremented request main reference count for HTTP/2 session %d", session_data_p->session_id); + } + + return NGX_DONE; + } else if (final_res == NGX_DECLINED) { + write_dbg( + DBG_LEVEL_DEBUG, + "Async processing completed immediately - allowing request to continue for session %d", + session_data_p->session_id + ); + return NGX_DECLINED; + } else { + write_dbg( + DBG_LEVEL_WARNING, + "Async processing failed - fail-open request for session %d", + session_data_p->session_id + ); + return final_res; + } + + write_dbg(DBG_LEVEL_DEBUG, "=== ASYNC REQUEST HEADER HANDLER END ==="); +} diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_headers.h b/attachments/nginx/ngx_module/async/ngx_cp_async_headers.h new file mode 100755 index 0000000..3d1ddf5 --- /dev/null +++ b/attachments/nginx/ngx_module/async/ngx_cp_async_headers.h @@ -0,0 +1,11 @@ +#ifndef __NGX_CP_ASYNC_HEADERS_H__ +#define __NGX_CP_ASYNC_HEADERS_H__ + +#include +#include +#include +#include + +ngx_int_t ngx_http_cp_req_header_handler_async(ngx_http_request_t *request); + +#endif // __NGX_CP_ASYNC_HEADERS_H__ diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_sender.c b/attachments/nginx/ngx_module/async/ngx_cp_async_sender.c new file mode 100755 index 0000000..d2e4e3d --- /dev/null +++ 
b/attachments/nginx/ngx_module/async/ngx_cp_async_sender.c @@ -0,0 +1,605 @@ +#include "ngx_cp_async_sender.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "ngx_cp_async_core.h" +#include "ngx_cp_async_ctx_validation.h" +#include "../ngx_cp_utils.h" +#include "../ngx_cp_io.h" +#include "../ngx_cp_initializer.h" + +/// +/// @brief Signals nano service about new session to inspect with timeout protection. +/// @param[in] cur_session_id Session's Id. +/// @param[in] ctx Async context for setting flow_error on timeout (can be NULL). +/// @param[in] timeout_ms Write timeout in milliseconds (default 200ms if 0). +/// @returns ngx_int_t +/// - #NGX_OK +/// - #NGX_ERROR +/// - #NGX_HTTP_REQUEST_TIME_OUT +/// +static ngx_int_t +ngx_http_cp_signal_to_service_with_timeout(uint32_t cur_session_id, ngx_uint_t timeout_ms) +{ + int res = 0; + size_t bytes_written = 0; + ngx_uint_t actual_timeout_ms = timeout_ms > 0 ? timeout_ms : 200; // Default 200ms + struct pollfd poll_fd; + int poll_result; + + write_dbg(DBG_LEVEL_TRACE, "Sending signal to the service to notify about new session data to inspect (timeout: %dms)", actual_timeout_ms); + + while (bytes_written < sizeof(cur_session_id)) { + res = write(comm_socket, ((char *)&cur_session_id) + bytes_written, sizeof(cur_session_id) - bytes_written); + + if (res > 0) { + bytes_written += res; + continue; + } + + if (res < 0) { + if (errno == EAGAIN || errno == EWOULDBLOCK) { + // Socket would block - use poll to wait for write readiness with timeout + poll_fd.fd = comm_socket; + poll_fd.events = POLLOUT; + poll_fd.revents = 0; + + poll_result = poll(&poll_fd, 1, actual_timeout_ms); + + if (poll_result < 0) { + write_dbg(DBG_LEVEL_WARNING, "Poll failed for comm_socket write: %s", strerror(errno)); + disconnect_communication(); + return NGX_ERROR; + } else if (poll_result == 0) { + write_dbg(DBG_LEVEL_DEBUG, "Write timeout (%dms) reached during signal to nano service for session %d", 
actual_timeout_ms, cur_session_id); + return NGX_HTTP_REQUEST_TIME_OUT; + } else { + // Socket is ready for writing, continue the loop + continue; + } + } else { + // Fatal write error - disconnect and return error + write_dbg(DBG_LEVEL_WARNING, "Fatal write error on comm_socket: %s", strerror(errno)); + disconnect_communication(); + return NGX_ERROR; + } + } else { + // res == 0, which shouldn't happen for write() on a socket + write_dbg(DBG_LEVEL_WARNING, "Unexpected write() return value 0 on comm_socket"); + disconnect_communication(); + return NGX_ERROR; + } + } + + write_dbg(DBG_LEVEL_DEBUG, "Successfully signaled nano service for session %d", cur_session_id); + return NGX_OK; +} + +/// @brief Generic wait verdict handler for all wait stages +/// @param[in] ctx Async context +/// @param[in] stage_name Stage name for logging +/// @return NGX_OK, NGX_AGAIN, NGX_HTTP_FORBIDDEN, or NGX_ERROR +/// +ngx_int_t +ngx_cp_async_wait_signal_sender(ngx_http_cp_async_ctx_t *ctx, ngx_uint_t *num_messages_sent) +{ + int err_code = 0; + ngx_int_t signal_res; + uint32_t session_id = ngx_cp_async_ctx_get_session_id_safe(ctx); + + if (session_id == 0) { + write_dbg(DBG_LEVEL_WARNING, "Wait signal sender: invalid session ID"); + return NGX_ERROR; + } + + static const ngx_uint_t wait_fragments_count = 2; + char *fragments[wait_fragments_count]; + uint16_t fragments_sizes[wait_fragments_count]; + AttachmentDataType transaction_type = REQUEST_DELAYED_VERDICT; + + set_fragments_identifiers(fragments, fragments_sizes, (uint16_t *)&transaction_type, &session_id); + + write_dbg(DBG_LEVEL_DEBUG, "Sending async wait data to shared memory for session %d", session_id); + + err_code = sendChunkedData(nano_service_ipc, fragments_sizes, (const char **)fragments, wait_fragments_count); + if (err_code != 0) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send wait data to shared memory for session %d, error: %d", session_id, err_code); + return NGX_ERROR; + } + 
ngx_cp_async_increment_pending_chunks(session_id, "wait_signal"); + + write_dbg(DBG_LEVEL_DEBUG, "Signaling agent service about wait data for session %d with %dms timeout protection", session_id, async_signal_timeout_ms); + signal_res = ngx_http_cp_signal_to_service_with_timeout(session_id, async_signal_timeout_ms); + if (signal_res != NGX_OK) { + if (signal_res == NGX_HTTP_REQUEST_TIME_OUT) { + write_dbg(DBG_LEVEL_DEBUG, "Signal timeout (%dms) reached for wait data, session %d - flow_error set", async_signal_timeout_ms, session_id); + ngx_cp_async_post_backpressure_drain_event(); + return NGX_HTTP_REQUEST_TIME_OUT; + } else { + write_dbg(DBG_LEVEL_WARNING, "Failed to signal service for wait data, session %d", session_id); + } + return NGX_ERROR; + } + + write_dbg(DBG_LEVEL_DEBUG, "Successfully sent async wait signal for session %d", session_id); + *num_messages_sent = 1; + return NGX_OK; +} + +/// +/// @brief Async version of ngx_http_cp_meta_data_sender - sends data but doesn't wait +/// @param[in] ctx Async context +/// @param[out] num_messages_sent Number of messages sent +/// @return NGX_OK on success, NGX_ERROR on failure, INSPECTION_IRRELEVANT if irrelevant +/// +ngx_int_t +ngx_cp_async_send_meta_data_nonblocking(ngx_http_cp_async_ctx_t *ctx, ngx_uint_t *num_messages_sent) +{ + static ngx_str_t ngx_parsed_host_str = ngx_string("host"); + char client_ip[INET6_ADDRSTRLEN]; + char listening_ip[INET6_ADDRSTRLEN]; + uint16_t client_ip_len; + uint16_t listening_ip_len; + uint16_t client_port; + uint16_t chunck_type; + uint16_t listening_port; + ngx_int_t res; + ngx_str_t maybe_host = { 0, (u_char *)"" }; + ngx_str_t ngx_parsed_host = { 0, (u_char *)"" }; + ngx_str_t parsed_uri = { 0, (u_char *)"" }; + ngx_http_variable_value_t *ngx_var; + char *fragments[META_DATA_COUNT + 2]; + uint16_t fragments_sizes[META_DATA_COUNT + 2]; + int err_code = 0; + + write_dbg(DBG_LEVEL_DEBUG, "Async sending request start meta data for inspection"); + + 
convert_sock_addr_to_string(((struct sockaddr *)ctx->request->connection->sockaddr), client_ip); + if(!is_inspection_required_for_source(client_ip)) return INSPECTION_IRRELEVANT; + + chunck_type = REQUEST_START; + set_fragments_identifiers(fragments, fragments_sizes, &chunck_type, &ctx->session_id); + + set_fragment_elem( + fragments, + fragments_sizes, + &ctx->request->http_protocol.len, + sizeof(uint16_t), + HTTP_PROTOCOL_SIZE + 2 + ); + + set_fragment_elem( + fragments, + fragments_sizes, + ctx->request->http_protocol.data, + ctx->request->http_protocol.len, + HTTP_PROTOCOL_DATA + 2 + ); + + set_fragment_elem(fragments, fragments_sizes, &ctx->request->method_name.len, sizeof(uint16_t), HTTP_METHOD_SIZE + 2); + set_fragment_elem( + fragments, + fragments_sizes, + ctx->request->method_name.data, + ctx->request->method_name.len, + HTTP_METHOD_DATA + 2 + ); + + ngx_var = ngx_http_get_variable(ctx->request, &ngx_parsed_host_str, ngx_hash_key(ngx_parsed_host_str.data, ngx_parsed_host_str.len)); + if (ngx_var == NULL || ngx_var->not_found) { + write_dbg(DBG_LEVEL_DEBUG, "No parsed host found, using headers host"); + if (ctx->request->headers_in.host != NULL) { + maybe_host.data = ctx->request->headers_in.host->value.data; + maybe_host.len = ctx->request->headers_in.host->value.len; + } + } else { + ngx_parsed_host.data = ngx_var->data; + ngx_parsed_host.len = ngx_var->len; + } + + if (ctx->request->uri.len > 0) { + parsed_uri.data = ctx->request->uri.data; + parsed_uri.len = ctx->request->uri.len; + } else { + parsed_uri.data = ctx->request->unparsed_uri.data; + parsed_uri.len = ctx->request->unparsed_uri.len; + } + + set_fragment_elem( + fragments, + fragments_sizes, + &maybe_host.len, + sizeof(uint16_t), + HOST_NAME_SIZE + 2 + ); + set_fragment_elem( + fragments, + fragments_sizes, + maybe_host.data, + maybe_host.len, + HOST_NAME_DATA + 2 + ); + + // Add listening IP and port data (exact same logic) + convert_sock_addr_to_string(((struct sockaddr 
*)ctx->request->connection->local_sockaddr), listening_ip); + listening_ip_len = strlen(listening_ip); + set_fragment_elem(fragments, fragments_sizes, &listening_ip_len, sizeof(uint16_t), LISTENING_ADDR_SIZE + 2); + set_fragment_elem(fragments, fragments_sizes, listening_ip, listening_ip_len, LISTENING_ADDR_DATA + 2); + + listening_port = htons(((struct sockaddr_in *)ctx->request->connection->local_sockaddr)->sin_port); + set_fragment_elem(fragments, fragments_sizes, &listening_port, sizeof(listening_port), LISTENING_PORT + 2); + + // Add URI data (exact same logic) + set_fragment_elem(fragments, fragments_sizes, &ctx->request->unparsed_uri.len, sizeof(uint16_t), URI_SIZE + 2); + set_fragment_elem(fragments, fragments_sizes, ctx->request->unparsed_uri.data, ctx->request->unparsed_uri.len, URI_DATA + 2); + + // Add client IP and port data (exact same logic) + client_ip_len = strlen(client_ip); + set_fragment_elem(fragments, fragments_sizes, &client_ip_len, sizeof(uint16_t), CLIENT_ADDR_SIZE + 2); + set_fragment_elem(fragments, fragments_sizes, client_ip, client_ip_len, CLIENT_ADDR_DATA + 2); + + client_port = htons(((struct sockaddr_in *)ctx->request->connection->sockaddr)->sin_port); + set_fragment_elem(fragments, fragments_sizes, &client_port, sizeof(client_port), CLIENT_PORT + 2); + + // Add parsed host and URI data (exact same logic) + set_fragment_elem(fragments, fragments_sizes, &ngx_parsed_host.len, sizeof(uint16_t), PARSED_HOST_SIZE + 2); + set_fragment_elem(fragments, fragments_sizes, ngx_parsed_host.data, ngx_parsed_host.len, PARSED_HOST_DATA + 2); + + set_fragment_elem(fragments, fragments_sizes, &parsed_uri.len, sizeof(uint16_t), PARSED_URI_SIZE + 2); + set_fragment_elem(fragments, fragments_sizes, parsed_uri.data, parsed_uri.len, PARSED_URI_DATA + 2); + + // Add WAF tag data (exact same logic) + if (ctx->waf_tag.len > 0) { + set_fragment_elem(fragments, fragments_sizes, &ctx->waf_tag.len, sizeof(uint16_t), WAF_TAG_SIZE + 2); + 
set_fragment_elem(fragments, fragments_sizes, ctx->waf_tag.data, ctx->waf_tag.len, WAF_TAG_DATA + 2); + } else { + uint16_t zero = 0; + set_fragment_elem(fragments, fragments_sizes, &zero, sizeof(uint16_t), WAF_TAG_SIZE + 2); + set_fragment_elem(fragments, fragments_sizes, "", 0, WAF_TAG_DATA + 2); + } + + write_dbg(DBG_LEVEL_DEBUG, "Async sending meta data chunk to shared memory"); + + err_code = sendChunkedData(nano_service_ipc, fragments_sizes, (const char **)fragments, META_DATA_COUNT + 2); + if (err_code != 0) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send meta data chunk - error code %d", err_code); + return NGX_ERROR; + } + ngx_cp_async_increment_pending_chunks(ctx->session_id, "meta_data"); + + write_dbg(DBG_LEVEL_DEBUG, "Async signaling agent for meta data with %dms timeout protection", async_signal_timeout_ms); + res = ngx_http_cp_signal_to_service_with_timeout(ctx->session_id, async_signal_timeout_ms); + if (res != NGX_OK && res != NGX_HTTP_REQUEST_TIME_OUT) { + write_dbg(DBG_LEVEL_WARNING, "Failed to signal agent for single body chunk, session %d", ctx->session_id); + return NGX_ERROR; + } + + if (res == NGX_HTTP_REQUEST_TIME_OUT) { + write_dbg(DBG_LEVEL_DEBUG, "Signal timeout (%dms) reached for single body chunk, session %d", async_signal_timeout_ms, ctx->session_id); + ngx_cp_async_post_backpressure_drain_event(); + } + + *num_messages_sent = 1; + write_dbg(DBG_LEVEL_DEBUG, "Async meta data sent and signaled successfully"); + return NGX_OK; +} + +/// +/// @brief Async version of ngx_http_cp_header_sender - sends data but doesn't wait +/// @param[in] ctx Async context +/// @param[out] num_messages_sent Number of messages sent +/// @return NGX_OK on success, NGX_ERROR on failure +/// +ngx_int_t +ngx_cp_async_send_headers_nonblocking(ngx_http_cp_async_ctx_t *ctx, ngx_uint_t *num_messages_sent) +{ + ngx_uint_t header_idx = 0; + ngx_uint_t idx_in_bulk = 0; + ngx_uint_t num_of_bulks_sent = 0; + uint8_t part_count = 0; + uint8_t bulk_part_idx = 0; + 
uint8_t is_last_part; + ngx_list_part_t *headers_iter; + ngx_table_elt_t *headers_to_inspect; + ngx_table_elt_t *header; + const ngx_uint_t max_bulk_size = 10; + char *fragments[HEADER_DATA_COUNT * max_bulk_size + 4]; + uint16_t fragments_sizes[HEADER_DATA_COUNT * max_bulk_size + 4]; + int err_code = 0; + ngx_int_t res; + + write_dbg(DBG_LEVEL_DEBUG, "Async sending request headers for inspection"); + + uint16_t header_type = REQUEST_HEADER; + set_fragments_identifiers(fragments, fragments_sizes, &header_type, &ctx->session_id); + + for (headers_iter = &(ctx->request->headers_in.headers.part); headers_iter; headers_iter = headers_iter->next) { + for (header_idx = 0; header_idx < headers_iter->nelts; ++header_idx) { + headers_to_inspect = headers_iter->elts; + header = headers_to_inspect + header_idx; + + write_dbg( + DBG_LEVEL_DEBUG, + "Async sending header (key: '%.*s', value: '%.*s')", + header->key.len, + header->key.data, + header->value.len, + header->value.data + ); + + is_last_part = (headers_iter->next == NULL && header_idx + 1 == headers_iter->nelts) ? 
1 : 0; + add_header_to_bulk(fragments, fragments_sizes, header, idx_in_bulk); + + idx_in_bulk++; + part_count++; + if (idx_in_bulk < max_bulk_size && !is_last_part) continue; + + set_fragment_elem(fragments, fragments_sizes, &is_last_part, sizeof(is_last_part), 2); + set_fragment_elem(fragments, fragments_sizes, &bulk_part_idx, sizeof(bulk_part_idx), 3); + + write_dbg(DBG_LEVEL_DEBUG, "Async sending header bulk to shared memory"); + err_code = sendChunkedData( + nano_service_ipc, + fragments_sizes, + (const char **)fragments, + HEADER_DATA_COUNT * idx_in_bulk + 4 + ); + + if (err_code != 0) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send header bulk - error code %d", err_code); + return NGX_ERROR; + } + + ngx_cp_async_increment_pending_chunks(ctx->session_id, "headers"); + + num_of_bulks_sent++; + write_dbg(DBG_LEVEL_DEBUG, "Async header bulk sent successfully (no signal yet)"); + + if (is_last_part) break; + + idx_in_bulk = 0; + bulk_part_idx = part_count; + } + } + + if (part_count == 0) { + write_dbg(DBG_LEVEL_DEBUG, "Async sending empty header list"); + + uint8_t is_last_part = 1; + uint8_t bulk_part_idx = 0; + set_fragment_elem(fragments, fragments_sizes, &is_last_part, sizeof(is_last_part), 2); + set_fragment_elem(fragments, fragments_sizes, &bulk_part_idx, sizeof(bulk_part_idx), 3); + + err_code = sendChunkedData( + nano_service_ipc, + fragments_sizes, + (const char **)fragments, + HEADER_DATA_COUNT * 1 + 4 + ); + + if (err_code != 0) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send empty header list - error code %d", err_code); + return NGX_ERROR; + } + + // Increment pending chunks counter for empty headers + ngx_cp_async_increment_pending_chunks(ctx->session_id, "headers"); + + num_of_bulks_sent = 1; + } + + // Signal agent once after all header bulks are sent + write_dbg(DBG_LEVEL_DEBUG, "Async signaling agent for all headers with %dms timeout protection", async_signal_timeout_ms); + res = ngx_http_cp_signal_to_service_with_timeout(ctx->session_id, 
async_signal_timeout_ms); + if (res != NGX_OK && res != NGX_HTTP_REQUEST_TIME_OUT) { + write_dbg(DBG_LEVEL_WARNING, "Failed to signal agent for single body chunk, session %d", ctx->session_id); + return NGX_ERROR; + } + + if (res == NGX_HTTP_REQUEST_TIME_OUT) { + write_dbg(DBG_LEVEL_DEBUG, "Signal timeout (%dms) reached for single body chunk, session %d", async_signal_timeout_ms, ctx->session_id); + ngx_cp_async_post_backpressure_drain_event(); + } + + *num_messages_sent = num_of_bulks_sent; + write_dbg(DBG_LEVEL_DEBUG, "Async headers sent and signaled successfully - %d bulks", num_of_bulks_sent); + return NGX_OK; +} + +/// +/// @brief Async version of ngx_http_cp_end_transaction_sender - sends data but doesn't wait +/// @param[in] ctx Async context +/// @param[out] num_messages_sent Number of messages sent +/// @return NGX_OK on success, NGX_ERROR on failure +/// +ngx_int_t +ngx_cp_async_send_end_transaction_nonblocking(ngx_http_cp_async_ctx_t *ctx, ngx_uint_t *num_messages_sent) +{ + char *fragments[2]; + uint16_t fragments_sizes[2]; + uint16_t chunck_type = REQUEST_END; + int err_code = 0; + ngx_int_t res; + + write_dbg(DBG_LEVEL_DEBUG, "Async sending end transaction for inspection"); + + set_fragments_identifiers(fragments, fragments_sizes, &chunck_type, &ctx->session_id); + + write_dbg(DBG_LEVEL_DEBUG, "Async sending end transaction to shared memory"); + err_code = sendChunkedData(nano_service_ipc, fragments_sizes, (const char **)fragments, 2); + if (err_code != 0) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send end transaction - error code %d", err_code); + return NGX_ERROR; + } + + ngx_cp_async_increment_pending_chunks(ctx->session_id, "end_transaction"); + + write_dbg(DBG_LEVEL_DEBUG, "Async signaling agent for end transaction with %dms timeout protection", async_signal_timeout_ms); + res = ngx_http_cp_signal_to_service_with_timeout(ctx->session_id, async_signal_timeout_ms); + if (res != NGX_OK && res != NGX_HTTP_REQUEST_TIME_OUT) { + 
write_dbg(DBG_LEVEL_WARNING, "Failed to signal agent for end transaction"); + return NGX_ERROR; + } + + if (res == NGX_HTTP_REQUEST_TIME_OUT) { + write_dbg(DBG_LEVEL_DEBUG, "Signal timeout (%dms) reached for end transaction, session %d", async_signal_timeout_ms, ctx->session_id); + ngx_cp_async_post_backpressure_drain_event(); + } + + *num_messages_sent = 1; + ctx->end_transaction_sent = 1; + write_dbg(DBG_LEVEL_DEBUG, "Async end transaction sent and signaled successfully"); + return NGX_OK; +} + +ngx_int_t +ngx_cp_async_send_single_body_chunk_nonblocking(ngx_http_cp_async_ctx_t *ctx, ngx_chain_t *chunk, ngx_uint_t *num_messages_sent) +{ + static const ngx_uint_t num_body_chunk_fragments = 5; + + ngx_buf_t *buf; + ngx_int_t res = NGX_ERROR; + uint8_t is_last_chunk; + uint8_t part_count = 0; + size_t buf_size; + char *fragments[num_body_chunk_fragments]; + uint16_t fragments_sizes[num_body_chunk_fragments]; + AttachmentDataType body_type = REQUEST_BODY; + + write_dbg(DBG_LEVEL_DEBUG, "Sending single body chunk for session %d", ctx->session_id); + + if (chunk == NULL) { + write_dbg(DBG_LEVEL_WARNING, "No chunk data to send for session %d", ctx->session_id); + *num_messages_sent = 0; + return NGX_OK; + } + + set_fragments_identifiers(fragments, fragments_sizes, (uint16_t *)&body_type, &ctx->session_id); + + buf = chunk->buf; + is_last_chunk = buf->last_buf ? 
1 : 0; + buf_size = buf->last - buf->pos; + + write_dbg( + DBG_LEVEL_DEBUG, + "Processing single body chunk of size: %zu, last_chunk: %d for session %d", + buf_size, + is_last_chunk, + ctx->session_id + ); + + if (buf_size > 0 || is_last_chunk) { + set_fragment_elem(fragments, fragments_sizes, &is_last_chunk, sizeof(is_last_chunk), 2); + set_fragment_elem(fragments, fragments_sizes, &part_count, sizeof(part_count), 3); + set_fragment_elem(fragments, fragments_sizes, buf->pos, buf->last - buf->pos, 4); + + ctx->session_data->processed_req_body_size += (buf->last - buf->pos); + + write_dbg(DBG_LEVEL_DEBUG, "Sending single body chunk to agent for session %d", ctx->session_id); + res = sendChunkedData(nano_service_ipc, fragments_sizes, (const char **)fragments, num_body_chunk_fragments); + if (res != 0) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send single body chunk to agent for session %d: %d", ctx->session_id, res); + return NGX_ERROR; + } + ngx_cp_async_increment_pending_chunks(ctx->session_id, "body_chunk"); + + write_dbg(DBG_LEVEL_DEBUG, "Successfully sent single body chunk to agent for session %d", ctx->session_id); + + res = ngx_http_cp_signal_to_service_with_timeout(ctx->session_id, async_signal_timeout_ms); + if (res != NGX_OK && res != NGX_HTTP_REQUEST_TIME_OUT) { + write_dbg(DBG_LEVEL_WARNING, "Failed to signal agent for single body chunk, session %d", ctx->session_id); + return NGX_ERROR; + } + + if (res == NGX_HTTP_REQUEST_TIME_OUT) { + write_dbg(DBG_LEVEL_DEBUG, "Signal timeout (%dms) reached for single body chunk, session %d", async_signal_timeout_ms, ctx->session_id); + ngx_cp_async_post_backpressure_drain_event(); + } + + *num_messages_sent = 1; + write_dbg(DBG_LEVEL_DEBUG, "Single chunk sender completed successfully for session %d", ctx->session_id); + return NGX_OK; + } + + write_dbg(DBG_LEVEL_DEBUG, "Empty single chunk for session %d", ctx->session_id); + *num_messages_sent = 0; + return NGX_OK; +} + + +ngx_int_t 
+ngx_cp_async_send_to_agent_nonblocking( + ngx_http_cp_async_ctx_t *ctx, + AttachmentDataType chunk_type, + const void *data, + uint16_t data_size) +{ + ngx_int_t res; + NanoHttpRequestData *request_data; + uint16_t total_size; + const char *chunks[2]; + uint16_t chunk_sizes[2]; + + write_dbg( + DBG_LEVEL_DEBUG, + "Sending non-blocking data to agent for session %d, type: %d", + ctx->session_id, + chunk_type + ); + + total_size = sizeof(NanoHttpRequestData) + data_size; + + request_data = ngx_palloc(ctx->request->pool, total_size); + if (request_data == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to allocate request data for session %d", ctx->session_id); + return NGX_ERROR; + } + + request_data->data_type = chunk_type; + request_data->session_id = ctx->session_id; + + if (data && data_size > 0) { + ngx_memcpy(request_data->data, data, data_size); + } + + chunks[0] = (const char *)request_data; + chunk_sizes[0] = total_size; + + res = sendChunkedData(nano_service_ipc, chunk_sizes, chunks, 1); + if (res != 0) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send data to agent for session %d: %d", ctx->session_id, res); + return NGX_ERROR; + } + + write_dbg(DBG_LEVEL_DEBUG, "Successfully sent data to agent for session %d", ctx->session_id); + return NGX_OK; +} + +ngx_int_t +ngx_cp_async_signal_agent_nonblocking(ngx_http_cp_async_ctx_t *ctx) +{ + ssize_t bytes_written; + uint32_t session_id = ctx->session_id; + + write_dbg(DBG_LEVEL_DEBUG, "Signaling agent for session %d (non-blocking)", ctx->session_id); + + if (comm_socket < 0) { + write_dbg(DBG_LEVEL_ERROR, "Communication socket not ready yet for session %d - skipping signal", ctx->session_id); + return NGX_OK; + } + + bytes_written = write(comm_socket, &session_id, sizeof(session_id)); + if (bytes_written != sizeof(session_id)) { + write_dbg(DBG_LEVEL_WARNING, "Failed to signal agent for session %d: %zd", ctx->session_id, bytes_written); + return NGX_ERROR; + } + + write_dbg(DBG_LEVEL_DEBUG, "Successfully signaled 
agent for session %d", ctx->session_id); + return NGX_OK; +} diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_sender.h b/attachments/nginx/ngx_module/async/ngx_cp_async_sender.h new file mode 100755 index 0000000..f89604b --- /dev/null +++ b/attachments/nginx/ngx_module/async/ngx_cp_async_sender.h @@ -0,0 +1,37 @@ +#ifndef __NGX_CP_ASYNC_SENDER_H__ +#define __NGX_CP_ASYNC_SENDER_H__ + +#include +#include +#include +#include + +#include "ngx_cp_async_core.h" +#include "nano_attachment_common.h" + +ngx_int_t ngx_cp_async_wait_signal_sender(ngx_http_cp_async_ctx_t *ctx, ngx_uint_t *num_messages_sent); + +ngx_int_t ngx_cp_async_send_meta_data_nonblocking(ngx_http_cp_async_ctx_t *ctx, ngx_uint_t *num_messages_sent); + +ngx_int_t ngx_cp_async_wait_signal_sender(ngx_http_cp_async_ctx_t *ctx, ngx_uint_t *num_messages_sent); + +ngx_int_t ngx_cp_async_send_headers_nonblocking(ngx_http_cp_async_ctx_t *ctx, ngx_uint_t *num_messages_sent); + +ngx_int_t ngx_cp_async_send_end_transaction_nonblocking(ngx_http_cp_async_ctx_t *ctx, ngx_uint_t *num_messages_sent); + +ngx_int_t +ngx_cp_async_send_single_body_chunk_nonblocking( + ngx_http_cp_async_ctx_t *ctx, + ngx_chain_t *chunk, + ngx_uint_t *num_messages_sent +); + +ngx_int_t +ngx_cp_async_send_to_agent_nonblocking( + ngx_http_cp_async_ctx_t *ctx, + AttachmentDataType chunk_type, + const void *data, + uint16_t data_size +); + +#endif // __NGX_CP_ASYNC_SENDER_H__ diff --git a/attachments/nginx/ngx_module/async/ngx_cp_async_types.h b/attachments/nginx/ngx_module/async/ngx_cp_async_types.h new file mode 100755 index 0000000..1811d5f --- /dev/null +++ b/attachments/nginx/ngx_module/async/ngx_cp_async_types.h @@ -0,0 +1,29 @@ +#ifndef __NGX_CP_ASYNC_TYPES_H__ +#define __NGX_CP_ASYNC_TYPES_H__ + +#include +#include +#include + +// Forward declarations +typedef struct ngx_http_cp_async_ctx ngx_http_cp_async_ctx_t; +typedef struct ngx_http_cp_session_data ngx_http_cp_session_data; + +/// @enum ngx_cp_async_stage_t +/// @brief 
Processing stages for async operations +typedef enum { + NGX_CP_ASYNC_STAGE_INIT = 0, + NGX_CP_ASYNC_STAGE_META_DATA, + NGX_CP_ASYNC_STAGE_WAIT_META_VERDICT, + NGX_CP_ASYNC_STAGE_HEADERS, + NGX_CP_ASYNC_STAGE_WAIT_HEADER_VERDICT, + NGX_CP_ASYNC_STAGE_END_TRANSACTION, + NGX_CP_ASYNC_STAGE_WAIT_END_VERDICT, + NGX_CP_ASYNC_STAGE_BODY, + NGX_CP_ASYNC_STAGE_WAIT_BODY_VERDICT, + NGX_CP_ASYNC_STAGE_VERDICT, + NGX_CP_ASYNC_STAGE_COMPLETE, + NGX_CP_ASYNC_STAGE_ERROR = -1 +} ngx_cp_async_stage_t; + +#endif // __NGX_CP_ASYNC_TYPES_H__ diff --git a/attachments/nginx/ngx_module/ngx_cp_compression.c b/attachments/nginx/ngx_module/ngx_cp_compression.c index 3678386..c73c8f9 100644 --- a/attachments/nginx/ngx_module/ngx_cp_compression.c +++ b/attachments/nginx/ngx_module/ngx_cp_compression.c @@ -166,28 +166,39 @@ ngx_chain_remove_empty_chunks(ngx_chain_t **chain, ngx_pool_t *pool) { ngx_chain_t *prev = NULL; ngx_chain_t *curr = *chain; - size_t chunk_num = 0; + size_t chunk_index = 0; - while (curr != NULL) { - size_t size = curr->buf->last - curr->buf->pos; - if (size == 0) { - write_dbg(DBG_LEVEL_WARNING, "Removing empty chunk from the chain, chunk number: %d", chunk_num); - if (prev == NULL) { - *chain = curr->next; - } else { - prev->next = curr->next; - } - ngx_chain_t *tmp = curr; + while (curr) { + ngx_buf_t *b = curr->buf; + + /* Keep special links (flush/last_buf/sync) even if size is 0 */ + if (ngx_buf_special(b)) { + prev = curr; curr = curr->next; + chunk_index++; + continue; + } + + off_t buf_size = curr->buf->last - curr->buf->pos; + if (buf_size <= 0) { + ngx_chain_t *tmp = curr; + write_dbg(DBG_LEVEL_WARNING, + "Removing empty chunk from the chain, index: %zu", chunk_index); + curr = curr->next; + if (prev == NULL) { + *chain = curr; + } else { + prev->next = curr; + } ngx_free_chain(pool, tmp); continue; } prev = curr; curr = curr->next; - chunk_num++; + chunk_index++; } - if (chunk_num == 0) { + if (*chain == NULL) { write_dbg(DBG_LEVEL_WARNING, "Empty chain 
after removing empty chunks"); return NGX_ERROR; } @@ -426,16 +437,13 @@ compression_chain_filter( return NGX_ERROR; } - if (curr_original_contents_link != NULL) { + // Save ONLY first buffer of original body if requested (prevents memory spikes) + if (curr_original_contents_link != NULL && curr_original_contents_link->buf == NULL) { + // Only save the FIRST buffer, don't accumulate subsequent chunks curr_original_contents_link->buf = ngx_calloc_buf(pool); ngx_memcpy(curr_original_contents_link->buf, curr_input_link->buf, sizeof(ngx_buf_t)); - - if (curr_input_link->next != NULL) { - // Allocates next chain. - curr_original_contents_link->next = ngx_alloc_chain_link(pool); - ngx_memset(curr_original_contents_link->next, 0, sizeof(ngx_chain_t)); - curr_original_contents_link = curr_original_contents_link->next; - } + curr_original_contents_link->next = NULL; // No accumulation + write_dbg(DBG_LEVEL_TRACE, "Saved first chunk of original body (no accumulation)"); } ngx_memcpy(curr_input_link->buf, output_buffer, sizeof(ngx_buf_t)); @@ -522,7 +530,7 @@ decompress_chain( ngx_int_t decompress_body( CompressionStream *decompression_stream, - const ngx_http_chunk_type_e chunk_type, + const AttachmentDataType chunk_type, int *is_last_decompressed_part, ngx_chain_t **body, ngx_chain_t **original_body_contents, @@ -563,7 +571,7 @@ ngx_int_t compress_body( CompressionStream *compression_stream, const CompressionType compression_type, - const ngx_http_chunk_type_e chunk_type, + const AttachmentDataType chunk_type, const int is_last_part, ngx_chain_t **body, ngx_chain_t **original_body_contents, @@ -581,21 +589,31 @@ compress_body( return NGX_ERROR; } - if (compression_type == BROTLI) { - // Brotli compression is not supported. - // This if statement serves a case that the compression type is set to BROTLI - // For now, we should not reach inside this function with a compression type of BROTLI. 
- write_dbg(DBG_LEVEL_WARNING, "Brotli compression is not supported"); - return NGX_ERROR; + body_type = chunk_type == REQUEST_BODY ? "request" : "response"; + + const char* format_name = "unknown"; + switch (compression_type) { + case GZIP: + format_name = "gzip"; + break; + case ZLIB: + format_name = "zlib"; + break; + case BROTLI: + format_name = "brotli"; + break; + default: + write_dbg(DBG_LEVEL_WARNING, "Unknown compression type: %d", compression_type); + return NGX_ERROR; } - body_type = chunk_type == REQUEST_BODY ? "request" : "response"; write_dbg( DBG_LEVEL_TRACE, "Compressing plain-text %s body in the format \"%s\"", body_type, - compression_type == GZIP ? "gzip" : "zlib" + format_name ); + // Checks if the compression was successful. compress_res = compress_chain( compression_stream, diff --git a/attachments/nginx/ngx_module/ngx_cp_compression.h b/attachments/nginx/ngx_module/ngx_cp_compression.h index 21fbe57..8cebe82 100644 --- a/attachments/nginx/ngx_module/ngx_cp_compression.h +++ b/attachments/nginx/ngx_module/ngx_cp_compression.h @@ -18,7 +18,7 @@ #include -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" #include "compression_utils.h" /// @struct ngx_cp_http_compression_params @@ -58,7 +58,7 @@ void initialize_compression_debug_printing(); ngx_int_t decompress_body( CompressionStream *decompression_stream, - const ngx_http_chunk_type_e chunk_type, + const AttachmentDataType chunk_type, int *is_last_decompressed_part, ngx_chain_t **body, ngx_chain_t **original_body_contents, @@ -91,7 +91,7 @@ ngx_int_t compress_body( CompressionStream *compression_stream, const CompressionType compression_type, - const ngx_http_chunk_type_e chunk_type, + const AttachmentDataType chunk_type, const int is_last_part, ngx_chain_t **body, ngx_chain_t **original_body_contents, diff --git a/attachments/nginx/ngx_module/ngx_cp_custom_response.c b/attachments/nginx/ngx_module/ngx_cp_custom_response.c index 6a59416..76fc6cf 100644 --- 
a/attachments/nginx/ngx_module/ngx_cp_custom_response.c +++ b/attachments/nginx/ngx_module/ngx_cp_custom_response.c @@ -181,16 +181,36 @@ ngx_http_cp_file_response_sender( /// /// @brief Adds event ID to the provided NGINX request. /// @param[in, out] request NGINX request. -/// +/// void ngx_add_event_id_to_header(ngx_http_request_t *request) { u_char *uuid = (u_char *)get_web_response_uuid(); + if (uuid == NULL) { + write_dbg(DBG_LEVEL_WARNING, "web_response_uuid is NULL, skipping X-Event-ID header"); + return; + } + ngx_uint_t uuid_size = get_web_response_uuid_size(); + if (uuid_size == 0) { + write_dbg(DBG_LEVEL_WARNING, "web_response_uuid_size is 0, skipping X-Event-ID header"); + return; + } + + // Validate that UUID contains actual data, not just null bytes + if (uuid[0] == '\0') { + write_dbg( + DBG_LEVEL_WARNING, + "web_response_uuid is empty (contains null bytes) despite size %d, skipping X-Event-ID header", + uuid_size + ); + return; + } + static u_char uuid_key[] = { 'X', '-', 'E', 'v', 'e', 'n', 't', '-', 'I', 'D' }; write_dbg( - DBG_LEVEL_WARNING, + DBG_LEVEL_TRACE, "Adding instance ID to header. 
Incident ID: %s, Incident ID size: %d", uuid, uuid_size @@ -204,6 +224,71 @@ ngx_add_event_id_to_header(ngx_http_request_t *request) ); } +ngx_int_t +ngx_http_cp_finalize_custom_response_request(ngx_http_request_t *request) +{ + ngx_chain_t out_chain[1]; + ngx_int_t rc; + uint16_t response_code; + + write_dbg(DBG_LEVEL_TRACE, "Finalizing Custom JSON Response request"); + + // Get JSON response data + response_code = get_response_code_json(); + + rc = get_response_page_json(request, &out_chain); + if (rc != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "Failed to get JSON response page"); + goto CUSTOM_RESPONSE_OUT; + } + + request->keepalive = 0; + request->headers_out.status = response_code; + request->headers_out.status_line.len = 0; + + delete_headers_list(&request->headers_out.headers); + + if (get_response_content_type() == CONTENT_TYPE_TEXT_HTML) { + static u_char text_html[] = {'t', 'e', 'x', 't', '/', 'h', 't', 'm', 'l'}; + request->headers_out.content_type.len = sizeof(text_html); + request->headers_out.content_type.data = text_html; + request->headers_out.content_type_len = request->headers_out.content_type.len; + } else { + static u_char json_content_type[] = {'a', 'p', 'p', 'l', 'i', 'c', 'a', 't', 'i', 'o', 'n', '/', 'j', 's', 'o', 'n'}; + request->headers_out.content_type.len = sizeof(json_content_type); + request->headers_out.content_type.data = json_content_type; + request->headers_out.content_type_len = request->headers_out.content_type.len; + + } + + // Set content length + request->headers_out.content_length_n = get_response_page_length_json(); + + rc = ngx_http_send_header(request); + if (rc == NGX_ERROR || rc > NGX_OK) { + write_dbg( + DBG_LEVEL_WARNING, + "Failed to send Custom JSON Response headers (result: %d)", + rc + ); + goto CUSTOM_RESPONSE_OUT; + } + + write_dbg(DBG_LEVEL_TRACE, "Successfully sent Custom JSON Response headers"); + + // Send the JSON response body using the chain data + rc = ngx_http_output_filter(request, out_chain); + if 
(rc != NGX_OK && rc != NGX_AGAIN) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send Custom JSON Response"); + } else { + write_dbg(DBG_LEVEL_TRACE, "Custom JSON Response sent successfully"); + } + +CUSTOM_RESPONSE_OUT: + ngx_http_finalize_request(request, NGX_HTTP_CLOSE); + return NGX_HTTP_CLOSE; +} + ngx_int_t ngx_http_cp_finalize_rejected_request(ngx_http_request_t *request, int is_response_phase) { @@ -241,6 +326,7 @@ ngx_http_cp_finalize_rejected_request(ngx_http_request_t *request, int is_respon goto CUSTOM_RES_OUT; } + delete_headers_list(&request->headers_out.headers); if (get_response_code() == NGX_HTTP_TEMPORARY_REDIRECT) { // Handling redirect web response. write_dbg( @@ -275,7 +361,7 @@ ngx_http_cp_finalize_rejected_request(ngx_http_request_t *request, int is_respon ngx_add_event_id_to_header(request); - if (get_response_page_length() == 0) { + if (get_response_page_length_web_page() == 0) { // Page details were not provided. write_dbg( DBG_LEVEL_WARNING, @@ -289,9 +375,7 @@ ngx_http_cp_finalize_rejected_request(ngx_http_request_t *request, int is_respon request->headers_out.content_type.len = size_of_text_html; request->headers_out.content_type_len = request->headers_out.content_type.len; request->headers_out.content_type.data = text_html; - request->headers_out.content_length_n = get_response_page_length(); - - delete_headers_list(&request->headers_out.headers); + request->headers_out.content_length_n = get_response_page_length_web_page(); write_dbg(DBG_LEVEL_TRACE, "Sending response headers for rejected request"); rc = ngx_http_send_header(request); @@ -313,7 +397,7 @@ ngx_http_cp_finalize_rejected_request(ngx_http_request_t *request, int is_respon if (send_response_custom_body) { // Sending response custom body. - if (get_response_page(request, &out_chain) != NGX_OK) { + if (get_block_page_response(request, &out_chain) != NGX_OK) { // Failed to generate custom response page. 
write_dbg( DBG_LEVEL_DEBUG, @@ -611,7 +695,7 @@ perform_header_modification( ngx_list_t *headers, ngx_http_cp_list_iterator *headers_iterator, ngx_http_cp_modification_list *modification, - ngx_http_modification_type_e type, + HttpModificationType type, ngx_flag_t is_content_length ) { @@ -674,7 +758,7 @@ perform_header_modification( /// - #NULL if failed to get the next element. /// static ngx_http_cp_modification_list * -get_next_header_modification(ngx_http_cp_modification_list *modification, ngx_http_modification_type_e type) +get_next_header_modification(ngx_http_cp_modification_list *modification, HttpModificationType type) { switch (type) { case APPEND: @@ -698,7 +782,7 @@ get_next_header_modification(ngx_http_cp_modification_list *modification, ngx_ht static void free_header_modification( ngx_http_cp_modification_list *modification, - ngx_http_modification_type_e type, + HttpModificationType type, ngx_pool_t *pool ) { @@ -731,7 +815,7 @@ ngx_http_cp_header_modifier( ngx_flag_t is_content_length ) { - ngx_http_modification_type_e type; + HttpModificationType type; ngx_http_cp_modification_list *next_modification; ngx_http_cp_list_iterator headers_iterator; init_list_iterator(headers, &headers_iterator); @@ -785,75 +869,106 @@ ngx_http_cp_body_modifier( ngx_pool_t *pool ) { + static const size_t MAX_MODIFICATIONS_PER_CHUNK = 64; ngx_http_cp_modification_list *next_modification; ngx_uint_t cur_body_chunk = 0; ngx_chain_t *chain_iter; - ngx_chain_t *injected_chain_elem; - ngx_uint_t num_appended_elements; size_t cur_chunk_size = 0; for (chain_iter = body_chain; chain_iter; chain_iter = chain_iter->next, cur_body_chunk++) { - // Iterates of the body chains if (curr_modification == NULL) return NGX_OK; - if (curr_modification->modification.orig_buff_index != cur_body_chunk) continue; - + cur_chunk_size = chain_iter->buf->last - chain_iter->buf->pos; + if (cur_chunk_size == 0) { write_dbg(DBG_LEVEL_TRACE, "No need to modify body chunk of size 0. 
Chunk index: %d", cur_body_chunk); continue; } - write_dbg( - DBG_LEVEL_DEBUG, - "Handling current modification. " - "Injection position: %d, injection size: %d, original buffer index: %d, modification buffer: %s", - curr_modification->modification.injection_pos, - curr_modification->modification.injection_size, - curr_modification->modification.orig_buff_index, - curr_modification->modification_buffer - ); - // Create a chain element. - injected_chain_elem = create_chain_elem( - curr_modification->modification.injection_size, - curr_modification->modification_buffer, - pool - ); + ngx_http_cp_modification_list *modifications_for_chunk[MAX_MODIFICATIONS_PER_CHUNK]; + ngx_uint_t modification_count = 0; + ngx_http_cp_modification_list *temp_mod = curr_modification; - if (injected_chain_elem == NULL) { + while (temp_mod != NULL && temp_mod->modification.orig_buff_index == cur_body_chunk + && modification_count < MAX_MODIFICATIONS_PER_CHUNK) { + modifications_for_chunk[modification_count] = temp_mod; + modification_count++; + temp_mod = temp_mod->next; + } + + if (modification_count == 0) { + continue; + } + + size_t original_size = chain_iter->buf->last - chain_iter->buf->pos; + size_t total_injection_size = 0; + + for (ngx_uint_t i = 0; i < modification_count; i++) { + total_injection_size += modifications_for_chunk[i]->modification.injection_size; + } + + size_t new_buffer_size = original_size + total_injection_size; + + u_char *new_buffer = ngx_palloc(pool, new_buffer_size); + if (new_buffer == NULL) { free_modifications_list(curr_modification, pool); return NGX_ERROR; } - - write_dbg(DBG_LEVEL_DEBUG, "Handling modification of chain element number %d", cur_body_chunk); - // Handling modification of a chain element. - if (curr_modification->modification.injection_pos == 0) { - // Pre appends chain element. 
- prepend_chain_elem(chain_iter, injected_chain_elem); - chain_iter = chain_iter->next; - num_appended_elements = 0; - } else if (curr_modification->modification.injection_pos == chain_iter->buf->last - chain_iter->buf->pos + 1) { - // Prepend a chain element. - append_chain_elem(chain_iter, injected_chain_elem); - chain_iter = chain_iter->next; - num_appended_elements = 1; - } else { - if (split_chain_elem(chain_iter, curr_modification->modification.injection_pos, pool) != NGX_OK) { - // Failed to iterate over the modification. - free_modifications_list(curr_modification, pool); - return NGX_ERROR; + + u_char *original_data = chain_iter->buf->pos; + + // Sort modifications by injection position in DESCENDING order + for (ngx_uint_t i = 0; i < modification_count - 1; i++) { + for (ngx_uint_t j = 0; j < modification_count - i - 1; j++) { + if (modifications_for_chunk[j]->modification.injection_pos < + modifications_for_chunk[j + 1]->modification.injection_pos) { + ngx_http_cp_modification_list *temp = modifications_for_chunk[j]; + modifications_for_chunk[j] = modifications_for_chunk[j + 1]; + modifications_for_chunk[j + 1] = temp; + } } + } + + // Start with original buffer and apply modifications from end to beginning + u_char *current_buffer = original_data; + size_t current_size = original_size; + + for (ngx_uint_t i = 0; i < modification_count; i++) { + ngx_http_cp_modification_list *mod = modifications_for_chunk[i]; + size_t injection_pos = mod->modification.injection_pos; + + if (injection_pos > current_size) { + continue; + } + + size_t new_size = current_size + mod->modification.injection_size; + + u_char *target_buffer = (i == 0) ? 
new_buffer : ngx_palloc(pool, new_size); + if (target_buffer == NULL) { + continue; + } + + // Copy: [start...injection_pos] + [injection_data] + [injection_pos...end] + ngx_memcpy(target_buffer, current_buffer, injection_pos); + ngx_memcpy(target_buffer + injection_pos, mod->modification_buffer, mod->modification.injection_size); + ngx_memcpy(target_buffer + injection_pos + mod->modification.injection_size, + current_buffer + injection_pos, current_size - injection_pos); - append_chain_elem(chain_iter, injected_chain_elem); - chain_iter = chain_iter->next->next; - num_appended_elements = 2; + current_buffer = target_buffer; + current_size = new_size; } - // Moves to the next modification element and frees the modifier. - next_modification = curr_modification->next; - ngx_pfree(pool, curr_modification); - curr_modification = next_modification; + chain_iter->buf->pos = current_buffer; + chain_iter->buf->last = current_buffer + current_size; + chain_iter->buf->start = current_buffer; + chain_iter->buf->end = current_buffer + current_size; - cur_body_chunk += num_appended_elements; + for (ngx_uint_t i = 0; i < modification_count; i++) { + next_modification = modifications_for_chunk[i]->next; + ngx_pfree(pool, modifications_for_chunk[i]->modification_buffer); + ngx_pfree(pool, modifications_for_chunk[i]); + curr_modification = next_modification; + } } return NGX_OK; } diff --git a/attachments/nginx/ngx_module/ngx_cp_custom_response.h b/attachments/nginx/ngx_module/ngx_cp_custom_response.h index 85ecc8d..373d249 100644 --- a/attachments/nginx/ngx_module/ngx_cp_custom_response.h +++ b/attachments/nginx/ngx_module/ngx_cp_custom_response.h @@ -20,13 +20,13 @@ #include #include -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" /// @struct ngx_http_cp_modification_list /// @brief A node that holds all the information regarding modifications. typedef struct ngx_http_cp_modification_list { struct ngx_http_cp_modification_list *next; ///< Next node. 
- ngx_http_cp_inject_data_t modification; ///< Modification data. + HttpInjectData modification; ///< Modification data. char *modification_buffer; ///< Modification buffer used to store extra needed data. } ngx_http_cp_modification_list; @@ -83,6 +83,14 @@ ngx_http_cp_file_response_sender( /// ngx_int_t ngx_http_cp_finalize_rejected_request(ngx_http_request_t *request, int is_response_phase); +/// +/// @brief Finalizing a Custom Response request with JSON success response. +/// @param[in, out] request NGINX request. +/// @return ngx_int_t +/// - #NGX_HTTP_CLOSE +/// +ngx_int_t ngx_http_cp_finalize_custom_response_request(ngx_http_request_t *request); + /// /// @brief Modifies headers with the provided modifiers. /// @param[in, out] headers NGINX headers list. diff --git a/attachments/nginx/ngx_module/ngx_cp_failing_state.h b/attachments/nginx/ngx_module/ngx_cp_failing_state.h index 373acc8..0eb8e60 100644 --- a/attachments/nginx/ngx_module/ngx_cp_failing_state.h +++ b/attachments/nginx/ngx_module/ngx_cp_failing_state.h @@ -1,17 +1,17 @@ -// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /// @file ngx_cp_failing_state.h #ifndef __NGX_CP_FAILING_STATE_H__ #define __NGX_CP_FAILING_STATE_H__ @@ -19,7 +19,7 @@ #include #include -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" #include "ngx_cp_hooks.h" /// diff --git a/attachments/nginx/ngx_module/ngx_cp_hook_threads.c b/attachments/nginx/ngx_module/ngx_cp_hook_threads.c index 46d6b88..9a7d532 100644 --- a/attachments/nginx/ngx_module/ngx_cp_hook_threads.c +++ b/attachments/nginx/ngx_module/ngx_cp_hook_threads.c @@ -25,7 +25,7 @@ #include "nginx_attachment_util.h" #include "shmem_ipc.h" #include "compression_utils.h" -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" #include "ngx_cp_io.h" #include "ngx_cp_utils.h" #include "ngx_cp_initializer.h" @@ -104,6 +104,7 @@ init_thread_ctx( ctx->res = NGX_OK; ctx->should_return = 0; ctx->should_return_next_filter = 0; + ctx->chain_part_number = 0; ctx->chain = chain; ctx->modifications = NULL; } @@ -253,11 +254,13 @@ ngx_http_cp_req_body_filter_thread(void *_ctx) ngx_int_t is_last_part; ngx_int_t send_body_result; ngx_uint_t num_messages_sent = 0; + ngx_int_t part_count = 0; send_body_result = ngx_http_cp_body_sender( ctx->chain, REQUEST_BODY, session_data_p, + &part_count, &is_last_part, &num_messages_sent, &ctx->chain @@ -275,7 +278,7 @@ ngx_http_cp_req_body_filter_thread(void *_ctx) } THREAD_CTX_RETURN(NGX_HTTP_FORBIDDEN); } - session_data_p->remaining_messages_to_reply += num_messages_sent; + session_data_p->remaining_messages_to_reply += num_messages_sent; // Fetch nano services' results. 
ctx->res = ngx_http_cp_reply_receiver( @@ -318,7 +321,8 @@ ngx_http_cp_req_end_transaction_thread(void *_ctx) session_data_p->remaining_messages_to_reply += num_messages_sent; if (session_data_p->verdict != TRAFFIC_VERDICT_ACCEPT && - session_data_p->verdict != TRAFFIC_VERDICT_DROP) { + session_data_p->verdict != TRAFFIC_VERDICT_DROP && + session_data_p->verdict != TRAFFIC_VERDICT_CUSTOM_RESPONSE) { // Fetch nano services' results. ctx->res = ngx_http_cp_reply_receiver( &session_data_p->remaining_messages_to_reply, @@ -454,6 +458,7 @@ ngx_http_cp_res_body_filter_thread(void *_ctx) { struct ngx_http_cp_event_thread_ctx_t *ctx = (struct ngx_http_cp_event_thread_ctx_t *)_ctx; ngx_http_request_t *request = ctx->request; + ngx_int_t part_number = ctx->chain_part_number; ngx_http_cp_session_data *session_data_p = ctx->session_data_p; ngx_int_t send_body_result; ngx_uint_t num_messages_sent = 0; @@ -470,6 +475,7 @@ ngx_http_cp_res_body_filter_thread(void *_ctx) ctx->chain, RESPONSE_BODY, session_data_p, + &part_number, &is_last_response_part, &num_messages_sent, &ctx->chain @@ -519,7 +525,7 @@ ngx_http_cp_res_body_filter_thread(void *_ctx) ); - if (session_data_p->verdict == TRAFFIC_VERDICT_WAIT) { + if (session_data_p->verdict == TRAFFIC_VERDICT_DELAYED) { if (!ngx_http_cp_hold_verdict(ctx)) { session_data_p->verdict = fail_mode_hold_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; updateMetricField(HOLD_THREAD_TIMEOUT, 1); @@ -585,7 +591,7 @@ ngx_http_cp_res_body_filter_thread(void *_ctx) session_data_p->session_id ); - if (session_data_p->verdict == TRAFFIC_VERDICT_WAIT) { + if (session_data_p->verdict == TRAFFIC_VERDICT_DELAYED) { if (!ngx_http_cp_hold_verdict(ctx)) { session_data_p->verdict = fail_mode_hold_verdict == NGX_OK ? 
TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; updateMetricField(HOLD_THREAD_TIMEOUT, 1); @@ -632,7 +638,7 @@ ngx_http_cp_hold_verdict_thread(void *_ctx) session_data_p->session_id, request, &ctx->modifications, - HOLD_DATA, + REQUEST_DELAYED_VERDICT, 0 ); diff --git a/attachments/nginx/ngx_module/ngx_cp_hook_threads.h b/attachments/nginx/ngx_module/ngx_cp_hook_threads.h index a7de641..be35078 100644 --- a/attachments/nginx/ngx_module/ngx_cp_hook_threads.h +++ b/attachments/nginx/ngx_module/ngx_cp_hook_threads.h @@ -42,6 +42,7 @@ struct ngx_http_cp_event_thread_ctx_t /// Should context continue to the next filter. int should_return_next_filter; + int chain_part_number; ngx_http_cp_modification_list *modifications; ///< Context's modification. ngx_str_t waf_tag; ///< WAF tag value for the location block. @@ -183,7 +184,7 @@ void * ngx_http_cp_res_body_filter_thread(void *_ctx); /// /// @brief Sends a request to the attachment's service to update the earlier provided "WAIT" verdict. -/// @details Communicates with the attachment service by sending a HOLD_DATA request to the attachment's service +/// @details Communicates with the attachment service by sending a REQUEST_DELAYED_VERDICT request to the attachment's service /// and modifies _ctx by the received response. /// @note _ctx needs to be properly initialized by init_thread_ctx() and /// be called after another call returned wait verdict. 
diff --git a/attachments/nginx/ngx_module/ngx_cp_hooks.c b/attachments/nginx/ngx_module/ngx_cp_hooks.c index 7ea7eee..d422c9b 100644 --- a/attachments/nginx/ngx_module/ngx_cp_hooks.c +++ b/attachments/nginx/ngx_module/ngx_cp_hooks.c @@ -25,7 +25,7 @@ #include "nginx_attachment_util.h" #include "shmem_ipc.h" #include "compression_utils.h" -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" #include "ngx_cp_io.h" #include "ngx_cp_utils.h" #include "ngx_cp_initializer.h" @@ -38,6 +38,11 @@ #include "ngx_cp_metric.h" #include "ngx_cp_thread.h" #include "ngx_cp_hooks.h" +#ifdef NGINX_ASYNC_SUPPORTED +#include "async/ngx_cp_async_core.h" +#include "async/ngx_cp_async_headers.h" +#include "async/ngx_cp_async_body.h" +#endif extern ngx_module_t ngx_http_cp_attachment_module; ///< CP Attachment module @@ -54,7 +59,7 @@ static const uint one_minute = 60; /// - #ngx_http_cp_session_data pointer if everything was initiated properly. /// - #NULL /// -static ngx_http_cp_session_data * +ngx_http_cp_session_data * init_cp_session_data(ngx_http_request_t *request) { static uint32_t session_id = 1; @@ -88,6 +93,9 @@ init_cp_session_data(ngx_http_request_t *request) session_data->processed_req_body_size = 0; session_data->processed_res_body_size = 0; session_data->is_res_body_inspected = 0; + session_data->async_processing_needed = 0; + session_data->body_processed = 0; + session_data->initial_async_mode = -1; ngx_http_set_ctx(request, session_data, ngx_http_cp_attachment_module); @@ -109,41 +117,118 @@ fini_cp_session_data(ngx_http_cp_session_data *session_data) finiCompressionStream(session_data->response_data.decompression_stream); session_data->response_data.decompression_stream = NULL; } + if (session_data->response_data.decompression_pool != NULL) { + write_dbg(DBG_LEVEL_TRACE, "Destroying decompression pool for session ID %d", session_data->session_id); + ngx_destroy_pool(session_data->response_data.decompression_pool); + 
session_data->response_data.decompression_pool = NULL; + } + if (session_data->response_data.recompression_pool != NULL) { + write_dbg(DBG_LEVEL_TRACE, "Destroying recompression pool for session ID %d", session_data->session_id); + ngx_destroy_pool(session_data->response_data.recompression_pool); + session_data->response_data.recompression_pool = NULL; + } } /// -/// @brief Cleans up session data. -/// @param[in] data Pointer to the session data to be cleaned up. +/// @brief Copies compressed data back into original Nginx buffers to avoid pool accumulation +/// \param original_chain The original Nginx buffer chain (will be modified to contain compressed data) +/// \param compressed_chain The compressed data chain from temporary pool +/// \param request_pool Pool to use for overflow buffers if needed +/// \return NGX_OK or NGX_ERROR +/// +static ngx_int_t +copy_compressed_to_original_buffers( + ngx_chain_t *original_chain, + ngx_chain_t *compressed_chain, + ngx_pool_t *request_pool +) +{ + ngx_chain_t *orig_cl = original_chain; + ngx_chain_t *comp_cl = compressed_chain; + u_char *comp_pos; + size_t comp_remaining; + + while (comp_cl != NULL) { + comp_pos = comp_cl->buf->pos; + comp_remaining = comp_cl->buf->last - comp_cl->buf->pos; + + while (comp_remaining > 0 && orig_cl != NULL) { + size_t orig_capacity = orig_cl->buf->end - orig_cl->buf->pos; + size_t copy_size = comp_remaining < orig_capacity ? 
comp_remaining : orig_capacity; + + // Copy compressed data into original buffer + ngx_memcpy(orig_cl->buf->pos, comp_pos, copy_size); + orig_cl->buf->last = orig_cl->buf->pos + copy_size; + orig_cl->buf->memory = 1; + orig_cl->buf->temporary = 1; + + comp_pos += copy_size; + comp_remaining -= copy_size; + + if (comp_remaining > 0) { + // Need to allocate overflow buffer + ngx_chain_t *overflow = ngx_alloc_chain_link(request_pool); + if (overflow == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to allocate overflow chain link"); + return NGX_ERROR; + } + + overflow->buf = ngx_calloc_buf(request_pool); + if (overflow->buf == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to allocate overflow buffer"); + return NGX_ERROR; + } + + overflow->buf->memory = 1; + overflow->buf->temporary = 1; + overflow->next = orig_cl->next; + orig_cl->next = overflow; + orig_cl = overflow->next; + overflow->next = NULL; + write_dbg(DBG_LEVEL_TRACE, "Created overflow buffer of size %zu", comp_remaining); + break; + } + orig_cl = orig_cl->next; + } + + comp_cl = comp_cl->next; + } + + return NGX_OK; +} + +/// +/// @brief Cleanup handler for session data (called when request pool is destroyed) +/// \param data Pointer to session data /// static void ngx_session_data_cleanup(void *data) { - if (data == NULL) return; ngx_http_cp_session_data *session_data = (ngx_http_cp_session_data *)data; write_dbg(DBG_LEVEL_DEBUG, "Cleaning up session data for session ID %d", session_data->session_id); - - if (session_data->response_data.original_compressed_body != NULL) { - free_chain(session_data->response_data.request_pool, session_data->response_data.original_compressed_body); - session_data->response_data.original_compressed_body = NULL; + if (session_data != NULL) { + fini_cp_session_data(session_data); } - - fini_cp_session_data(session_data); } /// -/// @brief initializes session data with response_data chain allocation and cleanup from given ngx pool +/// @brief Initializes storage for the FIRST 
chunk only of original compressed body +/// This provides a reference without accumulating all chunks (prevents memory spikes) /// \param session_data -/// \param request -/// \return +/// \param pool Request pool +/// \return NGX_OK or NGX_ERROR /// static ngx_int_t -init_cp_session_original_body(ngx_http_cp_session_data *session_data, ngx_pool_t *pool) +init_cp_session_original_body(ngx_http_cp_session_data *session_data, ngx_pool_t *request_pool) { - ngx_pool_cleanup_t *cln; + // Only initialize once (for first chunk only) + if (session_data->response_data.original_compressed_body != NULL) { + return NGX_OK; + } - write_dbg(DBG_LEVEL_TRACE, "Initializing original compressed body for session ID %d", session_data->session_id); + write_dbg(DBG_LEVEL_TRACE, "Initializing original compressed body storage (first chunk only) for session ID %d", session_data->session_id); - session_data->response_data.original_compressed_body = ngx_alloc_chain_link(pool); + session_data->response_data.original_compressed_body = ngx_alloc_chain_link(request_pool); if (session_data->response_data.original_compressed_body == NULL) { write_dbg( @@ -153,22 +238,14 @@ init_cp_session_original_body(ngx_http_cp_session_data *session_data, ngx_pool_t ); return NGX_ERROR; } - session_data->response_data.request_pool = pool; + session_data->response_data.request_pool = request_pool; ngx_memset(session_data->response_data.original_compressed_body, 0, sizeof(ngx_chain_t)); - cln = ngx_pool_cleanup_add(pool, 0); + ngx_pool_cleanup_t *cln = ngx_pool_cleanup_add(request_pool, 0); if (cln == NULL) { - write_dbg( - DBG_LEVEL_WARNING, - "Failed to allocate cleanup memory for original compressed body in session ID %d\n", - session_data->session_id - ); - ngx_free_chain(session_data->response_data.request_pool, session_data->response_data.original_compressed_body); - session_data->response_data.original_compressed_body = NULL; + write_dbg(DBG_LEVEL_WARNING, "Failed to add cleanup handler for session ID %d", 
session_data->session_id); return NGX_ERROR; } - - write_dbg(DBG_LEVEL_TRACE, "Adding session_data cleanup handler for session ID %d", session_data->session_id); cln->handler = ngx_session_data_cleanup; cln->data = session_data; @@ -182,7 +259,7 @@ init_cp_session_original_body(ngx_http_cp_session_data *session_data, ngx_pool_t /// - #ngx_http_cp_session_data pointer if everything was initiated properly. /// - #NULL /// -static ngx_http_cp_session_data * +ngx_http_cp_session_data * recover_cp_session_data(ngx_http_request_t *request) { return (ngx_http_cp_session_data *)ngx_http_get_module_ctx(request, ngx_http_cp_attachment_module); @@ -235,7 +312,7 @@ ngx_http_cp_hold_verdict(struct ngx_http_cp_event_thread_ctx_t *ctx) continue; } - if (session_data_p->verdict != TRAFFIC_VERDICT_WAIT) { + if (session_data_p->verdict != TRAFFIC_VERDICT_DELAYED) { // Verdict was updated. write_dbg( DBG_LEVEL_DEBUG, @@ -251,11 +328,11 @@ ngx_http_cp_hold_verdict(struct ngx_http_cp_event_thread_ctx_t *ctx) return 0; } -ngx_http_cp_verdict_e +ServiceVerdict enforce_sessions_rate() { ngx_http_cp_sessions_per_minute_limit *sessions_limit = get_periodic_sessions_limit_info(); - ngx_http_cp_verdict_e verdict = get_sessions_per_minute_limit_verdict(); + ServiceVerdict verdict = get_sessions_per_minute_limit_verdict(); unsigned int max_sessions = get_max_sessions_per_minute(); unsigned int curr_real_second = (unsigned int)(time(NULL)); @@ -404,11 +481,11 @@ ngx_http_cp_request_and_response_size_handler(ngx_http_request_t *request) } ngx_int_t -ngx_http_cp_req_header_handler(ngx_http_request_t *request) +ngx_http_cp_req_header_handler_sync(ngx_http_request_t *request) { ngx_http_cp_session_data *session_data_p; ngx_int_t handle_static_resource_result; - ngx_http_cp_verdict_e sessions_per_minute_verdict; + ServiceVerdict sessions_per_minute_verdict; ngx_cp_attachment_conf_t *conf; struct ngx_http_cp_event_thread_ctx_t ctx; struct timespec hook_time_begin; @@ -454,6 +531,11 @@ 
ngx_http_cp_req_header_handler(ngx_http_request_t *request) set_current_session_id(session_data_p->session_id); write_dbg(DBG_LEVEL_DEBUG, "Request header filter handling session ID: %d", session_data_p->session_id); + session_data_p->initial_async_mode = 0; + if (is_ngx_cp_async_mode_enabled_for_request(request)) { + write_dbg(DBG_LEVEL_WARNING, "Async mode detected in sync filter - passing through"); + return NGX_DECLINED; + } init_thread_ctx(&ctx, request, session_data_p, NULL); ctx.waf_tag = conf->waf_tag; @@ -553,15 +635,25 @@ ngx_http_cp_req_header_handler(ngx_http_request_t *request) ctx.res ); - if (session_data_p->verdict == TRAFFIC_VERDICT_WAIT) { + if (session_data_p->verdict == TRAFFIC_VERDICT_DELAYED) { res = ngx_http_cp_hold_verdict(&ctx); if (!res) { session_data_p->verdict = fail_mode_hold_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; updateMetricField(HOLD_THREAD_TIMEOUT, 1); - return fail_mode_verdict == NGX_OK ? NGX_DECLINED : fail_mode_verdict; + return fail_mode_hold_verdict == NGX_OK ? NGX_DECLINED : fail_mode_hold_verdict; } } + if (session_data_p->verdict == TRAFFIC_VERDICT_CUSTOM_RESPONSE) + { + write_dbg( + DBG_LEVEL_DEBUG, + "Received NGX_HTTP_FORBIDDEN with TRAFFIC_VERDICT_CUSTOM_RESPONSE for session ID: %d, returning Custom Response", + session_data_p->session_id + ); + return ngx_http_cp_finalize_custom_response_request(request); + } + calcProcessingTime(session_data_p, &hook_time_begin, 1); if (ctx.should_return) { return ctx.res == NGX_OK ? 
NGX_DECLINED : ctx.res; @@ -578,7 +670,7 @@ ngx_http_cp_req_header_handler(ngx_http_request_t *request) } ngx_int_t -ngx_http_cp_req_body_filter(ngx_http_request_t *request, ngx_chain_t *request_body_chain) +ngx_http_cp_req_body_filter_sync(ngx_http_request_t *request, ngx_chain_t *request_body_chain) { struct ngx_http_cp_event_thread_ctx_t ctx; ngx_http_cp_session_data *session_data_p = recover_cp_session_data(request); @@ -589,7 +681,12 @@ ngx_http_cp_req_body_filter(ngx_http_request_t *request, ngx_chain_t *request_bo if (session_data_p == NULL) return ngx_http_next_request_body_filter(request, request_body_chain); - write_dbg(DBG_LEVEL_DEBUG, "Request body received"); + write_dbg(DBG_LEVEL_DEBUG, "Request body received [SYNC]"); + + if (session_data_p->initial_async_mode || (!session_data_p->initial_async_mode && is_ngx_cp_async_mode_enabled_for_request(request))) { + write_dbg(DBG_LEVEL_WARNING, "Async mode detected in sync filter - passing through"); + return ngx_http_next_request_body_filter(request, request_body_chain); + } set_current_session_id(0); @@ -688,7 +785,7 @@ ngx_http_cp_req_body_filter(ngx_http_request_t *request, ngx_chain_t *request_bo ctx.res ); - if (session_data_p->verdict == TRAFFIC_VERDICT_WAIT) { + if (session_data_p->verdict == TRAFFIC_VERDICT_DELAYED) { res = ngx_http_cp_hold_verdict(&ctx); if (!res) { session_data_p->verdict = fail_mode_hold_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; @@ -717,12 +814,23 @@ ngx_http_cp_req_body_filter(ngx_http_request_t *request, ngx_chain_t *request_bo return fail_mode_verdict == NGX_OK ? 
ngx_http_next_request_body_filter(request, request_body_chain) : NGX_HTTP_FORBIDDEN; } - if (session_data_p->verdict == TRAFFIC_VERDICT_WAIT) { + if (session_data_p->verdict == TRAFFIC_VERDICT_CUSTOM_RESPONSE) + { + write_dbg( + DBG_LEVEL_DEBUG, + "Received NGX_HTTP_FORBIDDEN with TRAFFIC_VERDICT_CUSTOM_RESPONSE for session ID: %d, returning Custom Response", + session_data_p->session_id + ); + return ngx_http_cp_finalize_custom_response_request(request); + } + + if (session_data_p->verdict == TRAFFIC_VERDICT_DELAYED) { write_dbg(DBG_LEVEL_DEBUG, "spawn ngx_http_cp_hold_verdict"); res = ngx_http_cp_hold_verdict(&ctx); if (!res) { write_dbg(DBG_LEVEL_DEBUG, "ngx_http_cp_hold_verdict failed"); updateMetricField(HOLD_THREAD_TIMEOUT, 1); + return fail_mode_hold_verdict == NGX_OK ? ngx_http_next_request_body_filter(request, request_body_chain) : NGX_HTTP_FORBIDDEN; } } @@ -846,7 +954,7 @@ remove_server_header(ngx_http_request_t *r) } ngx_int_t -ngx_http_cp_res_header_filter(ngx_http_request_t *request) +ngx_http_cp_res_header_filter_sync(ngx_http_request_t *request) { struct ngx_http_cp_event_thread_ctx_t ctx; ngx_http_cp_session_data *session_data_p; @@ -863,7 +971,12 @@ ngx_http_cp_res_header_filter(ngx_http_request_t *request) set_current_session_id(session_data_p->session_id); - write_dbg(DBG_LEVEL_DEBUG, "Response header filter handling session ID: %d", session_data_p->session_id); + write_dbg(DBG_LEVEL_DEBUG, "Response header filter [SYNC] handling session ID: %d", session_data_p->session_id); + + if (session_data_p->initial_async_mode || (!session_data_p->initial_async_mode && is_ngx_cp_async_mode_enabled_for_request(request))) { + write_dbg(DBG_LEVEL_WARNING, "Async mode detected in sync filter - passing through"); + return ngx_http_next_response_header_filter(request); + } if (!isIpcReady()) { write_dbg( @@ -978,7 +1091,7 @@ ngx_http_cp_res_header_filter(ngx_http_request_t *request) } ngx_int_t -ngx_http_cp_res_body_filter(ngx_http_request_t *request, 
ngx_chain_t *body_chain) +ngx_http_cp_res_body_filter_sync(ngx_http_request_t *request, ngx_chain_t *body_chain) { struct ngx_http_cp_event_thread_ctx_t ctx; ngx_http_cp_session_data *session_data_p; @@ -994,10 +1107,15 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain if (session_data_p == NULL) return ngx_http_next_response_body_filter(request, body_chain); set_current_session_id(session_data_p->session_id); - write_dbg(DBG_LEVEL_DEBUG, "Response body filter handling response ID: %d", session_data_p->session_id); - + write_dbg(DBG_LEVEL_DEBUG, "Response body filter [SYNC] handling response ID: %d", session_data_p->session_id); + print_buffer_chain(body_chain, "incoming", 32, DBG_LEVEL_TRACE); + if (session_data_p->initial_async_mode || (!session_data_p->initial_async_mode && is_ngx_cp_async_mode_enabled_for_request(request))) { + write_dbg(DBG_LEVEL_WARNING, "Async mode detected in sync filter - passing through"); + return ngx_http_next_response_body_filter(request, body_chain); + } + if (!isIpcReady()) { write_dbg( DBG_LEVEL_TRACE, @@ -1026,18 +1144,18 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain "Session with corrupted compression has DROP verdict, returning HTTP_FORBIDDEN. 
Session ID: %d", session_data_p->session_id ); - return NGX_HTTP_FORBIDDEN; + return ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } return ngx_http_next_response_body_filter(request, body_chain); } if ( session_data_p->verdict != TRAFFIC_VERDICT_INSPECT && - session_data_p->verdict != TRAFFIC_VERDICT_WAIT && + session_data_p->verdict != TRAFFIC_VERDICT_DELAYED && ( session_data_p->verdict != TRAFFIC_VERDICT_ACCEPT || session_data_p->response_data.new_compression_type == NO_COMPRESSION || - session_data_p->response_data.new_compression_type == BROTLI || + (is_brotli_inspection_enabled && session_data_p->response_data.new_compression_type == BROTLI) || session_data_p->response_data.num_body_chunk == 0 ) ) { @@ -1052,7 +1170,7 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain "Session has DROP verdict, returning HTTP_FORBIDDEN instead of streaming. Session ID: %d", session_data_p->session_id ); - return NGX_HTTP_FORBIDDEN; + return ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } return ngx_http_next_response_body_filter(request, body_chain); } @@ -1069,23 +1187,70 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain return ngx_http_next_response_body_filter(request, body_chain); } - if (body_chain->buf->pos != NULL && session_data_p->response_data.new_compression_type != NO_COMPRESSION && session_data_p->response_data.new_compression_type != BROTLI) { - write_dbg(DBG_LEVEL_TRACE, "Decompressing response body"); - if (init_cp_session_original_body(session_data_p, request->pool) == NGX_OK) { - if (session_data_p->response_data.decompression_stream == NULL) { - session_data_p->response_data.decompression_stream = initCompressionStream(); - } + // Save original chain before any processing (we'll copy compressed data back to these buffers) + ngx_chain_t *original_nginx_chain = body_chain; + if ( + 
body_chain->buf->pos != NULL && + session_data_p->response_data.new_compression_type != NO_COMPRESSION && + ( + session_data_p->response_data.new_compression_type != BROTLI || + (session_data_p->response_data.new_compression_type == BROTLI && is_brotli_inspection_enabled) + ) + ) { + write_dbg( + DBG_LEVEL_TRACE, + "Decompressing response body for session ID %d, compression type: %d, chunk: %d", + session_data_p->session_id, + session_data_p->response_data.new_compression_type, + session_data_p->response_data.num_body_chunk + ); - compression_result = decompress_body( - session_data_p->response_data.decompression_stream, - RESPONSE_BODY, - &is_last_decompressed_part, - &body_chain, - &session_data_p->response_data.original_compressed_body, - request->pool - ); + // Save original body ONLY on first chunk (prevents memory spikes from accumulation) + if (session_data_p->response_data.num_body_chunk == 1) { + if (init_cp_session_original_body(session_data_p, request->pool) != NGX_OK) { + write_dbg( + DBG_LEVEL_WARNING, + "Failed to initialize original compressed body storage for session ID %d", + session_data_p->session_id + ); + handle_inspection_failure(inspection_failure_weight, fail_mode_verdict, session_data_p); + fini_cp_session_data(session_data_p); + session_data_p->response_data.response_data_status = NGX_ERROR; + return fail_mode_verdict == NGX_OK ? 
+ ngx_http_next_response_body_filter(request, body_chain) : + ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); + } } + if (session_data_p->response_data.decompression_stream == NULL) { + session_data_p->response_data.decompression_stream = initCompressionStream(); + } + + // Get or create decompression pool for temporary decompressed data + if (session_data_p->response_data.decompression_pool == NULL) { + session_data_p->response_data.decompression_pool = ngx_create_pool(decompression_pool_size, request->pool->log); + if (session_data_p->response_data.decompression_pool == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to create decompression pool for session ID %d", session_data_p->session_id); + handle_inspection_failure(inspection_failure_weight, fail_mode_verdict, session_data_p); + fini_cp_session_data(session_data_p); + session_data_p->response_data.response_data_status = NGX_ERROR; + return fail_mode_verdict == NGX_OK ? + ngx_http_next_response_body_filter(request, body_chain) : + ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); + } + write_dbg(DBG_LEVEL_TRACE, "Created decompression pool for session ID %d", session_data_p->session_id); + } + + // Use decompression pool for decompressed data (will be destroyed after re-compression) + compression_result = decompress_body( + session_data_p->response_data.decompression_stream, + RESPONSE_BODY, + &is_last_decompressed_part, + &body_chain, + &session_data_p->response_data.original_compressed_body, + session_data_p->response_data.decompression_pool // Destroyed after re-compression + ); + if (compression_result != NGX_OK) { write_dbg(DBG_LEVEL_WARNING, "Failed to decompress response body"); handle_inspection_failure(inspection_failure_weight, fail_mode_verdict, session_data_p); @@ -1093,16 +1258,35 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain 
session_data_p->response_data.response_data_status = NGX_ERROR; return fail_mode_verdict == NGX_OK ? ngx_http_next_response_body_filter(request, body_chain) : - NGX_ERROR; + ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } } if (session_data_p->verdict == TRAFFIC_VERDICT_ACCEPT) { - write_dbg(DBG_LEVEL_TRACE, "Compressing response body"); if (session_data_p->response_data.compression_stream == NULL) { session_data_p->response_data.compression_stream = initCompressionStream(); } + // Recreate/reset recompression pool for this chunk to avoid accumulation + if (session_data_p->response_data.recompression_pool != NULL) { + write_dbg(DBG_LEVEL_TRACE, "Resetting recompression pool for session ID %d chunk %d", + session_data_p->session_id, session_data_p->response_data.num_body_chunk); + ngx_reset_pool(session_data_p->response_data.recompression_pool); + } else { + session_data_p->response_data.recompression_pool = ngx_create_pool(recompression_pool_size, request->pool->log); + if (session_data_p->response_data.recompression_pool == NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to create recompression pool for session ID %d", session_data_p->session_id); + handle_inspection_failure(inspection_failure_weight, fail_mode_verdict, session_data_p); + fini_cp_session_data(session_data_p); + session_data_p->response_data.response_data_status = NGX_ERROR; + return fail_mode_verdict == NGX_OK ? 
+ ngx_http_next_response_body_filter(request, body_chain) : + ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); + } + write_dbg(DBG_LEVEL_TRACE, "Created recompression pool for session ID %d", session_data_p->session_id); + } + + // Compress into temporary pool (body_chain will be modified to point to new compressed chain) compression_result = compress_body( session_data_p->response_data.compression_stream, session_data_p->response_data.new_compression_type, @@ -1110,7 +1294,7 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain is_last_decompressed_part, &body_chain, NULL, - request->pool + session_data_p->response_data.recompression_pool // Temporary - will be destroyed after copying ); if (compression_result != NGX_OK) { write_dbg(DBG_LEVEL_WARNING, "Failed to compress response body"); @@ -1120,16 +1304,36 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain session_data_p->response_data.response_data_status = NGX_ERROR; return fail_mode_verdict == NGX_OK ? ngx_http_next_response_body_filter(request, body_chain) : - NGX_ERROR; + ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } - return ngx_http_next_response_body_filter(request, body_chain); + // Copy compressed data from temporary pool back to original Nginx buffers + ngx_chain_t *compressed_chain = body_chain; + if (copy_compressed_to_original_buffers(original_nginx_chain, compressed_chain, request->pool) != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "Failed to copy compressed data back to original buffers"); + handle_inspection_failure(inspection_failure_weight, fail_mode_verdict, session_data_p); + fini_cp_session_data(session_data_p); + session_data_p->response_data.response_data_status = NGX_ERROR; + return fail_mode_verdict == NGX_OK ? 
+ ngx_http_next_response_body_filter(request, body_chain) : + ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); + } + + print_buffer_chain(original_nginx_chain, "outgoing chain elem", -1, DBG_LEVEL_TRACE); + + if (session_data_p->response_data.decompression_pool != NULL) { + write_dbg(DBG_LEVEL_TRACE, "Destroying decompression pool for session ID %d", session_data_p->session_id); + ngx_destroy_pool(session_data_p->response_data.decompression_pool); + session_data_p->response_data.decompression_pool = NULL; + } + + return ngx_http_next_response_body_filter(request, original_nginx_chain); } if (was_transaction_timedout(session_data_p)) { // Session was timed out. if (session_data_p->verdict == TRAFFIC_VERDICT_DROP) { - return NGX_HTTP_FORBIDDEN; + return ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } session_data_p->verdict = fail_mode_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; fini_cp_session_data(session_data_p); @@ -1142,7 +1346,7 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain if (fail_mode_verdict == NGX_OK) { return ngx_http_next_response_body_filter(request, body_chain); } - return NGX_ERROR; + return ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } if (!session_data_p->was_request_fully_inspected) { @@ -1182,7 +1386,6 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain ) { // failed to execute thread task, or it timed out session_data_p->verdict = fail_mode_verdict == NGX_OK ? TRAFFIC_VERDICT_ACCEPT : TRAFFIC_VERDICT_DROP; - fini_cp_session_data(session_data_p); write_dbg( DBG_LEVEL_DEBUG, "res_body_filter thread failed, returning default fail mode verdict. 
Session id: %d, verdict: %s", @@ -1198,7 +1401,7 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain if (fail_mode_verdict == NGX_OK) { return ngx_http_next_response_body_filter(request, body_chain); } - return NGX_HTTP_FORBIDDEN; + return ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } write_dbg( DBG_LEVEL_DEBUG, @@ -1222,6 +1425,7 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain fini_cp_session_data(session_data_p); return ngx_http_next_response_body_filter(request, body_chain); } + ctx.chain_part_number++; } if (ctx.chain) { @@ -1237,14 +1441,14 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain if (fail_mode_verdict == NGX_OK) { return ngx_http_next_response_body_filter(request, body_chain); } - return NGX_HTTP_FORBIDDEN; + return ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } final_res = ctx.res; if (final_res == NGX_HTTP_FORBIDDEN) { handle_inspection_success(session_data_p); - return ngx_http_cp_finalize_rejected_request(request, 1); + return ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } if (final_res != NGX_OK) { @@ -1257,10 +1461,16 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain if (fail_mode_verdict == NGX_OK) { return ngx_http_next_response_body_filter(request, body_chain); } - return NGX_HTTP_FORBIDDEN; + return ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } - if (ctx.modifications && session_data_p->response_data.new_compression_type != BROTLI) { + if ( + ctx.modifications && + ( + session_data_p->response_data.new_compression_type != BROTLI || + (session_data_p->response_data.new_compression_type == BROTLI && is_brotli_inspection_enabled) + ) + ) { write_dbg(DBG_LEVEL_TRACE, "Handling response body modification"); if 
(ngx_http_cp_body_modifier(body_chain, ctx.modifications, request->pool) != NGX_OK) { write_dbg(DBG_LEVEL_WARNING, "Failed to modify response body"); @@ -1269,11 +1479,11 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain if (fail_mode_verdict == NGX_OK) { return ngx_http_next_response_body_filter(request, body_chain); } - return NGX_HTTP_FORBIDDEN; + return ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } } - - if (ctx.modifications && session_data_p->response_data.new_compression_type == BROTLI) { + + if (ctx.modifications && session_data_p->response_data.new_compression_type == BROTLI && !is_brotli_inspection_enabled) { ngx_http_cp_modification_list *mod = ctx.modifications; while (mod != NULL) { ngx_http_cp_modification_list *next_mod = mod->next; @@ -1315,7 +1525,7 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain fini_cp_session_data(session_data_p); return fail_mode_verdict == NGX_OK ? ngx_http_next_response_body_filter(request, body_chain) : - NGX_HTTP_FORBIDDEN; + ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } } @@ -1329,7 +1539,7 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain "Final verdict is DROP, blocking stream to client. Session ID: %d", session_data_p->session_id ); - return NGX_HTTP_FORBIDDEN; + return ngx_http_filter_finalize_request(request, &ngx_http_cp_attachment_module, NGX_HTTP_FORBIDDEN); } write_dbg( @@ -1341,3 +1551,117 @@ ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain return ngx_http_next_response_body_filter(request, body_chain); } + +/// +/// @brief Dynamic wrapper for response header filter that chooses sync or async based on configuration. +/// @details Branches internally to call either the synchronous or asynchronous implementation +/// based on the async mode configuration for the specific request. 
+/// @param[in, out] request NGINX request. +/// @returns ngx_int_t +/// - #NGX_OK +/// - #NGX_HTTP_FORBIDDEN +/// - #NGX_ERROR +/// +ngx_int_t +ngx_http_cp_res_header_filter(ngx_http_request_t *request) +{ +#ifdef NGINX_ASYNC_SUPPORTED + if (is_ngx_cp_async_mode_enabled_for_request(request)) { + return ngx_http_next_response_header_filter(request); + } else { + return ngx_http_cp_res_header_filter_sync(request); + } +#else + // For nginx versions below 1.22, always use sync mode + return ngx_http_cp_res_header_filter_sync(request); +#endif +} + +/// +/// @brief Dynamic wrapper for response body filter that chooses sync or async based on configuration. +/// @details Branches internally to call either the synchronous or asynchronous implementation +/// based on the async mode configuration for the specific request. +/// @param[in, out] request NGINX request. +/// @param[in, out] body_chain NGINX body chain. +/// @returns ngx_int_t +/// - #NGX_OK +/// - #NGX_HTTP_FORBIDDEN +/// - #NGX_ERROR +/// +ngx_int_t +ngx_http_cp_res_body_filter(ngx_http_request_t *request, ngx_chain_t *body_chain) +{ + ngx_int_t res; +#ifdef NGINX_ASYNC_SUPPORTED + if (is_ngx_cp_async_mode_enabled_for_request(request)) { + res = ngx_http_next_response_body_filter(request, body_chain); + } else { + res = ngx_http_cp_res_body_filter_sync(request, body_chain); + } + + if (is_async_toggled_off_in_last_reconfig()) { + disable_ipc_verdict_event_handler(); + reset_async_mode_toggled(); + } + + if (is_async_toggled_on_in_last_reconfig()) { + enable_ipc_verdict_event_handler(); + reset_async_mode_toggled(); + } +#else + // For nginx versions below 1.22, always use sync mode + res = ngx_http_cp_res_body_filter_sync(request, body_chain); +#endif + return res; +} + +/// +/// @brief Dynamic wrapper for request header handler that chooses sync or async based on configuration. 
+/// @details Branches internally to call either the synchronous or asynchronous implementation +/// based on the async mode configuration for the specific request. +/// @param[in, out] request NGINX request. +/// @returns ngx_int_t +/// - #NGX_OK +/// - #NGX_HTTP_FORBIDDEN +/// - #NGX_ERROR +/// +ngx_int_t +ngx_http_cp_req_header_handler(ngx_http_request_t *request) +{ +#ifdef NGINX_ASYNC_SUPPORTED + if (is_ngx_cp_async_mode_enabled_for_request(request)) { + return ngx_http_cp_req_header_handler_async(request); + } else { + return ngx_http_cp_req_header_handler_sync(request); + } +#else + // For nginx versions below 1.22, always use sync mode + return ngx_http_cp_req_header_handler_sync(request); +#endif +} + +/// +/// @brief Dynamic wrapper for request body filter that chooses sync or async based on configuration. +/// @details Branches internally to call either the synchronous or asynchronous implementation +/// based on the async mode configuration for the specific request. +/// @param[in, out] request NGINX request. +/// @param[in, out] request_body_chain NGINX body chain. 
+/// @returns ngx_int_t +/// - #NGX_OK +/// - #NGX_HTTP_FORBIDDEN +/// - #NGX_ERROR +/// +ngx_int_t +ngx_http_cp_req_body_filter(ngx_http_request_t *request, ngx_chain_t *request_body_chain) +{ +#ifdef NGINX_ASYNC_SUPPORTED + if (is_ngx_cp_async_mode_enabled_for_request(request)) { + return ngx_http_cp_req_body_filter_async(request, request_body_chain); + } else { + return ngx_http_cp_req_body_filter_sync(request, request_body_chain); + } +#else + // For nginx versions below 1.22, always use sync mode + return ngx_http_cp_req_body_filter_sync(request, request_body_chain); +#endif +} diff --git a/attachments/nginx/ngx_module/ngx_cp_hooks.h b/attachments/nginx/ngx_module/ngx_cp_hooks.h index 49e7737..925ed4b 100644 --- a/attachments/nginx/ngx_module/ngx_cp_hooks.h +++ b/attachments/nginx/ngx_module/ngx_cp_hooks.h @@ -24,7 +24,7 @@ #include #include "ngx_cp_http_parser.h" -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" #include "ngx_cp_hook_threads.h" static const int registration_failure_weight = 2; ///< Registration failure weight. @@ -36,7 +36,7 @@ static const ngx_int_t METRIC_TIMEOUT_VAL = METRIC_PERIODIC_TIMEOUT; /// @details Such as to save verdict and session ID between the request and the response typedef struct ngx_http_cp_session_data { ngx_int_t was_request_fully_inspected; ///< Holds if the request fully inspected. - ngx_http_cp_verdict_e verdict; ///< Holds the session's verdict from the Nano Service. + ServiceVerdict verdict; ///< Holds the session's verdict from the Nano Service. uint32_t session_id; ///< Current session's Id. ngx_int_t remaining_messages_to_reply; ///< Remaining messages left for the agent to respond to. ngx_http_response_data response_data; ///< Holds session's response data. @@ -46,6 +46,9 @@ typedef struct ngx_http_cp_session_data { uint64_t processed_req_body_size; ///< Holds session's request body's size. uint64_t processed_res_body_size; ///< Holds session's response body's size'. 
ngx_int_t is_res_body_inspected; ///< Holds if the response body was inspected + ngx_int_t async_processing_needed; ///< Holds if async processing is needed in filters + ngx_int_t body_processed; ///< Holds if request body processing is complete + ngx_int_t initial_async_mode; ///< Initial async mode for this request (0=sync, 1=async, -1=unset) } ngx_http_cp_session_data; /// @@ -100,7 +103,7 @@ ngx_int_t ngx_http_cp_req_header_handler(ngx_http_request_t *request); /// /// @brief Sends a request to the nano service to update the verdict. -/// @note Should be called after the nano service provided the verdict TRAFFIC_VERDICT_WAIT to get the updated verdict. +/// @note Should be called after the nano service provided the verdict TRAFFIC_VERDICT_DELAYED to get the updated verdict. /// @param[in, out] request Event thread context to be updated. /// @returns ngx_int_t /// - #1 if request was properly communicated with the nano service and provided an updated response. @@ -121,12 +124,12 @@ ngx_int_t was_transaction_timedout(ngx_http_cp_session_data *ctx); /// /// @brief Enforces the sessions rate. 
-/// @returns ngx_http_cp_verdict_e +/// @returns ServiceVerdict /// - #TRAFFIC_VERDICT_INSPECT /// - #TRAFFIC_VERDICT_ACCEPT /// - #TRAFFIC_VERDICT_DROP /// -ngx_http_cp_verdict_e enforce_sessions_rate(); +ServiceVerdict enforce_sessions_rate(); /// @@ -137,4 +140,25 @@ ngx_http_cp_verdict_e enforce_sessions_rate(); /// ngx_int_t ngx_http_cp_request_and_response_size_handler(ngx_http_request_t *request); +// Session management functions +ngx_http_cp_session_data *init_cp_session_data(ngx_http_request_t *request); +ngx_http_cp_session_data *recover_cp_session_data(ngx_http_request_t *request); + +// Utility functions +void calcProcessingTime(ngx_http_cp_session_data *session_data_p, struct timespec *hook_time_begin, int is_req); +ngx_int_t ngx_http_cp_finalize_request_headers_hook( + ngx_http_request_t *request, + ngx_http_cp_session_data *session_data_p, + ngx_http_cp_modification_list *modifications, + ngx_int_t final_res); + +// Sync and async handlers +ngx_int_t ngx_http_cp_req_header_handler_sync(ngx_http_request_t *request); +ngx_int_t ngx_http_cp_req_body_filter_sync(ngx_http_request_t *request, ngx_chain_t *request_body_chain); + +#ifdef NGINX_ASYNC_SUPPORTED +ngx_int_t ngx_http_cp_req_header_handler_async(ngx_http_request_t *request); +ngx_int_t ngx_http_cp_req_body_filter_async(ngx_http_request_t *request, ngx_chain_t *request_body_chain); +#endif + #endif // __NGX_CP_HOOKS_H__ diff --git a/attachments/nginx/ngx_module/ngx_cp_http_parser.h b/attachments/nginx/ngx_module/ngx_cp_http_parser.h index bc36fee..16d289e 100644 --- a/attachments/nginx/ngx_module/ngx_cp_http_parser.h +++ b/attachments/nginx/ngx_module/ngx_cp_http_parser.h @@ -56,6 +56,12 @@ typedef struct { /// Decompression stream CompressionStream *decompression_stream; + + /// Pool for temporary decompression buffers (destroyed after each chunk's re-compression) + ngx_pool_t *decompression_pool; + + /// Pool for re-compression buffers (reset at start of each chunk to prevent accumulation) + 
ngx_pool_t *recompression_pool; } ngx_http_response_data; /// diff --git a/attachments/nginx/ngx_module/ngx_cp_initializer.c b/attachments/nginx/ngx_module/ngx_cp_initializer.c index b962bde..aef7522 100644 --- a/attachments/nginx/ngx_module/ngx_cp_initializer.c +++ b/attachments/nginx/ngx_module/ngx_cp_initializer.c @@ -20,12 +20,14 @@ #include #include #include +#include #include #include #include #include +#include "nano_attachment_common.h" #include "nginx_attachment_common.h" #include "ngx_cp_io.h" #include "ngx_cp_utils.h" @@ -33,6 +35,7 @@ #include "ngx_cp_compression.h" #include "attachment_types.h" #include "ngx_http_cp_attachment_module.h" +#include "async/ngx_cp_async_core.h" typedef enum ngx_cp_attachment_registration_state { NOT_REGISTERED, @@ -41,6 +44,7 @@ typedef enum ngx_cp_attachment_registration_state { } ngx_cp_attachment_registration_state_e; ///< Indicates the current attachment registation stage. char unique_id[MAX_NGINX_UID_LEN] = ""; // Holds the unique identifier for this instance. +uint32_t unique_id_integer = 0; // Holds the integer representation of the unique identifier. char shared_verdict_signal_path[128]; // Holds the path associating the attachment and service. int registration_socket = -1; // Holds the file descriptor used for registering the instance. @@ -239,7 +243,7 @@ init_signaling_socket() close(comm_socket); comm_socket = -1; write_dbg( - DBG_LEVEL_DEBUG, + DBG_LEVEL_WARNING, "Could not connect to nano service. 
Path: %s, Error: %s", server.sun_path, strerror(errno) @@ -295,6 +299,39 @@ init_signaling_socket() return NGX_ERROR; } + // Calculate and send target core for affinity pairing + int32_t target_core = -1; // Use signed int, -1 indicates affinity disabled + if (paired_affinity_enabled) { + // Use hash of the container ID part only (before underscore) as offset to distribute containers across CPU cores + char container_id[256]; + strncpy(container_id, unique_id, sizeof(container_id) - 1); + container_id[sizeof(container_id) - 1] = '\0'; + + char *underscore_pos = strrchr(container_id, '_'); + if (underscore_pos != NULL) { + *underscore_pos = '\0'; // Truncate at underscore to get container ID only + } + + uint32_t affinity_offset = hash_string(container_id); + int num_cores = sysconf(_SC_NPROCESSORS_CONF); + target_core = ((unique_id_integer - 1) + affinity_offset) % num_cores; + write_dbg(DBG_LEVEL_INFO, "Calculated target core for service affinity: worker_id=%d, offset=%u, num_cores=%d, target_core=%d (from container_id: %s, full unique_id: %s)", unique_id_integer, affinity_offset, num_cores, target_core, container_id, unique_id); + } else { + write_dbg(DBG_LEVEL_INFO, "Paired affinity disabled, sending target_core=-1 to service"); + } + + res = exchange_communication_data_with_service( + comm_socket, + &target_core, + sizeof(int32_t), + WRITE_TO_SOCKET, + &timeout + ); + if (res <= 0) { + write_dbg(DBG_LEVEL_WARNING, "Failed to send target core"); + return NGX_ERROR; + } + // Get an acknowledgement form the service that communication has been established. 
timeout = get_timeout_val_sec(1); res = exchange_communication_data_with_service( @@ -309,7 +346,7 @@ init_signaling_socket() return NGX_ERROR; } - write_dbg(DBG_LEVEL_DEBUG, "Successfully connected on client socket %d", comm_socket); + write_dbg(DBG_LEVEL_WARNING, "Successfully connected on client socket %d", comm_socket); return NGX_OK; } @@ -343,12 +380,21 @@ get_docker_id(char **_docker_id) size_t len = 0; while (getline(&line, &len, file) != -1) { char *docker_ptr = strstr(line, "docker/"); - if (docker_ptr == NULL) continue; + char *containerd_ptr = strstr(line, "cri-containerd-"); - // We've found a line with "docker/" so the identifier will be right after that. - docker_ptr += strlen("docker/"); - snprintf(docker_id, MAX_CONTAINER_LEN + 1, "%s", docker_ptr); - break; + if (docker_ptr != NULL) { + // We've found a line with "docker/" so the identifier will be right after that. + docker_ptr += strlen("docker/"); + snprintf(docker_id, MAX_CONTAINER_LEN + 1, "%s", docker_ptr); + break; + } + + if (containerd_ptr != NULL) { + // We've found a line with "cri-containerd-" so the identifier will be right after that. 
+ containerd_ptr += strlen("cri-containerd-"); + snprintf(docker_id, MAX_CONTAINER_LEN + 1, "%s", containerd_ptr); + break; + } } free(line); fclose(file); @@ -570,12 +616,21 @@ set_unique_id() size_t len = 0; while (getline(&line, &len, file) != -1) { char *docker_ptr = strstr(line, "docker/"); - if (docker_ptr == NULL) continue; + char *containerd_ptr = strstr(line, "cri-containerd-"); - is_container_env = 1; - docker_ptr += strlen("docker/"); - snprintf(docker_id, max_container_id_len + 1, "%s", docker_ptr); - break; + if (docker_ptr != NULL) { + is_container_env = 1; + docker_ptr += strlen("docker/"); + snprintf(docker_id, max_container_id_len + 1, "%s", docker_ptr); + break; + } + + if (containerd_ptr != NULL) { + is_container_env = 1; + containerd_ptr += strlen("cri-containerd-"); + snprintf(docker_id, max_container_id_len + 1, "%s", containerd_ptr); + break; + } } free(line); fclose(file); @@ -588,7 +643,10 @@ set_unique_id() snprintf(unique_id, unique_id_size, "%lu", ngx_worker_id); } - write_dbg(DBG_LEVEL_INFO, "Successfully set attachment's unique_id: '%s'", unique_id); + // Set integer representation + unique_id_integer = (uint32_t)ngx_worker_id; + + write_dbg(DBG_LEVEL_INFO, "Successfully set attachment's unique_id: '%s' (int: %u)", unique_id, unique_id_integer); return NGX_OK; } @@ -634,7 +692,7 @@ ngx_cp_attachment_init_process(ngx_http_request_t *request) set_need_registration(REGISTERED); } - if (comm_socket < 0) { + if (comm_socket < 0 || is_async_toggled_in_last_reconfig()) { write_dbg(DBG_LEVEL_DEBUG, "Registering to nano service"); if (init_signaling_socket() == NGX_ERROR) { write_dbg(DBG_LEVEL_DEBUG, "Failed to register to the Nano Service"); @@ -696,6 +754,28 @@ ngx_cp_attachment_init_process(ngx_http_request_t *request) // we want to indicate about successful registration only once in default level write_dbg(dbg_is_needed ? 
DBG_LEVEL_DEBUG : DBG_LEVEL_INFO, "NGINX attachment (UID='%s') successfully registered to nano service after %d attempts.", unique_id, num_of_connection_attempts); + // Set affinity to core based on UID (worker id) only if paired affinity is enabled + if (paired_affinity_enabled) { + // Use hash of the container ID part only (before underscore) as offset to distribute containers across CPU cores + // This ensures workers within same container are distributed evenly, but different containers get different offsets + char container_id[256]; + strncpy(container_id, unique_id, sizeof(container_id) - 1); + container_id[sizeof(container_id) - 1] = '\0'; + + char *underscore_pos = strrchr(container_id, '_'); + if (underscore_pos != NULL) { + *underscore_pos = '\0'; // Truncate at underscore to get container ID only + } + + uint32_t affinity_offset = hash_string(container_id); + int num_cores = sysconf(_SC_NPROCESSORS_CONF); + write_dbg(DBG_LEVEL_INFO, "Setting CPU affinity for NGINX attachment with UID: %d, offset: %u, num_cores: %d (from container_id: %s, full unique_id: %s)", unique_id_integer, affinity_offset, num_cores, container_id, unique_id); + int err = set_affinity_by_uid_with_offset_fixed_cores(unique_id_integer, affinity_offset, num_cores); + if (err != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "Failed to set affinity for worker %d, err %d", ngx_worker, err); + } + } + dbg_is_needed = 1; num_of_connection_attempts = 0; @@ -736,6 +816,10 @@ disconnect_communication() nano_service_ipc = NULL; } +#ifdef NGINX_ASYNC_SUPPORTED + disable_ipc_verdict_event_handler(); +#endif + set_need_registration(NOT_REGISTERED); } diff --git a/attachments/nginx/ngx_module/ngx_cp_io.c b/attachments/nginx/ngx_module/ngx_cp_io.c index 526e126..42936d3 100644 --- a/attachments/nginx/ngx_module/ngx_cp_io.c +++ b/attachments/nginx/ngx_module/ngx_cp_io.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include "ngx_cp_utils.h" #include "ngx_cp_initializer.h" @@ -91,7 +93,7 
@@ ngx_http_cp_signal_to_service(uint32_t cur_session_id) /// - #NGX_AGAIN /// static ngx_int_t -ngx_http_cp_wait_for_service(uint32_t cur_session_id, ngx_http_chunk_type_e chunk_type, ngx_int_t tout_retries) +ngx_http_cp_wait_for_service(uint32_t cur_session_id, AttachmentDataType chunk_type, ngx_int_t tout_retries) { static int dbg_count = 0; static clock_t clock_start = (clock_t) 0; @@ -100,7 +102,7 @@ ngx_http_cp_wait_for_service(uint32_t cur_session_id, ngx_http_chunk_type_e chun uint32_t reply_from_service; ngx_int_t retry; int is_fail_open_disabled = (inspection_mode != NON_BLOCKING_THREAD); - ngx_uint_t timeout = chunk_type == HOLD_DATA ? fail_open_hold_timeout : fail_open_timeout; + ngx_uint_t timeout = chunk_type == REQUEST_DELAYED_VERDICT ? fail_open_hold_timeout : fail_open_timeout; res = ngx_http_cp_signal_to_service(cur_session_id); if (res != NGX_OK) return res; @@ -197,7 +199,7 @@ ngx_http_cp_send_data_to_service( uint8_t num_of_data_elem, uint32_t cur_session_id, int *was_waiting, - ngx_http_chunk_type_e chunk_type, + AttachmentDataType chunk_type, ngx_int_t tout_retries ) { @@ -245,11 +247,11 @@ ngx_http_cp_send_data_to_service( /// /// @brief Receieves data from service. -/// @returns ngx_http_cp_reply_from_service_t -/// - #A valid ngx_http_cp_reply_from_service_t pointer if valid. +/// @returns HttpReplyFromService +/// - #A valid HttpReplyFromService pointer if valid. /// - #NULL if failed. 
/// -static ngx_http_cp_reply_from_service_t * +static HttpReplyFromService * ngx_http_cp_receive_data_from_service() { ngx_int_t res, retry; @@ -275,7 +277,7 @@ ngx_http_cp_receive_data_from_service() continue; } - return (ngx_http_cp_reply_from_service_t *)reply_data; + return (HttpReplyFromService *)reply_data; } return NULL; } @@ -286,7 +288,31 @@ ngx_http_cp_receive_data_from_service() static void free_data_from_service() { - popData(nano_service_ipc); + if (nano_service_ipc && isDataAvailable(nano_service_ipc)) { + write_dbg(DBG_LEVEL_TRACE, "Freeing data from nano service"); + popData(nano_service_ipc); + } +} + +/// +/// @brief Create a custom JSON response by the provided data +/// @param[in] json_response_data JSON response data. +/// @returns ngx_int_t +/// - #NGX_OK +/// +ngx_int_t +handle_custom_json_response(HttpJsonResponseData *json_response_data) +{ + ngx_str_t json_body; + + write_dbg(DBG_LEVEL_TRACE, "Preparing to set custom JSON response"); + + json_body.len = json_response_data->body_size; + json_body.data = (u_char *)json_response_data->body; + + set_custom_response_json(&json_body, json_response_data->response_code, json_response_data->content_type); + + return NGX_OK; } /// @@ -294,8 +320,8 @@ free_data_from_service() /// @details If web_response_type is set to REDIRECT_WEB_RESPONSE, it will set a redirect response. /// @param[in] web_response_data Web response data. /// -static void -handle_custom_web_response(ngx_http_cp_web_response_data_t *web_response_data) +void +handle_custom_web_response(HttpWebResponseData *web_response_data) { ngx_str_t title; ngx_str_t body; @@ -304,7 +330,7 @@ handle_custom_web_response(ngx_http_cp_web_response_data_t *web_response_data) uuid.len = web_response_data->uuid_size; - if (web_response_data->web_repsonse_type == REDIRECT_WEB_RESPONSE) { + if (web_response_data->web_response_type == REDIRECT_WEB_RESPONSE) { // Settings a redirected web response. 
write_dbg(DBG_LEVEL_TRACE, "Preparing to set redirect web response"); redirect_location.len = web_response_data->response_data.redirect_data.redirect_location_size; @@ -328,7 +354,7 @@ handle_custom_web_response(ngx_http_cp_web_response_data_t *web_response_data) body.data = (u_char *)web_response_data->response_data.custom_response_data.data + title.len; } uuid.data = (u_char *)web_response_data->response_data.custom_response_data.data + title.len + body.len; - set_custom_response(&title, &body, &uuid, web_response_data->response_data.custom_response_data.response_code); + set_custom_response_block_page(&title, &body, &uuid, web_response_data->response_data.custom_response_data.response_code); } /// @@ -364,8 +390,8 @@ create_modification_buffer(char **target, uint16_t data_size, char *data, ngx_po /// - #ngx_http_cp_modification_list pointer on success. /// - #NULL if the creation failed. /// -static ngx_http_cp_modification_list * -create_modification_node(ngx_http_cp_inject_data_t *modification, ngx_http_request_t *request) +ngx_http_cp_modification_list * +create_modification_node(HttpInjectData *modification, ngx_http_request_t *request) { ngx_int_t res; ngx_http_cp_modification_list *modification_node = (ngx_http_cp_modification_list *)ngx_pcalloc( @@ -423,7 +449,7 @@ create_modification_node(ngx_http_cp_inject_data_t *modification, ngx_http_reque ngx_int_t ngx_http_cp_is_reconf_needed() { - ngx_http_cp_reply_from_service_t *reply_p; + HttpReplyFromService *reply_p; ngx_int_t res; const char *reply_data; uint16_t reply_size; @@ -439,7 +465,7 @@ ngx_http_cp_is_reconf_needed() return NGX_ERROR; } - reply_p = (ngx_http_cp_reply_from_service_t *)reply_data; + reply_p = (HttpReplyFromService *)reply_data; if (reply_p->verdict == TRAFFIC_VERDICT_RECONF) { write_dbg(DBG_LEVEL_DEBUG, "Verdict reconf was received from the nano service. 
Performing reconf on the nginx worker attachment"); reset_attachment_config(); @@ -452,19 +478,19 @@ ngx_http_cp_is_reconf_needed() ngx_int_t ngx_http_cp_reply_receiver( ngx_int_t *expected_replies, - ngx_http_cp_verdict_e *verdict, + ServiceVerdict *verdict, ngx_int_t *inspect_all_response_headers, uint32_t cur_session_id, ngx_http_request_t *request, ngx_http_cp_modification_list **modification_list, - ngx_http_chunk_type_e chunk_type, + AttachmentDataType chunk_type, uint64_t processed_body_size ) { - ngx_http_cp_reply_from_service_t *reply_p; + HttpReplyFromService *reply_p; ngx_http_cp_modification_list *new_modification = NULL; ngx_http_cp_modification_list *current_modification = NULL; - ngx_http_cp_inject_data_t *current_inject_data = NULL; + HttpInjectData *current_inject_data = NULL; ngx_int_t res; ngx_int_t tout_retries = min_retries_for_verdict; uint8_t modification_count; @@ -537,9 +563,9 @@ ngx_http_cp_reply_receiver( current_modification = current_modification->next; } // Saving injected data. - current_inject_data = (ngx_http_cp_inject_data_t *)( + current_inject_data = (HttpInjectData *)( (char *)current_inject_data + - sizeof(ngx_http_cp_inject_data_t) + + sizeof(HttpInjectData) + current_inject_data->injection_size ); } @@ -596,7 +622,7 @@ ngx_http_cp_reply_receiver( break; } - case TRAFFIC_VERDICT_WAIT: { + case TRAFFIC_VERDICT_DELAYED: { // After a wait verdict, query the nano agent again to get an updated verdict. write_dbg(DBG_LEVEL_DEBUG, "Verdict wait received from the nano service"); updateMetricField(HOLD_VERDICTS_COUNT, 1); @@ -608,6 +634,23 @@ ngx_http_cp_reply_receiver( *inspect_all_response_headers = 0; break; } + + case TRAFFIC_VERDICT_CUSTOM_RESPONSE: { + // Custom Response verdict received from the nano service. 
+ write_dbg(DBG_LEVEL_INFO, "Verdict Custom Response received from the nano service"); + + handle_custom_json_response(reply_p->modify_data->json_response_data); + + *expected_replies = 0; + free_data_from_service(); + while (*modification_list) { + current_modification = *modification_list; + *modification_list = (*modification_list)->next; + ngx_pfree(request->pool, current_modification->modification.data); + ngx_pfree(request->pool, current_modification); + } + return NGX_OK; + } } free_data_from_service(); @@ -625,7 +668,7 @@ ngx_http_cp_reply_receiver( /// @param[in] size Size to be set into the meta_data_sizes array. /// @param[in] idx Index of the arrays to set the data and size into. /// -static void +void set_fragment_elem(char **meta_data_elems, uint16_t *meta_data_sizes, void *data, uint16_t size, uint idx) { meta_data_elems[idx] = data; @@ -640,7 +683,7 @@ set_fragment_elem(char **meta_data_elems, uint16_t *meta_data_sizes, void *data, /// @param[in] data_type Data type identifier to be set. /// @param[in] cur_request_id Request's Id. /// -static void +void set_fragments_identifiers( char **meta_data_elems, uint16_t *meta_data_sizes, @@ -656,7 +699,7 @@ set_fragments_identifiers( /// @param[in, out] sockaddr Socker to convert. /// @param[in, out] ip_addr Output location of the conversion. 
/// -static void +void convert_sock_addr_to_string(const struct sockaddr *sa, char *ip_addr) { void *ip = NULL; @@ -834,7 +877,7 @@ ngx_http_cp_meta_data_sender(ngx_http_request_t *request, uint32_t cur_request_i ngx_int_t ngx_http_cp_end_transaction_sender( - ngx_http_chunk_type_e end_transaction_type, + AttachmentDataType end_transaction_type, uint32_t cur_request_id, ngx_uint_t *num_messages_sent ) @@ -869,7 +912,7 @@ ngx_http_cp_wait_sender(uint32_t cur_request_id, ngx_uint_t *num_messages_sent) char *fragments[end_transaction_num_fragments]; uint16_t fragments_sizes[end_transaction_num_fragments]; - ngx_http_chunk_type_e transaction_type = HOLD_DATA; + AttachmentDataType transaction_type = REQUEST_DELAYED_VERDICT; ngx_int_t res; set_fragments_identifiers(fragments, fragments_sizes, (uint16_t *)&transaction_type, &cur_request_id); @@ -940,7 +983,7 @@ ngx_http_cp_content_length_sender(uint64_t content_length_n, uint32_t cur_req_id /// @param[in] header Header to add to the fragment array. /// @param[in] index Index of the arrays to set the header into. 
/// -static inline void +void add_header_to_bulk(char **fragments, uint16_t *fragments_sizes, ngx_table_elt_t *header, ngx_uint_t index) { ngx_uint_t pos = index * HEADER_DATA_COUNT; @@ -1024,7 +1067,7 @@ send_empty_header_list( ngx_int_t ngx_http_cp_header_sender( ngx_list_part_t *headers_list, - ngx_http_chunk_type_e header_type, + AttachmentDataType header_type, uint32_t cur_request_id, ngx_uint_t *num_messages_sent ) @@ -1111,8 +1154,9 @@ ngx_http_cp_header_sender( ngx_int_t ngx_http_cp_body_sender( ngx_chain_t *input, - ngx_http_chunk_type_e body_type, + AttachmentDataType body_type, ngx_http_cp_session_data *session_data, + ngx_int_t *part_number, ngx_int_t *is_last_part, ngx_uint_t *num_messages_sent, ngx_chain_t **next_elem_to_inspect @@ -1147,7 +1191,7 @@ ngx_http_cp_body_sender( set_fragments_identifiers(fragments, fragments_sizes, (uint16_t *)&body_type, &session_data->session_id); num_parts_sent = 0; - part_count = 0; + part_count = (body_type == RESPONSE_BODY) ? *part_number : 0; for (chain_iter = input; chain_iter && chunks_processed < max_chunks_to_process; chain_iter = chain_iter->next) { // For each NGINX buffer, fragment the buffer and then send the fragments to the nano service. 
@@ -1235,14 +1279,13 @@ ngx_http_cp_metric_data_sender() write_dbg(DBG_LEVEL_DEBUG, "Sending metric data to service"); fragment_type = METRIC_DATA_FROM_PLUGIN; - ngx_http_cp_metric_data_t data_to_send; + NanoHttpMetricData data_to_send; data_to_send.data_type = fragment_type; memcpy(data_to_send.data, metric_data, METRIC_TYPES_COUNT * sizeof(data_to_send.data[0])); fragments = (char *)&data_to_send; - fragments_sizes = sizeof(ngx_http_cp_metric_data_t); + fragments_sizes = sizeof(NanoHttpMetricData); res = ngx_http_cp_send_data_to_service(&fragments, &fragments_sizes, 1, 0, NULL, fail_open_timeout, min_retries_for_verdict); reset_metric_data(); return res; } - diff --git a/attachments/nginx/ngx_module/ngx_cp_io.h b/attachments/nginx/ngx_module/ngx_cp_io.h index b24ddff..b26a1f0 100644 --- a/attachments/nginx/ngx_module/ngx_cp_io.h +++ b/attachments/nginx/ngx_module/ngx_cp_io.h @@ -22,7 +22,7 @@ #include #include "shmem_ipc.h" -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" #include "ngx_cp_custom_response.h" #include "ngx_cp_hooks.h" @@ -57,12 +57,12 @@ extern int comm_socket; ///< Communication socket. 
ngx_int_t ngx_http_cp_reply_receiver( ngx_int_t *expected_replies, - ngx_http_cp_verdict_e *verdict, + ServiceVerdict *verdict, ngx_int_t *inspect_all_response_headers, uint32_t cur_session_id, ngx_http_request_t *request, ngx_http_cp_modification_list **modification_list, - ngx_http_chunk_type_e chunk_type, + AttachmentDataType chunk_type, uint64_t processed_body_size ); @@ -97,7 +97,7 @@ ngx_http_cp_meta_data_sender( /// ngx_int_t ngx_http_cp_end_transaction_sender( - ngx_http_chunk_type_e end_transaction_type, + AttachmentDataType end_transaction_type, uint32_t cur_request_id, ngx_uint_t *num_messages_sent ); @@ -148,7 +148,7 @@ ngx_http_cp_content_length_sender( ngx_int_t ngx_http_cp_header_sender( ngx_list_part_t *headers, - ngx_http_chunk_type_e header_type, + AttachmentDataType header_type, uint32_t cur_request_id, ngx_uint_t *num_messages_sent ); @@ -170,16 +170,17 @@ ngx_http_cp_header_sender( ngx_int_t ngx_http_cp_body_sender( ngx_chain_t *input, - ngx_http_chunk_type_e body_type, + AttachmentDataType body_type, ngx_http_cp_session_data *session_data, + ngx_int_t *part_number, ngx_int_t *is_last_part, ngx_uint_t *num_messages_sent, ngx_chain_t **next_elem_to_inspect ); /// -/// @brief Sends HOLD_DATA request to the nano service. -/// @details HOLD_DATA request is a request that asks the nano service to provide with an updated verdict. +/// @brief Sends REQUEST_DELAYED_VERDICT request to the nano service. +/// @details REQUEST_DELAYED_VERDICT request is a request that asks the nano service to provide with an updated verdict. /// @param[in] cur_request_id Request session's Id. /// @param[in, out] num_messages_sent Number of messages sent will be saved onto this parameter. /// - #NGX_OK @@ -216,4 +217,43 @@ void ngx_http_cp_report_time_metrics( double res_proccesing_time ); +/// +/// @brief Create a modifications node. +/// @param[in] modification Modification data. +/// @param[in] request NGINX request. 
+/// @returns modification_node +/// - #ngx_http_cp_modification_list pointer on success. +/// - #NULL if the creation failed. +/// +ngx_http_cp_modification_list * +create_modification_node(HttpInjectData *modification, ngx_http_request_t *request); + +/// +/// @brief Convert socket address to string +/// @param[in] sa Socket address +/// @param[out] ip_addr String buffer to write IP address to +/// +void +convert_sock_addr_to_string(const struct sockaddr *sa, char *ip_addr); + +/// +/// @brief Set fragments identifiers for data transmission +/// @param[in,out] meta_data_elems Fragments data array +/// @param[in,out] meta_data_sizes Fragments data sizes array +/// @param[in] data_type Data type identifier to be set +/// @param[in] cur_request_id Request's Id +/// +void +set_fragments_identifiers( + char **meta_data_elems, + uint16_t *meta_data_sizes, + uint16_t *data_type, + uint32_t *cur_request_id); + +void set_fragment_elem(char **meta_data_elems, uint16_t *meta_data_sizes, void *data, uint16_t size, uint idx); +void add_header_to_bulk(char **fragments, uint16_t *fragments_sizes, ngx_table_elt_t *header, ngx_uint_t index); + +ngx_int_t handle_custom_json_response(HttpJsonResponseData *json_response_data); +void handle_custom_web_response(HttpWebResponseData *web_response_data); + #endif // __NGX_CP_IO_H__ diff --git a/attachments/nginx/ngx_module/ngx_cp_metric.c b/attachments/nginx/ngx_module/ngx_cp_metric.c index 1e02781..ba79860 100644 --- a/attachments/nginx/ngx_module/ngx_cp_metric.c +++ b/attachments/nginx/ngx_module/ngx_cp_metric.c @@ -37,7 +37,7 @@ reset_metric_data() /// @param[in] value Value to increment the metric type. 
/// static void -updateCounterMetricField(ngx_http_plugin_metric_type_e metric_type, uint64_t value) +updateCounterMetricField(AttachmentMetricType metric_type, uint64_t value) { metric_data[metric_type] += value; } @@ -48,7 +48,7 @@ updateCounterMetricField(ngx_http_plugin_metric_type_e metric_type, uint64_t val /// @param[in] value Value to add to the average metric. /// static void -updateAverageMetricField(ngx_http_plugin_metric_type_e metric_type, uint64_t value) +updateAverageMetricField(AttachmentMetricType metric_type, uint64_t value) { metric_data[metric_type] = (((metric_data[metric_type] * metric_average_data_divisor[metric_type]) + value) / (metric_average_data_divisor[metric_type] + 1)); @@ -61,7 +61,7 @@ updateAverageMetricField(ngx_http_plugin_metric_type_e metric_type, uint64_t val /// @param[in] value Value to set. /// static void -updateMaxMetricField(ngx_http_plugin_metric_type_e metric_type, uint64_t value) +updateMaxMetricField(AttachmentMetricType metric_type, uint64_t value) { if (metric_data[metric_type] < value) metric_data[metric_type] = value; } @@ -72,7 +72,7 @@ updateMaxMetricField(ngx_http_plugin_metric_type_e metric_type, uint64_t value) /// @param[in] value Value to set. 
/// static void -updateMinMetricField(ngx_http_plugin_metric_type_e metric_type, uint64_t value) +updateMinMetricField(AttachmentMetricType metric_type, uint64_t value) { if (metric_data[metric_type] == 0) { metric_data[metric_type] = value; @@ -82,7 +82,7 @@ updateMinMetricField(ngx_http_plugin_metric_type_e metric_type, uint64_t value) } void -updateMetricField(ngx_http_plugin_metric_type_e metric_type, uint64_t value) +updateMetricField(AttachmentMetricType metric_type, uint64_t value) { switch (metric_type) { case CPU_USAGE: diff --git a/attachments/nginx/ngx_module/ngx_cp_metric.h b/attachments/nginx/ngx_module/ngx_cp_metric.h index 2453f7f..4aff40a 100644 --- a/attachments/nginx/ngx_module/ngx_cp_metric.h +++ b/attachments/nginx/ngx_module/ngx_cp_metric.h @@ -1,22 +1,22 @@ -// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + /// @file ngx_cp_metric.h #ifndef __NGX_CP_METRIC_H__ #define __NGX_CP_METRIC_H__ -#include +#include #include #include @@ -25,7 +25,7 @@ /// @param[in] metric_type Metric type to update. /// @param[in] value Value to set. /// -void updateMetricField(ngx_http_plugin_metric_type_e metric_type, uint64_t value); +void updateMetricField(AttachmentMetricType metric_type, uint64_t value); /// /// @brief Goes over all the metrics and resets them to 0. diff --git a/attachments/nginx/ngx_module/ngx_cp_static_content.c b/attachments/nginx/ngx_module/ngx_cp_static_content.c index 279f615..ce0614f 100644 --- a/attachments/nginx/ngx_module/ngx_cp_static_content.c +++ b/attachments/nginx/ngx_module/ngx_cp_static_content.c @@ -484,7 +484,7 @@ finalize_static_resource_response( } ngx_int_t -handle_static_resource_request(uint32_t session_id, ngx_http_cp_verdict_e *verdict, ngx_http_request_t *request) +handle_static_resource_request(uint32_t session_id, ServiceVerdict *verdict, ngx_http_request_t *request) { ngx_str_t null_terminated_uri; ngx_str_t static_resource_name; diff --git a/attachments/nginx/ngx_module/ngx_cp_static_content.h b/attachments/nginx/ngx_module/ngx_cp_static_content.h index 04792f4..b891b8c 100644 --- a/attachments/nginx/ngx_module/ngx_cp_static_content.h +++ b/attachments/nginx/ngx_module/ngx_cp_static_content.h @@ -20,7 +20,7 @@ #include #include -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" #define NOT_A_STATIC_RESOURCE NGX_DECLINED @@ -55,7 +55,7 @@ ngx_int_t is_static_resources_table_initialized(void); /// ngx_int_t handle_static_resource_request( uint32_t session_id, - ngx_http_cp_verdict_e *verdict, + ServiceVerdict *verdict, ngx_http_request_t *request ); diff --git a/attachments/nginx/ngx_module/ngx_cp_utils.c b/attachments/nginx/ngx_module/ngx_cp_utils.c index ff5a151..76b85bb 100644 --- 
a/attachments/nginx/ngx_module/ngx_cp_utils.c +++ b/attachments/nginx/ngx_module/ngx_cp_utils.c @@ -28,8 +28,13 @@ #include "nginx_attachment_util.h" #include "ngx_cp_initializer.h" -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" #include "ngx_cp_metric.h" +#ifdef NGINX_ASYNC_SUPPORTED +#include "async/ngx_cp_async_core.h" +#endif + +extern void disconnect_communication(void); #define USERCHECK_TITLE_START "" #define USERCHECK_BODY_START "" @@ -73,6 +78,9 @@ static uint32_t cur_session_id = 0; ///< Current session ID. static uint pid = 0; +static uint is_async_mode_toggled_on_in_last_reconfig = 0; +static uint is_async_mode_toggled_off_in_last_reconfig = 0; + ngx_http_cp_sessions_per_minute_limit sessions_per_minute_limit_info = { .sessions_per_second = {0}, .last_minute_sessions_sum = 0, @@ -87,7 +95,7 @@ ngx_int_t dbg_is_needed = 0; ///< Debug flag. ngx_int_t num_of_connection_attempts = 0; ///< Maximum number of attempted connections. ngx_uint_t fail_open_timeout = 50; ///< Fail open timeout in milliseconds. ngx_uint_t fail_open_hold_timeout = 150; ///< Fail open wait timeout in milliseconds. -ngx_http_cp_verdict_e sessions_per_minute_limit_verdict = TRAFFIC_VERDICT_ACCEPT; +ServiceVerdict sessions_per_minute_limit_verdict = TRAFFIC_VERDICT_ACCEPT; ngx_uint_t max_sessions_per_minute = 0; ///< Masimum session per minute. ngx_uint_t req_max_proccessing_ms_time = 3000; ///< Total Request processing timeout in milliseconds. ngx_uint_t res_max_proccessing_ms_time = 3000; ///< Total Response processing timeout in milliseconds. @@ -97,8 +105,8 @@ ngx_uint_t req_body_thread_timeout_msec = 150; ///< Request body processing time ngx_uint_t res_header_thread_timeout_msec = 100; ///< Response header processing timeout in milliseconds. ngx_uint_t res_body_thread_timeout_msec = 150; ///< Response body processing timeout in milliseconds. ngx_uint_t waiting_for_verdict_thread_timeout_msec = 150; ///< Wait thread processing timeout in milliseconds. 
-ngx_http_inspection_mode_e inspection_mode = NON_BLOCKING_THREAD; ///< Default inspection mode. -ngx_uint_t num_of_nginx_ipc_elements = 200; ///< Number of NGINX IPC elements. +NanoHttpInspectionMode inspection_mode = NON_BLOCKING_THREAD; ///< Default inspection mode. +ngx_uint_t num_of_nginx_ipc_elements = 2048; ///< Number of NGINX IPC elements. ngx_msec_t keep_alive_interval_msec = DEFAULT_KEEP_ALIVE_INTERVAL_MSEC; ngx_uint_t min_retries_for_verdict = 3; ///< Minimum number of retries for verdict. ngx_uint_t max_retries_for_verdict = 15; ///< Maximum number of retries for verdict. @@ -106,6 +114,16 @@ ngx_uint_t hold_verdict_retries = 3; ///< Number of retries for hold verdict. ngx_uint_t hold_verdict_polling_time = 1; ///< Polling time for hold verdict. ngx_uint_t body_size_trigger = 200000; ///< Request body size in bytes to switch to maximum retries for verdict. ngx_uint_t remove_res_server_header = 0; ///< Remove server header flag. +ngx_uint_t paired_affinity_enabled = 0; ///< Paired affinity enabled flag. +ngx_uint_t decompression_pool_size = 262144; ///< Decompression pool size in bytes (256KB for high compression rates). +ngx_uint_t recompression_pool_size = 16384; ///< Recompression pool size in bytes. +ngx_uint_t is_async_mode_enabled = 0; ///< Async mode enabled flag. +ngx_uint_t is_brotli_inspection_enabled = 0; ///< Brotli inspection enabled flag. 
+ +// JSON response support +static ngx_str_t json_response_body = {0, NULL}; +static ngx_uint_t json_response_code = NGX_HTTP_FORBIDDEN; +static AttachmentContentType json_response_content_type = CONTENT_TYPE_APPLICATION_JSON; static struct timeval getCurrTimeFast() @@ -521,7 +539,7 @@ get_timeout_val_msec(const int delta_time_in_msec) } void -set_custom_response(const ngx_str_t *title, const ngx_str_t *body, const ngx_str_t *uuid, ngx_uint_t response_code) +set_custom_response_block_page(const ngx_str_t *title, const ngx_str_t *body, const ngx_str_t *uuid, ngx_uint_t response_code) { write_dbg( DBG_LEVEL_TRACE, @@ -539,6 +557,9 @@ set_custom_response(const ngx_str_t *title, const ngx_str_t *body, const ngx_str web_response_body_size = body->len; web_response_uuid_size = uuid->len; + if (web_response_uuid_size >= sizeof(web_response_uuid)) web_response_uuid_size = sizeof(web_response_uuid) - 1; + memcpy(web_response_uuid, uuid->data, web_response_uuid_size); + web_response_uuid[web_response_uuid_size] = 0; if (web_response_title_size == 0 || web_response_body_size == 0) return; // Copies the provided variables into their respective response variables. 
memcpy(web_response_title, title->data, web_response_title_size); @@ -546,8 +567,6 @@ set_custom_response_block_page(const ngx_str_t *title, const ngx_str_t *body, const ngx_str if (web_response_uuid_size >= sizeof(web_response_uuid)) { web_response_uuid_size = sizeof(web_response_uuid) - 1; } - memcpy(web_response_uuid, uuid->data, web_response_uuid_size); - web_response_uuid[web_response_uuid_size] = 0; } void @@ -601,7 +620,7 @@ set_response_page_chain_elem(ngx_buf_t **part, ngx_str_t *content, ngx_chain_t * } ngx_int_t -get_response_page(ngx_http_request_t *request, ngx_chain_t (*out_chain)[7]) +get_block_page_response(ngx_http_request_t *request, ngx_chain_t (*out_chain)[7]) { ngx_int_t idx; ngx_chain_t *tmp_next; @@ -651,7 +670,7 @@ get_response_page(ngx_http_request_t *request, ngx_chain_t (*out_chain)[7]) } ngx_uint_t -get_response_page_length(void) +get_response_page_length_web_page(void) { ngx_uint_t idx; ngx_uint_t total_length = 0; @@ -675,6 +694,79 @@ get_response_code(void) { return web_triggers_response_code; } +void +set_custom_response_json(const ngx_str_t *body, ngx_uint_t response_code, AttachmentContentType content_type) +{ + write_dbg( + DBG_LEVEL_INFO, + "Setting JSON response: response_code = %d, body size = %d", + response_code, + body->len + ); + + json_response_code = response_code; + json_response_content_type = content_type; + + if (json_response_body.data && memory_pool) { + ngx_pfree(memory_pool, json_response_body.data); + json_response_body.data = NULL; + json_response_body.len = 0; + } + + if (memory_pool && body->len > 0) { + json_response_body.len = body->len; + json_response_body.data = ngx_pcalloc(memory_pool, body->len + 1); + if (json_response_body.data) { + ngx_memcpy(json_response_body.data, body->data, body->len); + json_response_body.data[body->len] = 0; + } + } +} + +ngx_int_t +get_response_page_json(ngx_http_request_t *request, ngx_chain_t (*out_chain)[1]) +{ + ngx_buf_t *buf = ngx_calloc_buf(request->pool); + if (buf
== NULL) { + write_dbg(DBG_LEVEL_WARNING, "Failed to allocate new buffer element for JSON response"); + return NGX_ERROR_ERR; + } + + if (json_response_body.data == NULL || json_response_body.len == 0) { + write_dbg(DBG_LEVEL_INFO, "JSON response body is empty or not set"); + return NGX_ERROR_ERR; + } + + buf->pos = json_response_body.data; + buf->last = buf->pos + json_response_body.len; + buf->memory = 1; + buf->last_buf = 1; + buf->last_in_chain = 1; + + (*out_chain)[0].buf = buf; + (*out_chain)[0].next = NULL; + + return NGX_OK; +} + +ngx_uint_t +get_response_page_length_json(void) +{ + return json_response_body.len; +} + +ngx_uint_t +get_response_code_json(void) +{ + return json_response_code; +} + +AttachmentContentType +get_response_content_type(void) +{ + return json_response_content_type; +} + const char * get_web_response_uuid(void) { @@ -718,7 +810,7 @@ get_number_of_digits(int num) return num_of_digits; } -ngx_http_cp_verdict_e +ServiceVerdict get_sessions_per_minute_limit_verdict() { return sessions_per_minute_limit_verdict; @@ -914,6 +1006,31 @@ reset_dbg_ctx() is_ctx_match = 1; } +void +reset_async_mode_toggled() +{ + is_async_mode_toggled_on_in_last_reconfig = 0; + is_async_mode_toggled_off_in_last_reconfig = 0; +} + +ngx_int_t +is_async_toggled_on_in_last_reconfig() +{ + return is_async_mode_toggled_on_in_last_reconfig; +} + +ngx_int_t +is_async_toggled_off_in_last_reconfig() +{ + return is_async_mode_toggled_off_in_last_reconfig; +} + +ngx_int_t +is_async_toggled_in_last_reconfig() +{ + return is_async_toggled_off_in_last_reconfig() || is_async_toggled_on_in_last_reconfig(); +} + ngx_int_t init_general_config(const char *conf_path) { @@ -973,9 +1090,30 @@ init_general_config(const char *conf_path) max_retries_for_verdict = getMaxRetriesForVerdict(); body_size_trigger = getReqBodySizeTrigger(); remove_res_server_header = getRemoveResServerHeader(); + decompression_pool_size = getDecompressionPoolSize(); + recompression_pool_size = 
getRecompressionPoolSize(); + is_brotli_inspection_enabled = getIsBrotliInspectionEnabled(); num_of_nginx_ipc_elements = getNumOfNginxIpcElements(); keep_alive_interval_msec = (ngx_msec_t) getKeepAliveIntervalMsec(); + paired_affinity_enabled = isPairedAffinityEnabled(); + +#ifdef NGINX_ASYNC_SUPPORTED + ngx_uint_t current_async_mode_enabled = is_async_mode_enabled; + is_async_mode_enabled = isAsyncModeEnabled(); + + if (is_async_mode_enabled && (is_async_mode_enabled != current_async_mode_enabled)) { + write_dbg(DBG_LEVEL_INFO, "Enabling async mode"); + is_async_mode_toggled_on_in_last_reconfig = 1; + is_async_mode_toggled_off_in_last_reconfig = 0; + } + + if (!is_async_mode_enabled && (is_async_mode_enabled != current_async_mode_enabled)) { + write_dbg(DBG_LEVEL_INFO, "Disabling async mode"); + is_async_mode_toggled_off_in_last_reconfig = 1; + is_async_mode_toggled_on_in_last_reconfig = 0; + } +#endif set_static_resources_path(getStaticResourcesPath()); is_configuration_updated = NGX_OK; @@ -1001,12 +1139,15 @@ init_general_config(const char *conf_path) "wait thread timeout: %u msec, " "static resources path: %s, " "num of nginx ipc elements: %u, " - "keep alive interval msec: %u msec" - "min retries for verdict: %u" - "max retries for verdict: %u" - "num retries for hold verdict: %u" - "polling time for hold verdict: %u" - "body size trigger for request: %u", + "keep alive interval msec: %u msec, " + "min retries for verdict: %u, " + "max retries for verdict: %u, " + "num retries for hold verdict: %u, " + "polling time for hold verdict: %u, " + "body size trigger for request: %u, " + "decompression pool size: %u bytes, " + "recompression pool size: %u bytes, " + "async mode: %d", inspection_mode, new_dbg_level, (fail_mode_verdict == NGX_OK ? 
"fail-open" : "fail-close"), @@ -1030,7 +1171,10 @@ init_general_config(const char *conf_path) max_retries_for_verdict, hold_verdict_retries, hold_verdict_polling_time, - body_size_trigger + body_size_trigger, + decompression_pool_size, + recompression_pool_size, + is_async_mode_enabled ); @@ -1224,10 +1368,23 @@ print_buffer_chain(ngx_chain_t *chain, char *msg, int num_bytes, int _dbg_level) for (ngx_chain_t *chain_elem = chain; chain_elem != NULL; chain_elem = chain_elem->next) { write_dbg( DBG_LEVEL_WARNING, - "%s chain elem: size: %d, is last buf: %d", + "%s chain elem: size=%d " + "[tmp:%d mem:%d mmap:%d in_file:%d " + "flush:%d sync:%d recycled:%d " + "last_buf:%d last_in_chain:%d last_shadow:%d temp_file:%d]", msg, - chain_elem->buf->last - chain_elem->buf->pos, - chain_elem->buf->last_buf + (int)(chain_elem->buf->last - chain_elem->buf->pos), + chain_elem->buf->temporary, + chain_elem->buf->memory, + chain_elem->buf->mmap, + chain_elem->buf->in_file, + chain_elem->buf->flush, + chain_elem->buf->sync, + chain_elem->buf->recycled, + chain_elem->buf->last_buf, + chain_elem->buf->last_in_chain, + chain_elem->buf->last_shadow, + chain_elem->buf->temp_file ); print_buffer(chain_elem->buf, num_bytes, _dbg_level); } diff --git a/attachments/nginx/ngx_module/ngx_cp_utils.h b/attachments/nginx/ngx_module/ngx_cp_utils.h index 74cd1f8..1c71ebe 100644 --- a/attachments/nginx/ngx_module/ngx_cp_utils.h +++ b/attachments/nginx/ngx_module/ngx_cp_utils.h @@ -23,7 +23,7 @@ #include #include -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" #ifndef __FILENAME__ #define __FILENAME__ (strrchr(__FILE__, '/') ? 
strrchr(__FILE__, '/') + 1 : __FILE__) @@ -60,7 +60,7 @@ extern ngx_uint_t req_body_thread_timeout_msec; extern ngx_uint_t res_header_thread_timeout_msec; extern ngx_uint_t res_body_thread_timeout_msec; extern ngx_uint_t waiting_for_verdict_thread_timeout_msec; -extern ngx_http_inspection_mode_e inspection_mode; +extern NanoHttpInspectionMode inspection_mode; extern ngx_uint_t num_of_nginx_ipc_elements; extern ngx_uint_t min_retries_for_verdict; extern ngx_uint_t max_retries_for_verdict; @@ -68,6 +68,11 @@ extern ngx_uint_t hold_verdict_retries; extern ngx_uint_t hold_verdict_polling_time; extern ngx_uint_t body_size_trigger; extern ngx_uint_t remove_res_server_header; +extern ngx_uint_t paired_affinity_enabled; +extern ngx_uint_t decompression_pool_size; +extern ngx_uint_t recompression_pool_size; +extern ngx_uint_t is_async_mode_enabled; +extern ngx_uint_t is_brotli_inspection_enabled; /// /// @struct ngx_http_cp_list_iterator @@ -232,13 +237,20 @@ const char *get_web_response_uuid(void); ngx_uint_t get_web_response_uuid_size(void); /// -/// @brief Sets a custom response page by modifying web_response_title/body/uuid variables. +/// @brief Sets a custom web page response by modifying web_response_title/body/uuid variables. /// @param[in] title Sets the web response title. -/// @param[in] message Sets the response body. +/// @param[in] body Sets the response body. /// @param[in] uuid Sets the uuid of the custom response. -/// @param[in, out] response_code Sets the response code of the custom response. +/// @param[in] response_code Sets the response code of the custom response. /// -void set_custom_response(const ngx_str_t *title, const ngx_str_t *message, const ngx_str_t *uuid, ngx_uint_t response_code); +void set_custom_response_block_page(const ngx_str_t *title, const ngx_str_t *body, const ngx_str_t *uuid, ngx_uint_t response_code); + +/// +/// @brief Sets a custom JSON response. +/// @param[in] body Sets the JSON response body. 
+/// @param[in] response_code Sets the response code of the custom response. +/// +void set_custom_response_json(const ngx_str_t *body, ngx_uint_t response_code, AttachmentContentType content_type); /// /// @brief Sets a redirect response by modifying redirect triggers, redirect_location and web_response_uuid. @@ -294,28 +306,52 @@ struct timeval get_timeout_val_usec(const int delta_time_in_usec); /// struct timeval get_timeout_val_msec(const int delta_time_in_msec); -/// -/// @brief Get the currently set response page. -/// @param[in, out] request NGINX request, used to get the NGINX pool to allocate buffer needed for out_chain. -/// @param[in, out] out_chain NGINX chain that the response page data will be written to. -/// @returns ngx_int_t -/// - #NGX_OK. -/// - #NGX_ERROR_ERR. -/// -ngx_int_t get_response_page(ngx_http_request_t *request, ngx_chain_t (*out_chain)[7]); - -/// -/// @brief Get currently set response page length. -/// @returns ngx_uint_t length of the response page. -/// -ngx_uint_t get_response_page_length(void); - /// /// @brief Get currently set response code. /// @returns ngx_uint_t web_triggers_response_code variable. /// ngx_uint_t get_response_code(void); +/// +/// @brief Get JSON response page. +/// @param[in] request NGINX request. +/// @param[out] out_chain NGINX chain to fill with JSON response data. +/// @returns ngx_int_t NGX_OK if successful, NGX_ERROR_ERR if failed. +/// +ngx_int_t get_response_page_json(ngx_http_request_t *request, ngx_chain_t (*out_chain)[1]); + +/// +/// @brief Get currently set JSON response page length. +/// @returns ngx_uint_t length of the JSON response body. +/// +ngx_uint_t get_response_page_length_json(void); + +/// +/// @brief Get currently set JSON response code. +/// @returns ngx_uint_t JSON response code variable. +/// +ngx_uint_t get_response_code_json(void); + +/// +/// @brief Get currently set JSON content type. +/// @returns AttachmentContentType JSON content type variable. 
+/// +AttachmentContentType get_response_content_type(void); + +/// +/// @brief Get the block page (web page) response. +/// @param[in] request NGINX request. +/// @param[out] out_chain NGINX chain to fill with web page response data. +/// @returns ngx_int_t NGX_OK if successful, NGX_ERROR_ERR if failed. +/// +ngx_int_t get_block_page_response(ngx_http_request_t *request, ngx_chain_t (*out_chain)[7]); + +/// +/// @brief Get currently set web page response length. +/// @returns ngx_uint_t length of the web page response. +/// +ngx_uint_t get_response_page_length_web_page(void); + /// /// @brief Get currently set static resource path. /// @returns char * get static_resources_path variable. @@ -343,9 +379,9 @@ unsigned int get_number_of_digits(int num); /// /// @brief Get sessions per minute limit verdict. -/// @returns ngx_http_cp_verdict_e sessions_per_minute_limit_verdict variable. +/// @returns ServiceVerdict sessions_per_minute_limit_verdict variable. /// -ngx_http_cp_verdict_e get_sessions_per_minute_limit_verdict(void); +ServiceVerdict get_sessions_per_minute_limit_verdict(void); /// /// @brief Get maximum sessions per minute. 
@@ -460,5 +496,9 @@ void print_buffer(ngx_buf_t *buf, int num_bytes, int _dbg_level); /// void print_buffer_chain(ngx_chain_t *chain, char *msg, int num_bytes, int _dbg_level); +ngx_int_t is_async_toggled_on_in_last_reconfig(); +ngx_int_t is_async_toggled_off_in_last_reconfig(); +ngx_int_t is_async_toggled_in_last_reconfig(); +void reset_async_mode_toggled(); #endif // __NGX_CP_UTILS_H__ diff --git a/attachments/nginx/ngx_module/ngx_http_cp_attachment_module.c b/attachments/nginx/ngx_module/ngx_http_cp_attachment_module.c index d4a7454..dd85661 100644 --- a/attachments/nginx/ngx_module/ngx_http_cp_attachment_module.c +++ b/attachments/nginx/ngx_module/ngx_http_cp_attachment_module.c @@ -20,10 +20,15 @@ #include #include "ngx_cp_hooks.h" +#ifdef NGINX_ASYNC_SUPPORTED +#include "async/ngx_cp_async_core.h" +#include "async/ngx_cp_async_headers.h" +#include "async/ngx_cp_async_body.h" +#endif #include "ngx_cp_utils.h" #include "ngx_cp_initializer.h" #include "ngx_http_cp_attachment_module.h" -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" extern ngx_uint_t current_config_version; ///< NGINX configuration version. @@ -85,6 +90,15 @@ static ngx_int_t ngx_cp_attachment_init_worker(ngx_cycle_t *cycle); /// static void ngx_cp_attachment_fini_worker(ngx_cycle_t *cycle); +/// +/// @brief Initialize global environment variable cache for async mode. +/// @details Called once during module initialization to cache the CP_ASYNC_MODE environment variable. +/// +static void ngx_cp_init_env_async_mode(void); + +// Global variable to cache environment variable check +static ngx_int_t g_env_async_mode = 0; ///< Cached environment async mode setting + ngx_http_output_header_filter_pt ngx_http_next_response_header_filter; ///< NGINX response header filter. ngx_http_request_body_filter_pt ngx_http_next_request_body_filter; ///< NGINX request body filter. ngx_http_output_body_filter_pt ngx_http_next_response_body_filter; ///< NGINX output body filter. 
@@ -123,6 +137,16 @@ static ngx_command_t ngx_cp_attachment_commands[] = { offsetof(ngx_cp_attachment_conf_t, waf_tag), NULL }, +#ifdef NGINX_ASYNC_SUPPORTED + { + ngx_string("ngx_cp_async_mode"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_cp_attachment_conf_t, async_mode), + NULL + }, +#endif ngx_null_command }; @@ -213,6 +237,9 @@ ngx_cp_attachment_create_conf(ngx_conf_t *conf) module_conf->enable = NGX_CONF_UNSET; module_conf->num_of_workers = 0; module_conf->current_loc_config_version = current_config_version; +#ifdef NGINX_ASYNC_SUPPORTED + module_conf->async_mode = NGX_CONF_UNSET; +#endif ngx_str_null(&module_conf->waf_tag); write_dbg(DBG_LEVEL_TRACE, "Successfully created attachment module configuration"); return module_conf; @@ -265,6 +292,37 @@ ngx_cp_set_module_loc_conf(ngx_http_request_t *request, ngx_flag_t new_state) write_dbg(DBG_LEVEL_INFO, "Configuration set to be %s", conf->enable ? 
"enabled" : "disabled"); } +ngx_int_t +is_ngx_cp_async_mode_enabled_for_request(ngx_http_request_t *request) +{ +#ifndef NGINX_ASYNC_SUPPORTED + // For nginx versions below 1.20, async mode is not supported + (void)request; + is_async_mode_enabled = 0; + return 0; +#else + if (is_async_mode_enabled) { + return 1; + } + + if (g_env_async_mode) { + is_async_mode_enabled = 1; + return 1; + } + + ngx_cp_attachment_conf_t *conf = ngx_http_get_module_loc_conf(request, ngx_http_cp_attachment_module); + if (conf != NULL && conf->async_mode != NGX_CONF_UNSET) { + if (conf->async_mode) { + is_async_mode_enabled = 1; + return 1; + } + } + + is_async_mode_enabled = 0; + return 0; +#endif +} + static char * ngx_cp_attachment_merge_conf(ngx_conf_t *configure, void *curr, void *next) { @@ -274,6 +332,9 @@ ngx_cp_attachment_merge_conf(ngx_conf_t *configure, void *curr, void *next) ngx_conf_merge_value(conf->enable, prev->enable, NGX_CONF_UNSET); ngx_conf_merge_value(conf->num_of_workers, prev->num_of_workers, ngx_ncpu); +#ifdef NGINX_ASYNC_SUPPORTED + ngx_conf_merge_value(conf->async_mode, prev->async_mode, 0); +#endif ngx_conf_merge_str_value(conf->waf_tag, prev->waf_tag, ""); write_dbg(DBG_LEVEL_TRACE, "Successfully set attachment module configuration in nginx configuration chain"); @@ -460,7 +521,20 @@ ngx_cp_attachment_init_worker(ngx_cycle_t *cycle) timer_interval_msec ); + ngx_cp_init_env_async_mode(); init_attachment_registration_thread(); + +#ifdef NGINX_ASYNC_SUPPORTED + if (g_env_async_mode || is_async_mode_enabled) { + if (ngx_cp_async_init() != NGX_OK) { + write_dbg(DBG_LEVEL_WARNING, "Failed to initialize async connection management"); + } + } else { + write_dbg(DBG_LEVEL_INFO, "Async mode disabled, skipping async connection management initialization"); + } +#else + write_dbg(DBG_LEVEL_INFO, "Nginx version does not support async mode"); +#endif } return NGX_OK; } @@ -476,6 +550,11 @@ ngx_cp_attachment_fini_worker(ngx_cycle_t *cycle) reset_attachment_registration(); 
+#ifdef NGINX_ASYNC_SUPPORTED + // Cleanup async connection management + ngx_cp_async_cleanup(); +#endif + (void)cycle; if (is_timer_active) ngx_del_timer(&ngx_keep_alive_event); write_dbg(DBG_LEVEL_INFO, "Timer successfully deleted"); @@ -488,6 +567,7 @@ ngx_cp_attachment_init(ngx_conf_t *conf) ngx_http_handler_pt *handler; ngx_http_handler_pt *size_metrics_handler; ngx_http_core_main_conf_t *http_core_main_conf; + write_dbg(DBG_LEVEL_TRACE, "Setting the memory pool used in the current context"); if (conf->pool == NULL) { write_dbg( @@ -510,14 +590,17 @@ ngx_cp_attachment_init(ngx_conf_t *conf) write_dbg(DBG_LEVEL_WARNING, "Failed to set HTTP request headers' handler"); return NGX_ERROR; } + + // Set handler based on async mode configuration + write_dbg(DBG_LEVEL_DEBUG, "Setting unified handlers with dynamic async mode support"); *handler = ngx_http_cp_req_header_handler; + + ngx_http_next_request_body_filter = ngx_http_top_request_body_filter; + ngx_http_top_request_body_filter = ngx_http_cp_req_body_filter; ngx_http_next_response_header_filter = ngx_http_top_header_filter; ngx_http_top_header_filter = ngx_http_cp_res_header_filter; - ngx_http_next_request_body_filter = ngx_http_top_request_body_filter; - ngx_http_top_request_body_filter = ngx_http_cp_req_body_filter; - ngx_http_next_response_body_filter = ngx_http_top_body_filter; ngx_http_top_body_filter = ngx_http_cp_res_body_filter; @@ -533,3 +616,25 @@ ngx_cp_attachment_init(ngx_conf_t *conf) return NGX_OK; } + +/// +/// @brief Initialize global environment variable cache for async mode. +/// @details Called once during module initialization to cache the CP_ASYNC_MODE environment variable. 
+/// +static void +ngx_cp_init_env_async_mode(void) +{ +#ifdef NGINX_ASYNC_SUPPORTED + char *env_async_mode = getenv("CP_ASYNC_MODE"); + if (env_async_mode != NULL) { + if (strcmp(env_async_mode, "true") == 0 || strcmp(env_async_mode, "1") == 0) { + g_env_async_mode = 1; + } else { + g_env_async_mode = 0; + } + } +#else + // For nginx versions below 1.20, async mode is not supported + g_env_async_mode = 0; +#endif +} diff --git a/attachments/nginx/ngx_module/ngx_http_cp_attachment_module.h b/attachments/nginx/ngx_module/ngx_http_cp_attachment_module.h index c3418f8..ceaafce 100644 --- a/attachments/nginx/ngx_module/ngx_http_cp_attachment_module.h +++ b/attachments/nginx/ngx_module/ngx_http_cp_attachment_module.h @@ -32,6 +32,7 @@ typedef struct { ngx_int_t num_of_workers; ///< Number of workers. ngx_uint_t current_loc_config_version; ///< NGINX configuration version. ngx_str_t waf_tag; ///< WAF tag value for the location block. + ngx_flag_t async_mode; ///< Flags if async mode is enabled (default: true). } ngx_cp_attachment_conf_t; /// @@ -63,4 +64,11 @@ ngx_uint_t get_num_of_workers(ngx_http_request_t *request); /// void ngx_cp_set_module_loc_conf(ngx_http_request_t *request, ngx_flag_t new_state); +/// +/// @brief Check if async mode is enabled for a specific request. +/// @param[in] request NGINX request. +/// @returns ngx_int_t 1 if async mode is enabled, 0 if disabled. 
+/// +ngx_int_t is_ngx_cp_async_mode_enabled_for_request(ngx_http_request_t *request); + #endif // __NGX_HTTP_CP_ATTACHMENT_MODULE_H__ diff --git a/cmake/FindBrotli.cmake b/cmake/FindBrotli.cmake new file mode 100755 index 0000000..7d6cdc2 --- /dev/null +++ b/cmake/FindBrotli.cmake @@ -0,0 +1,225 @@ +# FindBrotli.cmake +# +# Supports COMPONENTS: +# decoder, encoder, common +# +# Exports targets: +# Brotli::decoder +# Brotli::encoder +# Brotli::common +# +# Optional variables: +# BROTLI_ROOT_DIR +# BROTLI_USE_STATIC_LIBS + +# ------------------------------------------------------------ +# Version handling (not supported) +# ------------------------------------------------------------ +if(Brotli_FIND_VERSION) + set(_brotli_version_error_msg "FindBrotli.cmake does not support version checking.") + if(Brotli_FIND_REQUIRED) + message(FATAL_ERROR "${_brotli_version_error_msg}") + elseif(NOT Brotli_FIND_QUIETLY) + message(WARNING "${_brotli_version_error_msg}") + endif() +endif() + +# ------------------------------------------------------------ +# Component dependencies +# ------------------------------------------------------------ +if(Brotli_FIND_REQUIRED_decoder OR Brotli_FIND_REQUIRED_encoder) + set(Brotli_FIND_REQUIRED_common TRUE) +endif() + +# ------------------------------------------------------------ +# Static library preference +# ------------------------------------------------------------ +if(BROTLI_USE_STATIC_LIBS) + set(_brotli_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) + if(WIN32) + set(CMAKE_FIND_LIBRARY_SUFFIXES .lib .a) + else() + set(CMAKE_FIND_LIBRARY_SUFFIXES .a) + endif() +endif() + +# ------------------------------------------------------------ +# Optional pkg-config +# ------------------------------------------------------------ +find_package(PkgConfig QUIET) + +# ------------------------------------------------------------ +# Includes +# ------------------------------------------------------------ 
+find_path(Brotli_INCLUDE_DIR + NAMES + brotli/decode.h + brotli/encode.h + HINTS + ${BROTLI_ROOT_DIR} + PATH_SUFFIXES + include + includes +) +mark_as_advanced(Brotli_INCLUDE_DIR) + +# ------------------------------------------------------------ +# Internal state +# ------------------------------------------------------------ +set(_brotli_req_vars "") + +# For figuring out the real (non-ALIAS) targets when using pkg-config +set(_brotli_decoder_real_target "") +set(_brotli_encoder_real_target "") +set(_brotli_common_real_target "") + +if(BROTLI_USE_STATIC_LIBS) + set(_brotli_stat_str "_STATIC") +else() + set(_brotli_stat_str "") +endif() + +# ------------------------------------------------------------ +# Components loop +# ------------------------------------------------------------ +foreach(_listvar "common;common" "decoder;dec" "encoder;enc") + list(GET _listvar 0 _component) + list(GET _listvar 1 _libname) + + # ---- pkg-config path ---- + if(PKG_CONFIG_FOUND) + if(BROTLI_USE_STATIC_LIBS) + pkg_check_modules( + Brotli_${_component}_STATIC + QUIET + GLOBAL + IMPORTED_TARGET + libbrotli${_libname} + ) + else() + pkg_check_modules( + Brotli_${_component} + QUIET + GLOBAL + IMPORTED_TARGET + libbrotli${_libname} + ) + endif() + endif() + + # If pkg-config created an imported target, make our alias to it. 
+ if(TARGET PkgConfig::Brotli_${_component}${_brotli_stat_str}) + add_library( + Brotli::${_component} + ALIAS + PkgConfig::Brotli_${_component}${_brotli_stat_str} + ) + + # Save the underlying real target name for later linkage fixes + set(_brotli_${_component}_real_target "PkgConfig::Brotli_${_component}${_brotli_stat_str}") + + set(Brotli_${_component}_FOUND TRUE) + + if(Brotli_FIND_REQUIRED_${_component}) + # For FindPackageHandleStandardArgs: ensure libraries are actually present + if(BROTLI_USE_STATIC_LIBS) + list(APPEND _brotli_req_vars Brotli_${_component}_STATIC_LIBRARIES) + else() + list(APPEND _brotli_req_vars Brotli_${_component}_LINK_LIBRARIES) + endif() + endif() + + continue() + endif() + + # ---- find_library path ---- + if(Brotli_FIND_REQUIRED_${_component}) + list(APPEND _brotli_req_vars Brotli_${_component}) + endif() + + if(BROTLI_USE_STATIC_LIBS) + set(_brotli_names + brotli${_libname}-static + libbrotli${_libname}-static + ) + else() + set(_brotli_names + brotli${_libname} + libbrotli${_libname} + ) + endif() + + find_library(Brotli_${_component} + NAMES ${_brotli_names} + HINTS ${BROTLI_ROOT_DIR} + PATH_SUFFIXES + lib + lib64 + libs + libs64 + lib/x86_64-linux-gnu + ) + mark_as_advanced(Brotli_${_component}) + + if(Brotli_${_component}) + set(Brotli_${_component}_FOUND TRUE) + + add_library(Brotli::${_component} UNKNOWN IMPORTED) + set_target_properties(Brotli::${_component} PROPERTIES + IMPORTED_LOCATION "${Brotli_${_component}}" + INTERFACE_INCLUDE_DIRECTORIES "${Brotli_INCLUDE_DIR}" + ) + + # In this branch, our target is real (not ALIAS), so it can be linked later. 
+ set(_brotli_${_component}_real_target "Brotli::${_component}") + else() + set(Brotli_${_component}_FOUND FALSE) + endif() +endforeach() + +# ------------------------------------------------------------ +# Link decoder/encoder → common (but never on ALIAS targets or IMPORTED targets) +# ------------------------------------------------------------ +if(_brotli_common_real_target) + foreach(_comp decoder encoder) + if(_brotli_${_comp}_real_target) + # Only link if the target is NOT an ALIAS and NOT an IMPORTED target + get_target_property(_aliased ${_brotli_${_comp}_real_target} ALIASED_TARGET) + get_target_property(_imported ${_brotli_${_comp}_real_target} IMPORTED) + if(NOT _aliased AND NOT _imported) + target_link_libraries(${_brotli_${_comp}_real_target} INTERFACE ${_brotli_common_real_target}) + endif() + endif() + endforeach() +endif() + +# ------------------------------------------------------------ +# Aggregate convenience variables +# ------------------------------------------------------------ +set(Brotli_LIBRARIES "") +foreach(_comp decoder encoder common) + if(TARGET Brotli::${_comp}) + list(APPEND Brotli_LIBRARIES Brotli::${_comp}) + endif() +endforeach() + +# ------------------------------------------------------------ +# Final package check (FIXED: use _brotli_req_vars) +# ------------------------------------------------------------ +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(Brotli + FOUND_VAR + Brotli_FOUND + REQUIRED_VARS + Brotli_INCLUDE_DIR + ${_brotli_req_vars} + HANDLE_COMPONENTS +) + +# ------------------------------------------------------------ +# Restore suffixes +# ------------------------------------------------------------ +if(BROTLI_USE_STATIC_LIBS) + set(CMAKE_FIND_LIBRARY_SUFFIXES ${_brotli_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES}) +endif() + diff --git a/core/attachments/http_configuration/http_configuration.cc b/core/attachments/http_configuration/http_configuration.cc index 18639ea..34f963c 100644 --- 
a/core/attachments/http_configuration/http_configuration.cc +++ b/core/attachments/http_configuration/http_configuration.cc @@ -106,7 +106,7 @@ HttpAttachmentConfiguration::save(cereal::JSONOutputArchive &archive) const "waiting_for_verdict_thread_timeout_msec", getNumericalValue("waiting_for_verdict_thread_timeout_msec") ), - cereal::make_nvp("nginx_inspection_mode", getNumericalValue("inspection_mode")), + cereal::make_nvp("nginx_inspection_mode", getNumericalValue("nginx_inspection_mode")), cereal::make_nvp("num_of_nginx_ipc_elements", getNumericalValue("num_of_nginx_ipc_elements")), cereal::make_nvp("keep_alive_interval_msec", getNumericalValue("keep_alive_interval_msec")), cereal::make_nvp("min_retries_for_verdict", getNumericalValue("min_retries_for_verdict")), @@ -114,7 +114,12 @@ HttpAttachmentConfiguration::save(cereal::JSONOutputArchive &archive) const cereal::make_nvp("hold_verdict_retries", getNumericalValue("hold_verdict_retries")), cereal::make_nvp("hold_verdict_polling_time", getNumericalValue("hold_verdict_polling_time")), cereal::make_nvp("body_size_trigger", getNumericalValue("body_size_trigger")), - cereal::make_nvp("remove_server_header", getNumericalValue("remove_server_header")) + cereal::make_nvp("remove_server_header", getNumericalValue("remove_server_header")), + cereal::make_nvp("decompression_pool_size", getNumericalValue("decompression_pool_size")), + cereal::make_nvp("recompression_pool_size", getNumericalValue("recompression_pool_size")), + cereal::make_nvp("is_paired_affinity_enabled", getNumericalValue("is_paired_affinity_enabled")), + cereal::make_nvp("is_async_mode_enabled", getNumericalValue("is_async_mode_enabled")), + cereal::make_nvp("is_brotli_inspection_enabled", getNumericalValue("is_brotli_inspection_enabled")) ); } @@ -173,6 +178,21 @@ HttpAttachmentConfiguration::load(cereal::JSONInputArchive &archive) loadNumericalValue(archive, "hold_verdict_polling_time", 1); loadNumericalValue(archive, "body_size_trigger", 200000); 
loadNumericalValue(archive, "remove_server_header", 0); + loadNumericalValue(archive, "decompression_pool_size", 262144); + loadNumericalValue(archive, "recompression_pool_size", 16384); + loadNumericalValue(archive, "is_paired_affinity_enabled", 0); + loadNumericalValue(archive, "is_brotli_inspection_enabled", 0); + + int g_env_async_mode = 1; + char *env_async_mode = getenv("CP_ASYNC_MODE"); + if (env_async_mode != NULL) { + if (strcmp(env_async_mode, "true") == 0 || strcmp(env_async_mode, "1") == 0) { + g_env_async_mode = 1; + } else { + g_env_async_mode = 0; + } + } + loadNumericalValue(archive, "is_async_mode_enabled", g_env_async_mode); } bool diff --git a/core/compression/CMakeLists.txt b/core/compression/CMakeLists.txt index 2331156..298d4cb 100644 --- a/core/compression/CMakeLists.txt +++ b/core/compression/CMakeLists.txt @@ -2,5 +2,9 @@ add_definitions(-DZLIB_CONST) add_library(osrc_compression_utils SHARED compression_utils.cc) +target_link_libraries(osrc_compression_utils + ${Brotli_LIBRARIES} +) + install(TARGETS osrc_compression_utils DESTINATION lib) install(TARGETS osrc_compression_utils DESTINATION nginx_attachment/lib) diff --git a/core/compression/compression_utils.cc b/core/compression/compression_utils.cc index 0aaa656..c7e8922 100644 --- a/core/compression/compression_utils.cc +++ b/core/compression/compression_utils.cc @@ -22,6 +22,8 @@ #include #include #include +#include +#include using namespace std; @@ -29,6 +31,10 @@ using DebugFunction = void(*)(const char *); static const int max_debug_level = static_cast(CompressionUtilsDebugLevel::COMPRESSION_DBG_LEVEL_ASSERTION); +static const int max_retries = 3; +static const size_t default_brotli_buffer_size = 16384; +static const size_t brotli_decompression_probe_size = 64; + static void defaultPrint(const char *debug_message) { @@ -104,12 +110,23 @@ static const int zlib_no_flush = Z_NO_FLUSH; struct CompressionStream { - CompressionStream() { bzero(&stream, sizeof(z_stream)); } + 
CompressionStream() + : + br_encoder_state(nullptr), + br_decoder_state(nullptr) + { + bzero(&stream, sizeof(z_stream)); + } + ~CompressionStream() { fini(); } tuple, bool> decompress(const unsigned char *data, uint32_t size) { + if (state == TYPE::UNINITIALIZED && size > 0 && isBrotli(data, size)) return decompressBrotli(data, size); + + if (state == TYPE::DECOMPRESS_BROTLI) return decompressBrotli(data, size); + initInflate(); if (state != TYPE::DECOMPRESS) throw runtime_error("Could not start decompression"); @@ -138,7 +155,7 @@ struct CompressionStream res.append(work_space.data(), stream.total_out - old_total_out); } else { ++retries; - if (retries > 3) { + if (retries > max_retries) { fini(); throw runtime_error("No results from inflate more than three times"); } @@ -156,6 +173,7 @@ struct CompressionStream basic_string compress(CompressionType type, const unsigned char *data, uint32_t size, int is_last_chunk) { + if (type == CompressionType::BROTLI) return compressBrotli(data, size, is_last_chunk); initDeflate(type); if (state != TYPE::COMPRESS) throw runtime_error("Could not start compression"); @@ -183,7 +201,7 @@ struct CompressionStream res.append(work_space.data(), stream.total_out - old_total_out); } else { ++retries; - if (retries > 3) { + if (retries > max_retries) { fini(); throw runtime_error("No results from deflate more than three times"); } @@ -201,7 +219,7 @@ private: void initInflate() { - if (state != TYPE::UNINITIALIZAED) return; + if (state != TYPE::UNINITIALIZED) return; auto init_status = inflateInit2(&stream, default_num_window_bits + 32); if (init_status != zlib_ok_return_value) { @@ -216,7 +234,7 @@ private: void initDeflate(CompressionType type) { - if (state != TYPE::UNINITIALIZAED) return; + if (state != TYPE::UNINITIALIZED) return; int num_history_window_bits; switch (type) { @@ -228,6 +246,10 @@ private: num_history_window_bits = default_num_window_bits; break; } + case CompressionType::BROTLI: { + zlibDbgAssertion << "Brotli 
compression should use compressBrotli()"; + return; + } default: { zlibDbgAssertion << "Invalid compression type value: " @@ -253,6 +275,190 @@ private: state = TYPE::COMPRESS; } + basic_string + compressBrotli(const unsigned char *data, uint32_t size, int is_last_chunk) + { + if (state == TYPE::UNINITIALIZED) { + br_encoder_state = BrotliEncoderCreateInstance(nullptr, nullptr, nullptr); + if (!br_encoder_state) throw runtime_error("Failed to create Brotli encoder state"); + + BrotliEncoderSetParameter(br_encoder_state, BROTLI_PARAM_QUALITY, BROTLI_DEFAULT_QUALITY); + BrotliEncoderSetParameter(br_encoder_state, BROTLI_PARAM_LGWIN, BROTLI_DEFAULT_WINDOW); + state = TYPE::COMPRESS_BROTLI; + } else if (state != TYPE::COMPRESS_BROTLI) { + throw runtime_error("Compression stream in inconsistent state for Brotli compression"); + } + + basic_string output; + vector buffer(16384); + int retries = 0; + const uint8_t* next_in = data; + size_t available_in = size; + + while (available_in > 0 || is_last_chunk) { + size_t available_out = buffer.size(); + uint8_t* next_out = buffer.data(); + + + BrotliEncoderOperation op = is_last_chunk ? 
BROTLI_OPERATION_FINISH : BROTLI_OPERATION_PROCESS; + auto brotli_success = BrotliEncoderCompressStream( + br_encoder_state, + op, + &available_in, + &next_in, + &available_out, + &next_out, + nullptr + ); + + if (brotli_success == BROTLI_FALSE) { + fini(); + throw runtime_error("Brotli compression error"); + } + + size_t bytes_written = buffer.size() - available_out; + if (bytes_written > 0) { + output.append(buffer.data(), bytes_written); + retries = 0; + } else { + retries++; + if (retries > max_retries) { + fini(); + throw runtime_error("Brotli compression error: Exceeded retry limit."); + } + } + + if (BrotliEncoderIsFinished(br_encoder_state)) break; + + if (available_in == 0 && !is_last_chunk) break; + } + + if (is_last_chunk) fini(); + + return output; + } + + tuple, bool> + decompressBrotli(const unsigned char *data, uint32_t size) + { + if (state != TYPE::DECOMPRESS_BROTLI) { + br_decoder_state = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr); + + if (!br_decoder_state) throw runtime_error("Failed to create Brotli decoder state"); + + BrotliDecoderSetParameter(br_decoder_state, BROTLI_DECODER_PARAM_LARGE_WINDOW, 1u); + state = TYPE::DECOMPRESS_BROTLI; + } + + basic_string output; + const uint8_t* next_in = data; + size_t available_in = size; + + size_t buffer_size = max(size * 4, default_brotli_buffer_size); + vector buffer(buffer_size); + + // Use a constant ratio for max buffer size relative to input size + const size_t max_buffer_size = 256 * 1024 * 1024; // 256 MB max buffer size + + while (true) { + size_t available_out = buffer.size(); + uint8_t* next_out = buffer.data(); + + BrotliDecoderResult result = BrotliDecoderDecompressStream( + br_decoder_state, + &available_in, + &next_in, + &available_out, + &next_out, + nullptr + ); + + if (result == BROTLI_DECODER_RESULT_ERROR) { + fini(); + auto error_msg = string(BrotliDecoderErrorString(BrotliDecoderGetErrorCode(br_decoder_state))); + throw runtime_error("Brotli decompression error: " + 
error_msg); + } + + // Handle any produced output + size_t bytes_produced = buffer.size() - available_out; + if (bytes_produced > 0) { + output.append(buffer.data(), bytes_produced); + } + + if (result == BROTLI_DECODER_RESULT_SUCCESS) { + bool is_finished = BrotliDecoderIsFinished(br_decoder_state); + if (is_finished) fini(); + return make_tuple(output, is_finished); + } + + if (result == BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT) { + // Check if we've exceeded the maximum buffer size limit + if (buffer.size() >= max_buffer_size) { + fini(); + throw runtime_error("Brotli decompression buffer size limit exceeded - possibly corrupted data"); + } + + // Resize buffer to accommodate more output + size_t new_size = min(buffer.size() * 2, max_buffer_size); + buffer.resize(new_size); + continue; // Continue with the same input, new buffer + } + + // If we reach here, we need more input but have no more to provide + if (available_in == 0) { + // No more input data available, return what we have so far + return make_tuple(output, false); + } + } + + return make_tuple(output, false); + } + + bool + isBrotli(const unsigned char *data, uint32_t size) + { + if (size < 4) return false; + + BrotliDecoderState* test_decoder = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr); + if (!test_decoder) return false; + + const uint8_t* next_in = data; + size_t available_in = min(size, brotli_decompression_probe_size); + uint8_t output[brotli_decompression_probe_size]; + size_t available_out = sizeof(output); + uint8_t* next_out = output; + + BrotliDecoderResult result = BrotliDecoderDecompressStream( + test_decoder, + &available_in, + &next_in, + &available_out, + &next_out, + nullptr + ); + + bool is_brotli = false; + + if ( + result != BROTLI_DECODER_RESULT_ERROR && + ( + available_out < sizeof(output) || + available_in < min(size, brotli_decompression_probe_size) + ) + ) { + is_brotli = true; + } + + BrotliDecoderDestroyInstance(test_decoder); + if (is_brotli) { + br_decoder_state 
= BrotliDecoderCreateInstance(nullptr, nullptr, nullptr); + BrotliDecoderSetParameter(br_decoder_state, BROTLI_DECODER_PARAM_LARGE_WINDOW, 1u); + state = TYPE::DECOMPRESS_BROTLI; + return true; + } + return false; + } + void fini() { @@ -261,11 +467,21 @@ private: if (state == TYPE::DECOMPRESS) end_stream_res = inflateEnd(&stream); if (state == TYPE::COMPRESS) end_stream_res = deflateEnd(&stream); - if (end_stream_res != zlib_ok_return_value) { + if (br_encoder_state) { + BrotliEncoderDestroyInstance(br_encoder_state); + br_encoder_state = nullptr; + } + + if (br_decoder_state) { + BrotliDecoderDestroyInstance(br_decoder_state); + br_decoder_state = nullptr; + } + + if (end_stream_res != zlib_ok_return_value && end_stream_res != Z_DATA_ERROR) { zlibDbgError << "Failed to clean state: " << getZlibError(end_stream_res); } - state = TYPE::UNINITIALIZAED; + state = TYPE::UNINITIALIZED; } string @@ -288,7 +504,16 @@ private: } z_stream stream; - enum class TYPE { UNINITIALIZAED, COMPRESS, DECOMPRESS } state = TYPE::UNINITIALIZAED; + enum class TYPE { + UNINITIALIZED, + COMPRESS, + DECOMPRESS, + COMPRESS_BROTLI, + DECOMPRESS_BROTLI + } state = TYPE::UNINITIALIZED; + + BrotliEncoderState* br_encoder_state = nullptr; + BrotliDecoderState* br_decoder_state = nullptr; }; void diff --git a/core/include/attachments/nano_attachment.h b/core/include/attachments/nano_attachment.h index 7d9c743..bd1d710 100755 --- a/core/include/attachments/nano_attachment.h +++ b/core/include/attachments/nano_attachment.h @@ -225,35 +225,35 @@ void FreeAttachmentResponseContent( ); /// -/// @brief Compresses HttpBody and return allocated compressed body. +/// @brief Compresses NanoHttpBody and return allocated compressed body. /// /// @param attachment The NanoAttachment object associated with the session. /// @param session_data The HttpSessionData object representing the session. /// @param bodies The bodies pointer to be compressed. 
/// -HttpBody * compressBody( +NanoHttpBody * compressBody( NanoAttachment *attachment, HttpSessionData *session_data, - HttpBody *bodies + NanoHttpBody *bodies ); /// -/// @brief Compresses HttpBody and return allocated compressed body. +/// @brief Compresses NanoHttpBody and return allocated compressed body. /// /// @param attachment The NanoAttachment object associated with the session. /// @param session_data The HttpSessionData object representing the session. /// @param bodies The bodies pointer to be decompressed. /// -HttpBody * decompressBody( +NanoHttpBody * decompressBody( NanoAttachment *attachment, HttpSessionData *session_data, - HttpBody *bodies + NanoHttpBody *bodies ); /// /// @brief Free allocated compressed body. /// -/// This function frees the allocated resources of HttpBody object. +/// This function frees the allocated resources of NanoHttpBody object. /// /// @param attachment The NanoAttachment object associated with the session. /// @param session_data The HttpSessionData object representing the session. @@ -263,31 +263,7 @@ void freeCompressedBody( NanoAttachment *attachment, HttpSessionData *session_data, - HttpBody *bodies + NanoHttpBody *bodies ); -/// -/// @brief Gets the request processing timeout in milliseconds. -/// -/// This function retrieves the configured timeout value for request processing -/// from the NanoAttachment configuration. -/// -/// @param attachment A pointer to the NanoAttachment structure. -/// -/// @return The request processing timeout in milliseconds. -/// -uint32_t GetRequestProcessingTimeout(NanoAttachment *attachment); - -/// -/// @brief Gets the response processing timeout in milliseconds. -/// -/// This function retrieves the configured timeout value for response processing -/// from the NanoAttachment configuration. -/// -/// @param attachment A pointer to the NanoAttachment structure. -/// -/// @return The response processing timeout in milliseconds. 
-/// -uint32_t GetResponseProcessingTimeout(NanoAttachment *attachment); - #endif // __NANO_ATTACHMENT_H__ diff --git a/core/include/attachments/nano_attachment_common.h b/core/include/attachments/nano_attachment_common.h index c0a6e0d..85aa1ed 100755 --- a/core/include/attachments/nano_attachment_common.h +++ b/core/include/attachments/nano_attachment_common.h @@ -7,10 +7,16 @@ #include #include +#include +#include +#include +#include + #include "compression_utils.h" typedef uint32_t SessionID; typedef void* DataBuffer; +typedef int64_t NanoHttpCpInjectPos; #define MAX_NGINX_UID_LEN 32 #define MAX_SHARED_MEM_PATH_LEN 128 @@ -175,7 +181,9 @@ typedef enum AttachmentDataType RESPONSE_END, CONTENT_LENGTH, METRIC_DATA_FROM_PLUGIN, - REQUEST_DELAYED_VERDICT + REQUEST_DELAYED_VERDICT, + + COUNT } AttachmentDataType; #ifdef __cplusplus @@ -207,9 +215,23 @@ typedef enum ServiceVerdict TRAFFIC_VERDICT_INJECT, TRAFFIC_VERDICT_IRRELEVANT, TRAFFIC_VERDICT_RECONF, - TRAFFIC_VERDICT_DELAYED + TRAFFIC_VERDICT_DELAYED, + LIMIT_RESPONSE_HEADERS, + TRAFFIC_VERDICT_CUSTOM_RESPONSE } ServiceVerdict; +#ifdef __cplusplus +typedef enum class AttachmentContentType +#else +typedef enum AttachmentContentType +#endif +{ + CONTENT_TYPE_APPLICATION_JSON, + CONTENT_TYPE_TEXT_HTML, + CONTENT_TYPE_TEXT_PLAIN, + CONTENT_TYPE_OTHER +} AttachmentContentType; + #ifdef __cplusplus typedef enum class AttachmentVerdict #else @@ -234,7 +256,7 @@ typedef enum HttpModificationType } HttpModificationType; typedef struct __attribute__((__packed__)) HttpInjectData { - int64_t injection_pos; + NanoHttpCpInjectPos injection_pos; HttpModificationType mod_type; uint16_t injection_size; uint8_t is_header; @@ -263,6 +285,13 @@ typedef struct __attribute__((__packed__)) HttpWebResponseData { } response_data; } HttpWebResponseData; +typedef struct __attribute__((__packed__)) HttpJsonResponseData { + uint16_t response_code; + uint16_t body_size; + AttachmentContentType content_type; + char body[0]; +} 
HttpJsonResponseData; + typedef struct { size_t len; unsigned char *data; @@ -308,6 +337,8 @@ typedef enum HttpMetaDataType PARSED_HOST_DATA, PARSED_URI_SIZE, PARSED_URI_DATA, + WAF_TAG_SIZE, + WAF_TAG_DATA, META_DATA_COUNT } HttpMetaDataType; @@ -402,10 +433,10 @@ typedef struct ResHttpHeaders { uint64_t content_length; } ResHttpHeaders; -typedef struct HttpBody { +typedef struct NanoHttpBody { nano_str_t *data; size_t bodies_count; -} HttpBody; +} NanoHttpBody; typedef struct AttachmentData { SessionID session_id; @@ -417,6 +448,7 @@ typedef struct AttachmentData { typedef union __attribute__((__packed__)) HttpModifyData { HttpInjectData inject_data[0]; HttpWebResponseData web_response_data[0]; + HttpJsonResponseData json_response_data[0]; } HttpModifyData; typedef struct __attribute__((__packed__)) HttpReplyFromService { @@ -479,6 +511,12 @@ typedef struct NanoResponseModifications { NanoHttpModificationList *modifications; } NanoResponseModifications; +typedef struct __attribute__((__packed__)) NanoHttpRequestData { + uint16_t data_type; + uint32_t session_id; + unsigned char data[0]; +} NanoHttpRequestData; + typedef struct __attribute__((__packed__)) NanoHttpMetricData { uint16_t data_type; #ifdef __cplusplus @@ -488,4 +526,147 @@ typedef struct __attribute__((__packed__)) NanoHttpMetricData { #endif } NanoHttpMetricData; +// Simple but reliable hash function for generating consistent, well-distributed offsets +// Uses a basic polynomial hash that avoids large intermediate values +static inline uint32_t hash_string(const char *str) { + uint32_t hash = 0; + while (*str) { + hash = (hash * 31 + (unsigned char)*str++) % 10000; // Keep values under 10000 + } + return hash; // Return bounded hash - modulo will be applied by caller +} + +static inline int set_affinity_by_uid(uint32_t uid) { + int num_cores = sysconf(_SC_NPROCESSORS_CONF); + // Debug print for troubleshooting + fprintf(stderr, "[DEBUG] set_affinity_by_uid: num_cores=%d, uid=%u\n", num_cores, uid); + 
uint32_t core_num = (uid - 1) % num_cores; // Ensure core_num is within bounds + cpu_set_t mask, mask_check; + CPU_ZERO(&mask); + CPU_ZERO(&mask_check); + CPU_SET(core_num, &mask); + pid_t pid = getpid(); // Use process PID, not thread ID + + if (sched_setaffinity(pid, sizeof(mask), &mask) != 0) { + return -1; // Error setting affinity + } + if (sched_getaffinity(pid, sizeof(mask_check), &mask_check) != 0) { + return -2; // Error getting affinity + } + // Compare mask and mask_check + int i; + for (i = 0; i < num_cores; ++i) { + if (CPU_ISSET(i, &mask) != CPU_ISSET(i, &mask_check)) { + return -3; // Affinity not set as expected + } + } + return 0; // Success +} + +static inline int set_affinity_by_uid_with_offset(uint32_t uid, uint32_t offset) { + int num_cores = sysconf(_SC_NPROCESSORS_CONF); + // Debug print for troubleshooting + fprintf( + stderr, "[DEBUG] set_affinity_by_uid_with_offset: num_cores=%d, uid=%u, offset=%u\n", num_cores, uid, offset); + // Prevent integer overflow by applying modulo to offset first + uint32_t safe_offset = offset % num_cores; + uint32_t core_num = ((uid - 1) + safe_offset) % num_cores; + cpu_set_t mask, mask_check; + CPU_ZERO(&mask); + CPU_ZERO(&mask_check); + CPU_SET(core_num, &mask); + pid_t pid = getpid(); // Use process PID, not thread ID + + if (sched_setaffinity(pid, sizeof(mask), &mask) != 0) { + return -1; // Error setting affinity + } + if (sched_getaffinity(pid, sizeof(mask_check), &mask_check) != 0) { + return -2; // Error getting affinity + } + // Compare mask and mask_check + int i; + for (i = 0; i < num_cores; ++i) { + if (CPU_ISSET(i, &mask) != CPU_ISSET(i, &mask_check)) { + return -3; // Affinity not set as expected + } + } + return 0; // Success +} + +static inline int set_affinity_by_uid_with_offset_fixed_cores(uint32_t uid, uint32_t offset, int num_cores) { + // Debug print for troubleshooting + fprintf( + stderr, + "[DEBUG] set_affinity_by_uid_with_offset_fixed_cores: num_cores=%d, uid=%u, offset=%u\n", + 
num_cores, + uid, + offset + ); + // Prevent integer overflow by applying modulo to offset first + + uint32_t safe_offset = offset % num_cores; + uint32_t core_num = ((uid - 1) + safe_offset) % num_cores; + cpu_set_t mask, mask_check; + CPU_ZERO(&mask); + CPU_ZERO(&mask_check); + CPU_SET(core_num, &mask); + pid_t pid = getpid(); // Use process PID, not thread ID + + if (sched_setaffinity(pid, sizeof(mask), &mask) != 0) { + return -1; // Error setting affinity + } + if (sched_getaffinity(pid, sizeof(mask_check), &mask_check) != 0) { + return -2; // Error getting affinity + } + // Compare mask and mask_check + int i; + for (i = 0; i < num_cores; ++i) { + if (CPU_ISSET(i, &mask) != CPU_ISSET(i, &mask_check)) { + return -3; // Affinity not set as expected + } + } + return 0; // Success +} + +static inline int set_affinity_to_core(int target_core) { + // Debug print for troubleshooting + fprintf(stderr, "[DEBUG] set_affinity_to_core: target_core=%d\n", target_core); + cpu_set_t mask, mask_check; + CPU_ZERO(&mask); + CPU_ZERO(&mask_check); + CPU_SET(target_core, &mask); + pid_t pid = getpid(); // Use process PID, not thread ID + + if (sched_setaffinity(pid, sizeof(mask), &mask) != 0) { + return -1; // Error setting affinity + } + if (sched_getaffinity(pid, sizeof(mask_check), &mask_check) != 0) { + return -2; // Error getting affinity + } + // Compare mask and mask_check + int num_cores = sysconf(_SC_NPROCESSORS_CONF); + int i; + for (i = 0; i < num_cores; ++i) { + if (CPU_ISSET(i, &mask) != CPU_ISSET(i, &mask_check)) { + return -3; // Affinity not set as expected + } + } + return 0; // Success +} + +static inline int reset_affinity() { + int num_cores = sysconf(_SC_NPROCESSORS_CONF); + // Debug print for troubleshooting + fprintf(stderr, "[DEBUG] reset_affinity: num_cores=%d\n", num_cores); + cpu_set_t mask; + CPU_ZERO(&mask); + int i; + for (i = 0; i < num_cores; ++i) CPU_SET(i, &mask); + pid_t pid = getpid(); // Use process PID, not thread ID + if 
(sched_setaffinity(pid, sizeof(mask), &mask) != 0) { + return -1; // Error setting affinity + } + return 0; // Success +} + #endif // __NANO_ATTACHMENT_COMMON_H__ diff --git a/core/include/attachments/nginx_attachment_common.h b/core/include/attachments/nginx_attachment_common.h index 3929dc2..6f8030f 100644 --- a/core/include/attachments/nginx_attachment_common.h +++ b/core/include/attachments/nginx_attachment_common.h @@ -15,286 +15,10 @@ #ifndef __NGINX_ATTACHMENT_COMMON_H__ #define __NGINX_ATTACHMENT_COMMON_H__ -#include -#include -#include -#include +// This file has been deprecated. Do not add anything here. +// Any future additions should be added to nano_attachment_common.h +// For any inquiries please contact Daniel Yashin. -#define MAX_NGINX_UID_LEN 32 -#define NUM_OF_NGINX_IPC_ELEMENTS 200 -#define DEFAULT_KEEP_ALIVE_INTERVAL_MSEC 300000 -#define SHARED_MEM_PATH "/dev/shm/" -#define SHARED_REGISTRATION_SIGNAL_PATH SHARED_MEM_PATH "check-point/cp-nano-attachment-registration" -#define SHARED_KEEP_ALIVE_PATH SHARED_MEM_PATH "check-point/cp-nano-attachment-registration-expiration-socket" -#define SHARED_VERDICT_SIGNAL_PATH SHARED_MEM_PATH "check-point/cp-nano-http-transaction-handler" -#define SHARED_ATTACHMENT_CONF_PATH SHARED_MEM_PATH "cp_nano_http_attachment_conf" -#define DEFAULT_STATIC_RESOURCES_PATH SHARED_MEM_PATH "static_resources" -#define INJECT_POS_IRRELEVANT -1 -#define CORRUPTED_SESSION_ID 0 -#define METRIC_PERIODIC_TIMEOUT 600 - -extern char shared_verdict_signal_path[]; extern int workers_amount_to_send; -typedef int64_t ngx_http_cp_inject_pos_t; - -#ifdef __cplusplus -typedef enum class ngx_http_modification_type -#else -typedef enum ngx_http_modification_type -#endif -{ - APPEND, - INJECT, - REPLACE -} ngx_http_modification_type_e; - -#ifdef __cplusplus -typedef enum class ngx_http_chunk_type -#else -typedef enum ngx_http_chunk_type -#endif -{ - REQUEST_START, - REQUEST_HEADER, - REQUEST_BODY, - REQUEST_END, - RESPONSE_CODE, - 
RESPONSE_HEADER, - RESPONSE_BODY, - RESPONSE_END, - CONTENT_LENGTH, - METRIC_DATA_FROM_PLUGIN, - HOLD_DATA, - - COUNT -} ngx_http_chunk_type_e; - -#ifdef __cplusplus -typedef enum class ngx_http_plugin_metric_type -#else -typedef enum ngx_http_plugin_metric_type -#endif -{ - TRANSPARENTS_COUNT, - TOTAL_TRANSPARENTS_TIME, - INSPECTION_OPEN_FAILURES_COUNT, - INSPECTION_CLOSE_FAILURES_COUNT, - INSPECTION_SUCCESSES_COUNT, - INJECT_VERDICTS_COUNT, - DROP_VERDICTS_COUNT, - ACCEPT_VERDICTS_COUNT, - IRRELEVANT_VERDICTS_COUNT, - RECONF_VERDICTS_COUNT, - INSPECT_VERDICTS_COUNT, - HOLD_VERDICTS_COUNT, - AVERAGE_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT, - MAX_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT, - MIN_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT, - AVERAGE_REQ_PPROCESSING_TIME_UNTIL_VERDICT, - MAX_REQ_PPROCESSING_TIME_UNTIL_VERDICT, - MIN_REQ_PPROCESSING_TIME_UNTIL_VERDICT, - AVERAGE_RES_PPROCESSING_TIME_UNTIL_VERDICT, - MAX_RES_PPROCESSING_TIME_UNTIL_VERDICT, - MIN_RES_PPROCESSING_TIME_UNTIL_VERDICT, - THREAD_TIMEOUT, - REG_THREAD_TIMEOUT, - REQ_HEADER_THREAD_TIMEOUT, - REQ_BODY_THREAD_TIMEOUT, - AVERAGE_REQ_BODY_SIZE_UPON_TIMEOUT, - MAX_REQ_BODY_SIZE_UPON_TIMEOUT, - MIN_REQ_BODY_SIZE_UPON_TIMEOUT, - RES_HEADER_THREAD_TIMEOUT, - RES_BODY_THREAD_TIMEOUT, - HOLD_THREAD_TIMEOUT, - AVERAGE_RES_BODY_SIZE_UPON_TIMEOUT, - MAX_RES_BODY_SIZE_UPON_TIMEOUT, - MIN_RES_BODY_SIZE_UPON_TIMEOUT, - THREAD_FAILURE, - REQ_PROCCESSING_TIMEOUT, - RES_PROCCESSING_TIMEOUT, - REQ_FAILED_TO_REACH_UPSTREAM, - REQ_FAILED_COMPRESSION_COUNT, - RES_FAILED_COMPRESSION_COUNT, - REQ_FAILED_DECOMPRESSION_COUNT, - RES_FAILED_DECOMPRESSION_COUNT, - REQ_SUCCESSFUL_COMPRESSION_COUNT, - RES_SUCCESSFUL_COMPRESSION_COUNT, - REQ_SUCCESSFUL_DECOMPRESSION_COUNT, - RES_SUCCESSFUL_DECOMPRESSION_COUNT, - CORRUPTED_ZIP_SKIPPED_SESSION_COUNT, - CPU_USAGE, - AVERAGE_VM_MEMORY_USAGE, - AVERAGE_RSS_MEMORY_USAGE, - MAX_VM_MEMORY_USAGE, - MAX_RSS_MEMORY_USAGE, - REQUEST_OVERALL_SIZE_COUNT, - RESPONSE_OVERALL_SIZE_COUNT, - - 
METRIC_TYPES_COUNT -} ngx_http_plugin_metric_type_e; - -#ifdef __cplusplus -typedef enum class ngx_http_cp_verdict -#else -typedef enum ngx_http_cp_verdict -#endif -{ - TRAFFIC_VERDICT_INSPECT, - TRAFFIC_VERDICT_ACCEPT, - TRAFFIC_VERDICT_DROP, - TRAFFIC_VERDICT_INJECT, - TRAFFIC_VERDICT_IRRELEVANT, - TRAFFIC_VERDICT_RECONF, - TRAFFIC_VERDICT_WAIT, - LIMIT_RESPONSE_HEADERS -} ngx_http_cp_verdict_e; - -#ifdef __cplusplus -typedef enum class ngx_http_cp_debug_level -#else -typedef enum ngx_http_cp_debug_level -#endif -{ - DBG_LEVEL_TRACE, - DBG_LEVEL_DEBUG, - DBG_LEVEL_INFO, - DBG_LEVEL_WARNING, - DBG_LEVEL_ERROR, -#ifndef __cplusplus - DBG_LEVEL_ASSERT, -#endif - DBG_LEVEL_COUNT -} ngx_http_cp_debug_level_e; - -#ifdef __cplusplus -typedef enum class ngx_http_meta_data -#else -typedef enum ngx_http_meta_data -#endif -{ - HTTP_PROTOCOL_SIZE, - HTTP_PROTOCOL_DATA, - HTTP_METHOD_SIZE, - HTTP_METHOD_DATA, - HOST_NAME_SIZE, - HOST_NAME_DATA, - LISTENING_ADDR_SIZE, - LISTENING_ADDR_DATA, - LISTENING_PORT, - URI_SIZE, - URI_DATA, - CLIENT_ADDR_SIZE, - CLIENT_ADDR_DATA, - CLIENT_PORT, - PARSED_HOST_SIZE, - PARSED_HOST_DATA, - PARSED_URI_SIZE, - PARSED_URI_DATA, - WAF_TAG_SIZE, - WAF_TAG_DATA, - - META_DATA_COUNT -} ngx_http_meta_data_e; - -#ifdef __cplusplus -typedef enum class ngx_http_header_data -#else -typedef enum ngx_http_header_data -#endif -{ - HEADER_KEY_SIZE, - HEADER_KEY_DATA, - HEADER_VAL_SIZE, - HEADER_VAL_DATA, - - HEADER_DATA_COUNT -} ngx_http_header_data_e; - -typedef enum ngx_http_inspection_mode -{ - NON_BLOCKING_THREAD, - BLOCKING_THREAD, - NO_THREAD, - - INSPECTION_MODE_COUNT -} ngx_http_inspection_mode_e; - -#ifdef __cplusplus -typedef enum class ngx_web_response_type -#else -typedef enum ngx_web_response_type -#endif -{ - CUSTOM_WEB_RESPONSE, - CUSTOM_WEB_BLOCK_PAGE_RESPONSE, - RESPONSE_CODE_ONLY, - REDIRECT_WEB_RESPONSE, - - NO_WEB_RESPONSE -} ngx_web_response_type_e; - -typedef struct __attribute__((__packed__)) ngx_http_cp_inject_data { - 
ngx_http_cp_inject_pos_t injection_pos; - ngx_http_modification_type_e mod_type; - uint16_t injection_size; - uint8_t is_header; - uint8_t orig_buff_index; - char data[0]; -} ngx_http_cp_inject_data_t; - -typedef struct __attribute__((__packed__)) ngx_http_cp_web_response_data { - uint8_t web_repsonse_type; - uint8_t uuid_size; - - union { - struct __attribute__((__packed__)) ngx_http_cp_custom_web_response_data { - uint16_t response_code; - uint8_t title_size; - uint8_t body_size; - char data[0]; - } custom_response_data; - - struct __attribute__((__packed__)) ngx_http_cp_redirect_data { - uint8_t unused_dummy; - uint8_t add_event_id; - uint16_t redirect_location_size; - char redirect_location[0]; - } redirect_data; - } response_data; -} ngx_http_cp_web_response_data_t; - -static_assert( - sizeof(((ngx_http_cp_web_response_data_t*)0)->response_data.custom_response_data) == - sizeof(((ngx_http_cp_web_response_data_t*)0)->response_data.redirect_data), - "custom_response_data must be equal to redirect_data in size" -); - -typedef union __attribute__((__packed__)) ngx_http_cp_modify_data { - ngx_http_cp_inject_data_t inject_data[0]; - ngx_http_cp_web_response_data_t web_response_data[0]; -} ngx_http_cp_modify_data_t; - -typedef struct __attribute__((__packed__)) ngx_http_cp_reply_from_service { - uint16_t verdict; - uint32_t session_id; - uint8_t modification_count; - ngx_http_cp_modify_data_t modify_data[0]; -} ngx_http_cp_reply_from_service_t; - -typedef struct __attribute__((__packed__)) ngx_http_cp_request_data { - uint16_t data_type; - uint32_t session_id; - unsigned char data[0]; -} ngx_http_cp_request_data_t; - -typedef struct __attribute__((__packed__)) ngx_http_cp_metric_data { - uint16_t data_type; -#ifdef __cplusplus - uint64_t data[static_cast(ngx_http_plugin_metric_type::METRIC_TYPES_COUNT)]; -#else - uint64_t data[METRIC_TYPES_COUNT]; -#endif -} ngx_http_cp_metric_data_t; - #endif // __NGINX_ATTACHMENT_COMMON_H__ diff --git 
a/core/include/attachments/nginx_attachment_util.h b/core/include/attachments/nginx_attachment_util.h index 6a2b5e9..0a4e16b 100644 --- a/core/include/attachments/nginx_attachment_util.h +++ b/core/include/attachments/nginx_attachment_util.h @@ -17,7 +17,7 @@ #include -#include "nginx_attachment_common.h" +#include "nano_attachment_common.h" #ifdef __cplusplus extern "C" { @@ -29,7 +29,7 @@ typedef const char * c_str; int initAttachmentConfig(c_str conf_file); -ngx_http_inspection_mode_e getInspectionMode(); +NanoHttpInspectionMode getInspectionMode(); unsigned int getNumOfNginxIpcElements(); unsigned int getKeepAliveIntervalMsec(); unsigned int getDbgLevel(); @@ -61,11 +61,16 @@ unsigned int getMinRetriesForVerdict(); unsigned int getMaxRetriesForVerdict(); unsigned int getReqBodySizeTrigger(); unsigned int getRemoveResServerHeader(); +unsigned int getDecompressionPoolSize(); +unsigned int getRecompressionPoolSize(); +unsigned int getIsBrotliInspectionEnabled(); unsigned int getWaitingForVerdictThreadTimeout(); int isIPAddress(c_str ip_str); int isSkipSource(c_str ip_str); +unsigned int isPairedAffinityEnabled(); +unsigned int isAsyncModeEnabled(); #ifdef __cplusplus } diff --git a/core/shmem_ipc/shmem_ipc.c b/core/shmem_ipc/shmem_ipc.c index d004da6..c303990 100644 --- a/core/shmem_ipc/shmem_ipc.c +++ b/core/shmem_ipc/shmem_ipc.c @@ -199,6 +199,11 @@ void resetIpc(SharedMemoryIPC *ipc, uint16_t num_of_data_segments) { writeDebug(TraceLevel, "Reseting IPC queues\n"); + if (!ipc || !ipc->rx_queue || !ipc->tx_queue) { + writeDebug(WarningLevel, "resetIpc called with NULL ipc pointer\n"); + return; + } + resetRingQueue(ipc->rx_queue, num_of_data_segments); resetRingQueue(ipc->tx_queue, num_of_data_segments); } @@ -208,6 +213,11 @@ destroyIpc(SharedMemoryIPC *shmem, int is_owner) { writeDebug(TraceLevel, "Destroying IPC queues\n"); + if (!shmem) { + writeDebug(WarningLevel, "Destroying IPC queues called with NULL shmem pointer\n"); + return; + } + if (shmem->rx_queue 
!= NULL) { destroySharedRingQueue(shmem->rx_queue, is_owner, isTowardsOwner(is_owner, 0)); shmem->rx_queue = NULL; @@ -225,6 +235,10 @@ dumpIpcMemory(SharedMemoryIPC *ipc) { writeDebug(WarningLevel, "Ipc memory dump:\n"); writeDebug(WarningLevel, "RX queue:\n"); + if (!ipc || !ipc->rx_queue) { + writeDebug(WarningLevel, "RX queue is NULL\n"); + return; + } dumpRingQueueShmem(ipc->rx_queue); writeDebug(WarningLevel, "TX queue:\n"); dumpRingQueueShmem(ipc->tx_queue); @@ -234,6 +248,10 @@ int sendData(SharedMemoryIPC *ipc, const uint16_t data_to_send_size, const char *data_to_send) { writeDebug(TraceLevel, "Sending data of size %u\n", data_to_send_size); + if (!ipc || !ipc->tx_queue) { + writeDebug(WarningLevel, "sendData called with NULL ipc pointer\n"); + return -1; + } return pushToQueue(ipc->tx_queue, data_to_send, data_to_send_size); } @@ -247,12 +265,22 @@ sendChunkedData( { writeDebug(TraceLevel, "Sending %u chunks of data\n", num_of_data_elem); + if (!ipc) { + writeDebug(WarningLevel, "sendChunkedData called with NULL ipc pointer\n"); + return -1; + } + return pushBuffersToQueue(ipc->tx_queue, data_elem_to_send, data_to_send_sizes, num_of_data_elem); } int receiveData(SharedMemoryIPC *ipc, uint16_t *received_data_size, const char **received_data) { + if (!ipc) { + writeDebug(WarningLevel, "receiveData called with NULL ipc pointer\n"); + return -1; + } + int res = peekToQueue(ipc->rx_queue, received_data, received_data_size); writeDebug(TraceLevel, "Received data from queue. Res: %d, data size: %u\n", res, *received_data_size); return res; @@ -261,6 +289,10 @@ receiveData(SharedMemoryIPC *ipc, uint16_t *received_data_size, const char **rec int popData(SharedMemoryIPC *ipc) { + if (!ipc) { + writeDebug(WarningLevel, "popData called with NULL ipc pointer\n"); + return -1; + } int res = popFromQueue(ipc->rx_queue); writeDebug(TraceLevel, "Popped data from queue. 
Res: %d\n", res); return res; @@ -269,6 +301,10 @@ popData(SharedMemoryIPC *ipc) int isDataAvailable(SharedMemoryIPC *ipc) { + if (!ipc) { + writeDebug(WarningLevel, "isDataAvailable called with NULL ipc pointer\n"); + return 0; + } int res = !isQueueEmpty(ipc->rx_queue); writeDebug(TraceLevel, "Checking if there is data pending to be read. Res: %d\n", res); return res; @@ -277,6 +313,11 @@ isDataAvailable(SharedMemoryIPC *ipc) int isCorruptedShmem(SharedMemoryIPC *ipc, int is_owner) { + if (!ipc) { + writeDebug(WarningLevel, "isCorruptedShmem called with NULL ipc pointer\n"); + return 1; + } + if (isCorruptedQueue(ipc->rx_queue, isTowardsOwner(is_owner, 0)) || isCorruptedQueue(ipc->tx_queue, isTowardsOwner(is_owner, 1)) ) { diff --git a/core/shmem_ipc_2/shmem_ipc.c b/core/shmem_ipc_2/shmem_ipc.c index a1c5162..8e663f6 100755 --- a/core/shmem_ipc_2/shmem_ipc.c +++ b/core/shmem_ipc_2/shmem_ipc.c @@ -297,6 +297,9 @@ initIpc( void resetIpc(SharedMemoryIPC *ipc, uint16_t num_of_data_segments) { + if (!ipc || !ipc->rx_queue || !ipc->tx_queue) { + return; + } writeDebug(&(ipc->logging_data), TraceLevel, "Reseting IPC queues\n"); resetRingQueue(&(ipc->logging_data), ipc->rx_queue, num_of_data_segments); resetRingQueue(&(ipc->logging_data), ipc->tx_queue, num_of_data_segments); @@ -335,6 +338,9 @@ destroyIpc(SharedMemoryIPC *shmem, int is_owner) void dumpIpcMemory(SharedMemoryIPC *ipc) { + if (!ipc) { + return; + } writeDebug(&(ipc->logging_data), WarningLevel, "Ipc memory dump:\n"); writeDebug(&(ipc->logging_data), WarningLevel, "RX queue:\n"); dumpRingQueueShmem(&(ipc->logging_data), ipc->rx_queue); @@ -345,6 +351,9 @@ dumpIpcMemory(SharedMemoryIPC *ipc) int sendData(SharedMemoryIPC *ipc, const uint16_t data_to_send_size, const char *data_to_send) { + if (!ipc || !ipc->tx_queue) { + return -1; + } writeDebug(&(ipc->logging_data), TraceLevel, "Sending data of size %u\n", data_to_send_size); return pushToQueue(&(ipc->logging_data), ipc->tx_queue, ipc->global_data, 
data_to_send, data_to_send_size); } @@ -357,6 +366,10 @@ sendChunkedData( const uint8_t num_of_data_elem ) { + if (!ipc || !ipc->tx_queue) { + return -1; + } + writeDebug(&(ipc->logging_data), TraceLevel, "Sending %u chunks of data\n", num_of_data_elem); return pushBuffersToQueue( @@ -372,6 +385,10 @@ sendChunkedData( int receiveData(SharedMemoryIPC *ipc, uint16_t *received_data_size, const char **received_data) { + if (!ipc) { + return -1; + } + int res = peekToQueue(&(ipc->logging_data), ipc->rx_queue, ipc->global_data, received_data, received_data_size); writeDebug( &(ipc->logging_data), @@ -386,6 +403,10 @@ receiveData(SharedMemoryIPC *ipc, uint16_t *received_data_size, const char **rec int popData(SharedMemoryIPC *ipc) { + if (!ipc || !ipc->rx_queue) { + return -1; + } + int res = popFromQueue(&(ipc->logging_data), ipc->rx_queue, ipc->global_data); writeDebug(&(ipc->logging_data), TraceLevel, "Popped data from queue. Res: %d\n", res); return res; @@ -394,6 +415,10 @@ popData(SharedMemoryIPC *ipc) int isDataAvailable(SharedMemoryIPC *ipc) { + if (!ipc || !ipc->rx_queue) { + return 0; + } + int res = !isQueueEmpty(ipc->rx_queue); writeDebug(&(ipc->logging_data), TraceLevel, "Checking if there is data pending to be read. 
Res: %d\n", res); return res; @@ -402,6 +427,10 @@ isDataAvailable(SharedMemoryIPC *ipc) int isCorruptedShmem(SharedMemoryIPC *ipc, int is_owner) { + if (!ipc) { + return 1; + } + if (isCorruptedQueue(&(ipc->logging_data), ipc->rx_queue, ipc->global_data, isTowardsOwner(is_owner, 0)) || isCorruptedQueue(&(ipc->logging_data), ipc->tx_queue, ipc->global_data, isTowardsOwner(is_owner, 1)) ) { diff --git a/docker/Dockerfile b/docker/Dockerfile index 6144724..de3db6b 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -5,6 +5,7 @@ USER root RUN apk update RUN apk add --no-cache -u busybox RUN apk add --no-cache -u zlib +RUN apk add --no-cache -u brotli brotli-dev RUN apk add --no-cache libstdc++ RUN mkdir -p /usr/lib/nginx/modules/ RUN mkdir -p /usr/lib64/nginx/modules/ diff --git a/nodes/nginx_attachment/CMakeLists.txt b/nodes/nginx_attachment/CMakeLists.txt index 73b0045..18687ab 100644 --- a/nodes/nginx_attachment/CMakeLists.txt +++ b/nodes/nginx_attachment/CMakeLists.txt @@ -1,5 +1,11 @@ install(FILES install-nginx-attachment.sh DESTINATION nginx_attachment/ PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ) +execute_process ( + COMMAND sh -c "find /usr/lib* -name \"libbrotli*.so*\" | awk '{printf \$0\";\"}'" + OUTPUT_VARIABLE brotli +) +install(FILES ${brotli} DESTINATION nginx_attachment/lib) + gen_package( install-cp-nano-nginx-attachment.sh nginx_attachment diff --git a/nodes/nginx_attachment/install-nginx-attachment.sh b/nodes/nginx_attachment/install-nginx-attachment.sh index 617657f..669fb54 100755 --- a/nodes/nginx_attachment/install-nginx-attachment.sh +++ b/nodes/nginx_attachment/install-nginx-attachment.sh @@ -40,6 +40,7 @@ run_installation() { cp_print "Starting installation of Check Point ${NANO_SERVICE_NAME} Nano service [$INSTALLATION_TIME]\n" $FORCE_STDOUT + cp_exec "cp lib/libbrotli*.so* /usr/lib/" cp_exec "cp lib/libosrc_compression_utils.so /usr/lib/libosrc_compression_utils.so" cp_exec "cp 
lib/libosrc_compression_utils.so /usr/lib64/libosrc_compression_utils.so" cp_exec "cp lib/libosrc_nginx_attachment_util.so /usr/lib/libosrc_nginx_attachment_util.so"