Mirror of https://github.com/openappsec/openappsec.git (synced 2025-11-15 17:02:15 +03:00)
Compare commits: orianelou-...main (173 commits)

Commit SHAs:

78d1bcf7c4 c90862d74c b7923dfd8c ed4e20b010 14159402e2 b74957d9d4 0c0da6d91b ef887dd1c7 6bbc89712a
dd19bf6158 60facef890 a3ac05642c 682b91684d ff8c5701fe 796c6cf935 31ff6f2c72 eac686216b 938cae1270
87cdeef42f d04ea7d3e2 6d649cf5d5 5f71946590 c75f1e88b7 c4975497eb 782dfeada6 bc1eac9d39 4dacd7d009
3a34984def 5aaf787cfa 2c7b5818e8 c8743d4d4b d703f16e35 692c430e8a 72c5594b10 2c6b6baa3b 37d0f1c45f
2678db9d2f 52c93ad574 bd3a53041e 44f40fbd1b 0691f9b9cd 0891dcd251 7669f0c89c 39d7884bed b8783c3065
37dc9f14b4 9a1f1b5966 b0bfd3077c 0469f5aa1f 3578797214 16a72fdf3e 87d257f268 36d8006c26 8d47795d4d
f3656712b0 b1781234fd f71dca2bfa bd333818ad 95e776d7a4 51c2912434 0246b73bbd 919921f6d3 e9098e2845
97d042589b df7be864e2 ba8ec26344 97add465e8 38cb1f2c3b 1dd9371840 f23d22a723 b51cf09190 ceb6469a7e
b0ae283eed 5fcb9bdc4a fb5698360b 147626bc7f 448991ef75 2b1ee84280 77dd288eee 3cb4def82e a0dd7dd614
88eed946ec 3e1ad8b0f7 bd35c421c6 9d6e883724 cd020a7ddd bb35eaf657 648f9ae2b1 47e47d706a b852809d1a
a77732f84c a1a8e28019 a99c2ec4a3 f1303c1703 bd8174ead3 4ddcd2462a 81433bac25 8d03b49176 84f9624c00
3ecda7b979 8f05508e02 f5b9c93fbe 62b74c9a10 e3163cd4fa 1e98fc8c66 6fbe272378 7b3320ce10 25cc2d66e7
66e2112afb ba7c9afd52 2aa0993d7e 0cdfc9df90 010814d656 3779dd360d 0e7dc2133d c9095acbef e47e29321d
25a66e77df 6eea40f165 cee6ed511a 4f145fd74f 3fe5c5b36f 7542a85ddb fae4534e5c 923a8a804b b1731237d1
3d3d6e73b9 3f80127ec5 abdee954bb 9a516899e8 4fd2aa6c6b 0db666ac4f 493d9a6627 6db87fc7fe d2b9bc8c9c
886a5befe1 1f2502f9e4 9e4c5014ce 024423cce9 dc4b546bd1 a86aca13b4 87b34590d4 e0198a1a95 d024ad5845
46d42c8fa3 f6c36f3363 63541a4c3c d14fa7a468 ae0de5bf14 d39919f348 4f215e1409 f05b5f8cee 949b656b13
bbe293d215 35b2df729f 7600b6218f 108abdb35e 64ebf013eb 2c91793f08 72a263d25a 4e14ff9a58 1fb28e14d6
e38bb9525c 63b8bb22c2 11c97330f5 e56fb0bc1a 4571d563f4 02c1db01f6 c557affd9b 8889c3c054 f67eff87bc
fa6a2e4233 b7e2efbf7e
36 .github/ISSUE_TEMPLATE/bug_report.md (vendored, Normal file)
@@ -0,0 +1,36 @@
---
name: "Bug Report"
about: "Report a bug with open-appsec"
labels: [bug]
---

**Checklist**
- Have you checked the open-appsec troubleshooting guides? https://docs.openappsec.io/troubleshooting/troubleshooting
  - Yes / No
- Have you checked the existing issues and discussions in GitHub for the same issue?
  - Yes / No
- Have you checked the known limitations for the same issue? https://docs.openappsec.io/release-notes#limitations
  - Yes / No

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Run '...'
3. See error '...'

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots or Logs**
If applicable, add screenshots or logs to help explain the issue.

**Environment (please complete the following information):**
- open-appsec version:
- Deployment type (Docker, Kubernetes, etc.):
- OS:

**Additional context**
Add any other context about the problem here.
8 .github/ISSUE_TEMPLATE/config.yml (vendored, Normal file)
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: "Documentation & Troubleshooting"
    url: "https://docs.openappsec.io/"
    about: "Check the documentation before submitting an issue."
  - name: "Feature Requests & Discussions"
    url: "https://github.com/openappsec/openappsec/discussions"
    about: "Please open a discussion for feature requests."
17 .github/ISSUE_TEMPLATE/nginx_version_support.md (vendored, Normal file)
@@ -0,0 +1,17 @@
---
name: "Nginx Version Support Request"
about: "Request for a specific Nginx version to be supported"
---

**Nginx & OS Version:**
Which Nginx and OS version are you using?

**Output of nginx -V**
Share the output of `nginx -V`.

**Expected Behavior:**
What do you expect to happen with this version?

**Checklist**
- Have you considered a Docker-based deployment? Find more information here: https://docs.openappsec.io/getting-started/start-with-docker
  - Yes / No
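For reporters filling out the template above, a minimal sketch of capturing the requested build details (assumes `nginx` is on the PATH; note the capital `-V`, which also prints the configure arguments and writes to stderr):

```bash
# Capture the full nginx build information; nginx -V writes to stderr
nginx -V 2> nginx-build-info.txt
cat nginx-build-info.txt
```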
22 README.md
@@ -6,7 +6,7 @@
[](https://bestpractices.coreinfrastructure.org/projects/6629)

# About
[open-appsec](https://www.openappsec.io) (openappsec.io) builds on machine learning to provide preemptive web app & API threat protection against OWASP-Top-10 and zero-day attacks. It can be deployed as an add-on to Kubernetes Ingress, NGINX, Envoy (soon), and API Gateways.
[open-appsec](https://www.openappsec.io) (openappsec.io) builds on machine learning to provide preemptive web app & API threat protection against OWASP-Top-10 and zero-day attacks. It can be deployed as an add-on to Linux, Docker or K8s deployments, on NGINX, Kong, APISIX, or Envoy.

The open-appsec engine learns how users normally interact with your web application. It then uses this information to automatically detect requests that fall outside of normal operations, and conducts further analysis to decide whether the request is malicious or not.

@@ -39,13 +39,13 @@ open-appsec can be managed using multiple methods:
* [Using SaaS Web Management](https://docs.openappsec.io/getting-started/using-the-web-ui-saas)

open-appsec Web UI:

<img width="1854" height="775" alt="image" src="https://github.com/user-attachments/assets/4c6f7b0a-14f3-4f02-9ab0-ddadc9979b8d" />

## Deployment Playgrounds (Virtual labs)
You can experiment with open-appsec using [Playgrounds](https://www.openappsec.io/playground)

<img width="781" height="878" alt="image" src="https://github.com/user-attachments/assets/0ddee216-5cdf-4288-8c41-cc28cfbf3297" />

# Resources
* [Project Website](https://openappsec.io)

@@ -54,21 +54,15 @@ You can experiment with open-appsec using [Playgrounds](https://www.openappsec.i

# Installation

For Kubernetes (NGINX Ingress) using the installer:
For Kubernetes (NGINX / Kong / APISIX / Istio) using Helm: follow the [documentation](https://docs.openappsec.io/getting-started/start-with-kubernetes)

```bash
$ wget https://downloads.openappsec.io/open-appsec-k8s-install && chmod +x open-appsec-k8s-install
$ ./open-appsec-k8s-install
```

For Kubernetes (NGINX or Kong) using Helm: follow the [documentation](https://docs.openappsec.io/getting-started/start-with-kubernetes/install-using-helm-ingress-nginx-and-kong) – use this method if you’ve built your own containers.

For Linux (NGINX or Kong) using the installer (list of supported/pre-compiled NGINX attachments is available [here](https://downloads.openappsec.io/packages/supported-nginx.txt)):
For Linux (NGINX / Kong / APISIX) using the installer (list of supported/pre-compiled NGINX attachments is available [here](https://downloads.openappsec.io/packages/supported-nginx.txt)):

```bash
$ wget https://downloads.openappsec.io/open-appsec-install && chmod +x open-appsec-install
$ ./open-appsec-install --auto
```
For the Kong Lua-based plugin, follow the [documentation](https://docs.openappsec.io/getting-started/start-with-linux)

For Linux, if you’ve built your own package, use the following commands:

@@ -177,7 +171,7 @@ open-appsec code was audited by an independent third party in September-October
See the [full report](https://github.com/openappsec/openappsec/blob/main/LEXFO-CHP20221014-Report-Code_audit-OPEN-APPSEC-v1.2.pdf).

### Reporting security vulnerabilities
If you've found a vulnerability or a potential vulnerability in open-appsec please let us know at securityalert@openappsec.io. We'll send a confirmation email to acknowledge your report within 24 hours, and we'll send an additional email when we've identified the issue positively or negatively.
If you've found a vulnerability or a potential vulnerability in open-appsec please let us know at security-alert@openappsec.io. We'll send a confirmation email to acknowledge your report within 24 hours, and we'll send an additional email when we've identified the issue positively or negatively.

# License
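The Linux installer section above links a list of supported/pre-compiled NGINX attachments; a minimal sketch for checking whether a local build appears in that list (assumes the list contains plain x.y.z version strings):

```bash
# Extract the local nginx version and look it up in the published list
NGINX_VER="$(nginx -v 2>&1 | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -n1)"
curl -s https://downloads.openappsec.io/packages/supported-nginx.txt | grep "$NGINX_VER"
```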
@@ -95,6 +95,18 @@ getFailOpenHoldTimeout()
|
||||
return conf_data.getNumericalValue("fail_open_hold_timeout");
|
||||
}
|
||||
|
||||
unsigned int
|
||||
getHoldVerdictPollingTime()
|
||||
{
|
||||
return conf_data.getNumericalValue("hold_verdict_polling_time");
|
||||
}
|
||||
|
||||
unsigned int
|
||||
getHoldVerdictRetries()
|
||||
{
|
||||
return conf_data.getNumericalValue("hold_verdict_retries");
|
||||
}
|
||||
|
||||
unsigned int
|
||||
getMaxSessionsPerMinute()
|
||||
{
|
||||
@@ -173,6 +185,12 @@ getReqBodySizeTrigger()
|
||||
return conf_data.getNumericalValue("body_size_trigger");
|
||||
}
|
||||
|
||||
unsigned int
|
||||
getRemoveResServerHeader()
|
||||
{
|
||||
return conf_data.getNumericalValue("remove_server_header");
|
||||
}
|
||||
|
||||
int
|
||||
isIPAddress(c_str ip_str)
|
||||
{
|
||||
|
||||
@@ -66,7 +66,10 @@ TEST_F(HttpAttachmentUtilTest, GetValidAttachmentConfiguration)
|
||||
"\"static_resources_path\": \"" + static_resources_path + "\",\n"
|
||||
"\"min_retries_for_verdict\": 1,\n"
|
||||
"\"max_retries_for_verdict\": 3,\n"
|
||||
"\"body_size_trigger\": 777\n"
|
||||
"\"hold_verdict_retries\": 3,\n"
|
||||
"\"hold_verdict_polling_time\": 1,\n"
|
||||
"\"body_size_trigger\": 777,\n"
|
||||
"\"remove_server_header\": 1\n"
|
||||
"}\n";
|
||||
ofstream valid_configuration_file(attachment_configuration_file_name);
|
||||
valid_configuration_file << valid_configuration;
|
||||
@@ -95,6 +98,9 @@ TEST_F(HttpAttachmentUtilTest, GetValidAttachmentConfiguration)
|
||||
EXPECT_EQ(getReqBodySizeTrigger(), 777u);
|
||||
EXPECT_EQ(getWaitingForVerdictThreadTimeout(), 75u);
|
||||
EXPECT_EQ(getInspectionMode(), ngx_http_inspection_mode::BLOCKING_THREAD);
|
||||
EXPECT_EQ(getRemoveResServerHeader(), 1u);
|
||||
EXPECT_EQ(getHoldVerdictRetries(), 3u);
|
||||
EXPECT_EQ(getHoldVerdictPollingTime(), 1u);
|
||||
|
||||
EXPECT_EQ(isDebugContext("1.2.3.4", "5.6.7.8", 80, "GET", "test", "/abc"), 1);
|
||||
EXPECT_EQ(isDebugContext("1.2.3.9", "5.6.7.8", 80, "GET", "test", "/abc"), 0);
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
install(FILES Dockerfile entry.sh install-cp-agent-intelligence-service.sh install-cp-crowdsec-aux.sh DESTINATION .)
|
||||
install(FILES Dockerfile entry.sh install-cp-agent-intelligence-service.sh install-cp-crowdsec-aux.sh self_managed_openappsec_manifest.json DESTINATION .)
|
||||
|
||||
add_custom_command(
|
||||
OUTPUT ${CMAKE_INSTALL_PREFIX}/agent-docker.img
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
FROM alpine
|
||||
|
||||
ENV OPENAPPSEC_NANO_AGENT=TRUE
|
||||
|
||||
RUN apk add --no-cache -u busybox
|
||||
RUN apk add --no-cache -u zlib
|
||||
RUN apk add --no-cache bash
|
||||
@@ -11,8 +13,12 @@ RUN apk add --no-cache libunwind
|
||||
RUN apk add --no-cache gdb
|
||||
RUN apk add --no-cache libxml2
|
||||
RUN apk add --no-cache pcre2
|
||||
RUN apk add --no-cache ca-certificates
|
||||
RUN apk add --update coreutils
|
||||
|
||||
|
||||
COPY self_managed_openappsec_manifest.json /tmp/self_managed_openappsec_manifest.json
|
||||
|
||||
COPY install*.sh /nano-service-installers/
|
||||
COPY entry.sh /entry.sh
|
||||
|
||||
|
||||
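A hedged sketch of building the agent image from the Dockerfile hunk above (the tag is hypothetical; run from the install directory that contains the Dockerfile, entry.sh, the install*.sh scripts, and self_managed_openappsec_manifest.json):

```bash
# Hypothetical tag; the build context must contain the files COPY'd above
docker build -t openappsec-agent:local .
```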
@@ -6,6 +6,8 @@ HTTP_TRANSACTION_HANDLER_SERVICE="install-cp-nano-service-http-transaction-handl
|
||||
ATTACHMENT_REGISTRATION_SERVICE="install-cp-nano-attachment-registration-manager.sh"
|
||||
ORCHESTRATION_INSTALLATION_SCRIPT="install-cp-nano-agent.sh"
|
||||
CACHE_INSTALLATION_SCRIPT="install-cp-nano-agent-cache.sh"
|
||||
PROMETHEUS_INSTALLATION_SCRIPT="install-cp-nano-service-prometheus.sh"
|
||||
NGINX_CENTRAL_MANAGER_INSTALLATION_SCRIPT="install-cp-nano-central-nginx-manager.sh"
|
||||
|
||||
var_fog_address=
|
||||
var_proxy=
|
||||
@@ -13,6 +15,21 @@ var_mode=
|
||||
var_token=
|
||||
var_ignore=
|
||||
init=
|
||||
active_watchdog_pid=
|
||||
|
||||
cleanup() {
|
||||
local signal="$1"
|
||||
echo "[$(date '+%Y-%m-%d %H:%M:%S')] Signal ${signal} was received, exiting gracefully..." >&2
|
||||
if [ -n "${active_watchdog_pid}" ] && ps -p ${active_watchdog_pid} > /dev/null 2>&1; then
|
||||
kill -TERM ${active_watchdog_pid} 2>/dev/null || true
|
||||
wait ${active_watchdog_pid} 2>/dev/null || true
|
||||
fi
|
||||
echo "Cleanup completed. Exiting now." >&2
|
||||
exit 0
|
||||
}
|
||||
|
||||
trap 'cleanup SIGTERM' SIGTERM
|
||||
trap 'cleanup SIGINT' SIGINT
|
||||
|
||||
if [ ! -f /nano-service-installers/$ORCHESTRATION_INSTALLATION_SCRIPT ]; then
|
||||
echo "Error: agent installation package doesn't exist."
|
||||
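With the cleanup() handler and traps added above, stopping the container triggers a graceful shutdown of the watchdog; a minimal sketch (container name is hypothetical):

```bash
# docker stop sends SIGTERM, which the new trap forwards to the watchdog
docker stop openappsec-agent
```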
@@ -81,6 +98,14 @@ fi
|
||||
/nano-service-installers/$CACHE_INSTALLATION_SCRIPT --install
|
||||
/nano-service-installers/$HTTP_TRANSACTION_HANDLER_SERVICE --install
|
||||
|
||||
if [ "$PROMETHEUS" == "true" ]; then
|
||||
/nano-service-installers/$PROMETHEUS_INSTALLATION_SCRIPT --install
|
||||
fi
|
||||
|
||||
if [ "$CENTRAL_NGINX_MANAGER" == "true" ]; then
|
||||
/nano-service-installers/$NGINX_CENTRAL_MANAGER_INSTALLATION_SCRIPT --install
|
||||
fi
|
||||
|
||||
if [ "$CROWDSEC_ENABLED" == "true" ]; then
|
||||
/nano-service-installers/$INTELLIGENCE_INSTALLATION_SCRIPT --install
|
||||
/nano-service-installers/$CROWDSEC_INSTALLATION_SCRIPT --install
|
||||
@@ -93,25 +118,16 @@ if [ -f "$FILE" ]; then
|
||||
fi
|
||||
|
||||
touch /etc/cp/watchdog/wd.startup
|
||||
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
|
||||
active_watchdog_pid=$!
|
||||
while true; do
|
||||
if [ -z "$init" ]; then
|
||||
init=true
|
||||
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
|
||||
sleep 5
|
||||
active_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")
|
||||
fi
|
||||
|
||||
current_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")
|
||||
if [ ! -f /tmp/restart_watchdog ] && [ "$current_watchdog_pid" != "$active_watchdog_pid" ]; then
|
||||
echo "Error: Watchdog exited abnormally"
|
||||
exit 1
|
||||
elif [ -f /tmp/restart_watchdog ]; then
|
||||
if [ -f /tmp/restart_watchdog ]; then
|
||||
rm -f /tmp/restart_watchdog
|
||||
kill -9 "$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")"
|
||||
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
|
||||
sleep 5
|
||||
active_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")
|
||||
kill -9 ${active_watchdog_pid}
|
||||
fi
|
||||
if [ ! "$(ps -f | grep cp-nano-watchdog | grep ${active_watchdog_pid})" ]; then
|
||||
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
|
||||
active_watchdog_pid=$!
|
||||
fi
|
||||
|
||||
sleep 5
|
||||
done
|
||||
|
||||
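The rewritten loop above restarts the watchdog when a flag file appears; a minimal sketch of triggering that path (run inside the agent container):

```bash
# The entrypoint loop removes this file, kills the active watchdog, and starts a new one
touch /tmp/restart_watchdog
```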
@@ -7,3 +7,4 @@ add_subdirectory(pending_key)
|
||||
add_subdirectory(utils)
|
||||
add_subdirectory(attachment-intakers)
|
||||
add_subdirectory(security_apps)
|
||||
add_subdirectory(nginx_message_reader)
|
||||
|
||||
@@ -31,10 +31,12 @@
|
||||
#include <stdarg.h>
|
||||
|
||||
#include <boost/range/iterator_range.hpp>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/regex.hpp>
|
||||
|
||||
#include "nginx_attachment_config.h"
|
||||
#include "nginx_attachment_opaque.h"
|
||||
#include "generic_rulebase/evaluators/trigger_eval.h"
|
||||
#include "nginx_parser.h"
|
||||
#include "i_instance_awareness.h"
|
||||
#include "common.h"
|
||||
@@ -129,6 +131,7 @@ class NginxAttachment::Impl
|
||||
Singleton::Provide<I_StaticResourcesHandler>::From<NginxAttachment>
|
||||
{
|
||||
static constexpr auto INSPECT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT;
|
||||
static constexpr auto LIMIT_RESPONSE_HEADERS = ngx_http_cp_verdict_e::LIMIT_RESPONSE_HEADERS;
|
||||
static constexpr auto ACCEPT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
||||
static constexpr auto DROP = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
|
||||
static constexpr auto INJECT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT;
|
||||
@@ -260,6 +263,22 @@ public:
|
||||
);
|
||||
}
|
||||
|
||||
const char* ignored_headers_env = getenv("SAAS_IGNORED_UPSTREAM_HEADERS");
|
||||
if (ignored_headers_env) {
|
||||
string ignored_headers_str = ignored_headers_env;
|
||||
ignored_headers_str = NGEN::Strings::trim(ignored_headers_str);
|
||||
|
||||
if (!ignored_headers_str.empty()) {
|
||||
dbgInfo(D_HTTP_MANAGER)
|
||||
<< "Ignoring SAAS_IGNORED_UPSTREAM_HEADERS environment variable: "
|
||||
<< ignored_headers_str;
|
||||
|
||||
vector<string> ignored_headers_vec;
|
||||
boost::split(ignored_headers_vec, ignored_headers_str, boost::is_any_of(";"));
|
||||
for (const string &header : ignored_headers_vec) ignored_headers.insert(header);
|
||||
}
|
||||
}
|
||||
|
||||
dbgInfo(D_NGINX_ATTACHMENT) << "Successfully initialized NGINX Attachment";
|
||||
}
|
||||
|
||||
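The hunk above reads SAAS_IGNORED_UPSTREAM_HEADERS and splits it on ';'. A minimal sketch of setting it in the agent's environment (the header names are hypothetical):

```bash
# Semicolon-separated header names to drop from request-header inspection
export SAAS_IGNORED_UPSTREAM_HEADERS="X-Internal-Trace;X-Debug-Id"
```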
@@ -1034,7 +1053,11 @@ private:
|
||||
case ChunkType::REQUEST_START:
|
||||
return handleStartTransaction(data, opaque);
|
||||
case ChunkType::REQUEST_HEADER:
|
||||
return handleMultiModifiableChunks(NginxParser::parseRequestHeaders(data), "request header", true);
|
||||
return handleMultiModifiableChunks(
|
||||
NginxParser::parseRequestHeaders(data, ignored_headers),
|
||||
"request header",
|
||||
true
|
||||
);
|
||||
case ChunkType::REQUEST_BODY:
|
||||
return handleModifiableChunk(NginxParser::parseRequestBody(data), "request body", true);
|
||||
case ChunkType::REQUEST_END: {
|
||||
@@ -1125,10 +1148,18 @@ private:
|
||||
handleCustomWebResponse(
|
||||
SharedMemoryIPC *ipc,
|
||||
vector<const char *> &verdict_data,
|
||||
vector<uint16_t> &verdict_data_sizes)
|
||||
vector<uint16_t> &verdict_data_sizes,
|
||||
string web_user_response_id)
|
||||
{
|
||||
ngx_http_cp_web_response_data_t web_response_data;
|
||||
|
||||
ScopedContext ctx;
|
||||
if (web_user_response_id != "") {
|
||||
dbgTrace(D_NGINX_ATTACHMENT)
|
||||
<< "web user response ID registered in contex: "
|
||||
<< web_user_response_id;
|
||||
set<string> triggers_set{web_user_response_id};
|
||||
ctx.registerValue<set<GenericConfigId>>(TriggerMatcher::ctx_key, triggers_set);
|
||||
}
|
||||
WebTriggerConf web_trigger_conf = getConfigurationWithDefault<WebTriggerConf>(
|
||||
WebTriggerConf::default_trigger_conf,
|
||||
"rulebase",
|
||||
@@ -1250,7 +1281,7 @@ private:
|
||||
if (verdict.getVerdict() == DROP) {
|
||||
nginx_attachment_event.addTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::DROP);
|
||||
verdict_to_send.modification_count = 1;
|
||||
return handleCustomWebResponse(ipc, verdict_fragments, fragments_sizes);
|
||||
return handleCustomWebResponse(ipc, verdict_fragments, fragments_sizes, verdict.getWebUserResponseID());
|
||||
}
|
||||
|
||||
if (verdict.getVerdict() == ACCEPT) {
|
||||
@@ -1476,11 +1507,17 @@ private:
|
||||
opaque.activateContext();
|
||||
|
||||
FilterVerdict verdict = handleChunkedData(*chunked_data_type, inspection_data, opaque);
|
||||
|
||||
bool is_header =
|
||||
*chunked_data_type == ChunkType::REQUEST_HEADER ||
|
||||
*chunked_data_type == ChunkType::RESPONSE_HEADER ||
|
||||
*chunked_data_type == ChunkType::CONTENT_LENGTH;
|
||||
|
||||
if (verdict.getVerdict() == LIMIT_RESPONSE_HEADERS) {
|
||||
handleVerdictResponse(verdict, attachment_ipc, transaction_data->session_id, is_header);
|
||||
popData(attachment_ipc);
|
||||
verdict = FilterVerdict(INSPECT);
|
||||
}
|
||||
|
||||
handleVerdictResponse(verdict, attachment_ipc, transaction_data->session_id, is_header);
|
||||
|
||||
bool is_final_verdict = verdict.getVerdict() == ACCEPT ||
|
||||
@@ -1593,6 +1630,8 @@ private:
|
||||
return "INJECT";
|
||||
case INSPECT:
|
||||
return "INSPECT";
|
||||
case LIMIT_RESPONSE_HEADERS:
|
||||
return "LIMIT_RESPONSE_HEADERS";
|
||||
case IRRELEVANT:
|
||||
return "IRRELEVANT";
|
||||
case RECONF:
|
||||
@@ -1814,6 +1853,7 @@ private:
|
||||
HttpAttachmentConfig attachment_config;
|
||||
I_MainLoop::RoutineID attachment_routine_id = 0;
|
||||
bool traffic_indicator = false;
|
||||
unordered_set<string> ignored_headers;
|
||||
|
||||
// Interfaces
|
||||
I_Socket *i_socket = nullptr;
|
||||
|
||||
@@ -203,6 +203,13 @@ HttpAttachmentConfig::setFailOpenTimeout()
|
||||
"NGINX wait thread timeout msec"
|
||||
));
|
||||
|
||||
conf_data.setNumericalValue("remove_server_header", getAttachmentConf<uint>(
|
||||
0,
|
||||
"agent.removeServerHeader.nginxModule",
|
||||
"HTTP manager",
|
||||
"Response server header removal"
|
||||
));
|
||||
|
||||
uint inspection_mode = getAttachmentConf<uint>(
|
||||
static_cast<uint>(ngx_http_inspection_mode_e::NON_BLOCKING_THREAD),
|
||||
"agent.inspectionMode.nginxModule",
|
||||
@@ -233,6 +240,21 @@ HttpAttachmentConfig::setRetriesForVerdict()
|
||||
"Max retries for verdict"
|
||||
));
|
||||
|
||||
conf_data.setNumericalValue("hold_verdict_retries", getAttachmentConf<uint>(
|
||||
3,
|
||||
"agent.retriesForHoldVerdict.nginxModule",
|
||||
"HTTP manager",
|
||||
"Retries for hold verdict"
|
||||
));
|
||||
|
||||
conf_data.setNumericalValue("hold_verdict_polling_time", getAttachmentConf<uint>(
|
||||
1,
|
||||
"agent.holdVerdictPollingInterval.nginxModule",
|
||||
"HTTP manager",
|
||||
"Hold verdict polling interval seconds"
|
||||
));
|
||||
|
||||
|
||||
conf_data.setNumericalValue("body_size_trigger", getAttachmentConf<uint>(
|
||||
200000,
|
||||
"agent.reqBodySizeTrigger.nginxModule",
|
||||
|
||||
@@ -19,12 +19,15 @@
|
||||
|
||||
#include "config.h"
|
||||
#include "virtual_modifiers.h"
|
||||
#include "agent_core_utilities.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace boost::uuids;
|
||||
|
||||
USE_DEBUG_FLAG(D_HTTP_MANAGER);
|
||||
|
||||
extern bool is_keep_alive_ctx;
|
||||
|
||||
NginxAttachmentOpaque::NginxAttachmentOpaque(HttpTransactionData _transaction_data)
|
||||
:
|
||||
TableOpaqueSerialize<NginxAttachmentOpaque>(this),
|
||||
@@ -67,6 +70,12 @@ NginxAttachmentOpaque::NginxAttachmentOpaque(HttpTransactionData _transaction_da
|
||||
ctx.registerValue(HttpTransactionData::uri_query_decoded, decoded_url.substr(question_mark_location + 1));
|
||||
}
|
||||
ctx.registerValue(HttpTransactionData::uri_path_decoded, decoded_url);
|
||||
|
||||
// Register waf_tag from transaction data if available
|
||||
const std::string& waf_tag = transaction_data.getWafTag();
|
||||
if (!waf_tag.empty()) {
|
||||
ctx.registerValue(HttpTransactionData::waf_tag_ctx, waf_tag);
|
||||
}
|
||||
}
|
||||
|
||||
NginxAttachmentOpaque::~NginxAttachmentOpaque()
|
||||
@@ -119,3 +128,47 @@ NginxAttachmentOpaque::setSavedData(const string &name, const string &data, EnvK
|
||||
saved_data[name] = data;
|
||||
ctx.registerValue(name, data, log_ctx);
|
||||
}
|
||||
|
||||
bool
|
||||
NginxAttachmentOpaque::setKeepAliveCtx(const string &hdr_key, const string &hdr_val)
|
||||
{
|
||||
if (!is_keep_alive_ctx) return false;
|
||||
|
||||
static pair<string, string> keep_alive_hdr;
|
||||
static bool keep_alive_hdr_initialized = false;
|
||||
|
||||
if (keep_alive_hdr_initialized) {
|
||||
if (!keep_alive_hdr.first.empty() && hdr_key == keep_alive_hdr.first && hdr_val == keep_alive_hdr.second) {
|
||||
dbgTrace(D_HTTP_MANAGER) << "Registering keep alive context";
|
||||
ctx.registerValue("keep_alive_request_ctx", true);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
const char* saas_keep_alive_hdr_name_env = getenv("SAAS_KEEP_ALIVE_HDR_NAME");
|
||||
if (saas_keep_alive_hdr_name_env) {
|
||||
keep_alive_hdr.first = NGEN::Strings::trim(saas_keep_alive_hdr_name_env);
|
||||
dbgInfo(D_HTTP_MANAGER) << "Using SAAS_KEEP_ALIVE_HDR_NAME environment variable: " << keep_alive_hdr.first;
|
||||
}
|
||||
|
||||
if (!keep_alive_hdr.first.empty()) {
|
||||
const char* saas_keep_alive_hdr_value_env = getenv("SAAS_KEEP_ALIVE_HDR_VALUE");
|
||||
if (saas_keep_alive_hdr_value_env) {
|
||||
keep_alive_hdr.second = NGEN::Strings::trim(saas_keep_alive_hdr_value_env);
|
||||
dbgInfo(D_HTTP_MANAGER)
|
||||
<< "Using SAAS_KEEP_ALIVE_HDR_VALUE environment variable: "
|
||||
<< keep_alive_hdr.second;
|
||||
}
|
||||
|
||||
if (!keep_alive_hdr.second.empty() && (hdr_key == keep_alive_hdr.first && hdr_val == keep_alive_hdr.second)) {
|
||||
dbgTrace(D_HTTP_MANAGER) << "Registering keep alive context";
|
||||
ctx.registerValue("keep_alive_request_ctx", true);
|
||||
keep_alive_hdr_initialized = true;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
keep_alive_hdr_initialized = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
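setKeepAliveCtx() above matches a request header against the SAAS_KEEP_ALIVE_HDR_NAME/SAAS_KEEP_ALIVE_HDR_VALUE pair and, on a match, registers the keep_alive_request_ctx context. A minimal sketch with hypothetical values:

```bash
# Both variables are trimmed and compared against the incoming header key/value
export SAAS_KEEP_ALIVE_HDR_NAME="X-Keepalive-Probe"
export SAAS_KEEP_ALIVE_HDR_VALUE="health-check"
```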
@@ -85,6 +85,7 @@ public:
|
||||
EnvKeyAttr::LogSection log_ctx = EnvKeyAttr::LogSection::NONE
|
||||
);
|
||||
void setApplicationState(const ApplicationState &app_state) { application_state = app_state; }
|
||||
bool setKeepAliveCtx(const std::string &hdr_key, const std::string &hdr_val);
|
||||
|
||||
private:
|
||||
CompressionStream *response_compression_stream;
|
||||
|
||||
@@ -29,6 +29,7 @@ USE_DEBUG_FLAG(D_NGINX_ATTACHMENT_PARSER);
|
||||
Buffer NginxParser::tenant_header_key = Buffer();
|
||||
static const Buffer proxy_ip_header_key("X-Forwarded-For", 15, Buffer::MemoryType::STATIC);
|
||||
static const Buffer source_ip("sourceip", 8, Buffer::MemoryType::STATIC);
|
||||
bool is_keep_alive_ctx = getenv("SAAS_KEEP_ALIVE_HDR_NAME") != nullptr;
|
||||
|
||||
map<Buffer, CompressionType> NginxParser::content_encodings = {
|
||||
{Buffer("identity"), CompressionType::NO_COMPRESSION},
|
||||
@@ -177,37 +178,70 @@ getActivetenantAndProfile(const string &str, const string &deli = ",")
|
||||
}
|
||||
|
||||
Maybe<vector<HttpHeader>>
|
||||
NginxParser::parseRequestHeaders(const Buffer &data)
|
||||
NginxParser::parseRequestHeaders(const Buffer &data, const unordered_set<string> &ignored_headers)
|
||||
{
|
||||
auto parsed_headers = genHeaders(data);
|
||||
if (!parsed_headers.ok()) return parsed_headers.passErr();
|
||||
auto maybe_parsed_headers = genHeaders(data);
|
||||
if (!maybe_parsed_headers.ok()) return maybe_parsed_headers.passErr();
|
||||
|
||||
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
|
||||
auto parsed_headers = maybe_parsed_headers.unpack();
|
||||
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
|
||||
|
||||
for (const HttpHeader &header : *parsed_headers) {
|
||||
if (is_keep_alive_ctx || !ignored_headers.empty()) {
|
||||
bool is_last_header_removed = false;
|
||||
parsed_headers.erase(
|
||||
remove_if(
|
||||
parsed_headers.begin(),
|
||||
parsed_headers.end(),
|
||||
[&opaque, &is_last_header_removed, &ignored_headers](const HttpHeader &header)
|
||||
{
|
||||
string hdr_key = static_cast<string>(header.getKey());
|
||||
string hdr_val = static_cast<string>(header.getValue());
|
||||
if (
|
||||
opaque.setKeepAliveCtx(hdr_key, hdr_val)
|
||||
|| ignored_headers.find(hdr_key) != ignored_headers.end()
|
||||
) {
|
||||
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Header was removed from headers list: " << hdr_key;
|
||||
if (header.isLastHeader()) {
|
||||
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Last header was removed from headers list";
|
||||
is_last_header_removed = true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
),
|
||||
parsed_headers.end()
|
||||
);
|
||||
if (is_last_header_removed) {
|
||||
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Adjusting last header flag";
|
||||
if (!parsed_headers.empty()) parsed_headers.back().setIsLastHeader();
|
||||
}
|
||||
}
|
||||
|
||||
for (const HttpHeader &header : parsed_headers) {
|
||||
auto source_identifiers = getConfigurationWithDefault<UsersAllIdentifiersConfig>(
|
||||
UsersAllIdentifiersConfig(),
|
||||
"rulebase",
|
||||
"usersIdentifiers"
|
||||
);
|
||||
source_identifiers.parseRequestHeaders(header);
|
||||
|
||||
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
|
||||
opaque.addToSavedData(
|
||||
HttpTransactionData::req_headers,
|
||||
static_cast<string>(header.getKey()) + ": " + static_cast<string>(header.getValue()) + "\r\n"
|
||||
);
|
||||
|
||||
if (NginxParser::tenant_header_key == header.getKey()) {
|
||||
const auto &header_key = header.getKey();
|
||||
if (NginxParser::tenant_header_key == header_key) {
|
||||
dbgDebug(D_NGINX_ATTACHMENT_PARSER)
|
||||
<< "Identified active tenant header. Key: "
|
||||
<< dumpHex(header.getKey())
|
||||
<< dumpHex(header_key)
|
||||
<< ", Value: "
|
||||
<< dumpHex(header.getValue());
|
||||
|
||||
auto active_tenant_and_profile = getActivetenantAndProfile(header.getValue());
|
||||
opaque.setSessionTenantAndProfile(active_tenant_and_profile[0], active_tenant_and_profile[1]);
|
||||
} else if (proxy_ip_header_key == header.getKey()) {
|
||||
} else if (proxy_ip_header_key == header_key) {
|
||||
source_identifiers.setXFFValuesToOpaqueCtx(header, UsersAllIdentifiersConfig::ExtractType::PROXYIP);
|
||||
}
|
||||
}
|
||||
@@ -345,12 +379,15 @@ NginxParser::parseResponseBody(const Buffer &raw_response_body, CompressionStrea
|
||||
Maybe<CompressionType>
|
||||
NginxParser::parseContentEncoding(const vector<HttpHeader> &headers)
|
||||
{
|
||||
static const Buffer content_encoding_header_key("Content-Encoding");
|
||||
dbgFlow(D_NGINX_ATTACHMENT_PARSER) << "Parsing \"Content-Encoding\" header";
|
||||
static const Buffer content_encoding_header_key("content-encoding");
|
||||
|
||||
auto it = find_if(
|
||||
headers.begin(),
|
||||
headers.end(),
|
||||
[&] (const HttpHeader &http_header) { return http_header.getKey() == content_encoding_header_key; }
|
||||
[&] (const HttpHeader &http_header) {
|
||||
return http_header.getKey().isEqualLowerCase(content_encoding_header_key);
|
||||
}
|
||||
);
|
||||
if (it == headers.end()) {
|
||||
dbgTrace(D_NGINX_ATTACHMENT_PARSER)
|
||||
|
||||
@@ -28,7 +28,10 @@ public:
|
||||
static Maybe<HttpTransactionData> parseStartTrasaction(const Buffer &data);
|
||||
static Maybe<ResponseCode> parseResponseCode(const Buffer &data);
|
||||
static Maybe<uint64_t> parseContentLength(const Buffer &data);
|
||||
static Maybe<std::vector<HttpHeader>> parseRequestHeaders(const Buffer &data);
|
||||
static Maybe<std::vector<HttpHeader>> parseRequestHeaders(
|
||||
const Buffer &data,
|
||||
const std::unordered_set<std::string> &ignored_headers
|
||||
);
|
||||
static Maybe<std::vector<HttpHeader>> parseResponseHeaders(const Buffer &data);
|
||||
static Maybe<HttpBody> parseRequestBody(const Buffer &data);
|
||||
static Maybe<HttpBody> parseResponseBody(const Buffer &raw_response_body, CompressionStream *compression_stream);
|
||||
|
||||
@@ -285,17 +285,21 @@ Maybe<string>
|
||||
UsersAllIdentifiersConfig::parseXForwardedFor(const string &str, ExtractType type) const
|
||||
{
|
||||
vector<string> header_values = split(str);
|
||||
|
||||
if (header_values.empty()) return genError("No IP found in the xff header list");
|
||||
|
||||
vector<string> xff_values = getHeaderValuesFromConfig("x-forwarded-for");
|
||||
vector<CIDRSData> cidr_values(xff_values.begin(), xff_values.end());
|
||||
string last_valid_ip;
|
||||
|
||||
for (auto it = header_values.rbegin(); it != header_values.rend() - 1; ++it) {
|
||||
if (!IPAddr::createIPAddr(*it).ok()) {
|
||||
dbgWarning(D_NGINX_ATTACHMENT_PARSER) << "Invalid IP address found in the xff header IPs list: " << *it;
|
||||
return genError("Invalid IP address");
|
||||
if (last_valid_ip.empty()) {
|
||||
return genError("Invalid IP address");
|
||||
}
|
||||
return last_valid_ip;
|
||||
}
|
||||
last_valid_ip = *it;
|
||||
if (type == ExtractType::PROXYIP) continue;
|
||||
if (!isIpTrusted(*it, cidr_values)) {
|
||||
dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Found untrusted IP in the xff header IPs list: " << *it;
|
||||
@@ -307,7 +311,10 @@ UsersAllIdentifiersConfig::parseXForwardedFor(const string &str, ExtractType typ
|
||||
dbgWarning(D_NGINX_ATTACHMENT_PARSER)
|
||||
<< "Invalid IP address found in the xff header IPs list: "
|
||||
<< header_values[0];
|
||||
return genError("Invalid IP address");
|
||||
if (last_valid_ip.empty()) {
|
||||
return genError("No Valid Ip address was found");
|
||||
}
|
||||
return last_valid_ip;
|
||||
}
|
||||
|
||||
return header_values[0];
|
||||
@@ -359,6 +366,24 @@ UsersAllIdentifiersConfig::setCustomHeaderToOpaqueCtx(const HttpHeader &header)
|
||||
return;
|
||||
}
|
||||
|
||||
void
|
||||
UsersAllIdentifiersConfig::setWafTagValuesToOpaqueCtx(const HttpHeader &header) const
|
||||
{
|
||||
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
|
||||
if (!i_transaction_table || !i_transaction_table->hasState<NginxAttachmentOpaque>()) {
|
||||
dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table";
|
||||
return;
|
||||
}
|
||||
|
||||
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
|
||||
opaque.setSavedData(HttpTransactionData::waf_tag_ctx, static_cast<string>(header.getValue()));
|
||||
|
||||
dbgDebug(D_NGINX_ATTACHMENT_PARSER)
|
||||
<< "Added waf tag to context: "
|
||||
<< static_cast<string>(header.getValue());
|
||||
return;
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
UsersAllIdentifiersConfig::parseCookieElement(
|
||||
const string::const_iterator &start,
|
||||
|
||||
@@ -142,7 +142,7 @@ private:
|
||||
if (temp_params_list.size() == 1) {
|
||||
Maybe<IPAddr> maybe_ip = IPAddr::createIPAddr(temp_params_list[0]);
|
||||
if (!maybe_ip.ok()) return genError("Could not create IP address, " + maybe_ip.getErr());
|
||||
IpAddress addr = move(ConvertToIpAddress(maybe_ip.unpackMove()));
|
||||
IpAddress addr = ConvertToIpAddress(maybe_ip.unpackMove());
|
||||
|
||||
return move(IPRange{.start = addr, .end = addr});
|
||||
}
|
||||
@@ -157,11 +157,11 @@ private:
|
||||
IPAddr max_addr = maybe_ip_max.unpackMove();
|
||||
if (min_addr > max_addr) return genError("Could not create ip range - start greater then end");
|
||||
|
||||
IpAddress addr_min = move(ConvertToIpAddress(move(min_addr)));
|
||||
IpAddress addr_max = move(ConvertToIpAddress(move(max_addr)));
|
||||
IpAddress addr_min = ConvertToIpAddress(move(min_addr));
|
||||
IpAddress addr_max = ConvertToIpAddress(move(max_addr));
|
||||
if (addr_max.ip_type != addr_min.ip_type) return genError("Range IP's type does not match");
|
||||
|
||||
return move(IPRange{.start = move(addr_min), .end = move(addr_max)});
|
||||
return IPRange{.start = move(addr_min), .end = move(addr_max)};
|
||||
}
|
||||
|
||||
return genError("Illegal range received: " + range);
|
||||
|
||||
@@ -15,19 +15,18 @@
|
||||
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <sys/stat.h>
|
||||
#include <climits>
|
||||
#include <unordered_map>
|
||||
#include <boost/range/iterator_range.hpp>
|
||||
#include <unordered_set>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <fstream>
|
||||
#include <algorithm>
|
||||
|
||||
#include "common.h"
|
||||
#include "config.h"
|
||||
#include "table_opaque.h"
|
||||
#include "http_manager_opaque.h"
|
||||
#include "log_generator.h"
|
||||
#include "http_inspection_events.h"
|
||||
#include "agent_core_utilities.h"
|
||||
|
||||
USE_DEBUG_FLAG(D_HTTP_MANAGER);
|
||||
|
||||
@@ -38,6 +37,7 @@ operator<<(ostream &os, const EventVerdict &event)
|
||||
{
|
||||
switch (event.getVerdict()) {
|
||||
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT: return os << "Inspect";
|
||||
case ngx_http_cp_verdict_e::LIMIT_RESPONSE_HEADERS: return os << "Limit Response Headers";
|
||||
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT: return os << "Accept";
|
||||
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP: return os << "Drop";
|
||||
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT: return os << "Inject";
|
||||
@@ -94,12 +94,14 @@ public:
|
||||
ctx.registerValue(app_sec_marker_key, i_transaction_table->keyToString(), EnvKeyAttr::LogSection::MARKER);
|
||||
|
||||
HttpManagerOpaque &state = i_transaction_table->getState<HttpManagerOpaque>();
|
||||
string event_key = static_cast<string>(event.getKey());
|
||||
if (event_key == getProfileAgentSettingWithDefault<string>("", "agent.customHeaderValueLogging")) {
|
||||
|
||||
const auto &custom_header = getProfileAgentSettingWithDefault<string>("", "agent.customHeaderValueLogging");
|
||||
|
||||
if (event.getKey().isEqualLowerCase(custom_header)) {
|
||||
string event_value = static_cast<string>(event.getValue());
|
||||
dbgTrace(D_HTTP_MANAGER)
|
||||
<< "Found header key and value - ("
|
||||
<< event_key
|
||||
<< custom_header
|
||||
<< ": "
|
||||
<< event_value
|
||||
<< ") that matched agent settings";
|
||||
@@ -195,7 +197,6 @@ public:
|
||||
if (state.getUserDefinedValue().ok()) {
|
||||
ctx.registerValue("UserDefined", state.getUserDefinedValue().unpack(), EnvKeyAttr::LogSection::DATA);
|
||||
}
|
||||
|
||||
return handleEvent(EndRequestEvent().performNamedQuery());
|
||||
}
|
||||
|
||||
@@ -323,8 +324,9 @@ private:
|
||||
<< respond.second.getVerdict();
|
||||
|
||||
state.setApplicationVerdict(respond.first, respond.second.getVerdict());
|
||||
state.setApplicationWebResponse(respond.first, respond.second.getWebUserResponseByPractice());
|
||||
}
|
||||
FilterVerdict aggregated_verdict = state.getCurrVerdict();
|
||||
FilterVerdict aggregated_verdict(state.getCurrVerdict(), state.getCurrWebUserResponse());
|
||||
if (aggregated_verdict.getVerdict() == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP) {
|
||||
SecurityAppsDropEvent(state.getCurrentDropVerdictCausers()).notify();
|
||||
}
|
||||
|
||||
@@ -32,6 +32,13 @@ HttpManagerOpaque::setApplicationVerdict(const string &app_name, ngx_http_cp_ver
|
||||
applications_verdicts[app_name] = verdict;
|
||||
}
|
||||
|
||||
void
|
||||
HttpManagerOpaque::setApplicationWebResponse(const string &app_name, string web_user_response_id)
|
||||
{
|
||||
dbgTrace(D_HTTP_MANAGER) << "Security app: " << app_name << ", has web user response: " << web_user_response_id;
|
||||
applications_web_user_response[app_name] = web_user_response_id;
|
||||
}
|
||||
|
||||
ngx_http_cp_verdict_e
|
||||
HttpManagerOpaque::getApplicationsVerdict(const string &app_name) const
|
||||
{
|
||||
@@ -51,8 +58,12 @@ HttpManagerOpaque::getCurrVerdict() const
|
||||
for (const auto &app_verdic_pair : applications_verdicts) {
|
||||
switch (app_verdic_pair.second) {
|
||||
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP:
|
||||
dbgTrace(D_HTTP_MANAGER) << "Verdict DROP for app: " << app_verdic_pair.first;
|
||||
current_web_user_response = applications_web_user_response.at(app_verdic_pair.first);
|
||||
dbgTrace(D_HTTP_MANAGER) << "current_web_user_response=" << current_web_user_response;
|
||||
return app_verdic_pair.second;
|
||||
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT:
|
||||
// Sent in ResponseHeaders and ResponseBody.
|
||||
verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT;
|
||||
break;
|
||||
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT:
|
||||
@@ -60,11 +71,16 @@ HttpManagerOpaque::getCurrVerdict() const
|
||||
break;
|
||||
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT:
|
||||
break;
|
||||
case ngx_http_cp_verdict_e::LIMIT_RESPONSE_HEADERS:
|
||||
// Sent in End Request.
|
||||
verdict = ngx_http_cp_verdict_e::LIMIT_RESPONSE_HEADERS;
|
||||
break;
|
||||
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT:
|
||||
dbgTrace(D_HTTP_MANAGER) << "Verdict 'Irrelevant' is not yet supported. Returning Accept";
|
||||
accepted_apps++;
|
||||
break;
|
||||
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT:
|
||||
// Sent in Request Headers and Request Body.
|
||||
verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT;
|
||||
break;
|
||||
default:
|
||||
|
||||
@@ -28,10 +28,12 @@ public:
|
||||
HttpManagerOpaque();
|
||||
|
||||
void setApplicationVerdict(const std::string &app_name, ngx_http_cp_verdict_e verdict);
|
||||
void setApplicationWebResponse(const std::string &app_name, std::string web_user_response_id);
|
||||
ngx_http_cp_verdict_e getApplicationsVerdict(const std::string &app_name) const;
|
||||
void setManagerVerdict(ngx_http_cp_verdict_e verdict) { manager_verdict = verdict; }
|
||||
ngx_http_cp_verdict_e getManagerVerdict() const { return manager_verdict; }
|
||||
ngx_http_cp_verdict_e getCurrVerdict() const;
|
||||
const std::string & getCurrWebUserResponse() const { return current_web_user_response; };
|
||||
std::set<std::string> getCurrentDropVerdictCausers() const;
|
||||
void saveCurrentDataToCache(const Buffer &full_data);
|
||||
void setUserDefinedValue(const std::string &value) { user_defined_value = value; }
|
||||
@@ -52,6 +54,8 @@ public:
|
||||
|
||||
private:
|
||||
std::unordered_map<std::string, ngx_http_cp_verdict_e> applications_verdicts;
|
||||
std::unordered_map<std::string, std::string> applications_web_user_response;
|
||||
mutable std::string current_web_user_response;
|
||||
ngx_http_cp_verdict_e manager_verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT;
|
||||
Buffer prev_data_cache;
|
||||
uint aggregated_payload_size = 0;
|
||||
|
||||
45 components/include/central_nginx_manager.h (Executable file)
@@ -0,0 +1,45 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef __CENTRAL_NGINX_MANAGER_H__
|
||||
#define __CENTRAL_NGINX_MANAGER_H__
|
||||
|
||||
#include "component.h"
|
||||
#include "singleton.h"
|
||||
#include "i_messaging.h"
|
||||
#include "i_rest_api.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "i_agent_details.h"
|
||||
|
||||
class CentralNginxManager
|
||||
:
|
||||
public Component,
|
||||
Singleton::Consume<I_RestApi>,
|
||||
Singleton::Consume<I_Messaging>,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_AgentDetails>
|
||||
{
|
||||
public:
|
||||
CentralNginxManager();
|
||||
~CentralNginxManager();
|
||||
|
||||
void preload() override;
|
||||
void init() override;
|
||||
void fini() override;
|
||||
|
||||
private:
|
||||
class Impl;
|
||||
std::unique_ptr<Impl> pimpl;
|
||||
};
|
||||
|
||||
#endif // __CENTRAL_NGINX_MANAGER_H__
|
||||
@@ -45,6 +45,19 @@ private:
|
||||
std::string host;
|
||||
};
|
||||
|
||||
class EqualWafTag : public EnvironmentEvaluator<bool>, Singleton::Consume<I_Environment>
|
||||
{
|
||||
public:
|
||||
EqualWafTag(const std::vector<std::string> ¶ms);
|
||||
|
||||
static std::string getName() { return "EqualWafTag"; }
|
||||
|
||||
Maybe<bool, Context::Error> evalVariable() const override;
|
||||
|
||||
private:
|
||||
std::string waf_tag;
|
||||
};
|
||||
|
||||
class EqualListeningIP : public EnvironmentEvaluator<bool>, Singleton::Consume<I_Environment>
|
||||
{
|
||||
public:
|
||||
|
||||
@@ -317,12 +317,12 @@ public:
|
||||
{
|
||||
return url_for_cef;
|
||||
}
|
||||
Flags<ReportIS::StreamType> getStreams(SecurityType security_type, bool is_action_drop_or_prevent) const;
|
||||
Flags<ReportIS::Enreachments> getEnrechments(SecurityType security_type) const;
|
||||
|
||||
private:
|
||||
ReportIS::Severity getSeverity(bool is_action_drop_or_prevent) const;
|
||||
ReportIS::Priority getPriority(bool is_action_drop_or_prevent) const;
|
||||
Flags<ReportIS::StreamType> getStreams(SecurityType security_type, bool is_action_drop_or_prevent) const;
|
||||
Flags<ReportIS::Enreachments> getEnrechments(SecurityType security_type) const;
|
||||
|
||||
std::string name;
|
||||
std::string verbosity;
|
||||
@@ -339,4 +339,32 @@ private:
|
||||
bool should_format_output = false;
|
||||
};
|
||||
|
||||
class ReportTriggerConf
|
||||
{
|
||||
public:
|
||||
/// \brief Default constructor for ReportTriggerConf.
|
||||
ReportTriggerConf() {}
|
||||
|
||||
/// \brief Preload function to register expected configuration.
|
||||
static void
|
||||
preload()
|
||||
{
|
||||
registerExpectedConfiguration<ReportTriggerConf>("rulebase", "report");
|
||||
}
|
||||
|
||||
/// \brief Load function to deserialize configuration from JSONInputArchive.
|
||||
/// \param archive_in The JSON input archive.
|
||||
void load(cereal::JSONInputArchive &archive_in);
|
||||
|
||||
/// \brief Get the name.
|
||||
/// \return The name.
|
||||
const std::string &
|
||||
getName() const
|
||||
{
|
||||
return name;
|
||||
}
|
||||
private:
|
||||
std::string name;
|
||||
};
|
||||
|
||||
#endif //__TRIGGERS_CONFIG_H__
|
||||
|
||||
@@ -27,9 +27,18 @@ public:
|
||||
verdict(_verdict)
|
||||
{}
|
||||
|
||||
FilterVerdict(
|
||||
ngx_http_cp_verdict_e _verdict,
|
||||
const std::string &_web_reponse_id)
|
||||
:
|
||||
verdict(_verdict),
|
||||
web_user_response_id(_web_reponse_id)
|
||||
{}
|
||||
|
||||
FilterVerdict(const EventVerdict &_verdict, ModifiedChunkIndex _event_idx = -1)
|
||||
:
|
||||
verdict(_verdict.getVerdict())
|
||||
verdict(_verdict.getVerdict()),
|
||||
web_user_response_id(_verdict.getWebUserResponseByPractice())
|
||||
{
|
||||
if (verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT) {
|
||||
addModifications(_verdict.getModifications(), _event_idx);
|
||||
@@ -59,10 +68,12 @@ public:
|
||||
uint getModificationsAmount() const { return total_modifications; }
|
||||
ngx_http_cp_verdict_e getVerdict() const { return verdict; }
|
||||
const std::vector<EventModifications> & getModifications() const { return modifications; }
|
||||
const std::string getWebUserResponseID() const { return web_user_response_id; }
|
||||
|
||||
private:
|
||||
ngx_http_cp_verdict_e verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT;
|
||||
std::vector<EventModifications> modifications;
|
||||
std::string web_user_response_id;
|
||||
uint total_modifications = 0;
|
||||
};
|
||||
|
||||
|
||||
@@ -239,6 +239,7 @@ public:
|
||||
const Buffer & getValue() const { return value; }
|
||||
|
||||
bool isLastHeader() const { return is_last_header; }
|
||||
void setIsLastHeader() { is_last_header = true; }
|
||||
uint8_t getHeaderIndex() const { return header_index; }
|
||||
|
||||
private:
|
||||
@@ -375,16 +376,31 @@ public:
|
||||
verdict(event_verdict)
|
||||
{}
|
||||
|
||||
EventVerdict(
|
||||
const ModificationList &mods,
|
||||
ngx_http_cp_verdict_e event_verdict,
|
||||
std::string response_id) :
|
||||
modifications(mods),
|
||||
verdict(event_verdict),
|
||||
webUserResponseByPractice(response_id)
|
||||
{}
|
||||
|
||||
// LCOV_EXCL_START - sync functions, can only be tested once the sync module exists
|
||||
template <typename T> void serialize(T &ar, uint) { ar(verdict); }
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
const ModificationList & getModifications() const { return modifications; }
|
||||
ngx_http_cp_verdict_e getVerdict() const { return verdict; }
|
||||
const std::string getWebUserResponseByPractice() const { return webUserResponseByPractice; }
|
||||
void setWebUserResponseByPractice(const std::string id) {
|
||||
dbgTrace(D_HTTP_MANAGER) << "current verdict web user response set to: " << id;
|
||||
webUserResponseByPractice = id;
|
||||
}
|
||||
|
||||
private:
|
||||
ModificationList modifications;
|
||||
ngx_http_cp_verdict_e verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT;
|
||||
std::string webUserResponseByPractice;
|
||||
};
|
||||
|
||||
#endif // __I_HTTP_EVENT_IMPL_H__
|
||||
|
||||
@@ -72,7 +72,8 @@ public:
|
||||
parsed_uri,
|
||||
client_ip,
|
||||
client_port,
|
||||
response_content_encoding
|
||||
response_content_encoding,
|
||||
waf_tag
|
||||
);
|
||||
}
|
||||
|
||||
@@ -91,7 +92,8 @@ public:
|
||||
parsed_uri,
|
||||
client_ip,
|
||||
client_port,
|
||||
response_content_encoding
|
||||
response_content_encoding,
|
||||
waf_tag
|
||||
);
|
||||
}
|
||||
// LCOV_EXCL_STOP
|
||||
@@ -122,6 +124,9 @@ public:
|
||||
response_content_encoding = _response_content_encoding;
|
||||
}
|
||||
|
||||
const std::string & getWafTag() const { return waf_tag; }
|
||||
void setWafTag(const std::string &_waf_tag) { waf_tag = _waf_tag; }
|
||||
|
||||
static const std::string http_proto_ctx;
|
||||
static const std::string method_ctx;
|
||||
static const std::string host_name_ctx;
|
||||
@@ -137,6 +142,7 @@ public:
|
||||
static const std::string source_identifier;
|
||||
static const std::string proxy_ip_ctx;
|
||||
static const std::string xff_vals_ctx;
|
||||
static const std::string waf_tag_ctx;
|
||||
|
||||
static const CompressionType default_response_content_encoding;
|
||||
|
||||
@@ -153,6 +159,7 @@ private:
|
||||
uint16_t client_port;
|
||||
bool is_request;
|
||||
CompressionType response_content_encoding;
|
||||
std::string waf_tag;
|
||||
};
|
||||
|
||||
#endif // __HTTP_TRANSACTION_DATA_H__
|
||||
|
||||
@@ -26,11 +26,12 @@ public:
|
||||
virtual Maybe<std::string> getArch() = 0;
|
||||
virtual std::string getAgentVersion() = 0;
|
||||
virtual bool isKernelVersion3OrHigher() = 0;
|
||||
virtual bool isGw() = 0;
|
||||
virtual bool isGwNotVsx() = 0;
|
||||
virtual bool isVersionAboveR8110() = 0;
|
||||
virtual bool isReverseProxy() = 0;
|
||||
virtual bool isCloudStorageEnabled() = 0;
|
||||
virtual Maybe<std::tuple<std::string, std::string, std::string>> parseNginxMetadata() = 0;
|
||||
virtual Maybe<std::tuple<std::string, std::string, std::string, std::string>> parseNginxMetadata() = 0;
|
||||
virtual Maybe<std::tuple<std::string, std::string, std::string, std::string, std::string>> readCloudMetadata() = 0;
|
||||
virtual std::map<std::string, std::string> getResolvedDetails() = 0;
|
||||
#if defined(gaia) || defined(smb)
|
||||
|
||||
@@ -27,6 +27,7 @@ struct DecisionTelemetryData
|
||||
int responseCode;
|
||||
uint64_t elapsedTime;
|
||||
std::set<std::string> attackTypes;
|
||||
bool temperatureDetected;
|
||||
|
||||
DecisionTelemetryData() :
|
||||
blockType(NOT_BLOCKING),
|
||||
@@ -38,7 +39,8 @@ struct DecisionTelemetryData
|
||||
method(POST),
|
||||
responseCode(0),
|
||||
elapsedTime(0),
|
||||
attackTypes()
|
||||
attackTypes(),
|
||||
temperatureDetected(false)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
#include "singleton.h"
|
||||
#include "i_keywords_rule.h"
|
||||
#include "i_table.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "i_http_manager.h"
|
||||
#include "i_environment.h"
|
||||
#include "http_inspection_events.h"
|
||||
@@ -16,7 +17,8 @@ class IPSComp
|
||||
Singleton::Consume<I_KeywordsRule>,
|
||||
Singleton::Consume<I_Table>,
|
||||
Singleton::Consume<I_Environment>,
|
||||
Singleton::Consume<I_GenericRulebase>
|
||||
Singleton::Consume<I_GenericRulebase>,
|
||||
Singleton::Consume<I_MainLoop>
|
||||
{
|
||||
public:
|
||||
IPSComp();
|
||||
|
||||
@@ -62,6 +62,7 @@ public:
|
||||
|
||||
private:
|
||||
Maybe<std::string> downloadPackage(const Package &package, bool is_clean_installation);
|
||||
std::string getCurrentTimestamp();
|
||||
|
||||
std::string manifest_file_path;
|
||||
std::string temp_ext;
|
||||
|
||||
28 components/include/nginx_message_reader.h (Executable file)
@@ -0,0 +1,28 @@
|
||||
#ifndef __NGINX_MESSAGE_READER_H__
|
||||
#define __NGINX_MESSAGE_READER_H__
|
||||
|
||||
#include "singleton.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "i_socket_is.h"
|
||||
#include "component.h"
|
||||
|
||||
class NginxMessageReader
|
||||
:
|
||||
public Component,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_Socket>
|
||||
{
|
||||
public:
|
||||
NginxMessageReader();
|
||||
~NginxMessageReader();
|
||||
|
||||
void init() override;
|
||||
void fini() override;
|
||||
void preload() override;
|
||||
|
||||
private:
|
||||
class Impl;
|
||||
std::unique_ptr<Impl> pimpl;
|
||||
};
|
||||
|
||||
#endif //__NGINX_MESSAGE_READER_H__
|
||||
51 components/include/nginx_utils.h (Executable file)
@@ -0,0 +1,51 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef __NGINX_UTILS_H__
|
||||
#define __NGINX_UTILS_H__
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "maybe_res.h"
|
||||
#include "singleton.h"
|
||||
#include "i_shell_cmd.h"
|
||||
|
||||
class NginxConfCollector
|
||||
{
|
||||
public:
|
||||
NginxConfCollector(const std::string &nginx_conf_input_path, const std::string &nginx_conf_output_path);
|
||||
Maybe<std::string> generateFullNginxConf() const;
|
||||
|
||||
private:
|
||||
std::vector<std::string> expandIncludes(const std::string &includePattern) const;
|
||||
void processConfigFile(
|
||||
const std::string &path,
|
||||
std::ostringstream &conf_output,
|
||||
std::vector<std::string> &errors
|
||||
) const;
|
||||
|
||||
std::string main_conf_input_path;
|
||||
std::string main_conf_output_path;
|
||||
std::string main_conf_directory_path;
|
||||
};
|
||||
|
||||
class NginxUtils : Singleton::Consume<I_ShellCmd>
|
||||
{
|
||||
public:
|
||||
static std::string getModulesPath();
|
||||
static std::string getMainNginxConfPath();
|
||||
static Maybe<void> validateNginxConf(const std::string &nginx_conf_path);
|
||||
static Maybe<void> reloadNginx(const std::string &nginx_conf_path);
|
||||
};
|
||||
|
||||
#endif // __NGINX_UTILS_H__
|
||||
30 components/include/prometheus_comp.h (Executable file)
@@ -0,0 +1,30 @@
#ifndef __PROMETHEUS_COMP_H__
#define __PROMETHEUS_COMP_H__

#include <memory>

#include "component.h"
#include "singleton.h"

#include "i_rest_api.h"
#include "i_messaging.h"
#include "generic_metric.h"

class PrometheusComp
        :
    public Component,
    Singleton::Consume<I_RestApi>,
    Singleton::Consume<I_Messaging>
{
public:
    PrometheusComp();
    ~PrometheusComp();

    void init() override;

private:
    class Impl;
    std::unique_ptr<Impl> pimpl;
};

#endif // __PROMETHEUS_COMP_H__
@@ -7,15 +7,21 @@
|
||||
#include "singleton.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "i_environment.h"
|
||||
#include "i_geo_location.h"
|
||||
#include "i_generic_rulebase.h"
|
||||
#include "i_shell_cmd.h"
|
||||
#include "i_env_details.h"
|
||||
|
||||
class RateLimit
|
||||
:
|
||||
public Component,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_TimeGet>,
|
||||
Singleton::Consume<I_GeoLocation>,
|
||||
Singleton::Consume<I_Environment>,
|
||||
Singleton::Consume<I_GenericRulebase>
|
||||
Singleton::Consume<I_GenericRulebase>,
|
||||
Singleton::Consume<I_ShellCmd>,
|
||||
Singleton::Consume<I_EnvDetails>
|
||||
{
|
||||
public:
|
||||
RateLimit();
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
#include "generic_metric.h"
|
||||
|
||||
#define LOGGING_INTERVAL_IN_MINUTES 10
|
||||
USE_DEBUG_FLAG(D_WAAP);
|
||||
enum class AssetType { API, WEB, ALL, COUNT };
|
||||
|
||||
class WaapTelemetryEvent : public Event<WaapTelemetryEvent>
|
||||
@@ -75,6 +76,20 @@ private:
|
||||
std::unordered_set<std::string> sources_seen;
|
||||
};
|
||||
|
||||
class WaapAdditionalTrafficTelemetrics : public WaapTelemetryBase
|
||||
{
|
||||
public:
|
||||
void updateMetrics(const std::string &asset_id, const DecisionTelemetryData &data);
|
||||
void initMetrics();
|
||||
|
||||
private:
|
||||
MetricCalculations::Counter requests{this, "reservedNgenA"};
|
||||
MetricCalculations::Counter sources{this, "reservedNgenB"};
|
||||
MetricCalculations::Counter blocked{this, "reservedNgenC"};
|
||||
MetricCalculations::Counter temperature_count{this, "reservedNgenD"};
|
||||
std::unordered_set<std::string> sources_seen;
|
||||
};
|
||||
|
||||
class WaapTrafficTelemetrics : public WaapTelemetryBase
|
||||
{
|
||||
public:
|
||||
@@ -123,6 +138,7 @@ private:
|
||||
std::map<std::string, std::shared_ptr<WaapTrafficTelemetrics>> traffic_telemetries;
|
||||
std::map<std::string, std::shared_ptr<WaapAttackTypesMetrics>> attack_types;
|
||||
std::map<std::string, std::shared_ptr<WaapAttackTypesMetrics>> attack_types_telemetries;
|
||||
std::map<std::string, std::shared_ptr<WaapAdditionalTrafficTelemetrics>> additional_traffic_telemetries;
|
||||
|
||||
template <typename T>
|
||||
void initializeTelemetryData(
|
||||
@@ -132,6 +148,7 @@ private:
|
||||
std::map<std::string, std::shared_ptr<T>>& telemetryMap
|
||||
) {
|
||||
if (!telemetryMap.count(asset_id)) {
|
||||
dbgTrace(D_WAAP) << "creating telemetry data for asset: " << data.assetName;
|
||||
telemetryMap.emplace(asset_id, std::make_shared<T>());
|
||||
telemetryMap[asset_id]->init(
|
||||
telemetryName,
|
||||
@@ -139,7 +156,9 @@ private:
|
||||
ReportIS::IssuingEngine::AGENT_CORE,
|
||||
std::chrono::minutes(LOGGING_INTERVAL_IN_MINUTES),
|
||||
true,
|
||||
ReportIS::Audience::SECURITY
|
||||
ReportIS::Audience::SECURITY,
|
||||
false,
|
||||
asset_id
|
||||
);
|
||||
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
@@ -152,29 +171,30 @@ private:
|
||||
std::string("Web Application"),
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"assetId",
|
||||
asset_id,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"assetName",
|
||||
data.assetName,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"practiceId",
|
||||
data.practiceId,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"practiceName",
|
||||
data.practiceName,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
|
||||
telemetryMap[asset_id]->registerListener();
|
||||
}
|
||||
dbgTrace(D_WAAP) << "updating telemetry data for asset: " << data.assetName;
|
||||
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"assetId",
|
||||
asset_id,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"assetName",
|
||||
data.assetName,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"practiceId",
|
||||
data.practiceId,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"practiceName",
|
||||
data.practiceName,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -30,6 +30,7 @@ public:
|
||||
void parseRequestHeaders(const HttpHeader &header) const;
|
||||
std::vector<std::string> getHeaderValuesFromConfig(const std::string &header_key) const;
|
||||
void setXFFValuesToOpaqueCtx(const HttpHeader &header, ExtractType type) const;
|
||||
void setWafTagValuesToOpaqueCtx(const HttpHeader &header) const;
|
||||
|
||||
private:
|
||||
class UsersIdentifiersConfig
|
||||
|
||||
@@ -33,6 +33,7 @@ class I_WaapAssetStatesManager;
|
||||
class I_Messaging;
|
||||
class I_AgentDetails;
|
||||
class I_Encryptor;
|
||||
class I_WaapModelResultLogger;
|
||||
|
||||
const std::string WAAP_APPLICATION_NAME = "waap application";
|
||||
|
||||
@@ -50,7 +51,8 @@ class WaapComponent
|
||||
Singleton::Consume<I_AgentDetails>,
|
||||
Singleton::Consume<I_Messaging>,
|
||||
Singleton::Consume<I_Encryptor>,
|
||||
Singleton::Consume<I_Environment>
|
||||
Singleton::Consume<I_Environment>,
|
||||
Singleton::Consume<I_WaapModelResultLogger>
|
||||
{
|
||||
public:
|
||||
WaapComponent();
|
||||
|
||||
3
components/nginx_message_reader/CMakeLists.txt
Executable file
@@ -0,0 +1,3 @@
link_directories(${BOOST_ROOT}/lib)

add_library(nginx_message_reader nginx_message_reader.cc)
735
components/nginx_message_reader/nginx_message_reader.cc
Executable file
@@ -0,0 +1,735 @@
|
||||
#include "nginx_message_reader.h"
|
||||
|
||||
#include <string>
|
||||
#include <boost/regex.hpp>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/algorithm/string/regex.hpp>
|
||||
|
||||
#include "config.h"
|
||||
#include "singleton.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "enum_array.h"
|
||||
#include "log_generator.h"
|
||||
#include "maybe_res.h"
|
||||
#include "http_transaction_data.h"
|
||||
#include "generic_rulebase/rulebase_config.h"
|
||||
#include "generic_rulebase/evaluators/asset_eval.h"
|
||||
#include "generic_rulebase/triggers_config.h"
|
||||
#include "agent_core_utilities.h"
|
||||
#include "rate_limit_config.h"
|
||||
|
||||
USE_DEBUG_FLAG(D_NGINX_MESSAGE_READER);
|
||||
|
||||
using namespace std;
|
||||
|
||||
static const string syslog_regex_string = (
|
||||
"<[0-9]+>([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}"
|
||||
"[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(nginx:)"
|
||||
);
|
||||
|
||||
static const boost::regex socket_address_regex("(\\d+\\.\\d+\\.\\d+\\.\\d+):(\\d+)");
|
||||
static const boost::regex syslog_regex(syslog_regex_string);
|
||||
static const boost::regex alert_log_regex(
|
||||
"("
|
||||
+ syslog_regex_string + ") "
|
||||
+ "(.+?\\[alert\\] )(.+?)"
|
||||
", (client: .+?)"
|
||||
", (server: .+?)"
|
||||
", (request: \".+?\")"
|
||||
", (upstream: \".+?\")"
|
||||
", (host: \".+?\")$"
|
||||
);
|
||||
|
||||
static const boost::regex error_log_regex(
|
||||
"("
|
||||
+ syslog_regex_string + ") "
|
||||
+ "(.+?\\[error\\] )(.+?)"
|
||||
", (client: .+?)"
|
||||
", (server: .+?)"
|
||||
", (request: \".+?\")"
|
||||
", (upstream: \".+?\")"
|
||||
", (host: \".+?\")$"
|
||||
);
|
||||
|
||||
static const boost::regex server_regex("(\\d+\\.\\d+\\.\\d+\\.\\d+)|(\\w+\\.\\w+)");
|
||||
static const boost::regex uri_regex("^/");
|
||||
static const boost::regex port_regex("\\d+");
|
||||
static const boost::regex response_code_regex("[0-9]{3}");
|
||||
static const boost::regex http_method_regex("[A-Za-z]+");
|
||||
|
||||
class NginxMessageReader::Impl
|
||||
{
|
||||
public:
|
||||
void
|
||||
init()
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
I_MainLoop *mainloop = Singleton::Consume<I_MainLoop>::by<NginxMessageReader>();
|
||||
mainloop->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::System,
|
||||
[this] ()
|
||||
{
|
||||
initSyslogServerSocket();
|
||||
handleNginxLogs();
|
||||
},
|
||||
"Initialize nginx syslog",
|
||||
true
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
preload()
|
||||
{
|
||||
registerConfigLoadCb([this]() { loadNginxMessageReaderConfig(); });
|
||||
}
|
||||
|
||||
void
|
||||
fini()
|
||||
{
|
||||
I_Socket *i_socket = Singleton::Consume<I_Socket>::by<NginxMessageReader>();
|
||||
i_socket->closeSocket(syslog_server_socket);
|
||||
}
|
||||
|
||||
void
|
||||
loadNginxMessageReaderConfig()
|
||||
{
|
||||
rate_limit_status_code = getProfileAgentSettingWithDefault<string>(
|
||||
"429",
|
||||
"accessControl.rateLimit.returnCode"
|
||||
);
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Selected rate-limit status code: " << rate_limit_status_code;
|
||||
}
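// Illustrative note: the default of "429" can be overridden per profile via the
// "accessControl.rateLimit.returnCode" agent setting (e.g. "403"), in which case
// sendAccessLog() below treats access-log entries with that status code as rate-limit drops.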
|
||||
|
||||
private:
|
||||
enum class LogInfo {
|
||||
HTTP_METHOD,
|
||||
URI,
|
||||
RESPONSE_CODE,
|
||||
HOST,
|
||||
SOURCE,
|
||||
DESTINATION_IP,
|
||||
DESTINATION_PORT,
|
||||
EVENT_MESSAGE,
|
||||
ASSET_ID,
|
||||
ASSET_NAME,
|
||||
RULE_NAME,
|
||||
RULE_ID,
|
||||
COUNT
|
||||
};
|
||||
|
||||
void
|
||||
initSyslogServerSocket()
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
I_MainLoop *mainloop = Singleton::Consume<I_MainLoop>::by<NginxMessageReader>();
|
||||
I_Socket *i_socket = Singleton::Consume<I_Socket>::by<NginxMessageReader>();
|
||||
string nginx_syslog_server_address = getProfileAgentSettingWithDefault<string>(
|
||||
"127.0.0.1:1514",
|
||||
"reverseProxy.nginx.syslogAddress"
|
||||
);
|
||||
dbgInfo(D_NGINX_MESSAGE_READER) << "Attempting to open a socket: " << nginx_syslog_server_address;
|
||||
do {
|
||||
Maybe<I_Socket::socketFd> new_socket = i_socket->genSocket(
|
||||
I_Socket::SocketType::UDP,
|
||||
false,
|
||||
true,
|
||||
nginx_syslog_server_address
|
||||
);
|
||||
if (!new_socket.ok()) {
|
||||
dbgError(D_NGINX_MESSAGE_READER) << "Failed to open a socket. Error: " << new_socket.getErr();
|
||||
mainloop->yield(chrono::milliseconds(500));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (new_socket.unpack() < 0) {
|
||||
dbgError(D_NGINX_MESSAGE_READER) << "Generated socket is OK yet negative";
|
||||
mainloop->yield(chrono::milliseconds(500));
|
||||
continue;
|
||||
}
|
||||
syslog_server_socket = new_socket.unpack();
|
||||
dbgInfo(D_NGINX_MESSAGE_READER)
|
||||
<< "Opened socket for nginx logs over syslog. Socket: "
|
||||
<< syslog_server_socket;
|
||||
} while (syslog_server_socket < 0);
|
||||
}
|
||||
|
||||
void
|
||||
handleNginxLogs()
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
I_MainLoop::Routine read_logs =
|
||||
[this] ()
|
||||
{
|
||||
Maybe<string> logs = getLogsFromSocket(syslog_server_socket);
|
||||
|
||||
if (!logs.ok()) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||
<< "Failed to get NGINX logs from the socket. Error: "
|
||||
<< logs.getErr();
|
||||
return;
|
||||
}
|
||||
string raw_logs_to_parse = logs.unpackMove();
|
||||
vector<string> logs_to_parse = separateLogs(raw_logs_to_parse);
|
||||
|
||||
for (auto const &log: logs_to_parse) {
|
||||
bool log_sent;
|
||||
if (isAccessLog(log)) {
|
||||
log_sent = sendAccessLog(log);
|
||||
} else if (isAlertErrorLog(log) || isErrorLog(log)) {
|
||||
log_sent = sendErrorLog(log);
|
||||
} else {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||
continue;
|
||||
}
|
||||
if (!log_sent) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER) << "Failed to send Log to Infinity Portal";
|
||||
} else {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Successfully sent nginx log to Infinity Portal";
|
||||
}
|
||||
}
|
||||
};
|
||||
I_MainLoop *mainloop = Singleton::Consume<I_MainLoop>::by<NginxMessageReader>();
|
||||
mainloop->addFileRoutine(
|
||||
I_MainLoop::RoutineType::RealTime,
|
||||
syslog_server_socket,
|
||||
read_logs,
|
||||
"Process nginx logs",
|
||||
true
|
||||
);
|
||||
}
|
||||
|
||||
bool
|
||||
sendAccessLog(const string &log)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Access log: " << log;
|
||||
Maybe<EnumArray<LogInfo, string>> log_info = parseAccessLog(log);
|
||||
if (!log_info.ok()) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||
<< "Failed parsing the NGINX logs. Error: "
|
||||
<< log_info.getErr();
|
||||
return false;
|
||||
}
|
||||
auto unpacked_log_info = log_info.unpack();
|
||||
|
||||
if (unpacked_log_info[LogInfo::RESPONSE_CODE] == rate_limit_status_code) {
|
||||
return sendRateLimitLog(unpacked_log_info);
|
||||
}
|
||||
return sendLog(unpacked_log_info);
|
||||
}
|
||||
|
||||
bool
|
||||
sendErrorLog(const string &log)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Error log: " << log;
|
||||
Maybe<EnumArray<LogInfo, string>> log_info = parseErrorLog(log);
|
||||
if (!log_info.ok()) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||
<< "Failed parsing the NGINX logs. Error: "
|
||||
<< log_info.getErr();
|
||||
return false;
|
||||
}
|
||||
return sendLog(log_info.unpack());
|
||||
}
|
||||
|
||||
bool
|
||||
isAccessLog(const string &log) const
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Check if string contains \"accessLog\". Log: " << log;
|
||||
return log.find("accessLog") != string::npos;
|
||||
}
|
||||
|
||||
bool
|
||||
isAlertErrorLog(const string &log) const
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Check if log is of type 'alert log'. Log: " << log;
|
||||
return log.find("[alert]") != string::npos;
|
||||
}
|
||||
|
||||
bool
|
||||
isErrorLog(const string &log) const
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Check if log is of type 'error log'. Log: " << log;
|
||||
return log.find("[error]") != string::npos;
|
||||
}
|
||||
|
||||
bool
|
||||
sendLog(const EnumArray<LogInfo, string> &log_info)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
string event_name;
|
||||
switch (log_info[LogInfo::RESPONSE_CODE][0]) {
|
||||
case '4': {
|
||||
event_name = "Invalid request or incorrect reverse proxy configuration - Request dropped."
|
||||
" Please check the reverse proxy configuration of your relevant assets";
|
||||
break;
|
||||
}
|
||||
case '5': {
|
||||
event_name = "AppSec Gateway reverse proxy error - Request dropped. "
|
||||
"Please verify the reverse proxy configuration of your relevant assets. "
|
||||
"If the issue persists please contact Check Point Support";
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
dbgError(D_NGINX_MESSAGE_READER) << "Irrelevant status code";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||
<< "Nginx log's event name and response code: "
|
||||
<< event_name
|
||||
<< ", "
|
||||
<< log_info[LogInfo::RESPONSE_CODE];
|
||||
LogGen log(
|
||||
event_name,
|
||||
ReportIS::Audience::SECURITY,
|
||||
ReportIS::Severity::INFO,
|
||||
ReportIS::Priority::LOW,
|
||||
ReportIS::Tags::REVERSE_PROXY
|
||||
);
|
||||
log << LogField("eventConfidence", "High");
|
||||
|
||||
for (LogInfo field : makeRange<LogInfo>()) {
|
||||
Maybe<string> string_field = convertLogFieldToString(field);
|
||||
if (!string_field.ok()) {
|
||||
dbgDebug(D_NGINX_MESSAGE_READER) << "Enum field was not converted: " << string_field.getErr();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (field != LogInfo::DESTINATION_PORT) {
|
||||
log << LogField(string_field.unpack(), log_info[field]);
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
log << LogField(string_field.unpack(), stoi(log_info[field]));
|
||||
} catch (const exception &e) {
|
||||
dbgError(D_NGINX_MESSAGE_READER)
|
||||
<< "Unable to convert port to numeric value: "
|
||||
<< e.what();
|
||||
log << LogField(string_field.unpack(), 0);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
sendRateLimitLog(const EnumArray<LogInfo, string> &log_info)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Getting rate-limit rules of asset ID: " << log_info[LogInfo::ASSET_ID];
|
||||
|
||||
ScopedContext rate_limit_ctx;
|
||||
|
||||
rate_limit_ctx.registerValue<GenericConfigId>(AssetMatcher::ctx_key, log_info[LogInfo::ASSET_ID]);
|
||||
auto rate_limit_config = getConfiguration<RateLimitConfig>("rulebase", "rateLimit");
|
||||
if (!rate_limit_config.ok()) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||
<< "Rate limit context does not match asset ID: " << log_info[LogInfo::ASSET_ID];
|
||||
return false;
|
||||
}
|
||||
RateLimitConfig unpacked_rate_limit_config = rate_limit_config.unpack();
|
||||
|
||||
string nginx_uri = log_info[LogInfo::URI];
|
||||
const LogTriggerConf &rate_limit_trigger = unpacked_rate_limit_config.getRateLimitTrigger(nginx_uri);
|
||||
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "About to generate NGINX rate-limit log";
|
||||
|
||||
string event_name = "Rate limit";
|
||||
string security_action = "Drop";
|
||||
bool is_log_required = false;
|
||||
|
||||
// Prevent events checkbox (in triggers)
|
||||
if (rate_limit_trigger.isPreventLogActive(LogTriggerConf::SecurityType::AccessControl)) {
|
||||
is_log_required = true;
|
||||
}
|
||||
|
||||
if (!is_log_required) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Not sending NGINX rate-limit log as it is not required";
|
||||
return false;
|
||||
}
|
||||
|
||||
ostringstream src_ip;
|
||||
ostringstream dst_ip;
|
||||
src_ip << log_info[LogInfo::SOURCE];
|
||||
dst_ip << log_info[LogInfo::DESTINATION_IP];
|
||||
|
||||
ReportIS::Severity log_severity = ReportIS::Severity::MEDIUM;
|
||||
ReportIS::Priority log_priority = ReportIS::Priority::MEDIUM;
|
||||
|
||||
LogGen log = rate_limit_trigger(
|
||||
event_name,
|
||||
LogTriggerConf::SecurityType::AccessControl,
|
||||
log_severity,
|
||||
log_priority,
|
||||
true, // is drop
|
||||
LogField("practiceType", "Rate Limit"),
|
||||
ReportIS::Tags::RATE_LIMIT
|
||||
);
|
||||
|
||||
for (LogInfo field : makeRange<LogInfo>()) {
|
||||
Maybe<string> string_field = convertLogFieldToString(field);
|
||||
if (!string_field.ok()) {
|
||||
dbgDebug(D_NGINX_MESSAGE_READER) << "Enum field was not converted: " << string_field.getErr();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (
|
||||
field == LogInfo::HOST ||
|
||||
field == LogInfo::URI ||
|
||||
field == LogInfo::HTTP_METHOD ||
|
||||
field == LogInfo::SOURCE ||
|
||||
field == LogInfo::DESTINATION_IP ||
|
||||
field == LogInfo::ASSET_ID ||
|
||||
field == LogInfo::ASSET_NAME ||
|
||||
field == LogInfo::RESPONSE_CODE
|
||||
) {
|
||||
if (!log_info[field].empty()) {
|
||||
log << LogField(string_field.unpack(), log_info[field]);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (field == LogInfo::DESTINATION_PORT) {
|
||||
try {
|
||||
int numeric_dst_port = stoi(log_info[field]);
|
||||
log << LogField(string_field.unpack(), numeric_dst_port);
|
||||
} catch (const exception &e) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||
<< "Unable to convert dst port: "
|
||||
<< log_info[field]
|
||||
<< " to numeric value. Error: "
|
||||
<< e.what();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
convertLogFieldToString(LogInfo field)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
switch (field) {
|
||||
case LogInfo::HTTP_METHOD:
|
||||
return string("httpMethod");
|
||||
case LogInfo::URI:
|
||||
return string("httpUriPath");
|
||||
case LogInfo::RESPONSE_CODE:
|
||||
return string("httpResponseCode");
|
||||
case LogInfo::HOST:
|
||||
return string("httpHostName");
|
||||
case LogInfo::SOURCE:
|
||||
return string("httpSourceId");
|
||||
case LogInfo::DESTINATION_IP:
|
||||
return string("destinationIp");
|
||||
case LogInfo::DESTINATION_PORT:
|
||||
return string("destinationPort");
|
||||
case LogInfo::ASSET_ID:
|
||||
return string("assetId");
|
||||
case LogInfo::ASSET_NAME:
|
||||
return string("assetName");
|
||||
case LogInfo::EVENT_MESSAGE:
|
||||
return string("httpResponseBody");
|
||||
case LogInfo::RULE_ID:
|
||||
return string("ruleId");
|
||||
case LogInfo::RULE_NAME:
|
||||
return string("ruleName");
|
||||
case LogInfo::COUNT:
|
||||
dbgError(D_NGINX_MESSAGE_READER) << "LogInfo::COUNT is not allowed";
|
||||
return genError("LogInfo::COUNT is not allowed");
|
||||
}
|
||||
dbgError(D_NGINX_MESSAGE_READER) << "No Enum found, int value: " << static_cast<int>(field);
|
||||
return genError("No Enum found");
|
||||
}
|
||||
|
||||
static vector<string>
|
||||
separateLogs(const string &raw_logs_to_parse)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "separating logs. logs: " << raw_logs_to_parse;
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "separateLogs start of function. Logs to parse: " << raw_logs_to_parse;
|
||||
boost::smatch matcher;
|
||||
vector<string> logs;
|
||||
|
||||
if (raw_logs_to_parse.empty()) return logs;
|
||||
|
||||
size_t pos = 0;
|
||||
while (NGEN::Regex::regexSearch(__FILE__, __LINE__, raw_logs_to_parse.substr(pos), matcher, syslog_regex)) {
|
||||
if (pos == 0) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "separateLogs pos = 0";
|
||||
pos++;
|
||||
continue;
|
||||
}
|
||||
auto log_length = matcher.position();
|
||||
logs.push_back(raw_logs_to_parse.substr(pos - 1, log_length));
|
||||
|
||||
pos += log_length + 1;
|
||||
}
|
||||
logs.push_back(raw_logs_to_parse.substr(pos - 1));
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "separateLogs end of function";
|
||||
|
||||
return logs;
|
||||
}
|
||||
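// Illustrative example of the splitting above: one UDP read may carry several syslog-framed
// nginx messages back to back, e.g.
// "<190>Mar  3 10:00:01 gw nginx: ...log A...<190>Mar  3 10:00:02 gw nginx: ...log B..."
// Every match of syslog_regex after the first marks the start of the next message, so the
// buffer is cut into one string per "<PRI>Mmm dd hh:mm:ss host nginx:" header.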
|
||||
static pair<string, string>
|
||||
parseErrorLogRequestField(const string &request)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "parsing request field. request: " << request;
|
||||
string formatted_request = request;
|
||||
vector<string> result;
|
||||
boost::erase_all(formatted_request, "\"");
|
||||
boost::erase_all(formatted_request, "\n");
|
||||
boost::split(result, formatted_request, boost::is_any_of(" "), boost::token_compress_on);
|
||||
|
||||
const int http_method_index = 1;
|
||||
const int uri_index = 2;
|
||||
return pair<string, string>(result[http_method_index], result[uri_index]);
|
||||
}
|
||||
|
||||
static string
|
||||
parseErrorLogField(const string &field)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "parsing error log field " << field;
|
||||
string formatted_field = field;
|
||||
vector<string> result;
|
||||
boost::erase_all(formatted_field, "\"");
|
||||
boost::erase_all(formatted_field, "\n");
|
||||
boost::split(result, formatted_field, boost::is_any_of(" "), boost::token_compress_on);
|
||||
|
||||
const int field_index = 1;
|
||||
return result[field_index];
|
||||
}
|
||||
|
||||
void
|
||||
addContextFieldsToLogInfo(EnumArray<LogInfo, string> &log_info)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
ScopedContext ctx;
|
||||
|
||||
try {
|
||||
ctx.registerValue<uint16_t>(
|
||||
HttpTransactionData::listening_port_ctx,
|
||||
static_cast<uint16_t>(stoi(log_info[LogInfo::DESTINATION_PORT]))
|
||||
);
|
||||
} catch (const exception &e) {
|
||||
dbgError(D_NGINX_MESSAGE_READER) << "Failed register values for context " << e.what();
|
||||
}
|
||||
ctx.registerValue<string>(HttpTransactionData::host_name_ctx, log_info[LogInfo::HOST]);
|
||||
ctx.registerValue<string>(HttpTransactionData::uri_ctx, log_info[LogInfo::URI]);
|
||||
auto rule_by_ctx = getConfiguration<BasicRuleConfig>("rulebase", "rulesConfig");
|
||||
if (!rule_by_ctx.ok()) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||
<< "AssetId was not found by the given context. Reason: "
|
||||
<< rule_by_ctx.getErr();
|
||||
return;
|
||||
}
|
||||
|
||||
BasicRuleConfig context = rule_by_ctx.unpack();
|
||||
log_info[LogInfo::ASSET_ID] = context.getAssetId();
|
||||
log_info[LogInfo::ASSET_NAME] = context.getAssetName();
|
||||
log_info[LogInfo::RULE_ID] = context.getRuleId();
|
||||
log_info[LogInfo::RULE_NAME] = context.getRuleName();
|
||||
}
|
||||
|
||||
Maybe<EnumArray<LogInfo, string>>
|
||||
parseErrorLog(const string &log_line)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Handling log line:" << log_line;
|
||||
string port;
|
||||
EnumArray<LogInfo, string> log_info(EnumArray<LogInfo, string>::Fill(), string(""));
|
||||
|
||||
boost::smatch matcher;
|
||||
vector<string> result;
|
||||
if (
|
||||
!NGEN::Regex::regexSearch(
|
||||
__FILE__,
|
||||
__LINE__,
|
||||
log_line,
|
||||
matcher,
|
||||
isAlertErrorLog(log_line) ? alert_log_regex : error_log_regex
|
||||
)
|
||||
) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||
return genError("Unexpected nginx log format");
|
||||
}
|
||||
|
||||
const int event_message_index = 6;
|
||||
const int source_index = 7;
|
||||
const int request_index = 9;
|
||||
const int host_index = 11;
|
||||
string host = string(matcher[host_index].first, matcher[host_index].second);
|
||||
string source = string(matcher[source_index].first, matcher[source_index].second);
|
||||
string event_message = string(matcher[event_message_index].first, matcher[event_message_index].second);
|
||||
string request = string(matcher[request_index].first, matcher[request_index].second);
|
||||
|
||||
host = parseErrorLogField(host);
|
||||
source = parseErrorLogField(source);
|
||||
pair<string, string> parsed_request = parseErrorLogRequestField(request);
|
||||
string http_method = parsed_request.first;
|
||||
string uri = parsed_request.second;
|
||||
|
||||
if (NGEN::Regex::regexSearch(__FILE__, __LINE__, host, matcher, socket_address_regex)) {
|
||||
int host_index = 1;
|
||||
int port_index = 2;
|
||||
host = string(matcher[host_index].first, matcher[host_index].second);
|
||||
port = string(matcher[port_index].first, matcher[port_index].second);
|
||||
} else if (NGEN::Regex::regexSearch(__FILE__, __LINE__, host, matcher, boost::regex("https://"))) {
|
||||
port = "443";
|
||||
} else {
|
||||
port = "80";
|
||||
}
|
||||
|
||||
log_info[LogInfo::HOST] = host;
|
||||
log_info[LogInfo::URI] = uri;
|
||||
log_info[LogInfo::RESPONSE_CODE] = "500";
|
||||
log_info[LogInfo::HTTP_METHOD] = http_method;
|
||||
log_info[LogInfo::SOURCE] = source;
|
||||
log_info[LogInfo::DESTINATION_IP] = host;
|
||||
log_info[LogInfo::DESTINATION_PORT] = port;
|
||||
log_info[LogInfo::EVENT_MESSAGE] = event_message;
|
||||
|
||||
addContextFieldsToLogInfo(log_info);
|
||||
|
||||
if (!validateLog(log_info)) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||
return genError("Unexpected nginx log format");
|
||||
}
|
||||
|
||||
return log_info;
|
||||
}
|
||||
|
||||
Maybe<EnumArray<LogInfo, string>>
|
||||
parseAccessLog(const string &log_line)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Parsing log line: " << log_line;
|
||||
string formatted_log = log_line;
|
||||
EnumArray<LogInfo, string> log_info(EnumArray<LogInfo, string>::Fill(), string(""));
|
||||
vector<string> result;
|
||||
boost::erase_all(formatted_log, "\"");
|
||||
boost::erase_all(formatted_log, "\n");
|
||||
boost::split(result, formatted_log, boost::is_any_of(" "), boost::token_compress_on);
|
||||
|
||||
const int valid_log_size = 20;
|
||||
|
||||
if (result.size() < valid_log_size) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||
return genError("Unexpected nginx log format");
|
||||
}
|
||||
|
||||
const int host_index = 6;
|
||||
const int host_port_index = 7;
|
||||
const int http_method_index = 13;
|
||||
const int uri_index = 14;
|
||||
const int response_code_index = 16;
|
||||
const int source_index = 8;
|
||||
|
||||
log_info[LogInfo::HOST] = result[host_index];
|
||||
log_info[LogInfo::URI] = result[uri_index];
|
||||
log_info[LogInfo::RESPONSE_CODE] = result[response_code_index];
|
||||
log_info[LogInfo::HTTP_METHOD] = result[http_method_index];
|
||||
log_info[LogInfo::SOURCE] = result[source_index];
|
||||
log_info[LogInfo::DESTINATION_IP] = result[host_index];
|
||||
log_info[LogInfo::DESTINATION_PORT] = result[host_port_index];
|
||||
log_info[LogInfo::EVENT_MESSAGE] = "Invalid request or incorrect reverse proxy configuration - "
|
||||
"Request dropped. Please check the reverse proxy configuration of your relevant assets";
|
||||
|
||||
addContextFieldsToLogInfo(log_info);
|
||||
|
||||
if (!validateLog(log_info)) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||
return genError("Unexpected nginx log format");
|
||||
}
|
||||
return log_info;
|
||||
}
|
||||
|
||||
static bool
|
||||
validateLog(const EnumArray<LogInfo, string> &log_info)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
|
||||
boost::smatch matcher;
|
||||
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::HOST], matcher, server_regex)) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate server (Host): " << log_info[LogInfo::HOST];
|
||||
return false;
|
||||
}
|
||||
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::URI], matcher, uri_regex)) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate Uri: " << log_info[LogInfo::URI];
|
||||
return false;
|
||||
}
|
||||
|
||||
if (
|
||||
!NGEN::Regex::regexSearch(
|
||||
__FILE__,
|
||||
__LINE__,
|
||||
log_info[LogInfo::RESPONSE_CODE],
|
||||
matcher, response_code_regex
|
||||
)
|
||||
) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||
<< "Could not validate response code: "
|
||||
<< log_info[LogInfo::RESPONSE_CODE];
|
||||
return false;
|
||||
}
|
||||
|
||||
if (
|
||||
!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::HTTP_METHOD], matcher, http_method_regex)
|
||||
) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate HTTP method: " << log_info[LogInfo::HTTP_METHOD];
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::DESTINATION_PORT], matcher, port_regex)) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||
<< "Could not validate destination port: "
|
||||
<< log_info[LogInfo::DESTINATION_PORT];
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::SOURCE], matcher, server_regex)) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate source: " << log_info[LogInfo::SOURCE];
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getLogsFromSocket(const I_Socket::socketFd &client_socket) const
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Reading logs from socket. fd: " << client_socket;
|
||||
I_Socket *i_socket = Singleton::Consume<I_Socket>::by<NginxMessageReader>();
|
||||
Maybe<vector<char>> raw_log_data = i_socket->receiveData(client_socket, 0, false);
|
||||
if (!raw_log_data.ok()) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER) << "Error receiving data from socket";
|
||||
return genError("Error receiving data from socket");
|
||||
}
|
||||
|
||||
string raw_log(raw_log_data.unpack().begin(), raw_log_data.unpack().end());
|
||||
return move(raw_log);
|
||||
}
|
||||
|
||||
I_Socket::socketFd syslog_server_socket = -1;
|
||||
string rate_limit_status_code = "429";
|
||||
};
|
||||
|
||||
NginxMessageReader::NginxMessageReader() : Component("NginxMessageReader"), pimpl(make_unique<Impl>()) {}
|
||||
|
||||
NginxMessageReader::~NginxMessageReader() {}
|
||||
|
||||
void
|
||||
NginxMessageReader::init()
|
||||
{
|
||||
pimpl->init();
|
||||
}
|
||||
|
||||
void
|
||||
NginxMessageReader::preload()
|
||||
{
|
||||
pimpl->preload();
|
||||
}
|
||||
|
||||
void
|
||||
NginxMessageReader::fini()
|
||||
{
|
||||
pimpl->fini();
|
||||
}
|
||||
@@ -3,5 +3,7 @@ add_subdirectory(ips)
add_subdirectory(layer_7_access_control)
add_subdirectory(local_policy_mgmt_gen)
add_subdirectory(orchestration)
add_subdirectory(prometheus)
add_subdirectory(rate_limit)
add_subdirectory(waap)
add_subdirectory(central_nginx_manager)

3
components/security_apps/central_nginx_manager/CMakeLists.txt
Executable file
@@ -0,0 +1,3 @@
include_directories(include)

add_library(central_nginx_manager central_nginx_manager.cc lets_encrypt_listener.cc)
418
components/security_apps/central_nginx_manager/central_nginx_manager.cc
Executable file
@@ -0,0 +1,418 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "central_nginx_manager.h"
|
||||
#include "lets_encrypt_listener.h"
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <cereal/external/base64.hpp>
|
||||
|
||||
#include "debug.h"
|
||||
#include "config.h"
|
||||
#include "rest.h"
|
||||
#include "log_generator.h"
|
||||
#include "nginx_utils.h"
|
||||
#include "agent_core_utilities.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_NGINX_MANAGER);
|
||||
|
||||
class CentralNginxConfig
|
||||
{
|
||||
public:
|
||||
void load(cereal::JSONInputArchive &ar)
|
||||
{
|
||||
try {
|
||||
string nginx_conf_base64;
|
||||
ar(cereal::make_nvp("id", file_id));
|
||||
ar(cereal::make_nvp("name", file_name));
|
||||
ar(cereal::make_nvp("data", nginx_conf_base64));
|
||||
nginx_conf_content = cereal::base64::decode(nginx_conf_base64);
|
||||
central_nginx_conf_path = getCentralNginxConfPath();
|
||||
shared_config_path = getSharedConfigPath();
|
||||
if (!nginx_conf_content.empty()) configureCentralNginx();
|
||||
} catch (const cereal::Exception &e) {
|
||||
dbgDebug(D_NGINX_MANAGER) << "Could not load Central Management Config JSON. Error: " << e.what();
|
||||
ar.setNextName(nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
const string & getFileId() const { return file_id; }
|
||||
const string & getFileName() const { return file_name; }
|
||||
const string & getFileContent() const { return nginx_conf_content; }
|
||||
|
||||
static string
|
||||
getCentralNginxConfPath()
|
||||
{
|
||||
string central_nginx_conf_path = getProfileAgentSettingWithDefault<string>(
|
||||
string("/tmp/central_nginx.conf"),
|
||||
"centralNginxManagement.confDownloadPath"
|
||||
);
|
||||
dbgInfo(D_NGINX_MANAGER) << "Central NGINX configuration path: " << central_nginx_conf_path;
|
||||
|
||||
return central_nginx_conf_path;
|
||||
}
|
||||
|
||||
static string
|
||||
getSharedConfigPath()
|
||||
{
|
||||
string central_shared_conf_path = getConfigurationWithDefault<string>(
|
||||
"/etc/cp/conf",
|
||||
"Config Component",
|
||||
"configuration path"
|
||||
);
|
||||
central_shared_conf_path += "/centralNginxManager/shared/central_nginx_shared.conf";
|
||||
dbgInfo(D_NGINX_MANAGER) << "Shared NGINX configuration path: " << central_shared_conf_path;
|
||||
|
||||
return central_shared_conf_path;
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
loadAttachmentModule()
|
||||
{
|
||||
string attachment_module_path = NginxUtils::getModulesPath() + "/ngx_cp_attachment_module.so";
|
||||
if (!NGEN::Filesystem::exists(attachment_module_path)) {
|
||||
dbgTrace(D_NGINX_MANAGER) << "Attachment module " << attachment_module_path << " does not exist";
|
||||
return;
|
||||
}
|
||||
|
||||
string attachment_module_conf = "load_module " + attachment_module_path + ";";
|
||||
if (nginx_conf_content.find(attachment_module_conf) != string::npos) {
|
||||
dbgTrace(D_NGINX_MANAGER) << "Attachment module " << attachment_module_path << " already loaded";
|
||||
return;
|
||||
}
|
||||
|
||||
nginx_conf_content = attachment_module_conf + "\n" + nginx_conf_content;
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
loadSharedDirective(const string &directive)
|
||||
{
|
||||
dbgFlow(D_NGINX_MANAGER) << "Loading shared directive into the servers " << directive;
|
||||
|
||||
if (!NGEN::Filesystem::copyFile(shared_config_path, shared_config_path + ".bak", true)) {
|
||||
return genError("Could not create a backup of the shared NGINX configuration file");
|
||||
}
|
||||
|
||||
ifstream shared_config(shared_config_path);
|
||||
if (!shared_config.is_open()) {
|
||||
return genError("Could not open shared NGINX configuration file");
|
||||
}
|
||||
|
||||
string shared_config_content((istreambuf_iterator<char>(shared_config)), istreambuf_iterator<char>());
|
||||
shared_config.close();
|
||||
|
||||
if (shared_config_content.find(directive) != string::npos) {
|
||||
dbgTrace(D_NGINX_MANAGER) << "Shared directive " << directive << " already loaded";
|
||||
return {};
|
||||
}
|
||||
|
||||
ofstream new_shared_config(shared_config_path, ios::app);
|
||||
if (!new_shared_config.is_open()) {
|
||||
return genError("Could not open shared NGINX configuration file");
|
||||
}
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER) << "Adding shared directive " << directive;
|
||||
new_shared_config << directive << "\n";
|
||||
new_shared_config.close();
|
||||
|
||||
auto validation = NginxUtils::validateNginxConf(central_nginx_conf_path);
|
||||
if (!validation.ok()) {
|
||||
if (!NGEN::Filesystem::copyFile(shared_config_path + ".bak", shared_config_path, true)) {
|
||||
return genError("Could not restore the shared NGINX configuration file");
|
||||
}
|
||||
return genError("Could not validate shared NGINX configuration file. Error: " + validation.getErr());
|
||||
}
|
||||
|
||||
return {};
|
||||
}
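// In short, loadSharedDirective() is transactional: it snapshots the shared file to
// <shared_config_path>.bak, appends the directive only if it is not already present,
// re-validates the full configuration via NginxUtils::validateNginxConf() (presumably an
// "nginx -t"-style check), and restores the backup if validation fails.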
|
||||
|
||||
Maybe<void>
|
||||
loadSharedConfig()
|
||||
{
|
||||
dbgFlow(D_NGINX_MANAGER) << "Loading shared configuration into the servers";
|
||||
|
||||
ofstream shared_config(shared_config_path);
|
||||
if (!shared_config.is_open()) {
|
||||
return genError("Could not create shared NGINX configuration file");
|
||||
}
|
||||
shared_config.close();
|
||||
|
||||
string shared_config_directive = "include " + shared_config_path + ";\n";
|
||||
boost::regex server_regex("server\\s*\\{");
|
||||
nginx_conf_content = NGEN::Regex::regexReplace(
|
||||
__FILE__,
|
||||
__LINE__,
|
||||
nginx_conf_content,
|
||||
server_regex,
|
||||
"server {\n" + shared_config_directive
|
||||
);
|
||||
|
||||
ofstream nginx_conf_file(central_nginx_conf_path);
|
||||
if (!nginx_conf_file.is_open()) {
|
||||
return genError("Could not open a temporary central NGINX configuration file");
|
||||
}
|
||||
nginx_conf_file << nginx_conf_content;
|
||||
nginx_conf_file.close();
|
||||
|
||||
auto validation = NginxUtils::validateNginxConf(central_nginx_conf_path);
|
||||
if (!validation.ok()) {
|
||||
return genError("Could not validate central NGINX configuration file. Error: " + validation.getErr());
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
configureSyslog()
|
||||
{
|
||||
if (!getProfileAgentSettingWithDefault<bool>(false, "centralNginxManagement.syslogEnabled")) {
|
||||
dbgTrace(D_NGINX_MANAGER) << "Syslog is disabled via settings";
|
||||
return {};
|
||||
}
|
||||
|
||||
string syslog_directive = "error_log syslog:server=127.0.0.1:1514 warn;";
|
||||
auto load_shared_directive_result = loadSharedDirective(syslog_directive);
|
||||
if (!load_shared_directive_result.ok()) {
|
||||
return genError("Could not configure syslog directive, error: " + load_shared_directive_result.getErr());
|
||||
}
|
||||
|
||||
return {};
|
||||
}
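// When "centralNginxManagement.syslogEnabled" is true, the directive added here points NGINX
// error logging at 127.0.0.1:1514, the same default UDP address NginxMessageReader binds via
// "reverseProxy.nginx.syslogAddress", so [error]/[alert] lines are parsed and reported by the agent.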
|
||||
|
||||
Maybe<void>
|
||||
saveBaseCentralNginxConf()
|
||||
{
|
||||
ofstream central_nginx_conf_base_file(central_nginx_conf_path + ".base");
|
||||
if (!central_nginx_conf_base_file.is_open()) {
|
||||
return genError("Could not open a temporary central NGINX configuration file");
|
||||
}
|
||||
central_nginx_conf_base_file << nginx_conf_content;
|
||||
central_nginx_conf_base_file.close();
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
void
|
||||
configureCentralNginx()
|
||||
{
|
||||
loadAttachmentModule();
|
||||
auto save_base_nginx_conf = saveBaseCentralNginxConf();
|
||||
if (!save_base_nginx_conf.ok()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not save base NGINX configuration. Error: "
|
||||
<< save_base_nginx_conf.getErr();
|
||||
return;
|
||||
}
|
||||
|
||||
string nginx_conf_content_backup = nginx_conf_content;
|
||||
auto shared_config_result = loadSharedConfig();
|
||||
if (!shared_config_result.ok()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not load shared configuration. Error: "
|
||||
<< shared_config_result.getErr();
|
||||
nginx_conf_content = nginx_conf_content_backup;
|
||||
return;
|
||||
}
|
||||
|
||||
auto syslog_result = configureSyslog();
|
||||
if (!syslog_result.ok()) {
|
||||
dbgWarning(D_NGINX_MANAGER) << "Could not configure syslog. Error: " << syslog_result.getErr();
|
||||
}
|
||||
}
|
||||
|
||||
string file_id;
|
||||
string file_name;
|
||||
string nginx_conf_content;
|
||||
string central_nginx_conf_path;
|
||||
string shared_config_path;
|
||||
};
|
||||
|
||||
class CentralNginxManager::Impl
|
||||
{
|
||||
public:
|
||||
void
|
||||
init()
|
||||
{
|
||||
dbgInfo(D_NGINX_MANAGER) << "Starting Central NGINX Manager";
|
||||
|
||||
string main_nginx_conf_path = NginxUtils::getMainNginxConfPath();
|
||||
if (
|
||||
NGEN::Filesystem::exists(main_nginx_conf_path)
|
||||
&& !NGEN::Filesystem::exists(main_nginx_conf_path + ".orig")
|
||||
) {
|
||||
dbgInfo(D_NGINX_MANAGER) << "Creating a backup of the original main NGINX configuration file";
|
||||
NGEN::Filesystem::copyFile(main_nginx_conf_path, main_nginx_conf_path + ".orig", true);
|
||||
}
|
||||
|
||||
i_mainloop = Singleton::Consume<I_MainLoop>::by<CentralNginxManager>();
|
||||
if (!lets_encrypt_listener.init()) {
|
||||
dbgWarning(D_NGINX_MANAGER) << "Could not start Lets Encrypt Listener, scheduling retry";
|
||||
i_mainloop->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::System,
|
||||
[this] ()
|
||||
{
|
||||
while(!lets_encrypt_listener.init()) {
|
||||
dbgWarning(D_NGINX_MANAGER) << "Could not start Lets Encrypt Listener, will retry";
|
||||
i_mainloop->yield(chrono::seconds(5));
|
||||
}
|
||||
},
|
||||
"Lets Encrypt Listener initializer",
|
||||
false
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
loadPolicy()
|
||||
{
|
||||
auto central_nginx_config = getSetting<vector<CentralNginxConfig>>("centralNginxManagement");
|
||||
if (!central_nginx_config.ok() || central_nginx_config.unpack().empty()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not load Central NGINX Management settings. Error: "
|
||||
<< central_nginx_config.getErr();
|
||||
return;
|
||||
}
|
||||
|
||||
auto &config = central_nginx_config.unpack().front();
|
||||
if (config.getFileContent().empty()) {
|
||||
dbgWarning(D_NGINX_MANAGER) << "Empty NGINX configuration file";
|
||||
return;
|
||||
}
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER)
|
||||
<< "Handling Central NGINX Management settings: "
|
||||
<< config.getFileId()
|
||||
<< ", "
|
||||
<< config.getFileName()
|
||||
<< ", "
|
||||
<< config.getFileContent();
|
||||
|
||||
string central_nginx_conf_path = config.getCentralNginxConfPath();
|
||||
ofstream central_nginx_conf_file(central_nginx_conf_path);
|
||||
if (!central_nginx_conf_file.is_open()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not open central NGINX configuration file: "
|
||||
<< central_nginx_conf_path;
|
||||
return;
|
||||
}
|
||||
central_nginx_conf_file << config.getFileContent();
|
||||
central_nginx_conf_file.close();
|
||||
|
||||
auto validation_result = NginxUtils::validateNginxConf(central_nginx_conf_path);
|
||||
if (!validation_result.ok()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not validate central NGINX configuration file. Error: "
|
||||
<< validation_result.getErr();
|
||||
logError(validation_result.getErr());
|
||||
return;
|
||||
}
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER) << "Validated central NGINX configuration file";
|
||||
|
||||
auto reload_result = NginxUtils::reloadNginx(central_nginx_conf_path);
|
||||
if (!reload_result.ok()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not reload central NGINX configuration. Error: "
|
||||
<< reload_result.getErr();
|
||||
logError("Could not reload central NGINX configuration. Error: " + reload_result.getErr());
|
||||
return;
|
||||
}
|
||||
|
||||
logInfo("Central NGINX configuration has been successfully reloaded");
|
||||
}
|
||||
|
||||
void
|
||||
fini()
|
||||
{
|
||||
string central_nginx_base_path = CentralNginxConfig::getCentralNginxConfPath() + ".base";
|
||||
if (!NGEN::Filesystem::exists(central_nginx_base_path)) {
|
||||
dbgWarning(D_NGINX_MANAGER) << "Could not find base NGINX configuration file: " << central_nginx_base_path;
|
||||
return;
|
||||
}
|
||||
|
||||
NginxUtils::reloadNginx(central_nginx_base_path);
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
logError(const string &error)
|
||||
{
|
||||
LogGen log(
|
||||
error,
|
||||
ReportIS::Level::ACTION,
|
||||
ReportIS::Audience::SECURITY,
|
||||
ReportIS::Severity::CRITICAL,
|
||||
ReportIS::Priority::URGENT,
|
||||
ReportIS::Tags::POLICY_INSTALLATION
|
||||
);
|
||||
|
||||
log.addToOrigin(LogField("eventTopic", "Central NGINX Management"));
|
||||
log << LogField("notificationId", "4165c3b1-e9bc-44c3-888b-863e204c1bfb");
|
||||
log << LogField(
|
||||
"eventRemediation",
|
||||
"Please verify your NGINX configuration and enforce policy again. "
|
||||
"Contact Check Point support if the issue persists."
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
logInfo(const string &info)
|
||||
{
|
||||
LogGen log(
|
||||
info,
|
||||
ReportIS::Level::ACTION,
|
||||
ReportIS::Audience::SECURITY,
|
||||
ReportIS::Severity::INFO,
|
||||
ReportIS::Priority::LOW,
|
||||
ReportIS::Tags::POLICY_INSTALLATION
|
||||
);
|
||||
|
||||
log.addToOrigin(LogField("eventTopic", "Central NGINX Management"));
|
||||
log << LogField("notificationId", "4165c3b1-e9bc-44c3-888b-863e204c1bfb");
|
||||
log << LogField("eventRemediation", "No action required");
|
||||
}
|
||||
|
||||
I_MainLoop *i_mainloop = nullptr;
|
||||
LetsEncryptListener lets_encrypt_listener;
|
||||
};
|
||||
|
||||
CentralNginxManager::CentralNginxManager()
|
||||
:
|
||||
Component("Central NGINX Manager"),
|
||||
pimpl(make_unique<CentralNginxManager::Impl>()) {}
|
||||
|
||||
CentralNginxManager::~CentralNginxManager() {}
|
||||
|
||||
void
|
||||
CentralNginxManager::init()
|
||||
{
|
||||
pimpl->init();
|
||||
}
|
||||
|
||||
void
|
||||
CentralNginxManager::fini()
|
||||
{
|
||||
pimpl->fini();
|
||||
}
|
||||
|
||||
void
|
||||
CentralNginxManager::preload()
|
||||
{
|
||||
registerExpectedSetting<vector<CentralNginxConfig>>("centralNginxManagement");
|
||||
registerExpectedConfiguration<string>("Config Component", "configuration path");
|
||||
registerConfigLoadCb([this]() { pimpl->loadPolicy(); });
|
||||
}
|
||||
@@ -0,0 +1,30 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __LETS_ENCRYPT_HANDLER_H__
#define __LETS_ENCRYPT_HANDLER_H__

#include <string>

#include "maybe_res.h"

class LetsEncryptListener
{
public:
    bool init();

private:
    Maybe<std::string> getChallengeValue(const std::string &uri) const;
};

#endif // __LETS_ENCRYPT_HANDLER_H__
76
components/security_apps/central_nginx_manager/lets_encrypt_listener.cc
Executable file
@@ -0,0 +1,76 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "lets_encrypt_listener.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "central_nginx_manager.h"
|
||||
#include "debug.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_NGINX_MANAGER);
|
||||
|
||||
bool
|
||||
LetsEncryptListener::init()
|
||||
{
|
||||
dbgInfo(D_NGINX_MANAGER) << "Starting Lets Encrypt Listener";
|
||||
return Singleton::Consume<I_RestApi>::by<CentralNginxManager>()->addWildcardGetCall(
|
||||
".well-known/acme-challenge/",
|
||||
[&] (const string &uri) -> string
|
||||
{
|
||||
Maybe<string> maybe_challenge_value = getChallengeValue(uri);
|
||||
if (!maybe_challenge_value.ok()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not get challenge value for uri: "
|
||||
<< uri
|
||||
<< ", error: "
|
||||
<< maybe_challenge_value.getErr();
|
||||
return string{""};
|
||||
};
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER) << "Got challenge value: " << maybe_challenge_value.unpack();
|
||||
return maybe_challenge_value.unpack();
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
LetsEncryptListener::getChallengeValue(const string &uri) const
|
||||
{
|
||||
string challenge_key = uri.substr(uri.find_last_of('/') + 1);
|
||||
string api_query = "/api/lets-encrypt-challenge?http_challenge_key=" + challenge_key;
|
||||
|
||||
dbgInfo(D_NGINX_MANAGER) << "Getting challenge value via: " << api_query;
|
||||
|
||||
MessageMetadata md;
|
||||
md.insertHeader("X-Tenant-Id", Singleton::Consume<I_AgentDetails>::by<CentralNginxManager>()->getTenantId());
|
||||
Maybe<HTTPResponse, HTTPResponse> maybe_http_challenge_value =
|
||||
Singleton::Consume<I_Messaging>::by<CentralNginxManager>()->sendSyncMessage(
|
||||
HTTPMethod::GET,
|
||||
api_query,
|
||||
string("{}"),
|
||||
MessageCategory::GENERIC,
|
||||
md
|
||||
);
|
||||
|
||||
if (!maybe_http_challenge_value.ok()) return genError(maybe_http_challenge_value.getErr().getBody());
|
||||
|
||||
string challenge_value = maybe_http_challenge_value.unpack().getBody();
|
||||
if (!challenge_value.empty() && challenge_value.front() == '"' && challenge_value.back() == '"') {
|
||||
challenge_value = challenge_value.substr(1, challenge_value.size() - 2);
|
||||
}
|
||||
|
||||
return challenge_value;
|
||||
}
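// Flow sketch, as implied by the code above: for an ACME HTTP-01 challenge the CA fetches
// http://<host>/.well-known/acme-challenge/<token>; the wildcard GET handler extracts <token>,
// queries the backend at /api/lets-encrypt-challenge?http_challenge_key=<token> with the tenant
// ID header, strips the surrounding quotes from the JSON string body, and returns the challenge
// value as the plain-text response.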
|
||||
@@ -88,9 +88,18 @@ public:
|
||||
dbgWarning(D_GEO_FILTER) << "failed to get source ip from env";
|
||||
return EventVerdict(default_action);
|
||||
}
|
||||
|
||||
auto source_ip = convertIpAddrToString(maybe_source_ip.unpack());
|
||||
ip_set.insert(source_ip);
|
||||
|
||||
// saas profile setting
|
||||
bool ignore_source_ip =
|
||||
getProfileAgentSettingWithDefault<bool>(false, "agent.geoProtaction.ignoreSourceIP");
|
||||
if (ignore_source_ip){
|
||||
dbgDebug(D_GEO_FILTER) << "Geo protection ignoring source ip: " << source_ip;
|
||||
} else {
|
||||
dbgTrace(D_GEO_FILTER) << "Geo protection source ip: " << source_ip;
|
||||
ip_set.insert(convertIpAddrToString(maybe_source_ip.unpack()));
|
||||
}
|
||||
|
||||
|
||||
ngx_http_cp_verdict_e exception_verdict = getExceptionVerdict(ip_set);
|
||||
if (exception_verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT) {
|
||||
@@ -327,6 +336,14 @@ private:
|
||||
ngx_http_cp_verdict_e verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT;
|
||||
I_GeoLocation *i_geo_location = Singleton::Consume<I_GeoLocation>::by<HttpGeoFilter>();
|
||||
EnumArray<I_GeoLocation::GeoLocationField, std::string> geo_location_data;
|
||||
auto env = Singleton::Consume<I_Environment>::by<HttpGeoFilter>();
|
||||
string source_id;
|
||||
auto maybe_source_id = env->get<std::string>(HttpTransactionData::source_identifier);
|
||||
if (!maybe_source_id.ok()) {
|
||||
dbgTrace(D_GEO_FILTER) << "failed to get source identifier from env";
|
||||
} else {
|
||||
source_id = maybe_source_id.unpack();
|
||||
}
|
||||
|
||||
for (const std::string& source : sources) {
|
||||
|
||||
@@ -343,7 +360,7 @@ private:
|
||||
|
||||
auto asset_location = i_geo_location->lookupLocation(maybe_source_ip.unpack());
|
||||
if (!asset_location.ok()) {
|
||||
dbgWarning(D_GEO_FILTER) << "Lookup location failed for source: " <<
|
||||
dbgDebug(D_GEO_FILTER) << "Lookup location failed for source: " <<
|
||||
source <<
|
||||
", Error: " <<
|
||||
asset_location.getErr();
|
||||
@@ -358,11 +375,15 @@ private:
|
||||
<< country_code
|
||||
<< ", country name: "
|
||||
<< country_name
|
||||
<< ", source ip address: "
|
||||
<< source;
|
||||
<< ", ip address: "
|
||||
<< source
|
||||
<< ", source identifier: "
|
||||
<< source_id;
|
||||
|
||||
|
||||
unordered_map<string, set<string>> exception_value_country_code = {
|
||||
{"countryCode", {country_code}}
|
||||
{"countryCode", {country_code}},
|
||||
{"sourceIdentifier", {source_id}}
|
||||
};
|
||||
auto matched_behavior_maybe = getBehaviorsVerdict(exception_value_country_code, geo_location_data);
|
||||
if (matched_behavior_maybe.ok()) {
|
||||
@@ -374,7 +395,8 @@ private:
|
||||
}
|
||||
|
||||
unordered_map<string, set<string>> exception_value_country_name = {
|
||||
{"countryName", {country_name}}
|
||||
{"countryName", {country_name}},
|
||||
{"sourceIdentifier", {source_id}}
|
||||
};
|
||||
matched_behavior_maybe = getBehaviorsVerdict(exception_value_country_name, geo_location_data);
|
||||
if (matched_behavior_maybe.ok()) {
|
||||
|
||||
@@ -29,6 +29,8 @@
|
||||
#include "pm_hook.h"
|
||||
#include "i_generic_rulebase.h"
|
||||
|
||||
#define DEFAULT_IPS_YIELD_COUNT 500
|
||||
|
||||
/// \namespace IPSSignatureSubTypes
|
||||
/// \brief Namespace containing subtypes for IPS signatures.
|
||||
namespace IPSSignatureSubTypes
|
||||
@@ -336,9 +338,23 @@ public:
|
||||
return metadata.getYear();
|
||||
}
|
||||
|
||||
bool
|
||||
isOk() const
|
||||
{
|
||||
return is_loaded;
|
||||
}
|
||||
|
||||
static void
|
||||
setYieldCounter(int new_yield_cnt)
|
||||
{
|
||||
yield_on_load_cnt = new_yield_cnt;
|
||||
}
|
||||
|
||||
private:
|
||||
IPSSignatureMetaData metadata;
|
||||
std::shared_ptr<BaseSignature> rule;
|
||||
bool is_loaded;
|
||||
static int yield_on_load_cnt;
|
||||
};
|
||||
|
||||
/// \class SignatureAndAction
|
||||
|
||||
@@ -98,6 +98,7 @@ public:
|
||||
registerListener();
|
||||
table = Singleton::Consume<I_Table>::by<IPSComp>();
|
||||
env = Singleton::Consume<I_Environment>::by<IPSComp>();
|
||||
updateSigsYieldCount();
|
||||
}
|
||||
|
||||
void
|
||||
@@ -307,6 +308,20 @@ public:
|
||||
|
||||
EventVerdict respond (const EndTransactionEvent &) override { return ACCEPT; }
|
||||
|
||||
void
|
||||
updateSigsYieldCount()
|
||||
{
|
||||
const char *ips_yield_env_str = getenv("CPNANO_IPS_LOAD_YIELD_CNT");
|
||||
int ips_yield_default = DEFAULT_IPS_YIELD_COUNT;
|
||||
if (ips_yield_env_str != nullptr) {
|
||||
dbgDebug(D_IPS) << "CPNANO_IPS_LOAD_YIELD_CNT env variable is set to " << ips_yield_env_str;
|
||||
ips_yield_default = atoi(ips_yield_env_str);
|
||||
}
|
||||
int yield_limit = getProfileAgentSettingWithDefault<int>(ips_yield_default, "ips.sigsYieldCnt");
|
||||
dbgDebug(D_IPS) << "Setting IPS yield count to " << yield_limit;
|
||||
IPSSignatureSubTypes::CompleteSignature::setYieldCounter(yield_limit);
|
||||
}
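// Resulting precedence for the signature-load yield count: the "ips.sigsYieldCnt" profile setting
// wins if present, otherwise the CPNANO_IPS_LOAD_YIELD_CNT environment variable, otherwise
// DEFAULT_IPS_YIELD_COUNT (500). A value of 0 disables yielding during signature load.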
|
||||
|
||||
private:
|
||||
static void setDrop(IPSEntry &state) { state.setDrop(); }
|
||||
static bool isDrop(const IPSEntry &state) { return state.isDrop(); }
|
||||
@@ -373,6 +388,7 @@ IPSComp::preload()
|
||||
registerExpectedConfigFile("ips", Config::ConfigFileType::Policy);
|
||||
registerExpectedConfigFile("ips", Config::ConfigFileType::Data);
|
||||
registerExpectedConfigFile("snort", Config::ConfigFileType::Policy);
|
||||
registerConfigLoadCb([this]() { pimpl->updateSigsYieldCount(); });
|
||||
|
||||
ParameterException::preload();
|
||||
|
||||
|
||||
@@ -45,6 +45,8 @@ static const map<string, IPSLevel> levels = {
{ "Very Low", IPSLevel::VERY_LOW }
};

int CompleteSignature::yield_on_load_cnt = DEFAULT_IPS_YIELD_COUNT;

static IPSLevel
getLevel(const string &level_string, const string &attr_name)
{
@@ -219,10 +221,37 @@ IPSSignatureMetaData::getYear() const
void
CompleteSignature::load(cereal::JSONInputArchive &ar)
{
ar(cereal::make_nvp("protectionMetadata", metadata));
RuleDetection rule_detection(metadata.getName());
ar(cereal::make_nvp("detectionRules", rule_detection));
rule = rule_detection.getRule();
static int sigs_load_counter = 0;
static I_Environment *env = Singleton::Consume<I_Environment>::by<IPSComp>();
static bool post_init = false;

if (!post_init) {
auto routine_id = Singleton::Consume<I_MainLoop>::by<IPSComp>()->getCurrentRoutineId();
if (routine_id.ok()) {
post_init = true;
dbgInfo(D_IPS) << "Loading signatures post init, enabling yield with limit " << yield_on_load_cnt;
}
}

try {
ar(cereal::make_nvp("protectionMetadata", metadata));
RuleDetection rule_detection(metadata.getName());
ar(cereal::make_nvp("detectionRules", rule_detection));
rule = rule_detection.getRule();
is_loaded = true;
} catch (cereal::Exception &e) {
is_loaded = false;
dbgWarning(D_IPS) << "Failed to load signature: " << e.what();
}

if (post_init && (yield_on_load_cnt > 0) && (++sigs_load_counter == yield_on_load_cnt)) {
sigs_load_counter = 0;
auto maybe_is_async = env->get<bool>("Is Async Config Load");
if (maybe_is_async.ok() && *maybe_is_async == true) {
dbgTrace(D_IPS) << "Yielding after " << yield_on_load_cnt << " signatures";
Singleton::Consume<I_MainLoop>::by<IPSComp>()->yield(false);
}
}
}
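The guarded yield at the end of load() is what keeps a large signature bundle from monopolizing the main loop during an asynchronous configuration load: after every yield_on_load_cnt successfully parsed signatures, the routine hands control back to the scheduler. A minimal standalone sketch of the same pattern, where yieldToScheduler() is a hypothetical stand-in for the agent's I_MainLoop::yield(false):

```cpp
#include <functional>
#include <vector>

// Hypothetical stand-in for the agent's cooperative-scheduler hook.
inline void yieldToScheduler() { /* in the agent: I_MainLoop::yield(false) */ }

// Load a batch of items, yielding back to the scheduler every `yield_every`
// items so other routines can run in between.
template <typename Item>
void
loadWithPeriodicYield(
    const std::vector<Item> &items,
    const std::function<void(const Item &)> &load_one,
    int yield_every)
{
    int counter = 0;
    for (const Item &item : items) {
        load_one(item);                               // e.g. parse one signature
        if (yield_every > 0 && ++counter == yield_every) {
            counter = 0;                              // reset, like sigs_load_counter
            yieldToScheduler();
        }
    }
}
```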
MatchType

@@ -367,7 +396,16 @@ SignatureAndAction::matchSilent(const Buffer &sample) const
if (method.ok()) log << LogField("httpMethod", method.unpack());

auto path = env->get<Buffer>("HTTP_PATH_DECODED");
if (path.ok()) log << LogField("httpUriPath", getSubString(path, 1536), LogFieldOption::XORANDB64);
if (path.ok()) {
log << LogField("httpUriPath", getSubString(path, 1536), LogFieldOption::XORANDB64);
} else {
auto transaction_path = env->get<string>(HttpTransactionData::uri_path_decoded);
if (transaction_path.ok()) {
auto uri_path = transaction_path.unpack();
auto question_mark = uri_path.find('?');
log << LogField("httpUriPath", uri_path.substr(0, question_mark), LogFieldOption::XORANDB64);
}
}

auto req_header = ips_state.getTransactionData(IPSCommonTypes::requests_header_for_log);
if (req_header.ok()) log << LogField("httpRequestHeaders", getSubString(req_header), LogFieldOption::XORANDB64);
@@ -485,13 +523,30 @@ SignatureAndAction::isMatchedPrevent(const Buffer &context_buffer, const set<PMP
auto method = env->get<string>(HttpTransactionData::method_ctx);
if (method.ok()) log << LogField("httpMethod", method.unpack());
uint max_size = getConfigurationWithDefault<uint>(1536, "IPS", "Max Field Size");
auto path = env->get<Buffer>("HTTP_PATH_DECODED");
if (path.ok() && trigger.isWebLogFieldActive(url_path)) {
log << LogField("httpUriPath", getSubString(path, max_size), LogFieldOption::XORANDB64);

if (trigger.isWebLogFieldActive(url_path)) {
auto path = env->get<Buffer>("HTTP_PATH_DECODED");
if (path.ok()) {
log << LogField("httpUriPath", getSubString(path, max_size), LogFieldOption::XORANDB64);
} else {
auto transaction_path = env->get<string>(HttpTransactionData::uri_path_decoded);
if (transaction_path.ok()) {
auto uri_path = transaction_path.unpack();
auto question_mark = uri_path.find('?');
log << LogField("httpUriPath", uri_path.substr(0, question_mark), LogFieldOption::XORANDB64);
}
}
}
auto query = env->get<Buffer>("HTTP_QUERY_DECODED");
if (query.ok() && trigger.isWebLogFieldActive(url_query)) {
log << LogField("httpUriQuery", getSubString(query, max_size), LogFieldOption::XORANDB64);
if (trigger.isWebLogFieldActive(url_query)) {
auto query = env->get<Buffer>("HTTP_QUERY_DECODED");
if (query.ok()) {
log << LogField("httpUriQuery", getSubString(query, max_size), LogFieldOption::XORANDB64);
} else {
auto transaction_query = env->get<string>(HttpTransactionData::uri_query_decoded);
if (transaction_query.ok()) {
log << LogField("httpUriQuery", transaction_query.unpack());
}
}
}

auto res_code = env->get<Buffer>("HTTP_RESPONSE_CODE");
@@ -533,7 +588,9 @@ IPSSignaturesResource::load(cereal::JSONInputArchive &ar)

all_signatures.reserve(sigs.size());
for (auto &sig : sigs) {
all_signatures.emplace_back(make_shared<CompleteSignature>(move(sig)));
if (sig.isOk()) {
all_signatures.emplace_back(make_shared<CompleteSignature>(move(sig)));
}
}
}
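When the decoded path buffer is not available, the fallback above derives httpUriPath from the full decoded URI and trims everything from the first '?'. Note that std::string::find returns npos when there is no query string, and substr(0, npos) then returns the whole path, so the same call handles both cases. A minimal illustration (standalone helper, not agent code):

```cpp
#include <iostream>
#include <string>

// Trim the query string (if any) from a decoded URI, mirroring the fallback
// logging path shown in the diff above.
static std::string
uriPathOnly(const std::string &uri)
{
    auto question_mark = uri.find('?');   // npos when there is no query string
    return uri.substr(0, question_mark);  // substr(0, npos) yields the whole string
}

int
main()
{
    std::cout << uriPathOnly("/login?user=admin") << '\n';  // prints "/login"
    std::cout << uriPathOnly("/healthz") << '\n';           // prints "/healthz"
    return 0;
}
```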
@@ -29,6 +29,8 @@ public:
{
comp.preload();
comp.init();
auto err = genError("not coroutine");
EXPECT_CALL(mainloop, getCurrentRoutineId()).WillRepeatedly(Return(Maybe<I_MainLoop::RoutineID>(err)));
}

~ComponentTest()

@@ -41,6 +41,8 @@ public:
EntryTest()
{
ON_CALL(table, getState(_)).WillByDefault(Return(ptr));
auto err = genError("not coroutine");
EXPECT_CALL(mock_mainloop, getCurrentRoutineId()).WillRepeatedly(Return(Maybe<I_MainLoop::RoutineID>(err)));
}

void

@@ -2,6 +2,7 @@
#include "cptest.h"
#include "environment.h"
#include "config_component.h"
#include "mock/mock_mainloop.h"

using namespace std;
using namespace testing;
@@ -61,6 +62,9 @@ TEST(resources, basic_resource)
{
ConfigComponent conf;
::Environment env;
NiceMock<MockMainLoop> mock_mainloop;
auto err = genError("not coroutine");
EXPECT_CALL(mock_mainloop, getCurrentRoutineId()).WillRepeatedly(Return(Maybe<I_MainLoop::RoutineID>(err)));

conf.preload();
@@ -60,7 +60,12 @@ public:
{
IPSHelper::has_deobfuscation = true;
generic_rulebase.preload();
env.preload();
env.init();

EXPECT_CALL(logs, getCurrentLogId()).Times(AnyNumber());
auto err = genError("not coroutine");
EXPECT_CALL(mock_mainloop, getCurrentRoutineId()).WillRepeatedly(Return(Maybe<I_MainLoop::RoutineID>(err)));
ON_CALL(table, getState(_)).WillByDefault(Return(&ips_state));
{
stringstream ss;
@@ -104,6 +109,12 @@ public:
cereal::JSONInputArchive ar(ss);
high_medium_confidance_signatures.load(ar);
}
{
stringstream ss;
ss << "[" << signature_performance_high << ", " << signature_broken << "]";
cereal::JSONInputArchive ar(ss);
single_broken_signature.load(ar);
}
}

~SignatureTest()
@@ -117,9 +128,6 @@ public:
void
loadExceptions()
{
env.preload();
env.init();

BasicRuleConfig::preload();
registerExpectedConfiguration<ParameterException>("rulebase", "exception");

@@ -189,6 +197,7 @@ public:
void
load(const IPSSignaturesResource &policy, const string &severity, const string &confidence)
{
Singleton::Consume<I_Environment>::from(env)->registerValue<bool>("Is Async Config Load", false);
setResource(policy, "IPS", "protections");
stringstream ss;
ss << "{";
@@ -250,6 +259,7 @@ public:
IPSSignaturesResource performance_signatures1;
IPSSignaturesResource performance_signatures2;
IPSSignaturesResource performance_signatures3;
IPSSignaturesResource single_broken_signature;
NiceMock<MockTable> table;
MockAgg mock_agg;

@@ -483,6 +493,26 @@ private:
"\"context\": [\"HTTP_REQUEST_BODY\", \"HTTP_RESPONSE_BODY\"]"
"}"
"}";

string signature_broken =
"{"
"\"protectionMetadata\": {"
"\"protectionName\": \"BrokenTest\","
"\"maintrainId\": \"101\","
"\"severity\": \"Medium High\","
"\"confidenceLevel\": \"Low\","
"\"performanceImpact\": \"High\","
"\"lastUpdate\": \"20210420\","
"\"tags\": [],"
"\"cveList\": []"
"},"
"\"detectionRules\": {"
"\"type\": \"simple\","
"\"SSM\": \"\","
"\"keywosrds\": \"data: \\\"www\\\";\","
"\"context\": [\"HTTP_REQUEST_BODY\", \"HTTP_RESPONSE_BODY\"]"
"}"
"}";
};

TEST_F(SignatureTest, basic_load_of_signatures)
@@ -665,3 +695,14 @@ TEST_F(SignatureTest, high_confidance_signatures_matching)
expectLog("\"protectionId\": \"Test4\"", "\"matchedSignatureConfidence\": \"Medium\"");
EXPECT_FALSE(checkData("mmm"));
}

TEST_F(SignatureTest, broken_signature)
{
load(single_broken_signature, "Low or above", "Low");
EXPECT_FALSE(checkData("ggg"));

expectLog("\"matchedSignaturePerformance\": \"High\"");
EXPECT_TRUE(checkData("fff"));

EXPECT_FALSE(checkData("www"));
}
@@ -131,8 +131,12 @@ public:
EventVerdict
respond(const WaitTransactionEvent &) override
{
dbgFlow(D_L7_ACCESS_CONTROL) << "Handling wait verdict";
if (!isAppEnabled()) {
dbgTrace(D_L7_ACCESS_CONTROL) << "Returning Accept verdict as the Layer-7 Access Control app is disabled";
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
}

dbgTrace(D_L7_ACCESS_CONTROL) << "Handling wait verdict";
return handleEvent();
}

@@ -22,4 +22,5 @@ add_library(local_policy_mgmt_gen
access_control_practice.cc
configmaps.cc
reverse_proxy_section.cc
policy_activation_data.cc
)
@@ -497,7 +497,8 @@ WebAppSection::WebAppSection(
const AppsecPracticeAntiBotSection &_anti_bots,
const LogTriggerSection &parsed_log_trigger,
const AppSecTrustedSources &parsed_trusted_sources,
const NewAppSecWebAttackProtections &protections)
const NewAppSecWebAttackProtections &protections,
const vector<InnerException> &exceptions)
:
application_urls(_application_urls),
asset_id(_asset_id),
@@ -541,6 +542,10 @@ WebAppSection::WebAppSection(
overrides.push_back(AppSecOverride(source_ident));
}

for (const auto &exception : exceptions) {
overrides.push_back(AppSecOverride(exception));
}

}

// LCOV_EXCL_STOP

@@ -298,7 +298,8 @@ public:
const AppsecPracticeAntiBotSection &_anti_bots,
const LogTriggerSection &parsed_log_trigger,
const AppSecTrustedSources &parsed_trusted_sources,
const NewAppSecWebAttackProtections &protections);
const NewAppSecWebAttackProtections &protections,
const std::vector<InnerException> &exceptions);

void save(cereal::JSONOutputArchive &out_ar) const;
@@ -48,7 +48,7 @@ public:
void init();

std::tuple<std::map<std::string, AppsecLinuxPolicy>, std::map<std::string, V1beta2AppsecLinuxPolicy>>
createAppsecPoliciesFromIngresses();
createAppsecPolicies();
void getClusterId() const;

private:
@@ -101,12 +101,18 @@ private:
) const;

template<class T, class K>
void createPolicy(
void createPolicyFromIngress(
T &appsec_policy,
std::map<std::string, T> &policies,
std::map<AnnotationKeys, std::string> &annotations_values,
const SingleIngressData &item) const;

template<class T, class K>
void createPolicyFromActivation(
T &appsec_policy,
std::map<std::string, T> &policies,
const EnabledPolicy &policy) const;

std::tuple<Maybe<AppsecLinuxPolicy>, Maybe<V1beta2AppsecLinuxPolicy>> createAppsecPolicyK8s(
const std::string &policy_name,
const std::string &ingress_mode

@@ -170,6 +170,7 @@ public:
ss.str(modified_json);
try {
cereal::JSONInputArchive in_ar(ss);
in_ar(cereal::make_nvp("apiVersion", api_version));
in_ar(cereal::make_nvp("spec", spec));
in_ar(cereal::make_nvp("metadata", meta_data));
} catch (cereal::Exception &e) {
@@ -191,11 +192,18 @@ public:
return meta_data;
}

const std::string &
getApiVersion() const
{
return api_version;
}

const T & getSpec() const { return spec; }

private:
T spec;
AppsecSpecParserMetaData meta_data;
std::string api_version;
};

#endif // __LOCAL_POLICY_COMMON_H__
@@ -0,0 +1,89 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __POLICY_ACTIVATION_DATA_H__
#define __POLICY_ACTIVATION_DATA_H__

#include <vector>
#include <map>

#include "config.h"
#include "debug.h"
#include "rest.h"
#include "cereal/archives/json.hpp"
#include <cereal/types/map.hpp>
#include "customized_cereal_map.h"

#include "local_policy_common.h"

class PolicyActivationMetadata
{
public:
void load(cereal::JSONInputArchive &archive_in);

private:
std::string name;
};

class EnabledPolicy
{
public:
void load(cereal::JSONInputArchive &archive_in);

const std::string & getName() const;
const std::vector<std::string> & getHosts() const;

private:
std::string name;
std::vector<std::string> hosts;
};

class PolicyActivationSpec
{
public:
void load(cereal::JSONInputArchive &archive_in);

const std::vector<EnabledPolicy> & getPolicies() const;

private:
std::string appsec_class_name;
std::vector<EnabledPolicy> policies;
};

class SinglePolicyActivationData
{
public:
void load(cereal::JSONInputArchive &archive_in);

const PolicyActivationSpec & getSpec() const;

private:
std::string api_version;
std::string kind;
PolicyActivationMetadata metadata;
PolicyActivationSpec spec;
};

class PolicyActivationData : public ClientRest
{
public:
bool loadJson(const std::string &json);

const std::vector<SinglePolicyActivationData> & getItems() const;

private:
std::string api_version;
std::vector<SinglePolicyActivationData> items;
};

#endif // __POLICY_ACTIVATION_DATA_H__
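The new header above is filled in from the raw JSON that the agent fetches for policyactivations resources. A minimal usage sketch, assuming only the field names parsed by the loaders shown later in this diff (apiVersion, items, spec.enabledPolicies with name and hosts); the JSON payload below is illustrative, not a real cluster response:

```cpp
#include <iostream>
#include <string>

#include "policy_activation_data.h"

int
main()
{
    // Payload shaped like the fields the loaders parse. loadJson() strips the
    // last character of the input, so the trailing newline is intentional.
    std::string json =
        "{"
        "  \"apiVersion\": \"openappsec.io/v1beta2\","
        "  \"items\": [{"
        "    \"apiVersion\": \"openappsec.io/v1beta2\","
        "    \"kind\": \"PolicyActivation\","
        "    \"metadata\": {\"name\": \"default-activation\"},"
        "    \"spec\": {"
        "      \"appsecClassName\": \"\","
        "      \"enabledPolicies\": [{"
        "        \"name\": \"my-policy\","
        "        \"hosts\": [\"www.example.com\"]"
        "      }]"
        "    }"
        "  }]"
        "}"
        "\n";

    PolicyActivationData activations;
    if (!activations.loadJson(json)) {
        std::cout << "failed to parse policy activations" << std::endl;
        return 1;
    }
    for (const auto &item : activations.getItems()) {
        for (const auto &policy : item.getSpec().getPolicies()) {
            std::cout << policy.getName() << " enabled on "
                      << policy.getHosts().size() << " host(s)" << std::endl;
        }
    }
    return 0;
}
```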
@@ -32,6 +32,7 @@
#include "i_messaging.h"
#include "appsec_practice_section.h"
#include "ingress_data.h"
#include "policy_activation_data.h"
#include "settings_section.h"
#include "triggers_section.h"
#include "local_policy_common.h"
@@ -205,7 +206,8 @@ private:
const RulesConfigRulebase& rule_config,
const std::string &practice_id, const std::string &full_url,
const std::string &default_mode,
std::map<AnnotationTypes, std::string> &rule_annotations
std::map<AnnotationTypes, std::string> &rule_annotations,
std::vector<InnerException>
);

void
@@ -515,17 +515,6 @@ K8sPolicyUtils::createAppsecPolicyK8sFromV1beta2Crds(
|
||||
}
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
bool
|
||||
doesVersionExist(const map<string, string> &annotations, const string &version)
|
||||
{
|
||||
for (auto annotation : annotations) {
|
||||
if(annotation.second.find(version) != std::string::npos) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
std::tuple<Maybe<AppsecLinuxPolicy>, Maybe<V1beta2AppsecLinuxPolicy>>
|
||||
K8sPolicyUtils::createAppsecPolicyK8s(const string &policy_name, const string &ingress_mode) const
|
||||
{
|
||||
@@ -534,7 +523,7 @@ K8sPolicyUtils::createAppsecPolicyK8s(const string &policy_name, const string &i
|
||||
);
|
||||
|
||||
if (!maybe_appsec_policy_spec.ok() ||
|
||||
!doesVersionExist(maybe_appsec_policy_spec.unpack().getMetaData().getAnnotations(), "v1beta1")
|
||||
maybe_appsec_policy_spec.unpack().getApiVersion().find("v1beta1") == std::string::npos
|
||||
) {
|
||||
try {
|
||||
std::string v1beta1_error =
|
||||
@@ -577,7 +566,7 @@ K8sPolicyUtils::createAppsecPolicyK8s(const string &policy_name, const string &i
|
||||
|
||||
template<class T, class K>
|
||||
void
|
||||
K8sPolicyUtils::createPolicy(
|
||||
K8sPolicyUtils::createPolicyFromIngress(
|
||||
T &appsec_policy,
|
||||
map<std::string, T> &policies,
|
||||
map<AnnotationKeys, string> &annotations_values,
|
||||
@@ -615,10 +604,35 @@ K8sPolicyUtils::createPolicy(
|
||||
}
|
||||
}
|
||||
|
||||
std::tuple<map<string, AppsecLinuxPolicy>, map<string, V1beta2AppsecLinuxPolicy>>
|
||||
K8sPolicyUtils::createAppsecPoliciesFromIngresses()
|
||||
template<class T, class K>
|
||||
void
|
||||
K8sPolicyUtils::createPolicyFromActivation(
|
||||
T &appsec_policy,
|
||||
map<std::string, T> &policies,
|
||||
const EnabledPolicy &policy) const
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Getting all policy object from Ingresses";
|
||||
if (policies.find(policy.getName()) == policies.end()) {
|
||||
policies[policy.getName()] = appsec_policy;
|
||||
}
|
||||
auto default_mode = appsec_policy.getAppsecPolicySpec().getDefaultRule().getMode();
|
||||
|
||||
for (const string &host : policy.getHosts()) {
|
||||
if (!appsec_policy.getAppsecPolicySpec().isAssetHostExist(host)) {
|
||||
dbgTrace(D_LOCAL_POLICY)
|
||||
<< "Inserting Host data to the specific asset set:"
|
||||
<< "URL: '"
|
||||
<< host
|
||||
<< "'";
|
||||
K ingress_rule = K(host, default_mode);
|
||||
policies[policy.getName()].addSpecificRule(ingress_rule);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::tuple<map<string, AppsecLinuxPolicy>, map<string, V1beta2AppsecLinuxPolicy>>
|
||||
K8sPolicyUtils::createAppsecPolicies()
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Getting all policy object from Ingresses and PolicyActivation";
|
||||
map<string, AppsecLinuxPolicy> v1bet1_policies;
|
||||
map<string, V1beta2AppsecLinuxPolicy> v1bet2_policies;
|
||||
auto maybe_ingress = getObjectFromCluster<IngressData>("/apis/networking.k8s.io/v1/ingresses");
|
||||
@@ -628,7 +642,7 @@ K8sPolicyUtils::createAppsecPoliciesFromIngresses()
|
||||
dbgWarning(D_LOCAL_POLICY)
|
||||
<< "Failed to retrieve K8S Ingress configurations. Error: "
|
||||
<< maybe_ingress.getErr();
|
||||
return make_tuple(v1bet1_policies, v1bet2_policies);
|
||||
maybe_ingress = IngressData{};
|
||||
}
|
||||
|
||||
|
||||
@@ -658,19 +672,54 @@ K8sPolicyUtils::createAppsecPoliciesFromIngresses()
|
||||
|
||||
if (!std::get<0>(maybe_appsec_policy).ok()) {
|
||||
auto appsec_policy=std::get<1>(maybe_appsec_policy).unpack();
|
||||
createPolicy<V1beta2AppsecLinuxPolicy, NewParsedRule>(
|
||||
createPolicyFromIngress<V1beta2AppsecLinuxPolicy, NewParsedRule>(
|
||||
appsec_policy,
|
||||
v1bet2_policies,
|
||||
annotations_values,
|
||||
item);
|
||||
} else {
|
||||
auto appsec_policy=std::get<0>(maybe_appsec_policy).unpack();
|
||||
createPolicy<AppsecLinuxPolicy, ParsedRule>(
|
||||
createPolicyFromIngress<AppsecLinuxPolicy, ParsedRule>(
|
||||
appsec_policy,
|
||||
v1bet1_policies,
|
||||
annotations_values,
|
||||
item);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
string ns_suffix = getAppSecScopeType() == "namespaced" ? "ns" : "";
|
||||
string ns = getAppSecScopeType() == "namespaced" ? "namespaces/" : "";
|
||||
auto maybe_policy_activation = getObjectFromCluster<PolicyActivationData>(
|
||||
"/apis/openappsec.io/v1beta2/" + ns + agent_ns + "policyactivations" + ns_suffix
|
||||
);
|
||||
|
||||
if (!maybe_policy_activation.ok()) {
|
||||
dbgWarning(D_LOCAL_POLICY)
|
||||
<< "Failed to retrieve K8S PolicyActivation configurations. Error: "
|
||||
<< maybe_policy_activation.getErr();
|
||||
return make_tuple(v1bet1_policies, v1bet2_policies);
|
||||
}
|
||||
|
||||
PolicyActivationData policy_activation = maybe_policy_activation.unpack();
|
||||
for (const SinglePolicyActivationData &item : policy_activation.getItems()) {
|
||||
for (const auto &policy : item.getSpec().getPolicies()) {
|
||||
auto maybe_appsec_policy = createAppsecPolicyK8s(policy.getName(), "");
|
||||
|
||||
if (!std::get<1>(maybe_appsec_policy).ok()) {
|
||||
dbgWarning(D_LOCAL_POLICY)
|
||||
<< "Failed to create appsec policy. v1beta2 Error: "
|
||||
<< std::get<1>(maybe_appsec_policy).getErr();
|
||||
continue;
|
||||
} else {
|
||||
auto appsec_policy=std::get<1>(maybe_appsec_policy).unpack();
|
||||
createPolicyFromActivation<V1beta2AppsecLinuxPolicy, NewParsedRule>(
|
||||
appsec_policy,
|
||||
v1bet2_policies,
|
||||
policy);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return make_tuple(v1bet1_policies, v1bet2_policies);
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@
|
||||
#include "customized_cereal_map.h"
|
||||
#include "include/appsec_practice_section.h"
|
||||
#include "include/ingress_data.h"
|
||||
#include "include/policy_activation_data.h"
|
||||
#include "include/settings_section.h"
|
||||
#include "include/triggers_section.h"
|
||||
#include "include/local_policy_common.h"
|
||||
@@ -85,7 +86,7 @@ public:
|
||||
K8sPolicyUtils k8s_policy_utils;
|
||||
k8s_policy_utils.init();
|
||||
|
||||
auto appsec_policies = k8s_policy_utils.createAppsecPoliciesFromIngresses();
|
||||
auto appsec_policies = k8s_policy_utils.createAppsecPolicies();
|
||||
if (!std::get<0>(appsec_policies).empty()) {
|
||||
return policy_maker_utils.proccesMultipleAppsecPolicies<AppsecLinuxPolicy, ParsedRule>(
|
||||
std::get<0>(appsec_policies),
|
||||
|
||||
@@ -69,7 +69,7 @@ Identifier::load(cereal::JSONInputArchive &archive_in)
|
||||
dbgWarning(D_LOCAL_POLICY) << "AppSec identifier invalid: " << identifier;
|
||||
identifier = "sourceip";
|
||||
}
|
||||
parseMandatoryAppsecJSONKey<vector<string>>("value", value, archive_in);
|
||||
parseAppsecJSONKey<vector<string>>("value", value, archive_in);
|
||||
}
|
||||
|
||||
const string &
|
||||
|
||||
components/security_apps/local_policy_mgmt_gen/policy_activation_data.cc (new executable file, 103 lines)
@@ -0,0 +1,103 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "policy_activation_data.h"
|
||||
#include "customized_cereal_map.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_LOCAL_POLICY);
|
||||
|
||||
void
|
||||
PolicyActivationMetadata::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "PolicyActivationMetadata load";
|
||||
parseAppsecJSONKey<string>("name", name, archive_in);
|
||||
}
|
||||
|
||||
void
|
||||
EnabledPolicy::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading policyActivation enabled policy";
|
||||
parseMandatoryAppsecJSONKey<vector<string>>("hosts", hosts, archive_in);
|
||||
parseAppsecJSONKey<string>("name", name, archive_in);
|
||||
}
|
||||
|
||||
const string &
|
||||
EnabledPolicy::getName() const
|
||||
{
|
||||
return name;
|
||||
}
|
||||
|
||||
const vector<string> &
|
||||
EnabledPolicy::getHosts() const
|
||||
{
|
||||
return hosts;
|
||||
}
|
||||
|
||||
void
|
||||
PolicyActivationSpec::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "PolicyActivationSpec load";
|
||||
parseAppsecJSONKey<string>("appsecClassName", appsec_class_name, archive_in);
|
||||
parseMandatoryAppsecJSONKey<vector<EnabledPolicy>>("enabledPolicies", policies, archive_in);
|
||||
}
|
||||
|
||||
const vector<EnabledPolicy> &
|
||||
PolicyActivationSpec::getPolicies() const
|
||||
{
|
||||
return policies;
|
||||
}
|
||||
|
||||
void
|
||||
SinglePolicyActivationData::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading single policy activation data";
|
||||
parseAppsecJSONKey<string>("apiVersion", api_version, archive_in);
|
||||
parseAppsecJSONKey<string>("kind", kind, archive_in);
|
||||
parseAppsecJSONKey<PolicyActivationMetadata>("metadata", metadata, archive_in);
|
||||
parseAppsecJSONKey<PolicyActivationSpec>("spec", spec, archive_in);
|
||||
}
|
||||
|
||||
const PolicyActivationSpec &
|
||||
SinglePolicyActivationData::getSpec() const
|
||||
{
|
||||
return spec;
|
||||
}
|
||||
|
||||
bool
|
||||
PolicyActivationData::loadJson(const string &json)
|
||||
{
|
||||
string modified_json = json;
|
||||
modified_json.pop_back();
|
||||
stringstream in;
|
||||
in.str(modified_json);
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading policy activations data";
|
||||
try {
|
||||
cereal::JSONInputArchive in_ar(in);
|
||||
in_ar(
|
||||
cereal::make_nvp("apiVersion", api_version),
|
||||
cereal::make_nvp("items", items)
|
||||
);
|
||||
} catch (cereal::Exception &e) {
|
||||
dbgError(D_LOCAL_POLICY) << "Failed to load policy activations data JSON. Error: " << e.what();
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
const vector<SinglePolicyActivationData> &
|
||||
PolicyActivationData::getItems() const
|
||||
{
|
||||
return items;
|
||||
}
|
||||
@@ -928,7 +928,6 @@ createMultiRulesSections(
|
||||
PracticeSection practice = PracticeSection(practice_id, practice_type, practice_name);
|
||||
vector<ParametersSection> exceptions_result;
|
||||
for (auto exception : exceptions) {
|
||||
|
||||
const auto &exception_name = exception.first;
|
||||
for (const auto &inner_exception : exception.second) {
|
||||
exceptions_result.push_back(ParametersSection(inner_exception.getBehaviorId(), exception_name));
|
||||
@@ -1220,7 +1219,8 @@ PolicyMakerUtils::createWebAppSection(
|
||||
const string &practice_id,
|
||||
const string &full_url,
|
||||
const string &default_mode,
|
||||
map<AnnotationTypes, string> &rule_annotations)
|
||||
map<AnnotationTypes, string> &rule_annotations,
|
||||
vector<InnerException> rule_inner_exceptions)
|
||||
{
|
||||
auto apssec_practice =
|
||||
getAppsecPracticeSpec<V1beta2AppsecLinuxPolicy, NewAppSecPracticeSpec>(
|
||||
@@ -1255,7 +1255,8 @@ PolicyMakerUtils::createWebAppSection(
|
||||
apssec_practice.getAntiBot(),
|
||||
log_triggers[rule_annotations[AnnotationTypes::TRIGGER]],
|
||||
trusted_sources[rule_annotations[AnnotationTypes::TRUSTED_SOURCES]],
|
||||
apssec_practice.getWebAttacks().getProtections()
|
||||
apssec_practice.getWebAttacks().getProtections(),
|
||||
rule_inner_exceptions
|
||||
);
|
||||
web_apps[rule_config.getAssetName()] = web_app;
|
||||
}
|
||||
@@ -1366,7 +1367,8 @@ PolicyMakerUtils::createThreatPreventionPracticeSections(
|
||||
practice_id,
|
||||
asset_name,
|
||||
default_mode,
|
||||
rule_annotations);
|
||||
rule_annotations,
|
||||
inner_exceptions[rule_annotations[AnnotationTypes::EXCEPTION]]);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@ add_subdirectory(details_resolver)
|
||||
add_subdirectory(health_check)
|
||||
add_subdirectory(health_check_manager)
|
||||
add_subdirectory(updates_process_reporter)
|
||||
add_subdirectory(env_details)
|
||||
add_subdirectory(external_sdk_server)
|
||||
|
||||
#add_subdirectory(orchestration_ut)
|
||||
|
||||
@@ -41,12 +41,13 @@ public:
|
||||
|
||||
string getAgentVersion() override;
|
||||
bool isKernelVersion3OrHigher() override;
|
||||
bool isGw() override;
|
||||
bool isGwNotVsx() override;
|
||||
bool isVersionAboveR8110() override;
|
||||
bool isReverseProxy() override;
|
||||
bool isCloudStorageEnabled() override;
|
||||
Maybe<tuple<string, string, string, string, string>> readCloudMetadata() override;
|
||||
Maybe<tuple<string, string, string>> parseNginxMetadata() override;
|
||||
Maybe<tuple<string, string, string, string>> parseNginxMetadata() override;
|
||||
#if defined(gaia) || defined(smb)
|
||||
bool compareCheckpointVersion(int cp_version, std::function<bool(int, int)> compare_operator) const override;
|
||||
#endif // gaia || smb
|
||||
@@ -80,7 +81,9 @@ DetailsResolver::Impl::getHostname()
|
||||
Maybe<string>
|
||||
DetailsResolver::Impl::getPlatform()
|
||||
{
|
||||
#if defined(gaia)
|
||||
#if defined(gaia_arm)
|
||||
return string("gaia_arm");
|
||||
#elif defined(gaia)
|
||||
return string("gaia");
|
||||
#elif defined(arm32_rpi)
|
||||
return string("glibc");
|
||||
@@ -165,6 +168,19 @@ DetailsResolver::Impl::isKernelVersion3OrHigher()
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
DetailsResolver::Impl::isGw()
|
||||
{
|
||||
#if defined(gaia) || defined(smb)
|
||||
static const string is_gw_cmd = "cpprod_util FwIsFirewallModule";
|
||||
auto is_gw = DetailsResolvingHanlder::getCommandOutput(is_gw_cmd);
|
||||
if (is_gw.ok() && !is_gw.unpack().empty()) {
|
||||
return is_gw.unpack().front() == '1';
|
||||
}
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
DetailsResolver::Impl::isGwNotVsx()
|
||||
{
|
||||
@@ -228,7 +244,7 @@ isNoResponse(const string &cmd)
|
||||
return !res.ok() || res.unpack().empty();
|
||||
}
|
||||
|
||||
Maybe<tuple<string, string, string>>
|
||||
Maybe<tuple<string, string, string, string>>
|
||||
DetailsResolver::Impl::parseNginxMetadata()
|
||||
{
|
||||
auto output_path = getConfigurationWithDefault<string>(
|
||||
@@ -236,11 +252,22 @@ DetailsResolver::Impl::parseNginxMetadata()
|
||||
"orchestration",
|
||||
"Nginx metadata temp file"
|
||||
);
|
||||
|
||||
const string &filesystem_path_config = getFilesystemPathConfig();
|
||||
|
||||
const string srcipt_exe_cmd =
|
||||
getFilesystemPathConfig() +
|
||||
filesystem_path_config +
|
||||
"/scripts/cp-nano-makefile-generator.sh -f -o " +
|
||||
output_path;
|
||||
|
||||
const string script_fresh_exe_cmd =
|
||||
filesystem_path_config +
|
||||
"/scripts/cp-nano-makefile-generator-fresh.sh save --save-location " +
|
||||
output_path +
|
||||
" --strings_bin_path " +
|
||||
filesystem_path_config +
|
||||
"/bin/strings";
|
||||
|
||||
dbgTrace(D_ORCHESTRATOR) << "Details resolver, srcipt exe cmd: " << srcipt_exe_cmd;
|
||||
if (isNoResponse("which nginx") && isNoResponse("which kong")) {
|
||||
return genError("Nginx or Kong isn't installed");
|
||||
@@ -263,7 +290,7 @@ DetailsResolver::Impl::parseNginxMetadata()
|
||||
return genError("Cannot open the file with nginx metadata, File: " + output_path);
|
||||
}
|
||||
|
||||
string line;
|
||||
string line;
|
||||
while (getline(input_stream, line)) {
|
||||
lines.push_back(line);
|
||||
}
|
||||
@@ -277,7 +304,37 @@ DetailsResolver::Impl::parseNginxMetadata()
|
||||
<< " Error: " << exception.what();
|
||||
}
|
||||
|
||||
if (!isNoResponse("which nginx")) {
|
||||
auto script_output = DetailsResolvingHanlder::getCommandOutput(script_fresh_exe_cmd);
|
||||
if (!script_output.ok()) {
|
||||
return genError("Failed to generate nginx fresh metadata, Error: " + script_output.getErr());
|
||||
}
|
||||
|
||||
try {
|
||||
ifstream input_stream(output_path);
|
||||
if (!input_stream) {
|
||||
return genError("Cannot open the file with nginx fresh metadata, File: " + output_path);
|
||||
}
|
||||
|
||||
string line;
|
||||
while (getline(input_stream, line)) {
|
||||
if (line.find("NGX_MODULE_SIGNATURE") == 0) {
|
||||
lines.push_back(line);
|
||||
}
|
||||
}
|
||||
input_stream.close();
|
||||
|
||||
orchestration_tools->removeFile(output_path);
|
||||
} catch (const ifstream::failure &exception) {
|
||||
dbgWarning(D_ORCHESTRATOR)
|
||||
<< "Cannot read the file with required nginx fresh metadata."
|
||||
<< " File: " << output_path
|
||||
<< " Error: " << exception.what();
|
||||
}
|
||||
}
|
||||
|
||||
if (lines.size() == 0) return genError("Failed to read nginx metadata file");
|
||||
string nginx_signature;
|
||||
string nginx_version;
|
||||
string config_opt;
|
||||
string cc_opt;
|
||||
@@ -292,6 +349,11 @@ DetailsResolver::Impl::parseNginxMetadata()
|
||||
nginx_version = "nginx-" + line.substr(eq_index + 1);
|
||||
continue;
|
||||
}
|
||||
if (line.find("NGX_MODULE_SIGNATURE") != string::npos) {
|
||||
auto eq_index = line.find("=");
|
||||
nginx_signature = line.substr(eq_index + 1);
|
||||
continue;
|
||||
}
|
||||
if (line.find("EXTRA_CC_OPT") != string::npos) {
|
||||
auto eq_index = line.find("=");
|
||||
cc_opt = line.substr(eq_index + 1);
|
||||
@@ -301,7 +363,7 @@ DetailsResolver::Impl::parseNginxMetadata()
|
||||
if (line.back() == '\\') line.pop_back();
|
||||
config_opt += line;
|
||||
}
|
||||
return make_tuple(config_opt, cc_opt, nginx_version);
|
||||
return make_tuple(config_opt, cc_opt, nginx_version, nginx_signature);
|
||||
}
|
||||
|
||||
Maybe<tuple<string, string, string, string, string>>
|
||||
|
||||
@@ -26,9 +26,7 @@
|
||||
Maybe<string>
|
||||
checkSAMLSupportedBlade(const string &command_output)
|
||||
{
|
||||
// uncomment when vpn will support SAML authentication
|
||||
// string supportedBlades[3] = {"identityServer", "vpn", "cvpn"};
|
||||
string supportedBlades[1] = {"identityServer"};
|
||||
string supportedBlades[3] = {"identityServer", "vpn", "cvpn"};
|
||||
for(const string &blade : supportedBlades) {
|
||||
if (command_output.find(blade) != string::npos) {
|
||||
return string("true");
|
||||
@@ -49,6 +47,17 @@ checkIDABlade(const string &command_output)
|
||||
return string("false");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkVPNBlade(const string &command_output)
|
||||
{
|
||||
string vpnBlade = "vpn";
|
||||
if (command_output.find(vpnBlade) != string::npos) {
|
||||
return string("true");
|
||||
}
|
||||
|
||||
return string("false");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkSAMLPortal(const string &command_output)
|
||||
{
|
||||
@@ -60,9 +69,9 @@ checkSAMLPortal(const string &command_output)
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkPepIdaIdnStatus(const string &command_output)
|
||||
checkInfinityIdentityEnabled(const string &command_output)
|
||||
{
|
||||
if (command_output.find("nac_pep_identity_next_enabled = 1") != string::npos) {
|
||||
if (command_output.find("get_identities_from_infinity_identity (true)") != string::npos) {
|
||||
return string("true");
|
||||
}
|
||||
return string("false");
|
||||
@@ -71,7 +80,18 @@ checkPepIdaIdnStatus(const string &command_output)
|
||||
Maybe<string>
|
||||
getRequiredNanoServices(const string &command_output)
|
||||
{
|
||||
return command_output;
|
||||
string idaRequiredServices[2] = {"idaSaml", "idaIdn"};
|
||||
string platform_str = "gaia";
|
||||
#if defined(gaia_arm)
|
||||
platform_str = "gaia_arm";
|
||||
#endif // gaia_arm
|
||||
string result = "";
|
||||
for(const string &serv : idaRequiredServices) {
|
||||
string add_service = serv + "_" + platform_str;
|
||||
result = result + add_service + ";";
|
||||
}
|
||||
command_output.empty(); // overcome unused variable
|
||||
return result;
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
@@ -79,9 +99,6 @@ checkIDP(shared_ptr<istream> file_stream)
|
||||
{
|
||||
string line;
|
||||
while (getline(*file_stream, line)) {
|
||||
if (line.find("<identity_portal/>") != string::npos) {
|
||||
return string("false");
|
||||
}
|
||||
if (line.find("<central_idp ") != string::npos) {
|
||||
return string("true");
|
||||
}
|
||||
@@ -90,6 +107,26 @@ checkIDP(shared_ptr<istream> file_stream)
|
||||
return string("false");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkVPNCIDP(shared_ptr<istream> file_stream)
|
||||
{
|
||||
string line;
|
||||
while (getline(*file_stream, line)) {
|
||||
if (line.find("<vpn") != string::npos) {
|
||||
while (getline(*file_stream, line)) {
|
||||
if (line.find("<central_idp ") != string::npos) {
|
||||
return string("true");
|
||||
}
|
||||
if (line.find("</vpn>") != string::npos) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return string("false");
|
||||
}
|
||||
|
||||
#endif // gaia
|
||||
|
||||
#if defined(gaia) || defined(smb)
|
||||
@@ -129,6 +166,17 @@ getIsAiopsRunning(const string &command_output)
|
||||
return command_output;
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getInterfaceMgmtIp(const string &command_output)
|
||||
{
|
||||
if (!command_output.empty()) {
|
||||
return command_output;
|
||||
}
|
||||
|
||||
return genError("Eth Management IP was not found");
|
||||
}
|
||||
|
||||
|
||||
Maybe<string>
|
||||
checkHasSDWan(const string &command_output)
|
||||
{
|
||||
@@ -342,6 +390,28 @@ getSMCBasedMgmtName(const string &command_output)
|
||||
return getAttr(command_output, "Mgmt object Name was not found");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getSmbObjectUid(const string &command_output)
|
||||
{
|
||||
static const char centrally_managed_comd_output = '0';
|
||||
|
||||
if (command_output.empty() || command_output[0] != centrally_managed_comd_output) {
|
||||
return genError("Object UUID was not found");
|
||||
}
|
||||
|
||||
Maybe<string> obj_uuid = getAttrFromCpsdwanGetDataJson("uuid");
|
||||
if (obj_uuid.ok()) {
|
||||
return obj_uuid.unpack();
|
||||
}
|
||||
|
||||
static const string obj_path = (getenv("FWDIR") ? string(getenv("FWDIR")) : "") + "/database/myown.C";
|
||||
auto file_stream = std::make_shared<std::ifstream>(obj_path);
|
||||
if (!file_stream->is_open()) {
|
||||
return genError("Failed to open the object file");
|
||||
}
|
||||
return getMgmtObjAttr(file_stream, "uuid ");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getSmbObjectName(const string &command_output)
|
||||
{
|
||||
@@ -418,6 +488,14 @@ extractManagements(const string &command_output)
|
||||
json_output += "]";
|
||||
return json_output;
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkQosLegacyBlade(const string &command_output)
|
||||
{
|
||||
if (command_output == "true" || command_output == "false") return command_output;
|
||||
|
||||
return string("false");
|
||||
}
|
||||
#endif // gaia || smb
|
||||
|
||||
#if defined(gaia)
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
// shell command execution output as its input
|
||||
|
||||
#ifdef SHELL_PRE_CMD
|
||||
#if defined(gaia) || defined(smb)
|
||||
#if defined(gaia) || defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_PRE_CMD("read sdwan data",
|
||||
"(cpsdwan get_data > /tmp/cpsdwan_getdata_orch.json~) "
|
||||
"&& (mv /tmp/cpsdwan_getdata_orch.json~ /tmp/cpsdwan_getdata_orch.json)")
|
||||
@@ -40,17 +40,20 @@ SHELL_PRE_CMD("gunzip local.cfg", "gunzip -c $FWDIR/state/local/FW1/local.cfg.gz
|
||||
#endif
|
||||
|
||||
#ifdef SHELL_CMD_HANDLER
|
||||
#if defined(gaia) || defined(smb)
|
||||
#if defined(gaia) || defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_CMD_HANDLER("cpProductIntegrationMgmtObjectType", "cpprod_util CPPROD_IsMgmtMachine", getMgmtObjType)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtObjectUid",
|
||||
"mgmt_cli --format json -r true show-session | jq -r '.[\"connected-server\"].uid'",
|
||||
getMgmtObjUid
|
||||
)
|
||||
SHELL_CMD_HANDLER("prerequisitesForHorizonTelemetry",
|
||||
"FS_PATH=<FILESYSTEM-PREFIX>; [ -f ${FS_PATH}/cp-nano-horizon-telemetry-prerequisites.log ] "
|
||||
"&& head -1 ${FS_PATH}/cp-nano-horizon-telemetry-prerequisites.log || echo ''",
|
||||
checkIsInstallHorizonTelemetrySucceeded)
|
||||
SHELL_CMD_HANDLER(
|
||||
"IS_AIOPS_RUNNING",
|
||||
"FS_PATH=<FILESYSTEM-PREFIX>; "
|
||||
"PID=$(ps auxf | grep -v grep | grep -E ${FS_PATH}.*cp-nano-horizon-telemetry | awk -F' ' '{printf $2}'); "
|
||||
"[ -z \"${PID}\" ] && echo 'false' || echo 'true'",
|
||||
getIsAiopsRunning)
|
||||
#endif
|
||||
#if defined(gaia)
|
||||
SHELL_CMD_HANDLER("GLOBAL_QUID", "[ -d /opt/CPquid ] "
|
||||
"&& python3 /opt/CPquid/Quid_Api.py -i /opt/CPotelcol/quid_api/get_global_id.json | jq -r .message || echo ''",
|
||||
getQUID)
|
||||
@@ -76,12 +79,31 @@ SHELL_CMD_HANDLER("MGMT_QUID", "[ -d /opt/CPquid ] "
|
||||
SHELL_CMD_HANDLER("AIOPS_AGENT_ROLE", "[ -d /opt/CPOtlpAgent/custom_scripts ] "
|
||||
"&& ENV_NO_FORMAT=1 /opt/CPOtlpAgent/custom_scripts/agent_role.sh",
|
||||
getOtlpAgentGaiaOsRole)
|
||||
SHELL_CMD_HANDLER(
|
||||
"IS_AIOPS_RUNNING",
|
||||
"FS_PATH=<FILESYSTEM-PREFIX>; "
|
||||
"PID=$(ps auxf | grep -v grep | grep -E ${FS_PATH}.*cp-nano-horizon-telemetry | awk -F' ' '{printf $2}'); "
|
||||
"[ -z \"{PID}\" ] && echo 'false' || echo 'true'",
|
||||
getIsAiopsRunning)
|
||||
SHELL_CMD_HANDLER("ETH_MGMT_IP",
|
||||
"FS_PATH=<FILESYSTEM-PREFIX>;"
|
||||
"VS_ID=$(echo \"${FS_PATH}\" | grep -o -E \"vs[0-9]+\" | grep -o -E \"[0-9]+\");"
|
||||
"[ -z \"${VS_ID}\" ] && "
|
||||
"(eth=\"$(grep 'management:interface' /config/active | awk '{print $2}')\" &&"
|
||||
" ip addr show \"${eth}\" | grep inet | awk '{print $2}' | cut -d '/' -f1) || "
|
||||
"(ip a | grep UP | grep -v lo | head -n 1 | cut -d ':' -f2 | tr -d ' ')",
|
||||
getInterfaceMgmtIp)
|
||||
#endif
|
||||
#if defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_CMD_HANDLER("GLOBAL_QUID",
|
||||
"cat $FWDIR/database/myown.C "
|
||||
"| awk -F'[()]' '/:name/ { found=1; next } found && /:uuid/ { uid=tolower($2); print uid; exit }'",
|
||||
getQUID)
|
||||
SHELL_CMD_HANDLER("QUID",
|
||||
"cat $FWDIR/database/myown.C "
|
||||
"| awk -F'[()]' '/:name/ { found=1; next } found && /:uuid/ { uid=tolower($2); print uid; exit }'",
|
||||
getQUID)
|
||||
|
||||
|
||||
SHELL_CMD_HANDLER("SMO_QUID", "echo ''", getQUID)
|
||||
SHELL_CMD_HANDLER("MGMT_QUID", "echo ''", getQUID)
|
||||
SHELL_CMD_HANDLER("AIOPS_AGENT_ROLE", "echo 'SMB'", getOtlpAgentGaiaOsRole)
|
||||
#endif
|
||||
#if defined(gaia) || defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_CMD_HANDLER("hasSDWan", "[ -f $FWDIR/bin/sdwan_steering ] && echo '1' || echo '0'", checkHasSDWan)
|
||||
SHELL_CMD_HANDLER(
|
||||
"canUpdateSDWanData",
|
||||
@@ -102,12 +124,6 @@ SHELL_CMD_HANDLER(
|
||||
"jq -r .lsm_profile_uuid /tmp/cpsdwan_getdata_orch.json",
|
||||
checkLsmProfileUuid
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"IP Address",
|
||||
"[ $(cpprod_util FWisDAG) -eq 1 ] && echo \"Dynamic Address\" "
|
||||
"|| (jq -r .main_ip /tmp/cpsdwan_getdata_orch.json)",
|
||||
getGWIPAddress
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"Version",
|
||||
"cat /etc/cp-release | grep -oE 'R[0-9]+(\\.[0-9]+)?'",
|
||||
@@ -126,19 +142,33 @@ SHELL_CMD_HANDLER(
|
||||
"fw ctl get int support_fec |& grep -sq \"support_fec =\";echo $?",
|
||||
getFecApplicable
|
||||
)
|
||||
SHELL_CMD_HANDLER("is_legacy_qos_blade_enabled",
|
||||
"cpprod_util CPPROD_GetValue FG1 ProdActive 1 | grep -q '^1$' "
|
||||
"&& (cpprod_util CPPROD_GetValue FG1 FgSDWAN 1 | grep -q '^1$' && echo false || echo true) || "
|
||||
"echo false",
|
||||
checkQosLegacyBlade)
|
||||
#endif //gaia || smb
|
||||
|
||||
#if defined(gaia)
|
||||
SHELL_CMD_HANDLER("hasSAMLSupportedBlade", "enabled_blades", checkSAMLSupportedBlade)
|
||||
SHELL_CMD_HANDLER("hasIDABlade", "enabled_blades", checkIDABlade)
|
||||
SHELL_CMD_HANDLER("hasVPNBlade", "enabled_blades", checkVPNBlade)
|
||||
SHELL_CMD_HANDLER("hasSAMLPortal", "mpclient status nac", checkSAMLPortal)
|
||||
SHELL_CMD_HANDLER("hasIdaIdnEnabled", "fw ctl get int nac_pep_identity_next_enabled", checkPepIdaIdnStatus)
|
||||
SHELL_CMD_HANDLER("requiredNanoServices", "echo 'idaSaml_gaia;idaIdn_gaia;'", getRequiredNanoServices)
|
||||
SHELL_CMD_HANDLER("hasInfinityIdentityEnabled",
|
||||
"cat $FWDIR/database/myself_objects.C | grep get_identities_from_infinity_identity",
|
||||
checkInfinityIdentityEnabled
|
||||
)
|
||||
SHELL_CMD_HANDLER("requiredNanoServices", "echo ida", getRequiredNanoServices)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtObjectName",
|
||||
"mgmt_cli --format json -r true show-session | jq -r '.[\"connected-server\"].name'",
|
||||
getMgmtObjName
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtObjectUid",
|
||||
"mgmt_cli --format json -r true show-session | jq -r '.[\"connected-server\"].uid'",
|
||||
getMgmtObjUid
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtParentObjectName",
|
||||
"cat $FWDIR/database/myself_objects.C "
|
||||
@@ -192,9 +222,17 @@ SHELL_CMD_HANDLER(
|
||||
"echo 1",
|
||||
extractManagements
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"IP Address",
|
||||
"( [ $(cpprod_util FwIsHighAvail) -eq 1 ] && [ $(cpprod_util FwIsVSX) -eq 1 ]"
|
||||
"&& (jq -r .cluster_main_ip /tmp/cpsdwan_getdata_orch.json) )"
|
||||
"|| ( [ $(cpprod_util FWisDAG) -eq 1 ] && echo \"Dynamic Address\" )"
|
||||
"|| (jq -r .main_ip /tmp/cpsdwan_getdata_orch.json)",
|
||||
getGWIPAddress
|
||||
)
|
||||
#endif //gaia
|
||||
|
||||
#if defined(smb)
|
||||
#if defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtParentObjectName",
|
||||
"jq -r .cluster_name /tmp/cpsdwan_getdata_orch.json",
|
||||
@@ -210,6 +248,11 @@ SHELL_CMD_HANDLER(
|
||||
"cpprod_util FwIsLocalMgmt",
|
||||
getSmbObjectName
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtObjectUid",
|
||||
"cpprod_util FwIsLocalMgmt",
|
||||
getSmbObjectUid
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"Application Control",
|
||||
"cat $FWDIR/conf/active_blades.txt | grep -o 'APCL [01]' | cut -d ' ' -f2",
|
||||
@@ -248,11 +291,21 @@ SHELL_CMD_HANDLER(
|
||||
"echo 1",
|
||||
extractManagements
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"IP Address",
|
||||
"[ $(cpprod_util FWisDAG) -eq 1 ] && echo \"Dynamic Address\" "
|
||||
"|| (jq -r .main_ip /tmp/cpsdwan_getdata_orch.json)",
|
||||
getGWIPAddress
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"Hardware",
|
||||
R"(ver | sed -E 's/^This is Check Point'\''s +([^ ]+).*$/\1/')",
|
||||
getHardware
|
||||
)
|
||||
#endif//smb
|
||||
|
||||
SHELL_CMD_OUTPUT("kernel_version", "uname -r")
|
||||
SHELL_CMD_OUTPUT("helloWorld", "cat /tmp/agentHelloWorld 2>/dev/null")
|
||||
SHELL_CMD_OUTPUT("report_timestamp", "date -u +\%s")
|
||||
#endif // SHELL_CMD_OUTPUT
|
||||
|
||||
|
||||
@@ -266,6 +319,11 @@ FILE_CONTENT_HANDLER(
|
||||
(getenv("SAMLPORTAL_HOME") ? string(getenv("SAMLPORTAL_HOME")) : "") + "/phpincs/spPortal/idpPolicy.xml",
|
||||
checkIDP
|
||||
)
|
||||
FILE_CONTENT_HANDLER(
|
||||
"hasVPNCidpConfigured",
|
||||
(getenv("SAMLPORTAL_HOME") ? string(getenv("SAMLPORTAL_HOME")) : "") + "/phpincs/spPortal/idpPolicy.xml",
|
||||
checkVPNCIDP
|
||||
)
|
||||
#endif //gaia
|
||||
|
||||
#if defined(alpine)
|
||||
@@ -282,7 +340,7 @@ FILE_CONTENT_HANDLER("AppSecModelVersion", "<FILESYSTEM-PREFIX>/conf/waap/waap.d
|
||||
#endif // FILE_CONTENT_HANDLER
|
||||
|
||||
#ifdef SHELL_POST_CMD
|
||||
#if defined(smb)
|
||||
#if defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_POST_CMD("remove local.cfg", "rm -rf /tmp/local.cfg")
|
||||
#endif //smb
|
||||
#endif
|
||||
|
||||
@@ -41,8 +41,13 @@ HTTPSClient::getFile(const URLParser &url, const string &out_file, bool auth_req
|
||||
|
||||
if (!url.isOverSSL()) return genError("URL is not over SSL.");
|
||||
|
||||
if (getFileSSLDirect(url, out_file, token).ok()) return Maybe<void>();
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to get file over SSL directly. Trying indirectly.";
|
||||
bool skip_direct_download = (url.getQuery().find("/resources/") != string::npos);
|
||||
if (skip_direct_download) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Resources path: " << url.getQuery() << ". Skipping direct download.";
|
||||
} else {
|
||||
if (getFileSSLDirect(url, out_file, token).ok()) return Maybe<void>();
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to get file over SSL directly. Trying indirectly.";
|
||||
}
|
||||
|
||||
if (getFileSSL(url, out_file, token).ok()) return Maybe<void>();
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to get file over SSL. Trying via CURL (SSL).";
|
||||
|
||||
@@ -266,10 +266,10 @@ private:
|
||||
case OrchestrationStatusFieldType::COUNT : return "Count";
|
||||
}
|
||||
|
||||
dbgAssert(false)
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration health")
|
||||
<< "Trying to convert unknown orchestration status field to string.";
|
||||
return "";
|
||||
return "Unknown Field";
|
||||
}
|
||||
|
||||
HealthCheckStatus
|
||||
@@ -282,7 +282,7 @@ private:
|
||||
case UpdatesProcessResult::DEGRADED : return HealthCheckStatus::DEGRADED;
|
||||
}
|
||||
|
||||
dbgAssert(false)
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration health")
|
||||
<< "Trying to convert unknown update process result field to health check status.";
|
||||
return HealthCheckStatus::IGNORED;
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
#include "maybe_res.h"
|
||||
|
||||
std::ostream &
|
||||
operator<<(std::ostream &os, const Maybe<std::tuple<std::string, std::string, std::string>> &)
|
||||
operator<<(std::ostream &os, const Maybe<std::tuple<std::string, std::string, std::string, std::string>> &)
|
||||
{
|
||||
return os;
|
||||
}
|
||||
@@ -42,13 +42,14 @@ public:
|
||||
MOCK_METHOD0(getPlatform, Maybe<std::string>());
|
||||
MOCK_METHOD0(getArch, Maybe<std::string>());
|
||||
MOCK_METHOD0(getAgentVersion, std::string());
|
||||
MOCK_METHOD0(isCloudStorageEnabled, bool());
|
||||
MOCK_METHOD0(isCloudStorageEnabled, bool());
|
||||
MOCK_METHOD0(isReverseProxy, bool());
|
||||
MOCK_METHOD0(isKernelVersion3OrHigher, bool());
|
||||
MOCK_METHOD0(isGw, bool());
|
||||
MOCK_METHOD0(isGwNotVsx, bool());
|
||||
MOCK_METHOD0(getResolvedDetails, std::map<std::string, std::string>());
|
||||
MOCK_METHOD0(isVersionAboveR8110, bool());
|
||||
MOCK_METHOD0(parseNginxMetadata, Maybe<std::tuple<std::string, std::string, std::string>>());
|
||||
MOCK_METHOD0(isVersionAboveR8110, bool());
|
||||
MOCK_METHOD0(parseNginxMetadata, Maybe<std::tuple<std::string, std::string, std::string, std::string>>());
|
||||
MOCK_METHOD0(
|
||||
readCloudMetadata, Maybe<std::tuple<std::string, std::string, std::string, std::string, std::string>>());
|
||||
};
|
||||
|
||||
@@ -100,6 +100,7 @@ private:
|
||||
string packages_dir;
|
||||
string orch_service_name;
|
||||
set<string> ignore_packages;
|
||||
Maybe<string> forbidden_versions = genError("Forbidden versions file does not exist");
|
||||
};
|
||||
|
||||
void
|
||||
@@ -135,7 +136,8 @@ ManifestController::Impl::init()
|
||||
"Ignore packages list file path"
|
||||
);
|
||||
|
||||
if (Singleton::Consume<I_OrchestrationTools>::by<ManifestController>()->doesFileExist(ignore_packages_path)) {
|
||||
auto orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<ManifestController>();
|
||||
if (orchestration_tools->doesFileExist(ignore_packages_path)) {
|
||||
try {
|
||||
ifstream input_stream(ignore_packages_path);
|
||||
if (!input_stream) {
|
||||
@@ -156,6 +158,9 @@ ManifestController::Impl::init()
|
||||
<< " Error: " << f.what();
|
||||
}
|
||||
}
|
||||
|
||||
const string forbidden_versions_path = getFilesystemPathConfig() + "/revert/forbidden_versions";
|
||||
forbidden_versions = orchestration_tools->readFile(forbidden_versions_path);
|
||||
}
|
||||
|
||||
bool
|
||||
@@ -271,6 +276,17 @@ ManifestController::Impl::updateManifest(const string &new_manifest_file)
|
||||
}
|
||||
|
||||
map<string, Package> new_packages = parsed_manifest.unpack();
|
||||
if (!new_packages.empty()) {
|
||||
const Package &package = new_packages.begin()->second;
|
||||
if (forbidden_versions.ok() &&
|
||||
forbidden_versions.unpack().find(package.getVersion()) != string::npos
|
||||
) {
|
||||
dbgWarning(D_ORCHESTRATOR)
|
||||
<< "Packages version is in the forbidden versions list. No upgrade will be performed.";
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
map<string, Package> all_packages = parsed_manifest.unpack();
|
||||
map<string, Package> current_packages;
|
||||
parsed_manifest = orchestration_tools->loadPackagesFromJson(manifest_file_path);
|
||||
|
||||
@@ -58,6 +58,9 @@ public:
|
||||
Debug::setUnitTestFlag(D_ORCHESTRATOR, Debug::DebugLevel::TRACE);
|
||||
const string ignore_packages_file = "/etc/cp/conf/ignore-packages.txt";
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist(ignore_packages_file)).WillOnce(Return(false));
|
||||
Maybe<string> forbidden_versions(string("a1\na2"));
|
||||
EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/revert/forbidden_versions"))
|
||||
.WillOnce(Return(forbidden_versions));
|
||||
manifest_controller.init();
|
||||
manifest_file_path = getConfigurationWithDefault<string>(
|
||||
"/etc/cp/conf/manifest.json",
|
||||
@@ -224,6 +227,10 @@ TEST_F(ManifestControllerTest, createNewManifest)
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -363,6 +370,11 @@ TEST_F(ManifestControllerTest, updateManifest)
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).Times(2).WillRepeatedly(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).Times(2).WillRepeatedly(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
|
||||
manifest =
|
||||
@@ -417,6 +429,9 @@ TEST_F(ManifestControllerTest, updateManifest)
|
||||
EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services));
|
||||
EXPECT_CALL(mock_orchestration_tools,
|
||||
loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillRepeatedly(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -478,6 +493,11 @@ TEST_F(ManifestControllerTest, selfUpdate)
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file", path +
|
||||
temp_ext)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -607,6 +627,10 @@ TEST_F(ManifestControllerTest, removeCurrentErrorPackage)
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
corrupted_packages.clear();
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
@@ -666,6 +690,10 @@ TEST_F(ManifestControllerTest, selfUpdateWithOldCopy)
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file", path +
|
||||
temp_ext)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -722,6 +750,10 @@ TEST_F(ManifestControllerTest, selfUpdateWithOldCopyWithError)
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist(path)).WillOnce(Return(false)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile(path, path + backup_ext + temp_ext)).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(hostname));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -798,6 +830,10 @@ TEST_F(ManifestControllerTest, installAndRemove)
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).Times(2).WillRepeatedly(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).Times(2).WillRepeatedly(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
|
||||
string new_manifest =
|
||||
@@ -858,6 +894,63 @@ TEST_F(ManifestControllerTest, installAndRemove)
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my1/my1")).Times(2)
|
||||
.WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillRepeatedly(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
TEST_F(ManifestControllerTest, manifestWithForbiddenVersion)
|
||||
{
|
||||
new_services.clear();
|
||||
old_services.clear();
|
||||
|
||||
string manifest =
|
||||
"{"
|
||||
" \"packages\": ["
|
||||
" {"
|
||||
" \"download-path\": \"http://172.23.92.135/my.sh\","
|
||||
" \"relative-path\": \"\","
|
||||
" \"name\": \"my\","
|
||||
" \"version\": \"a1\","
|
||||
" \"checksum-type\": \"sha1sum\","
|
||||
" \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\","
|
||||
" \"package-type\": \"service\","
|
||||
" \"require\": []"
|
||||
" },"
|
||||
" {"
|
||||
" \"download-path\": \"http://172.23.92.135/my.sh\","
|
||||
" \"relative-path\": \"\","
|
||||
" \"name\": \"orchestration\","
|
||||
" \"version\": \"a1\","
|
||||
" \"checksum-type\": \"sha1sum\","
|
||||
" \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\","
|
||||
" \"package-type\": \"service\","
|
||||
" \"require\": []"
|
||||
" },"
|
||||
" {"
|
||||
" \"download-path\": \"\","
|
||||
" \"relative-path\": \"\","
|
||||
" \"name\": \"waap\","
|
||||
" \"version\": \"a1\","
|
||||
" \"checksum-type\": \"sha1sum\","
|
||||
" \"checksum\": \"\","
|
||||
" \"package-type\": \"service\","
|
||||
" \"status\": false,\n"
|
||||
" \"message\": \"This security app isn't valid for this agent\"\n"
|
||||
" }"
|
||||
" ]"
|
||||
"}";
|
||||
|
||||
map<string, Package> manifest_services;
|
||||
load(manifest, manifest_services);
|
||||
checkIfFileExistsCall(manifest_services.at("my"));
|
||||
|
||||
|
||||
load(manifest, new_services);
|
||||
load(old_manifest, old_services);
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services));
|
||||
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -947,6 +1040,10 @@ TEST_F(ManifestControllerTest, badInstall)
|
||||
EXPECT_CALL(mock_orchestration_tools,
|
||||
packagesToJsonFile(corrupted_packages, corrupted_file_list)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1112,6 +1209,12 @@ TEST_F(ManifestControllerTest, requireUpdate)
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("new_manifest.json")).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1212,6 +1315,10 @@ TEST_F(ManifestControllerTest, sharedObjectNotInstalled)
|
||||
).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file1", path +
|
||||
temp_ext)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1313,6 +1420,12 @@ TEST_F(ManifestControllerTest, requireSharedObjectUpdate)
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("new_manifest.json"))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1389,6 +1502,7 @@ TEST_F(ManifestControllerTest, failureOnDownloadSharedObject)
|
||||
EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(string("hostname")));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("/tmp/temp_file1")).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1524,6 +1638,12 @@ TEST_F(ManifestControllerTest, multiRequireUpdate)
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("new_manifest.json"))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1610,6 +1730,12 @@ TEST_F(ManifestControllerTest, createNewManifestWithUninstallablePackage)
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1624,7 +1750,7 @@ TEST_F(ManifestControllerTest, updateUninstallPackage)
|
||||
" \"download-path\": \"\","
|
||||
" \"relative-path\": \"\","
|
||||
" \"name\": \"my\","
|
||||
" \"version\": \"\","
|
||||
" \"version\": \"c\","
|
||||
" \"checksum-type\": \"sha1sum\","
|
||||
" \"checksum\": \"\","
|
||||
" \"package-type\": \"service\","
|
||||
@@ -1721,6 +1847,11 @@ TEST_F(ManifestControllerTest, updateUninstallPackage)
|
||||
EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services));
|
||||
EXPECT_CALL(mock_orchestration_tools,
|
||||
loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1744,6 +1875,9 @@ public:
|
||||
setConfiguration<string>(ignore_packages_file, "orchestration", "Ignore packages list file path");
|
||||
writeIgnoreList(ignore_packages_file, ignore_services);
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist(ignore_packages_file)).WillOnce(Return(true));
|
||||
Maybe<string> forbidden_versions(string("a1\na2"));
|
||||
EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/revert/forbidden_versions"))
|
||||
.WillOnce(Return(forbidden_versions));
|
||||
manifest_controller.init();
|
||||
manifest_file_path = getConfigurationWithDefault<string>(
|
||||
"/etc/cp/conf/manifest.json",
|
||||
@@ -1839,6 +1973,7 @@ public:
|
||||
StrictMock<MockOrchestrationStatus> mock_status;
|
||||
StrictMock<MockDownloader> mock_downloader;
|
||||
StrictMock<MockOrchestrationTools> mock_orchestration_tools;
|
||||
StrictMock<MockDetailsResolver> mock_details_resolver;
|
||||
NiceMock<MockShellCmd> mock_shell_cmd;
|
||||
|
||||
ManifestController manifest_controller;
|
||||
@@ -2122,6 +2257,12 @@ TEST_F(ManifestControllerIgnorePakckgeTest, addIgnorePackageAndUpdateNormal)
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -2387,6 +2528,12 @@ TEST_F(ManifestControllerIgnorePakckgeTest, overrideIgnoredPackageFromProfileSet
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
|
||||
EXPECT_THAT(capture_debug.str(), Not(HasSubstr("Ignoring a package from the manifest. Package name: my")));
|
||||
@@ -2411,6 +2558,9 @@ public:
|
||||
doesFileExist("/etc/cp/conf/ignore-packages.txt")
|
||||
).WillOnce(Return(false));
|
||||
|
||||
Maybe<string> forbidden_versions(string("a1\na2"));
|
||||
EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/revert/forbidden_versions"))
|
||||
.WillOnce(Return(forbidden_versions));
|
||||
manifest_controller.init();
|
||||
}
|
||||
|
||||
|
||||
@@ -115,9 +115,9 @@ ManifestDiffCalculator::buildRecInstallationQueue(
const map<string, Package> &current_packages,
const map<string, Package> &new_packages)
{
const vector<string> &requires = package.getRequire();
const vector<string> &requires_packages = package.getRequire();

for (const auto &require : requires) {
for (const auto &require : requires_packages) {
auto installed_package = current_packages.find(require);
auto new_package = new_packages.find(require);

@@ -14,6 +14,7 @@
#include "manifest_handler.h"

#include <algorithm>
#include <ctime>

#include "debug.h"
#include "config.h"
@@ -201,18 +202,29 @@ ManifestHandler::installPackage(
auto span_scope = i_env->startNewSpanScope(Span::ContextType::CHILD_OF);
auto orchestration_status = Singleton::Consume<I_OrchestrationStatus>::by<ManifestHandler>();

auto details_resolver = Singleton::Consume<I_DetailsResolver>::by<ManifestHandler>();
auto orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<ManifestHandler>();

auto &package = package_downloaded_file.first;
auto &package_name = package.getName();
auto &package_handler_path = package_downloaded_file.second;

dbgInfo(D_ORCHESTRATOR) << "Handling package installation. Package: " << package_name;

string upgrade_info =
details_resolver->getAgentVersion() + " " + package.getVersion() + " " + getCurrentTimestamp();
if (!orchestration_tools->doesFileExist(getFilesystemPathConfig() + "/revert/upgrade_status") &&
!orchestration_tools->writeFile(upgrade_info, getFilesystemPathConfig() + "/revert/upgrade_status")
) {
dbgWarning(D_ORCHESTRATOR) << "Failed to write to " + getFilesystemPathConfig() + "/revert/upgrade_status";
}

if (package_name.compare(orch_service_name) == 0) {
orchestration_status->writeStatusToFile();
bool self_update_status = selfUpdate(package, current_packages, package_handler_path);
if (!self_update_status) {
auto details = Singleton::Consume<I_AgentDetails>::by<ManifestHandler>();
auto hostname = Singleton::Consume<I_DetailsResolver>::by<ManifestHandler>()->getHostname();
auto hostname = details_resolver->getHostname();
string err_hostname = (hostname.ok() ? "on host '" + *hostname : "'" + details->getAgentId()) + "'";
string install_error =
"Warning: Agent/Gateway " +
@@ -246,7 +258,6 @@ ManifestHandler::installPackage(
return true;
}
string current_installation_file = packages_dir + "/" + package_name + "/" + package_name;
auto orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<ManifestHandler>();
bool is_clean_installation = !orchestration_tools->doesFileExist(current_installation_file);


@@ -368,3 +379,13 @@ ManifestHandler::selfUpdate(
package_handler->preInstallPackage(orch_service_name, current_installation_file) &&
package_handler->installPackage(orch_service_name, current_installation_file, false);
}

string
ManifestHandler::getCurrentTimestamp()
{
time_t now = time(nullptr);
tm* now_tm = localtime(&now);
char timestamp[20];
strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", now_tm);
return string(timestamp);
}

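The upgrade_status record written above is a single space-separated line: the currently installed agent version, the version of the package being installed, and a "%Y-%m-%d %H:%M:%S" timestamp from getCurrentTimestamp(). A small sketch of composing and re-parsing that record; the helper names are illustrative, not from the source, and the parsing mirrors the `stream >> from_version >> to_version` done later in processUpgradeCompletion():

    #include <sstream>
    #include <string>

    // Illustrative only: the real code builds the record inline in installPackage().
    static std::string
    buildUpgradeStatusLine(const std::string &agent_ver, const std::string &pkg_ver, const std::string &ts)
    {
        return agent_ver + " " + pkg_ver + " " + ts;  // e.g. "1.1.1 1.1.2 2025-01-28 07:53:23"
    }

    // Illustrative only: recovers the two versions; the timestamp tail is ignored,
    // exactly as in the orchestration completion check.
    static void
    parseUpgradeStatusLine(const std::string &line, std::string &from_version, std::string &to_version)
    {
        std::istringstream stream(line);
        stream >> from_version >> to_version;
    }
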
@@ -429,7 +429,7 @@ public:
status.insertServiceSetting(service_name, path);
return;
case OrchestrationStatusConfigType::MANIFEST:
dbgAssert(false)
dbgAssertOpt(false)
<< AlertInfo(AlertTeam::CORE, "sesrvice configuration")
<< "Manifest is not a service configuration file type";
break;
@@ -438,7 +438,9 @@ public:
case OrchestrationStatusConfigType::COUNT:
break;
}
dbgAssert(false) << AlertInfo(AlertTeam::CORE, "sesrvice configuration") << "Unknown configuration file type";
dbgAssertOpt(false)
<< AlertInfo(AlertTeam::CORE, "service configuration")
<< "Unknown configuration file type";
}

void

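The dbgAssert to dbgAssertOpt replacements in this and the following hunks share one call-site shape: rather than a hard assertion on a branch that should be unreachable, the code raises the alert and falls through to a safe default (a return, a break, or a fallback value such as "--UNSUPPORTED"). A hedged sketch of that shape, assuming dbgAssertOpt reports the failure and continues instead of terminating the process, with the project's debug headers in scope and the enum values/strings purely illustrative:

    // Sketch of the call-site pattern only, not the real packageHandlerActionsToString().
    static std::string
    actionToString(int action)
    {
        switch (action) {
            case 0: return "--install";    // illustrative mapping
            case 1: return "--uninstall";  // illustrative mapping
            default: break;
        }
        dbgAssertOpt(false)
            << AlertInfo(AlertTeam::CORE, "service configuration")
            << "Package handler action is not supported. Action: " << action;
        return "--UNSUPPORTED";  // safe fallback instead of aborting on the assert
    }
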
@@ -55,6 +55,8 @@ USE_DEBUG_FLAG(D_ORCHESTRATOR);
static string fw_last_update_time = "";
#endif // gaia || smb

static const size_t MAX_SERVER_NAME_LENGTH = 253;

class SetAgentUninstall
:
public ServerRest,
@@ -103,6 +105,19 @@ public:
|
||||
<< "Initializing Orchestration component, file system path prefix: "
|
||||
<< filesystem_prefix;
|
||||
|
||||
int check_upgrade_success_interval = getSettingWithDefault<uint>(10, "successUpgradeInterval");
|
||||
Singleton::Consume<I_MainLoop>::by<OrchestrationComp>()->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::Timer,
|
||||
[this, check_upgrade_success_interval]()
|
||||
{
|
||||
Singleton::Consume<I_MainLoop>::by<OrchestrationComp>()->yield(
|
||||
std::chrono::minutes(check_upgrade_success_interval)
|
||||
);
|
||||
processUpgradeCompletion();
|
||||
},
|
||||
"Orchestration successfully updated (One-Time After Interval)",
|
||||
true
|
||||
);
|
||||
auto orch_policy = loadDefaultOrchestrationPolicy();
|
||||
if (!orch_policy.ok()) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to load Orchestration Policy. Error: " << orch_policy.getErr();
|
||||
@@ -141,6 +156,113 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
saveLastKnownOrchInfo(string curr_agent_version)
|
||||
{
|
||||
static const string upgrades_dir = filesystem_prefix + "/revert";
|
||||
static const string last_known_orchestrator = upgrades_dir + "/last_known_working_orchestrator";
|
||||
static const string current_orchestration_package =
|
||||
filesystem_prefix + "/packages/orchestration/orchestration";
|
||||
static const string last_known_manifest = upgrades_dir + "/last_known_manifest";
|
||||
static const string current_manifest_file = getConfigurationWithDefault<string>(
|
||||
filesystem_prefix + "/conf/manifest.json",
|
||||
"orchestration",
|
||||
"Manifest file path"
|
||||
);
|
||||
|
||||
if (!i_orchestration_tools->copyFile(current_orchestration_package, last_known_orchestrator)) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to copy the orchestration package to " << upgrades_dir;
|
||||
} else {
|
||||
dbgInfo(D_ORCHESTRATOR) << "last known orchestrator version updated to: " << curr_agent_version;
|
||||
}
|
||||
|
||||
if (!i_orchestration_tools->copyFile(current_manifest_file, last_known_manifest)) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to copy " << current_manifest_file << " to " << upgrades_dir;
|
||||
} else {
|
||||
dbgInfo(D_ORCHESTRATOR) << "last known manifest updated";
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
void
|
||||
processUpgradeCompletion()
|
||||
{
|
||||
if (!is_first_check_update_success) {
|
||||
int check_upgrade_success_interval = getSettingWithDefault<uint>(10, "successUpgradeInterval");
|
||||
// LCOV_EXCL_START
|
||||
Singleton::Consume<I_MainLoop>::by<OrchestrationComp>()->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::Timer,
|
||||
[this, check_upgrade_success_interval]()
|
||||
{
|
||||
Singleton::Consume<I_MainLoop>::by<OrchestrationComp>()->yield(
|
||||
std::chrono::minutes(check_upgrade_success_interval)
|
||||
);
|
||||
processUpgradeCompletion();
|
||||
},
|
||||
"Orchestration successfully updated",
|
||||
true
|
||||
);
|
||||
// LCOV_EXCL_STOP
|
||||
return;
|
||||
}
|
||||
|
||||
static const string upgrades_dir = filesystem_prefix + "/revert";
|
||||
static const string upgrade_status = upgrades_dir + "/upgrade_status";
|
||||
static const string last_known_orchestrator = upgrades_dir + "/last_known_working_orchestrator";
|
||||
static const string upgrade_failure_info_path = upgrades_dir + "/failed_upgrade_info";
|
||||
|
||||
I_DetailsResolver *i_details_resolver = Singleton::Consume<I_DetailsResolver>::by<OrchestrationComp>();
|
||||
|
||||
bool is_upgrade_status_exist = i_orchestration_tools->doesFileExist(upgrade_status);
|
||||
bool is_last_known_orchestrator_exist = i_orchestration_tools->doesFileExist(last_known_orchestrator);
|
||||
|
||||
if (!is_upgrade_status_exist) {
|
||||
if (!is_last_known_orchestrator_exist) {
|
||||
saveLastKnownOrchInfo(i_details_resolver->getAgentVersion());
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
auto maybe_upgrade_data = i_orchestration_tools->readFile(upgrade_status);
|
||||
string upgrade_data, from_version, to_version;
|
||||
if (maybe_upgrade_data.ok()) {
|
||||
upgrade_data = maybe_upgrade_data.unpack();
|
||||
istringstream stream(upgrade_data);
|
||||
stream >> from_version >> to_version;
|
||||
}
|
||||
i_orchestration_tools->removeFile(upgrade_status);
|
||||
|
||||
if (i_orchestration_tools->doesFileExist(upgrade_failure_info_path)) {
|
||||
string info = "Orchestration revert. ";
|
||||
auto failure_info = i_orchestration_tools->readFile(upgrade_failure_info_path);
|
||||
if (failure_info.ok()) info.append(failure_info.unpack());
|
||||
LogGen(
|
||||
info,
|
||||
ReportIS::Level::ACTION,
|
||||
ReportIS::Audience::INTERNAL,
|
||||
ReportIS::Severity::CRITICAL,
|
||||
ReportIS::Priority::URGENT,
|
||||
ReportIS::Tags::ORCHESTRATOR
|
||||
);
|
||||
dbgError(D_ORCHESTRATOR) <<
|
||||
"Error in orchestration version: " << to_version <<
|
||||
". Orchestration reverted to version: " << i_details_resolver->getAgentVersion();
|
||||
i_orchestration_tools->removeFile(upgrade_failure_info_path);
|
||||
return;
|
||||
}
|
||||
|
||||
saveLastKnownOrchInfo(i_details_resolver->getAgentVersion());
|
||||
i_orchestration_tools->writeFile(
|
||||
upgrade_data + "\n",
|
||||
getLogFilesPathConfig() + "/nano_agent/prev_upgrades",
|
||||
true
|
||||
);
|
||||
dbgWarning(D_ORCHESTRATOR) <<
|
||||
"Upgrade process from version: " << from_version <<
|
||||
" to version: " << to_version <<
|
||||
" completed successfully";
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
registerToTheFog()
|
||||
{
|
||||
@@ -1022,6 +1144,7 @@ private:
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::GENERAL
|
||||
).notify();
|
||||
if (!is_first_check_update_success) is_first_check_update_success = true;
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
@@ -1342,14 +1465,18 @@ private:
|
||||
|
||||
auto nginx_data = i_details_resolver->parseNginxMetadata();
|
||||
if (nginx_data.ok()) {
|
||||
string nginx_signature;
|
||||
string nginx_version;
|
||||
string config_opt;
|
||||
string cc_opt;
|
||||
tie(config_opt, cc_opt, nginx_version) = nginx_data.unpack();
|
||||
tie(config_opt, cc_opt, nginx_version, nginx_signature) = nginx_data.unpack();
|
||||
agent_data_report
|
||||
<< make_pair("nginxVersion", nginx_version)
|
||||
<< make_pair("configureOpt", config_opt)
|
||||
<< make_pair("extraCompilerOpt", cc_opt);
|
||||
<< make_pair("configureOptStatus", "Enabled")
|
||||
<< make_pair("moduleSignatureStatus", "Enabled")
|
||||
<< make_pair("nginxSignature", nginx_signature)
|
||||
<< make_pair("nginxVersion", nginx_version)
|
||||
<< make_pair("configureOpt", config_opt)
|
||||
<< make_pair("extraCompilerOpt", cc_opt);
|
||||
} else {
|
||||
dbgDebug(D_ORCHESTRATOR) << nginx_data.getErr();
|
||||
}
|
||||
@@ -1370,6 +1497,10 @@ private:
|
||||
agent_data_report << AgentReportFieldWithLabel("isKernelVersion3OrHigher", "true");
|
||||
}
|
||||
|
||||
if (i_details_resolver->isGw()) {
|
||||
agent_data_report << AgentReportFieldWithLabel("isGw", "true");
|
||||
}
|
||||
|
||||
if (i_details_resolver->isGwNotVsx()) {
|
||||
agent_data_report << AgentReportFieldWithLabel("isGwNotVsx", "true");
|
||||
}
|
||||
@@ -1389,6 +1520,14 @@ private:
|
||||
|
||||
agent_data_report << AgentReportFieldWithLabel("userEdition", FogCommunication::getUserEdition());
|
||||
|
||||
agent_data_report << make_pair("registeredServer", i_agent_details->getRegisteredServer());
|
||||
|
||||
const char *prometheus_env = getenv("PROMETHEUS");
|
||||
if (prometheus_env != nullptr) {
|
||||
auto enable_prometheus = string(prometheus_env) == "true";
|
||||
agent_data_report << AgentReportFieldWithLabel("enablePrometheus", enable_prometheus ? "true" : "false");
|
||||
}
|
||||
|
||||
#if defined(gaia) || defined(smb)
|
||||
if (i_details_resolver->compareCheckpointVersion(8100, greater_equal<int>())) {
|
||||
agent_data_report << AgentReportFieldWithLabel("isCheckpointVersionGER81", "true");
|
||||
@@ -1549,6 +1688,11 @@ private:
|
||||
<< LogField("agentType", "Orchestration")
|
||||
<< LogField("agentVersion", Version::get());
|
||||
|
||||
string registered_server = getAttribute("registered-server", "registered_server");
|
||||
dbgTrace(D_ORCHESTRATOR) << "Registered server: " << registered_server;
|
||||
if (!registered_server.empty()) {
|
||||
i_agent_details->setRegisteredServer(registered_server.substr(0, MAX_SERVER_NAME_LENGTH));
|
||||
}
|
||||
auto mainloop = Singleton::Consume<I_MainLoop>::by<OrchestrationComp>();
|
||||
mainloop->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::Offline,
|
||||
@@ -1587,6 +1731,7 @@ private:
|
||||
}
|
||||
|
||||
setDelayedUpgradeTime();
|
||||
|
||||
while (true) {
|
||||
Singleton::Consume<I_Environment>::by<OrchestrationComp>()->startNewTrace(false);
|
||||
if (shouldReportAgentDetailsMetadata()) {
|
||||
@@ -1628,9 +1773,9 @@ private:
|
||||
}
|
||||
}
|
||||
|
||||
string server_name = getAttribute("registered-server", "registered_server");
|
||||
string server_name = Singleton::Consume<I_AgentDetails>::by<OrchestrationComp>()->getRegisteredServer();
|
||||
auto server = TagAndEnumManagement::convertStringToTag(server_name);
|
||||
if (server_name == "'SWAG'") server = Tags::WEB_SERVER_SWAG;
|
||||
if (server_name == "'SWAG'" || server_name == "'SWAG Server'") server = Tags::WEB_SERVER_SWAG;
|
||||
if (server.ok()) tags.insert(*server);
|
||||
|
||||
if (getAttribute("no-setting", "CROWDSEC_ENABLED") == "true") tags.insert(Tags::CROWDSEC);
|
||||
@@ -1652,7 +1797,7 @@ private:
|
||||
tags
|
||||
);
|
||||
|
||||
if (server_name != "") registration_report.addToOrigin(LogField("eventCategory", server_name));
|
||||
registration_report.addToOrigin(LogField("eventCategory", server_name));
|
||||
|
||||
auto email = getAttribute("email-address", "user_email");
|
||||
if (email != "") registration_report << LogField("userDefinedId", email);
|
||||
@@ -1695,13 +1840,19 @@ private:
|
||||
auto backup_installation_file = current_installation_file + backup_ext;
|
||||
auto temp_ext = getConfigurationWithDefault<string>("_temp", "orchestration", "Temp file extension");
|
||||
|
||||
dbgAssert(i_orchestration_tools->doesFileExist(backup_installation_file))
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "There is no backup installation package";
|
||||
if (!i_orchestration_tools->doesFileExist(backup_installation_file)) {
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "There is no backup installation package";
|
||||
return;
|
||||
}
|
||||
|
||||
dbgAssert(i_orchestration_tools->copyFile(backup_installation_file, current_installation_file))
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to copy backup installation package";
|
||||
if (!i_orchestration_tools->copyFile(backup_installation_file, current_installation_file)) {
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to copy backup installation package";
|
||||
return;
|
||||
}
|
||||
|
||||
// Copy the backup manifest file to the default manifest file path.
|
||||
auto manifest_file_path = getConfigurationWithDefault<string>(
|
||||
@@ -1716,12 +1867,18 @@ private:
|
||||
|
||||
auto package_handler = Singleton::Consume<I_PackageHandler>::by<OrchestrationComp>();
|
||||
// Install the backup orchestration service installation package.
|
||||
dbgAssert(package_handler->preInstallPackage(service_name, current_installation_file))
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to restore from backup, pre install test failed";
|
||||
dbgAssert(package_handler->installPackage(service_name, current_installation_file, true))
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to restore from backup, installation failed";
|
||||
if (!package_handler->preInstallPackage(service_name, current_installation_file)) {
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to restore from backup, pre install test failed";
|
||||
return;
|
||||
}
|
||||
if (!package_handler->installPackage(service_name, current_installation_file, true)) {
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to restore from backup, installation failed";
|
||||
return;
|
||||
}
|
||||
}
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
@@ -2052,10 +2209,10 @@ private:
|
||||
int failure_count = 0;
|
||||
unsigned int sleep_interval = 0;
|
||||
bool is_new_success = false;
|
||||
bool is_first_check_update_success = false;
|
||||
OrchestrationPolicy policy;
|
||||
UpdatesProcessReporter updates_process_reporter_listener;
|
||||
HybridModeMetric hybrid_mode_metric;
|
||||
EnvDetails env_details;
|
||||
chrono::minutes upgrade_delay_time;
|
||||
|
||||
string filesystem_prefix = "";
|
||||
@@ -2118,6 +2275,7 @@ OrchestrationComp::preload()
|
||||
registerExpectedSetting<vector<string>>("upgradeDay");
|
||||
registerExpectedSetting<string>("email-address");
|
||||
registerExpectedSetting<string>("registered-server");
|
||||
registerExpectedSetting<uint>("successUpgradeInterval");
|
||||
registerExpectedConfigFile("orchestration", Config::ConfigFileType::Policy);
|
||||
registerExpectedConfigFile("registration-data", Config::ConfigFileType::Policy);
|
||||
}
|
||||
}
|
||||
@@ -150,7 +150,8 @@ getNamespaceDataFromCluster()
string auth_header = "Authorization: Bearer " + token;
string connection_header = "Connection: close";
string host = "https://kubernetes.default.svc:443/api/v1/namespaces/";
string culr_cmd = "curl -s -k -H \"" + auth_header + "\" -H \"" + connection_header + "\" " + host +
string culr_cmd =
"LD_LIBRARY_PATH=\"\" curl -s -k -H \"" + auth_header + "\" -H \"" + connection_header + "\" " + host +
" | /etc/cp/bin/cpnano_json";

auto output_res = Singleton::Consume<I_ShellCmd>::by<OrchestrationTools>()->getExecOutput(culr_cmd);
@@ -386,7 +387,7 @@ OrchestrationTools::Impl::calculateChecksum(Package::ChecksumTypes checksum_type
return genError("Error while reading file " + path + ", " + e.what());
}

dbgAssert(false)
dbgAssertOpt(false)
<< AlertInfo(AlertTeam::CORE, "service configuration")
<< "Checksum type is not supported. Checksum type: "
<< static_cast<unsigned int>(checksum_type);

@@ -86,7 +86,7 @@ TEST_F(OrchestrationToolsTest, setClusterId)
EXPECT_CALL(
mock_shell_cmd,
getExecOutput(
"curl -s -k -H \"Authorization: Bearer 123\" -H \"Connection: close\" "
"LD_LIBRARY_PATH=\"\" curl -s -k -H \"Authorization: Bearer 123\" -H \"Connection: close\" "
"https://kubernetes.default.svc:443/api/v1/namespaces/ | /etc/cp/bin/cpnano_json",
200,
false

@@ -89,6 +89,11 @@ public:
|
||||
|
||||
EXPECT_CALL(mock_service_controller, isServiceInstalled("Access Control")).WillRepeatedly(Return(false));
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Orchestration successfully updated (One-Time After Interval)", true)
|
||||
).WillOnce(DoAll(SaveArg<1>(&upgrade_routine), Return(0)));
|
||||
|
||||
// This Holding the Main Routine of the Orchestration.
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
@@ -135,11 +140,12 @@ public:
|
||||
void
|
||||
expectDetailsResolver()
|
||||
{
|
||||
Maybe<tuple<string, string, string>> no_nginx(genError("No nginx"));
|
||||
Maybe<tuple<string, string, string, string>> no_nginx(genError("No nginx"));
|
||||
EXPECT_CALL(mock_details_resolver, getPlatform()).WillRepeatedly(Return(string("linux")));
|
||||
EXPECT_CALL(mock_details_resolver, getArch()).WillRepeatedly(Return(string("x86_64")));
|
||||
EXPECT_CALL(mock_details_resolver, isReverseProxy()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, isKernelVersion3OrHigher()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, isGw()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, isGwNotVsx()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, isVersionAboveR8110()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, parseNginxMetadata()).WillRepeatedly(Return(no_nginx));
|
||||
@@ -156,6 +162,7 @@ public:
|
||||
runRoutine()
|
||||
{
|
||||
routine();
|
||||
upgrade_routine();
|
||||
}
|
||||
|
||||
void
|
||||
@@ -235,6 +242,7 @@ private:
|
||||
}
|
||||
|
||||
I_MainLoop::Routine routine;
|
||||
I_MainLoop::Routine upgrade_routine;
|
||||
I_MainLoop::Routine status_routine;
|
||||
};
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@ std::ostream & operator<<(std::ostream &os, const Package &) { return os; }
|
||||
#include "health_check_status/health_check_status.h"
|
||||
#include "updates_process_event.h"
|
||||
#include "declarative_policy_utils.h"
|
||||
#include "mock/mock_env_details.h"
|
||||
|
||||
using namespace testing;
|
||||
using namespace std;
|
||||
@@ -82,6 +83,12 @@ public:
|
||||
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response));
|
||||
EXPECT_CALL(mock_status, setFogAddress(host_url)).WillRepeatedly(Return());
|
||||
EXPECT_CALL(mock_orchestration_tools, setClusterId());
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Orchestration successfully updated (One-Time After Interval)", true)
|
||||
).WillOnce(DoAll(SaveArg<1>(&upgrade_routine), Return(0)));
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Orchestration runner", true)
|
||||
@@ -161,12 +168,13 @@ public:
|
||||
void
|
||||
expectDetailsResolver()
|
||||
{
|
||||
Maybe<tuple<string, string, string>> no_nginx(genError("No nginx"));
|
||||
Maybe<tuple<string, string, string, string>> no_nginx(genError("No nginx"));
|
||||
EXPECT_CALL(mock_details_resolver, getPlatform()).WillRepeatedly(Return(string("linux")));
|
||||
EXPECT_CALL(mock_details_resolver, getArch()).WillRepeatedly(Return(string("x86_64")));
|
||||
EXPECT_CALL(mock_details_resolver, isReverseProxy()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, isCloudStorageEnabled()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, isKernelVersion3OrHigher()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, isGw()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, isGwNotVsx()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, isVersionAboveR8110()).WillRepeatedly(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, parseNginxMetadata()).WillRepeatedly(Return(no_nginx));
|
||||
@@ -280,6 +288,12 @@ public:
|
||||
status_routine();
|
||||
}
|
||||
|
||||
void
|
||||
runUpgradeRoutine()
|
||||
{
|
||||
upgrade_routine();
|
||||
}
|
||||
|
||||
void
|
||||
preload()
|
||||
{
|
||||
@@ -324,6 +338,7 @@ public:
|
||||
StrictMock<MockOrchestrationTools> mock_orchestration_tools;
|
||||
StrictMock<MockDownloader> mock_downloader;
|
||||
StrictMock<MockShellCmd> mock_shell_cmd;
|
||||
StrictMock<EnvDetailsMocker> mock_env_details;
|
||||
StrictMock<MockMessaging> mock_message;
|
||||
StrictMock<MockRestApi> rest;
|
||||
StrictMock<MockServiceController> mock_service_controller;
|
||||
@@ -357,6 +372,7 @@ private:
|
||||
|
||||
I_MainLoop::Routine routine;
|
||||
I_MainLoop::Routine status_routine;
|
||||
I_MainLoop::Routine upgrade_routine;
|
||||
};
|
||||
|
||||
|
||||
@@ -583,6 +599,8 @@ TEST_F(OrchestrationTest, check_sending_registration_data)
|
||||
env.init();
|
||||
init();
|
||||
|
||||
EXPECT_CALL(mock_env_details, getEnvType()).WillRepeatedly(Return(EnvType::LINUX));
|
||||
|
||||
EXPECT_CALL(mock_service_controller, updateServiceConfiguration(_, _, _, _, _, _))
|
||||
.WillOnce(Return(Maybe<void>()));
|
||||
EXPECT_CALL(mock_orchestration_tools, calculateChecksum(_, _)).WillRepeatedly(Return(string()));
|
||||
@@ -597,14 +615,6 @@ TEST_F(OrchestrationTest, check_sending_registration_data)
|
||||
|
||||
string version = "1";
|
||||
EXPECT_CALL(mock_service_controller, getUpdatePolicyVersion()).WillOnce(ReturnRef(version));
|
||||
|
||||
EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>()))
|
||||
.WillOnce(Return())
|
||||
.WillOnce(Invoke([] (chrono::microseconds) { throw invalid_argument("stop while loop"); }));
|
||||
try {
|
||||
runRoutine();
|
||||
} catch (const invalid_argument& e) {}
|
||||
|
||||
string config_json =
|
||||
"{\n"
|
||||
" \"email-address\": \"fake@example.com\",\n"
|
||||
@@ -613,9 +623,19 @@ TEST_F(OrchestrationTest, check_sending_registration_data)
|
||||
|
||||
istringstream ss(config_json);
|
||||
Singleton::Consume<Config::I_Config>::from(config_comp)->loadConfiguration(ss);
|
||||
EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>()))
|
||||
.WillOnce(Return())
|
||||
.WillOnce(Invoke([] (chrono::microseconds) { throw invalid_argument("stop while loop"); }));
|
||||
try {
|
||||
runRoutine();
|
||||
} catch (const invalid_argument& e) {}
|
||||
|
||||
|
||||
sending_routine();
|
||||
|
||||
EXPECT_THAT(message_body, HasSubstr("\"userDefinedId\": \"fake@example.com\""));
|
||||
EXPECT_THAT(message_body, HasSubstr("\"eventCategory\""));
|
||||
|
||||
EXPECT_THAT(message_body, AnyOf(HasSubstr("\"Embedded Deployment\""), HasSubstr("\"Kubernetes Deployment\"")));
|
||||
EXPECT_THAT(message_body, HasSubstr("\"NGINX Server\""));
|
||||
}
|
||||
@@ -1000,6 +1020,11 @@ TEST_F(OrchestrationTest, loadOrchestrationPolicyFromBackup)
|
||||
);
|
||||
waitForRestCall();
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Orchestration successfully updated (One-Time After Interval)", true)
|
||||
);
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Orchestration runner", true)
|
||||
@@ -1166,6 +1191,29 @@ TEST_F(OrchestrationTest, manifestUpdate)
|
||||
try {
|
||||
runRoutine();
|
||||
} catch (const invalid_argument& e) {}
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/last_known_working_orchestrator"))
|
||||
.WillOnce(Return(true));
|
||||
|
||||
Maybe<string> upgrade_status(string("1.1.1 1.1.2 2025-01-28 07:53:23"));
|
||||
EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(upgrade_status));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("/etc/cp/revert/upgrade_status")).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/failed_upgrade_info"))
|
||||
.WillOnce(Return(false));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("1.1.2"));
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile(_, "/etc/cp/revert/last_known_working_orchestrator"))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile(_, "/etc/cp/revert/last_known_manifest")).WillOnce(Return(true));
|
||||
EXPECT_CALL(
|
||||
mock_orchestration_tools,
|
||||
writeFile("1.1.1 1.1.2 2025-01-28 07:53:23\n", "/var/log/nano_agent/prev_upgrades", true)
|
||||
).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>())).WillOnce(Return());
|
||||
runUpgradeRoutine();
|
||||
}
|
||||
|
||||
TEST_F(OrchestrationTest, getBadPolicyUpdate)
|
||||
|
||||
@@ -141,11 +141,11 @@ packageHandlerActionsToString(PackageHandlerActions action)
}
}

dbgAssert(false)
dbgAssertOpt(false)
<< AlertInfo(AlertTeam::CORE, "service configuration")
<< "Package handler action is not supported. Action: "
<< static_cast<unsigned int>(action);
return string();
return string("--UNSUPPORTED");
}

void

@@ -208,6 +208,8 @@ ServiceDetails::sendNewConfigurations(int configuration_id, const string &policy
MessageMetadata new_config_req_md("127.0.0.1", service_port);
new_config_req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
new_config_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
new_config_req_md.setSuspension(false);
new_config_req_md.setShouldSendAccessToken(false);
auto res = messaging->sendSyncMessage(
HTTPMethod::POST,
"/set-new-configuration",

@@ -139,6 +139,25 @@ FogAuthenticator::RegistrationData::serialize(JSONOutputArchive &out_ar) const
|
||||
);
|
||||
}
|
||||
|
||||
static string
|
||||
getDeplymentType()
|
||||
{
|
||||
auto deplyment_type = Singleton::Consume<I_EnvDetails>::by<FogAuthenticator>()->getEnvType();
|
||||
switch (deplyment_type) {
|
||||
case EnvType::LINUX: return "Embedded";
|
||||
case EnvType::DOCKER: return "Docker";
|
||||
case EnvType::NON_CRD_K8S:
|
||||
case EnvType::K8S: return "K8S";
|
||||
case EnvType::COUNT: break;
|
||||
}
|
||||
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "fog communication")
|
||||
<< "Failed to get a legitimate deployment type: "
|
||||
<< static_cast<uint>(deplyment_type);
|
||||
return "Embedded";
|
||||
}
|
||||
|
||||
Maybe<FogAuthenticator::UserCredentials>
|
||||
FogAuthenticator::registerAgent(
|
||||
const FogAuthenticator::RegistrationData ®_data,
|
||||
@@ -168,10 +187,12 @@ FogAuthenticator::registerAgent(
|
||||
auto nginx_data = details_resolver->parseNginxMetadata();
|
||||
|
||||
if (nginx_data.ok()) {
|
||||
string nginx_signature;
|
||||
string nginx_version;
|
||||
string config_opt;
|
||||
string cc_opt;
|
||||
tie(config_opt, cc_opt, nginx_version) = nginx_data.unpack();
|
||||
tie(config_opt, cc_opt, nginx_version, nginx_signature) = nginx_data.unpack();
|
||||
request << make_pair("nginxSignature", nginx_signature);
|
||||
request << make_pair("nginxVersion", nginx_version);
|
||||
request << make_pair("configureOpt", config_opt);
|
||||
request << make_pair("extraCompilerOpt", cc_opt);
|
||||
@@ -206,6 +227,18 @@ FogAuthenticator::registerAgent(
|
||||
|
||||
request << make_pair("userEdition", getUserEdition());
|
||||
|
||||
const char *prometheus_env = getenv("PROMETHEUS");
|
||||
if (prometheus_env != nullptr) {
|
||||
request << make_pair("enablePrometheus", string(prometheus_env) == "true" ? "true" : "false");
|
||||
}
|
||||
|
||||
if (getDeplymentType() == "Docker" || getDeplymentType() == "K8S") {
|
||||
const char *image_version_otp = getenv("IMAGE_VERSION");
|
||||
if (image_version_otp) {
|
||||
request << make_pair("imageVersion", image_version_otp);
|
||||
}
|
||||
}
|
||||
|
||||
if (details_resolver->isReverseProxy()) {
|
||||
request << make_pair("reverse_proxy", "true");
|
||||
}
|
||||
@@ -218,6 +251,10 @@ FogAuthenticator::registerAgent(
|
||||
request << make_pair("isKernelVersion3OrHigher", "true");
|
||||
}
|
||||
|
||||
if (details_resolver->isGw()) {
|
||||
request << make_pair("isGw", "true");
|
||||
}
|
||||
|
||||
if (details_resolver->isGwNotVsx()) {
|
||||
request << make_pair("isGwNotVsx", "true");
|
||||
}
|
||||
@@ -281,11 +318,14 @@ FogAuthenticator::getAccessToken(const UserCredentials &user_credentials) const
|
||||
static const string grant_type_string = "/oauth/token?grant_type=client_credentials";
|
||||
TokenRequest request = TokenRequest();
|
||||
|
||||
MessageMetadata request_token_md;
|
||||
MessageMetadata request_token_md(true);
|
||||
request_token_md.insertHeader(
|
||||
"Authorization",
|
||||
buildBasicAuthHeader(user_credentials.getClientId(), user_credentials.getSharedSecret())
|
||||
);
|
||||
dbgInfo(D_ORCHESTRATOR)
|
||||
<< "Sending request for access token. Trace: "
|
||||
<< (request_token_md.getTraceId().ok() ? request_token_md.getTraceId().unpack() : "No trace id");
|
||||
auto request_token_status = Singleton::Consume<I_Messaging>::by<FogAuthenticator>()->sendSyncMessage(
|
||||
HTTPMethod::POST,
|
||||
grant_type_string,
|
||||
@@ -377,9 +417,13 @@ FogAuthenticator::registerLocalAgentToFog()
|
||||
{
|
||||
auto local_reg_token = getRegistrationToken();
|
||||
if (!local_reg_token.ok()) return;
|
||||
|
||||
string reg_token = local_reg_token.unpack().getData();
|
||||
if (reg_token.empty()) return;
|
||||
|
||||
dbgInfo(D_ORCHESTRATOR) << "Start local agent registration to the fog";
|
||||
|
||||
string exec_command = "open-appsec-ctl --set-mode --online_mode --token " + local_reg_token.unpack().getData();
|
||||
string exec_command = "open-appsec-ctl --set-mode --online_mode --token " + reg_token;
|
||||
|
||||
auto i_agent_details = Singleton::Consume<I_AgentDetails>::by<FogAuthenticator>();
|
||||
auto fog_address = i_agent_details->getFogDomain();
|
||||
@@ -455,25 +499,6 @@ FogAuthenticator::getCredentialsFromFile() const
|
||||
return orchestration_tools->jsonStringToObject<UserCredentials>(encrypted_cred.unpack());
|
||||
}
|
||||
|
||||
static string
|
||||
getDeplymentType()
|
||||
{
|
||||
auto deplyment_type = Singleton::Consume<I_EnvDetails>::by<FogAuthenticator>()->getEnvType();
|
||||
switch (deplyment_type) {
|
||||
case EnvType::LINUX: return "Embedded";
|
||||
case EnvType::DOCKER: return "Docker";
|
||||
case EnvType::NON_CRD_K8S:
|
||||
case EnvType::K8S: return "K8S";
|
||||
case EnvType::COUNT: break;
|
||||
}
|
||||
|
||||
dbgAssert(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "fog communication")
|
||||
<< "Failed to get a legitimate deplyment type: "
|
||||
<< static_cast<uint>(deplyment_type);
|
||||
return "Embedded";
|
||||
}
|
||||
|
||||
Maybe<FogAuthenticator::UserCredentials>
|
||||
FogAuthenticator::getCredentials()
|
||||
{
|
||||
|
||||
2
components/security_apps/prometheus/CMakeLists.txt
Executable file
@@ -0,0 +1,2 @@
add_library(prometheus_comp prometheus_comp.cc)
add_subdirectory(prometheus_ut)
200
components/security_apps/prometheus/prometheus_comp.cc
Executable file
@@ -0,0 +1,200 @@
|
||||
#include "prometheus_comp.h"
|
||||
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include <cereal/archives/json.hpp>
|
||||
#include <cereal/types/map.hpp>
|
||||
#include <cereal/types/vector.hpp>
|
||||
#include <cereal/types/string.hpp>
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
||||
#include "common.h"
|
||||
#include "report/base_field.h"
|
||||
#include "report/report_enums.h"
|
||||
#include "log_generator.h"
|
||||
#include "debug.h"
|
||||
#include "rest.h"
|
||||
#include "customized_cereal_map.h"
|
||||
#include "i_messaging.h"
|
||||
#include "prometheus_metric_names.h"
|
||||
|
||||
USE_DEBUG_FLAG(D_PROMETHEUS);
|
||||
|
||||
using namespace std;
|
||||
using namespace ReportIS;
|
||||
|
||||
struct ServiceData
|
||||
{
|
||||
template <typename Archive>
|
||||
void
|
||||
serialize(Archive &ar)
|
||||
{
|
||||
ar(cereal::make_nvp("Service port", service_port));
|
||||
}
|
||||
|
||||
int service_port;
|
||||
};
|
||||
|
||||
class PrometheusMetricData
|
||||
{
|
||||
public:
|
||||
PrometheusMetricData(const string &n, const string &t, const string &d) : name(n), type(t), description(d) {}
|
||||
|
||||
void
|
||||
addElement(const string &labels, const string &value)
|
||||
{
|
||||
metric_labels_to_values[labels] = value;
|
||||
}
|
||||
|
||||
ostream &
|
||||
print(ostream &os)
|
||||
{
|
||||
if (metric_labels_to_values.empty()) return os;
|
||||
|
||||
string representative_name = "";
|
||||
if (!name.empty()) {
|
||||
auto metric_name = convertMetricName(name);
|
||||
!metric_name.empty() ? representative_name = metric_name : representative_name = name;
|
||||
}
|
||||
|
||||
if (!description.empty()) os << "# HELP " << representative_name << ' ' << description << '\n';
|
||||
if (!name.empty()) os << "# TYPE " << representative_name << ' ' << type << '\n';
|
||||
for (auto &entry : metric_labels_to_values) {
|
||||
os << representative_name << entry.first << ' ' << entry.second << '\n';
|
||||
}
|
||||
os << '\n';
|
||||
metric_labels_to_values.clear();
|
||||
|
||||
return os;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
string name;
|
||||
string type;
|
||||
string description;
|
||||
map<string, string> metric_labels_to_values;
|
||||
};
|
||||
|
||||
static ostream & operator<<(ostream &os, PrometheusMetricData &metric) { return metric.print(os); }
|
||||
|
||||
class PrometheusComp::Impl
|
||||
{
|
||||
public:
|
||||
void
|
||||
init()
|
||||
{
|
||||
Singleton::Consume<I_RestApi>::by<PrometheusComp>()->addGetCall(
|
||||
"metrics",
|
||||
[&] () { return getFormatedPrometheusMetrics(); }
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
addMetrics(const vector<PrometheusData> &metrics)
|
||||
{
|
||||
for(auto &metric : metrics) {
|
||||
auto &metric_object = getDataObject(
|
||||
metric.name,
|
||||
metric.type,
|
||||
metric.description
|
||||
);
|
||||
metric_object.addElement(metric.label, metric.value);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
PrometheusMetricData &
|
||||
getDataObject(const string &name, const string &type, const string &description)
|
||||
{
|
||||
auto elem = prometheus_metrics.find(name);
|
||||
if (elem == prometheus_metrics.end()) {
|
||||
elem = prometheus_metrics.emplace(name, PrometheusMetricData(name, type, description)).first;
|
||||
}
|
||||
|
||||
return elem->second;
|
||||
}
|
||||
|
||||
map<string, ServiceData>
|
||||
getServiceDetails()
|
||||
{
|
||||
map<string, ServiceData> registeredServices;
|
||||
auto registered_services_file = getConfigurationWithDefault<string>(
|
||||
getFilesystemPathConfig() + "/conf/orchestrations_registered_services.json",
|
||||
"orchestration",
|
||||
"Orchestration registered services"
|
||||
);
|
||||
ifstream file(registered_services_file);
|
||||
if (!file.is_open()) {
|
||||
dbgWarning(D_PROMETHEUS) << "Failed to open file: " << registered_services_file;
|
||||
return registeredServices;
|
||||
}
|
||||
stringstream buffer;
|
||||
buffer << file.rdbuf();
|
||||
try {
|
||||
cereal::JSONInputArchive archive(buffer);
|
||||
archive(cereal::make_nvp("Registered Services", registeredServices));
|
||||
} catch (const exception& e) {
|
||||
dbgWarning(D_PROMETHEUS) << "Error parsing Registered Services JSON file: " << e.what();
|
||||
}
|
||||
|
||||
return registeredServices;
|
||||
}
|
||||
|
||||
void
|
||||
getServicesMetrics()
|
||||
{
|
||||
dbgTrace(D_PROMETHEUS) << "Get all registered services metrics";
|
||||
map<string, ServiceData> service_names_to_ports = getServiceDetails();
|
||||
for (const auto &service : service_names_to_ports) {
|
||||
I_Messaging *messaging = Singleton::Consume<I_Messaging>::by<PrometheusComp>();
|
||||
MessageMetadata servie_metric_req_md("127.0.0.1", service.second.service_port);
|
||||
servie_metric_req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
|
||||
servie_metric_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
|
||||
auto res = messaging->sendSyncMessage(
|
||||
HTTPMethod::GET,
|
||||
"/service-metrics",
|
||||
string(""),
|
||||
MessageCategory::GENERIC,
|
||||
servie_metric_req_md
|
||||
);
|
||||
if (!res.ok()) {
|
||||
dbgWarning(D_PROMETHEUS) << "Failed to get service metrics. Service: " << service.first;
|
||||
continue;
|
||||
}
|
||||
stringstream buffer;
|
||||
buffer << res.unpack().getBody();
|
||||
cereal::JSONInputArchive archive(buffer);
|
||||
vector<PrometheusData> metrics;
|
||||
archive(cereal::make_nvp("metrics", metrics));
|
||||
addMetrics(metrics);
|
||||
}
|
||||
}
|
||||
|
||||
string
|
||||
getFormatedPrometheusMetrics()
|
||||
{
|
||||
MetricScrapeEvent().notify();
|
||||
getServicesMetrics();
|
||||
stringstream result;
|
||||
for (auto &metric : prometheus_metrics) {
|
||||
result << metric.second;
|
||||
}
|
||||
dbgTrace(D_PROMETHEUS) << "Prometheus metrics: " << result.str();
|
||||
return result.str();
|
||||
}
|
||||
|
||||
map<string, PrometheusMetricData> prometheus_metrics;
|
||||
};
|
||||
|
||||
PrometheusComp::PrometheusComp() : Component("Prometheus"), pimpl(make_unique<Impl>()) {}
|
||||
|
||||
PrometheusComp::~PrometheusComp() {}
|
||||
|
||||
void
|
||||
PrometheusComp::init()
|
||||
{
|
||||
pimpl->init();
|
||||
}
|
||||
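For reference, PrometheusMetricData::print() above emits the standard Prometheus text exposition format (# HELP, # TYPE, then one name{labels} value line per entry). A small usage sketch of the class defined above; the label string, value, and description are illustrative, and the metric name is rewritten through convertMetricName() from prometheus_metric_names.h below:

    #include <iostream>

    // Illustrative use only; the component normally fills labels and values from the
    // per-service "/service-metrics" responses.
    int main()
    {
        PrometheusMetricData metric("reachDropVerdictSum", "counter", "Number of drop verdicts");
        metric.addElement("{method=\"POST\"}", "7");
        std::cout << metric;
        // Expected output (reachDropVerdictSum maps to drop_verdict_counter):
        //   # HELP drop_verdict_counter Number of drop verdicts
        //   # TYPE drop_verdict_counter counter
        //   drop_verdict_counter{method="POST"} 7
        return 0;
    }
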
143
components/security_apps/prometheus/prometheus_metric_names.h
Executable file
@@ -0,0 +1,143 @@
|
||||
#ifndef __PROMETHEUS_METRIC_NAMES_H__
#define __PROMETHEUS_METRIC_NAMES_H__

#include <string>
#include <unordered_map>

#include "debug.h"

USE_DEBUG_FLAG(D_PROMETHEUS);

std::string
convertMetricName(const std::string &original_metric_name)
{
    static const std::unordered_map<std::string, std::string> original_to_representative_names = {
        // HybridModeMetric
        {"watchdogProcessStartupEventsSum", "nano_service_restarts_counter"},
        // nginxAttachmentMetric
        {"inspectVerdictSum", "traffic_inspection_verdict_inspect_counter"},
        {"acceptVeridctSum", "traffic_inspection_verdict_accept_counter"},
        {"dropVerdictSum", "traffic_inspection_verdict_drop_counter"},
        {"injectVerdictSum", "traffic_inspection_verdict_inject_counter"},
        {"irrelevantVerdictSum", "traffic_inspection_verdict_irrelevant_counter"},
        {"reconfVerdictSum", "traffic_inspection_verdict_reconf_counter"},
        {"responseInspection", "response_body_inspection_counter"},
        // nginxIntakerMetric
        {"successfullInspectionTransactionsSum", "successful_Inspection_counter"},
        {"failopenTransactionsSum", "fail_open_Inspection_counter"},
        {"failcloseTransactionsSum", "fail_close_Inspection_counter"},
        {"transparentModeTransactionsSum", "transparent_mode_counter"},
        {"totalTimeInTransparentModeSum", "total_time_in_transparent_mode_counter"},
        {"reachInspectVerdictSum", "inspect_verdict_counter"},
        {"reachAcceptVerdictSum", "accept_verdict_counter"},
        {"reachDropVerdictSum", "drop_verdict_counter"},
        {"reachInjectVerdictSum", "inject_verdict_counter"},
        {"reachIrrelevantVerdictSum", "irrelevant_verdict_counter"},
        {"reachReconfVerdictSum", "reconf_verdict_counter"},
        {"requestCompressionFailureSum", "failed_requests_compression_counter"},
        {"responseCompressionFailureSum", "failed_response_compression_counter"},
        {"requestDecompressionFailureSum", "failed_requests_decompression_counter"},
        {"responseDecompressionFailureSum", "failed_response_decompression_counter"},
        {"requestCompressionSuccessSum", "successful_request_compression_counter"},
        {"responseCompressionSuccessSum", "successful_response_compression_counter"},
        {"requestDecompressionSuccessSum", "successful_request_decompression_counter"},
        {"responseDecompressionSuccessSum", "successful_response_decompression_counter"},
        {"skippedSessionsUponCorruptedZipSum", "corrupted_zip_skipped_session_counter"},
        {"attachmentThreadReachedTimeoutSum", "thread_exceeded_processing_time_counter"},
        {"registrationThreadReachedTimeoutSum", "failed_registration_thread_counter"},
        {"requestHeaderThreadReachedTimeoutSum", "request_headers_processing_thread_timeouts_counter"},
        {"requestBodyThreadReachedTimeoutSum", "request_body_processing_thread_timeouts_counter"},
        {"respondHeaderThreadReachedTimeoutSum", "response_headers_processing_thread_timeouts_counter"},
        {"respondBodyThreadReachedTimeoutSum", "response_body_processing_thread_timeouts_counter"},
        {"attachmentThreadFailureSum", "thread_failures_counter"},
        {"httpRequestProcessingReachedTimeoutSum", "request_processing_timeouts_counter"},
        {"httpRequestsSizeSum", "requests_total_size_counter"},
        {"httpResponsesSizeSum", "response_total_size_counter"},
        {"httpRequestFailedToReachWebServerUpstreamSum", "requests_failed_reach_upstram_counter"},
        {"overallSessionProcessTimeToVerdictAvgSample", "overall_processing_time_until_verdict_average"},
        {"overallSessionProcessTimeToVerdictMaxSample", "overall_processing_time_until_verdict_max"},
        {"overallSessionProcessTimeToVerdictMinSample", "overall_processing_time_until_verdict_min"},
        {"requestProcessTimeToVerdictAvgSample", "requests_processing_time_until_verdict_average"},
        {"requestProcessTimeToVerdictMaxSample", "requests_processing_time_until_verdict_max"},
        {"requestProcessTimeToVerdictMinSample", "requests_processing_time_until_verdict_min"},
        {"responseProcessTimeToVerdictAvgSample", "response_processing_time_until_verdict_average"},
        {"responseProcessTimeToVerdictMaxSample", "response_processing_time_until_verdict_max"},
        {"responseProcessTimeToVerdictMinSample", "response_processing_time_until_verdict_min"},
        {"requestBodySizeUponTimeoutAvgSample", "request_body_size_average"},
        {"requestBodySizeUponTimeoutMaxSample", "request_body_size_max"},
        {"requestBodySizeUponTimeoutMinSample", "request_body_size_min"},
        {"responseBodySizeUponTimeoutAvgSample", "response_body_size_average"},
        {"responseBodySizeUponTimeoutMaxSample", "response_body_size_max"},
        {"responseBodySizeUponTimeoutMinSample", "response_body_size_min"},
        // WaapTelemetrics
        {"reservedNgenA", "total_requests_counter"},
        {"reservedNgenB", "unique_sources_counter"},
        {"reservedNgenC", "requests_blocked_by_force_and_exception_counter"},
        {"reservedNgenD", "requests_blocked_by_waf_counter"},
        {"reservedNgenE", "requests_blocked_by_open_api_counter"},
        {"reservedNgenF", "requests_blocked_by_bot_protection_counter"},
        {"reservedNgenG", "requests_threat_level_info_and_no_threat_counter"},
        {"reservedNgenH", "requests_threat_level_low_counter"},
        {"reservedNgenI", "requests_threat_level_medium_counter"},
        {"reservedNgenJ", "requests_threat_level_high_counter"},
        // WaapTrafficTelemetrics
        {"reservedNgenA", "post_requests_counter"},
        {"reservedNgenB", "get_requests_counter"},
        {"reservedNgenC", "put_requests_counter"},
        {"reservedNgenD", "patch_requests_counter"},
        {"reservedNgenE", "delete_requests_counter"},
        {"reservedNgenF", "other_requests_counter"},
        {"reservedNgenG", "2xx_status_code_responses_counter"},
        {"reservedNgenH", "4xx_status_code_responses_counter"},
        {"reservedNgenI", "5xx_status_code_responses_counter"},
        {"reservedNgenJ", "requests_time_latency_average"},
        // WaapAttackTypesMetrics
        {"reservedNgenA", "sql_injection_attacks_type_counter"},
        {"reservedNgenB", "vulnerability_scanning_attacks_type_counter"},
        {"reservedNgenC", "path_traversal_attacks_type_counter"},
        {"reservedNgenD", "ldap_injection_attacks_type_counter"},
        {"reservedNgenE", "evasion_techniques_attacks_type_counter"},
        {"reservedNgenF", "remote_code_execution_attacks_type_counter"},
        {"reservedNgenG", "xml_extern_entity_attacks_type_counter"},
        {"reservedNgenH", "cross_site_scripting_attacks_type_counter"},
        {"reservedNgenI", "general_attacks_type_counter"},
        // AssetsMetric
        {"numberOfProtectedApiAssetsSample", "api_assets_counter"},
        {"numberOfProtectedWebAppAssetsSample", "web_api_assets_counter"},
        {"numberOfProtectedAssetsSample", "all_assets_counter"},
        // IPSMetric
        {"preventEngineMatchesSample", "prevent_action_matches_counter"},
        {"detectEngineMatchesSample", "detect_action_matches_counter"},
        {"ignoreEngineMatchesSample", "ignore_action_matches_counter"},
        // CPUMetric
        {"cpuMaxSample", "cpu_usage_percentage_max"},
        {"cpuAvgSample", "cpu_usage_percentage_average"},
        {"cpuSample", "cpu_usage_percentage_last_value"},
        // LogMetric
        {"logQueueMaxSizeSample", "logs_queue_size_max"},
        {"logQueueAvgSizeSample", "logs_queue_size_average"},
        {"logQueueCurrentSizeSample", "logs_queue_size_last_value"},
        {"sentLogsSum", "logs_sent_counter"},
        {"sentLogsBulksSum", "bulk_logs_sent_counter"},
        // MemoryMetric
        {"serviceVirtualMemorySizeMaxSample", "service_virtual_memory_size_kb_max"},
        {"serviceVirtualMemorySizeMinSample", "service_virtual_memory_size_kb_min"},
        {"serviceVirtualMemorySizeAvgSample", "service_virtual_memory_size_kb_average"},
        {"serviceRssMemorySizeMaxSample", "service_physical_memory_size_kb_max"},
        {"serviceRssMemorySizeMinSample", "service_physical_memory_size_kb_min"},
        {"serviceRssMemorySizeAvgSample", "service_physical_memory_size_kb_average"},
        {"generalTotalMemorySizeMaxSample", "general_total_used_memory_max"},
        {"generalTotalMemorySizeMinSample", "general_total_used_memory_min"},
        {"generalTotalMemorySizeAvgSample", "general_total_used_memory_average"},
    };

    auto metric_names = original_to_representative_names.find(original_metric_name);
    if (metric_names != original_to_representative_names.end()) return metric_names->second;
    dbgDebug(D_PROMETHEUS)
        << "Metric doesn't have a representative name, original name: "
        << original_metric_name;
    return "";
}

#endif // __PROMETHEUS_METRIC_NAMES_H__
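As a usage sketch (an assumed caller, not part of this diff), an exporter-side routine could translate an internal metric name with convertMetricName() and fall back to the original label when no representative name exists:

#include <iostream>
#include <string>

#include "prometheus_metric_names.h"

int
main()
{
    std::string internal_name = "cpuMaxSample";
    std::string exported_name = convertMetricName(internal_name);  // "cpu_usage_percentage_max"
    // The fallback below is an assumption for this sketch; convertMetricName() itself
    // returns an empty string for names it does not recognize.
    if (exported_name.empty()) exported_name = internal_name;
    std::cout << exported_name << std::endl;
    return 0;
}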
components/security_apps/prometheus/prometheus_ut/CMakeLists.txt (new executable file, 8 lines)
@@ -0,0 +1,8 @@
link_directories(${BOOST_ROOT}/lib)
link_directories(${BOOST_ROOT}/lib ${CMAKE_BINARY_DIR}/core/shmem_ipc)

add_unit_test(
    prometheus_ut
    "prometheus_ut.cc"
    "prometheus_comp;logging;agent_details;waap_clib;table;singleton;time_proxy;metric;event_is;connkey;http_transaction_data;generic_rulebase;generic_rulebase_evaluators;ip_utilities;intelligence_is_v2;-lboost_regex;messaging;"
)
components/security_apps/prometheus/prometheus_ut/prometheus_ut.cc (new executable file, 79 lines)
@@ -0,0 +1,79 @@
#include "prometheus_comp.h"
|
||||
|
||||
#include <sstream>
|
||||
#include <fstream>
|
||||
#include <vector>
|
||||
|
||||
#include "cmock.h"
|
||||
#include "cptest.h"
|
||||
#include "maybe_res.h"
|
||||
#include "debug.h"
|
||||
#include "config.h"
|
||||
#include "environment.h"
|
||||
#include "config_component.h"
|
||||
#include "agent_details.h"
|
||||
#include "time_proxy.h"
|
||||
#include "mock/mock_mainloop.h"
|
||||
#include "mock/mock_rest_api.h"
|
||||
#include "mock/mock_messaging.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace testing;
|
||||
|
||||
USE_DEBUG_FLAG(D_PROMETHEUS);
|
||||
|
||||
class PrometheusCompTest : public Test
|
||||
{
|
||||
public:
|
||||
PrometheusCompTest()
|
||||
{
|
||||
EXPECT_CALL(mock_rest, mockRestCall(_, "declare-boolean-variable", _)).WillOnce(Return(false));
|
||||
env.preload();
|
||||
config.preload();
|
||||
env.init();
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_rest,
|
||||
addGetCall("metrics", _)
|
||||
).WillOnce(DoAll(SaveArg<1>(&get_metrics_func), Return(true)));
|
||||
|
||||
prometheus_comp.init();
|
||||
}
|
||||
|
||||
::Environment env;
|
||||
ConfigComponent config;
|
||||
PrometheusComp prometheus_comp;
|
||||
StrictMock<MockRestApi> mock_rest;
|
||||
StrictMock<MockMainLoop> mock_ml;
|
||||
NiceMock<MockMessaging> mock_messaging;
|
||||
unique_ptr<ServerRest> agent_uninstall;
|
||||
function<string()> get_metrics_func;
|
||||
CPTestTempfile status_file;
|
||||
string registered_services_file_path;
|
||||
|
||||
};
|
||||
|
||||
TEST_F(PrometheusCompTest, checkAddingMetric)
|
||||
{
|
||||
registered_services_file_path = cptestFnameInSrcDir(string("registered_services.json"));
|
||||
setConfiguration(registered_services_file_path, "orchestration", "Orchestration registered services");
|
||||
string metric_body = "{\n"
|
||||
" \"metrics\": [\n"
|
||||
" {\n"
|
||||
" \"metric_name\": \"watchdogProcessStartupEventsSum\",\n"
|
||||
" \"metric_type\": \"counter\",\n"
|
||||
" \"metric_description\": \"\",\n"
|
||||
" \"labels\": \"{method=\\\"post\\\",code=\\\"200\\\"}\",\n"
|
||||
" \"value\": \"1534\"\n"
|
||||
" }\n"
|
||||
" ]\n"
|
||||
"}";
|
||||
|
||||
string message_body;
|
||||
EXPECT_CALL(mock_messaging, sendSyncMessage(_, "/service-metrics", _, _, _))
|
||||
.Times(2).WillRepeatedly(Return(HTTPResponse(HTTPStatusCode::HTTP_OK, metric_body)));
|
||||
|
||||
string metric_str = "# TYPE nano_service_restarts_counter counter\n"
|
||||
"nano_service_restarts_counter{method=\"post\",code=\"200\"} 1534\n\n";
|
||||
EXPECT_EQ(metric_str, get_metrics_func());
|
||||
}
|
||||
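The expected string in the test above follows the Prometheus text exposition format: a "# TYPE <name> <type>" comment line followed by "<name><labels> <value>". A minimal formatting sketch that reproduces the same two lines; the helper below is hypothetical and is not the formatter used by PrometheusComp.

#include <iostream>
#include <sstream>
#include <string>

// Hypothetical helper for this sketch only.
std::string
formatPrometheusMetric(
    const std::string &name,
    const std::string &type,
    const std::string &labels,
    const std::string &value)
{
    std::stringstream out;
    out << "# TYPE " << name << " " << type << "\n";
    out << name << labels << " " << value << "\n\n";
    return out.str();
}

int
main()
{
    // Prints the same output the unit test expects from get_metrics_func().
    std::cout << formatPrometheusMetric(
        "nano_service_restarts_counter",
        "counter",
        "{method=\"post\",code=\"200\"}",
        "1534"
    );
    return 0;
}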
components/security_apps/prometheus/prometheus_ut/registered_services.json (new executable file, 32 lines)
@@ -0,0 +1,32 @@
{
    "Registered Services": {
        "cp-nano-orchestration": {
            "Service name": "cp-nano-orchestration",
            "Service ID": "cp-nano-orchestration",
            "Service port": 7777,
            "Relevant configs": [
                "zones",
                "triggers",
                "rules",
                "registration-data",
                "parameters",
                "orchestration",
                "exceptions",
                "agent-intelligence"
            ]
        },
        "cp-nano-prometheus": {
            "Service name": "cp-nano-prometheus",
            "Service ID": "cp-nano-prometheus",
            "Service port": 7465,
            "Relevant configs": [
                "zones",
                "triggers",
                "rules",
                "parameters",
                "exceptions",
                "agent-intelligence"
            ]
        }
    }
}
@@ -246,6 +246,28 @@ public:
        return matched_rule;
    }

    void
    fetchReplicaCount()
    {
        string curl_cmd =
            base_curl_cmd + " -H \"Authorization: Bearer " + kubernetes_token + "\" "
            "https://kubernetes.default.svc.cluster.local/apis/apps/v1/namespaces/" + kubernetes_namespace +
            "/deployments/${AGENT_DEPLOYMENT_NAME} -k -s | jq .status.replicas";
        auto maybe_replicas = i_shell_cmd->getExecOutput(curl_cmd);
        if (maybe_replicas.ok()) {
            try {
                replicas = std::stoi(maybe_replicas.unpack());
                dbgTrace(D_RATE_LIMIT) << "replicas is set to " << replicas;
            } catch (const std::exception &e) {
                dbgWarning(D_RATE_LIMIT) << "error while converting replicas: " << e.what();
            }
        }
        if (replicas == 0) {
            dbgWarning(D_RATE_LIMIT) << "replicas is set to 0, setting replicas to 1";
            replicas = 1;
        }
    }

    EventVerdict
    respond(const HttpRequestHeaderEvent &event) override
    {
@@ -271,10 +293,72 @@ public:
        dbgDebug(D_RATE_LIMIT) << "source identifier value: " << source_identifier;

        auto maybe_source_ip = env->get<IPAddr>(HttpTransactionData::client_ip_ctx);
        set<string> ip_set;
        string source_ip = "";
        if (maybe_source_ip.ok()) source_ip = ipAddrToStr(maybe_source_ip.unpack());
        if (maybe_source_ip.ok()) {
            source_ip = ipAddrToStr(maybe_source_ip.unpack());

        unordered_map<string, set<string>> condition_map = createConditionMap(uri, source_ip, source_identifier);
            if (getProfileAgentSettingWithDefault<bool>(false, "agent.rateLimit.ignoreSourceIP")) {
                dbgDebug(D_RATE_LIMIT) << "Rate limit ignoring source ip: " << source_ip;
            } else {
                ip_set.insert(source_ip);
            }
        }

        auto maybe_xff = env->get<string>(HttpTransactionData::xff_vals_ctx);
        if (!maybe_xff.ok()) {
            dbgTrace(D_RATE_LIMIT) << "Rate limit failed to get xff vals from env";
        } else {
            auto ips = split(maybe_xff.unpack(), ',');
            ip_set.insert(ips.begin(), ips.end());
        }

        EnumArray<I_GeoLocation::GeoLocationField, string> geo_location_data;
        set<string> country_codes;
        set<string> country_names;
        for (const string& source : ip_set) {
            Maybe<IPAddr> maybe_source_ip = IPAddr::createIPAddr(source);
            if (!maybe_source_ip.ok()){
                dbgWarning(D_RATE_LIMIT)
                    << "Rate limit failed to create ip address from source: "
                    << source
                    << ", Error: "
                    << maybe_source_ip.getErr();
                continue;
            }
            auto asset_location =
                Singleton::Consume<I_GeoLocation>::by<RateLimit>()->lookupLocation(maybe_source_ip.unpack());
            if (!asset_location.ok()) {
                dbgWarning(D_RATE_LIMIT)
                    << "Rate limit lookup location failed for source: "
                    << source_ip
                    << ", Error: "
                    << asset_location.getErr();
                continue;
            }
            geo_location_data = asset_location.unpack();
            auto code = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_CODE];
            auto name = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_NAME];
            country_codes.insert(code);
            country_names.insert(name);
            dbgTrace(D_RATE_LIMIT)
                << "Rate limit found "
                << "country code: "
                << code
                << ", country name: "
                << name
                << ", source ip address: "
                << source;
        }


        unordered_map<string, set<string>> condition_map = createConditionMap(
            uri,
            source_ip,
            source_identifier,
            country_codes,
            country_names
        );
        if (shouldApplyException(condition_map)) {
            dbgDebug(D_RATE_LIMIT) << "found accept exception, not enforcing rate limit on this URI: " << uri;
            return ACCEPT;
@@ -293,11 +377,6 @@ public:
            return ACCEPT;
        }

        auto replicas = getenv("REPLICA_COUNT") ? std::stoi(getenv("REPLICA_COUNT")) : 1;
        if (replicas == 0) {
            dbgWarning(D_RATE_LIMIT) << "REPLICA_COUNT environment variable is set to 0, setting REPLICA_COUNT to 1";
            replicas = 1;
        }
        burst = static_cast<float>(rule.getRateLimit()) / replicas;
        limit = static_cast<float>(calcRuleLimit(rule)) / replicas;

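The per-replica division above means each pod enforces only its share of the configured rate. A worked example with assumed numbers: a rule of 100 requests per interval spread across 4 replicas leaves each replica a local limit of 25.

#include <iostream>

int
main()
{
    // Assumed values for illustration only; real values come from the rule and Kubernetes.
    int rule_rate_limit = 100;
    int replicas = 4;
    float per_replica_limit = static_cast<float>(rule_rate_limit) / replicas;
    std::cout << per_replica_limit << std::endl;  // prints 25
    return 0;
}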
@@ -476,10 +555,18 @@ public:
    }

    unordered_map<string, set<string>>
    createConditionMap(const string &uri, const string &source_ip, const string &source_identifier)
    createConditionMap(
        const string &uri,
        const string &source_ip,
        const string &source_identifier,
        const set<string> &country_codes,
        const set<string> &country_names
    )
    {
        unordered_map<string, set<string>> condition_map;
        if (!source_ip.empty()) condition_map["sourceIP"].insert(source_ip);
        if (!country_codes.empty()) condition_map["countryCode"].insert(country_codes.begin(), country_codes.end());
        if (!country_names.empty()) condition_map["countryName"].insert(country_names.begin(), country_names.end());
        condition_map["sourceIdentifier"].insert(source_identifier);
        condition_map["url"].insert(uri);

@@ -616,6 +703,23 @@ public:
            "Initialize rate limit component",
            false
        );

        i_shell_cmd = Singleton::Consume<I_ShellCmd>::by<RateLimit>();
        i_env_details = Singleton::Consume<I_EnvDetails>::by<RateLimit>();
        env_type = i_env_details->getEnvType();
        const char *nexus_env = getenv("KUBERNETES_METADATA");
        if (nexus_env == nullptr) return;
        if (env_type == EnvType::K8S && string(nexus_env) == "true") {
            kubernetes_token = i_env_details->getToken();
            kubernetes_namespace = i_env_details->getNameSpace();
            fetchReplicaCount();
            Singleton::Consume<I_MainLoop>::by<RateLimit>()->addRecurringRoutine(
                I_MainLoop::RoutineType::Offline,
                chrono::seconds(120),
                [this]() { fetchReplicaCount(); },
                "Fetch current replica count from the Kubernetes cluster"
            );
        }
    }

    void
@@ -624,6 +728,9 @@ public:
        disconnectRedis();
    }

    I_ShellCmd *i_shell_cmd = nullptr;
    I_EnvDetails* i_env_details = nullptr;

private:
    static constexpr auto DROP = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
    static constexpr auto ACCEPT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
@@ -634,6 +741,17 @@ private:
    int burst;
    float limit;
    redisContext* redis = nullptr;
    int replicas = 1;
    EnvType env_type;
    string kubernetes_namespace = "";
    string kubernetes_token = "";
#if defined(gaia)
    const string base_curl_cmd = "curl_cli";
#elif defined(alpine)
    const string base_curl_cmd = "LD_LIBRARY_PATH=/usr/lib/:/usr/lib/cpnano curl";
#else
    const string base_curl_cmd = "curl";
#endif
};

RateLimit::RateLimit() : Component("RateLimit"), pimpl(make_unique<Impl>()) {}

@@ -23,7 +23,7 @@
static const uint max_send_obj_retries = 3;
static const std::chrono::microseconds wait_next_attempt(5000000);

USE_DEBUG_FLAG(D_WAAP);
USE_DEBUG_FLAG(D_WAAP_SERIALIZE);

class RestGetFile : public ClientRest
{
@@ -137,9 +137,13 @@ public:
    void setRemoteSyncEnabled(bool enabled);
protected:
    void mergeProcessedFromRemote();
    std::string getWindowId();
    void waitSync();
    std::string getPostDataUrl();
    std::string getUri();
    size_t getIntervalsCount();
    void incrementIntervalsCount();
    bool isBase();

    template<typename T>
    bool sendObject(T &obj, HTTPMethod method, std::string uri)
@@ -147,13 +151,14 @@ protected:
        I_Messaging *messaging = Singleton::Consume<I_Messaging>::by<WaapComponent>();
        I_AgentDetails *agentDetails = Singleton::Consume<I_AgentDetails>::by<WaapComponent>();
        if (agentDetails->getOrchestrationMode() == OrchestrationMode::OFFLINE) {
            dbgDebug(D_WAAP) << "offline mode not sending object";
            dbgDebug(D_WAAP_SERIALIZE) << "offline mode not sending object";
            return false;
        }
        if (agentDetails->getOrchestrationMode() == OrchestrationMode::HYBRID) {
            MessageMetadata req_md(getSharedStorageHost(), 80);
            req_md.insertHeader("X-Tenant-Id", agentDetails->getTenantId());
            req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
            req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
            auto req_status = messaging->sendSyncMessage(
                method,
                uri,
@@ -162,19 +167,22 @@ protected:
                req_md
            );
            if (!req_status.ok()) {
                dbgWarning(D_WAAP) << "failed to send request to uri: " << uri
                dbgWarning(D_WAAP_SERIALIZE) << "failed to send request to uri: " << uri
                    << ", error: " << req_status.getErr().toString();
            }
            return req_status.ok();
        }
        MessageMetadata req_md;
        req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_FOG_CONN);
        auto req_status = messaging->sendSyncMessage(
            method,
            uri,
            obj,
            MessageCategory::GENERIC
            MessageCategory::GENERIC,
            req_md
        );
        if (!req_status.ok()) {
            dbgWarning(D_WAAP) << "failed to send request to uri: " << uri
            dbgWarning(D_WAAP_SERIALIZE) << "failed to send request to uri: " << uri
                << ", error: " << req_status.getErr().toString();
        }
        return req_status.ok();
@@ -188,14 +196,14 @@ protected:
    {
        if (sendObject(obj, method, uri))
        {
            dbgTrace(D_WAAP) <<
            dbgTrace(D_WAAP_SERIALIZE) <<
                "object sent successfully after " << i << " retry attempts";
            return true;
        }
        dbgInfo(D_WAAP) << "Failed to send object. Attempt: " << i;
        dbgInfo(D_WAAP_SERIALIZE) << "Failed to send object. Attempt: " << i;
        mainloop->yield(wait_next_attempt);
    }
    dbgWarning(D_WAAP) << "Failed to send object to " << uri << ", reached maximum attempts: " <<
    dbgWarning(D_WAAP_SERIALIZE) << "Failed to send object to " << uri << ", reached maximum attempts: " <<
        max_send_obj_retries;
    return false;
}
@@ -206,13 +214,14 @@ protected:
        I_Messaging *messaging = Singleton::Consume<I_Messaging>::by<WaapComponent>();
        I_AgentDetails *agentDetails = Singleton::Consume<I_AgentDetails>::by<WaapComponent>();
        if (agentDetails->getOrchestrationMode() == OrchestrationMode::OFFLINE) {
            dbgDebug(D_WAAP) << "offline mode not sending object";
            dbgDebug(D_WAAP_SERIALIZE) << "offline mode not sending object";
            return false;
        }
        if (agentDetails->getOrchestrationMode() == OrchestrationMode::HYBRID) {
            MessageMetadata req_md(getSharedStorageHost(), 80);
            req_md.insertHeader("X-Tenant-Id", agentDetails->getTenantId());
            req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
            req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
            return messaging->sendSyncMessageWithoutResponse(
                method,
                uri,
@@ -221,11 +230,14 @@ protected:
                req_md
            );
        }
        MessageMetadata req_md;
        req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_FOG_CONN);
        return messaging->sendSyncMessageWithoutResponse(
            method,
            uri,
            obj,
            MessageCategory::GENERIC
            MessageCategory::GENERIC,
            req_md
        );
    }

@@ -237,14 +249,14 @@ protected:
    {
        if (sendNoReplyObject(obj, method, uri))
        {
            dbgTrace(D_WAAP) <<
            dbgTrace(D_WAAP_SERIALIZE) <<
                "object sent successfully after " << i << " retry attempts";
            return true;
        }
        dbgInfo(D_WAAP) << "Failed to send object. Attempt: " << i;
        dbgInfo(D_WAAP_SERIALIZE) << "Failed to send object. Attempt: " << i;
        mainloop->yield(wait_next_attempt);
    }
    dbgWarning(D_WAAP) << "Failed to send object to " << uri << ", reached maximum attempts: " <<
    dbgWarning(D_WAAP_SERIALIZE) << "Failed to send object to " << uri << ", reached maximum attempts: " <<
        max_send_obj_retries;
    return false;
}
@@ -252,14 +264,14 @@ protected:
    const std::string m_remotePath; // Created from tenentId + / + assetId + / + class
    std::chrono::seconds m_interval;
    std::string m_owner;
    const std::string m_assetId;
    bool m_remoteSyncEnabled;

private:
    bool localSyncAndProcess();
    void updateStateFromRemoteService();
    RemoteFilesList getProcessedFilesList();
    RemoteFilesList getRemoteProcessedFilesList();
    std::string getWindowId();
    bool isBase();
    std::string getLearningHost();
    std::string getSharedStorageHost();

@@ -269,8 +281,6 @@ private:
    size_t m_daysCount;
    size_t m_windowsCount;
    size_t m_intervalsCounter;
    bool m_remoteSyncEnabled;
    const std::string m_assetId;
    const bool m_isAssetIdUuid;
    std::string m_type;
    std::string m_lastProcessedModified;

@@ -84,6 +84,7 @@ public:
    virtual const std::string getUri() const = 0;
    virtual const std::string getUriStr() const = 0;
    virtual const std::string& getSourceIdentifier() const = 0;
    virtual const std::string getCurrentWebUserResponse() = 0;
    virtual double getScore() const = 0;
    virtual double getOtherModelScore() const = 0;
    virtual const std::vector<double> getScoreArray() const = 0;
@@ -130,6 +131,7 @@ public:
    virtual void add_request_body_chunk(const char* data, int data_len) = 0;
    virtual void end_request_body() = 0;
    virtual void end_request() = 0;
    virtual bool shouldLimitResponseHeadersInspection() = 0;
    // Response
    virtual void start_response(int response_status, int http_version) = 0;
    virtual void start_response_hdrs() = 0;
@@ -145,4 +147,7 @@ public:
    virtual ReportIS::Severity computeEventSeverityFromDecision() const = 0;
    virtual void finish() = 0;
    virtual Waf2TransactionFlags &getTransactionFlags() = 0;

    virtual void setTemperatureDetected(bool detected) = 0;
    virtual bool wasTemperatureDetected() const = 0;
};

@@ -19,6 +19,7 @@
#include "../waap_clib/WaapParameters.h"
#include "../waap_clib/WaapOpenRedirectPolicy.h"
#include "../waap_clib/WaapErrorDisclosurePolicy.h"
#include "../waap_clib/DecisionType.h"
#include "../waap_clib/CsrfPolicy.h"
#include "../waap_clib/UserLimitsPolicy.h"
#include "../waap_clib/RateLimiting.h"
@@ -44,8 +45,8 @@ public:
    virtual const std::string& get_AssetId() const = 0;
    virtual const std::string& get_AssetName() const = 0;
    virtual const BlockingLevel& get_BlockingLevel() const = 0;
    virtual const std::string& get_PracticeId() const = 0;
    virtual const std::string& get_PracticeName() const = 0;
    virtual const std::string& get_PracticeIdByPactice(DecisionType practiceType) const = 0;
    virtual const std::string& get_PracticeNameByPactice(DecisionType practiceType) const = 0;
    virtual const std::string& get_PracticeSubType() const = 0;
    virtual const std::string& get_RuleId() const = 0;
    virtual const std::string& get_RuleName() const = 0;

@@ -19,7 +19,6 @@ AutonomousSecurityDecision::AutonomousSecurityDecision(DecisionType type) :
    m_fpMitigationScore(0.0f),
    m_finalScore(0.0f),
    m_threatLevel(NO_THREAT),
    m_overridesLog(false),
    m_relativeReputationMean(0.0),
    m_variance(0.0)
{}
@@ -52,10 +51,6 @@ void AutonomousSecurityDecision::setThreatLevel(ThreatLevel threatLevel)
    m_threatLevel = threatLevel;
}

void AutonomousSecurityDecision::setOverridesLog(bool overridesLog)
{
    m_overridesLog = overridesLog;
}
void AutonomousSecurityDecision::setRelativeReputationMean(double relativeReputationMean)
{
    m_relativeReputationMean = relativeReputationMean;
@@ -80,10 +75,6 @@ ThreatLevel AutonomousSecurityDecision::getThreatLevel() const
{
    return m_threatLevel;
}
bool AutonomousSecurityDecision::getOverridesLog() const
{
    return m_overridesLog;
}
double AutonomousSecurityDecision::getRelativeReputationMean() const
{
    return m_relativeReputationMean;

@@ -30,14 +30,12 @@ public:
    void setFpMitigationScore(double fpMitigationScore);
    void setFinalScore(double finalScore);
    void setThreatLevel(ThreatLevel threatLevel);
    void setOverridesLog(bool overridesLog);
    void setRelativeReputationMean(double relativeReputationMean);
    void setVariance(double variance);
    double getRelativeReputation() const;
    double getFpMitigationScore() const;
    double getFinalScore() const;
    ThreatLevel getThreatLevel() const;
    bool getOverridesLog() const;
    double getRelativeReputationMean() const;
    double getVariance() const;

@@ -46,7 +44,6 @@ private:
    double m_fpMitigationScore;
    double m_finalScore;
    ThreatLevel m_threatLevel;
    bool m_overridesLog;
    double m_relativeReputationMean;
    double m_variance;
};

@@ -12,6 +12,7 @@ add_library(waap_clib
    ParserJson.cc
    ParserMultipartForm.cc
    ParserRaw.cc
    ParserGzip.cc
    ParserUrlEncode.cc
    ParserXML.cc
    ParserDelimiter.cc
@@ -91,6 +92,7 @@ add_library(waap_clib
    ParserScreenedJson.cc
    ParserBinaryFile.cc
    RegexComparator.cc
    RequestsMonitor.cc
)

add_definitions("-Wno-unused-function")

Some files were not shown because too many files have changed in this diff.