Compare commits

...

74 Commits
1.1.22 ... main

Author SHA1 Message Date
orianelou
31ff6f2c72
Update docker-compose.yaml 2025-06-23 12:43:44 +03:00
orianelou
eac686216b
Update docker-compose.yaml 2025-06-23 12:42:41 +03:00
orianelou
938cae1270
Update docker-compose.yaml 2025-06-23 12:41:38 +03:00
orianelou
87cdeef42f
Update docker-compose.yaml 2025-06-23 12:40:40 +03:00
orianelou
d04ea7d3e2
Update docker-compose.yaml 2025-06-23 12:39:50 +03:00
orianelou
6d649cf5d5
Update docker-compose.yaml 2025-06-23 12:38:22 +03:00
orianelou
5f71946590
Update docker-compose.yaml 2025-06-23 12:36:37 +03:00
orianelou
c75f1e88b7
Update docker-compose.yaml 2025-06-23 12:35:49 +03:00
Daniel-Eisenberg
c4975497eb Update entry.sh 2025-06-12 12:55:27 +03:00
Daniel-Eisenberg
782dfeada6
Waf tag (#317)
* add waf-tag to openappsec

* fix waf tag to openappsec

---------

Co-authored-by: wiaamm <wiaamm@checkpoint.com>
2025-06-11 11:34:48 +03:00
wiaam96
bc1eac9d39
Fix Watchdog restarts (#319)
* don't exit

* fix restarting agent

* fix watchdog restarts
2025-06-09 16:11:45 +03:00
Daniel-Eisenberg
4dacd7d009
Prometheus support (#316)
* Add prometheus support

* Add prometheus support

* Add prometheus support

* Add prometheus support

* Add prometheus support

---------

Co-authored-by: avigailo <avigailo@checkpoint.com>
2025-06-05 16:28:57 +03:00
orianelou
3a34984def
Merge pull request #293 from willseward/bugfix/fix-ipv6-cidr
Fix IPv6 masking
2025-05-27 13:43:59 +03:00
orianelou
5aaf787cfa
Create schema_v1beta2.yaml 2025-05-13 16:21:13 +03:00
orianelou
2c7b5818e8
Update and rename schema_v1beta2.yaml to schema_v1beta1.yaml 2025-05-13 16:20:31 +03:00
orianelou
c8743d4d4b
Create schema_v1beta2.yaml 2025-05-13 16:18:52 +03:00
orianelou
d703f16e35
Update README.md 2025-04-17 15:12:48 +03:00
Daniel-Eisenberg
692c430e8a
Merge pull request #298 from openappsec/exception-fix
exception fix
2025-04-17 15:06:23 +03:00
Daniel Eisenberg
72c5594b10 exception fix 2025-04-17 13:37:25 +03:00
orianelou
2c6b6baa3b
Update docker-compose.yaml 2025-04-01 14:24:16 +03:00
orianelou
37d0f1c45f
Update bug_report.md 2025-04-01 10:14:26 +03:00
Wills Ward
2678db9d2f fix IPv6 masking 2025-03-30 14:59:26 -05:00
orianelou
52c93ad574
Merge pull request #291 from MaxShapiro/MaxShapiro-patch-1
Update .env
2025-03-30 10:22:09 +03:00
Max Shapiro
bd3a53041e
Update .env 2025-03-30 09:55:33 +03:00
Daniel-Eisenberg
44f40fbd1b
Merge pull request #287 from openappsec/docker-upgrade-issue
Docker upgrade issue
2025-03-25 22:47:21 +02:00
orianelou
0691f9b9cd
Update open-appsec-k8s-prevent-config-v1beta2.yaml 2025-03-23 14:33:18 +02:00
orianelou
0891dcd251
Update .env 2025-03-23 14:02:41 +02:00
Daniel-Eisenberg
7669f0c89c
Merge pull request #285 from openappsec/Mar_17_2025-Dev
Mar 17 2025 dev
2025-03-19 17:57:49 +02:00
orianelou
39d7884bed
Update bug_report.md 2025-03-19 16:42:28 +02:00
orianelou
b8783c3065
Update nginx_version_support.md 2025-03-19 11:32:09 +02:00
orianelou
37dc9f14b4
Update config.yml 2025-03-19 11:31:32 +02:00
orianelou
9a1f1b5966
Update config.yml 2025-03-19 11:30:41 +02:00
orianelou
b0bfd3077c
Update config.yml 2025-03-19 11:30:09 +02:00
orianelou
0469f5aa1f
Update bug_report.md 2025-03-19 11:29:51 +02:00
orianelou
3578797214
Delete .github/ISSUE_TEMPLATE/feature_request.md 2025-03-19 11:29:28 +02:00
orianelou
16a72fdf3e
Update nginx_version_support.md 2025-03-19 11:29:03 +02:00
orianelou
87d257f268
Update config.yml 2025-03-19 11:26:36 +02:00
orianelou
36d8006c26
Create config.yml 2025-03-19 11:24:55 +02:00
orianelou
8d47795d4d
Delete .github/ISSUE_TEMPLATE/config.yml 2025-03-19 11:21:45 +02:00
orianelou
f3656712b0
Merge pull request #284 from openappsec/orianelou-issue-tamplates
Orianelou issue tamplates
2025-03-19 11:20:41 +02:00
orianelou
b1781234fd
Create config.yml 2025-03-19 11:18:49 +02:00
orianelou
f71dca2bfa
Create nginx_version_support.md 2025-03-19 11:16:52 +02:00
orianelou
bd333818ad
Create feature_request.md 2025-03-19 11:12:10 +02:00
orianelou
95e776d7a4
Create bug_report.md 2025-03-19 11:10:21 +02:00
Ned Wright
51c2912434 sync code 2025-03-18 20:34:34 +00:00
Ned Wright
0246b73bbd sync code 2025-03-17 14:49:44 +00:00
avigailo
919921f6d3 Add manifest to the image creation 2025-03-17 15:26:11 +02:00
avigailo
e9098e2845 Add manifest to the image creation 2025-03-16 16:57:48 +02:00
avigailo
97d042589b Add manifest to the image creation 2025-03-16 13:41:28 +02:00
orianelou
df7be864e2
Update open-appsec-crd-v1beta2.yaml 2025-03-11 16:30:27 +02:00
orianelou
ba8ec26344
Create apisix.yaml 2025-03-09 11:43:40 +02:00
orianelou
97add465e8
Create kong.yml 2025-03-09 11:42:46 +02:00
orianelou
38cb1f2c3b
Create envoy.yaml 2025-03-09 11:41:48 +02:00
orianelou
1dd9371840
Rename examples/juiceshop/nginx/swag/default.conf to examples/juiceshop/swag/default.conf 2025-03-09 11:41:13 +02:00
orianelou
f23d22a723
Rename examples/juiceshop/nginx/swag/juiceshop.subfolder.conf to examples/juiceshop/swag/juiceshop.subfolder.conf 2025-03-09 11:40:47 +02:00
orianelou
b51cf09190
Create juiceshop.subfolder.conf 2025-03-09 11:39:51 +02:00
orianelou
ceb6469a7e
Create default.conf 2025-03-09 11:39:22 +02:00
orianelou
b0ae283eed
Update open-appsec-crd-v1beta2.yaml 2025-03-06 14:19:07 +02:00
orianelou
5fcb9bdc4a
Update open-appsec-crd-v1beta2.yaml 2025-03-06 13:54:49 +02:00
orianelou
fb5698360b
Merge pull request #267 from openappsec/namspace-crds
Update open-appsec-crd-v1beta2.yaml
2025-03-06 13:38:34 +02:00
orianelou
147626bc7f
Update open-appsec-crd-v1beta2.yaml 2025-03-06 13:31:20 +02:00
orianelou
448991ef75
Update docker-compose.yaml 2025-03-03 11:54:03 +02:00
orianelou
2b1ee84280
Update docker-compose.yaml 2025-03-03 11:53:53 +02:00
orianelou
77dd288eee
Update docker-compose.yaml 2025-03-03 11:52:47 +02:00
orianelou
3cb4def82e
Update docker-compose.yaml 2025-03-03 11:52:26 +02:00
orianelou
a0dd7dd614
Update docker-compose.yaml 2025-03-03 11:51:13 +02:00
orianelou
88eed946ec
Update docker-compose.yaml 2025-03-03 11:50:49 +02:00
orianelou
3e1ad8b0f7
Update docker-compose.yaml 2025-03-03 11:50:23 +02:00
Daniel-Eisenberg
bd35c421c6
Merge pull request #263 from openappsec/Feb_27_2025-Dev
Feb 27 2025 dev
2025-03-02 18:23:10 +02:00
Ned Wright
9d6e883724 sync code 2025-02-27 16:08:31 +00:00
Ned Wright
cd020a7ddd sync code 2025-02-27 16:03:28 +00:00
orianelou
bb35eaf657
Update open-appsec-k8s-prevent-config-v1beta2.yaml 2025-02-26 16:16:16 +02:00
orianelou
648f9ae2b1
Update open-appsec-k8s-default-config-v1beta2.yaml 2025-02-26 16:15:54 +02:00
orianelou
47e47d706a
Update open-appsec-k8s-default-config-v1beta2.yaml 2025-02-26 16:15:39 +02:00
101 changed files with 5155 additions and 570 deletions

36
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@ -0,0 +1,36 @@
---
name: "Bug Report"
about: "Report a bug with open-appsec"
labels: [bug]
---
**Checklist**
- Have you checked the open-appsec troubleshooting guides - https://docs.openappsec.io/troubleshooting/troubleshooting
- Yes / No
- Have you checked the existing issues and discussions in github for the same issue
- Yes / No
- Have you checked the known limitations for the same issue - https://docs.openappsec.io/release-notes#limitations
- Yes / No
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Run '...'
3. See error '...'
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots or Logs**
If applicable, add screenshots or logs to help explain the issue.
**Environment (please complete the following information):**
- open-appsec version:
- Deployment type (Docker, Kubernetes, etc.):
- OS:
**Additional context**
Add any other context about the problem here.

8
.github/ISSUE_TEMPLATE/config.yml vendored Normal file
View File

@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: "Documentation & Troubleshooting"
url: "https://docs.openappsec.io/"
about: "Check the documentation before submitting an issue."
- name: "Feature Requests & Discussions"
url: "https://github.com/openappsec/openappsec/discussions"
about: "Please open a discussion for feature requests."

View File

@ -0,0 +1,17 @@
---
name: "Nginx Version Support Request"
about: "Request for a specific Nginx version to be supported"
---
**Nginx & OS Version:**
Which Nginx and OS version are you using?
**Output of nginx -V**
Share the output of nginx -V
**Expected Behavior:**
What do you expect to happen with this version?
**Checklist**
- Have you considered a Docker-based deployment? Find more information here: https://docs.openappsec.io/getting-started/start-with-docker
- Yes / No

View File

@ -177,7 +177,7 @@ open-appsec code was audited by an independent third party in September-October
See the [full report](https://github.com/openappsec/openappsec/blob/main/LEXFO-CHP20221014-Report-Code_audit-OPEN-APPSEC-v1.2.pdf).
### Reporting security vulnerabilities
If you've found a vulnerability or a potential vulnerability in open-appsec please let us know at securityalert@openappsec.io. We'll send a confirmation email to acknowledge your report within 24 hours, and we'll send an additional email when we've identified the issue positively or negatively.
If you've found a vulnerability or a potential vulnerability in open-appsec please let us know at security-alert@openappsec.io. We'll send a confirmation email to acknowledge your report within 24 hours, and we'll send an additional email when we've identified the issue positively or negatively.
# License

View File

@ -1,4 +1,4 @@
install(FILES Dockerfile entry.sh install-cp-agent-intelligence-service.sh install-cp-crowdsec-aux.sh DESTINATION .)
install(FILES Dockerfile entry.sh install-cp-agent-intelligence-service.sh install-cp-crowdsec-aux.sh self_managed_openappsec_manifest.json DESTINATION .)
add_custom_command(
OUTPUT ${CMAKE_INSTALL_PREFIX}/agent-docker.img

View File

@ -1,5 +1,7 @@
FROM alpine
ENV OPENAPPSEC_NANO_AGENT=TRUE
RUN apk add --no-cache -u busybox
RUN apk add --no-cache -u zlib
RUN apk add --no-cache bash
@ -13,6 +15,8 @@ RUN apk add --no-cache libxml2
RUN apk add --no-cache pcre2
RUN apk add --update coreutils
COPY self_managed_openappsec_manifest.json /tmp/self_managed_openappsec_manifest.json
COPY install*.sh /nano-service-installers/
COPY entry.sh /entry.sh

View File

@ -6,6 +6,8 @@ HTTP_TRANSACTION_HANDLER_SERVICE="install-cp-nano-service-http-transaction-handl
ATTACHMENT_REGISTRATION_SERVICE="install-cp-nano-attachment-registration-manager.sh"
ORCHESTRATION_INSTALLATION_SCRIPT="install-cp-nano-agent.sh"
CACHE_INSTALLATION_SCRIPT="install-cp-nano-agent-cache.sh"
PROMETHEUS_INSTALLATION_SCRIPT="install-cp-nano-service-prometheus.sh"
NGINX_CENTRAL_MANAGER_INSTALLATION_SCRIPT="install-cp-nano-central-nginx-manager.sh"
var_fog_address=
var_proxy=
@ -81,6 +83,14 @@ fi
/nano-service-installers/$CACHE_INSTALLATION_SCRIPT --install
/nano-service-installers/$HTTP_TRANSACTION_HANDLER_SERVICE --install
if [ "$PROMETHEUS" == "true" ]; then
/nano-service-installers/$PROMETHEUS_INSTALLATION_SCRIPT --install
fi
if [ "$CENTRAL_NGINX_MANAGER" == "true" ]; then
/nano-service-installers/$NGINX_CENTRAL_MANAGER_INSTALLATION_SCRIPT --install
fi
if [ "$CROWDSEC_ENABLED" == "true" ]; then
/nano-service-installers/$INTELLIGENCE_INSTALLATION_SCRIPT --install
/nano-service-installers/$CROWDSEC_INSTALLATION_SCRIPT --install
@ -93,25 +103,16 @@ if [ -f "$FILE" ]; then
fi
touch /etc/cp/watchdog/wd.startup
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
active_watchdog_pid=$!
while true; do
if [ -z "$init" ]; then
init=true
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
sleep 5
active_watchdog_pid=$(pgrep -f -x -o "/bin/(bash|sh) /etc/cp/watchdog/cp-nano-watchdog")
fi
current_watchdog_pid=$(pgrep -f -x -o "/bin/(bash|sh) /etc/cp/watchdog/cp-nano-watchdog")
if [ ! -f /tmp/restart_watchdog ] && [ "$current_watchdog_pid" != "$active_watchdog_pid" ]; then
echo "Error: Watchdog exited abnormally"
exit 1
elif [ -f /tmp/restart_watchdog ]; then
if [ -f /tmp/restart_watchdog ]; then
rm -f /tmp/restart_watchdog
kill -9 "$(pgrep -f -x -o "/bin/(bash|sh) /etc/cp/watchdog/cp-nano-watchdog")"
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
sleep 5
active_watchdog_pid=$(pgrep -f -x -o "/bin/(bash|sh) /etc/cp/watchdog/cp-nano-watchdog")
kill -9 ${active_watchdog_pid}
fi
if [ ! "$(ps -f | grep cp-nano-watchdog | grep ${active_watchdog_pid})" ]; then
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
active_watchdog_pid=$!
fi
sleep 5
done

View File

@ -28,6 +28,7 @@ USE_DEBUG_FLAG(D_NGINX_ATTACHMENT_PARSER);
Buffer NginxParser::tenant_header_key = Buffer();
static const Buffer proxy_ip_header_key("X-Forwarded-For", 15, Buffer::MemoryType::STATIC);
static const Buffer waf_tag_key("x-waf-tag", 9, Buffer::MemoryType::STATIC);
static const Buffer source_ip("sourceip", 8, Buffer::MemoryType::STATIC);
bool is_keep_alive_ctx = getenv("SAAS_KEEP_ALIVE_HDR_NAME") != nullptr;
@ -231,17 +232,20 @@ NginxParser::parseRequestHeaders(const Buffer &data, const unordered_set<string>
static_cast<string>(header.getKey()) + ": " + static_cast<string>(header.getValue()) + "\r\n"
);
if (NginxParser::tenant_header_key == header.getKey()) {
const auto &header_key = header.getKey();
if (NginxParser::tenant_header_key == header_key) {
dbgDebug(D_NGINX_ATTACHMENT_PARSER)
<< "Identified active tenant header. Key: "
<< dumpHex(header.getKey())
<< dumpHex(header_key)
<< ", Value: "
<< dumpHex(header.getValue());
auto active_tenant_and_profile = getActivetenantAndProfile(header.getValue());
opaque.setSessionTenantAndProfile(active_tenant_and_profile[0], active_tenant_and_profile[1]);
} else if (proxy_ip_header_key == header.getKey()) {
} else if (proxy_ip_header_key == header_key) {
source_identifiers.setXFFValuesToOpaqueCtx(header, UsersAllIdentifiersConfig::ExtractType::PROXYIP);
} else if (waf_tag_key == header_key) {
source_identifiers.setWafTagValuesToOpaqueCtx(header);
}
}

View File

@ -285,17 +285,21 @@ Maybe<string>
UsersAllIdentifiersConfig::parseXForwardedFor(const string &str, ExtractType type) const
{
vector<string> header_values = split(str);
if (header_values.empty()) return genError("No IP found in the xff header list");
vector<string> xff_values = getHeaderValuesFromConfig("x-forwarded-for");
vector<CIDRSData> cidr_values(xff_values.begin(), xff_values.end());
string last_valid_ip;
for (auto it = header_values.rbegin(); it != header_values.rend() - 1; ++it) {
if (!IPAddr::createIPAddr(*it).ok()) {
dbgWarning(D_NGINX_ATTACHMENT_PARSER) << "Invalid IP address found in the xff header IPs list: " << *it;
return genError("Invalid IP address");
if (last_valid_ip.empty()) {
return genError("Invalid IP address");
}
return last_valid_ip;
}
last_valid_ip = *it;
if (type == ExtractType::PROXYIP) continue;
if (!isIpTrusted(*it, cidr_values)) {
dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Found untrusted IP in the xff header IPs list: " << *it;
@ -307,7 +311,10 @@ UsersAllIdentifiersConfig::parseXForwardedFor(const string &str, ExtractType typ
dbgWarning(D_NGINX_ATTACHMENT_PARSER)
<< "Invalid IP address found in the xff header IPs list: "
<< header_values[0];
return genError("Invalid IP address");
if (last_valid_ip.empty()) {
return genError("No Valid Ip address was found");
}
return last_valid_ip;
}
return header_values[0];
@ -359,6 +366,24 @@ UsersAllIdentifiersConfig::setCustomHeaderToOpaqueCtx(const HttpHeader &header)
return;
}
void
UsersAllIdentifiersConfig::setWafTagValuesToOpaqueCtx(const HttpHeader &header) const
{
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
if (!i_transaction_table || !i_transaction_table->hasState<NginxAttachmentOpaque>()) {
dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table";
return;
}
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
opaque.setSavedData(HttpTransactionData::waf_tag_ctx, static_cast<string>(header.getValue()));
dbgDebug(D_NGINX_ATTACHMENT_PARSER)
<< "Added waf tag to context: "
<< static_cast<string>(header.getValue());
return;
}
Maybe<string>
UsersAllIdentifiersConfig::parseCookieElement(
const string::const_iterator &start,

View File

@ -45,6 +45,19 @@ private:
std::string host;
};
class EqualWafTag : public EnvironmentEvaluator<bool>, Singleton::Consume<I_Environment>
{
public:
EqualWafTag(const std::vector<std::string> &params);
static std::string getName() { return "EqualWafTag"; }
Maybe<bool, Context::Error> evalVariable() const override;
private:
std::string waf_tag;
};
class EqualListeningIP : public EnvironmentEvaluator<bool>, Singleton::Consume<I_Environment>
{
public:

View File

@ -137,6 +137,7 @@ public:
static const std::string source_identifier;
static const std::string proxy_ip_ctx;
static const std::string xff_vals_ctx;
static const std::string waf_tag_ctx;
static const CompressionType default_response_content_encoding;

View File

@ -30,7 +30,7 @@ public:
virtual bool isVersionAboveR8110() = 0;
virtual bool isReverseProxy() = 0;
virtual bool isCloudStorageEnabled() = 0;
virtual Maybe<std::tuple<std::string, std::string, std::string>> parseNginxMetadata() = 0;
virtual Maybe<std::tuple<std::string, std::string, std::string, std::string>> parseNginxMetadata() = 0;
virtual Maybe<std::tuple<std::string, std::string, std::string, std::string, std::string>> readCloudMetadata() = 0;
virtual std::map<std::string, std::string> getResolvedDetails() = 0;
#if defined(gaia) || defined(smb)

View File

@ -0,0 +1,30 @@
#ifndef __PROMETHEUS_COMP_H__
#define __PROMETHEUS_COMP_H__
#include <memory>
#include "component.h"
#include "singleton.h"
#include "i_rest_api.h"
#include "i_messaging.h"
#include "generic_metric.h"
class PrometheusComp
:
public Component,
Singleton::Consume<I_RestApi>,
Singleton::Consume<I_Messaging>
{
public:
PrometheusComp();
~PrometheusComp();
void init() override;
private:
class Impl;
std::unique_ptr<Impl> pimpl;
};
#endif // __PROMETHEUS_COMP_H__

View File

@ -30,6 +30,7 @@ public:
void parseRequestHeaders(const HttpHeader &header) const;
std::vector<std::string> getHeaderValuesFromConfig(const std::string &header_key) const;
void setXFFValuesToOpaqueCtx(const HttpHeader &header, ExtractType type) const;
void setWafTagValuesToOpaqueCtx(const HttpHeader &header) const;
private:
class UsersIdentifiersConfig

View File

@ -3,6 +3,7 @@ add_subdirectory(ips)
add_subdirectory(layer_7_access_control)
add_subdirectory(local_policy_mgmt_gen)
add_subdirectory(orchestration)
add_subdirectory(prometheus)
add_subdirectory(rate_limit)
add_subdirectory(waap)
add_subdirectory(central_nginx_manager)

View File

@ -179,7 +179,7 @@ private:
Maybe<void>
configureSyslog()
{
if (!getProfileAgentSettingWithDefault<bool>(true, "centralNginxManagement.syslogEnabled")) {
if (!getProfileAgentSettingWithDefault<bool>(false, "centralNginxManagement.syslogEnabled")) {
dbgTrace(D_NGINX_MANAGER) << "Syslog is disabled via settings";
return {};
}
@ -331,6 +331,8 @@ public:
logError("Could not reload central NGINX configuration. Error: " + reload_result.getErr());
return;
}
logInfo("Central NGINX configuration has been successfully reloaded");
}
void
@ -351,11 +353,37 @@ private:
{
LogGen log(
error,
ReportIS::Level::ACTION,
ReportIS::Audience::SECURITY,
ReportIS::Severity::CRITICAL,
ReportIS::Priority::HIGH,
ReportIS::Priority::URGENT,
ReportIS::Tags::POLICY_INSTALLATION
);
log.addToOrigin(LogField("eventTopic", "Central NGINX Management"));
log << LogField("notificationId", "4165c3b1-e9bc-44c3-888b-863e204c1bfb");
log << LogField(
"eventRemediation",
"Please verify your NGINX configuration and enforce policy again. "
"Contact Check Point support if the issue persists."
);
}
void
logInfo(const string &info)
{
LogGen log(
info,
ReportIS::Level::ACTION,
ReportIS::Audience::SECURITY,
ReportIS::Severity::INFO,
ReportIS::Priority::LOW,
ReportIS::Tags::POLICY_INSTALLATION
);
log.addToOrigin(LogField("eventTopic", "Central NGINX Management"));
log << LogField("notificationId", "4165c3b1-e9bc-44c3-888b-863e204c1bfb");
log << LogField("eventRemediation", "No action required");
}
I_MainLoop *i_mainloop = nullptr;

View File

@ -46,7 +46,7 @@ public:
bool isReverseProxy() override;
bool isCloudStorageEnabled() override;
Maybe<tuple<string, string, string, string, string>> readCloudMetadata() override;
Maybe<tuple<string, string, string>> parseNginxMetadata() override;
Maybe<tuple<string, string, string, string>> parseNginxMetadata() override;
#if defined(gaia) || defined(smb)
bool compareCheckpointVersion(int cp_version, std::function<bool(int, int)> compare_operator) const override;
#endif // gaia || smb
@ -80,7 +80,9 @@ DetailsResolver::Impl::getHostname()
Maybe<string>
DetailsResolver::Impl::getPlatform()
{
#if defined(gaia)
#if defined(gaia_arm)
return string("gaia_arm");
#elif defined(gaia)
return string("gaia");
#elif defined(arm32_rpi)
return string("glibc");
@ -228,7 +230,7 @@ isNoResponse(const string &cmd)
return !res.ok() || res.unpack().empty();
}
Maybe<tuple<string, string, string>>
Maybe<tuple<string, string, string, string>>
DetailsResolver::Impl::parseNginxMetadata()
{
auto output_path = getConfigurationWithDefault<string>(
@ -241,6 +243,11 @@ DetailsResolver::Impl::parseNginxMetadata()
"/scripts/cp-nano-makefile-generator.sh -f -o " +
output_path;
const string script_fresh_exe_cmd =
getFilesystemPathConfig() +
"/scripts/cp-nano-makefile-generator-fresh.sh save --save-location " +
output_path;
dbgTrace(D_ORCHESTRATOR) << "Details resolver, srcipt exe cmd: " << srcipt_exe_cmd;
if (isNoResponse("which nginx") && isNoResponse("which kong")) {
return genError("Nginx or Kong isn't installed");
@ -263,7 +270,7 @@ DetailsResolver::Impl::parseNginxMetadata()
return genError("Cannot open the file with nginx metadata, File: " + output_path);
}
string line;
string line;
while (getline(input_stream, line)) {
lines.push_back(line);
}
@ -277,7 +284,37 @@ DetailsResolver::Impl::parseNginxMetadata()
<< " Error: " << exception.what();
}
if (!isNoResponse("which nginx")) {
auto script_output = DetailsResolvingHanlder::getCommandOutput(script_fresh_exe_cmd);
if (!script_output.ok()) {
return genError("Failed to generate nginx fresh metadata, Error: " + script_output.getErr());
}
try {
ifstream input_stream(output_path);
if (!input_stream) {
return genError("Cannot open the file with nginx fresh metadata, File: " + output_path);
}
string line;
while (getline(input_stream, line)) {
if (line.find("NGX_MODULE_SIGNATURE") == 0) {
lines.push_back(line);
}
}
input_stream.close();
orchestration_tools->removeFile(output_path);
} catch (const ifstream::failure &exception) {
dbgWarning(D_ORCHESTRATOR)
<< "Cannot read the file with required nginx fresh metadata."
<< " File: " << output_path
<< " Error: " << exception.what();
}
}
if (lines.size() == 0) return genError("Failed to read nginx metadata file");
string nginx_signature;
string nginx_version;
string config_opt;
string cc_opt;
@ -292,6 +329,11 @@ DetailsResolver::Impl::parseNginxMetadata()
nginx_version = "nginx-" + line.substr(eq_index + 1);
continue;
}
if (line.find("NGX_MODULE_SIGNATURE") != string::npos) {
auto eq_index = line.find("=");
nginx_signature = line.substr(eq_index + 1);
continue;
}
if (line.find("EXTRA_CC_OPT") != string::npos) {
auto eq_index = line.find("=");
cc_opt = line.substr(eq_index + 1);
@ -301,7 +343,7 @@ DetailsResolver::Impl::parseNginxMetadata()
if (line.back() == '\\') line.pop_back();
config_opt += line;
}
return make_tuple(config_opt, cc_opt, nginx_version);
return make_tuple(config_opt, cc_opt, nginx_version, nginx_signature);
}
Maybe<tuple<string, string, string, string, string>>

View File

@ -71,7 +71,18 @@ checkPepIdaIdnStatus(const string &command_output)
Maybe<string>
getRequiredNanoServices(const string &command_output)
{
return command_output;
string idaRequiredServices[2] = {"idaSaml", "idaIdn"};
string platform_str = "gaia";
#if defined(gaia_arm)
platform_str = "gaia_arm";
#endif // gaia_arm
string result = "";
for(const string &serv : idaRequiredServices) {
string add_service = serv + "_" + platform_str;
result = result + add_service + ";";
}
command_output.empty(); // overcome unused variable
return result;
}
Maybe<string>
@ -342,6 +353,28 @@ getSMCBasedMgmtName(const string &command_output)
return getAttr(command_output, "Mgmt object Name was not found");
}
Maybe<string>
getSmbObjectUid(const string &command_output)
{
static const char centrally_managed_comd_output = '0';
if (command_output.empty() || command_output[0] != centrally_managed_comd_output) {
return genError("Object UUID was not found");
}
Maybe<string> obj_uuid = getAttrFromCpsdwanGetDataJson("uuid");
if (obj_uuid.ok()) {
return obj_uuid.unpack();
}
static const string obj_path = (getenv("FWDIR") ? string(getenv("FWDIR")) : "") + "/database/myown.C";
auto file_stream = std::make_shared<std::ifstream>(obj_path);
if (!file_stream->is_open()) {
return genError("Failed to open the object file");
}
return getMgmtObjAttr(file_stream, "uuid ");
}
Maybe<string>
getSmbObjectName(const string &command_output)
{

View File

@ -42,11 +42,6 @@ SHELL_PRE_CMD("gunzip local.cfg", "gunzip -c $FWDIR/state/local/FW1/local.cfg.gz
#ifdef SHELL_CMD_HANDLER
#if defined(gaia) || defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
SHELL_CMD_HANDLER("cpProductIntegrationMgmtObjectType", "cpprod_util CPPROD_IsMgmtMachine", getMgmtObjType)
SHELL_CMD_HANDLER(
"cpProductIntegrationMgmtObjectUid",
"mgmt_cli --format json -r true show-session | jq -r '.[\"connected-server\"].uid'",
getMgmtObjUid
)
SHELL_CMD_HANDLER("prerequisitesForHorizonTelemetry",
"FS_PATH=<FILESYSTEM-PREFIX>; [ -f ${FS_PATH}/cp-nano-horizon-telemetry-prerequisites.log ] "
"&& head -1 ${FS_PATH}/cp-nano-horizon-telemetry-prerequisites.log || echo ''",
@ -150,12 +145,17 @@ SHELL_CMD_HANDLER("hasSAMLSupportedBlade", "enabled_blades", checkSAMLSupportedB
SHELL_CMD_HANDLER("hasIDABlade", "enabled_blades", checkIDABlade)
SHELL_CMD_HANDLER("hasSAMLPortal", "mpclient status nac", checkSAMLPortal)
SHELL_CMD_HANDLER("hasIdaIdnEnabled", "fw ctl get int nac_pep_identity_next_enabled", checkPepIdaIdnStatus)
SHELL_CMD_HANDLER("requiredNanoServices", "echo 'idaSaml_gaia;idaIdn_gaia;'", getRequiredNanoServices)
SHELL_CMD_HANDLER("requiredNanoServices", "echo ida", getRequiredNanoServices)
SHELL_CMD_HANDLER(
"cpProductIntegrationMgmtObjectName",
"mgmt_cli --format json -r true show-session | jq -r '.[\"connected-server\"].name'",
getMgmtObjName
)
SHELL_CMD_HANDLER(
"cpProductIntegrationMgmtObjectUid",
"mgmt_cli --format json -r true show-session | jq -r '.[\"connected-server\"].uid'",
getMgmtObjUid
)
SHELL_CMD_HANDLER(
"cpProductIntegrationMgmtParentObjectName",
"cat $FWDIR/database/myself_objects.C "
@ -227,6 +227,11 @@ SHELL_CMD_HANDLER(
"cpprod_util FwIsLocalMgmt",
getSmbObjectName
)
SHELL_CMD_HANDLER(
"cpProductIntegrationMgmtObjectUid",
"cpprod_util FwIsLocalMgmt",
getSmbObjectUid
)
SHELL_CMD_HANDLER(
"Application Control",
"cat $FWDIR/conf/active_blades.txt | grep -o 'APCL [01]' | cut -d ' ' -f2",

View File

@ -21,7 +21,7 @@
#include "maybe_res.h"
std::ostream &
operator<<(std::ostream &os, const Maybe<std::tuple<std::string, std::string, std::string>> &)
operator<<(std::ostream &os, const Maybe<std::tuple<std::string, std::string, std::string, std::string>> &)
{
return os;
}
@ -48,7 +48,7 @@ public:
MOCK_METHOD0(isGwNotVsx, bool());
MOCK_METHOD0(getResolvedDetails, std::map<std::string, std::string>());
MOCK_METHOD0(isVersionAboveR8110, bool());
MOCK_METHOD0(parseNginxMetadata, Maybe<std::tuple<std::string, std::string, std::string>>());
MOCK_METHOD0(parseNginxMetadata, Maybe<std::tuple<std::string, std::string, std::string, std::string>>());
MOCK_METHOD0(
readCloudMetadata, Maybe<std::tuple<std::string, std::string, std::string, std::string, std::string>>());
};

View File

@ -1465,14 +1465,17 @@ private:
auto nginx_data = i_details_resolver->parseNginxMetadata();
if (nginx_data.ok()) {
string nginx_signature;
string nginx_version;
string config_opt;
string cc_opt;
tie(config_opt, cc_opt, nginx_version) = nginx_data.unpack();
tie(config_opt, cc_opt, nginx_version, nginx_signature) = nginx_data.unpack();
agent_data_report
<< make_pair("nginxVersion", nginx_version)
<< make_pair("configureOpt", config_opt)
<< make_pair("extraCompilerOpt", cc_opt);
<< make_pair("attachmentVersion", "Legacy")
<< make_pair("nginxSignature", nginx_signature)
<< make_pair("nginxVersion", nginx_version)
<< make_pair("configureOpt", config_opt)
<< make_pair("extraCompilerOpt", cc_opt);
} else {
dbgDebug(D_ORCHESTRATOR) << nginx_data.getErr();
}
@ -1528,7 +1531,6 @@ private:
} else {
curr_agent_data_report = agent_data_report;
curr_agent_data_report.disableReportSending();
agent_data_report << AgentReportFieldWithLabel("report_timestamp", i_time->getWalltimeStr());
}
}

View File

@ -140,7 +140,7 @@ public:
void
expectDetailsResolver()
{
Maybe<tuple<string, string, string>> no_nginx(genError("No nginx"));
Maybe<tuple<string, string, string, string>> no_nginx(genError("No nginx"));
EXPECT_CALL(mock_details_resolver, getPlatform()).WillRepeatedly(Return(string("linux")));
EXPECT_CALL(mock_details_resolver, getArch()).WillRepeatedly(Return(string("x86_64")));
EXPECT_CALL(mock_details_resolver, isReverseProxy()).WillRepeatedly(Return(false));

View File

@ -168,7 +168,7 @@ public:
void
expectDetailsResolver()
{
Maybe<tuple<string, string, string>> no_nginx(genError("No nginx"));
Maybe<tuple<string, string, string, string>> no_nginx(genError("No nginx"));
EXPECT_CALL(mock_details_resolver, getPlatform()).WillRepeatedly(Return(string("linux")));
EXPECT_CALL(mock_details_resolver, getArch()).WillRepeatedly(Return(string("x86_64")));
EXPECT_CALL(mock_details_resolver, isReverseProxy()).WillRepeatedly(Return(false));

View File

@ -208,6 +208,7 @@ ServiceDetails::sendNewConfigurations(int configuration_id, const string &policy
MessageMetadata new_config_req_md("127.0.0.1", service_port);
new_config_req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
new_config_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
new_config_req_md.setSuspension(false);
auto res = messaging->sendSyncMessage(
HTTPMethod::POST,
"/set-new-configuration",

View File

@ -168,10 +168,12 @@ FogAuthenticator::registerAgent(
auto nginx_data = details_resolver->parseNginxMetadata();
if (nginx_data.ok()) {
string nginx_signature;
string nginx_version;
string config_opt;
string cc_opt;
tie(config_opt, cc_opt, nginx_version) = nginx_data.unpack();
tie(config_opt, cc_opt, nginx_version, nginx_signature) = nginx_data.unpack();
request << make_pair("nginxSignature", nginx_signature);
request << make_pair("nginxVersion", nginx_version);
request << make_pair("configureOpt", config_opt);
request << make_pair("extraCompilerOpt", cc_opt);

View File

@ -0,0 +1,2 @@
add_library(prometheus_comp prometheus_comp.cc)
add_subdirectory(prometheus_ut)

View File

@ -0,0 +1,200 @@
#include "prometheus_comp.h"
#include <string>
#include <map>
#include <vector>
#include <cereal/archives/json.hpp>
#include <cereal/types/map.hpp>
#include <cereal/types/vector.hpp>
#include <cereal/types/string.hpp>
#include <iostream>
#include <fstream>
#include "common.h"
#include "report/base_field.h"
#include "report/report_enums.h"
#include "log_generator.h"
#include "debug.h"
#include "rest.h"
#include "customized_cereal_map.h"
#include "i_messaging.h"
#include "prometheus_metric_names.h"
USE_DEBUG_FLAG(D_PROMETHEUS);
using namespace std;
using namespace ReportIS;
struct ServiceData
{
template <typename Archive>
void
serialize(Archive &ar)
{
ar(cereal::make_nvp("Service port", service_port));
}
int service_port;
};
// Accumulates label->value samples for one Prometheus metric and renders
// them in the Prometheus text exposition format.
class PrometheusMetricData
{
public:
    PrometheusMetricData(const string &n, const string &t, const string &d) : name(n), type(t), description(d) {}

    // Record (or overwrite) the sample for a given label set.
    // `labels` is the already-formatted label string, e.g. {method="post"}.
    void
    addElement(const string &labels, const string &value)
    {
        metric_labels_to_values[labels] = value;
    }

    // Render the metric in exposition format:
    //   # HELP <name> <description>
    //   # TYPE <name> <type>
    //   <name><labels> <value>
    // NOTE: printing drains the collected samples (the map is cleared), so
    // each scrape reports only values gathered since the previous scrape.
    ostream &
    print(ostream &os)
    {
        if (metric_labels_to_values.empty()) return os;

        // Prefer the curated representative name; fall back to the raw
        // internal name when no mapping exists.
        string representative_name;
        if (!name.empty()) {
            auto metric_name = convertMetricName(name);
            representative_name = metric_name.empty() ? name : metric_name;
        }

        if (!description.empty()) os << "# HELP " << representative_name << ' ' << description << '\n';
        if (!name.empty()) os << "# TYPE " << representative_name << ' ' << type << '\n';
        for (auto &entry : metric_labels_to_values) {
            os << representative_name << entry.first << ' ' << entry.second << '\n';
        }
        os << '\n';
        metric_labels_to_values.clear();
        return os;
    }

private:
    string name;
    string type;
    string description;
    map<string, string> metric_labels_to_values;
};
static ostream & operator<<(ostream &os, PrometheusMetricData &metric) { return metric.print(os); }
// Private implementation: aggregates metrics from all locally registered
// nano services and renders them for the "metrics" REST endpoint.
class PrometheusComp::Impl
{
public:
    void
    init()
    {
        // Expose the Prometheus scrape endpoint; every GET triggers a fresh
        // collection cycle over the registered services.
        Singleton::Consume<I_RestApi>::by<PrometheusComp>()->addGetCall(
            "metrics",
            [&] () { return getFormatedPrometheusMetrics(); }
        );
    }

    // Merge a batch of samples into the per-metric accumulators.
    void
    addMetrics(const vector<PrometheusData> &metrics)
    {
        for (auto &metric : metrics) {
            auto &metric_object = getDataObject(
                metric.name,
                metric.type,
                metric.description
            );
            metric_object.addElement(metric.label, metric.value);
        }
    }

private:
    // Find the accumulator for `name`, creating it on first use.
    PrometheusMetricData &
    getDataObject(const string &name, const string &type, const string &description)
    {
        auto elem = prometheus_metrics.find(name);
        if (elem == prometheus_metrics.end()) {
            elem = prometheus_metrics.emplace(name, PrometheusMetricData(name, type, description)).first;
        }
        return elem->second;
    }

    // Load the registered-services JSON file and return service name -> data.
    // Returns an empty map (after logging a warning) on any I/O or parse error.
    map<string, ServiceData>
    getServiceDetails()
    {
        map<string, ServiceData> registeredServices;
        auto registered_services_file = getConfigurationWithDefault<string>(
            getFilesystemPathConfig() + "/conf/orchestrations_registered_services.json",
            "orchestration",
            "Orchestration registered services"
        );
        ifstream file(registered_services_file);
        if (!file.is_open()) {
            dbgWarning(D_PROMETHEUS) << "Failed to open file: " << registered_services_file;
            return registeredServices;
        }

        stringstream buffer;
        buffer << file.rdbuf();
        try {
            cereal::JSONInputArchive archive(buffer);
            archive(cereal::make_nvp("Registered Services", registeredServices));
        } catch (const exception &e) {
            dbgWarning(D_PROMETHEUS) << "Error parsing Registered Services JSON file: " << e.what();
        }
        return registeredServices;
    }

    // Query every registered service over loopback for its current metrics
    // and fold the responses into the accumulators. Failures are logged and
    // skipped so one broken service does not abort the whole scrape.
    void
    getServicesMetrics()
    {
        dbgTrace(D_PROMETHEUS) << "Get all registered services metrics";
        map<string, ServiceData> service_names_to_ports = getServiceDetails();
        for (const auto &service : service_names_to_ports) {
            I_Messaging *messaging = Singleton::Consume<I_Messaging>::by<PrometheusComp>();
            // One-shot, plain-HTTP request to the service's local port.
            MessageMetadata service_metric_req_md("127.0.0.1", service.second.service_port);
            service_metric_req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
            service_metric_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
            auto res = messaging->sendSyncMessage(
                HTTPMethod::GET,
                "/service-metrics",
                string(""),
                MessageCategory::GENERIC,
                service_metric_req_md
            );
            if (!res.ok()) {
                dbgWarning(D_PROMETHEUS) << "Failed to get service metrics. Service: " << service.first;
                continue;
            }
            stringstream buffer;
            buffer << res.unpack().getBody();
            // Guard the parse: a malformed response from one service must not
            // throw through the REST handler (getServiceDetails already
            // handles its parse errors the same way).
            try {
                cereal::JSONInputArchive archive(buffer);
                vector<PrometheusData> metrics;
                archive(cereal::make_nvp("metrics", metrics));
                addMetrics(metrics);
            } catch (const exception &e) {
                dbgWarning(D_PROMETHEUS)
                    << "Error parsing metrics from service: "
                    << service.first
                    << ", error: "
                    << e.what();
            }
        }
    }

    // Build the full exposition-format payload served on the metrics endpoint.
    string
    getFormatedPrometheusMetrics()
    {
        // Let in-process metric providers publish their latest values first.
        MetricScrapeEvent().notify();
        getServicesMetrics();
        stringstream result;
        for (auto &metric : prometheus_metrics) {
            result << metric.second;
        }
        dbgTrace(D_PROMETHEUS) << "Prometheus metrics: " << result.str();
        return result.str();
    }

    map<string, PrometheusMetricData> prometheus_metrics;
};
// Component boilerplate: construct/destroy the pimpl and forward lifecycle calls.
PrometheusComp::PrometheusComp() : Component("Prometheus"), pimpl(make_unique<Impl>()) {}

PrometheusComp::~PrometheusComp() {}

// Registers the "metrics" REST endpoint via the implementation object.
void
PrometheusComp::init()
{
    pimpl->init();
}

View File

@ -0,0 +1,143 @@
#ifndef __PROMETHEUS_METRIC_NAMES_H__
#define __PROMETHEUS_METRIC_NAMES_H__
#include <string>
#include <unordered_map>
#include "debug.h"
USE_DEBUG_FLAG(D_PROMETHEUS);
// Map an internal AppSec metric name to its Prometheus representative name.
// Returns an empty string when no mapping exists (the caller falls back to
// the original name).
//
// Marked inline: this is a function definition in a header and would
// otherwise violate the ODR (duplicate-symbol link errors) as soon as the
// header is included from more than one translation unit.
inline std::string
convertMetricName(const std::string &original_metric_name)
{
    static const std::unordered_map<std::string, std::string> original_to_representative_names = {
        // HybridModeMetric
        {"watchdogProcessStartupEventsSum", "nano_service_restarts_counter"},
        // nginxAttachmentMetric
        {"inspectVerdictSum", "traffic_inspection_verdict_inspect_counter"},
        {"acceptVeridctSum", "traffic_inspection_verdict_accept_counter"},
        {"dropVerdictSum", "traffic_inspection_verdict_drop_counter"},
        {"injectVerdictSum", "traffic_inspection_verdict_inject_counter"},
        {"irrelevantVerdictSum", "traffic_inspection_verdict_irrelevant_counter"},
        {"reconfVerdictSum", "traffic_inspection_verdict_reconf_counter"},
        {"responseInspection", "response_body_inspection_counter"},
        // nginxIntakerMetric
        {"successfullInspectionTransactionsSum", "successful_Inspection_counter"},
        {"failopenTransactionsSum", "fail_open_Inspection_counter"},
        {"failcloseTransactionsSum", "fail_close_Inspection_counter"},
        {"transparentModeTransactionsSum", "transparent_mode_counter"},
        {"totalTimeInTransparentModeSum", "total_time_in_transparent_mode_counter"},
        {"reachInspectVerdictSum", "inspect_verdict_counter"},
        {"reachAcceptVerdictSum", "accept_verdict_counter"},
        {"reachDropVerdictSum", "drop_verdict_counter"},
        {"reachInjectVerdictSum", "inject_verdict_counter"},
        {"reachIrrelevantVerdictSum", "irrelevant_verdict_counter"},
        {"reachReconfVerdictSum", "reconf_verdict_counter"},
        {"requestCompressionFailureSum", "failed_requests_compression_counter"},
        {"responseCompressionFailureSum", "failed_response_compression_counter"},
        {"requestDecompressionFailureSum", "failed_requests_decompression_counter"},
        {"responseDecompressionFailureSum", "failed_response_decompression_counter"},
        {"requestCompressionSuccessSum", "successful_request_compression_counter"},
        {"responseCompressionSuccessSum", "successful_response_compression_counter"},
        {"requestDecompressionSuccessSum", "successful_request_decompression_counter"},
        {"responseDecompressionSuccessSum", "successful_response_decompression_counter"},
        {"skippedSessionsUponCorruptedZipSum", "corrupted_zip_skipped_session_counter"},
        {"attachmentThreadReachedTimeoutSum", "thread_exceeded_processing_time_counter"},
        {"registrationThreadReachedTimeoutSum", "failed_registration_thread_counter"},
        {"requestHeaderThreadReachedTimeoutSum", "request_headers_processing_thread_timeouts_counter"},
        {"requestBodyThreadReachedTimeoutSum", "request_body_processing_thread_timeouts_counter"},
        {"respondHeaderThreadReachedTimeoutSum", "response_headers_processing_thread_timeouts_counter"},
        {"respondBodyThreadReachedTimeoutSum", "response_body_processing_thread_timeouts_counter"},
        {"attachmentThreadFailureSum", "thread_failures_counter"},
        {"httpRequestProcessingReachedTimeoutSum", "request_processing_timeouts_counter"},
        {"httpRequestsSizeSum", "requests_total_size_counter"},
        {"httpResponsesSizeSum", "response_total_size_counter"},
        {"httpRequestFailedToReachWebServerUpstreamSum", "requests_failed_reach_upstram_counter"},
        {"overallSessionProcessTimeToVerdictAvgSample", "overall_processing_time_until_verdict_average"},
        {"overallSessionProcessTimeToVerdictMaxSample", "overall_processing_time_until_verdict_max"},
        {"overallSessionProcessTimeToVerdictMinSample", "overall_processing_time_until_verdict_min"},
        {"requestProcessTimeToVerdictAvgSample", "requests_processing_time_until_verdict_average"},
        {"requestProcessTimeToVerdictMaxSample", "requests_processing_time_until_verdict_max"},
        {"requestProcessTimeToVerdictMinSample", "requests_processing_time_until_verdict_min"},
        {"responseProcessTimeToVerdictAvgSample", "response_processing_time_until_verdict_average"},
        {"responseProcessTimeToVerdictMaxSample", "response_processing_time_until_verdict_max"},
        {"responseProcessTimeToVerdictMinSample", "response_processing_time_until_verdict_min"},
        {"requestBodySizeUponTimeoutAvgSample", "request_body_size_average"},
        {"requestBodySizeUponTimeoutMaxSample", "request_body_size_max"},
        {"requestBodySizeUponTimeoutMinSample", "request_body_size_min"},
        {"responseBodySizeUponTimeoutAvgSample", "response_body_size_average"},
        {"responseBodySizeUponTimeoutMaxSample", "response_body_size_max"},
        {"responseBodySizeUponTimeoutMinSample", "response_body_size_min"},
        // WaapTelemetrics
        // NOTE(review): the original table also listed the same reservedNgenA-J
        // keys under WaapTrafficTelemetrics and WaapAttackTypesMetrics.
        // Duplicate keys in a map initializer_list are silently dropped (the
        // first occurrence wins), so those later mappings were dead entries and
        // are omitted here. Exposing them requires distinct source metric
        // names per telemetry category.
        {"reservedNgenA", "total_requests_counter"},
        {"reservedNgenB", "unique_sources_counter"},
        {"reservedNgenC", "requests_blocked_by_force_and_exception_counter"},
        {"reservedNgenD", "requests_blocked_by_waf_counter"},
        {"reservedNgenE", "requests_blocked_by_open_api_counter"},
        {"reservedNgenF", "requests_blocked_by_bot_protection_counter"},
        {"reservedNgenG", "requests_threat_level_info_and_no_threat_counter"},
        {"reservedNgenH", "requests_threat_level_low_counter"},
        {"reservedNgenI", "requests_threat_level_medium_counter"},
        {"reservedNgenJ", "requests_threat_level_high_counter"},
        // AssetsMetric
        {"numberOfProtectedApiAssetsSample", "api_assets_counter"},
        {"numberOfProtectedWebAppAssetsSample", "web_api_assets_counter"},
        {"numberOfProtectedAssetsSample", "all_assets_counter"},
        // IPSMetric
        {"preventEngineMatchesSample", "prevent_action_matches_counter"},
        {"detectEngineMatchesSample", "detect_action_matches_counter"},
        {"ignoreEngineMatchesSample", "ignore_action_matches_counter"},
        // CPUMetric
        {"cpuMaxSample", "cpu_usage_percentage_max"},
        {"cpuAvgSample", "cpu_usage_percentage_average"},
        {"cpuSample", "cpu_usage_percentage_last_value"},
        // LogMetric
        {"logQueueMaxSizeSample", "logs_queue_size_max"},
        {"logQueueAvgSizeSample", "logs_queue_size_average"},
        {"logQueueCurrentSizeSample", "logs_queue_size_last_value"},
        {"sentLogsSum", "logs_sent_counter"},
        {"sentLogsBulksSum", "bulk_logs_sent_counter"},
        // MemoryMetric
        {"serviceVirtualMemorySizeMaxSample", "service_virtual_memory_size_kb_max"},
        {"serviceVirtualMemorySizeMinSample", "service_virtual_memory_size_kb_min"},
        {"serviceVirtualMemorySizeAvgSample", "service_virtual_memory_size_kb_average"},
        {"serviceRssMemorySizeMaxSample", "service_physical_memory_size_kb_max"},
        {"serviceRssMemorySizeMinSample", "service_physical_memory_size_kb_min"},
        {"serviceRssMemorySizeAvgSample", "service_physical_memory_size_kb_average"},
        {"generalTotalMemorySizeMaxSample", "general_total_used_memory_max"},
        {"generalTotalMemorySizeMinSample", "general_total_used_memory_min"},
        {"generalTotalMemorySizeAvgSample", "general_total_used_memory_average"},
    };

    auto metric_names = original_to_representative_names.find(original_metric_name);
    if (metric_names != original_to_representative_names.end()) return metric_names->second;

    dbgDebug(D_PROMETHEUS)
        << "Metric doesn't have a representative name, original name: "
        << original_metric_name;
    return "";
}
#endif // __PROMETHEUS_METRIC_NAMES_H__

View File

@ -0,0 +1,8 @@
# Boost libraries and the in-tree shmem_ipc build output are needed at link
# time. A single link_directories call covers both; the original file issued
# a second, redundant call containing only ${BOOST_ROOT}/lib.
link_directories(${BOOST_ROOT}/lib ${CMAKE_BINARY_DIR}/core/shmem_ipc)

add_unit_test(
    prometheus_ut
    "prometheus_ut.cc"
    "prometheus_comp;logging;agent_details;waap_clib;table;singleton;time_proxy;metric;event_is;connkey;http_transaction_data;generic_rulebase;generic_rulebase_evaluators;ip_utilities;intelligence_is_v2;-lboost_regex;messaging;"
)

View File

@ -0,0 +1,79 @@
#include "prometheus_comp.h"
#include <sstream>
#include <fstream>
#include <vector>
#include "cmock.h"
#include "cptest.h"
#include "maybe_res.h"
#include "debug.h"
#include "config.h"
#include "environment.h"
#include "config_component.h"
#include "agent_details.h"
#include "time_proxy.h"
#include "mock/mock_mainloop.h"
#include "mock/mock_rest_api.h"
#include "mock/mock_messaging.h"
using namespace std;
using namespace testing;
USE_DEBUG_FLAG(D_PROMETHEUS);
// Test fixture: boots a PrometheusComp against mocked REST/messaging layers
// and captures the handler registered for the "metrics" endpoint so tests
// can invoke a scrape directly via get_metrics_func.
class PrometheusCompTest : public Test
{
public:
    PrometheusCompTest()
    {
        // Environment preload registers a REST call; satisfy it before init.
        EXPECT_CALL(mock_rest, mockRestCall(_, "declare-boolean-variable", _)).WillOnce(Return(false));
        env.preload();
        config.preload();
        env.init();
        // Capture the lambda the component registers for the "metrics"
        // endpoint; SaveArg stores it into get_metrics_func.
        EXPECT_CALL(
            mock_rest,
            addGetCall("metrics", _)
        ).WillOnce(DoAll(SaveArg<1>(&get_metrics_func), Return(true)));
        prometheus_comp.init();
    }

    ::Environment env;
    ConfigComponent config;
    PrometheusComp prometheus_comp;
    StrictMock<MockRestApi> mock_rest;
    StrictMock<MockMainLoop> mock_ml;
    NiceMock<MockMessaging> mock_messaging;
    unique_ptr<ServerRest> agent_uninstall;
    function<string()> get_metrics_func;     // invokes a scrape, returns exposition text
    CPTestTempfile status_file;
    string registered_services_file_path;
};
// End-to-end scrape flow: a service's JSON metrics response is parsed, the
// internal metric name is translated to its Prometheus representative name,
// and the result is rendered in the Prometheus text exposition format.
TEST_F(PrometheusCompTest, checkAddingMetric)
{
    // Point the component at the fixture's registered-services file; it
    // lists two services, hence the two expected HTTP calls below.
    registered_services_file_path = cptestFnameInSrcDir(string("registered_services.json"));
    setConfiguration(registered_services_file_path, "orchestration", "Orchestration registered services");

    string metric_body = "{\n"
        "    \"metrics\": [\n"
        "      {\n"
        "        \"metric_name\": \"watchdogProcessStartupEventsSum\",\n"
        "        \"metric_type\": \"counter\",\n"
        "        \"metric_description\": \"\",\n"
        "        \"labels\": \"{method=\\\"post\\\",code=\\\"200\\\"}\",\n"
        "        \"value\": \"1534\"\n"
        "      }\n"
        "    ]\n"
        "}";

    // Both registered services answer with the same body; identical label
    // sets collapse into a single sample in the output.
    EXPECT_CALL(mock_messaging, sendSyncMessage(_, "/service-metrics", _, _, _))
        .Times(2).WillRepeatedly(Return(HTTPResponse(HTTPStatusCode::HTTP_OK, metric_body)));

    string metric_str = "# TYPE nano_service_restarts_counter counter\n"
        "nano_service_restarts_counter{method=\"post\",code=\"200\"} 1534\n\n";

    EXPECT_EQ(metric_str, get_metrics_func());
}

View File

@ -0,0 +1,32 @@
{
"Registered Services": {
"cp-nano-orchestration": {
"Service name": "cp-nano-orchestration",
"Service ID": "cp-nano-orchestration",
"Service port": 7777,
"Relevant configs": [
"zones",
"triggers",
"rules",
"registration-data",
"parameters",
"orchestration",
"exceptions",
"agent-intelligence"
]
},
"cp-nano-prometheus": {
"Service name": "cp-nano-prometheus",
"Service ID": "cp-nano-prometheus",
"Service port": 7465,
"Relevant configs": [
"zones",
"triggers",
"rules",
"parameters",
"exceptions",
"agent-intelligence"
]
}
}
}

View File

@ -41,6 +41,7 @@ static in6_addr applyMaskV6(const in6_addr& addr, uint8_t prefixLength) {
in6_addr maskedAddr = addr;
int fullBytes = prefixLength / 8;
int remainingBits = prefixLength % 8;
uint8_t partialByte = maskedAddr.s6_addr[fullBytes];
// Mask full bytes
for (int i = fullBytes; i < 16; ++i) {
@ -50,7 +51,7 @@ static in6_addr applyMaskV6(const in6_addr& addr, uint8_t prefixLength) {
// Mask remaining bits
if (remainingBits > 0) {
uint8_t mask = ~((1 << (8 - remainingBits)) - 1);
maskedAddr.s6_addr[fullBytes] &= mask;
maskedAddr.s6_addr[fullBytes] = partialByte & mask;
}
return maskedAddr;

View File

@ -113,6 +113,9 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
<< parser_depth
<< " v_len = "
<< v_len;
dbgTrace(D_WAAP_DEEP_PARSER) << m_key;
// Decide whether to push/pop the value in the keystack.
bool shouldUpdateKeyStack = (flags & BUFFERED_RECEIVER_F_UNNAMED) == 0;
@ -275,13 +278,23 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
// Detect and decode potential base64 chunks in the value before further processing
bool base64ParamFound = false;
size_t base64_offset = 0;
Waap::Util::BinaryFileType base64BinaryFileType = Waap::Util::BinaryFileType::FILE_TYPE_NONE;
if (m_depth == 1 && flags == BUFFERED_RECEIVER_F_MIDDLE && m_key.depth() == 1 && m_key.first() != "#base64"){
dbgTrace(D_WAAP_DEEP_PARSER) << " === will not check base64 since prev data block was not b64-encoded ===";
} else {
dbgTrace(D_WAAP_DEEP_PARSER) << " ===Processing potential base64===";
if (isUrlPayload && m_depth == 1 && cur_val[0] == '/') {
dbgTrace(D_WAAP_DEEP_PARSER) << "removing leading '/' from URL param value";
base64_offset = 1;
}
std::string decoded_val, decoded_key;
base64_variants base64_status = Waap::Util::b64Test(cur_val, decoded_key, decoded_val, base64BinaryFileType);
base64_variants base64_status = Waap::Util::b64Test(
cur_val,
decoded_key,
decoded_val,
base64BinaryFileType,
base64_offset);
dbgTrace(D_WAAP_DEEP_PARSER)
<< " status = "
@ -289,16 +302,50 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
<< " key = "
<< decoded_key
<< " value = "
<< decoded_val;
<< decoded_val
<< "m_depth = "
<< m_depth;
switch (base64_status) {
case SINGLE_B64_CHUNK_CONVERT:
cur_val = decoded_val;
if (base64_offset) {
cur_val = "/" + decoded_val;
} else {
cur_val = decoded_val;
}
base64ParamFound = true;
break;
case CONTINUE_DUAL_SCAN:
if (decoded_val.size() > 0) {
decoded_key = "#base64";
base64ParamFound = false;
if (base64_offset) {
decoded_val = "/" + decoded_val;
}
dbgTrace(D_WAAP_DEEP_PARSER) << m_key;
rc = onKv(
decoded_key.c_str(),
decoded_key.size(),
decoded_val.data(),
decoded_val.size(),
flags,
parser_depth
);
dbgTrace(D_WAAP_DEEP_PARSER) << "After call to onKv with suspected value rc = " << rc;
dbgTrace(D_WAAP_DEEP_PARSER) << m_key;
break;
} else {
dbgTrace(D_WAAP) << "base64 decode suspected and empty value. Skipping.";
base64ParamFound = false;
break;
}
break;
case KEY_VALUE_B64_PAIR:
// going deep with new pair in case value is not empty
if (decoded_val.size() > 0) {
if (base64_offset) {
decoded_key = "/" + decoded_key;
}
cur_val = decoded_val;
base64ParamFound = true;
rc = onKv(
@ -309,9 +356,13 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
flags,
parser_depth
);
dbgTrace(D_WAAP_DEEP_PARSER) << " rc = " << rc;
dbgTrace(D_WAAP_DEEP_PARSER) << "After call to onKv with suspected value rc = " << rc;
dbgTrace(D_WAAP_DEEP_PARSER) << m_key;
if (rc != CONTINUE_PARSING) {
if (shouldUpdateKeyStack) {
m_key.pop("deep parser key");
}
m_depth--;
return rc;
}
}
@ -323,7 +374,7 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
}
if (base64ParamFound) {
dbgTrace(D_WAAP_DEEP_PARSER) << "DeepParser::onKv(): pushing #base64 prefix to the key.";
dbgTrace(D_WAAP_DEEP_PARSER) << "pushing #base64 prefix to the key.";
m_key.push("#base64", 7, false);
}
}
@ -437,7 +488,6 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
if (shouldUpdateKeyStack) {
m_key.pop("deep parser key");
}
m_depth--;
return rc;
}
@ -587,7 +637,6 @@ DeepParser::parseBuffer(
if (shouldUpdateKeyStack) {
m_key.pop("deep parser key");
}
m_depth--;
return DONE_PARSING;
}
@ -909,7 +958,6 @@ DeepParser::parseAfterMisleadingMultipartBoundaryCleaned(
return rc;
}
}
return rc;
}
@ -1081,7 +1129,7 @@ DeepParser::createInternalParser(
<< " isBodyPayload = "
<< isBodyPayload;
//Detect sensor_data format in body and just use dedicated filter for it
if (m_depth == 1
if ((m_depth == 1)
&& isBodyPayload
&& Waap::Util::detectKnownSource(cur_val) == Waap::Util::SOURCE_TYPE_SENSOR_DATA) {
m_parsersDeque.push_back(

View File

@ -37,14 +37,24 @@ void KeyStack::push(const char* subkey, size_t subkeySize, bool countDepth) {
m_nameDepth++;
}
dbgTrace(D_WAAP) << "KeyStack(" << m_name << ")::push(): '" << std::string(subkey, subkeySize) <<
"' => full_key='" << std::string(m_key.data(), m_key.size()) << "'";
dbgTrace(D_WAAP)
<< "KeyStack("
<< m_name
<< ")::push(): '"
<< std::string(subkey, subkeySize)
<< "' => full_key='"
<< std::string(m_key.data(), m_key.size())
<< "'";
}
void KeyStack::pop(const char* log, bool countDepth) {
// Keep depth balanced even if m_key[] buffer is full
if (m_key.empty() || m_stack.empty()) {
dbgDebug(D_WAAP) << "KeyStack(" << m_name << ")::pop(): [ERROR] ATTEMPT TO POP FROM EMPTY KEY STACK! " << log;
dbgDebug(D_WAAP)
<< "KeyStack("
<< m_name
<< ")::pop(): [ERROR] ATTEMPT TO POP FROM EMPTY KEY STACK! "
<< log;
return;
}
@ -55,6 +65,22 @@ void KeyStack::pop(const char* log, bool countDepth) {
// Remove last subkey.
m_key.erase(m_stack.back());
m_stack.pop_back();
dbgTrace(D_WAAP) << "KeyStack(" << m_name << ")::pop(): full_key='" <<
std::string(m_key.data(), (int)m_key.size()) << "': pop_key=" << log << "'";
dbgTrace(D_WAAP)
<< "KeyStack("
<< m_name
<< ")::pop(): full_key='"
<< std::string(m_key.data(), (int)m_key.size())
<< "': pop_key="
<< log
<< "'";
}
// Write a debug representation of the currently accumulated full key to `os`.
// NOTE(review): the label says "::show()" although the method is print() —
// presumably left over from an earlier name; confirm before relying on the
// exact log text.
void KeyStack::print(std::ostream &os) const
{
    os
        << "KeyStack("
        << m_name
        << ")::show(): full_key='"
        << std::string(m_key.data(), (int)m_key.size())
        << "'";
}

View File

@ -28,6 +28,7 @@ public:
void pop(const char* log, bool countDepth=true);
bool empty() const { return m_key.empty(); }
void clear() { m_key.clear(); m_stack.clear(); }
void print(std::ostream &os) const;
size_t depth() const { return m_nameDepth; }
size_t size() const {
return str().size();

View File

@ -111,8 +111,7 @@ int BufferedReceiver::onKvDone()
// This must be called even if m_value is empty in order to signal the BUFFERED_RECEIVER_F_LAST flag to the
// receiver!
dbgTrace(D_WAAP_PARSER)
<< " Call onKv on the remainder of the buffer not yet pushed to the receiver "
<< "calling onKv()";
<< " Call onKv on the remainder of the buffer not yet pushed to the receiver calling onKv()";
int rc = onKv(m_key.data(), m_key.size(), m_value.data(), m_value.size(), m_flags, m_parser_depth);
// Reset the object's state to allow reuse for other parsers

View File

@ -21,6 +21,7 @@ USE_DEBUG_FLAG(D_WAAP);
const std::string ParserPDF::m_parserName = "ParserPDF";
const char* PDF_TAIL = "%%EOF";
const size_t PDF_TAIL_LEN = 5;
ParserPDF::ParserPDF(
IParserStreamReceiver &receiver,
@ -44,16 +45,21 @@ ParserPDF::push(const char *buf, size_t len)
<< "' len="
<< len;
const char *c;
if (m_state == s_error) {
return 0;
}
if (len == 0)
{
dbgTrace(D_WAAP_PARSER_PDF) << "ParserPDF::push(): end of stream. m_state=" << m_state;
if (m_state == s_end) {
if (len == 0) {
dbgTrace(D_WAAP_PARSER_PDF) << "ParserPDF::push(): end of stream. m_state=" << m_state;
if (m_state == s_body && m_tailOffset >= PDF_TAIL_LEN) {
if (m_receiver.onKey("PDF", 3) != 0) {
m_state = s_error;
return 0;
}
if (m_receiver.onValue("", 0) != 0) {
m_state = s_error;
return 0;
}
m_receiver.onKvDone();
} else {
m_state = s_error;
@ -61,38 +67,43 @@ ParserPDF::push(const char *buf, size_t len)
return 0;
}
size_t start = (len > MAX_PDF_TAIL_LOOKUP) ? len - MAX_PDF_TAIL_LOOKUP : 0;
switch (m_state) {
case s_start:
m_state = s_body;
CP_FALL_THROUGH;
case s_body:
{
size_t tail_lookup_offset = (len > MAX_PDF_TAIL_LOOKUP) ? len - MAX_PDF_TAIL_LOOKUP : 0;
c = strstr(buf + tail_lookup_offset, PDF_TAIL);
for (size_t i = start; i < len; i++) {
dbgTrace(D_WAAP_PARSER_PDF)
<< "string to search: " << std::string(buf + tail_lookup_offset)
<< " c=" << c;
if (c) {
m_state = s_end;
CP_FALL_THROUGH;
<< "ParserPDF::push(): m_tailOffset="
<< m_tailOffset
<< " buf[i]="
<< buf[i];
if (m_tailOffset <= PDF_TAIL_LEN - 1) {
if (buf[i] == PDF_TAIL[m_tailOffset]) {
m_tailOffset++;
} else {
m_tailOffset = 0;
}
} else {
break;
if (buf[i] == '\r' || buf[i] == '\n' || buf[i] == ' ' || buf[i] == 0) {
m_tailOffset++;
} else {
m_tailOffset = 0;
i--;
}
}
}
case s_end:
if (m_receiver.onKey("PDF", 3) != 0) {
m_state = s_error;
return 0;
}
if (m_receiver.onValue("", 0) != 0) {
m_state = s_error;
return 0;
}
dbgTrace(D_WAAP_PARSER_PDF)
<< "ParserPDF::push()->s_body: m_tailOffset="
<< m_tailOffset;
break;
case s_error:
break;
default:
dbgTrace(D_WAAP_PARSER_PDF) << "ParserPDF::push(): unknown state: " << m_state;
dbgTrace(D_WAAP_PARSER_PDF)
<< "ParserPDF::push(): unknown state: "
<< m_state;
m_state = s_error;
return 0;
}

View File

@ -34,7 +34,6 @@ private:
enum state {
s_start,
s_body,
s_end,
s_error
};
@ -42,6 +41,7 @@ private:
enum state m_state;
static const std::string m_parserName;
size_t m_parser_depth;
size_t m_tailOffset = 0;
};
#endif // __PARSER_PDF_H__

View File

@ -617,6 +617,17 @@ void SerializeToLocalAndRemoteSyncBase::setInterval(ch::seconds newInterval)
bool SerializeToLocalAndRemoteSyncBase::localSyncAndProcess()
{
bool isBackupSyncEnabled = getProfileAgentSettingWithDefault<bool>(
true,
"appsecLearningSettings.backupLocalSync");
if (!isBackupSyncEnabled) {
dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR) << "Local sync is disabled";
processData();
saveData();
return true;
}
RemoteFilesList rawDataFiles;
dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Getting files of all agents";

View File

@ -424,6 +424,8 @@ WaapAssetState::WaapAssetState(std::shared_ptr<Signatures> signatures,
std::string unescape(const std::string & s) {
std::string text = s;
size_t orig_size = text.size();
size_t orig_capacity = text.capacity();
dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (0) '" << text << "'";
fixBreakingSpace(text);
@ -433,7 +435,17 @@ WaapAssetState::WaapAssetState(std::shared_ptr<Signatures> signatures,
filterUnicode(text);
dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (1) '" << text << "'";
// inplace unescaping must result in a string of the same size or smaller
dbgAssertOpt(text.size() <= orig_size && text.size() <= text.capacity() && text.capacity() <= orig_capacity)
<< AlertInfo(AlertTeam::CORE, "WAAP sample processing")
<< "unescape: original size=" << orig_size << " capacity=" << orig_capacity
<< " new size=" << text.size() << " capacity=" << text.capacity()
<< " text='" << text << "'";
text = filterUTF7(text);
// update orig_size and orig_capacity after string copy
orig_size = text.size();
orig_capacity = text.capacity();
dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (1) (after filterUTF7) '" << text << "'";
// 2. Replace %xx sequences by their single-character equivalents.
@ -512,6 +524,14 @@ WaapAssetState::WaapAssetState(std::shared_ptr<Signatures> signatures,
}
dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (12) '" << text << "'";
// inplace unescaping must result in a string of the same size or smaller
dbgAssertOpt(text.size() <= orig_size && text.size() <= text.capacity() && text.capacity() <= orig_capacity)
<< AlertInfo(AlertTeam::CORE, "WAAP sample processing")
<< "unescape: original size=" << orig_size << " capacity=" << orig_capacity
<< " new size=" << text.size() << " capacity=" << text.capacity()
<< " text='" << text << "'";
return text;
}

View File

@ -97,7 +97,9 @@ calcIndividualKeywords(
std::sort(keywords.begin(), keywords.end());
for (auto pKeyword = keywords.begin(); pKeyword != keywords.end(); ++pKeyword) {
addKeywordScore(scoreBuilder, poolName, *pKeyword, 2.0f, 0.3f, scoresArray, coefArray);
addKeywordScore(
scoreBuilder, poolName, *pKeyword, DEFAULT_KEYWORD_SCORE, DEFAULT_KEYWORD_COEF, scoresArray, coefArray
);
}
}
@ -112,8 +114,6 @@ calcCombinations(
std::vector<std::string>& keyword_combinations)
{
keyword_combinations.clear();
static const double max_combi_score = 1.0f;
double default_coef = 0.8f;
for (size_t i = 0; i < keyword_matches.size(); ++i) {
std::vector<std::string> combinations;
@ -137,8 +137,10 @@ calcCombinations(
default_score += scoreBuilder.getSnapshotKeywordScore(*it, 0.0f, poolName);
}
// set default combination score to be the sum of its keywords, bounded by 1
default_score = std::min(default_score, max_combi_score);
addKeywordScore(scoreBuilder, poolName, combination, default_score, default_coef, scoresArray, coefArray);
default_score = std::min(default_score, DEFAULT_COMBI_SCORE);
addKeywordScore(
scoreBuilder, poolName, combination, default_score, DEFAULT_COMBI_COEF, scoresArray, coefArray
);
keyword_combinations.push_back(combination);
}
}
@ -155,7 +157,7 @@ calcArrayScore(std::vector<double>& scoreArray)
// *pScore is always positive and there's a +10 offset
score = 10.0f - left * 10.0f / divisor;
}
dbgTrace(D_WAAP_SCORE_BUILDER) << "calculated score: " << score;
dbgDebug(D_WAAP_SCORE_BUILDER) << "calculated score: " << score;
return score;
}
@ -171,7 +173,9 @@ calcLogisticRegressionScore(std::vector<double> &coefArray, double intercept, do
}
// Apply the expit function to the log-odds to obtain the probability,
// and multiply by 10 to obtain a 'score' in the range [0, 10]
return 1.0f / (1.0f + exp(-log_odds)) * 10.0f;
double score = 1.0f / (1.0f + exp(-log_odds)) * 10.0f;
dbgDebug(D_WAAP_SCORE_BUILDER) << "calculated score (log_odds): " << score << " (" << log_odds << ")";
return score;
}
}

View File

@ -32,6 +32,11 @@ struct ModelLoggingSettings {
bool logToStream;
};
static const double DEFAULT_KEYWORD_COEF = 0.3f;
static const double DEFAULT_KEYWORD_SCORE = 2.0f;
static const double DEFAULT_COMBI_COEF = 0.8f;
static const double DEFAULT_COMBI_SCORE = 1.0f;
std::string getScorePoolNameByLocation(const std::string &location);
std::string getOtherScorePoolName();
ModelLoggingSettings getModelLoggingSettings();

View File

@ -40,6 +40,7 @@
#include "WaapOpenRedirectPolicy.h"
#include "WaapErrorDisclosurePolicy.h"
#include <boost/algorithm/string.hpp>
#include <boost/regex.hpp>
#include "generic_rulebase/parameters_config.h"
#include <iostream>
#include "ParserDelimiter.h"
@ -1092,12 +1093,9 @@ void Waf2Transaction::add_request_hdr(const char* name, int name_len, const char
void Waf2Transaction::end_request_hdrs() {
dbgFlow(D_WAAP) << "[transaction:" << this << "] end_request_hdrs";
m_isScanningRequired = setCurrentAssetContext();
if (m_siteConfig != NULL)
{
// getOverrideState also extracts the source identifier and populates m_source_identifier
// but the State itself is not needed now
Waap::Override::State overrideState = getOverrideState(m_siteConfig);
}
extractEnvSourceIdentifier();
m_pWaapAssetState->m_requestsMonitor->logSourceHit(m_source_identifier);
IdentifiersEvent ids(m_source_identifier, m_pWaapAssetState->m_assetId);
ids.notify();
@ -1390,6 +1388,20 @@ Waf2Transaction::findHtmlTagToInject(const char* data, int data_len, int& pos)
size_t tagHistPosCheck = m_tagHistPos;
for (size_t i=0; i < tagSize; ++i) {
if (tag[i] != ::tolower(m_tagHist[tagHistPosCheck])) {
if (i == tagSize - 1 && m_tagHist[tagHistPosCheck] == ' ') {
// match regex on head element with attributes
string dataStr = Waap::Util::charToString(data + pos, data_len - pos);
dataStr = dataStr.substr(0, dataStr.find('>')+1);
tagMatches = NGEN::Regex::regexMatch(
__FILE__,
__LINE__,
dataStr,
boost::regex("(?:\\s+[a-zA-Z_:][-a-zA-Z0-9_:.]*(?:\\s*=\\s*(\"[^\"]*\"|'[^']*'|[^\\s\"'>]*))?)*\\s*>")
);
pos += dataStr.length() - 1;
dbgTrace(D_WAAP_BOT_PROTECTION) << "matching head element with attributes: " << dataStr << ". match: " << tagMatches;
break;
}
tagMatches = false;
break;
}
@ -1403,12 +1415,8 @@ Waf2Transaction::findHtmlTagToInject(const char* data, int data_len, int& pos)
}
}
if(!headFound)
{
return false;
}
return true;
dbgTrace(D_WAAP_BOT_PROTECTION) << "head element tag found: " << headFound;
return headFound;
}
void
@ -1577,6 +1585,8 @@ Waf2Transaction::decideFinal(
dbgTrace(D_WAAP) << "Waf2Transaction::decideFinal(): got relevant API configuration from the I/S";
sitePolicy = &ngenAPIConfig;
m_overrideState = getOverrideState(sitePolicy);
// User limits
shouldBlock = (getUserLimitVerdict() == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP);
}
else if (WaapConfigApplication::getWaapSiteConfig(ngenSiteConfig)) {
@ -2322,10 +2332,11 @@ bool Waf2Transaction::decideResponse()
bool
Waf2Transaction::reportScanResult(const Waf2ScanResult &res) {
if (get_ignoreScore() || (res.score >= SCORE_THRESHOLD &&
(m_scanResult == nullptr || res.score > m_scanResult->score)))
if ((get_ignoreScore() || res.score >= SCORE_THRESHOLD) &&
(m_scanResult == nullptr || res.score > m_scanResult->score))
{
// Forget any previous scan result and replace with new
dbgTrace(D_WAAP) << "Setting scan result. New score: " << res.score;
// Forget any previous scan result and replace wit, h new
delete m_scanResult;
m_scanResult = new Waf2ScanResult(res);
return true;

View File

@ -594,8 +594,6 @@ Waap::Override::State Waf2Transaction::getOverrideState(IWaapConfig* sitePolicy)
overrideState.applyOverride(*overridePolicy, WaapOverrideFunctor(*this), m_matchedOverrideIds, true);
}
extractEnvSourceIdentifier();
if (overridePolicy) { // later we will run response overrides
m_overrideState.applyOverride(*overridePolicy, WaapOverrideFunctor(*this), m_matchedOverrideIds, false);
}

View File

@ -952,6 +952,145 @@ string filterUTF7(const string& text) {
return result;
}
// Decides the status of a Base64 decoded string based on various parameters.
// @param decoded The decoded string.
// @param entropy The entropy of the original encoded string.
// @param decoded_entropy The entropy of the decoded string.
// @param spacer_count The number of spacer characters in the decoded string.
// @param nonPrintableCharsCount The count of non-printable characters in the decoded string.
// @param clear_on_error Flag indicating whether to clear the decoded string on error.
// @param terminatorCharsSeen The number of terminator characters seen.
// @param called_with_prefix Flag indicating if the function was called with a prefix.
// @return The status of the Base64 decoding process.
//
// Idea:
// Check if input chunk should be replaced by decoded, suspected to be checked both as encoded and decoded
// or cleaned as binary data. Additional case - define as not base64 encoded.
// - in case decoded size less 5 - return invalid
// - check entropy delta based on that base64 encoded data has higher entropy than decoded, usually delta = 0.25
// - this check should rize suspect but cannot work vice versa
// check if decoded chunk has more than 10% of non-printable characters - this is supect for binary data encoded
// - if no suspect for binary data and entropy is suspected, check empiric conditions to decide if this binary data
// or invalid decoding
// - if suspect for binary data, first check is we have entropy suspection
// - if entropy is suspected and chunk is short and it have more than 25% of nonprintables, return invalid
// since this is not base64 encoded data
// - if entropy is not suspected and chunk is short and it have more than 50% of nonprintables, return invalid
// since this is not base64 encoded data
// - if entropy is suspected and chunk size is between 64-1024, perform additional empiric test
// This test will define if returm value should be treated as suspected or as binary data(cleared)
base64_decode_status decideStatusBase64Decoded(
string& decoded,
double entropy,
double decoded_entropy,
size_t spacer_count,
size_t nonPrintableCharsCount,
bool clear_on_error,
double terminatorCharsSeen,
bool called_with_prefix
)
{
base64_decode_status tmp_status = B64_DECODE_OK;
if (entropy - decoded_entropy + terminatorCharsSeen < BASE64_ENTROPY_THRESHOLD_DELTA) {
dbgTrace(D_WAAP_BASE64)
<< "The chunk is under suspect to be base64,"
<< "use dual processing because entropy delta is too low";
tmp_status = B64_DECODE_SUSPECTED;
}
bool empiric_condition = false;
if (decoded.size() >= 5) {
if (spacer_count > 1) {
nonPrintableCharsCount = nonPrintableCharsCount - spacer_count + 1;
}
dbgTrace(D_WAAP_BASE64)
<< "(before test for unprintables): decoded.size="
<< decoded.size()
<< ", nonPrintableCharsCount="
<< nonPrintableCharsCount
<< ", clear_on_error="
<< clear_on_error
<< ", called_with_prefix="
<< called_with_prefix;
if (nonPrintableCharsCount * 10 < decoded.size()) {
dbgTrace(D_WAAP_BASE64)
<< "(decode/replace due to small amount of nonprintables): will decide based on entropy values";
} else { // more than 10% of non-printable characters
dbgTrace(D_WAAP_BASE64) << "large amount of nonporintables";
if (tmp_status == B64_DECODE_SUSPECTED) {
// entropy - decoded_entropy + terminatorCharsSeen < 0.25
if (decoded.size() < 16 && nonPrintableCharsCount * 4 > decoded.size()) {
decoded.clear();
return B64_DECODE_INVALID;
}
dbgTrace(D_WAAP_BASE64)
<< "(large amount of nonporintables + entropy suspect), check emprirics because decoded."
<< " terminatorCharsSeen="
<< terminatorCharsSeen;
// empiric test based on investigation of real payloads
empiric_condition = entropy < decoded_entropy
&& entropy > BASE64_ENTROPY_BASE_THRESHOLD
&& decoded_entropy > BASE64_ENTROPY_DECODED_THRESHOLD
&& !called_with_prefix
&& decoded.size() > BASE64_MIN_SIZE_LIMIT
&& decoded.size() < BASE64_MAX_SIZE_LIMIT
&& terminatorCharsSeen != 0;
if (!empiric_condition) {
if (clear_on_error) decoded.clear();
return B64_DECODE_SUSPECTED;
} else {
if (clear_on_error) decoded.clear();
tmp_status = B64_DECODE_OK;
}
} else { // entropy - decoded_entropy + terminatorCharsSeen >= 0.25
// one more empiric based on uT and real payloads
if (decoded.size() < 16
&& nonPrintableCharsCount * 2 > decoded.size()
&& terminatorCharsSeen == 0) {
decoded.clear();
return B64_DECODE_INVALID;
}
dbgTrace(D_WAAP_BASE64)
<< "(delete as binary content) because decoded. Return B64_DECODE_INCOMPLETE";
if (clear_on_error) decoded.clear();
return B64_DECODE_INCOMPLETE;
}
} // less than 10% of non-printable characters
dbgTrace(D_WAAP_BASE64)
<< "After handling unprintables checking status";
if (tmp_status == B64_DECODE_OK) {
dbgTrace(D_WAAP_BASE64) << "replacing with decoded data, return B64_DECODE_OK";
return B64_DECODE_OK;
} else { // tmp_status == B64_DECODE_SUSPECTED, entropy - decoded_entropy + terminatorCharsSeen < 0.25
dbgTrace(D_WAAP_BASE64) << "Suspected due to entropy, making empiric test";
// and one more empiric test based on investigation of real payloads
empiric_condition = entropy < decoded_entropy
&& entropy > BASE64_ENTROPY_BASE_THRESHOLD
&& decoded_entropy > BASE64_ENTROPY_DECODED_THRESHOLD
&& !called_with_prefix
&& decoded.size() > BASE64_MIN_SIZE_LIMIT
&& decoded.size() < BASE64_MAX_SIZE_LIMIT;
if (empiric_condition) {
dbgTrace(D_WAAP_BASE64) << "Empiric test failed, non-base64 chunk, return B64_DECODE_INVALID";
decoded.clear();
return B64_DECODE_INVALID;
}
dbgTrace(D_WAAP_BASE64) << "Empiric test passed, return B64_DECODE_SUSPECTED";
return B64_DECODE_SUSPECTED;
}
return B64_DECODE_OK; // successfully decoded. Returns decoded data in "decoded" parameter
}
// If decoded size is too small - leave the encoded value (return false)
decoded.clear(); // discard partial data
dbgTrace(D_WAAP_BASE64)
<< "(leave as-is) because decoded too small. decoded.size="
<< decoded.size();
return B64_DECODE_INVALID;
}
// Attempts to validate and decode base64-encoded chunk.
// Value is the full value inside which potential base64-encoded chunk was found,
// it and end point to start and end of that chunk.
@ -980,18 +1119,28 @@ base64_decode_status decodeBase64Chunk(
uint32_t spacer_count = 0;
uint32_t length = end - it;
dbgTrace(D_WAAP) << "decodeBase64Chunk: value='" << value << "' match='" << string(it, end) << "'";
dbgTrace(D_WAAP)
<< "value='"
<< value
<< "' match='"
<< string(it, end)
<< "' clear_on_error='"
<< clear_on_error
<< "' called_with_prefix='"
<< called_with_prefix
<< "'";
string::const_iterator begin = it;
// The encoded data length (without the "base64," prefix) should be exactly divisible by 4
// len % 4 is not 0 i.e. this is not base64
if ((end - it) % 4 != 0) {
dbgTrace(D_WAAP_BASE64) <<
"b64DecodeChunk: (leave as-is) because encoded data length should be exactly divisible by 4.";
if ((end - it) % 4 == 1) {
dbgTrace(D_WAAP_BASE64)
<< "(leave as-is) because encoded data length should not be <4*x + 1>.";
return B64_DECODE_INVALID;
}
std::unordered_map<char, double> frequency;
std::unordered_map<char, double> original_occurences_counter;
std::unordered_map<char, double> decoded_occurences_counter;
while (it != end) {
unsigned char c = *it;
@ -999,9 +1148,8 @@ base64_decode_status decodeBase64Chunk(
if (terminatorCharsSeen) {
// terminator characters must all be '=', until end of match.
if (c != '=') {
dbgTrace(D_WAAP_BASE64) <<
"decodeBase64Chunk: (leave as-is) because terminator characters must all be '='," <<
"until end of match.";
dbgTrace(D_WAAP_BASE64)
<< "(leave as-is) because terminator characters must all be '=' until end of match.";
return B64_DECODE_INVALID;
}
@ -1009,13 +1157,13 @@ base64_decode_status decodeBase64Chunk(
terminatorCharsSeen++;
if (terminatorCharsSeen > 2) {
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (leave as-is) because terminatorCharsSeen > 2";
dbgTrace(D_WAAP_BASE64) << "(leave as-is) because terminatorCharsSeen > 2";
return B64_DECODE_INVALID;
}
// allow for more terminator characters
it++;
frequency[c]++;
original_occurences_counter[c]++;
continue;
}
@ -1040,12 +1188,18 @@ base64_decode_status decodeBase64Chunk(
// Start tracking terminator characters
terminatorCharsSeen++;
it++;
frequency[c]++;
original_occurences_counter[c]++;
continue;
}
else {
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (leave as-is) because of non-base64 character ('" <<
c << "', ASCII " << (unsigned int)c << ", offset " << (it-begin) << ")";
dbgTrace(D_WAAP_BASE64)
<< "(leave as-is) because of non-base64 character ('"
<< c
<< "', ASCII "
<< (unsigned int)c
<< ", offset "
<< (it-begin)
<< ")";
return B64_DECODE_INVALID; // non-base64 character
}
@ -1068,18 +1222,19 @@ base64_decode_status decodeBase64Chunk(
}
decoded += (char)code;
decoded_occurences_counter[(char)code]++;
}
it++;
frequency[c]++;
original_occurences_counter[c]++;
}
// end of encoded sequence decoded.
dbgTrace(D_WAAP_BASE64)
<< "decodeBase64Chunk: decoded.size="
<< "decoding done: decoded.size="
<< decoded.size()
<< ", nonPrintableCharsCount="
<< ", uncorrected nonPrintableCharsCount="
<< nonPrintableCharsCount
<< ", spacer_count = "
<< spacer_count
@ -1088,56 +1243,42 @@ base64_decode_status decodeBase64Chunk(
<< "; decoded='"
<< decoded << "'";
// Check if entropy is correlates with b64 threshold (initially > 4.5)
if (!called_with_prefix) {
double entropy = 0;
double p = 0;
for (const auto& pair : frequency) {
p = pair.second / length;
entropy -= p * std::log2(p);
}
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: base entropy = " << entropy << "length = " << length;
// Add short payload factor
if (length < 16)
entropy = entropy * 16 / length;
// Enforce tailoring '=' characters
entropy+=terminatorCharsSeen;
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: corrected entropy = " << entropy << "length = " << length;
if (entropy <= base64_entropy_threshold) {
return B64_DECODE_INVALID;
}
double entropy = 0;
double p = 0;
double decoded_entropy = 0;
for (const auto& pair : original_occurences_counter) {
p = pair.second / length;
entropy -= p * std::log2(p);
}
// Return success only if decoded.size>=5 and there are less than 10% of non-printable
// characters in output.
if (decoded.size() >= 5) {
if (spacer_count > 1) {
nonPrintableCharsCount = nonPrintableCharsCount - spacer_count + 1;
}
if (nonPrintableCharsCount * 10 < decoded.size()) {
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (decode/replace) decoded.size=" << decoded.size() <<
", nonPrintableCharsCount=" << nonPrintableCharsCount << ": replacing with decoded data";
}
else {
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (delete) because decoded.size=" << decoded.size() <<
", nonPrintableCharsCount=" << nonPrintableCharsCount <<
", clear_on_error=" << clear_on_error;
if (clear_on_error) decoded.clear();
return B64_DECODE_INCOMPLETE;
}
dbgTrace(D_WAAP_BASE64) << "returning true: successfully decoded."
<< " Returns decoded data in \"decoded\" parameter";
return B64_DECODE_OK; // successfully decoded. Returns decoded data in "decoded" parameter
for (const auto &pair : decoded_occurences_counter) {
p = pair.second / decoded.size();
decoded_entropy -= p * std::log2(p);
}
dbgTrace(D_WAAP_BASE64)
<< "Base entropy = "
<< entropy
<< " Decoded_entropy = "
<< decoded_entropy
<< "length = "
<< length;
base64_decode_status return_status = decideStatusBase64Decoded(
decoded,
entropy,
decoded_entropy,
spacer_count,
nonPrintableCharsCount,
clear_on_error,
terminatorCharsSeen,
called_with_prefix
);
dbgTrace(D_WAAP_BASE64)
<< "After decideStatusBase64Decoded return_status="
<< return_status;
return return_status;
// If decoded size is too small - leave the encoded value (return false)
decoded.clear(); // discard partial data
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (leave as-is) because decoded too small. decoded.size=" <<
decoded.size() <<
", nonPrintableCharsCount=" << nonPrintableCharsCount <<
", clear_on_error=" << clear_on_error;
return B64_DECODE_INVALID;
}
// Attempts to detect and validate base64 chunk.
@ -1180,8 +1321,9 @@ b64DecodeChunk(
return false;
}
}
return decodeBase64Chunk(value, it, end, decoded) != B64_DECODE_INVALID;
base64_decode_status status = decodeBase64Chunk(value, it, end, decoded);
dbgTrace(D_WAAP_BASE64) << "b64DecodeChunk: status = " << status;
return status != B64_DECODE_INVALID;
}
vector<string> split(const string& s, char delim) {
@ -1281,6 +1423,7 @@ static void b64TestChunk(const string &s,
int &deletedCount,
string &outStr)
{
dbgTrace(D_WAAP_BASE64) << " ===b64TestChunk===: starting with = '" << s << "'";
size_t chunkLen = (chunkEnd - chunkStart);
if ((chunkEnd - chunkStart) > static_cast<int>(b64_prefix.size()) &&
@ -1289,11 +1432,9 @@ static void b64TestChunk(const string &s,
chunkLen -= b64_prefix.size();
}
size_t chunkRem = chunkLen % 4;
// Only match chunk whose length is divisible by 4
string repl;
if (chunkRem == 0 && cb(s, chunkStart, chunkEnd, repl)) {
dbgTrace(D_WAAP_BASE64) << " ===b64TestChunk===: chunkLen = " << chunkLen;
if (cb(s, chunkStart, chunkEnd, repl)) {
// Succesfully matched b64 chunk
if (!repl.empty()) {
outStr += repl;
@ -1340,9 +1481,7 @@ bool detectBase64Chunk(
dbgTrace(D_WAAP_BASE64) << " ===detectBase64Chunk===: isB64AlphaChar = true, '" << *it << "'";
start = it;
end = s.end();
if ((end - start) % 4 == 0) {
return true;
}
return true;
}
// non base64 before supposed chunk - will not process
return false;
@ -1381,17 +1520,31 @@ bool isBase64PrefixProcessingOK (
if (detectBase64Chunk(s, start, end)) {
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: chunk detected";
if ((start != s.end()) && (end == s.end())) {
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: chunk detected but not complete";
retVal = processDecodedChunk(s, start, end, value, binaryFileType, true);
dbgTrace(D_WAAP_BASE64)
<< " ===isBase64PrefixProcessingOK===: after processDecodedChunk retVal = "
<< retVal
<< " binaryFileType = "
<< binaryFileType;
}
} else if (start != s.end()) {
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: chunk not detected."
" searching for known file header only";
dbgTrace(D_WAAP_BASE64)
<< " ===isBase64PrefixProcessingOK===: chunk not detected. searching for known file header only";
end = (start + MAX_HEADER_LOOKUP < s.end()) ? start + MAX_HEADER_LOOKUP : s.end();
processDecodedChunk(s, start, end, value, binaryFileType);
value.clear();
dbgTrace(D_WAAP_BASE64)
<< " ===isBase64PrefixProcessingOK===: after processDecodedChunk binaryFileType = "
<< binaryFileType;
return binaryFileType != Waap::Util::BinaryFileType::FILE_TYPE_NONE;
}
}
dbgTrace(D_WAAP_BASE64)
<< " ===isBase64PrefixProcessingOK===: retVal = "
<< retVal
<< " binaryFileType = "
<< binaryFileType;
return retVal != B64_DECODE_INVALID;
}
@ -1399,23 +1552,31 @@ base64_variants b64Test (
const string &s,
string &key,
string &value,
BinaryFileType &binaryFileType)
BinaryFileType &binaryFileType,
const size_t offset)
{
key.clear();
bool retVal;
binaryFileType = Waap::Util::BinaryFileType::FILE_TYPE_NONE;
auto begin = s.begin() + offset;
dbgTrace(D_WAAP_BASE64)
<< " ===b64Test===: string = "
<< s
<< " key = "
<< key
<< " value = "
<< value
<< " offset = "
<< offset;
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: string = " << s
<< " key = " << key << " value = " << value;
// Minimal length
if (s.size() < 8) {
if (s.size() < 8 + offset) {
return CONTINUE_AS_IS;
}
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: minimal lenght test passed";
std::string prefix_decoded_val;
string::const_iterator it = s.begin();
auto it = begin;
// 1st check if we have key candidate
if (base64_key_value_detector_re.hasMatch(s)) {
@ -1433,7 +1594,7 @@ base64_variants b64Test (
break;
case EQUAL:
if (*it == '=') {
it = s.begin();
it = begin;
state=MISDETECT;
continue;
}
@ -1455,7 +1616,7 @@ base64_variants b64Test (
if (it == s.end() || state == MISDETECT) {
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: detected *it = s.end()" << *it;
if (key.size() > 0) {
it = s.begin();
it = begin;
key.clear();
}
} else {
@ -1479,7 +1640,7 @@ base64_variants b64Test (
}
}
string::const_iterator start = s.end();
auto start = s.end();
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: B64 itself = " << *it << " =======";
bool isB64AlphaChar = Waap::Util::isAlphaAsciiFast(*it) || isdigit(*it) || *it=='/' || *it=='+';
if (isB64AlphaChar) {
@ -1487,11 +1648,6 @@ base64_variants b64Test (
dbgTrace(D_WAAP_BASE64) <<
" ===b64Test===: Start tracking potential b64 chunk = " << *it << " =======";
start = it;
if ((s.end() - start) % 4 != 0) {
key.clear();
value.clear();
return CONTINUE_AS_IS;
}
}
else {
dbgTrace(D_WAAP_BASE64) <<
@ -1512,17 +1668,37 @@ base64_variants b64Test (
key.pop_back();
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: FINAL key = '" << key << "'";
}
retVal = decodeBase64Chunk(s, start, s.end(), value) != B64_DECODE_INVALID;
base64_decode_status decode_chunk_status = decodeBase64Chunk(s, start, s.end(), value);
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: After testing and conversion value = "
<< value << "retVal = '" << retVal <<"'";
if (!retVal) {
dbgTrace(D_WAAP_BASE64)
<< " ===b64Test===: After testing and conversion value = "
<< value
<< "decode_chunk_status = '"
<< decode_chunk_status
<<"'";
if (decode_chunk_status == B64_DECODE_INVALID) {
key.clear();
value.clear();
return CONTINUE_AS_IS;
}
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: After tpassed retVal check = "
<< value << "retVal = '" << retVal <<"'" << "key = '" << key << "'";
if (decode_chunk_status == B64_DECODE_INCOMPLETE) {
value.clear();
}
if (decode_chunk_status == B64_DECODE_SUSPECTED) {
return CONTINUE_DUAL_SCAN;
}
dbgTrace(D_WAAP_BASE64)
<< " ===b64Test===: After tpassed retVal check = "
<< value
<< "decode_chunk_status = '"
<< decode_chunk_status
<<"'"
<< "key = '"
<< key
<< "'";
if (key.empty()) {
return SINGLE_B64_CHUNK_CONVERT;
} else {
@ -1548,7 +1724,7 @@ void b64Decode(
deletedCount = 0;
outStr = "";
int offsetFix = 0;
dbgTrace(D_WAAP_BASE64) << " ===b64Decode===: starting with = '" << s << "'";
string::const_iterator it = s.begin();
// Minimal length
@ -1596,6 +1772,11 @@ void b64Decode(
}
// Decode and add chunk
dbgTrace(D_WAAP_BASE64)
<< " ===b64Decode===: chunkStart = "
<< *chunkStart
<< " it = "
<< *it;
b64TestChunk(s, chunkStart, it, cb, decodedCount, deletedCount, outStr);
// stop tracking b64 chunk
@ -1607,6 +1788,7 @@ void b64Decode(
}
if (chunkStart != s.end()) {
dbgTrace(D_WAAP_BASE64) << " ===b64Decode===: chunkStart = " << *chunkStart;
b64TestChunk(s, chunkStart, it, cb, decodedCount, deletedCount, outStr);
}
}

View File

@ -32,9 +32,15 @@
#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
enum base64_variants {SINGLE_B64_CHUNK_CONVERT, KEY_VALUE_B64_PAIR, CONTINUE_AS_IS};
enum base64_variants {SINGLE_B64_CHUNK_CONVERT, KEY_VALUE_B64_PAIR, CONTINUE_AS_IS, CONTINUE_DUAL_SCAN};
enum base64_stage {BEFORE_EQUAL, EQUAL, DONE, MISDETECT};
enum base64_decode_status {B64_DECODE_INVALID, B64_DECODE_OK, B64_DECODE_INCOMPLETE};
enum base64_decode_status {B64_DECODE_INVALID, B64_DECODE_OK, B64_DECODE_INCOMPLETE, B64_DECODE_SUSPECTED};
#define BASE64_ENTROPY_BASE_THRESHOLD 5.0
#define BASE64_ENTROPY_DECODED_THRESHOLD 5.4
#define BASE64_ENTROPY_THRESHOLD_DELTA 0.25
#define BASE64_MIN_SIZE_LIMIT 16
#define BASE64_MAX_SIZE_LIMIT 1024
// This is portable version of stricmp(), which is non-standard function (not even in C).
// Contrary to stricmp(), for a slight optimization, s2 is ASSUMED to be already in lowercase.
@ -221,59 +227,66 @@ inline bool isHexDigit(const char ch) {
template<class _IT>
_IT escape_backslashes(_IT first, _IT last) {
_IT result = first;
_IT src = first;
_IT dst = first;
_IT mark = first;
enum { STATE_COPY, STATE_ESCAPE, STATE_OCTAL, STATE_HEX } state = STATE_COPY;
unsigned char accVal = 0;
unsigned char digitsCount = 0;
_IT mark = first;
for (; first != last; ++first) {
for (; src != last && dst < last; ++src) {
switch (state) {
case STATE_COPY:
if (*first == '\\') {
mark = first;
if (*src == '\\') {
mark = src;
state = STATE_ESCAPE;
}
else {
*result++ = *first;
} else {
*dst++ = *src;
}
break;
case STATE_ESCAPE: {
if (*first >= '0' && *first <= '7') {
accVal = *first - '0';
if (*src >= '0' && *src <= '7') {
accVal = *src - '0';
digitsCount = 1;
state = STATE_OCTAL;
break;
} else if (*first == 'x') {
} else if (*src == 'x') {
accVal = 0;
digitsCount = 0;
state = STATE_HEX;
break;
}
else {
switch (*first) {
case 'a': *result++ = 7; break; // BELL
case 'b': *result++ = 8; break; // BACKSPACE
case 't': *result++ = 9; break; // HORIZONTAL TAB
case 'n': *result++ = 10; break; // LINEFEED
case 'v': *result++ = 11; break; // VERTICAL TAB
case 'f': *result++ = 12; break; // FORMFEED
case 'r': *result++ = 13; break; // CARRIAGE RETURN
case '\\': *result++ = '\\'; break; // upon seeing double backslash - output only one
case '\"': *result++ = '"'; break; // backslash followed by '"' - output only '"'
} else {
switch (*src) {
// Copy a matching character without the backslash before it
case 'a': *dst++ = 7; break; // BELL
case 'b': *dst++ = 8; break; // BACKSPACE
case 'e': *dst++ = 27; break; // ESCAPE
case 't': *dst++ = 9; break; // HORIZONTAL TAB
case 'n': *dst++ = 10; break; // LINEFEED
case 'v': *dst++ = 11; break; // VERTICAL TAB
case 'f': *dst++ = 12; break; // FORMFEED
case 'r': *dst++ = 13; break; // CARRIAGE RETURN
case '\?': *dst++ = '\?'; break; // QUESTION MARK
case '\\': *dst++ = '\\'; break; // upon seeing double backslash - output only one
case '\"': *dst++ = '\"'; break; // DOUBLE QUOTE
case '\'': *dst++ = '\''; break; // SINGLE QUOTE
default:
// invalid escape sequence - do not replace it (return original characters)
// Copy from back-track, not including current character, and continue
while (mark < first) {
*result++ = *mark++;
while (dst <= mark && mark < src) {
*dst++ = *mark++;
}
// Copy current (terminator) character which is not "escape" and return to copy state
// If current character is escape - stay is "escape" state
if (*first != '\\') {
*result++ = *mark++;
if (*src != '\\') {
*dst++ = *src;
state = STATE_COPY;
} else {
mark = src;
}
break;
}
state = STATE_COPY;
@ -282,28 +295,26 @@ _IT escape_backslashes(_IT first, _IT last) {
break;
}
case STATE_OCTAL: {
if (*first >='0' && *first<='7') {
accVal = (accVal << 3) | (*first - '0');
if (*src >= '0' && *src <= '7') {
accVal = (accVal << 3) | (*src - '0');
digitsCount++;
// Up to 3 octal digits imposed by C standard, so after 3 digits accumulation stops.
if (digitsCount == 3) {
*result++ = accVal; // output character corresponding to collected accumulated value
*dst++ = accVal; // output character corresponding to collected accumulated value
digitsCount = 0;
state = STATE_COPY;
}
}
else {
} else {
// invalid octal digit stops the accumulation
*result++ = accVal; // output character corresponding to collected accumulated value
*dst++ = accVal; // output character corresponding to collected accumulated value
digitsCount = 0;
if (*first != '\\') {
if (*src != '\\') {
// If terminating character is not backslash output the terminating character
*result++ = *first;
*dst++ = *src;
state = STATE_COPY;
}
else {
} else {
// If terminating character is backslash start next escape sequence
mark = src;
state = STATE_ESCAPE;
}
}
@ -311,36 +322,33 @@ _IT escape_backslashes(_IT first, _IT last) {
break;
}
case STATE_HEX: {
if (!isHexDigit(*first)) {
// Copy from back-track, not including current character (which is absent), and continue
while (mark < first) {
*result++ = *mark++;
if (!isHexDigit(*src)) {
// Copy from back-track, not including *src character (which is absent), and continue
while (dst <= mark && mark < src) {
*dst++ = *mark++;
}
if (*first != '\\') {
if (*src != '\\') {
// If terminating character is not backslash output the terminating character
*result++ = *first;
*dst++ = *src;
state = STATE_COPY;
}
else {
} else {
// If terminating character is backslash start next escape sequence
mark = src;
state = STATE_ESCAPE;
}
}
else {
} else {
accVal = accVal << 4;
if (isdigit(*first)) {
accVal += *first - '0';
}
else if (*first >= 'a' && *first <= 'f') {
accVal += *first - 'a' + 10;
}
else if (*first >= 'A' && *first <= 'F') {
accVal += *first - 'A' + 10;
if (isdigit(*src)) {
accVal += *src - '0';
} else if (*src >= 'a' && *src <= 'f') {
accVal += *src - 'a' + 10;
} else if (*src >= 'A' && *src <= 'F') {
accVal += *src - 'A' + 10;
}
digitsCount++;
// exactly 2 hex digits are anticipated, so after 2 digits accumulation stops.
if (digitsCount == 2) {
*result++ = accVal; // output character corresponding to collected accumulated value
*dst++ = accVal; // output character corresponding to collected accumulated value
digitsCount = 0;
state = STATE_COPY;
}
@ -350,34 +358,36 @@ _IT escape_backslashes(_IT first, _IT last) {
}
}
// Handle state at end of input
bool copyBackTrack = true;
switch (state) {
case STATE_HEX:
// this can only happen on this sequence '\xH' where H is a single hex digit.
// in this case the sequence is considered invalid and should be copied verbatim (copyBackTrack=true)
break;
case STATE_OCTAL:
// this can only happen when less than 3 octal digits are found at the value end, like '\1' or '\12'
*result++ = accVal; // output character corresponding to collected accumulated value
copyBackTrack = false;
break;
case STATE_COPY:
copyBackTrack = false;
break;
case STATE_ESCAPE:
break;
}
if (dst < last) {
// Handle state at end of input
bool copyBackTrack = true;
switch (state) {
case STATE_HEX:
// this can only happen on this sequence '\xH' where H is a single hex digit.
// in this case the sequence is considered invalid and should be copied verbatim (copyBackTrack=true)
break;
case STATE_OCTAL:
// this can only happen when less than 3 octal digits are found at the value end, like '\1' or '\12'
*dst++ = accVal; // output character corresponding to collected accumulated value
copyBackTrack = false;
break;
case STATE_COPY:
copyBackTrack = false;
break;
case STATE_ESCAPE:
break;
}
if (copyBackTrack) {
// invalid escape sequence - do not replace it (return original characters)
// Copy from back-track
while (mark < first) {
*result++ = *mark++;
if (copyBackTrack) {
// invalid escape sequence - do not replace it (return original characters)
// Copy from back-track
while (dst <= mark && mark < src) {
*dst++ = *mark++;
}
}
}
return result;
return dst;
}
inline bool str_contains(const std::string &haystack, const std::string &needle)
@ -395,7 +405,8 @@ extern const size_t g_htmlEntitiesCount;
template<class _IT>
_IT escape_html(_IT first, _IT last) {
_IT result = first;
_IT dst = first;
_IT src = first;
enum {
STATE_COPY,
STATE_ESCAPE,
@ -408,26 +419,26 @@ _IT escape_html(_IT first, _IT last) {
std::list<size_t> potentialMatchIndices;
size_t matchLength = 0;
size_t lastKnownMatchIndex = -1;
_IT mark = first;
_IT mark = src;
for (; first != last; ++first) {
for (; src != last && dst < last; ++src) {
switch (state) {
case STATE_COPY:
if (*first == '&') {
mark = first;
if (*src == '&') {
mark = src;
state = STATE_ESCAPE;
}
else {
*result++ = *first;
*dst++ = *src;
}
break;
case STATE_ESCAPE:
if (isalpha(*first)) {
if (isalpha(*src)) {
// initialize potential matches list
potentialMatchIndices.clear();
for (size_t index = 0; index < g_htmlEntitiesCount; ++index) {
if (*first == g_htmlEntities[index].name[0]) {
if (*src == g_htmlEntities[index].name[0]) {
potentialMatchIndices.push_back(index);
lastKnownMatchIndex = index;
}
@ -435,8 +446,8 @@ _IT escape_html(_IT first, _IT last) {
// No potential matches - send ampersand and current character to output
if (potentialMatchIndices.size() == 0) {
*result++ = '&';
*result++ = *first;
*dst++ = '&';
*dst++ = *src;
state = STATE_COPY;
break;
}
@ -445,7 +456,7 @@ _IT escape_html(_IT first, _IT last) {
matchLength = 1;
state = STATE_NAMED_CHARACTER_REFERENCE;
}
else if (*first == '#') {
else if (*src == '#') {
digitsSeen = 0;
accVal = 0;
state = STATE_NUMERIC_START;
@ -453,8 +464,8 @@ _IT escape_html(_IT first, _IT last) {
else {
// not isalpha and not '#' - this is invalid character reference - do not replace it
// (return original characters)
*result++ = '&';
*result++ = *first;
*dst++ = '&';
*dst++ = *src;
state = STATE_COPY;
}
break;
@ -473,7 +484,7 @@ _IT escape_html(_IT first, _IT last) {
// If there are no more characters in the potntial match name,
// or the next tested character doesn't match - kill the match
if ((matchName[matchLength] == '\0') || (matchName[matchLength] != *first)) {
if ((matchName[matchLength] == '\0') || (matchName[matchLength] != *src)) {
// remove current element from the list of potential matches
pPotentialMatchIndex = potentialMatchIndices.erase(pPotentialMatchIndex);
}
@ -489,15 +500,15 @@ _IT escape_html(_IT first, _IT last) {
// No more potential matches: unsuccesful match -> flush all consumed characters back to output stream
if (potentialMatchIndices.size() == 0) {
// Send consumed ampersand to the output
*result++ = '&';
*dst++ = '&';
// Send those matched characters (these are the same that we consumed) - to the output
for (size_t i = 0; i < matchLength; i++) {
*result++ = g_htmlEntities[lastKnownMatchIndex].name[i];
*dst++ = g_htmlEntities[lastKnownMatchIndex].name[i];
}
// Send the character that terminated our search for possible matches
*result++ = *first;
*dst++ = *src;
// Continue copying text verbatim
state = STATE_COPY;
@ -505,23 +516,23 @@ _IT escape_html(_IT first, _IT last) {
}
// There are still potential matches and ';' is hit
if (*first == ';') {
if (*src == ';') {
// longest match found for the named character reference.
// translate it into output character(s) and we're done.
unsigned short value = g_htmlEntities[lastKnownMatchIndex].value;
// Encode UTF code point as UTF-8 bytes
if (value < 0x80) {
*result++ = value;
*dst++ = value;
}
else if (value < 0x800 ) {
*result++ = (value >> 6) | 0xC0;
*result++ = (value & 0x3F) | 0x80;
*dst++ = (value >> 6) | 0xC0;
*dst++ = (value & 0x3F) | 0x80;
}
else { // (value <= 0xFFFF : always true because value type is unsigned short which is 16-bit
*result++ = (value >> 12) | 0xE0;
*result++ = ((value >> 6) & 0x3F) | 0x80;
*result++ = (value & 0x3F) | 0x80;
*dst++ = (value >> 12) | 0xE0;
*dst++ = ((value >> 6) & 0x3F) | 0x80;
*dst++ = (value & 0x3F) | 0x80;
}
// Continue copying text verbatim
@ -532,178 +543,179 @@ _IT escape_html(_IT first, _IT last) {
case STATE_NUMERIC_START:
digitsSeen = false;
accVal = 0;
if (*first == 'x' || *first == 'X') {
if (*src == 'x' || *src == 'X') {
state = STATE_HEX;
}
else if (isdigit(*first)) {
else if (isdigit(*src)) {
digitsSeen = true;
accVal = *first - '0';
accVal = *src - '0';
state = STATE_NUMERIC;
}
else {
// Sequence started with these two characters: '&#', and here is the third, non-digit character
// Copy from back-track, not including current character, and continue
while (mark < first) {
*result++ = *mark++;
while (dst <= mark && mark < src) {
*dst++ = *mark++;
}
if (*first == '&') {
if (*src == '&') {
// Terminator is also start of next escape sequence
mark = first;
mark = src;
state = STATE_ESCAPE;
break;
}
else {
// Copy the terminating character too
*result++ = *first;
*dst++ = *src;
}
state = STATE_COPY;
}
break;
case STATE_NUMERIC:
if (!isdigit(*first)) {
if (!isdigit(*src)) {
if (digitsSeen) {
// Encode UTF code point as UTF-8 bytes
if (accVal < 0x80) {
*result++ = accVal;
*dst++ = accVal;
}
else if (accVal < 0x800 ) {
*result++ = (accVal >> 6) | 0xC0;
*result++ = (accVal & 0x3F) | 0x80;
*dst++ = (accVal >> 6) | 0xC0;
*dst++ = (accVal & 0x3F) | 0x80;
}
else { // (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit
*result++ = (accVal >> 12) | 0xE0;
*result++ = ((accVal >> 6) & 0x3F) | 0x80;
*result++ = (accVal & 0x3F) | 0x80;
*dst++ = (accVal >> 12) | 0xE0;
*dst++ = ((accVal >> 6) & 0x3F) | 0x80;
*dst++ = (accVal & 0x3F) | 0x80;
}
}
else {
// Copy from back-track, not including current character (which is absent), and continue
while (mark < first) {
*result++ = *mark++;
while (dst <= mark && mark < src) {
*dst++ = *mark++;
}
}
if (*first == '&') {
if (*src == '&') {
// Terminator is also start of next escape sequence
mark = first;
mark = src;
state = STATE_ESCAPE;
break;
}
else if (!digitsSeen || *first != ';') {
else if (!digitsSeen || *src != ';') {
// Do not copy the ';' but do copy any other terminator
// Note: the ';' should remain in the output if there were no digits seen.
*result++ = *first;
*dst++ = *src;
}
state = STATE_COPY;
}
else {
digitsSeen = true;
accVal = accVal * 10 + *first - '0'; // TODO:: beware of integer overflow?
accVal = accVal * 10 + *src - '0'; // TODO:: beware of integer overflow?
}
break;
case STATE_HEX:
if (!isHexDigit(*first)) {
if (!isHexDigit(*src)) {
if (digitsSeen) {
// Encode UTF code point as UTF-8 bytes
if (accVal < 0x80) {
*result++ = accVal;
*dst++ = accVal;
}
else if (accVal < 0x800 ) {
*result++ = (accVal >> 6) | 0xC0;
*result++ = (accVal & 0x3F) | 0x80;
*dst++ = (accVal >> 6) | 0xC0;
*dst++ = (accVal & 0x3F) | 0x80;
}
else { // (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit
*result++ = (accVal >> 12) | 0xE0;
*result++ = ((accVal >> 6) & 0x3F) | 0x80;
*result++ = (accVal & 0x3F) | 0x80;
*dst++ = (accVal >> 12) | 0xE0;
*dst++ = ((accVal >> 6) & 0x3F) | 0x80;
*dst++ = (accVal & 0x3F) | 0x80;
}
}
else {
// Copy from back-track, not including current character (which is absent), and continue
while (mark < first) {
*result++ = *mark++;
while (dst <= mark && mark < src) {
*dst++ = *mark++;
}
}
if (*first == '&') {
if (*src == '&') {
// Terminator is also start of next escape sequence
mark = first;
mark = src;
state = STATE_ESCAPE;
break;
}
else if (!digitsSeen || *first != ';') {
else if (!digitsSeen || *src != ';') {
// Do not copy the ';' but do copy any other terminator
// Note: the ';' should remain in the output if there were no digits seen.
*result++ = *first;
*dst++ = *src;
}
state = STATE_COPY;
}
else {
digitsSeen = true;
accVal = accVal << 4;
if (isdigit(*first)) {
accVal += *first - '0';
if (isdigit(*src)) {
accVal += *src - '0';
}
else if (*first >= 'a' && *first <= 'f') {
accVal += *first - 'a' + 10;
else if (*src >= 'a' && *src <= 'f') {
accVal += *src - 'a' + 10;
}
else if (*first >= 'A' && *first <= 'F') {
accVal += *first - 'A' + 10;
else if (*src >= 'A' && *src <= 'F') {
accVal += *src - 'A' + 10;
}
}
break;
}
}
if (state == STATE_ESCAPE) {
*result++ = '&';
if (state == STATE_ESCAPE && dst < last) {
*dst++ = '&';
}
else if (state == STATE_NAMED_CHARACTER_REFERENCE && potentialMatchIndices.size() > 0) {
else if (state == STATE_NAMED_CHARACTER_REFERENCE && potentialMatchIndices.size() > 0 && dst < last) {
// Send consumed ampersand to the output
*result++ = '&';
*dst++ = '&';
// Send those matched characters (these are the same that we consumed) - to the output
for (size_t i = 0; i < matchLength; i++) {
for (size_t i = 0; i < matchLength && dst < last; i++) {
// Even if there are multiple potential matches, all of them start with the same
// matchLength characters that we consumed!
*result++ = g_htmlEntities[lastKnownMatchIndex].name[i];
*dst++ = g_htmlEntities[lastKnownMatchIndex].name[i];
}
}
if (state == STATE_HEX && !digitsSeen) { // Special case of "&#x"
// Copy from back-track, not including current character (which is absent), and continue
while (mark < first) {
*result++ = *mark++;
while (dst <= mark && mark < src) {
*dst++ = *mark++;
}
state = STATE_COPY;
}
else if (state == STATE_HEX || state == STATE_NUMERIC || state == STATE_NUMERIC_START) {
if (digitsSeen) {
if (digitsSeen && dst < last) {
// Encode UTF code point as UTF-8 bytes
if (accVal < 0x80) {
*result++ = accVal;
*dst++ = accVal;
}
else if (accVal < 0x800 ) {
*result++ = (accVal >> 6) | 0xC0;
*result++ = (accVal & 0x3F) | 0x80;
else if (accVal < 0x800 && std::distance(dst, last) >= 2) {
*dst++ = (accVal >> 6) | 0xC0;
*dst++ = (accVal & 0x3F) | 0x80;
}
else { // (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit
*result++ = (accVal >> 12) | 0xE0;
*result++ = ((accVal >> 6) & 0x3F) | 0x80;
*result++ = (accVal & 0x3F) | 0x80;
// (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit
else if (std::distance(dst, last) >= 3) {
*dst++ = (accVal >> 12) | 0xE0;
*dst++ = ((accVal >> 6) & 0x3F) | 0x80;
*dst++ = (accVal & 0x3F) | 0x80;
}
}
else {
// Copy from back-track, not including current character (which is absent), and continue
while (mark < first) {
*result++ = *mark++;
while (dst <= mark && mark < src) {
*dst++ = *mark++;
}
state = STATE_COPY;
}
}
return result;
return dst;
}
// Compare two buffers, case insensitive. Return true if they are equal (case-insensitive)
@ -865,6 +877,17 @@ void unescapeUnicode(std::string &text);
// Try to find and decode UTF7 chunks
std::string filterUTF7(const std::string &text);
base64_decode_status
decideStatusBase64Decoded(
std::string& decoded,
double entropy,
double decoded_entropy,
size_t spacer_count,
size_t nonPrintableCharsCount,
bool clear_on_error,
double terminatorCharsSeen,
bool called_with_prefix);
base64_decode_status
decodeBase64Chunk(
const std::string &value,
@ -926,7 +949,8 @@ namespace Util {
const std::string &s,
std::string &key,
std::string &value,
BinaryFileType &binaryFileType);
BinaryFileType &binaryFileType,
size_t offset = 0);
// The original stdlib implementation of isalpha() supports locale settings which we do not really need.
// It is also proven to contribute to slow performance in some of the algorithms using it.

View File

@ -43,6 +43,7 @@
#include "agent_core_utilities.h"
#define stack_trace_max_len 64
#define STACK_SIZE (1024 * 1024) // 1 MB stack size
using namespace std;
using namespace ReportIS;
@ -57,6 +58,12 @@ public:
{
if (out_trace_file_fd != -1) close(out_trace_file_fd);
out_trace_file_fd = -1;
if (alt_stack.ss_sp != nullptr) {
free(alt_stack.ss_sp);
alt_stack.ss_sp = nullptr;
alt_stack_initialized = false;
}
}
void
@ -69,6 +76,7 @@ public:
void
init()
{
alt_stack.ss_sp = nullptr;
addSignalHandlerRoutine();
addReloadConfigurationRoutine();
}
@ -244,6 +252,28 @@ private:
setHandlerPerSignalNum();
}
bool
setupAlternateSignalStack()
{
if (alt_stack_initialized) return true;
alt_stack.ss_sp = malloc(STACK_SIZE);
if (alt_stack.ss_sp == nullptr) {
dbgWarning(D_SIGNAL_HANDLER) << "Failed to allocate alternate stack";
return false;
}
alt_stack.ss_size = STACK_SIZE;
alt_stack.ss_flags = 0;
if (sigaltstack(&alt_stack, nullptr) == -1) {
dbgWarning(D_SIGNAL_HANDLER) << "Failed to set up alternate stack";
free(alt_stack.ss_sp);
return false;
}
dbgInfo(D_SIGNAL_HANDLER) << "Alternate stack allocated successfully. Allocated size: " << STACK_SIZE;
alt_stack_initialized = true;
return true;
}
void
setHandlerPerSignalNum()
{
@ -261,8 +291,29 @@ private:
SIGUSR2
};
if (!setupAlternateSignalStack()) {
dbgWarning(D_SIGNAL_HANDLER) << "Failed to set up alternate signal stack";
for (int sig : signals) {
signal(sig, signalHandlerCB);
}
return;
}
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
sa.sa_sigaction = signalActionHandlerCB;
sigemptyset(&sa.sa_mask);
for (int sig : signals) {
signal(sig, signalHandlerCB);
if (sig == SIGKILL || sig == SIGSTOP) {
signal(sig, signalHandlerCB);
continue;
}
if (sigaction(sig, &sa, nullptr) == -1) {
dbgError(D_SIGNAL_HANDLER) << "Failed to set signal handler for signal " << sig;
}
}
}
@ -284,55 +335,30 @@ private:
static void
signalHandlerCB(int _signal)
{
const char *signal_name = "";
const char *signal_name = strsignal(_signal);
char signal_num[3];
snprintf(signal_num, sizeof(signal_num), "%d", _signal);
if (out_trace_file_fd == -1) exit(_signal);
reset_signal_handler = true;
switch(_signal) {
case SIGABRT: {
signal_name = "SIGABRT";
fini_signal_flag = true;
return;
}
case SIGKILL: {
signal_name = "SIGKILL";
fini_signal_flag = true;
return;
}
case SIGQUIT: {
signal_name = "SIGQUIT";
fini_signal_flag = true;
return;
}
case SIGINT: {
signal_name = "SIGINT";
fini_signal_flag = true;
return;
}
case SIGABRT:
case SIGKILL:
case SIGQUIT:
case SIGINT:
case SIGTERM: {
signal_name = "SIGTERM";
fini_signal_flag = true;
return;
}
case SIGSEGV: {
signal_name = "SIGSEGV";
break;
}
case SIGBUS: {
signal_name = "SIGBUS";
break;
}
case SIGILL: {
signal_name = "SIGILL";
break;
}
case SIGSEGV:
case SIGBUS:
case SIGILL:
case SIGFPE: {
signal_name = "SIGFPE";
break;
}
case SIGPIPE: {
signal_name = "SIGPIPE";
return;
}
case SIGUSR2: {
@ -341,13 +367,6 @@ private:
}
}
if (out_trace_file_fd == -1) exit(_signal);
for (uint i = 0; i < sizeof(signal_num); ++i) {
uint placement = sizeof(signal_num) - 1 - i;
signal_num[placement] = _signal%10 + '0';
_signal /= 10;
}
const char *signal_error_prefix = "Caught signal ";
writeData(signal_error_prefix, strlen(signal_error_prefix));
writeData(signal_num, sizeof(signal_num));
@ -367,6 +386,12 @@ private:
exit(_signal);
}
static void
signalActionHandlerCB(int signum, siginfo_t *, void *)
{
signalHandlerCB(signum);
}
static void
printStackTrace()
{
@ -391,16 +416,22 @@ private:
for (uint i = 0 ; i < stack_trace_max_len ; i++) {
unw_get_reg(&cursor, UNW_REG_IP, &ip);
unw_get_reg(&cursor, UNW_REG_SP, &sp);
if (unw_get_proc_name(&cursor, name, sizeof(name), &off) == 0) {
int procNameRc = unw_get_proc_name(&cursor, name, sizeof(name), &off);
if (procNameRc == 0 || procNameRc == -UNW_ENOMEM) {
const char *open_braces = "<";
writeData(open_braces, strlen(open_braces));
writeData(name, strlen(name));
writeData(name, strnlen(name, sizeof(name)));
if (procNameRc != 0) {
const char *dots = "...";
writeData(dots, strlen(dots));
}
const char *close_braces = ">\n";
writeData(close_braces, strlen(close_braces));
} else {
const char *error = " -- error: unable to obtain symbol name for this frame\n";
writeData(error, strlen(error));
}
if (unw_step(&cursor) <= 0) return;
}
@ -444,12 +475,16 @@ private:
static bool reload_settings_flag;
static bool reset_signal_handler;
static int out_trace_file_fd;
static stack_t alt_stack;
static bool alt_stack_initialized;
};
string SignalHandler::Impl::trace_file_path;
bool SignalHandler::Impl::reload_settings_flag = false;
bool SignalHandler::Impl::reset_signal_handler = false;
int SignalHandler::Impl::out_trace_file_fd = -1;
stack_t SignalHandler::Impl::alt_stack;
bool SignalHandler::Impl::alt_stack_initialized = false;
SignalHandler::SignalHandler() : Component("SignalHandler"), pimpl(make_unique<Impl>()) {}
SignalHandler::~SignalHandler() {}

View File

@ -103,6 +103,35 @@ WildcardHost::evalVariable() const
return lower_host_ctx == lower_host;
}
EqualWafTag::EqualWafTag(const vector<string> &params)
{
if (params.size() != 1) reportWrongNumberOfParams("EqualWafTag", params.size(), 1, 1);
waf_tag = params[0];
}
Maybe<bool, Context::Error>
EqualWafTag::evalVariable() const
{
I_Environment *env = Singleton::Consume<I_Environment>::by<EqualWafTag>();
auto maybe_waf_tag_ctx = env->get<string>(HttpTransactionData::waf_tag_ctx);
if (!maybe_waf_tag_ctx.ok())
{
dbgTrace(D_RULEBASE_CONFIG) << "didnt find waf tag in current context";
return false;
}
auto waf_tag_ctx = maybe_waf_tag_ctx.unpack();
dbgTrace(D_RULEBASE_CONFIG)
<< "trying to match waf tag context with its corresponding waf tag: "
<< waf_tag_ctx
<< ". Matcher waf tag: "
<< waf_tag;
return waf_tag_ctx == waf_tag;
}
EqualListeningIP::EqualListeningIP(const vector<string> &params)
{
if (params.size() != 1) reportWrongNumberOfParams("EqualListeningIP", params.size(), 1, 1);

View File

@ -80,6 +80,7 @@ GenericRulebase::Impl::preload()
addMatcher<IpProtocolMatcher>();
addMatcher<UrlMatcher>();
addMatcher<EqualHost>();
addMatcher<EqualWafTag>();
addMatcher<WildcardHost>();
addMatcher<EqualListeningIP>();
addMatcher<EqualListeningPort>();

View File

@ -53,6 +53,7 @@ const string HttpTransactionData::req_body = "transaction_request_body
const string HttpTransactionData::source_identifier = "sourceIdentifiers";
const string HttpTransactionData::proxy_ip_ctx = "proxy_ip";
const string HttpTransactionData::xff_vals_ctx = "xff_vals";
const string HttpTransactionData::waf_tag_ctx = "waf_tag";
const CompressionType HttpTransactionData::default_response_content_encoding = CompressionType::NO_COMPRESSION;

View File

@ -20,6 +20,7 @@
#include <vector>
#include <dirent.h>
#include <boost/regex.hpp>
#include <algorithm>
#include "debug.h"
#include "maybe_res.h"
@ -75,13 +76,13 @@ NginxConfCollector::expandIncludes(const string &include_pattern) const {
struct dirent *entry;
while ((entry = readdir(dir)) != nullptr) {
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) continue;
if (NGEN::Regex::regexMatch(__FILE__, __LINE__, entry->d_name, pattern)) {
matching_files.push_back(maybe_directory + "/" + entry->d_name);
dbgTrace(D_NGINX_MANAGER) << "Matched file: " << maybe_directory << '/' << entry->d_name;
}
}
closedir(dir);
sort(matching_files.begin(), matching_files.end());
return matching_files;
}

View File

@ -137,6 +137,10 @@ spec:
type: array
items:
type: object
required:
- mode
- threatPreventionPractices
- accessControlPractices
properties:
name:
type: string
@ -1216,3 +1220,886 @@ spec:
kind: PolicyActivation
shortNames:
- policyactivation
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
name : policiesns.openappsec.io
creationTimestamp: null
spec:
group: openappsec.io
versions:
- name: v1beta2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
appsecClassName:
type: string
default:
type: object
required:
- mode
- threatPreventionPractices
- accessControlPractices
properties:
mode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
default: detect-learn
threatPreventionPractices:
type: array
items:
type: string
accessControlPractices:
type: array
items:
type: string
customResponse:
type: string
default: "403"
triggers:
type: array
items:
type: string
sourceIdentifiers:
type: string
trustedSources:
type: string
exceptions:
type: array
items:
type: string
specificRules:
type: array
items:
type: object
properties:
name:
type: string
host:
type: string
mode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
default: detect-learn
threatPreventionPractices:
type: array
items:
type: string
accessControlPractices:
type: array
items:
type: string
triggers:
type: array
items:
type: string
customResponse:
type: string
sourceIdentifiers:
type: string
trustedSources:
type: string
exceptions:
type: array
items:
type: string
scope: Namespaced
names:
plural: policiesns
singular: policyns
kind: PolicyNS
shortNames:
- policyns
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
name : accesscontrolpracticesns.openappsec.io
creationTimestamp: null
spec:
group: openappsec.io
versions:
- name: v1beta2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
required:
- rateLimit
properties:
appsecClassName:
type: string
practiceMode:
type: string
enum:
- inherited
- prevent
- detect
- inactive
default: inherited
rateLimit:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inactive
rules:
type: array
items:
type: object
properties:
action:
type: string
enum:
- inherited
- prevent
- detect
default: inherited
condition:
type: array
items:
type: object
required:
- key
- value
properties:
key:
type: string
value:
type: string
uri:
type: string
limit:
type: integer
unit:
type: string
enum:
- minute
- second
default: minute
triggers:
type: array
items:
type: string
comment:
type: string
scope: Namespaced
names:
plural: accesscontrolpracticesns
singular: accesscontrolpracticens
kind: AccessControlPracticeNS
shortNames:
- acpns
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name : customresponsesns.openappsec.io
creationTimestamp: null
spec:
group: openappsec.io
versions:
- name: v1beta2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
required:
- mode
properties:
appsecClassName:
type: string
mode:
type: string
enum:
- block-page
- redirect
- response-code-only
default: response-code-only
messageTitle:
type: string
messageBody:
type: string
httpResponseCode:
type: integer
minimum: 100
maximum: 599
default: 403
redirectUrl:
type: string
redirectAddXEventId:
type: boolean
default: false
required:
- mode
scope: Namespaced
names:
plural: customresponsesns
singular: customresponsens
kind: CustomResponseNS
shortNames:
- customresponsens
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
name: exceptionsns.openappsec.io
spec:
group: openappsec.io
versions:
- name: v1beta2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
required:
- action
- condition
properties:
appsecClassName:
type: string
action:
type: string
enum:
- skip
- accept
- drop
- suppressLog
default: accept
condition:
type: array
items:
type: object
required:
- key
- value
properties:
key:
type: string
value:
type: string
scope: Namespaced
names:
plural: exceptionsns
singular: exceptionns
kind: ExceptionNS
shortNames:
- exceptionns
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
name : logtriggersns.openappsec.io
creationTimestamp: null
spec:
group: openappsec.io
versions:
- name: v1beta2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
required:
- accessControlLogging
- appsecLogging
- additionalSuspiciousEventsLogging
- extendedLogging
- logDestination
properties:
appsecClassName:
type: string
accessControlLogging:
type: object
properties:
allowEvents:
type: boolean
default: false
dropEvents:
type: boolean
default: true
appsecLogging:
type: object
properties:
detectEvents:
type: boolean
default: true
preventEvents:
type: boolean
default: true
allWebRequests:
type: boolean
default: false
additionalSuspiciousEventsLogging:
type: object
properties:
enabled:
type: boolean
default: true
minSeverity:
type: string
enum:
- high
- critical
default: high
responseBody:
type: boolean
default: false
responseCode:
type: boolean
default: true
extendedLogging:
type: object
properties:
urlPath:
type: boolean
default: false
urlQuery:
type: boolean
default: false
httpHeaders:
type: boolean
default: false
requestBody:
type: boolean
default: false
logDestination:
type: object
properties:
cloud:
type: boolean
default: false
syslogService:
type: array
items:
type: object
properties:
address:
type: string
port:
type: integer
logToAgent:
type: boolean
default: true
stdout:
type: object
properties:
format:
type: string
enum:
- json
- json-formatted
default: json
local-tuning:
type: boolean
cefService:
type: array
items:
type: object
properties:
address:
type: string
port:
type: integer
proto:
type: string
enum:
- tcp
- udp
scope: Namespaced
names:
plural: logtriggersns
singular: logtriggerns
kind: LogTriggerNS
shortNames:
- logtriggerns
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
name : sourcesidentifiersns.openappsec.io
creationTimestamp: null
spec:
group: openappsec.io
versions:
- name: v1beta2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
type: object
required:
- sourcesIdentifiers
properties:
appsecClassName:
type: string
sourcesIdentifiers:
type: array
items:
type: object
required:
- identifier
properties:
identifier:
type: string
enum:
- headerkey
- JWTKey
- cookie
- sourceip
- x-forwarded-for
default: sourceip
value:
type: array
items:
type: string
scope: Namespaced
names:
plural: sourcesidentifiersns
singular: sourcesidentifierns
kind: SourcesIdentifierNS
shortNames:
- sourcesidentifierns
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
name : threatpreventionpracticesns.openappsec.io
creationTimestamp: null
spec:
group: openappsec.io
versions:
- name: v1beta2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
required:
- webAttacks
- intrusionPrevention
- fileSecurity
- snortSignatures
properties:
appsecClassName:
type: string
practiceMode:
type: string
enum:
- inherited
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
default: inherited
webAttacks:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
minimumConfidence:
type: string
enum:
- medium
- high
- critical
default: high
maxUrlSizeBytes:
type: integer
default: 32768
maxObjectDepth:
type: integer
default: 40
maxBodySizeKb:
type: integer
default: 1000000
maxHeaderSizeBytes:
type: integer
default: 102400
protections:
type: object
properties:
csrfProtection:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
errorDisclosure:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
openRedirect:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
nonValidHttpMethods:
type: boolean
default: false
antiBot:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
injectedUris:
type: array
items:
type: object
properties:
uri:
type: string
validatedUris:
type: array
items:
type: object
properties:
uri:
type: string
snortSignatures:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
configmap:
type: array
items:
type: string
files:
type: array
items:
type: string
schemaValidation:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
enforcementLevel:
type: string
configmap:
type: array
items:
type: string
files:
type: array
items:
type: string
intrusionPrevention:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
maxPerformanceImpact:
type: string
enum:
- low
- medium
- high
default: medium
minSeverityLevel:
type: string
enum:
- low
- medium
- high
- critical
default: medium
minCveYear:
type: integer
default: 2016
highConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
mediumConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
lowConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: detect
fileSecurity:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
minSeverityLevel:
type: string
enum:
- low
- medium
- high
- critical
default: medium
highConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
mediumConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
lowConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: detect
archiveInspection:
type: object
properties:
extractArchiveFiles:
type: boolean
default: false
scanMaxFileSize:
type: integer
default: 10
scanMaxFileSizeUnit:
type: string
enum:
- bytes
- KB
- MB
- GB
default: MB
archivedFilesWithinArchivedFiles:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
archivedFilesWhereContentExtractionFailed:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
largeFileInspection:
type: object
properties:
fileSizeLimit:
type: integer
default: 10
fileSizeLimitUnit:
type: string
enum:
- bytes
- KB
- MB
- GB
default: MB
filesExceedingSizeLimitAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
unnamedFilesAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
threatEmulationEnabled:
type: boolean
default: false
scope: Namespaced
names:
plural: threatpreventionpracticesns
singular: threatpreventionpracticens
kind: ThreatPreventionPracticeNS
shortNames:
- tppns
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
name : trustedsourcesns.openappsec.io
creationTimestamp: null
spec:
group: openappsec.io
versions:
- name: v1beta2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
required:
- minNumOfSources
- sourcesIdentifiers
properties:
appsecClassName:
type: string
minNumOfSources:
type: integer
default: 3
sourcesIdentifiers:
type: array
items:
type: string
scope: Namespaced
names:
plural: trustedsourcesns
singular: trustedsourcens
kind: TrustedSourceNS
shortNames:
- trustedsourcens

View File

@ -17,16 +17,6 @@ spec:
customResponse: default-web-user-response
triggers:
- default-log-trigger
specificRules:
- host: www.example.com
# this is an example for specific rule, adjust the values as required for the protected app
mode: detect-learn
threatPreventionPractices:
- default-threat-prevention-practice
accessControlPractices:
- default-access-control-practice
triggers:
- default-log-trigger
---
apiVersion: openappsec.io/v1beta2
kind: ThreatPreventionPractice
@ -112,7 +102,7 @@ spec:
responseCode: true
logDestination:
cloud: true
logToAgent: false
logToAgent: true
stdout:
format: json

View File

@ -17,16 +17,6 @@ spec:
customResponse: default-web-user-response
triggers:
- default-log-trigger
specificRules:
- host: www.example.com
# this is an example for specific rule, adjust the values as required for the protected app
mode: prevent-learn
threatPreventionPractices:
- default-threat-prevention-practice
accessControlPractices:
- default-access-control-practice
triggers:
- default-log-trigger
---
apiVersion: openappsec.io/v1beta2
kind: ThreatPreventionPractice
@ -112,7 +102,7 @@ spec:
responseCode: true
logDestination:
cloud: true
logToAgent: false
logToAgent: true
stdout:
format: json

View File

@ -0,0 +1,434 @@
ype: object
properties:
policies:
type: object
properties:
default:
type: object
properties:
custom-response:
type: string
exceptions:
items:
type: string
type: array
mode:
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
type: string
practices:
items:
type: string
type: array
source-identifiers:
type: string
triggers:
items:
type: string
type: array
trusted-sources:
type: string
required:
- mode
- practices
- triggers
specific-rules:
type: array
items:
properties:
host:
type: string
custom-response:
type: string
exceptions:
items:
type: string
type: array
mode:
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
type: string
practices:
items:
type: string
type: array
source-identifiers:
type: string
triggers:
items:
type: string
type: array
trusted-sources:
type: string
required:
- mode
- host
- practices
- triggers
type: object
practices:
type: array
items:
properties:
name:
type: string
anti-bot:
properties:
injected-URIs:
items:
properties:
uri:
type: string
type: object
type: array
override-mode:
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- as-top-level
type: string
default: "inactive"
validated-URIs:
items:
properties:
uri:
type: string
type: object
type: array
type: object
openapi-schema-validation:
properties:
files:
items:
type: string
type: array
override-mode:
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- as-top-level
type: string
type: object
snort-signatures:
properties:
files:
items:
type: string
type: array
override-mode:
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- as-top-level
type: string
type: object
web-attacks:
properties:
max-body-size-kb:
type: integer
max-header-size-bytes:
type: integer
max-object-depth:
type: integer
max-url-size-bytes:
type: integer
minimum-confidence:
enum:
- medium
- high
- critical
type: string
override-mode:
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- as-top-level
type: string
protections:
properties:
csrf-enabled:
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
type: string
error-disclosure-enabled:
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
type: string
non-valid-http-methods:
type: boolean
open-redirect-enabled:
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
type: string
type: object
type: object
required:
- name
custom-responses:
type: array
minItems: 0
items:
type: object
properties:
name:
type: string
http-response-code:
maximum: 599
minimum: 100
default: 403
type: integer
message-body:
type: string
default: "Attack blocked by web application protection"
message-title:
type: string
default: "Openappsec's <b>Application Security</b> has detected an attack and blocked it."
mode:
enum:
- block-page
- response-code-only
type: string
required:
- name
log-triggers:
type: array
minItems: 0
items:
type: object
properties:
name:
type: string
access-control-logging:
properties:
allow-events:
type: boolean
default: false
drop-events:
type: boolean
default: false
type: object
additional-suspicious-events-logging:
properties:
enabled:
type: boolean
default true:
minimum-severity:
enum:
- high
- critical
type: string
default: "high"
response-body:
type: boolean
default: false
response-code:
type: boolean
default: true
type: object
appsec-logging:
properties:
all-web-requests:
type: boolean
default: false
detect-events:
type: boolean
default: false
prevent-events:
type: boolean
default: true
type: object
extended-logging:
properties:
http-headers:
type: boolean
default: false
request-body:
type: boolean
default: false
url-path:
type: boolean
default: false
url-query:
type: boolean
default: false
type: object
log-destination:
properties:
cef-service:
minItems: 0
items:
properties:
address:
type: string
port:
type: integer
proto:
enum:
- tcp
- udp
type: string
type: object
type: array
cloud:
type: boolean
default: false
stdout:
properties:
format:
enum:
- json
- json-formatted
type: string
default: json
type: object
syslog-service:
minItems: 0
items:
properties:
address:
type: string
port:
type: integer
type: object
type: array
type: object
required:
- name
exceptions:
type: array
minItems: 0
items:
type: object
properties:
name:
type: string
action:
enum:
- skip
- accept
- drop
- suppressLog
type: string
comment:
type: string
countryCode:
items:
type: string
type: array
countryName:
items:
type: string
type: array
hostName:
items:
type: string
type: array
paramName:
items:
type: string
type: array
paramValue:
items:
type: string
type: array
protectionName:
items:
type: string
type: array
sourceIdentifier:
items:
type: string
type: array
sourceIp:
items:
type: string
type: array
url:
items:
type: string
type: array
required:
- name
- action
trusted-sources:
type: array
minItems: 0
items:
type: object
properties:
name:
type: string
minNumOfSources:
type: integer
minimum: 1
default: 3
sources-identifiers:
items:
type: string
type: array
required:
- name
- sources-identifiers
source-identifiers:
type: array
minItems: 0
items:
type: object
properties:
name:
type: string
identifiers:
type: array
minItems: 1
items:
type: object
source-identifier:
enum:
- headerkey
- JWTKey
- cookie
- sourceip
- x-forwarded-for
type: string
value:
items:
type: string
type: array
required:
- source-identifier
required:
- name
- identifiers
additionalProperties: false

View File

@ -0,0 +1,752 @@
type: object
properties:
apiVersion:
type: string
enum:
- v1beta1
- v1beta2
policies:
type: object
properties:
appsecClassName:
type: string
default:
type: object
required:
- mode
- threatPreventionPractices
- accessControlPractices
properties:
mode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
default: detect-learn
threatPreventionPractices:
type: array
items:
type: string
accessControlPractices:
type: array
items:
type: string
customResponse:
type: string
default: "403"
triggers:
type: array
items:
type: string
sourceIdentifiers:
type: string
trustedSources:
type: string
exceptions:
type: array
items:
type: string
specificRules:
type: array
items:
type: object
properties:
name:
type: string
host:
type: string
mode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
default: detect-learn
threatPreventionPractices:
type: array
items:
type: string
accessControlPractices:
type: array
items:
type: string
triggers:
type: array
items:
type: string
customResponse:
type: string
sourceIdentifiers:
type: string
trustedSources:
type: string
exceptions:
type: array
items:
type: string
logTriggers:
type: array
items:
type: object
required:
- accessControlLogging
- appsecLogging
- additionalSuspiciousEventsLogging
- extendedLogging
- logDestination
properties:
appsecClassName:
type: string
name:
type: string
accessControlLogging:
type: object
properties:
allowEvents:
type: boolean
default: false
dropEvents:
type: boolean
default: true
appsecLogging:
type: object
properties:
detectEvents:
type: boolean
default: true
preventEvents:
type: boolean
default: true
allWebRequests:
type: boolean
default: false
additionalSuspiciousEventsLogging:
type: object
properties:
enabled:
type: boolean
default: true
minSeverity:
type: string
enum:
- high
- critical
default: high
responseBody:
type: boolean
default: false
responseCode:
type: boolean
default: true
extendedLogging:
type: object
properties:
urlPath:
type: boolean
default: false
urlQuery:
type: boolean
default: false
httpHeaders:
type: boolean
default: false
requestBody:
type: boolean
default: false
logDestination:
type: object
properties:
cloud:
type: boolean
default: false
local-tuning:
type: boolean
default: false
syslogService:
type: array
items:
type: object
properties:
address:
type: string
port:
type: integer
logToAgent:
type: boolean
default: true
stdout:
type: object
properties:
format:
type: string
enum:
- json
- json-formatted
default: json
cefService:
type: array
items:
type: object
properties:
address:
type: string
port:
type: integer
proto:
type: string
enum:
- tcp
- udp
threatPreventionPractices:
type: array
items:
type: object
required:
- webAttacks
- intrusionPrevention
- fileSecurity
- snortSignatures
properties:
appsecClassName:
type: string
name:
type: string
practiceMode:
type: string
enum:
- inherited
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
default: inherited
webAttacks:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
minimumConfidence:
type: string
enum:
- medium
- high
- critical
default: high
maxUrlSizeBytes:
type: integer
default: 32768
maxObjectDepth:
type: integer
default: 40
maxBodySizeKb:
type: integer
default: 1000000
maxHeaderSizeBytes:
type: integer
default: 102400
protections:
type: object
properties:
csrfProtection:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
errorDisclosure:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
openRedirect:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
nonValidHttpMethods:
type: boolean
default: false
antiBot:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
injectedUris:
type: array
items:
type: object
properties:
uri:
type: string
validatedUris:
type: array
items:
type: object
properties:
uri:
type: string
snortSignatures:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
configmap:
type: array
items:
type: string
files:
type: array
items:
type: string
schemaValidation:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
enforcementLevel:
type: string
configmap:
type: array
items:
type: string
files:
type: array
items:
type: string
intrusionPrevention:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
maxPerformanceImpact:
type: string
enum:
- low
- medium
- high
default: medium
minSeverityLevel:
type: string
enum:
- low
- medium
- high
- critical
default: medium
minCveYear:
type: integer
default: 2016
highConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
mediumConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
lowConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: detect
fileSecurity:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent-learn
- detect-learn
- prevent
- detect
- inactive
- inherited
default: inactive
minSeverityLevel:
type: string
enum:
- low
- medium
- high
- critical
default: medium
highConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
mediumConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inherited
lowConfidenceEventAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: detect
archiveInspection:
type: object
properties:
extractArchiveFiles:
type: boolean
default: false
scanMaxFileSize:
type: integer
default: 10
scanMaxFileSizeUnit:
type: string
enum:
- bytes
- KB
- MB
- GB
default: MB
archivedFilesWithinArchivedFiles:
type: string
enum:
- prevent
- detect
- inactive
- inherited #as set in overrideMode for fileSecurity
default: inherited
archivedFilesWhereContentExtractionFailed:
type: string
enum:
- prevent
- detect
- inactive
- inherited #as set in overrideMode for fileSecurity
default: inherited
largeFileInspection:
type: object
properties:
fileSizeLimit:
type: integer
default: 10
fileSizeLimitUnit:
type: string
enum:
- bytes
- KB
- MB
- GB
default: MB
filesExceedingSizeLimitAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited #as set in overrideMode for fileSecurity
default: inherited
unnamedFilesAction:
type: string
enum:
- prevent
- detect
- inactive
- inherited #as set in overrideMode for fileSecurity
default: inherited
threatEmulationEnabled:
type: boolean
default: false
accessControlPractices:
type: array
items:
type: object
required:
- rateLimit
properties:
appsecClassName:
type: string
name:
type: string
practiceMode:
type: string
enum:
- inherited #inherited from mode set in policy
- prevent
- detect
- inactive
default: inherited
rateLimit:
type: object
required:
- overrideMode
properties:
overrideMode:
type: string
enum:
- prevent
- detect
- inactive
- inherited
default: inactive
rules:
type: array
items:
type: object
properties:
action: # currently not supported
type: string
enum:
- inherited
- prevent
- detect
default: inherited
condition: # currently not supported
type: array
items:
type: object
required:
- key
- value
properties:
key:
type: string
value:
type: string
uri:
type: string
limit:
type: integer
unit:
type: string
enum:
- minute
- second
default: minute
triggers:
type: array
items:
type: string
comment:
type: string
customResponses:
type: array
items:
type: object
required:
- mode
properties:
appsecClassName:
type: string
name:
type: string
mode:
type: string
enum:
- block-page
- redirect
- response-code-only
default: response-code-only
messageTitle:
type: string
messageBody:
type: string
httpResponseCode:
type: integer
minimum: 100
maximum: 599
default: 403
redirectUrl:
type: string
redirectAddXEventId:
type: boolean
default: false
sourcesIdentifiers:
type: array
items:
type: object
required:
- sourcesIdentifiers
properties:
name:
type: string
sourcesIdentifiers:
type: array
items:
type: object
required:
- identifier
properties:
identifier:
type: string
enum:
- headerkey
- JWTKey
- cookie
- sourceip
- x-forwarded-for
default: sourceip
value:
type: array
items:
type: string
exceptions:
type: array
items:
type: object
required:
- action
- condition
properties:
appsecClassName:
type: string
name:
type: string
action:
type: string
enum:
- skip
- accept
- drop
- suppressLog
default: accept
condition:
type: array
items:
type: object
required:
- key
- value
properties:
key:
type: string
value:
type: string
trustedSources:
type: array
items:
type: object
required:
- minNumOfSources
- sourcesIdentifiers
properties:
appsecClassName:
type: string
name:
type: string
minNumOfSources:
type: integer
default: 3
sourcesIdentifiers:
type: array
items:
type: string
policyActivations:
type: array
items:
type: object
properties:
appsecClassName:
type: string
enabledPolicies:
type: array
items:
type: object
properties:
name:
type: string
hosts:
type: array
items:
type: string
required:
- hosts
required:
- enabledPolicies
additionalProperties: false

View File

@ -276,6 +276,7 @@ void
AgentDetails::preload()
{
registerExpectedConfiguration<string>("orchestration", "Agent details path");
registerExpectedConfiguration<string>("Agent details", "File path");
registerConfigLoadCb([this] () { readAgentDetails(); });
}
@ -436,6 +437,7 @@ AgentDetails::loadProxyType(const string &proxy_type)
}
#ifdef gaia
(void)proxy_type;
I_ShellCmd *shell_cmd = Singleton::Consume<I_ShellCmd>::by<AgentDetails>();
auto proxy_ip = shell_cmd->getExecOutput("dbget proxy:ip-address| tr -d '\n'");
if (!proxy_ip.ok()) return proxy_ip;

View File

@ -68,6 +68,7 @@ public:
const Maybe<string> &agent_version
) override;
pair<string, string> generateTimeStamp();
bool addAttr(const string &key, const string &val, bool allow_override = false) override;
bool addAttr(const map<string, string> &attr, bool allow_override = false) override;
void deleteAttr(const string &key) override;
@ -218,6 +219,13 @@ AgentDetailsReporter::Impl::isPersistantAttr(const std::string &key)
return persistant_attributes.count(key) > 0;
}
// Builds the "timestamp" attribute pair from the current wall-clock time,
// so every attribute report sent to the Fog carries its creation time.
pair<string, string>
AgentDetailsReporter::Impl::generateTimeStamp()
{
    auto timer = Singleton::Consume<I_TimeGet>::by<AgentDetailsReporter>();
    return make_pair("timestamp", timer->getWalltimeStr());
}
bool
AgentDetailsReporter::Impl::sendAttributes()
{
@ -232,11 +240,10 @@ AgentDetailsReporter::Impl::sendAttributes()
attributes[new_attr.first] = new_attr.second;
}
AttributesSender attr_to_send(attributes);
if (is_server) {
AttrSerializer<ofstream, cereal::JSONOutputArchive>(attributes, "save");
attr_to_send.attributes.get().insert(generateTimeStamp());
messaging->sendAsyncMessage(HTTPMethod::PATCH, "/agents", attr_to_send);
dbgDebug(D_AGENT_DETAILS) << "Triggered persistent message request with attributes to the Fog";
new_attributes.clear();
@ -322,6 +329,36 @@ public:
attributes = attr;
}
// Adds (or optionally overrides) a single key/value attribute on the report.
//
// key            - attribute name to store.
// val            - attribute value to store.
// allow_override - when false (default), an existing key is kept unchanged and
//                  a warning is emitted; when true, the value is replaced.
//
// Fix: the warning previously re-evaluated `attr.count(key) > 0` in a ternary
// even though that branch is only reachable when the key exists; the redundant
// check and the needless if-nesting are removed (behavior unchanged).
void
addAttr(const string &key, const string &val, bool allow_override = false)
{
    dbgDebug(D_AGENT_DETAILS)
        << "Trying to add new attribute. Key: "
        << key
        << ", Value: "
        << val
        << " Should allow override: "
        << (allow_override ? "true" : "false");

    auto &attr = attributes.get();
    if (!allow_override && attr.count(key) > 0) {
        // Key already present and overriding is not allowed - keep the old value.
        dbgWarning(D_AGENT_DETAILS)
            << "Cannot override an existing value with a new one. Existing Value: "
            << attr[key];
        return;
    }

    attr[key] = val;
    // Mark the optional attributes field active so it is serialized with the report.
    if (!attributes.isActive()) attributes.setActive(true);
}
void
addAttr(const pair<string, string> &attr, bool allow_override = false)
{
addAttr(attr.first, attr.second, allow_override);
}
private:
C2S_PARAM(metaDataReport, additionalMetaData);
C2S_OPTIONAL_PARAM(string, agentVersion);
@ -402,6 +439,7 @@ AgentDetailsReporter::Impl::sendReport(
additional_metadata.setAdditionalAttributes(attributes);
}
additional_metadata.addAttr(generateTimeStamp());
messaging->sendAsyncMessage(HTTPMethod::PATCH, "/agents", additional_metadata);
}

View File

@ -8,6 +8,7 @@
#include "mock/mock_messaging.h"
#include "mock/mock_mainloop.h"
#include "mock/mock_rest_api.h"
#include "mock/mock_time_get.h"
#include "environment.h"
#include "agent_details_report.h"
@ -73,6 +74,7 @@ public:
StrictMock<MockMainLoop> mock_mainloop;
StrictMock<MockMessaging> mock_messaging;
StrictMock<MockRestApi> mock_rest;
StrictMock<MockTimeGet> mock_time_get;
I_MainLoop::Routine periodic_report;
I_AgentDetailsReporter *report;
CPTestTempfile persistence_attr_file;
@ -85,6 +87,7 @@ public:
TEST_F(AgentReporterTest, dataReport)
{
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
string custom_data = "Linux version 24.00.15F";
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
@ -92,26 +95,33 @@ TEST_F(AgentReporterTest, dataReport)
"{\n"
" \"additionalMetaData\": {\n"
" \"custom_data\": \"Linux version 24.00.15F\"\n"
" }"
"\n}",
" },\n"
" \"attributes\": {\n"
" \"timestamp\": \"Best Time ever\"\n"
" }\n"
"}",
MessageCategory::GENERIC,
_,
_
)).Times(1);
AgentDataReport() << AgentReportField(custom_data);;
AgentDataReport() << AgentReportField(custom_data);
}
TEST_F(AgentReporterTest, labeledDataReport)
{
string data = "Linux version 24.00.15F";
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
"/agents",
"{\n"
" \"additionalMetaData\": {\n"
" \"this_is_custom_label\": \"Linux version 24.00.15F\"\n"
" }"
"\n}",
" },\n"
" \"attributes\": {\n"
" \"timestamp\": \"Best Time ever\"\n"
" }\n"
"}",
MessageCategory::GENERIC,
_,
_
@ -123,6 +133,7 @@ TEST_F(AgentReporterTest, multiDataReport)
{
string custom_data = "Linux version 24.00.15F";
string data_to_report = "Agent Version 95.95.95.00A";
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
"/agents",
@ -130,8 +141,11 @@ TEST_F(AgentReporterTest, multiDataReport)
" \"additionalMetaData\": {\n"
" \"custom_data\": \"Linux version 24.00.15F\",\n"
" \"this_is_custom_label\": \"Agent Version 95.95.95.00A\"\n"
" }"
"\n}",
" },\n"
" \"attributes\": {\n"
" \"timestamp\": \"Best Time ever\"\n"
" }\n"
"}",
MessageCategory::GENERIC,
_,
_
@ -146,7 +160,7 @@ TEST_F(AgentReporterTest, multiDataReportWithRegistrationData)
{
string custom_data = "Linux version 24.00.15F";
string data_to_report = "Agent Version 95.95.95.00A";
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
"/agents",
@ -158,7 +172,10 @@ TEST_F(AgentReporterTest, multiDataReportWithRegistrationData)
" \"agentVersion\": \"1.15.9\",\n"
" \"policyVersion\": \"ccc\",\n"
" \"platform\": \"bbb\",\n"
" \"architecture\": \"aaa\"\n"
" \"architecture\": \"aaa\",\n"
" \"attributes\": {\n"
" \"timestamp\": \"Best Time ever\"\n"
" }\n"
"}",
MessageCategory::GENERIC,
_,
@ -178,11 +195,15 @@ TEST_F(AgentReporterTest, multiDataReportWithRegistrationData)
TEST_F(AgentReporterTest, basicAttrTest)
{
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
"/agents",
"{\n"
" \"additionalMetaData\": {}\n"
" \"additionalMetaData\": {},\n"
" \"attributes\": {\n"
" \"timestamp\": \"Best Time ever\"\n"
" }\n"
"}",
MessageCategory::GENERIC,
_,
@ -193,6 +214,7 @@ TEST_F(AgentReporterTest, basicAttrTest)
AgentDataReport agent_data;
}
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
"/agents",
@ -201,7 +223,8 @@ TEST_F(AgentReporterTest, basicAttrTest)
" \"attributes\": {\n"
" \"1\": \"2\",\n"
" \"a\": \"1\",\n"
" \"c\": \"d\"\n"
" \"c\": \"d\",\n"
" \"timestamp\": \"Best Time ever\"\n"
" }\n"
"}",
MessageCategory::GENERIC,
@ -219,11 +242,15 @@ TEST_F(AgentReporterTest, basicAttrTest)
AgentDataReport agent_data;
}
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
"/agents",
"{\n"
" \"additionalMetaData\": {}\n"
" \"additionalMetaData\": {},\n"
" \"attributes\": {\n"
" \"timestamp\": \"Best Time ever\"\n"
" }\n"
"}",
MessageCategory::GENERIC,
_,
@ -242,7 +269,7 @@ TEST_F(AgentReporterTest, advancedAttrTest)
EXPECT_TRUE(report->addAttr({{"c", "d"}, {"1", "2"}, {"send", "me"}}));
EXPECT_TRUE(report->addAttr("a", "b"));
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
"/agents",
@ -251,7 +278,8 @@ TEST_F(AgentReporterTest, advancedAttrTest)
" \"1\": \"2\",\n"
" \"a\": \"b\",\n"
" \"c\": \"d\",\n"
" \"send\": \"me\"\n"
" \"send\": \"me\",\n"
" \"timestamp\": \"Best Time ever\"\n"
" }\n"
"}",
MessageCategory::GENERIC,
@ -268,6 +296,7 @@ TEST_F(AgentReporterTest, advancedAttrTest)
EXPECT_TRUE(report->addAttr("new", "key val"));
EXPECT_TRUE(report->addAttr("a", "key val override", true));
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
"/agents",
@ -277,7 +306,8 @@ TEST_F(AgentReporterTest, advancedAttrTest)
" \"a\": \"key val override\",\n"
" \"c\": \"d\",\n"
" \"new\": \"key val\",\n"
" \"send\": \"me\"\n"
" \"send\": \"me\",\n"
" \"timestamp\": \"Best Time ever\"\n"
" }\n"
"}",
MessageCategory::GENERIC,
@ -291,6 +321,7 @@ TEST_F(AgentReporterTest, advancedAttrTest)
TEST_F(AgentReporterTest, RestDetailsTest)
{
stringstream rest_call_parameters;
stringstream rest_call_parameters_with_timestamp;
rest_call_parameters
<< "{\n"
<< " \"attributes\": {\n"
@ -300,17 +331,28 @@ TEST_F(AgentReporterTest, RestDetailsTest)
<< " \"send\": \"me\"\n"
<< " }\n"
<< "}";
rest_call_parameters_with_timestamp
<< "{\n"
<< " \"attributes\": {\n"
<< " \"1\": \"2\",\n"
<< " \"a\": \"key val override\",\n"
<< " \"c\": \"d\",\n"
<< " \"send\": \"me\",\n"
<< " \"timestamp\": \"Best Time ever\"\n"
<< " }\n"
<< "}";
add_details_rest_cb->performRestCall(rest_call_parameters);
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
"/agents",
rest_call_parameters.str(),
rest_call_parameters_with_timestamp.str(),
MessageCategory::GENERIC,
_,
_
)).Times(1);
EXPECT_TRUE(report->sendAttributes());
is_server_mode = false;
@ -365,6 +407,18 @@ TEST_F(AgentReporterTest, PersistenceAttrTest)
"}"
);
string expected_attributes_with_timestamp(
"{\n"
" \"attributes\": {\n"
" \"1\": \"2\",\n"
" \"a\": \"key val override\",\n"
" \"c\": \"d\",\n"
" \"send\": \"me\",\n"
" \"timestamp\": \"Best Time ever\"\n"
" }\n"
"}"
);
write_attributes << expected_attributes;
write_attributes.close();
@ -372,10 +426,11 @@ TEST_F(AgentReporterTest, PersistenceAttrTest)
EXPECT_CALL(mock_rest, mockRestCall(RestAction::ADD, "agent-details-attr", _)).WillOnce(Return(true));
agent_details_reporter_comp.init();
EXPECT_CALL(mock_time_get, getWalltimeStr()).WillOnce(Return("Best Time ever"));
EXPECT_CALL(mock_messaging, sendAsyncMessage(
HTTPMethod::PATCH,
"/agents",
expected_attributes,
expected_attributes_with_timestamp,
MessageCategory::GENERIC,
_,
_

View File

@ -203,6 +203,7 @@ private:
MessageMetadata service_config_req_md("127.0.0.1", 7777);
service_config_req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
service_config_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
service_config_req_md.setSuspension(false);
auto service_config_status = messaging->sendSyncMessage(
HTTPMethod::POST,
"/set-nano-service-config",
@ -214,6 +215,7 @@ private:
MessageMetadata secondary_port_req_md("127.0.0.1", 7778);
secondary_port_req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
secondary_port_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
secondary_port_req_md.setSuspension(false);
service_config_status = messaging->sendSyncMessage(
HTTPMethod::POST,
"/set-nano-service-config",
@ -251,6 +253,7 @@ private:
MessageMetadata service_config_req_md("127.0.0.1", 7777);
service_config_req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
service_config_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
service_config_req_md.setSuspension(false);
bool service_config_status = messaging->sendSyncMessageWithoutResponse(
HTTPMethod::POST,
"/set-reconf-status",
@ -262,6 +265,7 @@ private:
MessageMetadata secondary_port_req_md("127.0.0.1", 7778);
secondary_port_req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
secondary_port_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
secondary_port_req_md.setSuspension(false);
service_config_status = messaging->sendSyncMessageWithoutResponse(
HTTPMethod::POST,
"/set-reconf-status",

View File

@ -527,7 +527,7 @@ Debug::preload()
active_streams["FOG"] = make_shared<DebugFogStream>();
string branch = Version::getBranch();
if (branch == "master" || branch.substr(0, 6) == "hotfix") {
if (branch == "open-source" || branch == "master" || branch.substr(0, 6) == "hotfix") {
should_assert_optional = false;
} else {
should_assert_optional = true;

View File

@ -30,6 +30,7 @@ class AgentDetailsReporter
Singleton::Consume<I_Messaging>,
Singleton::Consume<I_MainLoop>,
Singleton::Consume<I_Environment>,
Singleton::Consume<I_TimeGet>,
Singleton::Consume<I_RestApi>
{
public:

View File

@ -69,14 +69,16 @@ public:
uint16_t _port_num,
Flags<MessageConnectionConfig> _conn_flags,
bool _should_buffer = false,
bool _is_to_fog = false
bool _is_to_fog = false,
bool _should_suspend = true
) :
host_name(_host_name),
port_num(_port_num),
conn_flags(_conn_flags),
should_buffer(_should_buffer),
is_to_fog(_is_to_fog),
should_send_access_token(true)
should_send_access_token(true),
should_suspend(_should_suspend)
{}
const bool &
@ -193,6 +195,12 @@ public:
is_dual_auth = true;
}
void
setSuspension(bool _should_suspend)
{
should_suspend = _should_suspend;
}
void
setExternalCertificate(const std::string &_external_certificate)
{
@ -211,6 +219,12 @@ public:
return should_buffer;
}
// Returns whether suspension handling is enabled for this message
// (defaults to true; see setSuspension).
bool
shouldSuspend() const
{
return should_suspend;
}
bool
isProxySet() const
{
@ -314,6 +328,7 @@ private:
bool is_rate_limit_block = false;
uint rate_limit_block_time = 0;
bool should_send_access_token = true;
bool should_suspend = true;
};
#endif // __MESSAGING_METADATA_H__

View File

@ -53,6 +53,51 @@ private:
std::map<std::string, std::set<std::string>> set_string_attr;
};
// An IP address range described by its textual lower and upper bounds.
// Serialized with cereal as the JSON keys "max" and "min" (in that order),
// so the member names must stay `min`/`max`.
class IpAddressRange
{
public:
    IpAddressRange() = default;

    IpAddressRange(const std::string &range_min, const std::string &range_max)
            :
        min(range_min),
        max(range_max)
    {}

    // Two ranges are equal when both endpoints match exactly (string compare).
    bool
    operator==(const IpAddressRange &other) const
    {
        return other.min == min && other.max == max;
    }

    const std::string getMin() const { return min; }
    const std::string getMax() const { return max; }

    template <class Archive>
    void
    serialize(Archive &ar)
    {
        ar(CEREAL_NVP(max), CEREAL_NVP(min));
    }

private:
    std::string min;
    std::string max;
};
// Collection of IPv4/IPv6 addresses and address ranges used as invalidation
// attributes. Builder-style add* methods return *this for chaining; get*
// accessors return Maybe and presumably fail when the corresponding vector is
// empty (declaration only here - confirm in the implementation file).
class IpAttributes
{
public:
IpAttributes() = default;
// Builder-style setters: append one value and return *this for chaining.
IpAttributes & addIpv4Addresses(const std::string &val);
IpAttributes & addIpv6Addresses(const std::string &val);
IpAttributes & addIpv4AddressRanges(const IpAddressRange &val);
IpAttributes & addIpv6AddressRanges(const IpAddressRange &val);
// Accessors returning the accumulated values wrapped in Maybe.
Maybe<std::vector<std::string>, void> getIpv4Addresses() const;
Maybe<std::vector<std::string>, void> getIpv6Addresses() const;
Maybe<std::vector<IpAddressRange>, void> getIpv4AddressRanges() const;
Maybe<std::vector<IpAddressRange>, void> getIpv6AddressRanges() const;
// Renders the attributes as a JSON object string (see unit tests for format).
Maybe<std::string, void> genObject() const;
// True when no addresses or ranges have been added.
bool isEmpty() const;
// Matching semantics against another attribute set - defined elsewhere.
bool matches(const IpAttributes &other) const;
void serialize(cereal::JSONInputArchive &ar);
void performOutputingSchema(std::ostream &, int);
private:
std::vector<std::string> ipv4_addresses;
std::vector<std::string> ipv6_addresses;
std::vector<IpAddressRange> ipv4_address_ranges;
std::vector<IpAddressRange> ipv6_address_ranges;
};
class Invalidation
{
public:
@ -60,14 +105,14 @@ public:
Invalidation & setClassifier(ClassifierType type, const std::string &val);
Invalidation & addMainAttr(const StrAttributes &attr);
Invalidation & addAttr(const StrAttributes &attr);
Invalidation & addAttr(const IpAttributes &attr);
Invalidation & setSourceId(const std::string &id);
Invalidation & setObjectType(ObjectType type);
Invalidation & setInvalidationType(InvalidationType type);
std::string getClassifier(ClassifierType type) const { return classifiers[type]; }
std::vector<StrAttributes> getMainAttributes() const { return main_attributes; }
std::vector<StrAttributes> getAttributes() const { return attributes; }
std::vector<IpAttributes> getAttributes() const { return attributes; }
const Maybe<std::string, void> & getSourceId() const { return source_id; }
const Maybe<ObjectType, void> & getObjectType() const { return object_type; }
const Maybe<InvalidationType, void> & getInvalidationType() const { return invalidation_type; }
@ -86,10 +131,11 @@ public:
private:
bool attr_matches(const std::vector<StrAttributes> &current, const std::vector<StrAttributes> &other) const;
bool attr_matches(const std::vector<IpAttributes> &current, const std::vector<IpAttributes> &other) const;
EnumArray<ClassifierType, std::string, 6> classifiers;
std::vector<StrAttributes> main_attributes;
std::vector<StrAttributes> attributes;
std::vector<IpAttributes> attributes;
Maybe<std::string, void> source_id;
Maybe<ObjectType, void> object_type;
Maybe<InvalidationType, void> invalidation_type;

View File

@ -254,7 +254,7 @@ private:
C2S_OPTIONAL_PARAM(string, sourceId);
C2S_OPTIONAL_PARAM(string, invalidationRegistrationId);
C2S_OPTIONAL_PARAM(vector<StrAttributes>, mainAttributes);
C2S_OPTIONAL_PARAM(vector<StrAttributes>, attributes);
C2S_OPTIONAL_PARAM(vector<IpAttributes>, attributes);
C2S_OPTIONAL_PARAM(string, invalidationType);
};
@ -624,7 +624,7 @@ private:
query_request.isBulk() ? queries_uri : query_uri,
*json_body,
MessageCategory::INTELLIGENCE,
global_req_md
query_request.getReqMD().getHostName().empty() ? global_req_md : query_request.getReqMD()
);
if (!req_data.ok()) {
auto response_error = req_data.getErr().toString();

View File

@ -32,6 +32,30 @@ TEST(StringAttributesBasic, SettersAndGetters)
EXPECT_FALSE(string_attributes.isEmpty());
EXPECT_EQ(string_attributes.getStringAttr("attr1").unpack(), "1");
EXPECT_EQ(string_attributes.getStringSetAttr("attr2").unpack(), vals);
IpAttributes attributes;
EXPECT_TRUE(attributes.isEmpty());
EXPECT_FALSE(attributes.getIpv4Addresses().ok());
EXPECT_FALSE(attributes.getIpv6Addresses().ok());
EXPECT_FALSE(attributes.getIpv4AddressRanges().ok());
EXPECT_FALSE(attributes.getIpv6AddressRanges().ok());
IpAddressRange range("1.1.1.1", "1.1.1.5");
attributes
.addIpv4Addresses("1.1.1.2")
.addIpv4AddressRanges(range)
.addIpv6Addresses("1.1.1.2")
.addIpv6AddressRanges(range);
EXPECT_FALSE(attributes.isEmpty());
vector<string> ip_vector = {"1.1.1.2"};
vector<IpAddressRange> ip_range_vector = {range};
EXPECT_EQ(attributes.getIpv4Addresses().unpack(), ip_vector);
EXPECT_EQ(attributes.getIpv4AddressRanges().unpack(), ip_range_vector);
EXPECT_EQ(attributes.getIpv6Addresses().unpack(), ip_vector);
EXPECT_EQ(attributes.getIpv6AddressRanges().unpack(), ip_range_vector);
}
TEST(StringAttributesBasic, attr_schema)
@ -51,6 +75,39 @@ TEST(StringAttributesBasic, attr_schema)
" ]\n"
"}";
EXPECT_EQ(ss.str(), expected_schema);
IpAddressRange range("1.1.1.1", "1.1.1.5");
IpAttributes attributes = IpAttributes()
.addIpv4Addresses("1.1.1.2")
.addIpv4Addresses("1.1.1.3")
.addIpv4AddressRanges(range)
.addIpv6Addresses("1.1.1.4")
.addIpv6AddressRanges(range);
stringstream attr_ss;
attributes.performOutputingSchema(attr_ss, 0);
expected_schema =
"{\n"
" \"ipv4Addresses\": [\n"
" \"1.1.1.2\",\n"
" \"1.1.1.3\"\n"
" ],\n"
" \"ipv6Addresses\": [\n"
" \"1.1.1.4\"\n"
" ],\n"
" \"ipv4AddressesRange\": [\n"
" {\n"
" \"max\": \"1.1.1.5\",\n"
" \"min\": \"1.1.1.1\"\n"
" }\n"
" ],\n"
" \"ipv6AddressesRange\": [\n"
" {\n"
" \"max\": \"1.1.1.5\",\n"
" \"min\": \"1.1.1.1\"\n"
" }\n"
" ]\n"
"}";
EXPECT_EQ(attr_ss.str(), expected_schema);
}
TEST(StringAttributesBasic, Matching)
@ -105,6 +162,20 @@ TEST(StringAttributesBasic, genObject)
string expected_json = "{ \"attr1\": \"1\", \"attr2\": [ \"2\", \"3\" ] }";
EXPECT_EQ(string_attributes.genObject().unpack(), expected_json);
IpAddressRange range("1.1.1.1", "1.1.1.5");
IpAttributes attributes = IpAttributes()
.addIpv4Addresses("1.1.1.2")
.addIpv4Addresses("1.1.1.3")
.addIpv4AddressRanges(range)
.addIpv6Addresses("1.1.1.4")
.addIpv6AddressRanges(range);
expected_json =
"{ \"ipv4Addresses\": [ \"1.1.1.2\", \"1.1.1.3\" ], \"ipv6Addresses\": [ \"1.1.1.4\" ], "
"\"ipv4AddressesRange\": [ { \"max\": \"1.1.1.5\", \"min\": \"1.1.1.1\" } ], "
"\"ipv6AddressesRange\": [ { \"max\": \"1.1.1.5\", \"min\": \"1.1.1.1\" } ] }";
EXPECT_EQ(attributes.genObject().unpack(), expected_json);
}
TEST(InvalidationBasic, SettersAndGetters)
@ -125,15 +196,15 @@ TEST(InvalidationBasic, SettersAndGetters)
EXPECT_FALSE(invalidation.getInvalidationType().ok());
set<string> main_vals = { "2", "3" };
set<string> vals = { "5", "6" };
vector<string> vals = {"1.1.1.1", "2.2.2.2"};
auto main_attr = StrAttributes()
.addStringAttr("main_attr1", "1")
.addStringSetAttr("main_attr2", main_vals);
auto attr = StrAttributes()
.addStringAttr("attr1", "4")
.addStringSetAttr("attr2", vals);
auto attr = IpAttributes()
.addIpv4Addresses("1.1.1.1")
.addIpv4Addresses("2.2.2.2");
invalidation
.setClassifier(ClassifierType::CATEGORY, "bbb")
@ -148,8 +219,7 @@ TEST(InvalidationBasic, SettersAndGetters)
EXPECT_EQ(invalidation.getClassifier(ClassifierType::FAMILY), "ccc");
EXPECT_EQ(invalidation.getMainAttributes().begin()->getStringAttr("main_attr1").unpack(), "1");
EXPECT_EQ(invalidation.getMainAttributes().begin()->getStringSetAttr("main_attr2").unpack(), main_vals);
EXPECT_EQ(invalidation.getAttributes().begin()->getStringAttr("attr1").unpack(), "4");
EXPECT_EQ(invalidation.getAttributes().begin()->getStringSetAttr("attr2").unpack(), vals);
EXPECT_EQ(invalidation.getAttributes().begin()->getIpv4Addresses().unpack(), vals);
EXPECT_EQ(invalidation.getSourceId().unpack(), "id");
EXPECT_EQ(invalidation.getObjectType().unpack(), Intelligence::ObjectType::ASSET);
EXPECT_EQ(invalidation.getInvalidationType().unpack(), InvalidationType::DELETE);
@ -164,9 +234,9 @@ TEST(InvalidationBasic, Matching)
.addStringAttr("main_attr1", "1")
.addStringSetAttr("main_attr2", main_vals);
auto attr = StrAttributes()
.addStringAttr("attr1", "4")
.addStringSetAttr("attr2", vals);
auto attr = IpAttributes()
.addIpv4Addresses("1.1.1.1")
.addIpv4Addresses("2.2.2.2");
auto base_invalidation = Invalidation("aaa")
.setClassifier(ClassifierType::CATEGORY, "bbb")
@ -179,10 +249,9 @@ TEST(InvalidationBasic, Matching)
.addStringSetAttr("main_attr2", main_vals)
.addStringAttr("main_attr3", "6");
auto matching_attr = StrAttributes()
.addStringAttr("attr1", "4")
.addStringSetAttr("attr2", vals)
.addStringAttr("attr3", "7");
auto matching_attr = IpAttributes()
.addIpv4Addresses("1.1.1.1")
.addIpv4Addresses("2.2.2.2");
auto matching_invalidation = Invalidation("aaa")
.setClassifier(ClassifierType::CATEGORY, "bbb")
@ -212,10 +281,9 @@ TEST(InvalidationBasic, Matching)
EXPECT_FALSE(base_invalidation.matches(missing_attr_invalidation_main));
auto missing_attr = StrAttributes()
.addStringAttr("attr1", "4")
.addStringAttr("attr2", "2")
.addStringAttr("attr3", "7");
auto missing_attr = IpAttributes()
.addIpv4Addresses("2.2.2.2")
.addIpv4Addresses("3.3.3.3");
auto missing_attr_invalidation = Invalidation("aaa")
.setClassifier(ClassifierType::CATEGORY, "bbb")
@ -280,7 +348,7 @@ public:
intelligence.preload();
intelligence.init();
main_attr.addStringAttr("attr2", "2");
attr.addStringAttr("attr3", "3");
attr.addIpv4Addresses("1.1.1.1");
}
bool
@ -291,7 +359,7 @@ public:
}
StrAttributes main_attr;
StrAttributes attr;
IpAttributes attr;
StrictMock<MockMessaging> messaging_mock;
StrictMock<MockMainLoop> mock_ml;
NiceMock<MockTimeGet> mock_time;
@ -350,7 +418,7 @@ TEST_F(IntelligenceInvalidation, sending_public_invalidation)
"\"objectType\": \"asset\", "
"\"sourceId\": \"id\", "
"\"mainAttributes\": [ { \"attr2\": \"2\" } ], "
"\"attributes\": [ { \"attr3\": \"3\" } ]"
"\"attributes\": [ { \"ipv4Addresses\": [ \"1.1.1.1\" ] } ]"
" } ] }";
EXPECT_EQ(invalidation_json, expected_json);
EXPECT_FALSE(md.getConnectionFlags().isSet(MessageConnectionConfig::UNSECURE_CONN));
@ -390,7 +458,7 @@ TEST_F(IntelligenceInvalidation, multiple_assets_invalidation)
"\"objectType\": \"asset\", "
"\"sourceId\": \"id\", "
"\"mainAttributes\": [ { \"attr2\": \"2\" }, { \"attr2\": \"22\", \"attr3\": [ \"33\", \"44\" ] } ], "
"\"attributes\": [ { \"attr3\": \"3\" } ]"
"\"attributes\": [ { \"ipv4Addresses\": [ \"1.1.1.1\" ] } ]"
" } ] }";
EXPECT_EQ(invalidation_json, expected_json);
}
@ -439,7 +507,7 @@ TEST_F(IntelligenceInvalidation, sending_private_invalidation)
"\"objectType\": \"asset\", "
"\"sourceId\": \"id\", "
"\"mainAttributes\": [ { \"attr2\": \"2\" } ], "
"\"attributes\": [ { \"attr3\": \"3\" } ]"
"\"attributes\": [ { \"ipv4Addresses\": [ \"1.1.1.1\" ] } ]"
" } ] }";
EXPECT_EQ(invalidation_json, expected_json);
EXPECT_TRUE(md.getConnectionFlags().isSet(MessageConnectionConfig::UNSECURE_CONN));
@ -484,7 +552,7 @@ TEST_F(IntelligenceInvalidation, register_for_invalidation)
EXPECT_THAT(body, HasSubstr("\"url\": \"http://127.0.0.1:7000/set-new-invalidation\""));
EXPECT_THAT(body, HasSubstr("\"apiVersion\": \"v2\", \"communicationType\": \"sync\""));
EXPECT_THAT(body, HasSubstr("\"mainAttributes\": [ { \"attr2\": \"2\" } ]"));
EXPECT_THAT(body, HasSubstr("\"attributes\": [ { \"attr3\": \"3\" } ]"));
EXPECT_THAT(body, HasSubstr("\"attributes\": [ { \"ipv4Addresses\": [ \"1.1.1.1\" ] } ]"));
EXPECT_TRUE(md.getConnectionFlags().isSet(MessageConnectionConfig::UNSECURE_CONN));
EXPECT_THAT(body, HasSubstr("\"capabilities\": { \"getBulkCallback\": true }"));
@ -888,11 +956,19 @@ TEST_F(IntelligenceInvalidation, invalidation_cb_match_by_registration_id)
configuration << "}";
Singleton::Consume<Config::I_Config>::from(conf)->loadConfiguration(configuration);
IpAddressRange range("1.1.1.1", "1.1.1.5");
IpAttributes attributes = IpAttributes()
.addIpv4Addresses("1.1.1.2")
.addIpv4AddressRanges(range)
.addIpv6Addresses("1.1.1.2")
.addIpv6AddressRanges(range);
auto base_main_attr2 = StrAttributes()
.addStringAttr("attr3", "3");
auto invalidation_to_register = Invalidation("aaa")
.addMainAttr(main_attr)
.addMainAttr(base_main_attr2)
.addAttr(attributes)
.setSourceId("id")
.setClassifier(ClassifierType::FAMILY, "ccc")
.setClassifier(ClassifierType::CATEGORY, "bbb")
@ -911,6 +987,7 @@ TEST_F(IntelligenceInvalidation, invalidation_cb_match_by_registration_id)
auto matching_invalidation = Invalidation("aaa")
.addMainAttr(matching_second_main_attribute)
.addAttr(attributes)
.setSourceId("id")
.setClassifier(ClassifierType::FAMILY, "ccc")
.setClassifier(ClassifierType::CATEGORY, "bbb")
@ -919,6 +996,7 @@ TEST_F(IntelligenceInvalidation, invalidation_cb_match_by_registration_id)
auto invalidation_2_to_register = Invalidation("aaa")
.addMainAttr(base_main_attr2)
.addAttr(attributes)
.setSourceId("id")
.setClassifier(ClassifierType::FAMILY, "ccc")
.setClassifier(ClassifierType::CATEGORY, "bbb")

View File

@ -22,7 +22,7 @@ using namespace std;
USE_DEBUG_FLAG(D_INTELLIGENCE);
static const unsigned int upper_assets_limit = 50;
static const unsigned int upper_assets_limit = 200;
static const unsigned int upper_confidence_limit = 1000;
Maybe<void>

View File

@ -20,6 +20,8 @@
#include "i_intelligence_is_v2.h"
USE_DEBUG_FLAG(D_INTELLIGENCE);
using namespace Intelligence;
using namespace std;
@ -203,6 +205,18 @@ Invalidation::attr_matches(const vector<StrAttributes> &current, const vector<St
return false;
}
bool
Invalidation::attr_matches(const vector<IpAttributes> &current, const vector<IpAttributes> &other) const
{
if (current.empty()) return true;
for (const auto &attr : current) {
for(const auto &other_attr : other) {
if (attr.matches(other_attr)) return true;
}
}
return false;
}
bool
Invalidation::matches(const Invalidation &other) const
{
@ -230,7 +244,7 @@ Invalidation::matches(const Invalidation &other) const
}
Invalidation &
Invalidation::addAttr(const StrAttributes &attr)
Invalidation::addAttr(const IpAttributes &attr)
{
attributes.emplace_back(attr);
return *this;
@ -378,3 +392,224 @@ StrAttributes::performOutputingSchema(ostream &out, int level) {
}
RestHelper::printIndent(out, level) << "}";
}
// Appends one IPv4 address (string form) to this attribute set.
// Returns *this to support builder-style chaining.
IpAttributes &
IpAttributes::addIpv4Addresses(const string &address)
{
    ipv4_addresses.emplace_back(address);
    return *this;
}
// Appends one IPv6 address (string form) to this attribute set.
// Returns *this to support builder-style chaining.
IpAttributes &
IpAttributes::addIpv6Addresses(const string &address)
{
    ipv6_addresses.emplace_back(address);
    return *this;
}
// Appends one IPv4 address range to this attribute set.
// Returns *this to support builder-style chaining.
IpAttributes &
IpAttributes::addIpv4AddressRanges(const IpAddressRange &range)
{
    ipv4_address_ranges.emplace_back(range);
    return *this;
}
// Appends one IPv6 address range to this attribute set.
// Returns *this to support builder-style chaining.
IpAttributes &
IpAttributes::addIpv6AddressRanges(const IpAddressRange &range)
{
    ipv6_address_ranges.emplace_back(range);
    return *this;
}
// Returns the collected IPv4 addresses, or an error when none were added.
Maybe<vector<string>, void>
IpAttributes::getIpv4Addresses() const
{
    if (!ipv4_addresses.empty()) return ipv4_addresses;
    return genError<void>();
}
// Returns the collected IPv6 addresses, or an error when none were added.
Maybe<vector<string>, void>
IpAttributes::getIpv6Addresses() const
{
    if (!ipv6_addresses.empty()) return ipv6_addresses;
    return genError<void>();
}
// Returns the collected IPv4 address ranges, or an error when none were added.
Maybe<vector<IpAddressRange>, void>
IpAttributes::getIpv4AddressRanges() const
{
    if (!ipv4_address_ranges.empty()) return ipv4_address_ranges;
    return genError<void>();
}
// Returns the collected IPv6 address ranges, or an error when none were added.
Maybe<vector<IpAddressRange>, void>
IpAttributes::getIpv6AddressRanges() const
{
    if (!ipv6_address_ranges.empty()) return ipv6_address_ranges;
    return genError<void>();
}
// Serializes this attribute set into the compact JSON object used on the
// wire, e.g.:
//   { "ipv4Addresses": [ "1.1.1.1" ], "ipv4AddressesRange": [ { "max": "...", "min": "..." } ] }
// Sections are emitted in a fixed order (ipv4 addresses, ipv6 addresses,
// ipv4 ranges, ipv6 ranges); empty sections are omitted entirely.
// Returns an error when no attribute of any kind was set.
// The original implementation repeated the same separator bookkeeping four
// times; the two helpers below keep the output byte-identical while
// removing the duplication.
Maybe<string, void>
IpAttributes::genObject() const
{
    if (this->isEmpty()) return genError<void>();

    stringstream attributes_ss;
    bool first = true; // ", " separator is needed between non-empty sections

    // Emits "key": [ "v1", "v2", ... ] — skipped when the list is empty.
    auto append_string_list = [&](const char *key, const vector<string> &vals) {
        if (vals.empty()) return;
        if (!first) attributes_ss << ", ";
        attributes_ss << "\"" << key << "\": [ ";
        bool internal_first = true;
        for (const auto &val : vals) {
            if (!internal_first) attributes_ss << ", ";
            attributes_ss << "\"" << val << "\"";
            internal_first = false;
        }
        attributes_ss << " ]";
        first = false;
    };

    // Emits "key": [ { "max": "...", "min": "..." }, ... ] — skipped when empty.
    auto append_range_list = [&](const char *key, const vector<IpAddressRange> &vals) {
        if (vals.empty()) return;
        if (!first) attributes_ss << ", ";
        attributes_ss << "\"" << key << "\": [ ";
        bool internal_first = true;
        for (const auto &range : vals) {
            if (!internal_first) attributes_ss << ", ";
            attributes_ss << "{ \"max\": \"" << range.getMax() << "\", \"min\": \"" << range.getMin() << "\" }";
            internal_first = false;
        }
        attributes_ss << " ]";
        first = false;
    };

    attributes_ss << "{ ";
    append_string_list("ipv4Addresses", ipv4_addresses);
    append_string_list("ipv6Addresses", ipv6_addresses);
    // NOTE: the wire format spells these keys "...AddressesRange" (matching
    // serialize()); do not "fix" the spelling.
    append_range_list("ipv4AddressesRange", ipv4_address_ranges);
    append_range_list("ipv6AddressesRange", ipv6_address_ranges);
    attributes_ss << " }";

    return attributes_ss.str();
}
// True when no address or range of either IP family has been added.
bool
IpAttributes::isEmpty() const
{
    if (!ipv4_addresses.empty()) return false;
    if (!ipv6_addresses.empty()) return false;
    if (!ipv4_address_ranges.empty()) return false;
    return ipv6_address_ranges.empty();
}
// Two attribute sets match only when all four member lists are exactly
// equal, element order included (no subset/containment semantics).
bool
IpAttributes::matches(const IpAttributes &other) const
{
    if (ipv4_addresses != other.ipv4_addresses) return false;
    if (ipv6_addresses != other.ipv6_addresses) return false;
    if (ipv4_address_ranges != other.ipv4_address_ranges) return false;
    return ipv6_address_ranges == other.ipv6_address_ranges;
}
// Populates the attribute vectors from a JSON input archive.
// Every field is optional on the wire, and cereal throws when a named
// field is absent. The original single try-block therefore stopped at the
// first missing field and silently skipped all subsequent ones; each field
// is now read independently so one absent key cannot suppress the rest.
void
IpAttributes::serialize(cereal::JSONInputArchive &ar)
{
    try {
        ar(cereal::make_nvp("ipv4Addresses", ipv4_addresses));
    } catch (cereal::Exception &e) {
        dbgError(D_INTELLIGENCE) << e.what();
    }
    try {
        ar(cereal::make_nvp("ipv4AddressesRange", ipv4_address_ranges));
    } catch (cereal::Exception &e) {
        dbgError(D_INTELLIGENCE) << e.what();
    }
    try {
        ar(cereal::make_nvp("ipv6Addresses", ipv6_addresses));
    } catch (cereal::Exception &e) {
        dbgError(D_INTELLIGENCE) << e.what();
    }
    try {
        ar(cereal::make_nvp("ipv6AddressesRange", ipv6_address_ranges));
    } catch (cereal::Exception &e) {
        dbgError(D_INTELLIGENCE) << e.what();
    }
}
// Writes a pretty-printed (indented, multi-line) JSON rendering of this
// attribute set to "out", starting at indentation depth "level".
// Empty sections are omitted; the section order matches genObject():
// ipv4 addresses, ipv6 addresses, ipv4 ranges, ipv6 ranges.
// NOTE(review): unlike genObject() this is not const and does not guard
// against an entirely empty set — presumably callers check isEmpty() first.
void
IpAttributes::performOutputingSchema(ostream &out, int level)
{
    bool first = true;          // ",\n" separator needed between sections
    bool internal_first = true; // ",\n" separator needed between array elements
    RestHelper::printIndent(out, level) << "{\n";
    if (!ipv4_addresses.empty()) {
        RestHelper::printIndent(out, level + 1) << "\"ipv4Addresses\": [\n";
        for (auto &attr : ipv4_addresses) {
            if (!internal_first) out << ",\n";
            RestHelper::printIndent(out, level + 2) << "\"" << attr << "\"";
            internal_first = false;
        }
        out << "\n";
        RestHelper::printIndent(out, level + 1) << "]";
        first = false;
    }
    if (!ipv6_addresses.empty()) {
        if (!first) out << ",\n";
        RestHelper::printIndent(out, level + 1) << "\"ipv6Addresses\": [\n";
        internal_first = true;
        for (auto &attr : ipv6_addresses) {
            if (!internal_first) out << ",\n";
            RestHelper::printIndent(out, level + 2) << "\"" << attr << "\"";
            internal_first = false;
        }
        out << "\n";
        RestHelper::printIndent(out, level + 1) << "]";
        first = false;
    }
    if (!ipv4_address_ranges.empty()) {
        if (!first) out << ",\n";
        // Key spelling "ipv4AddressesRange" matches serialize()/genObject().
        RestHelper::printIndent(out, level + 1) << "\"ipv4AddressesRange\": [\n";
        internal_first = true;
        for (auto &attr : ipv4_address_ranges) {
            if (!internal_first) out << ",\n";
            RestHelper::printIndent(out, level + 2) << "{\n";
            RestHelper::printIndent(out, level + 3) << "\"max\": \"" << attr.getMax() << "\",\n";
            RestHelper::printIndent(out, level + 3) << "\"min\": \"" << attr.getMin() << "\"\n";
            RestHelper::printIndent(out, level + 2) << "}";
            internal_first = false;
        }
        out << "\n";
        RestHelper::printIndent(out, level + 1) << "]";
        first = false;
    }
    if (!ipv6_address_ranges.empty()) {
        if (!first) out << ",\n";
        RestHelper::printIndent(out, level + 1) << "\"ipv6AddressesRange\": [\n";
        internal_first = true;
        for (auto &attr : ipv6_address_ranges) {
            if (!internal_first) out << ",\n";
            RestHelper::printIndent(out, level + 2) << "{\n";
            RestHelper::printIndent(out, level + 3) << "\"max\": \"" << attr.getMax() << "\",\n";
            RestHelper::printIndent(out, level + 3) << "\"min\": \"" << attr.getMin() << "\"\n";
            RestHelper::printIndent(out, level + 2) << "}";
            internal_first = false;
        }
        out << "\n";
        RestHelper::printIndent(out, level + 1) << "]";
        first = false;
    }
    RestHelper::printIndent(out, level) << "\n}";
}

View File

@ -125,7 +125,9 @@ MessagingComp::sendMessage(
}
Connection conn = maybe_conn.unpack();
if (conn.isSuspended()) return suspendMessage(body, method, uri, category, message_metadata);
if (message_metadata.shouldSuspend() && conn.isSuspended()) {
return suspendMessage(body, method, uri, category, message_metadata);
}
bool is_to_fog = isMessageToFog(message_metadata);
auto metadata = message_metadata;

View File

@ -332,7 +332,17 @@ vector<PrometheusData>
GenericMetric::getPromMetricsData()
{
vector<PrometheusData> all_metrics;
if (!getProfileAgentSettingWithDefault(false, "prometheus")) return all_metrics;
bool enable_prometheus = false;
auto prometheus_settings = getProfileAgentSetting<bool>("prometheus");
if (prometheus_settings.ok()) {
enable_prometheus = prometheus_settings.unpack();
} else {
const char *prometheus_env = getenv("PROMETHEUS");
if (prometheus_env != nullptr) {
enable_prometheus = string(prometheus_env) == "true";
}
}
if (!enable_prometheus) return all_metrics;
dbgTrace(D_METRICS) << "Get prometheus metrics";
for (auto &calc : prometheus_calcs) {

View File

@ -163,10 +163,14 @@ RestServer::Impl::init()
}
}
bool is_ipv6 = false;
if (accept_get_from_external_ip) {
is_ipv6 = true;
fd = socket(AF_INET6, SOCK_STREAM, 0);
} else {
}
if (fd == -1) {
fd = socket(AF_INET, SOCK_STREAM, 0);
is_ipv6 = false;
}
dbgAssert(fd >= 0) << alert << "Failed to open a socket";
@ -175,7 +179,8 @@ RestServer::Impl::init()
dbgWarning(D_API) << "Could not set the socket options";
}
if (accept_get_from_external_ip) {
if (is_ipv6) {
dbgDebug(D_API) << "IPv6 socket opened successfully";
int option = 0;
if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &option, sizeof(option)) < 0) {
dbgWarning(D_API) << "Could not set the IPV6_V6ONLY option";
@ -185,16 +190,24 @@ RestServer::Impl::init()
bzero(&addr6, sizeof(addr6));
addr6.sin6_family = AF_INET6;
addr6.sin6_addr = in6addr_any;
dbgDebug(D_API) << "Socket listening on any address";
while (!bindRestServerSocket(addr6, port_range)) {
mainloop->yield(bind_retry_interval_msec);
}
listening_port = ntohs(addr6.sin6_port);
} else {
dbgDebug(D_API) << "IPv4 socket opened successfully";
struct sockaddr_in addr;
bzero(&addr, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
if (accept_get_from_external_ip) {
addr.sin_addr.s_addr = htonl(INADDR_ANY);
dbgDebug(D_API) << "Socket listening on any address";
} else {
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
dbgDebug(D_API) << "Socket listening on local address";
}
while (!bindRestServerSocket(addr, port_range)) {
mainloop->yield(bind_retry_interval_msec);

View File

@ -37,7 +37,7 @@ services:
environment:
# adjust with your own email below
- user_email=user@email.com
- registered_server="APISIX Server"
- registered_server="APISIX"
- AGENT_TOKEN=<TOKEN>
volumes:
- ./appsec-config:/etc/cp/conf

View File

@ -28,8 +28,8 @@ services:
- user_email=${APPSEC_USER_EMAIL}
- AGENT_TOKEN=${APPSEC_AGENT_TOKEN}
- autoPolicyLoad=${APPSEC_AUTO_POLICY_LOAD}
- registered_server=APISIX Server
ipc: shareable
- registered_server=APISIX
ipc: host
restart: unless-stopped
volumes:
- ${APPSEC_CONFIG}:/etc/cp/conf
@ -41,7 +41,7 @@ services:
appsec-apisix:
image: ghcr.io/openappsec/apisix-attachment:${APPSEC_VERSION}
container_name: appsec-apisix
ipc: service:appsec-agent
ipc: host
restart: always
environment:
- APISIX_STAND_ALONE=true
@ -69,7 +69,7 @@ services:
- standalone
image: ghcr.io/openappsec/smartsync-shared-files:${APPSEC_VERSION}
container_name: appsec-shared-storage
ipc: service:appsec-agent
ipc: host
restart: always
## if you do not want to run this container as "root" user you can comment it out and instead run the below command after the deployment
## docker exec -u root appsec-shared-storage chown -R appuser:appuser /db
@ -128,4 +128,4 @@ services:
# driver_opts:
# type: nfs
# o: addr=fs-abcdef.efs.eu-west-1.amazonaws.com,rw,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport
# device: ":/"
# device: ":/"

View File

@ -56,7 +56,7 @@ COMPOSE_PROFILES=
## Make sure to also adjust the envoy.yaml file in ENVOY_CONFIG path
## to add a routing configuration for forwarding external traffic on e.g. port 80 to the juiceshop-backend container
## you can use the example file available here:
## https://raw.githubusercontent.com/openappsec/openappsec/examples/juiceshop/envoy/envoy.yaml
## https://raw.githubusercontent.com/openappsec/openappsec/main/examples/juiceshop/envoy/envoy.yaml
## place the file above in ENVOY_CONFIG path
## note that juiceshop container listens on HTTP port 3000 by default

View File

@ -29,7 +29,7 @@ services:
- AGENT_TOKEN=${APPSEC_AGENT_TOKEN}
- autoPolicyLoad=${APPSEC_AUTO_POLICY_LOAD}
- registered_server="Envoy"
ipc: shareable
ipc: host
restart: unless-stopped
volumes:
- ${APPSEC_CONFIG}:/etc/cp/conf
@ -41,7 +41,7 @@ services:
appsec-envoy:
image: ghcr.io/openappsec/envoy-attachment:${APPSEC_VERSION}
container_name: appsec-envoy
ipc: service:appsec-agent
ipc: host
restart: unless-stopped
environment:
- ENVOY_UID=0
@ -75,7 +75,7 @@ services:
- standalone
image: ghcr.io/openappsec/smartsync-shared-files:${APPSEC_VERSION}
container_name: appsec-shared-storage
ipc: service:appsec-agent
ipc: host
restart: unless-stopped
## if you do not want to run this container as "root" user you can comment it out and instead run the below command after the deployment
## docker exec -u root appsec-shared-storage chown -R appuser:appuser /db

View File

@ -28,8 +28,8 @@ services:
- user_email=${APPSEC_USER_EMAIL}
- AGENT_TOKEN=${APPSEC_AGENT_TOKEN}
- autoPolicyLoad=${APPSEC_AUTO_POLICY_LOAD}
- registered_server=Kong Server
ipc: shareable
- registered_server=Kong
ipc: host
restart: unless-stopped
volumes:
- ${APPSEC_CONFIG}:/etc/cp/conf
@ -41,7 +41,7 @@ services:
appsec-kong:
image: ghcr.io/openappsec/${KONG_IMAGE}:${APPSEC_VERSION}
container_name: appsec-kong
ipc: service:appsec-agent
ipc: host
## This docker compose deploys Kong in DB-less mode with declarative Kong configuration
## please make sure to have a valid config present in {KONG_CONFIG}:
environment:
@ -72,7 +72,7 @@ services:
- standalone
image: ghcr.io/openappsec/smartsync-shared-files:${APPSEC_VERSION}
container_name: appsec-shared-storage
ipc: service:appsec-agent
ipc: host
restart: unless-stopped
## if you do not want to run this container as "root" user you can comment it out and instead run the below command after the deployment
## docker exec -u root appsec-shared-storage chown -R appuser:appuser /db

View File

@ -22,7 +22,7 @@ services:
appsec-agent:
image: ghcr.io/openappsec/agent:${APPSEC_VERSION}
container_name: appsec-agent
ipc: shareable
ipc: host
restart: unless-stopped
environment:
- SHARED_STORAGE_HOST=appsec-shared-storage
@ -43,7 +43,7 @@ services:
appsec-nginx-proxy-manager:
container_name: appsec-nginx-proxy-manager
image: ghcr.io/openappsec/nginx-proxy-manager-centrally-managed-attachment:${APPSEC_VERSION}
ipc: service:appsec-agent
ipc: host
restart: unless-stopped
ports:
- 80:80 # Public HTTP Port
@ -69,7 +69,7 @@ services:
- standalone
image: ghcr.io/openappsec/smartsync-shared-files:${APPSEC_VERSION}
container_name: appsec-shared-storage
ipc: service:appsec-agent
ipc: host
restart: unless-stopped
## if you do not want to run this container as "root" user you can comment it out and instead run the below command after the deployment
## docker exec -u root appsec-shared-storage chown -R appuser:appuser /db

View File

@ -22,7 +22,7 @@ services:
appsec-agent:
image: ghcr.io/openappsec/agent:${APPSEC_VERSION}
container_name: appsec-agent
ipc: service:appsec-nginx-proxy-manager
ipc: host
network_mode: service:appsec-nginx-proxy-manager
restart: unless-stopped
environment:
@ -44,7 +44,7 @@ services:
appsec-nginx-proxy-manager:
container_name: appsec-nginx-proxy-manager
image: ghcr.io/openappsec/nginx-proxy-manager-attachment:${APPSEC_VERSION}
ipc: shareable
ipc: host
restart: unless-stopped
ports:
- 80:80 # Public HTTP Port
@ -72,7 +72,7 @@ services:
- standalone
image: ghcr.io/openappsec/smartsync-shared-files:${APPSEC_VERSION}
container_name: appsec-shared-storage
ipc: service:appsec-agent
ipc: host
restart: unless-stopped
## if you do not want to run this container as "root" user you can comment it out and instead run the below command after the deployment
## docker exec -u root appsec-shared-storage chown -R appuser:appuser /db

View File

@ -29,7 +29,7 @@ services:
- user_email=${APPSEC_USER_EMAIL}
- AGENT_TOKEN=${APPSEC_AGENT_TOKEN}
- autoPolicyLoad=${APPSEC_AUTO_POLICY_LOAD}
ipc: shareable
ipc: host
volumes:
- ${APPSEC_CONFIG}:/etc/cp/conf
- ${APPSEC_DATA}:/etc/cp/data
@ -62,7 +62,7 @@ services:
- standalone
image: ghcr.io/openappsec/smartsync-shared-files:${APPSEC_VERSION}
container_name: appsec-shared-storage
ipc: service:appsec-agent-nginx-unified
ipc: host
restart: unless-stopped
## if you do not want to run this container as "root" user you can comment it out and instead run the below command after the deployment
## docker exec -u root appsec-shared-storage chown -R appuser:appuser /db

View File

@ -28,24 +28,25 @@ services:
- user_email=${APPSEC_USER_EMAIL}
- AGENT_TOKEN=${APPSEC_AGENT_TOKEN}
- autoPolicyLoad=${APPSEC_AUTO_POLICY_LOAD}
- registered_server="NGINX Server"
ipc: shareable
- registered_server="NGINX"
ipc: host
restart: unless-stopped
volumes:
- ${APPSEC_CONFIG}:/etc/cp/conf
- ${APPSEC_DATA}:/etc/cp/data
- ${APPSEC_LOGS}:/var/log/nano_agent
- ${APPSEC_LOCALCONFIG}:/ext/appsec
- shm-volume:/dev/shm/check-point
command: /cp-nano-agent
appsec-nginx:
image: ghcr.io/openappsec/nginx-attachment:${APPSEC_VERSION}
container_name: appsec-nginx
ipc: service:appsec-agent
ipc: host
restart: unless-stopped
volumes:
- ${NGINX_CONFIG}:/etc/nginx/conf.d
- shm-volume:/dev/shm/check-point
## advanced configuration - volume mount for nginx.conf file:
## To change global instructions it's possible to also mount your own nginx.conf file by uncommenting the line below
## then specify a desired local folder for NGINX_CONF_FILE in the .env file.
@ -73,7 +74,7 @@ services:
- standalone
image: ghcr.io/openappsec/smartsync-shared-files:${APPSEC_VERSION}
container_name: appsec-shared-storage
ipc: service:appsec-agent
ipc: host
restart: unless-stopped
## if you do not want to run this container as "root" user you can comment it out and instead run the below command after the deployment
## docker exec -u root appsec-shared-storage chown -R appuser:appuser /db
@ -123,6 +124,13 @@ services:
profiles:
- juiceshop
volumes:
shm-volume:
driver: local
driver_opts:
type: tmpfs
device: tmpfs
## advanced configuration: learning_nfs volume for nfs storage in shared_storage container
##
## when configuring nfs storage in shared_storage container configuration above, make sure to also specify learning_nfs volume (see example below for using AWS EFS storage)

View File

@ -29,8 +29,8 @@ services:
- user_email=${APPSEC_USER_EMAIL}
- AGENT_TOKEN=${APPSEC_AGENT_TOKEN}
- autoPolicyLoad=${APPSEC_AUTO_POLICY_LOAD}
- registered_server=SWAG Server
ipc: shareable
- registered_server=SWAG
ipc: host
volumes:
- ${APPSEC_CONFIG}:/etc/cp/conf
- ${APPSEC_DATA}:/etc/cp/data
@ -41,7 +41,7 @@ services:
appsec-swag:
image: ghcr.io/openappsec/swag-attachment:latest
container_name: appsec-swag
ipc: service:appsec-agent
ipc: host
restart: unless-stopped
cap_add:
- NET_ADMIN
@ -83,7 +83,7 @@ services:
- standalone
image: ghcr.io/openappsec/smartsync-shared-files:${APPSEC_VERSION}
container_name: appsec-shared-storage
ipc: service:appsec-agent
ipc: host
restart: unless-stopped
## if you do not want to run this container as "root" user you can comment it out and instead run the below command after the deployment
## docker exec -u root appsec-shared-storage chown -R appuser:appuser /db

View File

@ -11,7 +11,7 @@ services:
- user_email=${USER_EMAIL}
# - AGENT_TOKEN=${APPSEC_AGENT_TOKEN}
- autoPolicyLoad=${APPSEC_AUTO_POLICY_LOAD}
- registered_server="NGINX Server"
- registered_server="NGINX"
ipc: shareable
volumes:
- ${APPSEC_CONFIG}:/etc/cp/conf

View File

@ -30,7 +30,7 @@ services:
restart: unless-stopped
environment:
- user_email=user@email.com # adjust with your own email
- registered_server=SWAG Server
- registered_server="SWAG"
# if autoPolicyLoad is set to true, open-appsec will apply
# changes in local_policy.yaml automatically
- autoPolicyLoad=true

View File

@ -0,0 +1,9 @@
routes:
-
uri: /
upstream:
nodes:
"juiceshop-backend:3000": 1
type: roundrobin
#END

View File

@ -0,0 +1,56 @@
static_resources:
listeners:
- name: listener_0
address:
socket_address:
address: 0.0.0.0
port_value: 80
filter_chains:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress_http
http_filters:
## The following 10 lines are required to load the envoy attachment filter for open-appsec
- name: envoy.filters.http.golang
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.golang.v3alpha.Config
library_id: cp_nano_filter
library_path: "/usr/lib/libenvoy_attachment.so"
plugin_name: cp_nano_filter
plugin_config:
"@type": type.googleapis.com/xds.type.v3.TypedStruct
value:
prefix_localreply_body: "Configured local reply from go"
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
##
## The following lines allow you to deploy routing of ingress traffic to the optional juice-shop example container available in the open-appsec docker-compose.yaml file.
##
route_config:
name: local_route
virtual_hosts:
- name: local_service
domains: ["*"]
routes:
- match:
prefix: "/"
route:
cluster: juiceshop
clusters:
- name: juiceshop
type: STRICT_DNS
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: juiceshop
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: juiceshop-backend
port_value: 3000

View File

@ -0,0 +1,9 @@
_format_version: "3.0"
services:
- name: juiceshop-service
url: http://juiceshop-backend:3000
routes:
- name: juiceshop-route
paths:
- /

View File

@ -0,0 +1,84 @@
## Version 2024/07/16 - Changelog: https://github.com/linuxserver/docker-swag/commits/master/root/defaults/nginx/site-confs/default.conf.sample
# redirect all traffic to https
server {
listen 80 default_server;
listen [::]:80 default_server;
location / {
return 301 https://$host$request_uri;
}
}
# main server block
server {
listen 443 ssl default_server;
listen [::]:443 ssl default_server;
server_name _;
include /config/nginx/ssl.conf;
# root /config/www;
# index index.html index.htm index.php;
# enable subfolder method reverse proxy confs
include /config/nginx/proxy-confs/*.subfolder.conf;
# enable for ldap auth (requires ldap-location.conf in the location block)
#include /config/nginx/ldap-server.conf;
# enable for Authelia (requires authelia-location.conf in the location block)
#include /config/nginx/authelia-server.conf;
# enable for Authentik (requires authentik-location.conf in the location block)
#include /config/nginx/authentik-server.conf;
#location / {
# enable for basic auth
#auth_basic "Restricted";
#auth_basic_user_file /config/nginx/.htpasswd;
# enable for ldap auth (requires ldap-server.conf in the server block)
#include /config/nginx/ldap-location.conf;
# enable for Authelia (requires authelia-server.conf in the server block)
#include /config/nginx/authelia-location.conf;
# enable for Authentik (requires authentik-server.conf in the server block)
#include /config/nginx/authentik-location.conf;
# try_files $uri $uri/ /index.html /index.htm /index.php$is_args$args;
#}
location ~ ^(.+\.php)(.*)$ {
# enable the next two lines for http auth
#auth_basic "Restricted";
#auth_basic_user_file /config/nginx/.htpasswd;
# enable for ldap auth (requires ldap-server.conf in the server block)
#include /config/nginx/ldap-location.conf;
# enable for Authelia (requires authelia-server.conf in the server block)
#include /config/nginx/authelia-location.conf;
# enable for Authentik (requires authentik-server.conf in the server block)
#include /config/nginx/authentik-location.conf;
fastcgi_split_path_info ^(.+\.php)(.*)$;
if (!-f $document_root$fastcgi_script_name) { return 404; }
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
include /etc/nginx/fastcgi_params;
}
# deny access to .htaccess/.htpasswd files
location ~ /\.ht {
deny all;
}
}
# enable subdomain method reverse proxy confs
include /config/nginx/proxy-confs/*.subdomain.conf;
# enable proxy cache for auth
proxy_cache_path cache/ keys_zone=auth_cache:10m;

View File

@ -0,0 +1,22 @@
location / {
# enable the next two lines for http auth
#auth_basic "Restricted";
#auth_basic_user_file /config/nginx/.htpasswd;
# enable for ldap auth (requires ldap-server.conf in the server block)
#include /config/nginx/ldap-location.conf;
# enable for Authelia (requires authelia-server.conf in the server block)
#include /config/nginx/authelia-location.conf;
# enable for Authentik (requires authentik-server.conf in the server block)
#include /config/nginx/authentik-location.conf;
include /config/nginx/proxy.conf;
include /config/nginx/resolver.conf;
set $upstream_app juiceshop-backend;
set $upstream_port 3000;
set $upstream_proto http;
proxy_pass $upstream_proto://$upstream_app:$upstream_port;
}

View File

@ -11,6 +11,7 @@ set(COMMON_LIBRARIES "-lngen_core;-lcompression_utils;-lssl;-lcrypto;-lz;-lboost
include(packaging.cmake)
add_subdirectory(orchestration)
add_subdirectory(prometheus)
add_subdirectory(agent_cache)
add_subdirectory(http_transaction_handler)
add_subdirectory(attachment_registration_manager)

View File

@ -29,4 +29,5 @@ cpview_metric_provider="cpviewMetricProvider 8282"
hello_world="hello_world"
crowdsec_aux="crowdsecAux 8081"
central_nginx_manager="centralNginxManager 7555"
prometheus="prometheus 7465"
# ## Please do not remove this comment - newline at end of file required.

View File

@ -29,8 +29,10 @@ is_wlp_orchestration="false"
ORCHESTRATION_EXE_SOURCE_PATH="./bin/orchestration_comp"
NGINX_METADAT_EXTRACTOR_PATH="./scripts/cp-nano-makefile-generator.sh"
NGINX_FRESH_METADATA_EXTRACTOR_PATH="./scripts/cp-nano-makefile-generator-fresh.sh"
ORCHESTRATION_FILE_NAME="cp-nano-orchestration"
NGINX_METADDATA_EXTRACTOR_NAME="cp-nano-makefile-generator.sh"
NGINX_FRESH_METADATA_EXTRACTOR_NAME="cp-nano-makefile-generator-fresh.sh"
GET_CLOUD_METADATA_PATH="get-cloud-metadata.sh"
AGENT_UNINSTALL="cp-agent-uninstall.sh"
ORCHESTRATION_NAME="orchestration"
@ -110,8 +112,14 @@ if [ $retval -eq 0 ]; then
fi
if [ $var_gaia_release -eq 0 ] || [ $var_mds_release -eq 0 ]; then
var_arch="gaia"
var_arch_flag="--gaia"
arch=$(uname -a | awk '{print $(NF -1) }')
if test "${arch}" == "aarch64"; then
var_arch="gaia_arm"
var_arch_flag="--gaia_arm"
else
var_arch="gaia"
var_arch_flag="--gaia"
fi
elif [ $var_alpine_release -eq 0 ]; then
var_is_alpine=true
else
@ -322,7 +330,7 @@ while true; do
LOG_FILE_PATH=$1
fi
echo "Log files path: ${LOG_FILE_PATH}"
elif [ "$1" = "--arm64_trustbox" ] || [ "$1" = "--arm64_linaro" ] || [ "$1" = "--arm32_rpi" ] || [ "$1" = "--gaia" ] || [ "$1" = "--smb_mrv_v1" ] || [ "$1" = "--smb_sve_v2" ] || [ "$1" = "--smb_thx_v3" ] || [ "$1" = "--x86" ] || [ "$1" = "./orchestration_package.sh" ]; then
elif [ "$1" = "--arm64_trustbox" ] || [ "$1" = "--arm64_linaro" ] || [ "$1" = "--arm32_rpi" ] || [ "$1" = "--gaia" ] || [ "$1" = "--gaia_arm" ] || [ "$1" = "--smb_mrv_v1" ] || [ "$1" = "--smb_sve_v2" ] || [ "$1" = "--smb_thx_v3" ] || [ "$1" = "--x86" ] || [ "$1" = "./orchestration_package.sh" ]; then
shift
continue
elif [ "$1" = "--skip_registration" ]; then
@ -330,6 +338,10 @@ while true; do
elif [ "$1" = "--cloud-storage" ]; then
shift
var_cloud_storage=$1
elif [ "$1" = "--only_unpack_lib64_path" ]; then
shift
USR_LIB_PATH=$1
var_only_unpack_lib64="set"
elif echo "$1" | grep -q ${FORCE_CLEAN_FLAG}; then
var_upgrade_mode=
elif echo "$1" | grep -q ${DEBUG_FLAG}; then
@ -347,7 +359,7 @@ done
# VS ID argument is available only on install, for other actions, extract it from the package location
if [ -z "$VS_ID" ]; then
parent_pid=$PPID
parent_cmdline=$(ps -o cmd= -p "$parent_pid")
parent_cmdline=$(cat /proc/"$parent_pid"/cmdline | tr '\0' ' ')
parent_dir=$(dirname "$parent_cmdline")
packages_folder=$(dirname "$parent_dir")
vs_folder=$(dirname "$packages_folder")
@ -416,7 +428,7 @@ if command -v which &>/dev/null; then
var_which_cmd_exists=1
fi
if [ $var_arch != "gaia" ] && [ $var_which_cmd_exists -eq 1 ]; then
if [ $var_arch != "gaia" ] && [ $var_arch != "gaia_arm" ] && [ $var_which_cmd_exists -eq 1 ]; then
if [ -n "$(which systemctl)" ]; then
var_startup_service="systemd"
else
@ -488,26 +500,26 @@ cp_copy() # Initials - cc
cp_print "Destination md5, after the copy:\n$DEST_AFTER_COPY"
}
update_cloudguard_appsec_manifest()
update_openappsec_manifest()
{
if [ -z ${CLOUDGUARD_APPSEC_STANDALONE} ] || [ -z ${DOCKER_RPM_ENABLED} ]; then
if [ -z ${OPENAPPSEC_NANO_AGENT} ] && { [ -z ${CLOUDGUARD_APPSEC_STANDALONE} ] || [ -z ${DOCKER_RPM_ENABLED} ]; }; then
return
fi
selected_cloudguard_appsec_manifest_path="${TMP_FOLDER}/cloudguard_appsec_manifest.json"
if [ "${DOCKER_RPM_ENABLED}" = "false" ]; then
selected_cloudguard_appsec_manifest_path="${TMP_FOLDER}/self_managed_cloudguard_appsec_manifest.json"
selected_openappsec_manifest_path="${TMP_FOLDER}/openappsec_manifest.json"
if [ "${DOCKER_RPM_ENABLED}" = "false" ] || [ "${OPENAPPSEC_NANO_AGENT}" = "TRUE" ]; then
selected_openappsec_manifest_path="${TMP_FOLDER}/self_managed_openappsec_manifest.json"
fi
if [ ! -f "$selected_cloudguard_appsec_manifest_path" ]; then
if [ ! -f "$selected_openappsec_manifest_path" ]; then
return
fi
cloudguard_appsec_manifest_path="${selected_cloudguard_appsec_manifest_path}.used"
mv "$selected_cloudguard_appsec_manifest_path" "$cloudguard_appsec_manifest_path"
openappsec_manifest_path="${selected_openappsec_manifest_path}.used"
mv "$selected_openappsec_manifest_path" "$openappsec_manifest_path"
fog_host=$(echo "$var_fog_address" | sed 's/https\?:\/\///')
fog_host=${fog_host%/}
sed "s/namespace/${fog_host}/g" ${cloudguard_appsec_manifest_path} > "${FILESYSTEM_PATH}/${CONF_PATH}/manifest.json"
sed "s/namespace/${fog_host}/g" ${openappsec_manifest_path} > "${FILESYSTEM_PATH}/${CONF_PATH}/manifest.json"
}
set_cloud_storage()
@ -564,7 +576,7 @@ install_watchdog_gaia()
# Add cp-nano-watchdog to DB
dbset process:${watchdog_pm_name} t
dbset process:${watchdog_pm_name}:path ${FILESYSTEM_PATH}/${WATCHDOG_PATH}
dbset process:${watchdog_pm_name}:arg:1 --gaia
dbset process:${watchdog_pm_name}:arg:1 ${var_arch_flag}
dbset process:${watchdog_pm_name}:runlevel 1
dbset :save
tellpm ${watchdog_pm_name} t
@ -616,7 +628,7 @@ install_watchdog()
cp_copy service/smb/nano_agent.init /storage/nano_agent/etc/nano_agent.init
chmod +rx /storage/nano_agent/etc/nano_agent.init
elif [ $var_container_mode = false ]; then
if [ $var_arch = "gaia" ]; then
if [ $var_arch = "gaia" ] || [ $var_arch = "gaia_arm" ]; then
cp_exec "ln -s ${FWDIR}/bin/curl_cli ${FWDIR}/bin/curl"
cp_exec "ln -s ${CPDIR}/bin/cpopenssl ${CPDIR}/bin/openssl"
cp_copy watchdog/access_pre_init $INIT_D_PATH/access_pre_init
@ -635,6 +647,9 @@ install_watchdog()
echo "ExecStart=ip netns exec CTX0000${VS_ID} ${FILESYSTEM_PATH}/${WATCHDOG_PATH}/cp-nano-watchdog" >> /etc/systemd/system/${NANO_AGENT_SERVICE_FILE}
fi
echo "Environment=\"FILESYSTEM_PATH=${FILESYSTEM_PATH}\"" >> /etc/systemd/system/${NANO_AGENT_SERVICE_FILE}
if [ -n "${PROMETHEUS}" ] ; then
echo "Environment=\"PROMETHEUS=${PROMETHEUS}\"" >> /etc/systemd/system/${NANO_AGENT_SERVICE_FILE}
fi
cp_exec "systemctl daemon-reload"
cp_exec "systemctl enable nano_agent"
@ -657,7 +672,7 @@ install_watchdog()
cp_exec "$INIT_D_PATH/nano_agent.init start"
elif [ "$is_smb" = "1" ]; then
cp_exec "/storage/nano_agent/etc/nano_agent.init start"
elif [ $var_arch = "gaia" ]; then
elif [ $var_arch = "gaia" ] || [ $var_arch = "gaia_arm" ]; then
install_watchdog_gaia
else
cp_exec "service $NANO_AGENT_SERVICE_NAME start"
@ -773,8 +788,9 @@ upgrade_conf_if_needed()
[ -f "${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg" ] && . "${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg"
previous_mode=$(cat ${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg | grep "orchestration-mode" | cut -d = -f 3 | sed 's/"//')
if ! [ -z "$previous_mode" ]; then
[ -f "${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg" ] && \
previous_mode=$(cat ${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg | grep "orchestration-mode" | cut -d = -f 3 | sed 's/"//')
if ! [ -z "$previous_mode" ]; then
var_orchestration_mode=${previous_mode}
fi
@ -785,7 +801,7 @@ upgrade_conf_if_needed()
cp_exec "cp -f configuration/orchestration.cfg ${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg"
execution_flags="execution_flags=\"--orchestration-mode=${var_orchestration_mode}\""
echo $execution_flags >> ${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg
if [ $var_arch = "gaia" -o "$is_smb" = "1" ]; then
if [ $var_arch = "gaia" -o $var_arch = "gaia_arm" -o "$is_smb" = "1" ]; then
if [ -z "${gaia_ld_path}" ]; then
gaia_ld_path="${LD_LIBRARY_PATH}"
fi
@ -843,6 +859,13 @@ copy_nginx_metadata_script()
cp_exec "chmod +x ${FILESYSTEM_PATH}/${SCRIPTS_PATH}/${NGINX_METADDATA_EXTRACTOR_NAME}"
}
copy_nginx_fresh_metadata_script()
{
cp_copy "$NGINX_FRESH_METADATA_EXTRACTOR_PATH" ${FILESYSTEM_PATH}/${SCRIPTS_PATH}/${NGINX_FRESH_METADATA_EXTRACTOR_NAME}
cp_exec "chmod 700 ${FILESYSTEM_PATH}/${SCRIPTS_PATH}/${NGINX_FRESH_METADATA_EXTRACTOR_NAME}"
cp_exec "chmod +x ${FILESYSTEM_PATH}/${SCRIPTS_PATH}/${NGINX_FRESH_METADATA_EXTRACTOR_NAME}"
}
copy_and_run_cloud_metadata_script()
{
cp_copy "${SCRIPTS_PATH}/$GET_CLOUD_METADATA_PATH" ${FILESYSTEM_PATH}/${SCRIPTS_PATH}/${GET_CLOUD_METADATA_PATH}
@ -919,6 +942,19 @@ get_status_content()
install_orchestration()
{
INSTALLATION_TIME=$(date)
if [ "$var_only_unpack_lib64" = "set" ]; then
cp_exec "mkdir ${USR_LIB_PATH}"
if [ ! -d "$USR_LIB_PATH" ]; then
cp_print "No valid path: ${USR_LIB_PATH}. please do --only_unpack_lib64_path <Path>" ${FORCE_STDOUT}
exit 1
fi
${INSTALL_COMMAND} lib/*.so* ${USR_LIB_PATH}/
${INSTALL_COMMAND} lib/boost/*.so* ${USR_LIB_PATH}/
cp_print "Done successfully doing only unpacking lib64 to Path: ${USR_LIB_PATH}" ${FORCE_STDOUT}
exit 0
fi
if [ "$is_smb" != "1" ]; then
cp_exec "mkdir -p ${USR_LIB_PATH}/cpnano${VS_LIB_SUB_FOLDER}"
else
@ -988,7 +1024,9 @@ install_orchestration()
fi
[ -f "${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg" ] && . "${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg"
previous_mode=$(cat ${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg | grep "orchestration-mode" | cut -d = -f 3 | sed 's/"//')
[ -f "${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg" ] && \
previous_mode=$(cat ${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg | grep "orchestration-mode" | cut -d = -f 3 | sed 's/"//')
if ! [ -z "$previous_mode" ]; then
var_orchestration_mode=${previous_mode}
@ -1001,7 +1039,7 @@ install_orchestration()
cp_exec "cp -f configuration/orchestration.cfg ${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg"
execution_flags="execution_flags=\"--orchestration-mode=${var_orchestration_mode}\""
echo $execution_flags >> ${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg
if [ $var_arch = "gaia" -o "$is_smb" = "1" ]; then
if [ $var_arch = "gaia" -o $var_arch = "gaia_arm" -o "$is_smb" = "1" ]; then
if [ -z "${gaia_ld_path}" ]; then
gaia_ld_path="${LD_LIBRARY_PATH}"
fi
@ -1012,7 +1050,8 @@ install_orchestration()
rm -f "${FILESYSTEM_PATH}/${CONF_PATH}/default_orchestration_flags"
fi
upgrade_conf_if_needed
update_openappsec_manifest
upgrade_conf_if_needed
cp_exec "${FILESYSTEM_PATH}/${WATCHDOG_PATH}/cp-nano-watchdog --un-register ${FILESYSTEM_PATH}/${SERVICE_PATH}/cp-nano-orchestration $var_arch_flag"
if [ "$IS_K8S_ENV" = "true" ]; then
@ -1032,6 +1071,7 @@ install_orchestration()
copy_orchestration_executable
copy_k8s_executable
copy_nginx_metadata_script
copy_nginx_fresh_metadata_script
copy_and_run_cloud_metadata_script
install_watchdog "--upgrade"
@ -1067,7 +1107,7 @@ install_orchestration()
cp_exec "mkdir -p ${LOG_FILE_PATH}/${LOG_PATH}"
cp_exec "mkdir -p ${FILESYSTEM_PATH}/${DATA_PATH}"
update_cloudguard_appsec_manifest
update_openappsec_manifest
if [ ! -f ${FILESYSTEM_PATH}/${DEFAULT_SETTINGS_PATH} ]; then
echo "{\"agentSettings\": []}" > ${FILESYSTEM_PATH}/${DEFAULT_SETTINGS_PATH}
@ -1101,7 +1141,14 @@ install_orchestration()
if [ -z "${var_token}" ] && [ ${var_hybrid_mode} = false ] && [ ${var_offline_mode} = false ] && [ -z ${EGG_MODE} ] && [ ${var_no_otp} = false ]; then
cp_print "Please enter OTP token []:" ${FORCE_STDOUT}
read -r var_token
attempts=0
max_attempts=3
while [ -z "$var_token" ]; do
attempts=$((attempts + 1))
if [ "$attempts" -gt "$max_attempts" ]; then
cp_print "Maximum attempts exceeded. open-appsec Nano Agent registration failed. Failed to get token" ${FORCE_STDOUT}
exit 1
fi
cp_print "You must enter OTP token[]:" ${FORCE_STDOUT}
read -r var_token
done
@ -1120,6 +1167,7 @@ install_orchestration()
copy_orchestration_executable
copy_k8s_executable
copy_nginx_metadata_script
copy_nginx_fresh_metadata_script
copy_and_run_cloud_metadata_script
install_cp_nano_ctl
@ -1201,7 +1249,7 @@ install_orchestration()
execution_flags="execution_flags=\"--orchestration-mode=${var_orchestration_mode}\""
echo $execution_flags >> ${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg
if [ $var_arch = "gaia" -o "$is_smb" = "1" ]; then
if [ $var_arch = "gaia" -o $var_arch = "gaia_arm" -o "$is_smb" = "1" ]; then
sed -i '1i gaia_ld_path='"$LD_LIBRARY_PATH"'' ${FILESYSTEM_PATH}/${SERVICE_PATH}/${ORCHESTRATION_FILE_NAME}.cfg
fi

View File

@ -53,7 +53,12 @@ var_upgarde=false
get_profile_agent_setting_with_default() {
key="$1"
default_value="$2"
value=$(grep -oP "\"key\":\s*\"$key\".*?\"value\":\s*\"[^\"]+\"" $SETTINGS_FILE | sed -E 's/.*"value":\s*"([^"]+)".*/\1/')
value=$(grep -o "\"key\":\s*\"$key\".*?\"value\":\s*\"[^\"]*\"" $SETTINGS_FILE | sed -E 's/.*"value":\s*"([^"]*)".*/\1/')
if [ -z "$value" ]; then
value=$(grep -o "\"$key\":\s*\"[^\"]*\"" $SETTINGS_FILE | sed -E 's/.*"'"$key"'":\s*"([^"]*)".*/\1/')
fi
if [ "$value" = "null" ] || [ -z "$value" ]; then
echo "$default_value"
else

View File

@ -0,0 +1,419 @@
#!/bin/bash
LC_ALL=C
initializeEnvironment()
{
    # Reset every global used by the save/load flows to its default value.
    # Path overrides already injected via the CLI are left untouched.
    if [ "$IS_FILE_PATH_PROVIDED" != YES ]; then
        FILE_NAME_PATH=
        IS_FILE_PATH_PROVIDED=NO
    fi
    TMP_NGINX_VERSION_FILE="/tmp/nginx_version_file.txt"
    if [ "$IS_CONFIG_FILE_PROVIDED" != YES ]; then
        TMP_NGINX_CONFIG_FILE="/tmp/nginx_config_file.txt"
        IS_CONFIG_FILE_PROVIDED=NO
    fi

    SERVER_TYPE="nginx"
    nginx_cmd=nginx

    # Compiler/linker extras recovered later from 'nginx -V'.
    NGX_CC_OPT=
    NGX_LD_OPT=

    # YES/NO feature toggles mirroring nginx ./configure options.
    USE_PCRE=NO
    TEST_BUILD_EPOLL=NO
    USE_THREADS=NO
    HTTP_V3=NO
    HTTP_SSL=NO
    HTTP_GZIP=YES
    HTTP_GUNZIP=NO
    HTTP_GZIP_STATIC=NO
    HTTP_PROXY=YES
    HTTP_GEOIP=NO
    HTTP_GEO=YES
    HTTP_REALIP=NO
    HTTP_DAV=NO
    HTTP_CACHE=YES
    HTTP_UPSTREAM_ZONE=YES
    NGX_COMPAT=NO

    GCC_VERSION=
    NGINX_VERSION=
    RELEASE_VERSION=

    # Zero all 35 module-signature bits (NGX_MODULE_SIGNATURE_0..34).
    sig_idx=0
    while [ "$sig_idx" -le 34 ]; do
        eval "NGX_MODULE_SIGNATURE_${sig_idx}=0"
        sig_idx=$((sig_idx + 1))
    done
}
extract_nginx_version_and_release()
{
    # Capture 'nginx -v' output (nginx writes it to stderr) and extract the
    # dotted version number, plus a human-readable OS release string.
    ${nginx_cmd} -v &> "$TMP_NGINX_VERSION_FILE"
    # Fix: quote the pattern and escape the dots. The original unquoted
    # pattern was subject to shell filename globbing, and the unescaped '.'
    # matched any character (e.g. "1x2y3" would have matched).
    NGINX_VERSION=$(grep -oP "[0-9]+\.[0-9]+\.[0-9]+" "$TMP_NGINX_VERSION_FILE")
    RELEASE_VERSION=$(cat /etc/*-release | grep -i "PRETTY_NAME\|Gaia" | cut -d"\"" -f2)
}
tearDown()
{
    # Remove the scratch files produced while probing the nginx binary.
    # Fix: quote the expansions so paths containing spaces (possible when the
    # caller overrides them via --config_file/--save-location) are removed
    # correctly instead of being word-split. -f keeps this quiet when a file
    # was never created.
    rm -f "${TMP_NGINX_VERSION_FILE}"
    rm -f "${TMP_NGINX_CONFIG_FILE}"
}
filter_cc_opt() {
    # Rebuild the cc-opt string without reproducible-build path-remap flags
    # (-ffile-prefix-map / -fdebug-prefix-map), which are machine-specific
    # and would poison a configuration meant to be replayed elsewhere.
    # Relies on word splitting of the (intentionally unquoted) arguments.
    CC_OPT=
    for flag in ${@}; do
        case "${flag}" in
            -ffile-prefix-map*)
                echo "removing ${flag}"
                continue
                ;;
            -fdebug-prefix-map*)
                echo "removing ${flag}"
                continue
                ;;
        esac
        # Append with a single separating space, avoiding a leading one.
        CC_OPT="${CC_OPT:+${CC_OPT} }${flag}"
    done
    if [[ "$@" != "${CC_OPT}" ]]; then
        echo "Notice: reduced CC_OPT is '${CC_OPT}'"
    fi
    NGX_CC_OPT="${CC_OPT}"
}
extract_gcc()
{
    # Derive a "gcc-N" toolchain name from nginx's "built by gcc X.Y.Z ..."
    # line (passed as $1).
    # Fix: quote the whole grep pattern. The original concatenated a quoted
    # "gcc " with an unquoted [0-9]+, leaving the bracket expression exposed
    # to shell filename globbing in the current directory.
    GCC_VERSION=$(echo "$1" | grep -oP "gcc [0-9]+" | tr ' ' '-')
    if [[ "$GCC_VERSION" == "gcc-4" ]]; then
        # gcc-4 is remapped to the minimum version this build flow supports.
        GCC_VERSION=gcc-5
    elif [[ "$GCC_VERSION" == "gcc-10" ]] || [[ "$GCC_VERSION" == "gcc-11" ]] || [[ "$GCC_VERSION" == "gcc-12" ]] || [[ "$GCC_VERSION" == "gcc-13" ]]; then
        # Newer toolchains are mapped down to gcc-8.
        # NOTE(review): presumably for compiler availability/ABI parity with
        # the prebuilt artifacts — confirm against the build environment.
        GCC_VERSION=gcc-8
    fi
}
extract_cc_opt_ld_opt() {
    # Pull the single-quoted --with-cc-opt / --with-ld-opt values out of the
    # "configure arguments:" line ($1) into NGX_CC_OPT / NGX_LD_OPT.
    local loc_options="$1"
    NGX_CC_OPT=$(echo "$loc_options" | sed -n "s/.*--with-cc-opt='\([^']*\)'.*/\1/p")
    # filter_cc_opt strips path-remap flags and writes the reduced string
    # back into NGX_CC_OPT itself.
    filter_cc_opt "$NGX_CC_OPT"
    NGX_LD_OPT=$(echo "$loc_options" | sed -n "s/.*--with-ld-opt='\([^']*\)'.*/\1/p")
    # Fix: removed two no-ops from the original — a self-assignment of
    # NGX_CC_OPT after filter_cc_opt, and an if-block that reassigned
    # NGX_LD_OPT to its own value. Behavior is unchanged.
}
read_config_flags() {
    # Parse nginx "configure arguments:" tokens (passed as positional args)
    # into the YES/NO feature variables consumed by set_signatures and
    # decode_configuration_flags. Unrecognized options are ignored.
    for option; do
        # Re-quote any option whose value contains spaces.
        # NOTE(review): 'opt' is accumulated but never read in this script —
        # kept for parity with nginx's own configure parser; confirm before
        # removing.
        opt="$opt `echo $option | sed -e \"s/\(--[^=]*=\)\(.* .*\)/\1'\2'/\"`"

        # Split "--name=value" style options; 'value' is likewise unused
        # below but mirrors the upstream nginx configure idiom.
        case "$option" in
            -*=*) value=`echo "$option" | sed -e 's/[-_a-zA-Z0-9]*=//'` ;;
            *) value="" ;;
        esac

        # Map each recognized configure flag onto its feature toggle.
        case "$option" in
            --with-http_realip_module) HTTP_REALIP=YES ;;
            --with-http_dav_module) HTTP_DAV=YES ;;
            --with-compat) NGX_COMPAT=YES ;;
            --without-http-cache) HTTP_CACHE=NO ;;
            --without-http_upstream_zone_module) HTTP_UPSTREAM_ZONE=NO ;;
            --without-http_geo_module) HTTP_GEO=NO ;;
            --with-http_geoip_module) HTTP_GEOIP=YES ;;
            --with-http_geoip_module=dynamic) HTTP_GEOIP=YES ;;
            --without-http_proxy_module) HTTP_PROXY=NO ;;
            --with-http_gunzip_module) HTTP_GUNZIP=YES ;;
            --with-http_gzip_static_module) HTTP_GZIP_STATIC=YES ;;
            --without-http_gzip_module) HTTP_GZIP=NO ;;
            --with-http_v3_module) HTTP_V3=YES ;;
            --with-threads) USE_THREADS=YES ;;
            --test-build-epoll) TEST_BUILD_EPOLL=YES ;;
            --with-pcre) USE_PCRE=YES ;;
            --with-http_ssl_module) HTTP_SSL=YES ;;
            *)
                # echo "$0: uninteresting option: \"$option\""
            ;;
        esac
    done

    # --with-compat pins a fixed feature set, overriding whatever individual
    # flags were parsed above.
    if [ "$NGX_COMPAT" = YES ]; then
        HTTP_GZIP=YES
        HTTP_DAV=NO
        HTTP_REALIP=NO
        HTTP_PROXY=YES
        HTTP_GEOIP=NO
        HTTP_GEO=YES
        HTTP_UPSTREAM_ZONE=YES
        HTTP_GUNZIP=NO
        HTTP_GZIP_STATIC=NO
        HTTP_SSL=NO
        USE_THREADS=NO
    fi
}
decode_configuration_flags() {
    # Reconstruct a ./configure flag string from the YES/NO feature variables
    # (the inverse of read_config_flags). Each appended flag is prefixed with
    # a space, so the echoed result keeps its historical leading space.
    DECODED_CONFIGURATION_FLAGS=""
    [ -n "$GCC_VERSION" ] && DECODED_CONFIGURATION_FLAGS+=" --with-cc=/usr/bin/${GCC_VERSION}"
    [ "$HTTP_REALIP" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --with-http_realip_module"
    [ "$HTTP_DAV" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --with-http_dav_module"
    [ "$NGX_COMPAT" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --with-compat"
    [ "$HTTP_CACHE" = NO ] && DECODED_CONFIGURATION_FLAGS+=" --without-http-cache"
    [ "$HTTP_UPSTREAM_ZONE" = NO ] && DECODED_CONFIGURATION_FLAGS+=" --without-http_upstream_zone_module"
    [ "$HTTP_GEO" = NO ] && DECODED_CONFIGURATION_FLAGS+=" --without-http_geo_module"
    # Both geoip spellings are accepted on input, so both are emitted here.
    [ "$HTTP_GEOIP" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --with-http_geoip_module --with-http_geoip_module=dynamic"
    [ "$HTTP_PROXY" = NO ] && DECODED_CONFIGURATION_FLAGS+=" --without-http_proxy_module"
    [ "$HTTP_GUNZIP" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --with-http_gunzip_module"
    [ "$HTTP_GZIP_STATIC" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --with-http_gzip_static_module"
    [ "$HTTP_GZIP" = NO ] && DECODED_CONFIGURATION_FLAGS+=" --without-http_gzip_module"
    [ "$HTTP_V3" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --with-http_v3_module"
    [ "$USE_THREADS" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --with-threads"
    [ "$TEST_BUILD_EPOLL" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --test-build-epoll"
    [ "$USE_PCRE" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --with-pcre"
    [ "$HTTP_SSL" = YES ] && DECODED_CONFIGURATION_FLAGS+=" --with-http_ssl_module"
    echo "$DECODED_CONFIGURATION_FLAGS"
}
set_signatures() {
    # Populate the NGX_MODULE_SIGNATURE_<i> bits from the detected feature
    # set. Each index corresponds to one position in nginx's module-signature
    # string. NOTE(review): the index->feature mapping below is asserted by
    # this script alone — confirm against the targeted nginx sources before
    # changing any of it.

    # Bits that are always set for the builds this script supports.
    NGX_MODULE_SIGNATURE_9=1
    NGX_MODULE_SIGNATURE_10=1
    NGX_MODULE_SIGNATURE_12=1
    NGX_MODULE_SIGNATURE_17=0
    NGX_MODULE_SIGNATURE_25=1
    NGX_MODULE_SIGNATURE_27=1
    if [ "$USE_PCRE" = YES ]; then
        NGX_MODULE_SIGNATURE_23=1
    fi
    if [ "$TEST_BUILD_EPOLL" = YES ]; then
        NGX_MODULE_SIGNATURE_5=1
        NGX_MODULE_SIGNATURE_6=1
    fi
    if [ "$USE_THREADS" = YES ]; then
        NGX_MODULE_SIGNATURE_22=1
    fi
    if [ "$HTTP_V3" = YES ]; then
        NGX_MODULE_SIGNATURE_18=1
        NGX_MODULE_SIGNATURE_24=1
    fi
    # Any of the gzip-family modules flips the same bit.
    if [ "$HTTP_GUNZIP" = YES ] || [ "$HTTP_GZIP" = YES ] || [ "$HTTP_GZIP_STATIC" = YES ]; then
        NGX_MODULE_SIGNATURE_26=1
    fi
    if [ "$HTTP_REALIP" = YES ]; then
        NGX_MODULE_SIGNATURE_28=1
        NGX_MODULE_SIGNATURE_29=1
    fi
    if [ "$HTTP_DAV" = YES ]; then
        NGX_MODULE_SIGNATURE_31=1
    fi
    if [ "$HTTP_CACHE" = YES ]; then
        NGX_MODULE_SIGNATURE_32=1
    fi
    if [ "$HTTP_UPSTREAM_ZONE" = YES ]; then
        NGX_MODULE_SIGNATURE_33=1
    fi
    # --with-compat forces a fixed superset of bits, mirroring the fixed
    # feature set imposed in read_config_flags.
    if [ "$NGX_COMPAT" = YES ]; then
        NGX_MODULE_SIGNATURE_3=1
        NGX_MODULE_SIGNATURE_4=1
        NGX_MODULE_SIGNATURE_18=1
        NGX_MODULE_SIGNATURE_22=1
        NGX_MODULE_SIGNATURE_24=1
        NGX_MODULE_SIGNATURE_26=1
        NGX_MODULE_SIGNATURE_28=1
        NGX_MODULE_SIGNATURE_29=1
        NGX_MODULE_SIGNATURE_30=1
        NGX_MODULE_SIGNATURE_31=1
        NGX_MODULE_SIGNATURE_33=1
        NGX_MODULE_SIGNATURE_34=1
    fi
}
combine_signatures_into_bash() {
    # Concatenate the 35 per-module signature bits, in index order, onto
    # NGX_SCRIPT_VERIFICATION_DATA — producing a single 0/1 string.
    for sig_idx in $(seq 0 34); do
        eval "NGX_SCRIPT_VERIFICATION_DATA=\"\${NGX_SCRIPT_VERIFICATION_DATA}\${NGX_MODULE_SIGNATURE_${sig_idx}}\""
    done
}
print_flags() {
    # Append every collected setting as a NAME=value line to FILE_NAME_PATH
    # (the .mk file later consumed by read_chkp_mk_file). The file was
    # removed by save_config beforehand, so these appends build it fresh.
    echo "Saving configuration to ${FILE_NAME_PATH}"
    echo -e "NGX_SCRIPT_VERIFICATION_DATA=$NGX_SCRIPT_VERIFICATION_DATA" >> ${FILE_NAME_PATH}
    # Lift the module signature straight out of the nginx binary: it is the
    # printable string containing "8,4,8". NOTE(review): assumes that token
    # appears only in the signature string — confirm.
    echo -e "NGX_MODULE_SIGNATURE=$(strings $(which ${nginx_cmd}) | grep -F '8,4,8')" >> ${FILE_NAME_PATH}
    echo -e "USE_PCRE=$USE_PCRE" >> ${FILE_NAME_PATH}
    echo -e "TEST_BUILD_EPOLL=$TEST_BUILD_EPOLL" >> ${FILE_NAME_PATH}
    echo -e "USE_THREADS=$USE_THREADS" >> ${FILE_NAME_PATH}
    echo -e "HTTP_V3=$HTTP_V3" >> ${FILE_NAME_PATH}
    echo -e "HTTP_SSL=$HTTP_SSL" >> ${FILE_NAME_PATH}
    echo -e "HTTP_GZIP=$HTTP_GZIP" >> ${FILE_NAME_PATH}
    echo -e "HTTP_GUNZIP=$HTTP_GUNZIP" >> ${FILE_NAME_PATH}
    echo -e "HTTP_GZIP_STATIC=$HTTP_GZIP_STATIC" >> ${FILE_NAME_PATH}
    echo -e "HTTP_PROXY=$HTTP_PROXY" >> ${FILE_NAME_PATH}
    echo -e "HTTP_GEOIP=$HTTP_GEOIP" >> ${FILE_NAME_PATH}
    echo -e "HTTP_GEO=$HTTP_GEO" >> ${FILE_NAME_PATH}
    echo -e "HTTP_REALIP=$HTTP_REALIP" >> ${FILE_NAME_PATH}
    echo -e "HTTP_DAV=$HTTP_DAV" >> ${FILE_NAME_PATH}
    echo -e "HTTP_CACHE=$HTTP_CACHE" >> ${FILE_NAME_PATH}
    echo -e "HTTP_UPSTREAM_ZONE=$HTTP_UPSTREAM_ZONE" >> ${FILE_NAME_PATH}
    echo -e "NGX_COMPAT=$NGX_COMPAT" >> ${FILE_NAME_PATH}
    echo -e "NGX_CC_OPT=$NGX_CC_OPT" >> ${FILE_NAME_PATH}
    echo -e "NGX_LD_OPT=$NGX_LD_OPT" >> ${FILE_NAME_PATH}
    echo -e "GCC_VERSION=$GCC_VERSION" >> ${FILE_NAME_PATH}
    echo -e "NGINX_VERSION=$NGINX_VERSION" >> ${FILE_NAME_PATH}
    echo -e "RELEASE_VERSION=$RELEASE_VERSION" >> ${FILE_NAME_PATH}
}
save_config() {
    # "save" flow: probe the local nginx binary (or a provided 'nginx -V'
    # capture) and write every derived setting into a .mk file.
    initializeEnvironment
    extract_nginx_version_and_release
    # Default output name: ./<nginx version>.mk in the current directory.
    if [ "$IS_FILE_PATH_PROVIDED" = NO ]; then
        FILE_NAME_PATH="$(pwd)/$NGINX_VERSION.mk"
    fi
    # Start from a clean file; print_flags only appends.
    rm -f ${FILE_NAME_PATH}
    if [ "$IS_CONFIG_FILE_PROVIDED" = NO ]; then
        # 'nginx -V' writes to stderr, hence the &> redirect.
        ${nginx_cmd} -V &> "$TMP_NGINX_CONFIG_FILE"
    fi
    gcc_argument=$(cat $TMP_NGINX_CONFIG_FILE | grep "built by gcc")
    extract_gcc "${gcc_argument}"
    configure_arguments=$(cat $TMP_NGINX_CONFIG_FILE | grep "^configure arguments:" | sed 's/^configure arguments: //')
    extract_cc_opt_ld_opt "$configure_arguments"
    # Strip the already-captured cc/ld opt values so their embedded spaces
    # don't confuse the word-splitting flag parser below.
    configure_arguments=$(echo "$configure_arguments" | sed "s/--with-cc-opt='[^']*'//" | sed "s/--with-ld-opt='[^']*'//" | tr -s ' ')
    read_config_flags $configure_arguments
    set_signatures
    combine_signatures_into_bash
    print_flags
}
read_chkp_mk_file() {
    # Load a saved .mk file: every "NAME=value" line (NAME restricted to
    # [A-Z_]) is exported into the environment. Blank lines and '#' comments
    # are skipped. Exits the script when the file does not exist.
    local mk_path="$1"
    if [[ ! -f "$mk_path" ]]; then
        echo "Error: File '$mk_path' not found."
        exit 1
    fi
    while IFS= read -r mk_line; do
        case "$mk_line" in
            "" | \#*) continue ;;
        esac
        if [[ "$mk_line" =~ ^([A-Z_]+)=(.*)$ ]]; then
            export "${BASH_REMATCH[1]}"="${BASH_REMATCH[2]}"
        fi
    done < "$mk_path"
}
print_ngx_config() {
    # "load" flow: import the variables stored in a saved .mk file, then
    # emit the matching ./configure flag string on stdout.
    local mk_file="$1"
    read_chkp_mk_file "$mk_file"
    decode_configuration_flags
}
parse_save_arguments() {
    # Consume the CLI options of the "save" sub-command:
    #   --config_file <path>    use an existing 'nginx -V' capture instead of
    #                           invoking nginx
    #   --save-location <path>  where to write the resulting .mk file
    # Any other token aborts with a usage message.
    while [[ $# -gt 0 ]]; do
        arg_name="$1"
        arg_value="$2"
        case "$arg_name" in
            --config_file)
                if [[ -z "$arg_value" ]]; then
                    echo "Error: --config_file requires a value."
                    exit 1
                fi
                TMP_NGINX_CONFIG_FILE="$arg_value"
                IS_CONFIG_FILE_PROVIDED=YES
                shift 2
                ;;
            --save-location)
                if [[ -z "$arg_value" ]]; then
                    echo "Error: --save-location requires a value."
                    exit 1
                fi
                FILE_NAME_PATH="$arg_value"
                IS_FILE_PATH_PROVIDED=YES
                shift 2
                ;;
            *)
                echo "Error: Invalid argument '$arg_name'."
                echo "Usage: $0 save [--config_file <file_path>] [--save-location <file_path>]"
                exit 1
                ;;
        esac
    done
}
# Entry point: dispatch on the first CLI argument.
#   save [--config_file <path>] [--save-location <path>]
#       Snapshot the local nginx build configuration into a .mk file.
#   load <file_path>
#       Print the ./configure flags recorded in a saved .mk file.
if [[ "$1" == "save" ]]; then
    shift
    parse_save_arguments "$@"
    save_config
elif [[ "$1" == "load" ]]; then
    if [[ -n "$2" ]]; then
        print_ngx_config "$2"
    else
        echo "Error: Missing file path for 'load' command."
        echo "Usage: $0 load <file_path>"
        exit 1
    fi
else
    echo "Error: Invalid command."
    echo "Usage: $0 <save|load> [file_path]"
    exit 1
fi

30
nodes/prometheus/CMakeLists.txt Executable file
View File

@ -0,0 +1,30 @@
# Standalone Prometheus nano-service: the executable plus its install package.
add_subdirectory(package)

add_executable(prometheus main.cc)

# --start-group/--end-group lets the linker resolve circular references
# between these static libraries in a single pass.
# NOTE(review): keyword-less target_link_libraries matches the project-wide
# convention here; do not mix in PRIVATE/PUBLIC on this target elsewhere.
target_link_libraries(prometheus
    -Wl,--start-group
    ${COMMON_LIBRARIES}
    generic_rulebase
    generic_rulebase_evaluators
    ip_utilities
    version
    signal_handler
    prometheus_comp
    http_transaction_data
    -Wl,--end-group
)

add_dependencies(prometheus ngen_core)

# Installed twice: into the shared bin/ tree and into the self-contained
# service-package layout consumed by gen_package below.
install(TARGETS prometheus DESTINATION bin)
install(TARGETS prometheus DESTINATION prometheus_service/bin)

# Project helper (defined elsewhere) that wraps prometheus_service/ into a
# self-extracting installer script.
gen_package(
    install-cp-nano-service-prometheus.sh
    prometheus_service
    ./install-cp-nano-prometheus.sh
    Check Point Prometheus Agent Version ${PACKAGE_VERSION} Install Package
)

15
nodes/prometheus/main.cc Executable file
View File

@ -0,0 +1,15 @@
#include "components_list.h"
#include "prometheus_comp.h"

// Entry point for the standalone Prometheus nano-service.
int
main(int argc, char **argv)
{
    // Node assembled from the generic component list plus the Prometheus
    // component.
    NodeComponents<PrometheusComp> comps;

    // Expose the service's REST API as the primary routine on port 7465
    // (alternative 7466) and allow GET requests from external IPs.
    // NOTE(review): the exact semantics of these registered global names are
    // defined by the consuming infrastructure — confirm there before changing.
    comps.registerGlobalValue<bool>("Is Rest primary routine", true);
    comps.registerGlobalValue<uint>("Nano service API Port Primary", 7465);
    comps.registerGlobalValue<uint>("Nano service API Port Alternative", 7466);
    comps.registerGlobalValue<bool>("Nano service API Allow Get From External IP", true);

    return comps.run("Prometheus Service", argc, argv);
}

View File

@ -0,0 +1,4 @@
# Ship the installer script and configuration files inside the
# prometheus_service package tree (owner rwx, group/world read+execute —
# matching the other nano-service packages).
install(FILES install-cp-nano-prometheus.sh DESTINATION prometheus_service/ PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ)
install(FILES cp-nano-prometheus.cfg DESTINATION prometheus_service/conf PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ)
install(FILES cp-nano-prometheus-conf.json DESTINATION prometheus_service/conf PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ)
install(FILES cp-nano-prometheus-debug-conf.json DESTINATION prometheus_service/conf PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ)

View File

@ -0,0 +1,21 @@
{
"connection": {
"Nano service API Port Primary": [
{
"value": 7465
}
],
"Nano service API Port Alternative": [
{
"value": 7466
}
]
},
"message": {
"Connection timeout": [
{
"value": 10000000
}
]
}
}

View File

@ -0,0 +1,11 @@
{
"Debug": [
{
"Streams": [
{
"Output": "nano_agent/cp-nano-prometheus.dbg"
}
]
}
]
}

Some files were not shown because too many files have changed in this diff Show More