mirror of
https://github.com/openappsec/openappsec.git
synced 2025-11-16 09:21:54 +03:00
Compare commits
133 Commits
Nov_28_202
...
Mar_17_202
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
51c2912434 | ||
|
|
0246b73bbd | ||
|
|
df7be864e2 | ||
|
|
ba8ec26344 | ||
|
|
97add465e8 | ||
|
|
38cb1f2c3b | ||
|
|
1dd9371840 | ||
|
|
f23d22a723 | ||
|
|
b51cf09190 | ||
|
|
ceb6469a7e | ||
|
|
b0ae283eed | ||
|
|
5fcb9bdc4a | ||
|
|
fb5698360b | ||
|
|
147626bc7f | ||
|
|
448991ef75 | ||
|
|
2b1ee84280 | ||
|
|
77dd288eee | ||
|
|
3cb4def82e | ||
|
|
a0dd7dd614 | ||
|
|
88eed946ec | ||
|
|
3e1ad8b0f7 | ||
|
|
bd35c421c6 | ||
|
|
9d6e883724 | ||
|
|
cd020a7ddd | ||
|
|
bb35eaf657 | ||
|
|
648f9ae2b1 | ||
|
|
47e47d706a | ||
|
|
b852809d1a | ||
|
|
a77732f84c | ||
|
|
a1a8e28019 | ||
|
|
a99c2ec4a3 | ||
|
|
f1303c1703 | ||
|
|
bd8174ead3 | ||
|
|
4ddcd2462a | ||
|
|
81433bac25 | ||
|
|
8d03b49176 | ||
|
|
84f9624c00 | ||
|
|
3ecda7b979 | ||
|
|
8f05508e02 | ||
|
|
f5b9c93fbe | ||
|
|
62b74c9a10 | ||
|
|
e3163cd4fa | ||
|
|
1e98fc8c66 | ||
|
|
6fbe272378 | ||
|
|
7b3320ce10 | ||
|
|
25cc2d66e7 | ||
|
|
66e2112afb | ||
|
|
ba7c9afd52 | ||
|
|
2aa0993d7e | ||
|
|
0cdfc9df90 | ||
|
|
010814d656 | ||
|
|
3779dd360d | ||
|
|
0e7dc2133d | ||
|
|
c9095acbef | ||
|
|
e47e29321d | ||
|
|
25a66e77df | ||
|
|
6eea40f165 | ||
|
|
cee6ed511a | ||
|
|
4f145fd74f | ||
|
|
3fe5c5b36f | ||
|
|
7542a85ddb | ||
|
|
fae4534e5c | ||
|
|
923a8a804b | ||
|
|
b1731237d1 | ||
|
|
3d3d6e73b9 | ||
|
|
3f80127ec5 | ||
|
|
abdee954bb | ||
|
|
9a516899e8 | ||
|
|
4fd2aa6c6b | ||
|
|
0db666ac4f | ||
|
|
493d9a6627 | ||
|
|
6db87fc7fe | ||
|
|
d2b9bc8c9c | ||
|
|
886a5befe1 | ||
|
|
1f2502f9e4 | ||
|
|
9e4c5014ce | ||
|
|
024423cce9 | ||
|
|
dc4b546bd1 | ||
|
|
a86aca13b4 | ||
|
|
87b34590d4 | ||
|
|
e0198a1a95 | ||
|
|
d024ad5845 | ||
|
|
46d42c8fa3 | ||
|
|
f6c36f3363 | ||
|
|
63541a4c3c | ||
|
|
d14fa7a468 | ||
|
|
ae0de5bf14 | ||
|
|
d39919f348 | ||
|
|
4f215e1409 | ||
|
|
f05b5f8cee | ||
|
|
949b656b13 | ||
|
|
bbe293d215 | ||
|
|
35b2df729f | ||
|
|
7600b6218f | ||
|
|
20e8e65e14 | ||
|
|
414130a789 | ||
|
|
9d704455e8 | ||
|
|
602442fed4 | ||
|
|
4e9a90db01 | ||
|
|
20f92afbc2 | ||
|
|
ee7adc37d0 | ||
|
|
c0b3e9c0d0 | ||
|
|
f1f4b13327 | ||
|
|
4354a98d37 | ||
|
|
09fa11516c | ||
|
|
446b043128 | ||
|
|
91bcadf930 | ||
|
|
0824cf4b23 | ||
|
|
108abdb35e | ||
|
|
64ebf013eb | ||
|
|
2c91793f08 | ||
|
|
72a263d25a | ||
|
|
4e14ff9a58 | ||
|
|
1fb28e14d6 | ||
|
|
e38bb9525c | ||
|
|
63b8bb22c2 | ||
|
|
11c97330f5 | ||
|
|
e56fb0bc1a | ||
|
|
4571d563f4 | ||
|
|
02c1db01f6 | ||
|
|
c557affd9b | ||
|
|
8889c3c054 | ||
|
|
f67eff87bc | ||
|
|
fa6a2e4233 | ||
|
|
b7e2efbf7e | ||
|
|
96ce290e5f | ||
|
|
de8e2d9970 | ||
|
|
0048708af1 | ||
|
|
4fe0f44e88 | ||
|
|
5f139d13d7 | ||
|
|
919d775a73 | ||
|
|
ac8e353598 | ||
|
|
0663f20691 |
@@ -74,7 +74,7 @@ For Linux, if you’ve built your own package use the following commands:
|
||||
|
||||
```bash
|
||||
$ install-cp-nano-agent.sh --install --hybrid_mode
|
||||
$ install-cp-nano-service-http-transaction-handler.sh –install
|
||||
$ install-cp-nano-service-http-transaction-handler.sh --install
|
||||
$ install-cp-nano-attachment-registration-manager.sh --install
|
||||
```
|
||||
You can add the ```--token <token>``` and ```--email <email address>``` options to the first command, to get a token follow [documentation](https://docs.openappsec.io/getting-started/using-the-web-ui-saas/connect-deployed-agents-to-saas-management-k8s-and-linux).
|
||||
|
||||
@@ -95,6 +95,18 @@ getFailOpenHoldTimeout()
|
||||
return conf_data.getNumericalValue("fail_open_hold_timeout");
|
||||
}
|
||||
|
||||
unsigned int
|
||||
getHoldVerdictPollingTime()
|
||||
{
|
||||
return conf_data.getNumericalValue("hold_verdict_polling_time");
|
||||
}
|
||||
|
||||
unsigned int
|
||||
getHoldVerdictRetries()
|
||||
{
|
||||
return conf_data.getNumericalValue("hold_verdict_retries");
|
||||
}
|
||||
|
||||
unsigned int
|
||||
getMaxSessionsPerMinute()
|
||||
{
|
||||
@@ -173,6 +185,12 @@ getReqBodySizeTrigger()
|
||||
return conf_data.getNumericalValue("body_size_trigger");
|
||||
}
|
||||
|
||||
unsigned int
|
||||
getRemoveResServerHeader()
|
||||
{
|
||||
return conf_data.getNumericalValue("remove_server_header");
|
||||
}
|
||||
|
||||
int
|
||||
isIPAddress(c_str ip_str)
|
||||
{
|
||||
|
||||
@@ -66,7 +66,10 @@ TEST_F(HttpAttachmentUtilTest, GetValidAttachmentConfiguration)
|
||||
"\"static_resources_path\": \"" + static_resources_path + "\",\n"
|
||||
"\"min_retries_for_verdict\": 1,\n"
|
||||
"\"max_retries_for_verdict\": 3,\n"
|
||||
"\"body_size_trigger\": 777\n"
|
||||
"\"hold_verdict_retries\": 3,\n"
|
||||
"\"hold_verdict_polling_time\": 1,\n"
|
||||
"\"body_size_trigger\": 777,\n"
|
||||
"\"remove_server_header\": 1\n"
|
||||
"}\n";
|
||||
ofstream valid_configuration_file(attachment_configuration_file_name);
|
||||
valid_configuration_file << valid_configuration;
|
||||
@@ -95,6 +98,9 @@ TEST_F(HttpAttachmentUtilTest, GetValidAttachmentConfiguration)
|
||||
EXPECT_EQ(getReqBodySizeTrigger(), 777u);
|
||||
EXPECT_EQ(getWaitingForVerdictThreadTimeout(), 75u);
|
||||
EXPECT_EQ(getInspectionMode(), ngx_http_inspection_mode::BLOCKING_THREAD);
|
||||
EXPECT_EQ(getRemoveResServerHeader(), 1u);
|
||||
EXPECT_EQ(getHoldVerdictRetries(), 3u);
|
||||
EXPECT_EQ(getHoldVerdictPollingTime(), 1u);
|
||||
|
||||
EXPECT_EQ(isDebugContext("1.2.3.4", "5.6.7.8", 80, "GET", "test", "/abc"), 1);
|
||||
EXPECT_EQ(isDebugContext("1.2.3.9", "5.6.7.8", 80, "GET", "test", "/abc"), 0);
|
||||
|
||||
@@ -98,19 +98,19 @@ while true; do
|
||||
init=true
|
||||
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
|
||||
sleep 5
|
||||
active_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")
|
||||
active_watchdog_pid=$(pgrep -f -x -o "/bin/(bash|sh) /etc/cp/watchdog/cp-nano-watchdog")
|
||||
fi
|
||||
|
||||
current_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")
|
||||
current_watchdog_pid=$(pgrep -f -x -o "/bin/(bash|sh) /etc/cp/watchdog/cp-nano-watchdog")
|
||||
if [ ! -f /tmp/restart_watchdog ] && [ "$current_watchdog_pid" != "$active_watchdog_pid" ]; then
|
||||
echo "Error: Watchdog exited abnormally"
|
||||
exit 1
|
||||
elif [ -f /tmp/restart_watchdog ]; then
|
||||
rm -f /tmp/restart_watchdog
|
||||
kill -9 "$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")"
|
||||
kill -9 "$(pgrep -f -x -o "/bin/(bash|sh) /etc/cp/watchdog/cp-nano-watchdog")"
|
||||
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
|
||||
sleep 5
|
||||
active_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")
|
||||
active_watchdog_pid=$(pgrep -f -x -o "/bin/(bash|sh) /etc/cp/watchdog/cp-nano-watchdog")
|
||||
fi
|
||||
|
||||
sleep 5
|
||||
|
||||
@@ -7,3 +7,4 @@ add_subdirectory(pending_key)
|
||||
add_subdirectory(utils)
|
||||
add_subdirectory(attachment-intakers)
|
||||
add_subdirectory(security_apps)
|
||||
add_subdirectory(nginx_message_reader)
|
||||
|
||||
@@ -31,6 +31,7 @@
|
||||
#include <stdarg.h>
|
||||
|
||||
#include <boost/range/iterator_range.hpp>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/regex.hpp>
|
||||
|
||||
#include "nginx_attachment_config.h"
|
||||
@@ -260,6 +261,22 @@ public:
|
||||
);
|
||||
}
|
||||
|
||||
const char* ignored_headers_env = getenv("SAAS_IGNORED_UPSTREAM_HEADERS");
|
||||
if (ignored_headers_env) {
|
||||
string ignored_headers_str = ignored_headers_env;
|
||||
ignored_headers_str = NGEN::Strings::trim(ignored_headers_str);
|
||||
|
||||
if (!ignored_headers_str.empty()) {
|
||||
dbgInfo(D_HTTP_MANAGER)
|
||||
<< "Ignoring SAAS_IGNORED_UPSTREAM_HEADERS environment variable: "
|
||||
<< ignored_headers_str;
|
||||
|
||||
vector<string> ignored_headers_vec;
|
||||
boost::split(ignored_headers_vec, ignored_headers_str, boost::is_any_of(";"));
|
||||
for (const string &header : ignored_headers_vec) ignored_headers.insert(header);
|
||||
}
|
||||
}
|
||||
|
||||
dbgInfo(D_NGINX_ATTACHMENT) << "Successfully initialized NGINX Attachment";
|
||||
}
|
||||
|
||||
@@ -1034,7 +1051,11 @@ private:
|
||||
case ChunkType::REQUEST_START:
|
||||
return handleStartTransaction(data, opaque);
|
||||
case ChunkType::REQUEST_HEADER:
|
||||
return handleMultiModifiableChunks(NginxParser::parseRequestHeaders(data), "request header", true);
|
||||
return handleMultiModifiableChunks(
|
||||
NginxParser::parseRequestHeaders(data, ignored_headers),
|
||||
"request header",
|
||||
true
|
||||
);
|
||||
case ChunkType::REQUEST_BODY:
|
||||
return handleModifiableChunk(NginxParser::parseRequestBody(data), "request body", true);
|
||||
case ChunkType::REQUEST_END: {
|
||||
@@ -1814,6 +1835,7 @@ private:
|
||||
HttpAttachmentConfig attachment_config;
|
||||
I_MainLoop::RoutineID attachment_routine_id = 0;
|
||||
bool traffic_indicator = false;
|
||||
unordered_set<string> ignored_headers;
|
||||
|
||||
// Interfaces
|
||||
I_Socket *i_socket = nullptr;
|
||||
|
||||
@@ -203,6 +203,13 @@ HttpAttachmentConfig::setFailOpenTimeout()
|
||||
"NGINX wait thread timeout msec"
|
||||
));
|
||||
|
||||
conf_data.setNumericalValue("remove_server_header", getAttachmentConf<uint>(
|
||||
0,
|
||||
"agent.removeServerHeader.nginxModule",
|
||||
"HTTP manager",
|
||||
"Response server header removal"
|
||||
));
|
||||
|
||||
uint inspection_mode = getAttachmentConf<uint>(
|
||||
static_cast<uint>(ngx_http_inspection_mode_e::NON_BLOCKING_THREAD),
|
||||
"agent.inspectionMode.nginxModule",
|
||||
@@ -233,6 +240,21 @@ HttpAttachmentConfig::setRetriesForVerdict()
|
||||
"Max retries for verdict"
|
||||
));
|
||||
|
||||
conf_data.setNumericalValue("hold_verdict_retries", getAttachmentConf<uint>(
|
||||
3,
|
||||
"agent.retriesForHoldVerdict.nginxModule",
|
||||
"HTTP manager",
|
||||
"Retries for hold verdict"
|
||||
));
|
||||
|
||||
conf_data.setNumericalValue("hold_verdict_polling_time", getAttachmentConf<uint>(
|
||||
1,
|
||||
"agent.holdVerdictPollingInterval.nginxModule",
|
||||
"HTTP manager",
|
||||
"Hold verdict polling interval seconds"
|
||||
));
|
||||
|
||||
|
||||
conf_data.setNumericalValue("body_size_trigger", getAttachmentConf<uint>(
|
||||
200000,
|
||||
"agent.reqBodySizeTrigger.nginxModule",
|
||||
|
||||
@@ -19,12 +19,15 @@
|
||||
|
||||
#include "config.h"
|
||||
#include "virtual_modifiers.h"
|
||||
#include "agent_core_utilities.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace boost::uuids;
|
||||
|
||||
USE_DEBUG_FLAG(D_HTTP_MANAGER);
|
||||
|
||||
extern bool is_keep_alive_ctx;
|
||||
|
||||
NginxAttachmentOpaque::NginxAttachmentOpaque(HttpTransactionData _transaction_data)
|
||||
:
|
||||
TableOpaqueSerialize<NginxAttachmentOpaque>(this),
|
||||
@@ -119,3 +122,47 @@ NginxAttachmentOpaque::setSavedData(const string &name, const string &data, EnvK
|
||||
saved_data[name] = data;
|
||||
ctx.registerValue(name, data, log_ctx);
|
||||
}
|
||||
|
||||
bool
|
||||
NginxAttachmentOpaque::setKeepAliveCtx(const string &hdr_key, const string &hdr_val)
|
||||
{
|
||||
if (!is_keep_alive_ctx) return false;
|
||||
|
||||
static pair<string, string> keep_alive_hdr;
|
||||
static bool keep_alive_hdr_initialized = false;
|
||||
|
||||
if (keep_alive_hdr_initialized) {
|
||||
if (!keep_alive_hdr.first.empty() && hdr_key == keep_alive_hdr.first && hdr_val == keep_alive_hdr.second) {
|
||||
dbgTrace(D_HTTP_MANAGER) << "Registering keep alive context";
|
||||
ctx.registerValue("keep_alive_request_ctx", true);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
const char* saas_keep_alive_hdr_name_env = getenv("SAAS_KEEP_ALIVE_HDR_NAME");
|
||||
if (saas_keep_alive_hdr_name_env) {
|
||||
keep_alive_hdr.first = NGEN::Strings::trim(saas_keep_alive_hdr_name_env);
|
||||
dbgInfo(D_HTTP_MANAGER) << "Using SAAS_KEEP_ALIVE_HDR_NAME environment variable: " << keep_alive_hdr.first;
|
||||
}
|
||||
|
||||
if (!keep_alive_hdr.first.empty()) {
|
||||
const char* saas_keep_alive_hdr_value_env = getenv("SAAS_KEEP_ALIVE_HDR_VALUE");
|
||||
if (saas_keep_alive_hdr_value_env) {
|
||||
keep_alive_hdr.second = NGEN::Strings::trim(saas_keep_alive_hdr_value_env);
|
||||
dbgInfo(D_HTTP_MANAGER)
|
||||
<< "Using SAAS_KEEP_ALIVE_HDR_VALUE environment variable: "
|
||||
<< keep_alive_hdr.second;
|
||||
}
|
||||
|
||||
if (!keep_alive_hdr.second.empty() && (hdr_key == keep_alive_hdr.first && hdr_val == keep_alive_hdr.second)) {
|
||||
dbgTrace(D_HTTP_MANAGER) << "Registering keep alive context";
|
||||
ctx.registerValue("keep_alive_request_ctx", true);
|
||||
keep_alive_hdr_initialized = true;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
keep_alive_hdr_initialized = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -85,6 +85,7 @@ public:
|
||||
EnvKeyAttr::LogSection log_ctx = EnvKeyAttr::LogSection::NONE
|
||||
);
|
||||
void setApplicationState(const ApplicationState &app_state) { application_state = app_state; }
|
||||
bool setKeepAliveCtx(const std::string &hdr_key, const std::string &hdr_val);
|
||||
|
||||
private:
|
||||
CompressionStream *response_compression_stream;
|
||||
|
||||
@@ -29,6 +29,7 @@ USE_DEBUG_FLAG(D_NGINX_ATTACHMENT_PARSER);
|
||||
Buffer NginxParser::tenant_header_key = Buffer();
|
||||
static const Buffer proxy_ip_header_key("X-Forwarded-For", 15, Buffer::MemoryType::STATIC);
|
||||
static const Buffer source_ip("sourceip", 8, Buffer::MemoryType::STATIC);
|
||||
bool is_keep_alive_ctx = getenv("SAAS_KEEP_ALIVE_HDR_NAME") != nullptr;
|
||||
|
||||
map<Buffer, CompressionType> NginxParser::content_encodings = {
|
||||
{Buffer("identity"), CompressionType::NO_COMPRESSION},
|
||||
@@ -177,22 +178,54 @@ getActivetenantAndProfile(const string &str, const string &deli = ",")
|
||||
}
|
||||
|
||||
Maybe<vector<HttpHeader>>
|
||||
NginxParser::parseRequestHeaders(const Buffer &data)
|
||||
NginxParser::parseRequestHeaders(const Buffer &data, const unordered_set<string> &ignored_headers)
|
||||
{
|
||||
auto parsed_headers = genHeaders(data);
|
||||
if (!parsed_headers.ok()) return parsed_headers.passErr();
|
||||
auto maybe_parsed_headers = genHeaders(data);
|
||||
if (!maybe_parsed_headers.ok()) return maybe_parsed_headers.passErr();
|
||||
|
||||
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
|
||||
auto parsed_headers = maybe_parsed_headers.unpack();
|
||||
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
|
||||
|
||||
for (const HttpHeader &header : *parsed_headers) {
|
||||
if (is_keep_alive_ctx || !ignored_headers.empty()) {
|
||||
bool is_last_header_removed = false;
|
||||
parsed_headers.erase(
|
||||
remove_if(
|
||||
parsed_headers.begin(),
|
||||
parsed_headers.end(),
|
||||
[&opaque, &is_last_header_removed, &ignored_headers](const HttpHeader &header)
|
||||
{
|
||||
string hdr_key = static_cast<string>(header.getKey());
|
||||
string hdr_val = static_cast<string>(header.getValue());
|
||||
if (
|
||||
opaque.setKeepAliveCtx(hdr_key, hdr_val)
|
||||
|| ignored_headers.find(hdr_key) != ignored_headers.end()
|
||||
) {
|
||||
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Header was removed from headers list: " << hdr_key;
|
||||
if (header.isLastHeader()) {
|
||||
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Last header was removed from headers list";
|
||||
is_last_header_removed = true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
),
|
||||
parsed_headers.end()
|
||||
);
|
||||
if (is_last_header_removed) {
|
||||
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Adjusting last header flag";
|
||||
if (!parsed_headers.empty()) parsed_headers.back().setIsLastHeader();
|
||||
}
|
||||
}
|
||||
|
||||
for (const HttpHeader &header : parsed_headers) {
|
||||
auto source_identifiers = getConfigurationWithDefault<UsersAllIdentifiersConfig>(
|
||||
UsersAllIdentifiersConfig(),
|
||||
"rulebase",
|
||||
"usersIdentifiers"
|
||||
);
|
||||
source_identifiers.parseRequestHeaders(header);
|
||||
|
||||
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
|
||||
opaque.addToSavedData(
|
||||
HttpTransactionData::req_headers,
|
||||
static_cast<string>(header.getKey()) + ": " + static_cast<string>(header.getValue()) + "\r\n"
|
||||
|
||||
@@ -28,7 +28,10 @@ public:
|
||||
static Maybe<HttpTransactionData> parseStartTrasaction(const Buffer &data);
|
||||
static Maybe<ResponseCode> parseResponseCode(const Buffer &data);
|
||||
static Maybe<uint64_t> parseContentLength(const Buffer &data);
|
||||
static Maybe<std::vector<HttpHeader>> parseRequestHeaders(const Buffer &data);
|
||||
static Maybe<std::vector<HttpHeader>> parseRequestHeaders(
|
||||
const Buffer &data,
|
||||
const std::unordered_set<std::string> &ignored_headers
|
||||
);
|
||||
static Maybe<std::vector<HttpHeader>> parseResponseHeaders(const Buffer &data);
|
||||
static Maybe<HttpBody> parseRequestBody(const Buffer &data);
|
||||
static Maybe<HttpBody> parseResponseBody(const Buffer &raw_response_body, CompressionStream *compression_stream);
|
||||
|
||||
@@ -285,17 +285,21 @@ Maybe<string>
|
||||
UsersAllIdentifiersConfig::parseXForwardedFor(const string &str, ExtractType type) const
|
||||
{
|
||||
vector<string> header_values = split(str);
|
||||
|
||||
if (header_values.empty()) return genError("No IP found in the xff header list");
|
||||
|
||||
vector<string> xff_values = getHeaderValuesFromConfig("x-forwarded-for");
|
||||
vector<CIDRSData> cidr_values(xff_values.begin(), xff_values.end());
|
||||
string last_valid_ip;
|
||||
|
||||
for (auto it = header_values.rbegin(); it != header_values.rend() - 1; ++it) {
|
||||
if (!IPAddr::createIPAddr(*it).ok()) {
|
||||
dbgWarning(D_NGINX_ATTACHMENT_PARSER) << "Invalid IP address found in the xff header IPs list: " << *it;
|
||||
return genError("Invalid IP address");
|
||||
if (last_valid_ip.empty()) {
|
||||
return genError("Invalid IP address");
|
||||
}
|
||||
return last_valid_ip;
|
||||
}
|
||||
last_valid_ip = *it;
|
||||
if (type == ExtractType::PROXYIP) continue;
|
||||
if (!isIpTrusted(*it, cidr_values)) {
|
||||
dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Found untrusted IP in the xff header IPs list: " << *it;
|
||||
@@ -307,7 +311,10 @@ UsersAllIdentifiersConfig::parseXForwardedFor(const string &str, ExtractType typ
|
||||
dbgWarning(D_NGINX_ATTACHMENT_PARSER)
|
||||
<< "Invalid IP address found in the xff header IPs list: "
|
||||
<< header_values[0];
|
||||
return genError("Invalid IP address");
|
||||
if (last_valid_ip.empty()) {
|
||||
return genError("No Valid Ip address was found");
|
||||
}
|
||||
return last_valid_ip;
|
||||
}
|
||||
|
||||
return header_values[0];
|
||||
|
||||
@@ -15,19 +15,18 @@
|
||||
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <sys/stat.h>
|
||||
#include <climits>
|
||||
#include <unordered_map>
|
||||
#include <boost/range/iterator_range.hpp>
|
||||
#include <unordered_set>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <fstream>
|
||||
#include <algorithm>
|
||||
|
||||
#include "common.h"
|
||||
#include "config.h"
|
||||
#include "table_opaque.h"
|
||||
#include "http_manager_opaque.h"
|
||||
#include "log_generator.h"
|
||||
#include "http_inspection_events.h"
|
||||
#include "agent_core_utilities.h"
|
||||
|
||||
USE_DEBUG_FLAG(D_HTTP_MANAGER);
|
||||
|
||||
@@ -95,6 +94,7 @@ public:
|
||||
|
||||
HttpManagerOpaque &state = i_transaction_table->getState<HttpManagerOpaque>();
|
||||
string event_key = static_cast<string>(event.getKey());
|
||||
|
||||
if (event_key == getProfileAgentSettingWithDefault<string>("", "agent.customHeaderValueLogging")) {
|
||||
string event_value = static_cast<string>(event.getValue());
|
||||
dbgTrace(D_HTTP_MANAGER)
|
||||
|
||||
45
components/include/central_nginx_manager.h
Executable file
45
components/include/central_nginx_manager.h
Executable file
@@ -0,0 +1,45 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef __CENTRAL_NGINX_MANAGER_H__
|
||||
#define __CENTRAL_NGINX_MANAGER_H__
|
||||
|
||||
#include "component.h"
|
||||
#include "singleton.h"
|
||||
#include "i_messaging.h"
|
||||
#include "i_rest_api.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "i_agent_details.h"
|
||||
|
||||
class CentralNginxManager
|
||||
:
|
||||
public Component,
|
||||
Singleton::Consume<I_RestApi>,
|
||||
Singleton::Consume<I_Messaging>,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_AgentDetails>
|
||||
{
|
||||
public:
|
||||
CentralNginxManager();
|
||||
~CentralNginxManager();
|
||||
|
||||
void preload() override;
|
||||
void init() override;
|
||||
void fini() override;
|
||||
|
||||
private:
|
||||
class Impl;
|
||||
std::unique_ptr<Impl> pimpl;
|
||||
};
|
||||
|
||||
#endif // __CENTRAL_NGINX_MANAGER_H__
|
||||
@@ -239,6 +239,7 @@ public:
|
||||
const Buffer & getValue() const { return value; }
|
||||
|
||||
bool isLastHeader() const { return is_last_header; }
|
||||
void setIsLastHeader() { is_last_header = true; }
|
||||
uint8_t getHeaderIndex() const { return header_index; }
|
||||
|
||||
private:
|
||||
|
||||
@@ -30,7 +30,7 @@ public:
|
||||
virtual bool isVersionAboveR8110() = 0;
|
||||
virtual bool isReverseProxy() = 0;
|
||||
virtual bool isCloudStorageEnabled() = 0;
|
||||
virtual Maybe<std::tuple<std::string, std::string, std::string>> parseNginxMetadata() = 0;
|
||||
virtual Maybe<std::tuple<std::string, std::string, std::string, std::string>> parseNginxMetadata() = 0;
|
||||
virtual Maybe<std::tuple<std::string, std::string, std::string, std::string, std::string>> readCloudMetadata() = 0;
|
||||
virtual std::map<std::string, std::string> getResolvedDetails() = 0;
|
||||
#if defined(gaia) || defined(smb)
|
||||
|
||||
@@ -62,6 +62,7 @@ public:
|
||||
|
||||
private:
|
||||
Maybe<std::string> downloadPackage(const Package &package, bool is_clean_installation);
|
||||
std::string getCurrentTimestamp();
|
||||
|
||||
std::string manifest_file_path;
|
||||
std::string temp_ext;
|
||||
|
||||
28
components/include/nginx_message_reader.h
Executable file
28
components/include/nginx_message_reader.h
Executable file
@@ -0,0 +1,28 @@
|
||||
#ifndef __NGINX_MESSAGE_READER_H__
|
||||
#define __NGINX_MESSAGE_READER_H__
|
||||
|
||||
#include "singleton.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "i_socket_is.h"
|
||||
#include "component.h"
|
||||
|
||||
class NginxMessageReader
|
||||
:
|
||||
public Component,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_Socket>
|
||||
{
|
||||
public:
|
||||
NginxMessageReader();
|
||||
~NginxMessageReader();
|
||||
|
||||
void init() override;
|
||||
void fini() override;
|
||||
void preload() override;
|
||||
|
||||
private:
|
||||
class Impl;
|
||||
std::unique_ptr<Impl> pimpl;
|
||||
};
|
||||
|
||||
#endif //__NGINX_MESSAGE_READER_H__
|
||||
51
components/include/nginx_utils.h
Executable file
51
components/include/nginx_utils.h
Executable file
@@ -0,0 +1,51 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef __NGINX_UTILS_H__
|
||||
#define __NGINX_UTILS_H__
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "maybe_res.h"
|
||||
#include "singleton.h"
|
||||
#include "i_shell_cmd.h"
|
||||
|
||||
class NginxConfCollector
|
||||
{
|
||||
public:
|
||||
NginxConfCollector(const std::string &nginx_conf_input_path, const std::string &nginx_conf_output_path);
|
||||
Maybe<std::string> generateFullNginxConf() const;
|
||||
|
||||
private:
|
||||
std::vector<std::string> expandIncludes(const std::string &includePattern) const;
|
||||
void processConfigFile(
|
||||
const std::string &path,
|
||||
std::ostringstream &conf_output,
|
||||
std::vector<std::string> &errors
|
||||
) const;
|
||||
|
||||
std::string main_conf_input_path;
|
||||
std::string main_conf_output_path;
|
||||
std::string main_conf_directory_path;
|
||||
};
|
||||
|
||||
class NginxUtils : Singleton::Consume<I_ShellCmd>
|
||||
{
|
||||
public:
|
||||
static std::string getModulesPath();
|
||||
static std::string getMainNginxConfPath();
|
||||
static Maybe<void> validateNginxConf(const std::string &nginx_conf_path);
|
||||
static Maybe<void> reloadNginx(const std::string &nginx_conf_path);
|
||||
};
|
||||
|
||||
#endif // __NGINX_UTILS_H__
|
||||
@@ -7,15 +7,21 @@
|
||||
#include "singleton.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "i_environment.h"
|
||||
#include "i_geo_location.h"
|
||||
#include "i_generic_rulebase.h"
|
||||
#include "i_shell_cmd.h"
|
||||
#include "i_env_details.h"
|
||||
|
||||
class RateLimit
|
||||
:
|
||||
public Component,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_TimeGet>,
|
||||
Singleton::Consume<I_GeoLocation>,
|
||||
Singleton::Consume<I_Environment>,
|
||||
Singleton::Consume<I_GenericRulebase>
|
||||
Singleton::Consume<I_GenericRulebase>,
|
||||
Singleton::Consume<I_ShellCmd>,
|
||||
Singleton::Consume<I_EnvDetails>
|
||||
{
|
||||
public:
|
||||
RateLimit();
|
||||
|
||||
@@ -30,6 +30,7 @@
|
||||
#include "generic_metric.h"
|
||||
|
||||
#define LOGGING_INTERVAL_IN_MINUTES 10
|
||||
USE_DEBUG_FLAG(D_WAAP);
|
||||
enum class AssetType { API, WEB, ALL, COUNT };
|
||||
|
||||
class WaapTelemetryEvent : public Event<WaapTelemetryEvent>
|
||||
@@ -132,6 +133,7 @@ private:
|
||||
std::map<std::string, std::shared_ptr<T>>& telemetryMap
|
||||
) {
|
||||
if (!telemetryMap.count(asset_id)) {
|
||||
dbgTrace(D_WAAP) << "creating telemetry data for asset: " << data.assetName;
|
||||
telemetryMap.emplace(asset_id, std::make_shared<T>());
|
||||
telemetryMap[asset_id]->init(
|
||||
telemetryName,
|
||||
@@ -139,7 +141,9 @@ private:
|
||||
ReportIS::IssuingEngine::AGENT_CORE,
|
||||
std::chrono::minutes(LOGGING_INTERVAL_IN_MINUTES),
|
||||
true,
|
||||
ReportIS::Audience::SECURITY
|
||||
ReportIS::Audience::SECURITY,
|
||||
false,
|
||||
asset_id
|
||||
);
|
||||
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
@@ -152,29 +156,30 @@ private:
|
||||
std::string("Web Application"),
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"assetId",
|
||||
asset_id,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"assetName",
|
||||
data.assetName,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"practiceId",
|
||||
data.practiceId,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"practiceName",
|
||||
data.practiceName,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
|
||||
telemetryMap[asset_id]->registerListener();
|
||||
}
|
||||
dbgTrace(D_WAAP) << "updating telemetry data for asset: " << data.assetName;
|
||||
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"assetId",
|
||||
asset_id,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"assetName",
|
||||
data.assetName,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"practiceId",
|
||||
data.practiceId,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
telemetryMap[asset_id]->template registerContext<std::string>(
|
||||
"practiceName",
|
||||
data.practiceName,
|
||||
EnvKeyAttr::LogSection::SOURCE
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -33,6 +33,7 @@ class I_WaapAssetStatesManager;
|
||||
class I_Messaging;
|
||||
class I_AgentDetails;
|
||||
class I_Encryptor;
|
||||
class I_WaapModelResultLogger;
|
||||
|
||||
const std::string WAAP_APPLICATION_NAME = "waap application";
|
||||
|
||||
@@ -50,7 +51,8 @@ class WaapComponent
|
||||
Singleton::Consume<I_AgentDetails>,
|
||||
Singleton::Consume<I_Messaging>,
|
||||
Singleton::Consume<I_Encryptor>,
|
||||
Singleton::Consume<I_Environment>
|
||||
Singleton::Consume<I_Environment>,
|
||||
Singleton::Consume<I_WaapModelResultLogger>
|
||||
{
|
||||
public:
|
||||
WaapComponent();
|
||||
|
||||
3
components/nginx_message_reader/CMakeLists.txt
Executable file
3
components/nginx_message_reader/CMakeLists.txt
Executable file
@@ -0,0 +1,3 @@
|
||||
link_directories(${BOOST_ROOT}/lib)
|
||||
|
||||
add_library(nginx_message_reader nginx_message_reader.cc)
|
||||
735
components/nginx_message_reader/nginx_message_reader.cc
Executable file
735
components/nginx_message_reader/nginx_message_reader.cc
Executable file
@@ -0,0 +1,735 @@
|
||||
#include "nginx_message_reader.h"
|
||||
|
||||
#include <string>
|
||||
#include <boost/regex.hpp>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/algorithm/string/regex.hpp>
|
||||
|
||||
#include "config.h"
|
||||
#include "singleton.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "enum_array.h"
|
||||
#include "log_generator.h"
|
||||
#include "maybe_res.h"
|
||||
#include "http_transaction_data.h"
|
||||
#include "generic_rulebase/rulebase_config.h"
|
||||
#include "generic_rulebase/evaluators/asset_eval.h"
|
||||
#include "generic_rulebase/triggers_config.h"
|
||||
#include "agent_core_utilities.h"
|
||||
#include "rate_limit_config.h"
|
||||
|
||||
USE_DEBUG_FLAG(D_NGINX_MESSAGE_READER);
|
||||
|
||||
using namespace std;
|
||||
|
||||
static const string syslog_regex_string = (
|
||||
"<[0-9]+>([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}"
|
||||
"[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(nginx:)"
|
||||
);
|
||||
|
||||
static const boost::regex socket_address_regex("(\\d+\\.\\d+\\.\\d+\\.\\d+):(\\d+)");
|
||||
static const boost::regex syslog_regex(syslog_regex_string);
|
||||
static const boost::regex alert_log_regex(
|
||||
"("
|
||||
+ syslog_regex_string + ") "
|
||||
+ "(.+?\\[alert\\] )(.+?)"
|
||||
", (client: .+?)"
|
||||
", (server: .+?)"
|
||||
", (request: \".+?\")"
|
||||
", (upstream: \".+?\")"
|
||||
", (host: \".+?\")$"
|
||||
);
|
||||
|
||||
static const boost::regex error_log_regex(
|
||||
"("
|
||||
+ syslog_regex_string + ") "
|
||||
+ "(.+?\\[error\\] )(.+?)"
|
||||
", (client: .+?)"
|
||||
", (server: .+?)"
|
||||
", (request: \".+?\")"
|
||||
", (upstream: \".+?\")"
|
||||
", (host: \".+?\")$"
|
||||
);
|
||||
|
||||
static const boost::regex server_regex("(\\d+\\.\\d+\\.\\d+\\.\\d+)|(\\w+\\.\\w+)");
|
||||
static const boost::regex uri_regex("^/");
|
||||
static const boost::regex port_regex("\\d+");
|
||||
static const boost::regex response_code_regex("[0-9]{3}");
|
||||
static const boost::regex http_method_regex("[A-Za-z]+");
|
||||
|
||||
class NginxMessageReader::Impl
|
||||
{
|
||||
public:
|
||||
void
|
||||
init()
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
I_MainLoop *mainloop = Singleton::Consume<I_MainLoop>::by<NginxMessageReader>();
|
||||
mainloop->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::System,
|
||||
[this] ()
|
||||
{
|
||||
initSyslogServerSocket();
|
||||
handleNginxLogs();
|
||||
},
|
||||
"Initialize nginx syslog",
|
||||
true
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
preload()
|
||||
{
|
||||
registerConfigLoadCb([this]() { loadNginxMessageReaderConfig(); });
|
||||
}
|
||||
|
||||
void
|
||||
fini()
|
||||
{
|
||||
I_Socket *i_socket = Singleton::Consume<I_Socket>::by<NginxMessageReader>();
|
||||
i_socket->closeSocket(syslog_server_socket);
|
||||
}
|
||||
|
||||
void
|
||||
loadNginxMessageReaderConfig()
|
||||
{
|
||||
rate_limit_status_code = getProfileAgentSettingWithDefault<string>(
|
||||
"429",
|
||||
"accessControl.rateLimit.returnCode"
|
||||
);
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Selected rate-limit status code: " << rate_limit_status_code;
|
||||
}
|
||||
|
||||
private:
|
||||
enum class LogInfo {
|
||||
HTTP_METHOD,
|
||||
URI,
|
||||
RESPONSE_CODE,
|
||||
HOST,
|
||||
SOURCE,
|
||||
DESTINATION_IP,
|
||||
DESTINATION_PORT,
|
||||
EVENT_MESSAGE,
|
||||
ASSET_ID,
|
||||
ASSET_NAME,
|
||||
RULE_NAME,
|
||||
RULE_ID,
|
||||
COUNT
|
||||
};
|
||||
|
||||
void
|
||||
initSyslogServerSocket()
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
I_MainLoop *mainloop = Singleton::Consume<I_MainLoop>::by<NginxMessageReader>();
|
||||
I_Socket *i_socket = Singleton::Consume<I_Socket>::by<NginxMessageReader>();
|
||||
string nginx_syslog_server_address = getProfileAgentSettingWithDefault<string>(
|
||||
"127.0.0.1:1514",
|
||||
"reverseProxy.nginx.syslogAddress"
|
||||
);
|
||||
dbgInfo(D_NGINX_MESSAGE_READER) << "Attempting to open a socket: " << nginx_syslog_server_address;
|
||||
do {
|
||||
Maybe<I_Socket::socketFd> new_socket = i_socket->genSocket(
|
||||
I_Socket::SocketType::UDP,
|
||||
false,
|
||||
true,
|
||||
nginx_syslog_server_address
|
||||
);
|
||||
if (!new_socket.ok()) {
|
||||
dbgError(D_NGINX_MESSAGE_READER) << "Failed to open a socket. Error: " << new_socket.getErr();
|
||||
mainloop->yield(chrono::milliseconds(500));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (new_socket.unpack() < 0) {
|
||||
dbgError(D_NGINX_MESSAGE_READER)<< "Generated socket is OK yet negative";
|
||||
mainloop->yield(chrono::milliseconds(500));
|
||||
continue;
|
||||
}
|
||||
syslog_server_socket = new_socket.unpack();
|
||||
dbgInfo(D_NGINX_MESSAGE_READER)
|
||||
<< "Opened socket for nginx logs over syslog. Socket: "
|
||||
<< syslog_server_socket;
|
||||
} while (syslog_server_socket < 0);
|
||||
}
|
||||
|
||||
void
|
||||
handleNginxLogs()
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
I_MainLoop::Routine read_logs =
|
||||
[this] ()
|
||||
{
|
||||
Maybe<string> logs = getLogsFromSocket(syslog_server_socket);
|
||||
|
||||
if (!logs.ok()) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||
<< "Failed to get NGINX logs from the socket. Error: "
|
||||
<< logs.getErr();
|
||||
return;
|
||||
}
|
||||
string raw_logs_to_parse = logs.unpackMove();
|
||||
vector<string> logs_to_parse = separateLogs(raw_logs_to_parse);
|
||||
|
||||
for (auto const &log: logs_to_parse) {
|
||||
bool log_sent;
|
||||
if (isAccessLog(log)) {
|
||||
log_sent = sendAccessLog(log);
|
||||
} else if (isAlertErrorLog(log) || isErrorLog(log)) {
|
||||
log_sent = sendErrorLog(log);
|
||||
} else {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||
continue;
|
||||
}
|
||||
if (!log_sent) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER) << "Failed to send Log to Infinity Portal";
|
||||
} else {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Succesfully sent nginx log to Infinity Portal";
|
||||
}
|
||||
}
|
||||
};
|
||||
I_MainLoop *mainloop = Singleton::Consume<I_MainLoop>::by<NginxMessageReader>();
|
||||
mainloop->addFileRoutine(
|
||||
I_MainLoop::RoutineType::RealTime,
|
||||
syslog_server_socket,
|
||||
read_logs,
|
||||
"Process nginx logs",
|
||||
true
|
||||
);
|
||||
}
|
||||
|
||||
bool
|
||||
sendAccessLog(const string &log)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Access log" << log;
|
||||
Maybe<EnumArray<LogInfo, string>> log_info = parseAccessLog(log);
|
||||
if (!log_info.ok()) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||
<< "Failed parsing the NGINX logs. Error: "
|
||||
<< log_info.getErr();
|
||||
return false;
|
||||
}
|
||||
auto unpacked_log_info = log_info.unpack();
|
||||
|
||||
if (unpacked_log_info[LogInfo::RESPONSE_CODE] == rate_limit_status_code) {
|
||||
return sendRateLimitLog(unpacked_log_info);
|
||||
}
|
||||
return sendLog(unpacked_log_info);
|
||||
}
|
||||
|
||||
bool
|
||||
sendErrorLog(const string &log)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Error log" << log;
|
||||
Maybe<EnumArray<LogInfo, string>> log_info = parseErrorLog(log);
|
||||
if (!log_info.ok()) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||
<< "Failed parsing the NGINX logs. Error: "
|
||||
<< log_info.getErr();
|
||||
return false;
|
||||
}
|
||||
return sendLog(log_info.unpack());
|
||||
}
|
||||
|
||||
bool
|
||||
isAccessLog(const string &log) const
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Chekck if string contains \"accessLog\"" << log;
|
||||
return log.find("accessLog") != string::npos;
|
||||
}
|
||||
|
||||
bool
|
||||
isAlertErrorLog(const string &log) const
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Check if log is of type 'error log'. Log: " << log;
|
||||
return log.find("[alert]") != string::npos;
|
||||
}
|
||||
|
||||
bool
|
||||
isErrorLog(const string &log) const
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Check if log is of type 'error log'. Log: " << log;
|
||||
return log.find("[error]") != string::npos;
|
||||
}
|
||||
|
||||
bool
|
||||
sendLog(const EnumArray<LogInfo, string> &log_info)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
string event_name;
|
||||
switch (log_info[LogInfo::RESPONSE_CODE][0]) {
|
||||
case '4': {
|
||||
event_name = "Invalid request or incorrect reverse proxy configuration - Request dropped."
|
||||
" Please check the reverse proxy configuration of your relevant assets";
|
||||
break;
|
||||
}
|
||||
case '5': {
|
||||
event_name = "AppSec Gateway reverse proxy error - Request dropped. "
|
||||
"Please verify the reverse proxy configuration of your relevant assets. "
|
||||
"If the issue persists please contact Check Point Support";
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
dbgError(D_NGINX_MESSAGE_READER) << "Irrelevant status code";
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||
<< "Nginx log's event name and response code: "
|
||||
<< event_name
|
||||
<< ", "
|
||||
<< log_info[LogInfo::RESPONSE_CODE];
|
||||
LogGen log(
|
||||
event_name,
|
||||
ReportIS::Audience::SECURITY,
|
||||
ReportIS::Severity::INFO,
|
||||
ReportIS::Priority::LOW,
|
||||
ReportIS::Tags::REVERSE_PROXY
|
||||
);
|
||||
log << LogField("eventConfidence", "High");
|
||||
|
||||
for (LogInfo field : makeRange<LogInfo>()) {
|
||||
Maybe<string> string_field = convertLogFieldToString(field);
|
||||
if (!string_field.ok()) {
|
||||
dbgDebug(D_NGINX_MESSAGE_READER) << "Enum field was not converted: " << string_field.getErr();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (field != LogInfo::DESTINATION_PORT) {
|
||||
log << LogField(string_field.unpack(), log_info[field]);
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
log << LogField(string_field.unpack(), stoi(log_info[field]));
|
||||
} catch (const exception &e) {
|
||||
dbgError(D_NGINX_MESSAGE_READER)
|
||||
<< "Unable to convert port to numeric value: "
|
||||
<< e.what();
|
||||
log << LogField(string_field.unpack(), 0);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
sendRateLimitLog(const EnumArray<LogInfo, string> &log_info)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER) << "Getting rate-limit rules of asset ID: " << log_info[LogInfo::ASSET_ID];
|
||||
|
||||
ScopedContext rate_limit_ctx;
|
||||
|
||||
rate_limit_ctx.registerValue<GenericConfigId>(AssetMatcher::ctx_key, log_info[LogInfo::ASSET_ID]);
|
||||
auto rate_limit_config = getConfiguration<RateLimitConfig>("rulebase", "rateLimit");
|
||||
if (!rate_limit_config.ok()) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||
<< "Rate limit context does not match asset ID: " << log_info[LogInfo::ASSET_ID];
|
||||
return false;
|
||||
}
|
||||
RateLimitConfig unpacked_rate_limit_config = rate_limit_config.unpack();
|
||||
|
||||
string nginx_uri = log_info[LogInfo::URI];
|
||||
const LogTriggerConf &rate_limit_trigger = unpacked_rate_limit_config.getRateLimitTrigger(nginx_uri);
|
||||
|
||||
dbgTrace(D_NGINX_MESSAGE_READER)<< "About to generate NGINX rate-limit log";
|
||||
|
||||
string event_name = "Rate limit";
|
||||
string security_action = "Drop";
|
||||
bool is_log_required = false;
|
||||
|
||||
// Prevent events checkbox (in triggers)
|
||||
if (rate_limit_trigger.isPreventLogActive(LogTriggerConf::SecurityType::AccessControl)) {
|
||||
is_log_required = true;
|
||||
}
|
||||
|
||||
if (!is_log_required) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Not sending NGINX rate-limit log as it is not required";
|
||||
return false;
|
||||
}
|
||||
|
||||
ostringstream src_ip;
|
||||
ostringstream dst_ip;
|
||||
src_ip << log_info[LogInfo::SOURCE];
|
||||
dst_ip << log_info[LogInfo::DESTINATION_IP];
|
||||
|
||||
ReportIS::Severity log_severity = ReportIS::Severity::MEDIUM;
|
||||
ReportIS::Priority log_priority = ReportIS::Priority::MEDIUM;
|
||||
|
||||
LogGen log = rate_limit_trigger(
|
||||
event_name,
|
||||
LogTriggerConf::SecurityType::AccessControl,
|
||||
log_severity,
|
||||
log_priority,
|
||||
true, // is drop
|
||||
LogField("practiceType", "Rate Limit"),
|
||||
ReportIS::Tags::RATE_LIMIT
|
||||
);
|
||||
|
||||
for (LogInfo field : makeRange<LogInfo>()) {
|
||||
Maybe<string> string_field = convertLogFieldToString(field);
|
||||
if (!string_field.ok()) {
|
||||
dbgDebug(D_NGINX_MESSAGE_READER) << "Enum field was not converted: " << string_field.getErr();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (
|
||||
field == LogInfo::HOST ||
|
||||
field == LogInfo::URI ||
|
||||
field == LogInfo::HTTP_METHOD ||
|
||||
field == LogInfo::SOURCE ||
|
||||
field == LogInfo::DESTINATION_IP ||
|
||||
field == LogInfo::ASSET_ID ||
|
||||
field == LogInfo::ASSET_NAME ||
|
||||
field == LogInfo::RESPONSE_CODE
|
||||
) {
|
||||
if (!log_info[field].empty()) {
|
||||
log << LogField(string_field.unpack(), log_info[field]);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (field == LogInfo::DESTINATION_PORT) {
|
||||
try {
|
||||
int numeric_dst_port = stoi(log_info[field]);
|
||||
log << LogField(string_field.unpack(), numeric_dst_port);
|
||||
} catch (const exception &e) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||
<< "Unable to convert dst port: "
|
||||
<< log_info[field]
|
||||
<< " to numberic value. Error: "
|
||||
<< e.what();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Maps each LogInfo enum value to the log-schema field name it is reported
// under. Returns an error for LogInfo::COUNT (a sentinel, not a real field) and
// for any out-of-range value.
Maybe<string>
convertLogFieldToString(LogInfo field)
{
    dbgFlow(D_NGINX_MESSAGE_READER);
    const char *field_name = nullptr;
    switch (field) {
        case LogInfo::HTTP_METHOD:      field_name = "httpMethod";       break;
        case LogInfo::URI:              field_name = "httpUriPath";      break;
        case LogInfo::RESPONSE_CODE:    field_name = "httpResponseCode"; break;
        case LogInfo::HOST:             field_name = "httpHostName";     break;
        case LogInfo::SOURCE:           field_name = "httpSourceId";     break;
        case LogInfo::DESTINATION_IP:   field_name = "destinationIp";    break;
        case LogInfo::DESTINATION_PORT: field_name = "destinationPort";  break;
        case LogInfo::ASSET_ID:         field_name = "assetId";          break;
        case LogInfo::ASSET_NAME:       field_name = "assetName";        break;
        case LogInfo::EVENT_MESSAGE:    field_name = "httpResponseBody"; break;
        case LogInfo::RULE_ID:          field_name = "ruleId";           break;
        case LogInfo::RULE_NAME:        field_name = "ruleName";         break;
        case LogInfo::COUNT:
            dbgError(D_NGINX_MESSAGE_READER) << "LogInfo::COUNT is not allowed";
            return genError("LogInfo::COUNT is not allowed");
    }
    if (field_name != nullptr) return string(field_name);

    dbgError(D_NGINX_MESSAGE_READER) << "No Enum found, int value: " << static_cast<int>(field);
    return genError("No Enum found");
}
|
||||
|
||||
// Splits a raw syslog buffer into individual records, cutting at every syslog
// header ("<PRI>Mmm dd hh:mm:ss host nginx:").
// Fixes:
// - When the buffer contains no syslog header at all, `pos` stayed 0 and the
//   final `substr(pos - 1)` underflowed (size_t), making substr() throw
//   std::out_of_range. The final cut now guards against pos == 0.
// - The match results were taken against a temporary produced by substr() that
//   was already destroyed when matcher.position() was read; the searched
//   substring is now kept in a named local so the match stays valid.
static vector<string>
separateLogs(const string &raw_logs_to_parse)
{
    dbgFlow(D_NGINX_MESSAGE_READER) << "separating logs. logs: " << raw_logs_to_parse;
    dbgTrace(D_NGINX_MESSAGE_READER) << "separateLogs start of function. Logs to parse: " << raw_logs_to_parse;
    boost::smatch matcher;
    vector<string> logs;

    if (raw_logs_to_parse.empty()) return logs;

    size_t pos = 0;
    string tail = raw_logs_to_parse;
    while (NGEN::Regex::regexSearch(__FILE__, __LINE__, tail, matcher, syslog_regex)) {
        if (pos == 0) {
            // Skip past the header at the very start of the buffer.
            dbgTrace(D_NGINX_MESSAGE_READER) << "separateLogs pos = 0";
            pos++;
        } else {
            auto log_length = matcher.position();
            logs.push_back(raw_logs_to_parse.substr(pos - 1, log_length));
            pos += log_length + 1;
        }
        tail = raw_logs_to_parse.substr(pos);
    }
    // Remainder after the last header; if nothing matched, take the whole buffer.
    logs.push_back(raw_logs_to_parse.substr(pos == 0 ? 0 : pos - 1));
    dbgTrace(D_NGINX_MESSAGE_READER) << "separateLogs end of function";

    return logs;
}
|
||||
|
||||
// Extracts (HTTP method, URI) from an error-log "request: ..." field, e.g.
// 'request: "GET /path HTTP/1.1"' -> ("GET", "/path").
// Fix: a malformed field may split into fewer than three tokens; indexing past
// the end of the vector was undefined behavior. Empty values are now returned
// instead, and validateLog() rejects them downstream.
static pair<string, string>
parseErrorLogRequestField(const string &request)
{
    dbgFlow(D_NGINX_MESSAGE_READER) << "parsing request field. request: " << request;
    string formatted_request = request;
    vector<string> result;
    boost::erase_all(formatted_request, "\"");
    boost::erase_all(formatted_request, "\n");
    boost::split(result, formatted_request, boost::is_any_of(" "), boost::token_compress_on);

    const size_t http_method_index = 1;
    const size_t uri_index = 2;
    if (result.size() <= uri_index) {
        dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected request field format: " << request;
        return pair<string, string>("", "");
    }
    return pair<string, string>(result[http_method_index], result[uri_index]);
}
|
||||
|
||||
// Extracts the value of a "key: value" error-log field, e.g.
// 'host: "example.com"' -> "example.com" (quotes and newlines stripped).
// Fix: a field with no value splits into a single token; indexing result[1]
// was undefined behavior. An empty string is now returned instead, and
// validateLog() rejects it downstream.
static string
parseErrorLogField(const string &field)
{
    dbgFlow(D_NGINX_MESSAGE_READER) << "parsing error log field " << field;
    string formatted_field = field;
    vector<string> result;
    boost::erase_all(formatted_field, "\"");
    boost::erase_all(formatted_field, "\n");
    boost::split(result, formatted_field, boost::is_any_of(" "), boost::token_compress_on);

    const size_t field_index = 1;
    if (result.size() <= field_index) {
        dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected error log field format: " << field;
        return string();
    }
    return result[field_index];
}
|
||||
|
||||
void
|
||||
addContextFieldsToLogInfo(EnumArray<LogInfo, string> &log_info)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
ScopedContext ctx;
|
||||
|
||||
try {
|
||||
ctx.registerValue<uint16_t>(
|
||||
HttpTransactionData::listening_port_ctx,
|
||||
static_cast<uint16_t>(stoi(log_info[LogInfo::DESTINATION_PORT]))
|
||||
);
|
||||
} catch (const exception &e) {
|
||||
dbgError(D_NGINX_MESSAGE_READER) << "Failed register values for context " << e.what();
|
||||
}
|
||||
ctx.registerValue<string>(HttpTransactionData::host_name_ctx, log_info[LogInfo::HOST]);
|
||||
ctx.registerValue<string>(HttpTransactionData::uri_ctx, log_info[LogInfo::URI]);
|
||||
auto rule_by_ctx = getConfiguration<BasicRuleConfig>("rulebase", "rulesConfig");
|
||||
if (!rule_by_ctx.ok()) {
|
||||
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||
<< "AssetId was not found by the given context. Reason: "
|
||||
<< rule_by_ctx.getErr();
|
||||
return;
|
||||
}
|
||||
|
||||
BasicRuleConfig context = rule_by_ctx.unpack();
|
||||
log_info[LogInfo::ASSET_ID] = context.getAssetId();
|
||||
log_info[LogInfo::ASSET_NAME] = context.getAssetName();
|
||||
log_info[LogInfo::RULE_ID] = context.getRuleId();
|
||||
log_info[LogInfo::RULE_NAME] = context.getRuleName();
|
||||
}
|
||||
|
||||
// Parses an nginx [alert]/[error] syslog record into a LogInfo field array.
// The capture-group indices below are tied to the layout of alert_log_regex /
// error_log_regex (both share the same group structure). Returns an error when
// the record does not match the expected format or fails validateLog().
Maybe<EnumArray<LogInfo, string>>
parseErrorLog(const string &log_line)
{
    dbgFlow(D_NGINX_MESSAGE_READER) << "Handling log line:" << log_line;
    string port;
    // All fields default to the empty string; only the ones parsed below are set.
    EnumArray<LogInfo, string> log_info(EnumArray<LogInfo, string>::Fill(), string(""));

    boost::smatch matcher;
    vector<string> result;
    // Alert and error records share the same group layout, so one parse path
    // serves both; only the regex differs.
    if (
        !NGEN::Regex::regexSearch(
            __FILE__,
            __LINE__,
            log_line,
            matcher,
            isAlertErrorLog(log_line) ? alert_log_regex : error_log_regex
        )
    ) {
        dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
        return genError("Unexpected nginx log format");
    }

    // Group indices within alert_log_regex / error_log_regex.
    const int event_message_index = 6;
    const int source_index = 7;
    const int request_index = 9;
    const int host_index = 11;
    string host = string(matcher[host_index].first, matcher[host_index].second);
    string source = string(matcher[source_index].first, matcher[source_index].second);
    string event_message = string(matcher[event_message_index].first, matcher[event_message_index].second);
    string request = string(matcher[request_index].first, matcher[request_index].second);

    // Strip the "key: " prefixes and split the request into method + URI.
    host = parseErrorLogField(host);
    source = parseErrorLogField(source);
    pair<string, string> parsed_request = parseErrorLogRequestField(request);
    string http_method = parsed_request.first;
    string uri = parsed_request.second;

    // Derive the destination port: explicit "ip:port" wins, otherwise infer
    // from the scheme (443 for https, 80 otherwise). Note: matcher is reused
    // here for a new search, discarding the previous match.
    if (NGEN::Regex::regexSearch(__FILE__, __LINE__, host, matcher, socket_address_regex)) {
        // These locals intentionally shadow the outer host_index with the
        // group indices of socket_address_regex.
        int host_index = 1;
        int port_index = 2;
        host = string(matcher[host_index].first, matcher[host_index].second);
        port = string(matcher[port_index].first, matcher[port_index].second);
    } else if (NGEN::Regex::regexSearch(__FILE__, __LINE__, host, matcher, boost::regex("https://"))) {
        port = "443";
    } else {
        port = "80";
    }

    log_info[LogInfo::HOST] = host;
    log_info[LogInfo::URI] = uri;
    // Error records carry no status code; report them uniformly as 500.
    log_info[LogInfo::RESPONSE_CODE] = "500";
    log_info[LogInfo::HTTP_METHOD] = http_method;
    log_info[LogInfo::SOURCE] = source;
    log_info[LogInfo::DESTINATION_IP] = host;
    log_info[LogInfo::DESTINATION_PORT] = port;
    log_info[LogInfo::EVENT_MESSAGE] = event_message;

    // Fill asset/rule fields from the rulebase context.
    addContextFieldsToLogInfo(log_info);

    if (!validateLog(log_info)) {
        dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
        return genError("Unexpected nginx log format");
    }

    return log_info;
}
|
||||
|
||||
// Parses an nginx access-log syslog record into a LogInfo field array. The
// record is tokenized on spaces after stripping quotes and newlines; the field
// positions below are fixed by the access-log format. Returns an error when the
// record is too short or fails validateLog().
Maybe<EnumArray<LogInfo, string>>
parseAccessLog(const string &log_line)
{
    dbgFlow(D_NGINX_MESSAGE_READER) << "Parsing log line: " << log_line;
    string sanitized_log = log_line;
    boost::erase_all(sanitized_log, "\"");
    boost::erase_all(sanitized_log, "\n");
    vector<string> tokens;
    boost::split(tokens, sanitized_log, boost::is_any_of(" "), boost::token_compress_on);

    const size_t valid_log_size = 20;
    if (tokens.size() < valid_log_size) {
        dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
        return genError("Unexpected nginx log format");
    }

    // Fixed token positions in the syslog-wrapped access-log line.
    const size_t host_index = 6;
    const size_t host_port_index = 7;
    const size_t source_index = 8;
    const size_t http_method_index = 13;
    const size_t uri_index = 14;
    const size_t response_cod_index = 16;

    EnumArray<LogInfo, string> log_info(EnumArray<LogInfo, string>::Fill(), string(""));
    log_info[LogInfo::HOST] = tokens[host_index];
    log_info[LogInfo::URI] = tokens[uri_index];
    log_info[LogInfo::RESPONSE_CODE] = tokens[response_cod_index];
    log_info[LogInfo::HTTP_METHOD] = tokens[http_method_index];
    log_info[LogInfo::SOURCE] = tokens[source_index];
    log_info[LogInfo::DESTINATION_IP] = tokens[host_index];
    log_info[LogInfo::DESTINATION_PORT] = tokens[host_port_index];
    log_info[LogInfo::EVENT_MESSAGE] = "Invalid request or incorrect reverse proxy configuration - "
        "Request dropped. Please check the reverse proxy configuration of your relevant assets";

    // Fill asset/rule fields from the rulebase context.
    addContextFieldsToLogInfo(log_info);

    if (!validateLog(log_info)) {
        dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
        return genError("Unexpected nginx log format");
    }
    return log_info;
}
|
||||
|
||||
static bool
|
||||
validateLog(const EnumArray<LogInfo, string> &log_info)
|
||||
{
|
||||
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||
|
||||
boost::smatch matcher;
|
||||
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::HOST], matcher, server_regex)) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate server (Host): " << log_info[LogInfo::HOST];
|
||||
return false;
|
||||
}
|
||||
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::URI], matcher, uri_regex)) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate Uri: " << log_info[LogInfo::URI];
|
||||
return false;
|
||||
}
|
||||
|
||||
if (
|
||||
!NGEN::Regex::regexSearch(
|
||||
__FILE__,
|
||||
__LINE__,
|
||||
log_info[LogInfo::RESPONSE_CODE],
|
||||
matcher, response_code_regex
|
||||
)
|
||||
) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||
<< "Could not validate response code: "
|
||||
<< log_info[LogInfo::RESPONSE_CODE];
|
||||
return false;
|
||||
}
|
||||
|
||||
if (
|
||||
!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::HTTP_METHOD], matcher, http_method_regex)
|
||||
) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate HTTP method: " << log_info[LogInfo::HTTP_METHOD];
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::DESTINATION_PORT], matcher, port_regex)) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||
<< "Could not validate destination port : "
|
||||
<< log_info[LogInfo::DESTINATION_PORT];
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::SOURCE], matcher, server_regex)) {
|
||||
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate source : " << log_info[LogInfo::SOURCE];
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Reads all pending data from the syslog socket and returns it as a string.
// Returns an error when the socket read fails.
// Fixes: removed the `return move(local)` pessimization (a plain return still
// moves into the Maybe<string> and keeps copy elision available), and avoided
// calling unpack() twice on the received buffer.
Maybe<string>
getLogsFromSocket(const I_Socket::socketFd &client_socket) const
{
    dbgFlow(D_NGINX_MESSAGE_READER) << "Reading logs from socket. fd: " << client_socket;
    I_Socket *i_socket = Singleton::Consume<I_Socket>::by<NginxMessageReader>();
    Maybe<vector<char>> raw_log_data = i_socket->receiveData(client_socket, 0, false);
    if (!raw_log_data.ok()) {
        dbgWarning(D_NGINX_MESSAGE_READER) << "Error receiving data from socket";
        return genError("Error receiving data from socket");
    }

    const vector<char> &data = raw_log_data.unpack();
    return string(data.begin(), data.end());
}
|
||||
|
||||
I_Socket::socketFd syslog_server_socket = -1;
|
||||
string rate_limit_status_code = "429";
|
||||
};
|
||||
|
||||
// NginxMessageReader is a pimpl facade: construction allocates the private
// implementation, and all lifecycle calls are forwarded to it.
NginxMessageReader::NginxMessageReader() : Component("NginxMessageReader"), pimpl(make_unique<Impl>()) {}

// Defined out-of-line so ~unique_ptr<Impl> instantiates where Impl is complete.
NginxMessageReader::~NginxMessageReader() {}

// Forwards component initialization (syslog socket + log processing) to Impl.
void
NginxMessageReader::init()
{
    pimpl->init();
}

// Forwards configuration-hook registration to Impl.
void
NginxMessageReader::preload()
{
    pimpl->preload();
}

// Forwards component teardown (socket release) to Impl.
void
NginxMessageReader::fini()
{
    pimpl->fini();
}
|
||||
@@ -5,3 +5,4 @@ add_subdirectory(local_policy_mgmt_gen)
|
||||
add_subdirectory(orchestration)
|
||||
add_subdirectory(rate_limit)
|
||||
add_subdirectory(waap)
|
||||
add_subdirectory(central_nginx_manager)
|
||||
|
||||
3
components/security_apps/central_nginx_manager/CMakeLists.txt
Executable file
3
components/security_apps/central_nginx_manager/CMakeLists.txt
Executable file
@@ -0,0 +1,3 @@
|
||||
include_directories(include)
|
||||
|
||||
add_library(central_nginx_manager central_nginx_manager.cc lets_encrypt_listener.cc)
|
||||
418
components/security_apps/central_nginx_manager/central_nginx_manager.cc
Executable file
418
components/security_apps/central_nginx_manager/central_nginx_manager.cc
Executable file
@@ -0,0 +1,418 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "central_nginx_manager.h"
|
||||
#include "lets_encrypt_listener.h"
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <cereal/external/base64.hpp>
|
||||
|
||||
#include "debug.h"
|
||||
#include "config.h"
|
||||
#include "rest.h"
|
||||
#include "log_generator.h"
|
||||
#include "nginx_utils.h"
|
||||
#include "agent_core_utilities.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_NGINX_MANAGER);
|
||||
|
||||
// Deserializes the centrally-managed NGINX configuration from the policy JSON
// and applies it: injects the attachment module, a shared per-server include,
// and (optionally) a syslog error_log directive, validating the result with
// `nginx -t` style checks after each mutation.
class CentralNginxConfig
{
public:
    // Cereal entry point. Reads {id, name, data(base64)} and, when the decoded
    // configuration is non-empty, runs the full configuration pipeline.
    // On a cereal error the archive cursor is reset so sibling fields can load.
    void load(cereal::JSONInputArchive &ar)
    {
        try {
            string nginx_conf_base64;
            ar(cereal::make_nvp("id", file_id));
            ar(cereal::make_nvp("name", file_name));
            ar(cereal::make_nvp("data", nginx_conf_base64));
            nginx_conf_content = cereal::base64::decode(nginx_conf_base64);
            central_nginx_conf_path = getCentralNginxConfPath();
            shared_config_path = getSharedConfigPath();
            if (!nginx_conf_content.empty()) configureCentralNginx();
        } catch (const cereal::Exception &e) {
            dbgDebug(D_NGINX_MANAGER) << "Could not load Central Management Config JSON. Error: " << e.what();
            ar.setNextName(nullptr);
        }
    }

    // Accessors for the deserialized file metadata and (possibly rewritten) content.
    const string & getFileId() const { return file_id; }
    const string & getFileName() const { return file_name; }
    const string & getFileContent() const { return nginx_conf_content; }

    // Path where the downloaded central NGINX configuration is staged
    // (agent-setting override, defaults to /tmp/central_nginx.conf).
    static string
    getCentralNginxConfPath()
    {
        string central_nginx_conf_path = getProfileAgentSettingWithDefault<string>(
            string("/tmp/central_nginx.conf"),
            "centralNginxManagement.confDownloadPath"
        );
        dbgInfo(D_NGINX_MANAGER) << "Central NGINX configuration path: " << central_nginx_conf_path;

        return central_nginx_conf_path;
    }

    // Path of the shared snippet that gets `include`d into every server block.
    static string
    getSharedConfigPath()
    {
        string central_shared_conf_path = getConfigurationWithDefault<string>(
            "/etc/cp/conf",
            "Config Component",
            "configuration path"
        );
        central_shared_conf_path += "/centralNginxManager/shared/central_nginx_shared.conf";
        dbgInfo(D_NGINX_MANAGER) << "Shared NGINX configuration path: " << central_shared_conf_path;

        return central_shared_conf_path;
    }

private:
    // Prepends a load_module directive for the attachment module when the .so
    // exists on disk and the directive is not already present. No-op otherwise.
    void
    loadAttachmentModule()
    {
        string attachment_module_path = NginxUtils::getModulesPath() + "/ngx_cp_attachment_module.so";
        if (!NGEN::Filesystem::exists(attachment_module_path)) {
            dbgTrace(D_NGINX_MANAGER) << "Attachment module " << attachment_module_path << " does not exist";
            return;
        }

        string attachment_module_conf = "load_module " + attachment_module_path + ";";
        if (nginx_conf_content.find(attachment_module_conf) != string::npos) {
            dbgTrace(D_NGINX_MANAGER) << "Attachment module " << attachment_module_path << " already loaded";
            return;
        }

        nginx_conf_content = attachment_module_conf + "\n" + nginx_conf_content;
    }

    // Appends a directive to the shared config file (idempotently), then
    // validates the central configuration. On validation failure the shared
    // file is restored from the .bak copy taken at the start.
    Maybe<void>
    loadSharedDirective(const string &directive)
    {
        dbgFlow(D_NGINX_MANAGER) << "Loading shared directive into the servers " << directive;

        // Backup first so a failed validation below can roll back.
        if (!NGEN::Filesystem::copyFile(shared_config_path, shared_config_path + ".bak", true)) {
            return genError("Could not create a backup of the shared NGINX configuration file");
        }

        ifstream shared_config(shared_config_path);
        if (!shared_config.is_open()) {
            return genError("Could not open shared NGINX configuration file");
        }

        string shared_config_content((istreambuf_iterator<char>(shared_config)), istreambuf_iterator<char>());
        shared_config.close();

        // Idempotence: don't append a directive that is already there.
        if (shared_config_content.find(directive) != string::npos) {
            dbgTrace(D_NGINX_MANAGER) << "Shared directive " << directive << " already loaded";
            return {};
        }

        ofstream new_shared_config(shared_config_path, ios::app);
        if (!new_shared_config.is_open()) {
            return genError("Could not open shared NGINX configuration file");
        }

        dbgTrace(D_NGINX_MANAGER) << "Adding shared directive " << directive;
        new_shared_config << directive << "\n";
        new_shared_config.close();

        auto validation = NginxUtils::validateNginxConf(central_nginx_conf_path);
        if (!validation.ok()) {
            // Roll back the shared file so a bad directive can't break nginx.
            if (!NGEN::Filesystem::copyFile(shared_config_path + ".bak", shared_config_path, true)) {
                return genError("Could not restore the shared NGINX configuration file");
            }
            return genError("Could not validate shared NGINX configuration file. Error: " + validation.getErr());
        }

        return {};
    }

    // Creates (truncates) the shared config file and injects an `include` of it
    // at the top of every `server {` block of the central configuration, then
    // writes the rewritten configuration to disk and validates it.
    Maybe<void>
    loadSharedConfig()
    {
        dbgFlow(D_NGINX_MANAGER) << "Loading shared configuration into the servers";

        // Ensure the shared file exists (and starts empty) before including it.
        ofstream shared_config(shared_config_path);
        if (!shared_config.is_open()) {
            return genError("Could not create shared NGINX configuration file");
        }
        shared_config.close();

        string shared_config_directive = "include " + shared_config_path + ";\n";
        // NOTE(review): this replaces every textual "server {" occurrence,
        // presumably only real server blocks appear in practice - verify that
        // commented-out blocks cannot occur in managed configurations.
        boost::regex server_regex("server\\s*\\{");
        nginx_conf_content = NGEN::Regex::regexReplace(
            __FILE__,
            __LINE__,
            nginx_conf_content,
            server_regex,
            "server {\n" + shared_config_directive
        );

        ofstream nginx_conf_file(central_nginx_conf_path);
        if (!nginx_conf_file.is_open()) {
            return genError("Could not open a temporary central NGINX configuration file");
        }
        nginx_conf_file << nginx_conf_content;
        nginx_conf_file.close();

        auto validation = NginxUtils::validateNginxConf(central_nginx_conf_path);
        if (!validation.ok()) {
            return genError("Could not validate central NGINX configuration file. Error: " + validation.getErr());
        }

        return {};
    }

    // When enabled via agent settings, routes nginx error logs to the local
    // syslog endpoint consumed by NginxMessageReader (127.0.0.1:1514).
    Maybe<void>
    configureSyslog()
    {
        if (!getProfileAgentSettingWithDefault<bool>(false, "centralNginxManagement.syslogEnabled")) {
            dbgTrace(D_NGINX_MANAGER) << "Syslog is disabled via settings";
            return {};
        }

        string syslog_directive = "error_log syslog:server=127.0.0.1:1514 warn;";
        auto load_shared_directive_result = loadSharedDirective(syslog_directive);
        if (!load_shared_directive_result.ok()) {
            return genError("Could not configure syslog directive, error: " + load_shared_directive_result.getErr());
        }

        return {};
    }

    // Persists the pre-rewrite configuration to "<conf>.base" so the original
    // content is recoverable after the shared-include rewrite.
    Maybe<void>
    saveBaseCentralNginxConf()
    {
        ofstream central_nginx_conf_base_file(central_nginx_conf_path + ".base");
        if (!central_nginx_conf_base_file.is_open()) {
            return genError("Could not open a temporary central NGINX configuration file");
        }
        central_nginx_conf_base_file << nginx_conf_content;
        central_nginx_conf_base_file.close();

        return {};
    }

    // Full pipeline: attachment module -> save .base snapshot -> shared-include
    // rewrite (reverted in memory on failure) -> optional syslog directive.
    void
    configureCentralNginx()
    {
        loadAttachmentModule();
        auto save_base_nginx_conf = saveBaseCentralNginxConf();
        if (!save_base_nginx_conf.ok()) {
            dbgWarning(D_NGINX_MANAGER)
                << "Could not save base NGINX configuration. Error: "
                << save_base_nginx_conf.getErr();
            return;
        }

        // Keep an in-memory backup so a failed rewrite leaves content intact.
        string nginx_conf_content_backup = nginx_conf_content;
        auto shared_config_result = loadSharedConfig();
        if (!shared_config_result.ok()) {
            dbgWarning(D_NGINX_MANAGER)
                << "Could not load shared configuration. Error: "
                << shared_config_result.getErr();
            nginx_conf_content = nginx_conf_content_backup;
            return;
        }

        auto syslog_result = configureSyslog();
        if (!syslog_result.ok()) {
            // Best-effort: syslog failure is logged but does not revert the config.
            dbgWarning(D_NGINX_MANAGER) << "Could not configure syslog. Error: " << syslog_result.getErr();
        }
    }

    string file_id;                  // "id" field from the policy JSON
    string file_name;                // "name" field from the policy JSON
    string nginx_conf_content;       // decoded (and possibly rewritten) nginx.conf
    string central_nginx_conf_path;  // staging path of the central configuration
    string shared_config_path;       // path of the shared per-server snippet
};
|
||||
|
||||
class CentralNginxManager::Impl
|
||||
{
|
||||
public:
|
||||
void
|
||||
init()
|
||||
{
|
||||
dbgInfo(D_NGINX_MANAGER) << "Starting Central NGINX Manager";
|
||||
|
||||
string main_nginx_conf_path = NginxUtils::getMainNginxConfPath();
|
||||
if (
|
||||
NGEN::Filesystem::exists(main_nginx_conf_path)
|
||||
&& !NGEN::Filesystem::exists(main_nginx_conf_path + ".orig")
|
||||
) {
|
||||
dbgInfo(D_NGINX_MANAGER) << "Creating a backup of the original main NGINX configuration file";
|
||||
NGEN::Filesystem::copyFile(main_nginx_conf_path, main_nginx_conf_path + ".orig", true);
|
||||
}
|
||||
|
||||
i_mainloop = Singleton::Consume<I_MainLoop>::by<CentralNginxManager>();
|
||||
if (!lets_encrypt_listener.init()) {
|
||||
dbgWarning(D_NGINX_MANAGER) << "Could not start Lets Encrypt Listener, scheduling retry";
|
||||
i_mainloop->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::System,
|
||||
[this] ()
|
||||
{
|
||||
while(!lets_encrypt_listener.init()) {
|
||||
dbgWarning(D_NGINX_MANAGER) << "Could not start Lets Encrypt Listener, will retry";
|
||||
i_mainloop->yield(chrono::seconds(5));
|
||||
}
|
||||
},
|
||||
"Lets Encrypt Listener initializer",
|
||||
false
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
loadPolicy()
|
||||
{
|
||||
auto central_nginx_config = getSetting<vector<CentralNginxConfig>>("centralNginxManagement");
|
||||
if (!central_nginx_config.ok() || central_nginx_config.unpack().empty()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not load Central NGINX Management settings. Error: "
|
||||
<< central_nginx_config.getErr();
|
||||
return;
|
||||
}
|
||||
|
||||
auto &config = central_nginx_config.unpack().front();
|
||||
if (config.getFileContent().empty()) {
|
||||
dbgWarning(D_NGINX_MANAGER) << "Empty NGINX configuration file";
|
||||
return;
|
||||
}
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER)
|
||||
<< "Handling Central NGINX Management settings: "
|
||||
<< config.getFileId()
|
||||
<< ", "
|
||||
<< config.getFileName()
|
||||
<< ", "
|
||||
<< config.getFileContent();
|
||||
|
||||
string central_nginx_conf_path = config.getCentralNginxConfPath();
|
||||
ofstream central_nginx_conf_file(central_nginx_conf_path);
|
||||
if (!central_nginx_conf_file.is_open()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not open central NGINX configuration file: "
|
||||
<< central_nginx_conf_path;
|
||||
return;
|
||||
}
|
||||
central_nginx_conf_file << config.getFileContent();
|
||||
central_nginx_conf_file.close();
|
||||
|
||||
auto validation_result = NginxUtils::validateNginxConf(central_nginx_conf_path);
|
||||
if (!validation_result.ok()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not validate central NGINX configuration file. Error: "
|
||||
<< validation_result.getErr();
|
||||
logError(validation_result.getErr());
|
||||
return;
|
||||
}
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER) << "Validated central NGINX configuration file";
|
||||
|
||||
auto reload_result = NginxUtils::reloadNginx(central_nginx_conf_path);
|
||||
if (!reload_result.ok()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not reload central NGINX configuration. Error: "
|
||||
<< reload_result.getErr();
|
||||
logError("Could not reload central NGINX configuration. Error: " + reload_result.getErr());
|
||||
return;
|
||||
}
|
||||
|
||||
logInfo("Central NGINX configuration has been successfully reloaded");
|
||||
}
|
||||
|
||||
void
|
||||
fini()
|
||||
{
|
||||
string central_nginx_base_path = CentralNginxConfig::getCentralNginxConfPath() + ".base";
|
||||
if (!NGEN::Filesystem::exists(central_nginx_base_path)) {
|
||||
dbgWarning(D_NGINX_MANAGER) << "Could not find base NGINX configuration file: " << central_nginx_base_path;
|
||||
return;
|
||||
}
|
||||
|
||||
NginxUtils::reloadNginx(central_nginx_base_path);
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
logError(const string &error)
|
||||
{
|
||||
LogGen log(
|
||||
error,
|
||||
ReportIS::Level::ACTION,
|
||||
ReportIS::Audience::SECURITY,
|
||||
ReportIS::Severity::CRITICAL,
|
||||
ReportIS::Priority::URGENT,
|
||||
ReportIS::Tags::POLICY_INSTALLATION
|
||||
);
|
||||
|
||||
log.addToOrigin(LogField("eventTopic", "Central NGINX Management"));
|
||||
log << LogField("notificationId", "4165c3b1-e9bc-44c3-888b-863e204c1bfb");
|
||||
log << LogField(
|
||||
"eventRemediation",
|
||||
"Please verify your NGINX configuration and enforce policy again. "
|
||||
"Contact Check Point support if the issue persists."
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
logInfo(const string &info)
|
||||
{
|
||||
LogGen log(
|
||||
info,
|
||||
ReportIS::Level::ACTION,
|
||||
ReportIS::Audience::SECURITY,
|
||||
ReportIS::Severity::INFO,
|
||||
ReportIS::Priority::LOW,
|
||||
ReportIS::Tags::POLICY_INSTALLATION
|
||||
);
|
||||
|
||||
log.addToOrigin(LogField("eventTopic", "Central NGINX Management"));
|
||||
log << LogField("notificationId", "4165c3b1-e9bc-44c3-888b-863e204c1bfb");
|
||||
log << LogField("eventRemediation", "No action required");
|
||||
}
|
||||
|
||||
I_MainLoop *i_mainloop = nullptr;
|
||||
LetsEncryptListener lets_encrypt_listener;
|
||||
};
|
||||
|
||||
CentralNginxManager::CentralNginxManager()
|
||||
:
|
||||
Component("Central NGINX Manager"),
|
||||
pimpl(make_unique<CentralNginxManager::Impl>()) {}
|
||||
|
||||
CentralNginxManager::~CentralNginxManager() {}
|
||||
|
||||
void
|
||||
CentralNginxManager::init()
|
||||
{
|
||||
pimpl->init();
|
||||
}
|
||||
|
||||
void
|
||||
CentralNginxManager::fini()
|
||||
{
|
||||
pimpl->fini();
|
||||
}
|
||||
|
||||
void
|
||||
CentralNginxManager::preload()
|
||||
{
|
||||
registerExpectedSetting<vector<CentralNginxConfig>>("centralNginxManagement");
|
||||
registerExpectedConfiguration<string>("Config Component", "configuration path");
|
||||
registerConfigLoadCb([this]() { pimpl->loadPolicy(); });
|
||||
}
|
||||
@@ -0,0 +1,30 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef __LETS_ENCRYPT_HANDLER_H__
|
||||
#define __LETS_ENCRYPT_HANDLER_H__
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "maybe_res.h"
|
||||
|
||||
class LetsEncryptListener
|
||||
{
|
||||
public:
|
||||
bool init();
|
||||
|
||||
private:
|
||||
Maybe<std::string> getChallengeValue(const std::string &uri) const;
|
||||
};
|
||||
|
||||
#endif // __LETS_ENCRYPT_HANDLER_H__
|
||||
76
components/security_apps/central_nginx_manager/lets_encrypt_listener.cc
Executable file
76
components/security_apps/central_nginx_manager/lets_encrypt_listener.cc
Executable file
@@ -0,0 +1,76 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "lets_encrypt_listener.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "central_nginx_manager.h"
|
||||
#include "debug.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_NGINX_MANAGER);
|
||||
|
||||
bool
|
||||
LetsEncryptListener::init()
|
||||
{
|
||||
dbgInfo(D_NGINX_MANAGER) << "Starting Lets Encrypt Listener";
|
||||
return Singleton::Consume<I_RestApi>::by<CentralNginxManager>()->addWildcardGetCall(
|
||||
".well-known/acme-challenge/",
|
||||
[&] (const string &uri) -> string
|
||||
{
|
||||
Maybe<string> maybe_challenge_value = getChallengeValue(uri);
|
||||
if (!maybe_challenge_value.ok()) {
|
||||
dbgWarning(D_NGINX_MANAGER)
|
||||
<< "Could not get challenge value for uri: "
|
||||
<< uri
|
||||
<< ", error: "
|
||||
<< maybe_challenge_value.getErr();
|
||||
return string{""};
|
||||
};
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER) << "Got challenge value: " << maybe_challenge_value.unpack();
|
||||
return maybe_challenge_value.unpack();
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
LetsEncryptListener::getChallengeValue(const string &uri) const
|
||||
{
|
||||
string challenge_key = uri.substr(uri.find_last_of('/') + 1);
|
||||
string api_query = "/api/lets-encrypt-challenge?http_challenge_key=" + challenge_key;
|
||||
|
||||
dbgInfo(D_NGINX_MANAGER) << "Getting challenge value via: " << api_query;
|
||||
|
||||
MessageMetadata md;
|
||||
md.insertHeader("X-Tenant-Id", Singleton::Consume<I_AgentDetails>::by<CentralNginxManager>()->getTenantId());
|
||||
Maybe<HTTPResponse, HTTPResponse> maybe_http_challenge_value =
|
||||
Singleton::Consume<I_Messaging>::by<CentralNginxManager>()->sendSyncMessage(
|
||||
HTTPMethod::GET,
|
||||
api_query,
|
||||
string("{}"),
|
||||
MessageCategory::GENERIC,
|
||||
md
|
||||
);
|
||||
|
||||
if (!maybe_http_challenge_value.ok()) return genError(maybe_http_challenge_value.getErr().getBody());
|
||||
|
||||
string challenge_value = maybe_http_challenge_value.unpack().getBody();
|
||||
if (!challenge_value.empty() && challenge_value.front() == '"' && challenge_value.back() == '"') {
|
||||
challenge_value = challenge_value.substr(1, challenge_value.size() - 2);
|
||||
}
|
||||
|
||||
return challenge_value;
|
||||
}
|
||||
@@ -88,9 +88,17 @@ public:
|
||||
dbgWarning(D_GEO_FILTER) << "failed to get source ip from env";
|
||||
return EventVerdict(default_action);
|
||||
}
|
||||
|
||||
auto source_ip = convertIpAddrToString(maybe_source_ip.unpack());
|
||||
ip_set.insert(source_ip);
|
||||
|
||||
// saas profile setting
|
||||
bool ignore_source_ip =
|
||||
getProfileAgentSettingWithDefault<bool>(false, "agent.geoProtaction.ignoreSourceIP");
|
||||
if (ignore_source_ip){
|
||||
dbgDebug(D_GEO_FILTER) << "Geo protection ignoring source ip: " << source_ip;
|
||||
} else {
|
||||
ip_set.insert(convertIpAddrToString(maybe_source_ip.unpack()));
|
||||
}
|
||||
|
||||
|
||||
ngx_http_cp_verdict_e exception_verdict = getExceptionVerdict(ip_set);
|
||||
if (exception_verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT) {
|
||||
@@ -343,7 +351,7 @@ private:
|
||||
|
||||
auto asset_location = i_geo_location->lookupLocation(maybe_source_ip.unpack());
|
||||
if (!asset_location.ok()) {
|
||||
dbgWarning(D_GEO_FILTER) << "Lookup location failed for source: " <<
|
||||
dbgDebug(D_GEO_FILTER) << "Lookup location failed for source: " <<
|
||||
source <<
|
||||
", Error: " <<
|
||||
asset_location.getErr();
|
||||
|
||||
@@ -336,9 +336,16 @@ public:
|
||||
return metadata.getYear();
|
||||
}
|
||||
|
||||
bool
|
||||
isOk() const
|
||||
{
|
||||
return is_loaded;
|
||||
}
|
||||
|
||||
private:
|
||||
IPSSignatureMetaData metadata;
|
||||
std::shared_ptr<BaseSignature> rule;
|
||||
bool is_loaded;
|
||||
};
|
||||
|
||||
/// \class SignatureAndAction
|
||||
|
||||
@@ -219,10 +219,16 @@ IPSSignatureMetaData::getYear() const
|
||||
void
|
||||
CompleteSignature::load(cereal::JSONInputArchive &ar)
|
||||
{
|
||||
ar(cereal::make_nvp("protectionMetadata", metadata));
|
||||
RuleDetection rule_detection(metadata.getName());
|
||||
ar(cereal::make_nvp("detectionRules", rule_detection));
|
||||
rule = rule_detection.getRule();
|
||||
try {
|
||||
ar(cereal::make_nvp("protectionMetadata", metadata));
|
||||
RuleDetection rule_detection(metadata.getName());
|
||||
ar(cereal::make_nvp("detectionRules", rule_detection));
|
||||
rule = rule_detection.getRule();
|
||||
is_loaded = true;
|
||||
} catch (cereal::Exception &e) {
|
||||
is_loaded = false;
|
||||
dbgWarning(D_IPS) << "Failed to load signature: " << e.what();
|
||||
}
|
||||
}
|
||||
|
||||
MatchType
|
||||
@@ -367,7 +373,16 @@ SignatureAndAction::matchSilent(const Buffer &sample) const
|
||||
if (method.ok()) log << LogField("httpMethod", method.unpack());
|
||||
|
||||
auto path = env->get<Buffer>("HTTP_PATH_DECODED");
|
||||
if (path.ok()) log << LogField("httpUriPath", getSubString(path, 1536), LogFieldOption::XORANDB64);
|
||||
if (path.ok()) {
|
||||
log << LogField("httpUriPath", getSubString(path, 1536), LogFieldOption::XORANDB64);
|
||||
} else {
|
||||
auto transaction_path = env->get<string>(HttpTransactionData::uri_path_decoded);
|
||||
if (transaction_path.ok()) {
|
||||
auto uri_path = transaction_path.unpack();
|
||||
auto question_mark = uri_path.find('?');
|
||||
log << LogField("httpUriPath", uri_path.substr(0, question_mark), LogFieldOption::XORANDB64);
|
||||
}
|
||||
}
|
||||
|
||||
auto req_header = ips_state.getTransactionData(IPSCommonTypes::requests_header_for_log);
|
||||
if (req_header.ok()) log << LogField("httpRequestHeaders", getSubString(req_header), LogFieldOption::XORANDB64);
|
||||
@@ -485,13 +500,30 @@ SignatureAndAction::isMatchedPrevent(const Buffer &context_buffer, const set<PMP
|
||||
auto method = env->get<string>(HttpTransactionData::method_ctx);
|
||||
if (method.ok()) log << LogField("httpMethod", method.unpack());
|
||||
uint max_size = getConfigurationWithDefault<uint>(1536, "IPS", "Max Field Size");
|
||||
auto path = env->get<Buffer>("HTTP_PATH_DECODED");
|
||||
if (path.ok() && trigger.isWebLogFieldActive(url_path)) {
|
||||
log << LogField("httpUriPath", getSubString(path, max_size), LogFieldOption::XORANDB64);
|
||||
|
||||
if (trigger.isWebLogFieldActive(url_path)) {
|
||||
auto path = env->get<Buffer>("HTTP_PATH_DECODED");
|
||||
if (path.ok()) {
|
||||
log << LogField("httpUriPath", getSubString(path, max_size), LogFieldOption::XORANDB64);
|
||||
} else {
|
||||
auto transaction_path = env->get<string>(HttpTransactionData::uri_path_decoded);
|
||||
if (transaction_path.ok()) {
|
||||
auto uri_path = transaction_path.unpack();
|
||||
auto question_mark = uri_path.find('?');
|
||||
log << LogField("httpUriPath", uri_path.substr(0, question_mark), LogFieldOption::XORANDB64);
|
||||
}
|
||||
}
|
||||
}
|
||||
auto query = env->get<Buffer>("HTTP_QUERY_DECODED");
|
||||
if (query.ok() && trigger.isWebLogFieldActive(url_query)) {
|
||||
log << LogField("httpUriQuery", getSubString(query, max_size), LogFieldOption::XORANDB64);
|
||||
if (trigger.isWebLogFieldActive(url_query)) {
|
||||
auto query = env->get<Buffer>("HTTP_QUERY_DECODED");
|
||||
if (query.ok()) {
|
||||
log << LogField("httpUriQuery", getSubString(query, max_size), LogFieldOption::XORANDB64);
|
||||
} else {
|
||||
auto transaction_query = env->get<string>(HttpTransactionData::uri_query_decoded);
|
||||
if (transaction_query.ok()) {
|
||||
log << LogField("httpUriQuery", transaction_query.unpack());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
auto res_code = env->get<Buffer>("HTTP_RESPONSE_CODE");
|
||||
@@ -533,7 +565,9 @@ IPSSignaturesResource::load(cereal::JSONInputArchive &ar)
|
||||
|
||||
all_signatures.reserve(sigs.size());
|
||||
for (auto &sig : sigs) {
|
||||
all_signatures.emplace_back(make_shared<CompleteSignature>(move(sig)));
|
||||
if (sig.isOk()) {
|
||||
all_signatures.emplace_back(make_shared<CompleteSignature>(move(sig)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -104,6 +104,12 @@ public:
|
||||
cereal::JSONInputArchive ar(ss);
|
||||
high_medium_confidance_signatures.load(ar);
|
||||
}
|
||||
{
|
||||
stringstream ss;
|
||||
ss << "[" << signature_performance_high << ", " << signature_broken << "]";
|
||||
cereal::JSONInputArchive ar(ss);
|
||||
single_broken_signature.load(ar);
|
||||
}
|
||||
}
|
||||
|
||||
~SignatureTest()
|
||||
@@ -250,6 +256,7 @@ public:
|
||||
IPSSignaturesResource performance_signatures1;
|
||||
IPSSignaturesResource performance_signatures2;
|
||||
IPSSignaturesResource performance_signatures3;
|
||||
IPSSignaturesResource single_broken_signature;
|
||||
NiceMock<MockTable> table;
|
||||
MockAgg mock_agg;
|
||||
|
||||
@@ -483,6 +490,26 @@ private:
|
||||
"\"context\": [\"HTTP_REQUEST_BODY\", \"HTTP_RESPONSE_BODY\"]"
|
||||
"}"
|
||||
"}";
|
||||
|
||||
string signature_broken =
|
||||
"{"
|
||||
"\"protectionMetadata\": {"
|
||||
"\"protectionName\": \"BrokenTest\","
|
||||
"\"maintrainId\": \"101\","
|
||||
"\"severity\": \"Medium High\","
|
||||
"\"confidenceLevel\": \"Low\","
|
||||
"\"performanceImpact\": \"High\","
|
||||
"\"lastUpdate\": \"20210420\","
|
||||
"\"tags\": [],"
|
||||
"\"cveList\": []"
|
||||
"},"
|
||||
"\"detectionRules\": {"
|
||||
"\"type\": \"simple\","
|
||||
"\"SSM\": \"\","
|
||||
"\"keywosrds\": \"data: \\\"www\\\";\","
|
||||
"\"context\": [\"HTTP_REQUEST_BODY\", \"HTTP_RESPONSE_BODY\"]"
|
||||
"}"
|
||||
"}";
|
||||
};
|
||||
|
||||
TEST_F(SignatureTest, basic_load_of_signatures)
|
||||
@@ -665,3 +692,14 @@ TEST_F(SignatureTest, high_confidance_signatures_matching)
|
||||
expectLog("\"protectionId\": \"Test4\"", "\"matchedSignatureConfidence\": \"Medium\"");
|
||||
EXPECT_FALSE(checkData("mmm"));
|
||||
}
|
||||
|
||||
TEST_F(SignatureTest, broken_signature)
|
||||
{
|
||||
load(single_broken_signature, "Low or above", "Low");
|
||||
EXPECT_FALSE(checkData("ggg"));
|
||||
|
||||
expectLog("\"matchedSignaturePerformance\": \"High\"");
|
||||
EXPECT_TRUE(checkData("fff"));
|
||||
|
||||
EXPECT_FALSE(checkData("www"));
|
||||
}
|
||||
|
||||
@@ -22,4 +22,5 @@ add_library(local_policy_mgmt_gen
|
||||
access_control_practice.cc
|
||||
configmaps.cc
|
||||
reverse_proxy_section.cc
|
||||
policy_activation_data.cc
|
||||
)
|
||||
|
||||
@@ -497,7 +497,8 @@ WebAppSection::WebAppSection(
|
||||
const AppsecPracticeAntiBotSection &_anti_bots,
|
||||
const LogTriggerSection &parsed_log_trigger,
|
||||
const AppSecTrustedSources &parsed_trusted_sources,
|
||||
const NewAppSecWebAttackProtections &protections)
|
||||
const NewAppSecWebAttackProtections &protections,
|
||||
const vector<InnerException> &exceptions)
|
||||
:
|
||||
application_urls(_application_urls),
|
||||
asset_id(_asset_id),
|
||||
@@ -541,6 +542,10 @@ WebAppSection::WebAppSection(
|
||||
overrides.push_back(AppSecOverride(source_ident));
|
||||
}
|
||||
|
||||
for (const auto &exception : exceptions) {
|
||||
overrides.push_back(AppSecOverride(exception));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
@@ -298,7 +298,8 @@ public:
|
||||
const AppsecPracticeAntiBotSection &_anti_bots,
|
||||
const LogTriggerSection &parsed_log_trigger,
|
||||
const AppSecTrustedSources &parsed_trusted_sources,
|
||||
const NewAppSecWebAttackProtections &protections);
|
||||
const NewAppSecWebAttackProtections &protections,
|
||||
const std::vector<InnerException> &exceptions);
|
||||
|
||||
void save(cereal::JSONOutputArchive &out_ar) const;
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@ public:
|
||||
void init();
|
||||
|
||||
std::tuple<std::map<std::string, AppsecLinuxPolicy>, std::map<std::string, V1beta2AppsecLinuxPolicy>>
|
||||
createAppsecPoliciesFromIngresses();
|
||||
createAppsecPolicies();
|
||||
void getClusterId() const;
|
||||
|
||||
private:
|
||||
@@ -101,12 +101,18 @@ private:
|
||||
) const;
|
||||
|
||||
template<class T, class K>
|
||||
void createPolicy(
|
||||
void createPolicyFromIngress(
|
||||
T &appsec_policy,
|
||||
std::map<std::string, T> &policies,
|
||||
std::map<AnnotationKeys, std::string> &annotations_values,
|
||||
const SingleIngressData &item) const;
|
||||
|
||||
template<class T, class K>
|
||||
void createPolicyFromActivation(
|
||||
T &appsec_policy,
|
||||
std::map<std::string, T> &policies,
|
||||
const EnabledPolicy &policy) const;
|
||||
|
||||
std::tuple<Maybe<AppsecLinuxPolicy>, Maybe<V1beta2AppsecLinuxPolicy>> createAppsecPolicyK8s(
|
||||
const std::string &policy_name,
|
||||
const std::string &ingress_mode
|
||||
|
||||
@@ -0,0 +1,89 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef __POLICY_ACTIVATION_DATA_H__
|
||||
#define __POLICY_ACTIVATION_DATA_H__
|
||||
|
||||
#include <vector>
|
||||
#include <map>
|
||||
|
||||
#include "config.h"
|
||||
#include "debug.h"
|
||||
#include "rest.h"
|
||||
#include "cereal/archives/json.hpp"
|
||||
#include <cereal/types/map.hpp>
|
||||
#include "customized_cereal_map.h"
|
||||
|
||||
#include "local_policy_common.h"
|
||||
|
||||
class PolicyActivationMetadata
|
||||
{
|
||||
public:
|
||||
void load(cereal::JSONInputArchive &archive_in);
|
||||
|
||||
private:
|
||||
std::string name;
|
||||
};
|
||||
|
||||
class EnabledPolicy
|
||||
{
|
||||
public:
|
||||
void load(cereal::JSONInputArchive &archive_in);
|
||||
|
||||
const std::string & getName() const;
|
||||
const std::vector<std::string> & getHosts() const;
|
||||
|
||||
private:
|
||||
std::string name;
|
||||
std::vector<std::string> hosts;
|
||||
};
|
||||
|
||||
class PolicyActivationSpec
|
||||
{
|
||||
public:
|
||||
void load(cereal::JSONInputArchive &archive_in);
|
||||
|
||||
const std::vector<EnabledPolicy> & getPolicies() const;
|
||||
|
||||
private:
|
||||
std::string appsec_class_name;
|
||||
std::vector<EnabledPolicy> policies;
|
||||
};
|
||||
|
||||
class SinglePolicyActivationData
|
||||
{
|
||||
public:
|
||||
void load(cereal::JSONInputArchive &archive_in);
|
||||
|
||||
const PolicyActivationSpec & getSpec() const;
|
||||
|
||||
private:
|
||||
std::string api_version;
|
||||
std::string kind;
|
||||
PolicyActivationMetadata metadata;
|
||||
PolicyActivationSpec spec;
|
||||
};
|
||||
|
||||
class PolicyActivationData : public ClientRest
|
||||
{
|
||||
public:
|
||||
bool loadJson(const std::string &json);
|
||||
|
||||
const std::vector<SinglePolicyActivationData> & getItems() const;
|
||||
|
||||
private:
|
||||
std::string api_version;
|
||||
std::vector<SinglePolicyActivationData> items;
|
||||
};
|
||||
|
||||
#endif // __POLICY_ACTIVATION_DATA_H__
|
||||
@@ -32,6 +32,7 @@
|
||||
#include "i_messaging.h"
|
||||
#include "appsec_practice_section.h"
|
||||
#include "ingress_data.h"
|
||||
#include "policy_activation_data.h"
|
||||
#include "settings_section.h"
|
||||
#include "triggers_section.h"
|
||||
#include "local_policy_common.h"
|
||||
@@ -205,7 +206,8 @@ private:
|
||||
const RulesConfigRulebase& rule_config,
|
||||
const std::string &practice_id, const std::string &full_url,
|
||||
const std::string &default_mode,
|
||||
std::map<AnnotationTypes, std::string> &rule_annotations
|
||||
std::map<AnnotationTypes, std::string> &rule_annotations,
|
||||
std::vector<InnerException>
|
||||
);
|
||||
|
||||
void
|
||||
|
||||
@@ -577,7 +577,7 @@ K8sPolicyUtils::createAppsecPolicyK8s(const string &policy_name, const string &i
|
||||
|
||||
template<class T, class K>
|
||||
void
|
||||
K8sPolicyUtils::createPolicy(
|
||||
K8sPolicyUtils::createPolicyFromIngress(
|
||||
T &appsec_policy,
|
||||
map<std::string, T> &policies,
|
||||
map<AnnotationKeys, string> &annotations_values,
|
||||
@@ -615,10 +615,35 @@ K8sPolicyUtils::createPolicy(
|
||||
}
|
||||
}
|
||||
|
||||
std::tuple<map<string, AppsecLinuxPolicy>, map<string, V1beta2AppsecLinuxPolicy>>
|
||||
K8sPolicyUtils::createAppsecPoliciesFromIngresses()
|
||||
template<class T, class K>
|
||||
void
|
||||
K8sPolicyUtils::createPolicyFromActivation(
|
||||
T &appsec_policy,
|
||||
map<std::string, T> &policies,
|
||||
const EnabledPolicy &policy) const
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Getting all policy object from Ingresses";
|
||||
if (policies.find(policy.getName()) == policies.end()) {
|
||||
policies[policy.getName()] = appsec_policy;
|
||||
}
|
||||
auto default_mode = appsec_policy.getAppsecPolicySpec().getDefaultRule().getMode();
|
||||
|
||||
for (const string &host : policy.getHosts()) {
|
||||
if (!appsec_policy.getAppsecPolicySpec().isAssetHostExist(host)) {
|
||||
dbgTrace(D_LOCAL_POLICY)
|
||||
<< "Inserting Host data to the specific asset set:"
|
||||
<< "URL: '"
|
||||
<< host
|
||||
<< "'";
|
||||
K ingress_rule = K(host, default_mode);
|
||||
policies[policy.getName()].addSpecificRule(ingress_rule);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::tuple<map<string, AppsecLinuxPolicy>, map<string, V1beta2AppsecLinuxPolicy>>
|
||||
K8sPolicyUtils::createAppsecPolicies()
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Getting all policy object from Ingresses and PolicyActivation";
|
||||
map<string, AppsecLinuxPolicy> v1bet1_policies;
|
||||
map<string, V1beta2AppsecLinuxPolicy> v1bet2_policies;
|
||||
auto maybe_ingress = getObjectFromCluster<IngressData>("/apis/networking.k8s.io/v1/ingresses");
|
||||
@@ -628,7 +653,7 @@ K8sPolicyUtils::createAppsecPoliciesFromIngresses()
|
||||
dbgWarning(D_LOCAL_POLICY)
|
||||
<< "Failed to retrieve K8S Ingress configurations. Error: "
|
||||
<< maybe_ingress.getErr();
|
||||
return make_tuple(v1bet1_policies, v1bet2_policies);
|
||||
maybe_ingress = IngressData{};
|
||||
}
|
||||
|
||||
|
||||
@@ -658,19 +683,54 @@ K8sPolicyUtils::createAppsecPoliciesFromIngresses()
|
||||
|
||||
if (!std::get<0>(maybe_appsec_policy).ok()) {
|
||||
auto appsec_policy=std::get<1>(maybe_appsec_policy).unpack();
|
||||
createPolicy<V1beta2AppsecLinuxPolicy, NewParsedRule>(
|
||||
createPolicyFromIngress<V1beta2AppsecLinuxPolicy, NewParsedRule>(
|
||||
appsec_policy,
|
||||
v1bet2_policies,
|
||||
annotations_values,
|
||||
item);
|
||||
} else {
|
||||
auto appsec_policy=std::get<0>(maybe_appsec_policy).unpack();
|
||||
createPolicy<AppsecLinuxPolicy, ParsedRule>(
|
||||
createPolicyFromIngress<AppsecLinuxPolicy, ParsedRule>(
|
||||
appsec_policy,
|
||||
v1bet1_policies,
|
||||
annotations_values,
|
||||
item);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
string ns_suffix = getAppSecScopeType() == "namespaced" ? "ns" : "";
|
||||
string ns = getAppSecScopeType() == "namespaced" ? "namespaces/" : "";
|
||||
auto maybe_policy_activation = getObjectFromCluster<PolicyActivationData>(
|
||||
"/apis/openappsec.io/v1beta2/" + ns + agent_ns + "policyactivations" + ns_suffix
|
||||
);
|
||||
|
||||
if (!maybe_policy_activation.ok()) {
|
||||
dbgWarning(D_LOCAL_POLICY)
|
||||
<< "Failed to retrieve K8S PolicyActivation configurations. Error: "
|
||||
<< maybe_policy_activation.getErr();
|
||||
return make_tuple(v1bet1_policies, v1bet2_policies);
|
||||
}
|
||||
|
||||
PolicyActivationData policy_activation = maybe_policy_activation.unpack();
|
||||
for (const SinglePolicyActivationData &item : policy_activation.getItems()) {
|
||||
for (const auto &policy : item.getSpec().getPolicies()) {
|
||||
auto maybe_appsec_policy = createAppsecPolicyK8s(policy.getName(), "");
|
||||
|
||||
if (!std::get<1>(maybe_appsec_policy).ok()) {
|
||||
dbgWarning(D_LOCAL_POLICY)
|
||||
<< "Failed to create appsec policy. v1beta2 Error: "
|
||||
<< std::get<1>(maybe_appsec_policy).getErr();
|
||||
continue;
|
||||
} else {
|
||||
auto appsec_policy=std::get<1>(maybe_appsec_policy).unpack();
|
||||
createPolicyFromActivation<V1beta2AppsecLinuxPolicy, NewParsedRule>(
|
||||
appsec_policy,
|
||||
v1bet2_policies,
|
||||
policy);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return make_tuple(v1bet1_policies, v1bet2_policies);
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@
|
||||
#include "customized_cereal_map.h"
|
||||
#include "include/appsec_practice_section.h"
|
||||
#include "include/ingress_data.h"
|
||||
#include "include/policy_activation_data.h"
|
||||
#include "include/settings_section.h"
|
||||
#include "include/triggers_section.h"
|
||||
#include "include/local_policy_common.h"
|
||||
@@ -85,7 +86,7 @@ public:
|
||||
K8sPolicyUtils k8s_policy_utils;
|
||||
k8s_policy_utils.init();
|
||||
|
||||
auto appsec_policies = k8s_policy_utils.createAppsecPoliciesFromIngresses();
|
||||
auto appsec_policies = k8s_policy_utils.createAppsecPolicies();
|
||||
if (!std::get<0>(appsec_policies).empty()) {
|
||||
return policy_maker_utils.proccesMultipleAppsecPolicies<AppsecLinuxPolicy, ParsedRule>(
|
||||
std::get<0>(appsec_policies),
|
||||
|
||||
@@ -69,7 +69,7 @@ Identifier::load(cereal::JSONInputArchive &archive_in)
|
||||
dbgWarning(D_LOCAL_POLICY) << "AppSec identifier invalid: " << identifier;
|
||||
identifier = "sourceip";
|
||||
}
|
||||
parseMandatoryAppsecJSONKey<vector<string>>("value", value, archive_in);
|
||||
parseAppsecJSONKey<vector<string>>("value", value, archive_in);
|
||||
}
|
||||
|
||||
const string &
|
||||
|
||||
103
components/security_apps/local_policy_mgmt_gen/policy_activation_data.cc
Executable file
103
components/security_apps/local_policy_mgmt_gen/policy_activation_data.cc
Executable file
@@ -0,0 +1,103 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "policy_activation_data.h"
|
||||
#include "customized_cereal_map.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_LOCAL_POLICY);
|
||||
|
||||
void
|
||||
PolicyActivationMetadata::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "PolicyActivationMetadata load";
|
||||
parseAppsecJSONKey<string>("name", name, archive_in);
|
||||
}
|
||||
|
||||
void
|
||||
EnabledPolicy::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading policyActivation enabled policy";
|
||||
parseMandatoryAppsecJSONKey<vector<string>>("hosts", hosts, archive_in);
|
||||
parseAppsecJSONKey<string>("name", name, archive_in);
|
||||
}
|
||||
|
||||
const string &
|
||||
EnabledPolicy::getName() const
|
||||
{
|
||||
return name;
|
||||
}
|
||||
|
||||
const vector<string> &
|
||||
EnabledPolicy::getHosts() const
|
||||
{
|
||||
return hosts;
|
||||
}
|
||||
|
||||
void
|
||||
PolicyActivationSpec::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "PolicyActivationSpec load";
|
||||
parseAppsecJSONKey<string>("appsecClassName", appsec_class_name, archive_in);
|
||||
parseMandatoryAppsecJSONKey<vector<EnabledPolicy>>("enabledPolicies", policies, archive_in);
|
||||
}
|
||||
|
||||
const vector<EnabledPolicy> &
|
||||
PolicyActivationSpec::getPolicies() const
|
||||
{
|
||||
return policies;
|
||||
}
|
||||
|
||||
void
|
||||
SinglePolicyActivationData::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading single policy activation data";
|
||||
parseAppsecJSONKey<string>("apiVersion", api_version, archive_in);
|
||||
parseAppsecJSONKey<string>("kind", kind, archive_in);
|
||||
parseAppsecJSONKey<PolicyActivationMetadata>("metadata", metadata, archive_in);
|
||||
parseAppsecJSONKey<PolicyActivationSpec>("spec", spec, archive_in);
|
||||
}
|
||||
|
||||
const PolicyActivationSpec &
|
||||
SinglePolicyActivationData::getSpec() const
|
||||
{
|
||||
return spec;
|
||||
}
|
||||
|
||||
bool
|
||||
PolicyActivationData::loadJson(const string &json)
|
||||
{
|
||||
string modified_json = json;
|
||||
modified_json.pop_back();
|
||||
stringstream in;
|
||||
in.str(modified_json);
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading policy activations data";
|
||||
try {
|
||||
cereal::JSONInputArchive in_ar(in);
|
||||
in_ar(
|
||||
cereal::make_nvp("apiVersion", api_version),
|
||||
cereal::make_nvp("items", items)
|
||||
);
|
||||
} catch (cereal::Exception &e) {
|
||||
dbgError(D_LOCAL_POLICY) << "Failed to load policy activations data JSON. Error: " << e.what();
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
const vector<SinglePolicyActivationData> &
|
||||
PolicyActivationData::getItems() const
|
||||
{
|
||||
return items;
|
||||
}
|
||||
@@ -928,7 +928,6 @@ createMultiRulesSections(
|
||||
PracticeSection practice = PracticeSection(practice_id, practice_type, practice_name);
|
||||
vector<ParametersSection> exceptions_result;
|
||||
for (auto exception : exceptions) {
|
||||
|
||||
const auto &exception_name = exception.first;
|
||||
for (const auto &inner_exception : exception.second) {
|
||||
exceptions_result.push_back(ParametersSection(inner_exception.getBehaviorId(), exception_name));
|
||||
@@ -1220,7 +1219,8 @@ PolicyMakerUtils::createWebAppSection(
|
||||
const string &practice_id,
|
||||
const string &full_url,
|
||||
const string &default_mode,
|
||||
map<AnnotationTypes, string> &rule_annotations)
|
||||
map<AnnotationTypes, string> &rule_annotations,
|
||||
vector<InnerException> rule_inner_exceptions)
|
||||
{
|
||||
auto apssec_practice =
|
||||
getAppsecPracticeSpec<V1beta2AppsecLinuxPolicy, NewAppSecPracticeSpec>(
|
||||
@@ -1255,7 +1255,8 @@ PolicyMakerUtils::createWebAppSection(
|
||||
apssec_practice.getAntiBot(),
|
||||
log_triggers[rule_annotations[AnnotationTypes::TRIGGER]],
|
||||
trusted_sources[rule_annotations[AnnotationTypes::TRUSTED_SOURCES]],
|
||||
apssec_practice.getWebAttacks().getProtections()
|
||||
apssec_practice.getWebAttacks().getProtections(),
|
||||
rule_inner_exceptions
|
||||
);
|
||||
web_apps[rule_config.getAssetName()] = web_app;
|
||||
}
|
||||
@@ -1366,7 +1367,8 @@ PolicyMakerUtils::createThreatPreventionPracticeSections(
|
||||
practice_id,
|
||||
asset_name,
|
||||
default_mode,
|
||||
rule_annotations);
|
||||
rule_annotations,
|
||||
inner_exceptions[rule_annotations[AnnotationTypes::EXCEPTION]]);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@ add_subdirectory(details_resolver)
|
||||
add_subdirectory(health_check)
|
||||
add_subdirectory(health_check_manager)
|
||||
add_subdirectory(updates_process_reporter)
|
||||
add_subdirectory(env_details)
|
||||
add_subdirectory(external_sdk_server)
|
||||
|
||||
#add_subdirectory(orchestration_ut)
|
||||
|
||||
@@ -46,7 +46,7 @@ public:
|
||||
bool isReverseProxy() override;
|
||||
bool isCloudStorageEnabled() override;
|
||||
Maybe<tuple<string, string, string, string, string>> readCloudMetadata() override;
|
||||
Maybe<tuple<string, string, string>> parseNginxMetadata() override;
|
||||
Maybe<tuple<string, string, string, string>> parseNginxMetadata() override;
|
||||
#if defined(gaia) || defined(smb)
|
||||
bool compareCheckpointVersion(int cp_version, std::function<bool(int, int)> compare_operator) const override;
|
||||
#endif // gaia || smb
|
||||
@@ -80,7 +80,9 @@ DetailsResolver::Impl::getHostname()
|
||||
Maybe<string>
|
||||
DetailsResolver::Impl::getPlatform()
|
||||
{
|
||||
#if defined(gaia)
|
||||
#if defined(gaia_arm)
|
||||
return string("gaia_arm");
|
||||
#elif defined(gaia)
|
||||
return string("gaia");
|
||||
#elif defined(arm32_rpi)
|
||||
return string("glibc");
|
||||
@@ -228,7 +230,7 @@ isNoResponse(const string &cmd)
|
||||
return !res.ok() || res.unpack().empty();
|
||||
}
|
||||
|
||||
Maybe<tuple<string, string, string>>
|
||||
Maybe<tuple<string, string, string, string>>
|
||||
DetailsResolver::Impl::parseNginxMetadata()
|
||||
{
|
||||
auto output_path = getConfigurationWithDefault<string>(
|
||||
@@ -241,6 +243,11 @@ DetailsResolver::Impl::parseNginxMetadata()
|
||||
"/scripts/cp-nano-makefile-generator.sh -f -o " +
|
||||
output_path;
|
||||
|
||||
const string script_fresh_exe_cmd =
|
||||
getFilesystemPathConfig() +
|
||||
"/scripts/cp-nano-makefile-generator-fresh.sh save --save-location " +
|
||||
output_path;
|
||||
|
||||
dbgTrace(D_ORCHESTRATOR) << "Details resolver, srcipt exe cmd: " << srcipt_exe_cmd;
|
||||
if (isNoResponse("which nginx") && isNoResponse("which kong")) {
|
||||
return genError("Nginx or Kong isn't installed");
|
||||
@@ -263,7 +270,7 @@ DetailsResolver::Impl::parseNginxMetadata()
|
||||
return genError("Cannot open the file with nginx metadata, File: " + output_path);
|
||||
}
|
||||
|
||||
string line;
|
||||
string line;
|
||||
while (getline(input_stream, line)) {
|
||||
lines.push_back(line);
|
||||
}
|
||||
@@ -277,7 +284,37 @@ DetailsResolver::Impl::parseNginxMetadata()
|
||||
<< " Error: " << exception.what();
|
||||
}
|
||||
|
||||
if (!isNoResponse("which nginx")) {
|
||||
auto script_output = DetailsResolvingHanlder::getCommandOutput(script_fresh_exe_cmd);
|
||||
if (!script_output.ok()) {
|
||||
return genError("Failed to generate nginx fresh metadata, Error: " + script_output.getErr());
|
||||
}
|
||||
|
||||
try {
|
||||
ifstream input_stream(output_path);
|
||||
if (!input_stream) {
|
||||
return genError("Cannot open the file with nginx fresh metadata, File: " + output_path);
|
||||
}
|
||||
|
||||
string line;
|
||||
while (getline(input_stream, line)) {
|
||||
if (line.find("NGX_MODULE_SIGNATURE") == 0) {
|
||||
lines.push_back(line);
|
||||
}
|
||||
}
|
||||
input_stream.close();
|
||||
|
||||
orchestration_tools->removeFile(output_path);
|
||||
} catch (const ifstream::failure &exception) {
|
||||
dbgWarning(D_ORCHESTRATOR)
|
||||
<< "Cannot read the file with required nginx fresh metadata."
|
||||
<< " File: " << output_path
|
||||
<< " Error: " << exception.what();
|
||||
}
|
||||
}
|
||||
|
||||
if (lines.size() == 0) return genError("Failed to read nginx metadata file");
|
||||
string nginx_signature;
|
||||
string nginx_version;
|
||||
string config_opt;
|
||||
string cc_opt;
|
||||
@@ -292,6 +329,11 @@ DetailsResolver::Impl::parseNginxMetadata()
|
||||
nginx_version = "nginx-" + line.substr(eq_index + 1);
|
||||
continue;
|
||||
}
|
||||
if (line.find("NGX_MODULE_SIGNATURE") != string::npos) {
|
||||
auto eq_index = line.find("=");
|
||||
nginx_signature = line.substr(eq_index + 1);
|
||||
continue;
|
||||
}
|
||||
if (line.find("EXTRA_CC_OPT") != string::npos) {
|
||||
auto eq_index = line.find("=");
|
||||
cc_opt = line.substr(eq_index + 1);
|
||||
@@ -301,7 +343,7 @@ DetailsResolver::Impl::parseNginxMetadata()
|
||||
if (line.back() == '\\') line.pop_back();
|
||||
config_opt += line;
|
||||
}
|
||||
return make_tuple(config_opt, cc_opt, nginx_version);
|
||||
return make_tuple(config_opt, cc_opt, nginx_version, nginx_signature);
|
||||
}
|
||||
|
||||
Maybe<tuple<string, string, string, string, string>>
|
||||
|
||||
@@ -71,7 +71,18 @@ checkPepIdaIdnStatus(const string &command_output)
|
||||
Maybe<string>
|
||||
getRequiredNanoServices(const string &command_output)
|
||||
{
|
||||
return command_output;
|
||||
string idaRequiredServices[2] = {"idaSaml", "idaIdn"};
|
||||
string platform_str = "gaia";
|
||||
#if defined(gaia_arm)
|
||||
platform_str = "gaia_arm";
|
||||
#endif // gaia_arm
|
||||
string result = "";
|
||||
for(const string &serv : idaRequiredServices) {
|
||||
string add_service = serv + "_" + platform_str;
|
||||
result = result + add_service + ";";
|
||||
}
|
||||
command_output.empty(); // overcome unused variable
|
||||
return result;
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
@@ -342,6 +353,28 @@ getSMCBasedMgmtName(const string &command_output)
|
||||
return getAttr(command_output, "Mgmt object Name was not found");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getSmbObjectUid(const string &command_output)
|
||||
{
|
||||
static const char centrally_managed_comd_output = '0';
|
||||
|
||||
if (command_output.empty() || command_output[0] != centrally_managed_comd_output) {
|
||||
return genError("Object UUID was not found");
|
||||
}
|
||||
|
||||
Maybe<string> obj_uuid = getAttrFromCpsdwanGetDataJson("uuid");
|
||||
if (obj_uuid.ok()) {
|
||||
return obj_uuid.unpack();
|
||||
}
|
||||
|
||||
static const string obj_path = (getenv("FWDIR") ? string(getenv("FWDIR")) : "") + "/database/myown.C";
|
||||
auto file_stream = std::make_shared<std::ifstream>(obj_path);
|
||||
if (!file_stream->is_open()) {
|
||||
return genError("Failed to open the object file");
|
||||
}
|
||||
return getMgmtObjAttr(file_stream, "uuid ");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getSmbObjectName(const string &command_output)
|
||||
{
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
// shell command execution output as its input
|
||||
|
||||
#ifdef SHELL_PRE_CMD
|
||||
#if defined(gaia) || defined(smb)
|
||||
#if defined(gaia) || defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_PRE_CMD("read sdwan data",
|
||||
"(cpsdwan get_data > /tmp/cpsdwan_getdata_orch.json~) "
|
||||
"&& (mv /tmp/cpsdwan_getdata_orch.json~ /tmp/cpsdwan_getdata_orch.json)")
|
||||
@@ -40,17 +40,20 @@ SHELL_PRE_CMD("gunzip local.cfg", "gunzip -c $FWDIR/state/local/FW1/local.cfg.gz
|
||||
#endif
|
||||
|
||||
#ifdef SHELL_CMD_HANDLER
|
||||
#if defined(gaia) || defined(smb)
|
||||
#if defined(gaia) || defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_CMD_HANDLER("cpProductIntegrationMgmtObjectType", "cpprod_util CPPROD_IsMgmtMachine", getMgmtObjType)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtObjectUid",
|
||||
"mgmt_cli --format json -r true show-session | jq -r '.[\"connected-server\"].uid'",
|
||||
getMgmtObjUid
|
||||
)
|
||||
SHELL_CMD_HANDLER("prerequisitesForHorizonTelemetry",
|
||||
"FS_PATH=<FILESYSTEM-PREFIX>; [ -f ${FS_PATH}/cp-nano-horizon-telemetry-prerequisites.log ] "
|
||||
"&& head -1 ${FS_PATH}/cp-nano-horizon-telemetry-prerequisites.log || echo ''",
|
||||
checkIsInstallHorizonTelemetrySucceeded)
|
||||
SHELL_CMD_HANDLER(
|
||||
"IS_AIOPS_RUNNING",
|
||||
"FS_PATH=<FILESYSTEM-PREFIX>; "
|
||||
"PID=$(ps auxf | grep -v grep | grep -E ${FS_PATH}.*cp-nano-horizon-telemetry | awk -F' ' '{printf $2}'); "
|
||||
"[ -z \"${PID}\" ] && echo 'false' || echo 'true'",
|
||||
getIsAiopsRunning)
|
||||
#endif
|
||||
#if defined(gaia)
|
||||
SHELL_CMD_HANDLER("GLOBAL_QUID", "[ -d /opt/CPquid ] "
|
||||
"&& python3 /opt/CPquid/Quid_Api.py -i /opt/CPotelcol/quid_api/get_global_id.json | jq -r .message || echo ''",
|
||||
getQUID)
|
||||
@@ -76,12 +79,21 @@ SHELL_CMD_HANDLER("MGMT_QUID", "[ -d /opt/CPquid ] "
|
||||
SHELL_CMD_HANDLER("AIOPS_AGENT_ROLE", "[ -d /opt/CPOtlpAgent/custom_scripts ] "
|
||||
"&& ENV_NO_FORMAT=1 /opt/CPOtlpAgent/custom_scripts/agent_role.sh",
|
||||
getOtlpAgentGaiaOsRole)
|
||||
SHELL_CMD_HANDLER(
|
||||
"IS_AIOPS_RUNNING",
|
||||
"FS_PATH=<FILESYSTEM-PREFIX>; "
|
||||
"PID=$(ps auxf | grep -v grep | grep -E ${FS_PATH}.*cp-nano-horizon-telemetry | awk -F' ' '{printf $2}'); "
|
||||
"[ -z \"{PID}\" ] && echo 'false' || echo 'true'",
|
||||
getIsAiopsRunning)
|
||||
#endif
|
||||
#if defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_CMD_HANDLER("GLOBAL_QUID",
|
||||
"cat $FWDIR/database/myown.C "
|
||||
"| awk -F'[()]' '/:name/ { found=1; next } found && /:uuid/ { uid=tolower($2); print uid; exit }'",
|
||||
getQUID)
|
||||
SHELL_CMD_HANDLER("QUID",
|
||||
"cat $FWDIR/database/myown.C "
|
||||
"| awk -F'[()]' '/:name/ { found=1; next } found && /:uuid/ { uid=tolower($2); print uid; exit }'",
|
||||
getQUID)
|
||||
SHELL_CMD_HANDLER("SMO_QUID", "echo ''", getQUID)
|
||||
SHELL_CMD_HANDLER("MGMT_QUID", "echo ''", getQUID)
|
||||
SHELL_CMD_HANDLER("AIOPS_AGENT_ROLE", "echo 'SMB'", getOtlpAgentGaiaOsRole)
|
||||
#endif
|
||||
#if defined(gaia) || defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_CMD_HANDLER("hasSDWan", "[ -f $FWDIR/bin/sdwan_steering ] && echo '1' || echo '0'", checkHasSDWan)
|
||||
SHELL_CMD_HANDLER(
|
||||
"canUpdateSDWanData",
|
||||
@@ -133,12 +145,17 @@ SHELL_CMD_HANDLER("hasSAMLSupportedBlade", "enabled_blades", checkSAMLSupportedB
|
||||
SHELL_CMD_HANDLER("hasIDABlade", "enabled_blades", checkIDABlade)
|
||||
SHELL_CMD_HANDLER("hasSAMLPortal", "mpclient status nac", checkSAMLPortal)
|
||||
SHELL_CMD_HANDLER("hasIdaIdnEnabled", "fw ctl get int nac_pep_identity_next_enabled", checkPepIdaIdnStatus)
|
||||
SHELL_CMD_HANDLER("requiredNanoServices", "echo 'idaSaml_gaia;idaIdn_gaia;'", getRequiredNanoServices)
|
||||
SHELL_CMD_HANDLER("requiredNanoServices", "echo ida", getRequiredNanoServices)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtObjectName",
|
||||
"mgmt_cli --format json -r true show-session | jq -r '.[\"connected-server\"].name'",
|
||||
getMgmtObjName
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtObjectUid",
|
||||
"mgmt_cli --format json -r true show-session | jq -r '.[\"connected-server\"].uid'",
|
||||
getMgmtObjUid
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtParentObjectName",
|
||||
"cat $FWDIR/database/myself_objects.C "
|
||||
@@ -194,7 +211,7 @@ SHELL_CMD_HANDLER(
|
||||
)
|
||||
#endif //gaia
|
||||
|
||||
#if defined(smb)
|
||||
#if defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtParentObjectName",
|
||||
"jq -r .cluster_name /tmp/cpsdwan_getdata_orch.json",
|
||||
@@ -210,6 +227,11 @@ SHELL_CMD_HANDLER(
|
||||
"cpprod_util FwIsLocalMgmt",
|
||||
getSmbObjectName
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtObjectUid",
|
||||
"cpprod_util FwIsLocalMgmt",
|
||||
getSmbObjectUid
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"Application Control",
|
||||
"cat $FWDIR/conf/active_blades.txt | grep -o 'APCL [01]' | cut -d ' ' -f2",
|
||||
@@ -252,7 +274,6 @@ SHELL_CMD_HANDLER(
|
||||
|
||||
SHELL_CMD_OUTPUT("kernel_version", "uname -r")
|
||||
SHELL_CMD_OUTPUT("helloWorld", "cat /tmp/agentHelloWorld 2>/dev/null")
|
||||
SHELL_CMD_OUTPUT("report_timestamp", "date -u +\%s")
|
||||
#endif // SHELL_CMD_OUTPUT
|
||||
|
||||
|
||||
@@ -282,7 +303,7 @@ FILE_CONTENT_HANDLER("AppSecModelVersion", "<FILESYSTEM-PREFIX>/conf/waap/waap.d
|
||||
#endif // FILE_CONTENT_HANDLER
|
||||
|
||||
#ifdef SHELL_POST_CMD
|
||||
#if defined(smb)
|
||||
#if defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
|
||||
SHELL_POST_CMD("remove local.cfg", "rm -rf /tmp/local.cfg")
|
||||
#endif //smb
|
||||
#endif
|
||||
|
||||
@@ -266,10 +266,10 @@ private:
|
||||
case OrchestrationStatusFieldType::COUNT : return "Count";
|
||||
}
|
||||
|
||||
dbgAssert(false)
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration health")
|
||||
<< "Trying to convert unknown orchestration status field to string.";
|
||||
return "";
|
||||
return "Unknown Field";
|
||||
}
|
||||
|
||||
HealthCheckStatus
|
||||
@@ -282,7 +282,7 @@ private:
|
||||
case UpdatesProcessResult::DEGRADED : return HealthCheckStatus::DEGRADED;
|
||||
}
|
||||
|
||||
dbgAssert(false)
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration health")
|
||||
<< "Trying to convert unknown update process result field to health check status.";
|
||||
return HealthCheckStatus::IGNORED;
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
#include "maybe_res.h"
|
||||
|
||||
std::ostream &
|
||||
operator<<(std::ostream &os, const Maybe<std::tuple<std::string, std::string, std::string>> &)
|
||||
operator<<(std::ostream &os, const Maybe<std::tuple<std::string, std::string, std::string, std::string>> &)
|
||||
{
|
||||
return os;
|
||||
}
|
||||
@@ -48,7 +48,7 @@ public:
|
||||
MOCK_METHOD0(isGwNotVsx, bool());
|
||||
MOCK_METHOD0(getResolvedDetails, std::map<std::string, std::string>());
|
||||
MOCK_METHOD0(isVersionAboveR8110, bool());
|
||||
MOCK_METHOD0(parseNginxMetadata, Maybe<std::tuple<std::string, std::string, std::string>>());
|
||||
MOCK_METHOD0(parseNginxMetadata, Maybe<std::tuple<std::string, std::string, std::string, std::string>>());
|
||||
MOCK_METHOD0(
|
||||
readCloudMetadata, Maybe<std::tuple<std::string, std::string, std::string, std::string, std::string>>());
|
||||
};
|
||||
|
||||
@@ -100,6 +100,7 @@ private:
|
||||
string packages_dir;
|
||||
string orch_service_name;
|
||||
set<string> ignore_packages;
|
||||
Maybe<string> forbidden_versions = genError("Forbidden versions file does not exist");
|
||||
};
|
||||
|
||||
void
|
||||
@@ -135,7 +136,8 @@ ManifestController::Impl::init()
|
||||
"Ignore packages list file path"
|
||||
);
|
||||
|
||||
if (Singleton::Consume<I_OrchestrationTools>::by<ManifestController>()->doesFileExist(ignore_packages_path)) {
|
||||
auto orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<ManifestController>();
|
||||
if (orchestration_tools->doesFileExist(ignore_packages_path)) {
|
||||
try {
|
||||
ifstream input_stream(ignore_packages_path);
|
||||
if (!input_stream) {
|
||||
@@ -156,6 +158,9 @@ ManifestController::Impl::init()
|
||||
<< " Error: " << f.what();
|
||||
}
|
||||
}
|
||||
|
||||
const string forbidden_versions_path = getFilesystemPathConfig() + "/revert/forbidden_versions";
|
||||
forbidden_versions = orchestration_tools->readFile(forbidden_versions_path);
|
||||
}
|
||||
|
||||
bool
|
||||
@@ -271,6 +276,17 @@ ManifestController::Impl::updateManifest(const string &new_manifest_file)
|
||||
}
|
||||
|
||||
map<string, Package> new_packages = parsed_manifest.unpack();
|
||||
if (!new_packages.empty()) {
|
||||
const Package &package = new_packages.begin()->second;
|
||||
if (forbidden_versions.ok() &&
|
||||
forbidden_versions.unpack().find(package.getVersion()) != string::npos
|
||||
) {
|
||||
dbgWarning(D_ORCHESTRATOR)
|
||||
<< "Packages version is in the forbidden versions list. No upgrade will be performed.";
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
map<string, Package> all_packages = parsed_manifest.unpack();
|
||||
map<string, Package> current_packages;
|
||||
parsed_manifest = orchestration_tools->loadPackagesFromJson(manifest_file_path);
|
||||
|
||||
@@ -58,6 +58,9 @@ public:
|
||||
Debug::setUnitTestFlag(D_ORCHESTRATOR, Debug::DebugLevel::TRACE);
|
||||
const string ignore_packages_file = "/etc/cp/conf/ignore-packages.txt";
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist(ignore_packages_file)).WillOnce(Return(false));
|
||||
Maybe<string> forbidden_versions(string("a1\na2"));
|
||||
EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/revert/forbidden_versions"))
|
||||
.WillOnce(Return(forbidden_versions));
|
||||
manifest_controller.init();
|
||||
manifest_file_path = getConfigurationWithDefault<string>(
|
||||
"/etc/cp/conf/manifest.json",
|
||||
@@ -224,6 +227,10 @@ TEST_F(ManifestControllerTest, createNewManifest)
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -363,6 +370,11 @@ TEST_F(ManifestControllerTest, updateManifest)
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).Times(2).WillRepeatedly(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).Times(2).WillRepeatedly(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
|
||||
manifest =
|
||||
@@ -417,6 +429,9 @@ TEST_F(ManifestControllerTest, updateManifest)
|
||||
EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services));
|
||||
EXPECT_CALL(mock_orchestration_tools,
|
||||
loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillRepeatedly(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -478,6 +493,11 @@ TEST_F(ManifestControllerTest, selfUpdate)
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file", path +
|
||||
temp_ext)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -607,6 +627,10 @@ TEST_F(ManifestControllerTest, removeCurrentErrorPackage)
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
corrupted_packages.clear();
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
@@ -666,6 +690,10 @@ TEST_F(ManifestControllerTest, selfUpdateWithOldCopy)
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file", path +
|
||||
temp_ext)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -722,6 +750,10 @@ TEST_F(ManifestControllerTest, selfUpdateWithOldCopyWithError)
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist(path)).WillOnce(Return(false)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile(path, path + backup_ext + temp_ext)).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(hostname));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -798,6 +830,10 @@ TEST_F(ManifestControllerTest, installAndRemove)
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).Times(2).WillRepeatedly(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).Times(2).WillRepeatedly(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
|
||||
string new_manifest =
|
||||
@@ -858,6 +894,63 @@ TEST_F(ManifestControllerTest, installAndRemove)
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my1/my1")).Times(2)
|
||||
.WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillRepeatedly(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
TEST_F(ManifestControllerTest, manifestWithForbiddenVersion)
|
||||
{
|
||||
new_services.clear();
|
||||
old_services.clear();
|
||||
|
||||
string manifest =
|
||||
"{"
|
||||
" \"packages\": ["
|
||||
" {"
|
||||
" \"download-path\": \"http://172.23.92.135/my.sh\","
|
||||
" \"relative-path\": \"\","
|
||||
" \"name\": \"my\","
|
||||
" \"version\": \"a1\","
|
||||
" \"checksum-type\": \"sha1sum\","
|
||||
" \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\","
|
||||
" \"package-type\": \"service\","
|
||||
" \"require\": []"
|
||||
" },"
|
||||
" {"
|
||||
" \"download-path\": \"http://172.23.92.135/my.sh\","
|
||||
" \"relative-path\": \"\","
|
||||
" \"name\": \"orchestration\","
|
||||
" \"version\": \"a1\","
|
||||
" \"checksum-type\": \"sha1sum\","
|
||||
" \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\","
|
||||
" \"package-type\": \"service\","
|
||||
" \"require\": []"
|
||||
" },"
|
||||
" {"
|
||||
" \"download-path\": \"\","
|
||||
" \"relative-path\": \"\","
|
||||
" \"name\": \"waap\","
|
||||
" \"version\": \"a1\","
|
||||
" \"checksum-type\": \"sha1sum\","
|
||||
" \"checksum\": \"\","
|
||||
" \"package-type\": \"service\","
|
||||
" \"status\": false,\n"
|
||||
" \"message\": \"This security app isn't valid for this agent\"\n"
|
||||
" }"
|
||||
" ]"
|
||||
"}";
|
||||
|
||||
map<string, Package> manifest_services;
|
||||
load(manifest, manifest_services);
|
||||
checkIfFileExistsCall(manifest_services.at("my"));
|
||||
|
||||
|
||||
load(manifest, new_services);
|
||||
load(old_manifest, old_services);
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services));
|
||||
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -947,6 +1040,10 @@ TEST_F(ManifestControllerTest, badInstall)
|
||||
EXPECT_CALL(mock_orchestration_tools,
|
||||
packagesToJsonFile(corrupted_packages, corrupted_file_list)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1112,6 +1209,12 @@ TEST_F(ManifestControllerTest, requireUpdate)
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("new_manifest.json")).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1212,6 +1315,10 @@ TEST_F(ManifestControllerTest, sharedObjectNotInstalled)
|
||||
).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file1", path +
|
||||
temp_ext)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1313,6 +1420,12 @@ TEST_F(ManifestControllerTest, requireSharedObjectUpdate)
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("new_manifest.json"))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1389,6 +1502,7 @@ TEST_F(ManifestControllerTest, failureOnDownloadSharedObject)
|
||||
EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(string("hostname")));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("/tmp/temp_file1")).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1524,6 +1638,12 @@ TEST_F(ManifestControllerTest, multiRequireUpdate)
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("new_manifest.json"))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1610,6 +1730,12 @@ TEST_F(ManifestControllerTest, createNewManifestWithUninstallablePackage)
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1624,7 +1750,7 @@ TEST_F(ManifestControllerTest, updateUninstallPackage)
|
||||
" \"download-path\": \"\","
|
||||
" \"relative-path\": \"\","
|
||||
" \"name\": \"my\","
|
||||
" \"version\": \"\","
|
||||
" \"version\": \"c\","
|
||||
" \"checksum-type\": \"sha1sum\","
|
||||
" \"checksum\": \"\","
|
||||
" \"package-type\": \"service\","
|
||||
@@ -1721,6 +1847,11 @@ TEST_F(ManifestControllerTest, updateUninstallPackage)
|
||||
EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services));
|
||||
EXPECT_CALL(mock_orchestration_tools,
|
||||
loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillOnce(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1744,6 +1875,9 @@ public:
|
||||
setConfiguration<string>(ignore_packages_file, "orchestration", "Ignore packages list file path");
|
||||
writeIgnoreList(ignore_packages_file, ignore_services);
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist(ignore_packages_file)).WillOnce(Return(true));
|
||||
Maybe<string> forbidden_versions(string("a1\na2"));
|
||||
EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/revert/forbidden_versions"))
|
||||
.WillOnce(Return(forbidden_versions));
|
||||
manifest_controller.init();
|
||||
manifest_file_path = getConfigurationWithDefault<string>(
|
||||
"/etc/cp/conf/manifest.json",
|
||||
@@ -1839,6 +1973,7 @@ public:
|
||||
StrictMock<MockOrchestrationStatus> mock_status;
|
||||
StrictMock<MockDownloader> mock_downloader;
|
||||
StrictMock<MockOrchestrationTools> mock_orchestration_tools;
|
||||
StrictMock<MockDetailsResolver> mock_details_resolver;
|
||||
NiceMock<MockShellCmd> mock_shell_cmd;
|
||||
|
||||
ManifestController manifest_controller;
|
||||
@@ -2122,6 +2257,12 @@ TEST_F(ManifestControllerIgnorePakckgeTest, addIgnorePackageAndUpdateNormal)
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -2387,6 +2528,12 @@ TEST_F(ManifestControllerIgnorePakckgeTest, overrideIgnoredPackageFromProfileSet
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("b"));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(false))
|
||||
.WillRepeatedly(Return(true));;
|
||||
EXPECT_CALL(mock_orchestration_tools, writeFile(_, "/etc/cp/revert/upgrade_status", false))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_TRUE(i_manifest_controller->updateManifest(file_name));
|
||||
|
||||
EXPECT_THAT(capture_debug.str(), Not(HasSubstr("Ignoring a package from the manifest. Package name: my")));
|
||||
@@ -2411,6 +2558,9 @@ public:
|
||||
doesFileExist("/etc/cp/conf/ignore-packages.txt")
|
||||
).WillOnce(Return(false));
|
||||
|
||||
Maybe<string> forbidden_versions(string("a1\na2"));
|
||||
EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/revert/forbidden_versions"))
|
||||
.WillOnce(Return(forbidden_versions));
|
||||
manifest_controller.init();
|
||||
}
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
#include "manifest_handler.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <ctime>
|
||||
|
||||
#include "debug.h"
|
||||
#include "config.h"
|
||||
@@ -201,18 +202,29 @@ ManifestHandler::installPackage(
|
||||
auto span_scope = i_env->startNewSpanScope(Span::ContextType::CHILD_OF);
|
||||
auto orchestration_status = Singleton::Consume<I_OrchestrationStatus>::by<ManifestHandler>();
|
||||
|
||||
auto details_resolver = Singleton::Consume<I_DetailsResolver>::by<ManifestHandler>();
|
||||
auto orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<ManifestHandler>();
|
||||
|
||||
auto &package = package_downloaded_file.first;
|
||||
auto &package_name = package.getName();
|
||||
auto &package_handler_path = package_downloaded_file.second;
|
||||
|
||||
dbgInfo(D_ORCHESTRATOR) << "Handling package installation. Package: " << package_name;
|
||||
|
||||
string upgrade_info =
|
||||
details_resolver->getAgentVersion() + " " + package.getVersion() + " " + getCurrentTimestamp();
|
||||
if (!orchestration_tools->doesFileExist(getFilesystemPathConfig() + "/revert/upgrade_status") &&
|
||||
!orchestration_tools->writeFile(upgrade_info, getFilesystemPathConfig() + "/revert/upgrade_status")
|
||||
) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to write to " + getFilesystemPathConfig() + "/revert/upgrade_status";
|
||||
}
|
||||
|
||||
if (package_name.compare(orch_service_name) == 0) {
|
||||
orchestration_status->writeStatusToFile();
|
||||
bool self_update_status = selfUpdate(package, current_packages, package_handler_path);
|
||||
if (!self_update_status) {
|
||||
auto details = Singleton::Consume<I_AgentDetails>::by<ManifestHandler>();
|
||||
auto hostname = Singleton::Consume<I_DetailsResolver>::by<ManifestHandler>()->getHostname();
|
||||
auto hostname = details_resolver->getHostname();
|
||||
string err_hostname = (hostname.ok() ? "on host '" + *hostname : "'" + details->getAgentId()) + "'";
|
||||
string install_error =
|
||||
"Warning: Agent/Gateway " +
|
||||
@@ -246,7 +258,6 @@ ManifestHandler::installPackage(
|
||||
return true;
|
||||
}
|
||||
string current_installation_file = packages_dir + "/" + package_name + "/" + package_name;
|
||||
auto orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<ManifestHandler>();
|
||||
bool is_clean_installation = !orchestration_tools->doesFileExist(current_installation_file);
|
||||
|
||||
|
||||
@@ -368,3 +379,13 @@ ManifestHandler::selfUpdate(
|
||||
package_handler->preInstallPackage(orch_service_name, current_installation_file) &&
|
||||
package_handler->installPackage(orch_service_name, current_installation_file, false);
|
||||
}
|
||||
|
||||
string
|
||||
ManifestHandler::getCurrentTimestamp()
|
||||
{
|
||||
time_t now = time(nullptr);
|
||||
tm* now_tm = localtime(&now);
|
||||
char timestamp[20];
|
||||
strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", now_tm);
|
||||
return string(timestamp);
|
||||
}
|
||||
|
||||
@@ -429,7 +429,7 @@ public:
|
||||
status.insertServiceSetting(service_name, path);
|
||||
return;
|
||||
case OrchestrationStatusConfigType::MANIFEST:
|
||||
dbgAssert(false)
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "sesrvice configuration")
|
||||
<< "Manifest is not a service configuration file type";
|
||||
break;
|
||||
@@ -438,7 +438,9 @@ public:
|
||||
case OrchestrationStatusConfigType::COUNT:
|
||||
break;
|
||||
}
|
||||
dbgAssert(false) << AlertInfo(AlertTeam::CORE, "sesrvice configuration") << "Unknown configuration file type";
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "service configuration")
|
||||
<< "Unknown configuration file type";
|
||||
}
|
||||
|
||||
void
|
||||
|
||||
@@ -55,6 +55,8 @@ USE_DEBUG_FLAG(D_ORCHESTRATOR);
|
||||
static string fw_last_update_time = "";
|
||||
#endif // gaia || smb
|
||||
|
||||
static const size_t MAX_SERVER_NAME_LENGTH = 253;
|
||||
|
||||
class SetAgentUninstall
|
||||
:
|
||||
public ServerRest,
|
||||
@@ -103,6 +105,19 @@ public:
|
||||
<< "Initializing Orchestration component, file system path prefix: "
|
||||
<< filesystem_prefix;
|
||||
|
||||
int check_upgrade_success_interval = getSettingWithDefault<uint>(10, "successUpgradeInterval");
|
||||
Singleton::Consume<I_MainLoop>::by<OrchestrationComp>()->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::Timer,
|
||||
[this, check_upgrade_success_interval]()
|
||||
{
|
||||
Singleton::Consume<I_MainLoop>::by<OrchestrationComp>()->yield(
|
||||
std::chrono::minutes(check_upgrade_success_interval)
|
||||
);
|
||||
processUpgradeCompletion();
|
||||
},
|
||||
"Orchestration successfully updated (One-Time After Interval)",
|
||||
true
|
||||
);
|
||||
auto orch_policy = loadDefaultOrchestrationPolicy();
|
||||
if (!orch_policy.ok()) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to load Orchestration Policy. Error: " << orch_policy.getErr();
|
||||
@@ -141,6 +156,113 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
saveLastKnownOrchInfo(string curr_agent_version)
|
||||
{
|
||||
static const string upgrades_dir = filesystem_prefix + "/revert";
|
||||
static const string last_known_orchestrator = upgrades_dir + "/last_known_working_orchestrator";
|
||||
static const string current_orchestration_package =
|
||||
filesystem_prefix + "/packages/orchestration/orchestration";
|
||||
static const string last_known_manifest = upgrades_dir + "/last_known_manifest";
|
||||
static const string current_manifest_file = getConfigurationWithDefault<string>(
|
||||
filesystem_prefix + "/conf/manifest.json",
|
||||
"orchestration",
|
||||
"Manifest file path"
|
||||
);
|
||||
|
||||
if (!i_orchestration_tools->copyFile(current_orchestration_package, last_known_orchestrator)) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to copy the orchestration package to " << upgrades_dir;
|
||||
} else {
|
||||
dbgInfo(D_ORCHESTRATOR) << "last known orchestrator version updated to: " << curr_agent_version;
|
||||
}
|
||||
|
||||
if (!i_orchestration_tools->copyFile(current_manifest_file, last_known_manifest)) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to copy " << current_manifest_file << " to " << upgrades_dir;
|
||||
} else {
|
||||
dbgInfo(D_ORCHESTRATOR) << "last known manifest updated";
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
void
|
||||
processUpgradeCompletion()
|
||||
{
|
||||
if (!is_first_check_update_success) {
|
||||
int check_upgrade_success_interval = getSettingWithDefault<uint>(10, "successUpgradeInterval");
|
||||
// LCOV_EXCL_START
|
||||
Singleton::Consume<I_MainLoop>::by<OrchestrationComp>()->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::Timer,
|
||||
[this, check_upgrade_success_interval]()
|
||||
{
|
||||
Singleton::Consume<I_MainLoop>::by<OrchestrationComp>()->yield(
|
||||
std::chrono::minutes(check_upgrade_success_interval)
|
||||
);
|
||||
processUpgradeCompletion();
|
||||
},
|
||||
"Orchestration successfully updated",
|
||||
true
|
||||
);
|
||||
// LCOV_EXCL_STOP
|
||||
return;
|
||||
}
|
||||
|
||||
static const string upgrades_dir = filesystem_prefix + "/revert";
|
||||
static const string upgrade_status = upgrades_dir + "/upgrade_status";
|
||||
static const string last_known_orchestrator = upgrades_dir + "/last_known_working_orchestrator";
|
||||
static const string upgrade_failure_info_path = upgrades_dir + "/failed_upgrade_info";
|
||||
|
||||
I_DetailsResolver *i_details_resolver = Singleton::Consume<I_DetailsResolver>::by<OrchestrationComp>();
|
||||
|
||||
bool is_upgrade_status_exist = i_orchestration_tools->doesFileExist(upgrade_status);
|
||||
bool is_last_known_orchestrator_exist = i_orchestration_tools->doesFileExist(last_known_orchestrator);
|
||||
|
||||
if (!is_upgrade_status_exist) {
|
||||
if (!is_last_known_orchestrator_exist) {
|
||||
saveLastKnownOrchInfo(i_details_resolver->getAgentVersion());
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
auto maybe_upgrade_data = i_orchestration_tools->readFile(upgrade_status);
|
||||
string upgrade_data, from_version, to_version;
|
||||
if (maybe_upgrade_data.ok()) {
|
||||
upgrade_data = maybe_upgrade_data.unpack();
|
||||
istringstream stream(upgrade_data);
|
||||
stream >> from_version >> to_version;
|
||||
}
|
||||
i_orchestration_tools->removeFile(upgrade_status);
|
||||
|
||||
if (i_orchestration_tools->doesFileExist(upgrade_failure_info_path)) {
|
||||
string info = "Orchestration revert. ";
|
||||
auto failure_info = i_orchestration_tools->readFile(upgrade_failure_info_path);
|
||||
if (failure_info.ok()) info.append(failure_info.unpack());
|
||||
LogGen(
|
||||
info,
|
||||
ReportIS::Level::ACTION,
|
||||
ReportIS::Audience::INTERNAL,
|
||||
ReportIS::Severity::CRITICAL,
|
||||
ReportIS::Priority::URGENT,
|
||||
ReportIS::Tags::ORCHESTRATOR
|
||||
);
|
||||
dbgError(D_ORCHESTRATOR) <<
|
||||
"Error in orchestration version: " << to_version <<
|
||||
". Orchestration reverted to version: " << i_details_resolver->getAgentVersion();
|
||||
i_orchestration_tools->removeFile(upgrade_failure_info_path);
|
||||
return;
|
||||
}
|
||||
|
||||
saveLastKnownOrchInfo(i_details_resolver->getAgentVersion());
|
||||
i_orchestration_tools->writeFile(
|
||||
upgrade_data + "\n",
|
||||
getLogFilesPathConfig() + "/nano_agent/prev_upgrades",
|
||||
true
|
||||
);
|
||||
dbgWarning(D_ORCHESTRATOR) <<
|
||||
"Upgrade process from version: " << from_version <<
|
||||
" to version: " << to_version <<
|
||||
" completed successfully";
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
registerToTheFog()
|
||||
{
|
||||
@@ -1022,6 +1144,7 @@ private:
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::GENERAL
|
||||
).notify();
|
||||
if (!is_first_check_update_success) is_first_check_update_success = true;
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
@@ -1342,14 +1465,17 @@ private:
|
||||
|
||||
auto nginx_data = i_details_resolver->parseNginxMetadata();
|
||||
if (nginx_data.ok()) {
|
||||
string nginx_signature;
|
||||
string nginx_version;
|
||||
string config_opt;
|
||||
string cc_opt;
|
||||
tie(config_opt, cc_opt, nginx_version) = nginx_data.unpack();
|
||||
tie(config_opt, cc_opt, nginx_version, nginx_signature) = nginx_data.unpack();
|
||||
agent_data_report
|
||||
<< make_pair("nginxVersion", nginx_version)
|
||||
<< make_pair("configureOpt", config_opt)
|
||||
<< make_pair("extraCompilerOpt", cc_opt);
|
||||
<< make_pair("attachmentVersion", "Legacy")
|
||||
<< make_pair("nginxSignature", nginx_signature)
|
||||
<< make_pair("nginxVersion", nginx_version)
|
||||
<< make_pair("configureOpt", config_opt)
|
||||
<< make_pair("extraCompilerOpt", cc_opt);
|
||||
} else {
|
||||
dbgDebug(D_ORCHESTRATOR) << nginx_data.getErr();
|
||||
}
|
||||
@@ -1389,6 +1515,8 @@ private:
|
||||
|
||||
agent_data_report << AgentReportFieldWithLabel("userEdition", FogCommunication::getUserEdition());
|
||||
|
||||
agent_data_report << make_pair("registeredServer", i_agent_details->getRegisteredServer());
|
||||
|
||||
#if defined(gaia) || defined(smb)
|
||||
if (i_details_resolver->compareCheckpointVersion(8100, greater_equal<int>())) {
|
||||
agent_data_report << AgentReportFieldWithLabel("isCheckpointVersionGER81", "true");
|
||||
@@ -1549,6 +1677,11 @@ private:
|
||||
<< LogField("agentType", "Orchestration")
|
||||
<< LogField("agentVersion", Version::get());
|
||||
|
||||
string registered_server = getAttribute("registered-server", "registered_server");
|
||||
dbgTrace(D_ORCHESTRATOR) << "Registered server: " << registered_server;
|
||||
if (!registered_server.empty()) {
|
||||
i_agent_details->setRegisteredServer(registered_server.substr(0, MAX_SERVER_NAME_LENGTH));
|
||||
}
|
||||
auto mainloop = Singleton::Consume<I_MainLoop>::by<OrchestrationComp>();
|
||||
mainloop->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::Offline,
|
||||
@@ -1587,6 +1720,7 @@ private:
|
||||
}
|
||||
|
||||
setDelayedUpgradeTime();
|
||||
|
||||
while (true) {
|
||||
Singleton::Consume<I_Environment>::by<OrchestrationComp>()->startNewTrace(false);
|
||||
if (shouldReportAgentDetailsMetadata()) {
|
||||
@@ -1628,9 +1762,9 @@ private:
|
||||
}
|
||||
}
|
||||
|
||||
string server_name = getAttribute("registered-server", "registered_server");
|
||||
string server_name = Singleton::Consume<I_AgentDetails>::by<OrchestrationComp>()->getRegisteredServer();
|
||||
auto server = TagAndEnumManagement::convertStringToTag(server_name);
|
||||
if (server_name == "'SWAG'") server = Tags::WEB_SERVER_SWAG;
|
||||
if (server_name == "'SWAG'" || server_name == "'SWAG Server'") server = Tags::WEB_SERVER_SWAG;
|
||||
if (server.ok()) tags.insert(*server);
|
||||
|
||||
if (getAttribute("no-setting", "CROWDSEC_ENABLED") == "true") tags.insert(Tags::CROWDSEC);
|
||||
@@ -1652,7 +1786,7 @@ private:
|
||||
tags
|
||||
);
|
||||
|
||||
if (server_name != "") registration_report.addToOrigin(LogField("eventCategory", server_name));
|
||||
registration_report.addToOrigin(LogField("eventCategory", server_name));
|
||||
|
||||
auto email = getAttribute("email-address", "user_email");
|
||||
if (email != "") registration_report << LogField("userDefinedId", email);
|
||||
@@ -1695,13 +1829,19 @@ private:
|
||||
auto backup_installation_file = current_installation_file + backup_ext;
|
||||
auto temp_ext = getConfigurationWithDefault<string>("_temp", "orchestration", "Temp file extension");
|
||||
|
||||
dbgAssert(i_orchestration_tools->doesFileExist(backup_installation_file))
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "There is no backup installation package";
|
||||
if (!i_orchestration_tools->doesFileExist(backup_installation_file)) {
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "There is no backup installation package";
|
||||
return;
|
||||
}
|
||||
|
||||
dbgAssert(i_orchestration_tools->copyFile(backup_installation_file, current_installation_file))
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to copy backup installation package";
|
||||
if (!i_orchestration_tools->copyFile(backup_installation_file, current_installation_file)) {
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to copy backup installation package";
|
||||
return;
|
||||
}
|
||||
|
||||
// Copy the backup manifest file to the default manifest file path.
|
||||
auto manifest_file_path = getConfigurationWithDefault<string>(
|
||||
@@ -1716,12 +1856,18 @@ private:
|
||||
|
||||
auto package_handler = Singleton::Consume<I_PackageHandler>::by<OrchestrationComp>();
|
||||
// Install the backup orchestration service installation package.
|
||||
dbgAssert(package_handler->preInstallPackage(service_name, current_installation_file))
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to restore from backup, pre install test failed";
|
||||
dbgAssert(package_handler->installPackage(service_name, current_installation_file, true))
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to restore from backup, installation failed";
|
||||
if (!package_handler->preInstallPackage(service_name, current_installation_file)) {
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to restore from backup, pre install test failed";
|
||||
return;
|
||||
}
|
||||
if (!package_handler->installPackage(service_name, current_installation_file, true)) {
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "orchestration backup")
|
||||
<< "Failed to restore from backup, installation failed";
|
||||
return;
|
||||
}
|
||||
}
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
@@ -2052,10 +2198,10 @@ private:
|
||||
int failure_count = 0;
|
||||
unsigned int sleep_interval = 0;
|
||||
bool is_new_success = false;
|
||||
bool is_first_check_update_success = false;
|
||||
OrchestrationPolicy policy;
|
||||
UpdatesProcessReporter updates_process_reporter_listener;
|
||||
HybridModeMetric hybrid_mode_metric;
|
||||
EnvDetails env_details;
|
||||
chrono::minutes upgrade_delay_time;
|
||||
|
||||
string filesystem_prefix = "";
|
||||
@@ -2118,6 +2264,7 @@ OrchestrationComp::preload()
|
||||
registerExpectedSetting<vector<string>>("upgradeDay");
|
||||
registerExpectedSetting<string>("email-address");
|
||||
registerExpectedSetting<string>("registered-server");
|
||||
registerExpectedSetting<uint>("successUpgradeInterval");
|
||||
registerExpectedConfigFile("orchestration", Config::ConfigFileType::Policy);
|
||||
registerExpectedConfigFile("registration-data", Config::ConfigFileType::Policy);
|
||||
}
|
||||
|
||||
@@ -386,7 +386,7 @@ OrchestrationTools::Impl::calculateChecksum(Package::ChecksumTypes checksum_type
|
||||
return genError("Error while reading file " + path + ", " + e.what());
|
||||
}
|
||||
|
||||
dbgAssert(false)
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "service configuration")
|
||||
<< "Checksum type is not supported. Checksum type: "
|
||||
<< static_cast<unsigned int>(checksum_type);
|
||||
|
||||
@@ -89,6 +89,11 @@ public:
|
||||
|
||||
EXPECT_CALL(mock_service_controller, isServiceInstalled("Access Control")).WillRepeatedly(Return(false));
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Orchestration successfully updated (One-Time After Interval)", true)
|
||||
).WillOnce(DoAll(SaveArg<1>(&upgrade_routine), Return(0)));
|
||||
|
||||
// This Holding the Main Routine of the Orchestration.
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
@@ -135,7 +140,7 @@ public:
|
||||
void
|
||||
expectDetailsResolver()
|
||||
{
|
||||
Maybe<tuple<string, string, string>> no_nginx(genError("No nginx"));
|
||||
Maybe<tuple<string, string, string, string>> no_nginx(genError("No nginx"));
|
||||
EXPECT_CALL(mock_details_resolver, getPlatform()).WillRepeatedly(Return(string("linux")));
|
||||
EXPECT_CALL(mock_details_resolver, getArch()).WillRepeatedly(Return(string("x86_64")));
|
||||
EXPECT_CALL(mock_details_resolver, isReverseProxy()).WillRepeatedly(Return(false));
|
||||
@@ -156,6 +161,7 @@ public:
|
||||
runRoutine()
|
||||
{
|
||||
routine();
|
||||
upgrade_routine();
|
||||
}
|
||||
|
||||
void
|
||||
@@ -235,6 +241,7 @@ private:
|
||||
}
|
||||
|
||||
I_MainLoop::Routine routine;
|
||||
I_MainLoop::Routine upgrade_routine;
|
||||
I_MainLoop::Routine status_routine;
|
||||
};
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@ std::ostream & operator<<(std::ostream &os, const Package &) { return os; }
|
||||
#include "health_check_status/health_check_status.h"
|
||||
#include "updates_process_event.h"
|
||||
#include "declarative_policy_utils.h"
|
||||
#include "mock/mock_env_details.h"
|
||||
|
||||
using namespace testing;
|
||||
using namespace std;
|
||||
@@ -82,6 +83,12 @@ public:
|
||||
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response));
|
||||
EXPECT_CALL(mock_status, setFogAddress(host_url)).WillRepeatedly(Return());
|
||||
EXPECT_CALL(mock_orchestration_tools, setClusterId());
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Orchestration successfully updated (One-Time After Interval)", true)
|
||||
).WillOnce(DoAll(SaveArg<1>(&upgrade_routine), Return(0)));
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Orchestration runner", true)
|
||||
@@ -161,7 +168,7 @@ public:
|
||||
void
|
||||
expectDetailsResolver()
|
||||
{
|
||||
Maybe<tuple<string, string, string>> no_nginx(genError("No nginx"));
|
||||
Maybe<tuple<string, string, string, string>> no_nginx(genError("No nginx"));
|
||||
EXPECT_CALL(mock_details_resolver, getPlatform()).WillRepeatedly(Return(string("linux")));
|
||||
EXPECT_CALL(mock_details_resolver, getArch()).WillRepeatedly(Return(string("x86_64")));
|
||||
EXPECT_CALL(mock_details_resolver, isReverseProxy()).WillRepeatedly(Return(false));
|
||||
@@ -280,6 +287,12 @@ public:
|
||||
status_routine();
|
||||
}
|
||||
|
||||
void
|
||||
runUpgradeRoutine()
|
||||
{
|
||||
upgrade_routine();
|
||||
}
|
||||
|
||||
void
|
||||
preload()
|
||||
{
|
||||
@@ -324,6 +337,7 @@ public:
|
||||
StrictMock<MockOrchestrationTools> mock_orchestration_tools;
|
||||
StrictMock<MockDownloader> mock_downloader;
|
||||
StrictMock<MockShellCmd> mock_shell_cmd;
|
||||
StrictMock<EnvDetailsMocker> mock_env_details;
|
||||
StrictMock<MockMessaging> mock_message;
|
||||
StrictMock<MockRestApi> rest;
|
||||
StrictMock<MockServiceController> mock_service_controller;
|
||||
@@ -357,6 +371,7 @@ private:
|
||||
|
||||
I_MainLoop::Routine routine;
|
||||
I_MainLoop::Routine status_routine;
|
||||
I_MainLoop::Routine upgrade_routine;
|
||||
};
|
||||
|
||||
|
||||
@@ -583,6 +598,8 @@ TEST_F(OrchestrationTest, check_sending_registration_data)
|
||||
env.init();
|
||||
init();
|
||||
|
||||
EXPECT_CALL(mock_env_details, getEnvType()).WillRepeatedly(Return(EnvType::LINUX));
|
||||
|
||||
EXPECT_CALL(mock_service_controller, updateServiceConfiguration(_, _, _, _, _, _))
|
||||
.WillOnce(Return(Maybe<void>()));
|
||||
EXPECT_CALL(mock_orchestration_tools, calculateChecksum(_, _)).WillRepeatedly(Return(string()));
|
||||
@@ -597,14 +614,6 @@ TEST_F(OrchestrationTest, check_sending_registration_data)
|
||||
|
||||
string version = "1";
|
||||
EXPECT_CALL(mock_service_controller, getUpdatePolicyVersion()).WillOnce(ReturnRef(version));
|
||||
|
||||
EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>()))
|
||||
.WillOnce(Return())
|
||||
.WillOnce(Invoke([] (chrono::microseconds) { throw invalid_argument("stop while loop"); }));
|
||||
try {
|
||||
runRoutine();
|
||||
} catch (const invalid_argument& e) {}
|
||||
|
||||
string config_json =
|
||||
"{\n"
|
||||
" \"email-address\": \"fake@example.com\",\n"
|
||||
@@ -613,9 +622,19 @@ TEST_F(OrchestrationTest, check_sending_registration_data)
|
||||
|
||||
istringstream ss(config_json);
|
||||
Singleton::Consume<Config::I_Config>::from(config_comp)->loadConfiguration(ss);
|
||||
EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>()))
|
||||
.WillOnce(Return())
|
||||
.WillOnce(Invoke([] (chrono::microseconds) { throw invalid_argument("stop while loop"); }));
|
||||
try {
|
||||
runRoutine();
|
||||
} catch (const invalid_argument& e) {}
|
||||
|
||||
|
||||
sending_routine();
|
||||
|
||||
EXPECT_THAT(message_body, HasSubstr("\"userDefinedId\": \"fake@example.com\""));
|
||||
EXPECT_THAT(message_body, HasSubstr("\"eventCategory\""));
|
||||
|
||||
EXPECT_THAT(message_body, AnyOf(HasSubstr("\"Embedded Deployment\""), HasSubstr("\"Kubernetes Deployment\"")));
|
||||
EXPECT_THAT(message_body, HasSubstr("\"NGINX Server\""));
|
||||
}
|
||||
@@ -1000,6 +1019,11 @@ TEST_F(OrchestrationTest, loadOrchestrationPolicyFromBackup)
|
||||
);
|
||||
waitForRestCall();
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Orchestration successfully updated (One-Time After Interval)", true)
|
||||
);
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Orchestration runner", true)
|
||||
@@ -1166,6 +1190,29 @@ TEST_F(OrchestrationTest, manifestUpdate)
|
||||
try {
|
||||
runRoutine();
|
||||
} catch (const invalid_argument& e) {}
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/upgrade_status")).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/last_known_working_orchestrator"))
|
||||
.WillOnce(Return(true));
|
||||
|
||||
Maybe<string> upgrade_status(string("1.1.1 1.1.2 2025-01-28 07:53:23"));
|
||||
EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/revert/upgrade_status"))
|
||||
.WillOnce(Return(upgrade_status));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("/etc/cp/revert/upgrade_status")).WillOnce(Return(true));
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/revert/failed_upgrade_info"))
|
||||
.WillOnce(Return(false));
|
||||
|
||||
EXPECT_CALL(mock_details_resolver, getAgentVersion()).WillRepeatedly(Return("1.1.2"));
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile(_, "/etc/cp/revert/last_known_working_orchestrator"))
|
||||
.WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_orchestration_tools, copyFile(_, "/etc/cp/revert/last_known_manifest")).WillOnce(Return(true));
|
||||
EXPECT_CALL(
|
||||
mock_orchestration_tools,
|
||||
writeFile("1.1.1 1.1.2 2025-01-28 07:53:23\n", "/var/log/nano_agent/prev_upgrades", true)
|
||||
).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>())).WillOnce(Return());
|
||||
runUpgradeRoutine();
|
||||
}
|
||||
|
||||
TEST_F(OrchestrationTest, getBadPolicyUpdate)
|
||||
|
||||
@@ -141,11 +141,11 @@ packageHandlerActionsToString(PackageHandlerActions action)
|
||||
}
|
||||
}
|
||||
|
||||
dbgAssert(false)
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "service configuration")
|
||||
<< "Package handler action is not supported. Action: "
|
||||
<< static_cast<unsigned int>(action);
|
||||
return string();
|
||||
return string("--UNSUPPORTED");
|
||||
}
|
||||
|
||||
void
|
||||
|
||||
@@ -208,6 +208,7 @@ ServiceDetails::sendNewConfigurations(int configuration_id, const string &policy
|
||||
MessageMetadata new_config_req_md("127.0.0.1", service_port);
|
||||
new_config_req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
|
||||
new_config_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
|
||||
new_config_req_md.setSuspension(false);
|
||||
auto res = messaging->sendSyncMessage(
|
||||
HTTPMethod::POST,
|
||||
"/set-new-configuration",
|
||||
|
||||
@@ -168,10 +168,12 @@ FogAuthenticator::registerAgent(
|
||||
auto nginx_data = details_resolver->parseNginxMetadata();
|
||||
|
||||
if (nginx_data.ok()) {
|
||||
string nginx_signature;
|
||||
string nginx_version;
|
||||
string config_opt;
|
||||
string cc_opt;
|
||||
tie(config_opt, cc_opt, nginx_version) = nginx_data.unpack();
|
||||
tie(config_opt, cc_opt, nginx_version, nginx_signature) = nginx_data.unpack();
|
||||
request << make_pair("nginxSignature", nginx_signature);
|
||||
request << make_pair("nginxVersion", nginx_version);
|
||||
request << make_pair("configureOpt", config_opt);
|
||||
request << make_pair("extraCompilerOpt", cc_opt);
|
||||
@@ -377,9 +379,13 @@ FogAuthenticator::registerLocalAgentToFog()
|
||||
{
|
||||
auto local_reg_token = getRegistrationToken();
|
||||
if (!local_reg_token.ok()) return;
|
||||
|
||||
string reg_token = local_reg_token.unpack().getData();
|
||||
if (reg_token.empty()) return;
|
||||
|
||||
dbgInfo(D_ORCHESTRATOR) << "Start local agent registration to the fog";
|
||||
|
||||
string exec_command = "open-appsec-ctl --set-mode --online_mode --token " + local_reg_token.unpack().getData();
|
||||
string exec_command = "open-appsec-ctl --set-mode --online_mode --token " + reg_token;
|
||||
|
||||
auto i_agent_details = Singleton::Consume<I_AgentDetails>::by<FogAuthenticator>();
|
||||
auto fog_address = i_agent_details->getFogDomain();
|
||||
@@ -467,9 +473,9 @@ getDeplymentType()
|
||||
case EnvType::COUNT: break;
|
||||
}
|
||||
|
||||
dbgAssert(false)
|
||||
dbgAssertOpt(false)
|
||||
<< AlertInfo(AlertTeam::CORE, "fog communication")
|
||||
<< "Failed to get a legitimate deplyment type: "
|
||||
<< "Failed to get a legitimate deployment type: "
|
||||
<< static_cast<uint>(deplyment_type);
|
||||
return "Embedded";
|
||||
}
|
||||
|
||||
@@ -246,6 +246,27 @@ public:
|
||||
return matched_rule;
|
||||
}
|
||||
|
||||
void
|
||||
fetchReplicaCount()
|
||||
{
|
||||
string curl_cmd =
|
||||
"curl -H \"Authorization: Bearer " + kubernetes_token + "\" "
|
||||
"https://kubernetes.default.svc.cluster.local/apis/apps/v1/namespaces/" + kubernetes_namespace +
|
||||
"/deployments/${AGENT_DEPLOYMENT_NAME} -k -s | jq .status.replicas";
|
||||
auto maybe_replicas = i_shell_cmd->getExecOutput(curl_cmd);
|
||||
if (maybe_replicas.ok()) {
|
||||
try {
|
||||
replicas = std::stoi(maybe_replicas.unpack());
|
||||
} catch (const std::exception &e) {
|
||||
dbgWarning(D_RATE_LIMIT) << "error while converting replicas: " << e.what();
|
||||
}
|
||||
}
|
||||
if (replicas == 0) {
|
||||
dbgWarning(D_RATE_LIMIT) << "replicas is set to 0, setting replicas to 1";
|
||||
replicas = 1;
|
||||
}
|
||||
}
|
||||
|
||||
EventVerdict
|
||||
respond(const HttpRequestHeaderEvent &event) override
|
||||
{
|
||||
@@ -271,10 +292,72 @@ public:
|
||||
dbgDebug(D_RATE_LIMIT) << "source identifier value: " << source_identifier;
|
||||
|
||||
auto maybe_source_ip = env->get<IPAddr>(HttpTransactionData::client_ip_ctx);
|
||||
set<string> ip_set;
|
||||
string source_ip = "";
|
||||
if (maybe_source_ip.ok()) source_ip = ipAddrToStr(maybe_source_ip.unpack());
|
||||
if (maybe_source_ip.ok()) {
|
||||
source_ip = ipAddrToStr(maybe_source_ip.unpack());
|
||||
|
||||
unordered_map<string, set<string>> condition_map = createConditionMap(uri, source_ip, source_identifier);
|
||||
if (getProfileAgentSettingWithDefault<bool>(false, "agent.rateLimit.ignoreSourceIP")) {
|
||||
dbgDebug(D_RATE_LIMIT) << "Rate limit ignoring source ip: " << source_ip;
|
||||
} else {
|
||||
ip_set.insert(source_ip);
|
||||
}
|
||||
}
|
||||
|
||||
auto maybe_xff = env->get<string>(HttpTransactionData::xff_vals_ctx);
|
||||
if (!maybe_xff.ok()) {
|
||||
dbgTrace(D_RATE_LIMIT) << "Rate limit failed to get xff vals from env";
|
||||
} else {
|
||||
auto ips = split(maybe_xff.unpack(), ',');
|
||||
ip_set.insert(ips.begin(), ips.end());
|
||||
}
|
||||
|
||||
EnumArray<I_GeoLocation::GeoLocationField, string> geo_location_data;
|
||||
set<string> country_codes;
|
||||
set<string> country_names;
|
||||
for (const string& source : ip_set) {
|
||||
Maybe<IPAddr> maybe_source_ip = IPAddr::createIPAddr(source);
|
||||
if (!maybe_source_ip.ok()){
|
||||
dbgWarning(D_RATE_LIMIT)
|
||||
<< "Rate limit failed to create ip address from source: "
|
||||
<< source
|
||||
<< ", Error: "
|
||||
<< maybe_source_ip.getErr();
|
||||
continue;
|
||||
}
|
||||
auto asset_location =
|
||||
Singleton::Consume<I_GeoLocation>::by<RateLimit>()->lookupLocation(maybe_source_ip.unpack());
|
||||
if (!asset_location.ok()) {
|
||||
dbgWarning(D_RATE_LIMIT)
|
||||
<< "Rate limit lookup location failed for source: "
|
||||
<< source_ip
|
||||
<< ", Error: "
|
||||
<< asset_location.getErr();
|
||||
continue;
|
||||
}
|
||||
geo_location_data = asset_location.unpack();
|
||||
auto code = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_CODE];
|
||||
auto name = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_NAME];
|
||||
country_codes.insert(code);
|
||||
country_names.insert(name);
|
||||
dbgTrace(D_RATE_LIMIT)
|
||||
<< "Rate limit found "
|
||||
<< "country code: "
|
||||
<< code
|
||||
<< ", country name: "
|
||||
<< name
|
||||
<< ", source ip address: "
|
||||
<< source;
|
||||
}
|
||||
|
||||
|
||||
unordered_map<string, set<string>> condition_map = createConditionMap(
|
||||
uri,
|
||||
source_ip,
|
||||
source_identifier,
|
||||
country_codes,
|
||||
country_names
|
||||
);
|
||||
if (shouldApplyException(condition_map)) {
|
||||
dbgDebug(D_RATE_LIMIT) << "found accept exception, not enforcing rate limit on this URI: " << uri;
|
||||
return ACCEPT;
|
||||
@@ -293,11 +376,6 @@ public:
|
||||
return ACCEPT;
|
||||
}
|
||||
|
||||
auto replicas = getenv("REPLICA_COUNT") ? std::stoi(getenv("REPLICA_COUNT")) : 1;
|
||||
if (replicas == 0) {
|
||||
dbgWarning(D_RATE_LIMIT) << "REPLICA_COUNT environment variable is set to 0, setting REPLICA_COUNT to 1";
|
||||
replicas = 1;
|
||||
}
|
||||
burst = static_cast<float>(rule.getRateLimit()) / replicas;
|
||||
limit = static_cast<float>(calcRuleLimit(rule)) / replicas;
|
||||
|
||||
@@ -476,10 +554,18 @@ public:
|
||||
}
|
||||
|
||||
unordered_map<string, set<string>>
|
||||
createConditionMap(const string &uri, const string &source_ip, const string &source_identifier)
|
||||
createConditionMap(
|
||||
const string &uri,
|
||||
const string &source_ip,
|
||||
const string &source_identifier,
|
||||
const set<string> &country_codes,
|
||||
const set<string> &country_names
|
||||
)
|
||||
{
|
||||
unordered_map<string, set<string>> condition_map;
|
||||
if (!source_ip.empty()) condition_map["sourceIP"].insert(source_ip);
|
||||
if (!country_codes.empty()) condition_map["countryCode"].insert(country_codes.begin(), country_codes.end());
|
||||
if (!country_names.empty()) condition_map["countryName"].insert(country_names.begin(), country_names.end());
|
||||
condition_map["sourceIdentifier"].insert(source_identifier);
|
||||
condition_map["url"].insert(uri);
|
||||
|
||||
@@ -616,6 +702,21 @@ public:
|
||||
"Initialize rate limit component",
|
||||
false
|
||||
);
|
||||
|
||||
i_shell_cmd = Singleton::Consume<I_ShellCmd>::by<RateLimit>();
|
||||
i_env_details = Singleton::Consume<I_EnvDetails>::by<RateLimit>();
|
||||
env_type = i_env_details->getEnvType();
|
||||
if (env_type == EnvType::K8S) {
|
||||
kubernetes_token = i_env_details->getToken();
|
||||
kubernetes_namespace = i_env_details->getNameSpace();
|
||||
fetchReplicaCount();
|
||||
Singleton::Consume<I_MainLoop>::by<RateLimit>()->addRecurringRoutine(
|
||||
I_MainLoop::RoutineType::Offline,
|
||||
chrono::seconds(120),
|
||||
[this]() { fetchReplicaCount(); },
|
||||
"Fetch current replica count from the Kubernetes cluster"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
@@ -624,6 +725,9 @@ public:
|
||||
disconnectRedis();
|
||||
}
|
||||
|
||||
I_ShellCmd *i_shell_cmd = nullptr;
|
||||
I_EnvDetails* i_env_details = nullptr;
|
||||
|
||||
private:
|
||||
static constexpr auto DROP = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
|
||||
static constexpr auto ACCEPT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
||||
@@ -634,6 +738,10 @@ private:
|
||||
int burst;
|
||||
float limit;
|
||||
redisContext* redis = nullptr;
|
||||
int replicas = 1;
|
||||
EnvType env_type;
|
||||
string kubernetes_namespace = "";
|
||||
string kubernetes_token = "";
|
||||
};
|
||||
|
||||
RateLimit::RateLimit() : Component("RateLimit"), pimpl(make_unique<Impl>()) {}
|
||||
|
||||
@@ -137,9 +137,13 @@ public:
|
||||
void setRemoteSyncEnabled(bool enabled);
|
||||
protected:
|
||||
void mergeProcessedFromRemote();
|
||||
std::string getWindowId();
|
||||
void waitSync();
|
||||
std::string getPostDataUrl();
|
||||
std::string getUri();
|
||||
size_t getIntervalsCount();
|
||||
void incrementIntervalsCount();
|
||||
bool isBase();
|
||||
|
||||
template<typename T>
|
||||
bool sendObject(T &obj, HTTPMethod method, std::string uri)
|
||||
@@ -252,14 +256,13 @@ protected:
|
||||
const std::string m_remotePath; // Created from tenentId + / + assetId + / + class
|
||||
std::chrono::seconds m_interval;
|
||||
std::string m_owner;
|
||||
const std::string m_assetId;
|
||||
|
||||
private:
|
||||
bool localSyncAndProcess();
|
||||
void updateStateFromRemoteService();
|
||||
RemoteFilesList getProcessedFilesList();
|
||||
RemoteFilesList getRemoteProcessedFilesList();
|
||||
std::string getWindowId();
|
||||
bool isBase();
|
||||
std::string getLearningHost();
|
||||
std::string getSharedStorageHost();
|
||||
|
||||
@@ -270,7 +273,6 @@ private:
|
||||
size_t m_windowsCount;
|
||||
size_t m_intervalsCounter;
|
||||
bool m_remoteSyncEnabled;
|
||||
const std::string m_assetId;
|
||||
const bool m_isAssetIdUuid;
|
||||
std::string m_type;
|
||||
std::string m_lastProcessedModified;
|
||||
|
||||
@@ -19,12 +19,14 @@
|
||||
#include "../waap_clib/WaapParameters.h"
|
||||
#include "../waap_clib/WaapOpenRedirectPolicy.h"
|
||||
#include "../waap_clib/WaapErrorDisclosurePolicy.h"
|
||||
#include "../waap_clib/DecisionType.h"
|
||||
#include "../waap_clib/CsrfPolicy.h"
|
||||
#include "../waap_clib/UserLimitsPolicy.h"
|
||||
#include "../waap_clib/RateLimiting.h"
|
||||
#include "../waap_clib/SecurityHeadersPolicy.h"
|
||||
#include <memory>
|
||||
|
||||
|
||||
enum class BlockingLevel {
|
||||
NO_BLOCKING = 0,
|
||||
LOW_BLOCKING_LEVEL,
|
||||
@@ -44,8 +46,8 @@ public:
|
||||
virtual const std::string& get_AssetId() const = 0;
|
||||
virtual const std::string& get_AssetName() const = 0;
|
||||
virtual const BlockingLevel& get_BlockingLevel() const = 0;
|
||||
virtual const std::string& get_PracticeId() const = 0;
|
||||
virtual const std::string& get_PracticeName() const = 0;
|
||||
virtual const std::string& get_PracticeIdByPactice(DecisionType practiceType) const = 0;
|
||||
virtual const std::string& get_PracticeNameByPactice(DecisionType practiceType) const = 0;
|
||||
virtual const std::string& get_PracticeSubType() const = 0;
|
||||
virtual const std::string& get_RuleId() const = 0;
|
||||
virtual const std::string& get_RuleName() const = 0;
|
||||
|
||||
@@ -91,6 +91,7 @@ add_library(waap_clib
|
||||
ParserScreenedJson.cc
|
||||
ParserBinaryFile.cc
|
||||
RegexComparator.cc
|
||||
RequestsMonitor.cc
|
||||
)
|
||||
|
||||
add_definitions("-Wno-unused-function")
|
||||
|
||||
@@ -113,6 +113,9 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
|
||||
<< parser_depth
|
||||
<< " v_len = "
|
||||
<< v_len;
|
||||
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << m_key;
|
||||
|
||||
// Decide whether to push/pop the value in the keystack.
|
||||
bool shouldUpdateKeyStack = (flags & BUFFERED_RECEIVER_F_UNNAMED) == 0;
|
||||
|
||||
@@ -275,13 +278,23 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
|
||||
// Detect and decode potential base64 chunks in the value before further processing
|
||||
|
||||
bool base64ParamFound = false;
|
||||
size_t base64_offset = 0;
|
||||
Waap::Util::BinaryFileType base64BinaryFileType = Waap::Util::BinaryFileType::FILE_TYPE_NONE;
|
||||
if (m_depth == 1 && flags == BUFFERED_RECEIVER_F_MIDDLE && m_key.depth() == 1 && m_key.first() != "#base64"){
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << " === will not check base64 since prev data block was not b64-encoded ===";
|
||||
} else {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << " ===Processing potential base64===";
|
||||
if (isUrlPayload && m_depth == 1 && cur_val[0] == '/') {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "removing leading '/' from URL param value";
|
||||
base64_offset = 1;
|
||||
}
|
||||
std::string decoded_val, decoded_key;
|
||||
base64_variants base64_status = Waap::Util::b64Test(cur_val, decoded_key, decoded_val, base64BinaryFileType);
|
||||
base64_variants base64_status = Waap::Util::b64Test(
|
||||
cur_val,
|
||||
decoded_key,
|
||||
decoded_val,
|
||||
base64BinaryFileType,
|
||||
base64_offset);
|
||||
|
||||
dbgTrace(D_WAAP_DEEP_PARSER)
|
||||
<< " status = "
|
||||
@@ -289,16 +302,50 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
|
||||
<< " key = "
|
||||
<< decoded_key
|
||||
<< " value = "
|
||||
<< decoded_val;
|
||||
<< decoded_val
|
||||
<< "m_depth = "
|
||||
<< m_depth;
|
||||
|
||||
switch (base64_status) {
|
||||
case SINGLE_B64_CHUNK_CONVERT:
|
||||
cur_val = decoded_val;
|
||||
if (base64_offset) {
|
||||
cur_val = "/" + decoded_val;
|
||||
} else {
|
||||
cur_val = decoded_val;
|
||||
}
|
||||
base64ParamFound = true;
|
||||
break;
|
||||
case CONTINUE_DUAL_SCAN:
|
||||
if (decoded_val.size() > 0) {
|
||||
decoded_key = "#base64";
|
||||
base64ParamFound = false;
|
||||
if (base64_offset) {
|
||||
decoded_val = "/" + decoded_val;
|
||||
}
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << m_key;
|
||||
rc = onKv(
|
||||
decoded_key.c_str(),
|
||||
decoded_key.size(),
|
||||
decoded_val.data(),
|
||||
decoded_val.size(),
|
||||
flags,
|
||||
parser_depth
|
||||
);
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "After call to onKv with suspected value rc = " << rc;
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << m_key;
|
||||
break;
|
||||
} else {
|
||||
dbgTrace(D_WAAP) << "base64 decode suspected and empty value. Skipping.";
|
||||
base64ParamFound = false;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case KEY_VALUE_B64_PAIR:
|
||||
// going deep with new pair in case value is not empty
|
||||
if (decoded_val.size() > 0) {
|
||||
if (base64_offset) {
|
||||
decoded_key = "/" + decoded_key;
|
||||
}
|
||||
cur_val = decoded_val;
|
||||
base64ParamFound = true;
|
||||
rc = onKv(
|
||||
@@ -309,9 +356,13 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
|
||||
flags,
|
||||
parser_depth
|
||||
);
|
||||
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << " rc = " << rc;
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "After call to onKv with suspected value rc = " << rc;
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << m_key;
|
||||
if (rc != CONTINUE_PARSING) {
|
||||
if (shouldUpdateKeyStack) {
|
||||
m_key.pop("deep parser key");
|
||||
}
|
||||
m_depth--;
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
@@ -323,7 +374,7 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
|
||||
}
|
||||
|
||||
if (base64ParamFound) {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "DeepParser::onKv(): pushing #base64 prefix to the key.";
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "pushing #base64 prefix to the key.";
|
||||
m_key.push("#base64", 7, false);
|
||||
}
|
||||
}
|
||||
@@ -437,7 +488,6 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
|
||||
if (shouldUpdateKeyStack) {
|
||||
m_key.pop("deep parser key");
|
||||
}
|
||||
|
||||
m_depth--;
|
||||
return rc;
|
||||
}
|
||||
@@ -587,7 +637,6 @@ DeepParser::parseBuffer(
|
||||
if (shouldUpdateKeyStack) {
|
||||
m_key.pop("deep parser key");
|
||||
}
|
||||
|
||||
m_depth--;
|
||||
return DONE_PARSING;
|
||||
}
|
||||
@@ -909,7 +958,6 @@ DeepParser::parseAfterMisleadingMultipartBoundaryCleaned(
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -1081,7 +1129,7 @@ DeepParser::createInternalParser(
|
||||
<< " isBodyPayload = "
|
||||
<< isBodyPayload;
|
||||
//Detect sensor_data format in body and just use dedicated filter for it
|
||||
if (m_depth == 1
|
||||
if ((m_depth == 1)
|
||||
&& isBodyPayload
|
||||
&& Waap::Util::detectKnownSource(cur_val) == Waap::Util::SOURCE_TYPE_SENSOR_DATA) {
|
||||
m_parsersDeque.push_back(
|
||||
|
||||
@@ -37,14 +37,24 @@ void KeyStack::push(const char* subkey, size_t subkeySize, bool countDepth) {
|
||||
m_nameDepth++;
|
||||
}
|
||||
|
||||
dbgTrace(D_WAAP) << "KeyStack(" << m_name << ")::push(): '" << std::string(subkey, subkeySize) <<
|
||||
"' => full_key='" << std::string(m_key.data(), m_key.size()) << "'";
|
||||
dbgTrace(D_WAAP)
|
||||
<< "KeyStack("
|
||||
<< m_name
|
||||
<< ")::push(): '"
|
||||
<< std::string(subkey, subkeySize)
|
||||
<< "' => full_key='"
|
||||
<< std::string(m_key.data(), m_key.size())
|
||||
<< "'";
|
||||
}
|
||||
|
||||
void KeyStack::pop(const char* log, bool countDepth) {
|
||||
// Keep depth balanced even if m_key[] buffer is full
|
||||
if (m_key.empty() || m_stack.empty()) {
|
||||
dbgDebug(D_WAAP) << "KeyStack(" << m_name << ")::pop(): [ERROR] ATTEMPT TO POP FROM EMPTY KEY STACK! " << log;
|
||||
dbgDebug(D_WAAP)
|
||||
<< "KeyStack("
|
||||
<< m_name
|
||||
<< ")::pop(): [ERROR] ATTEMPT TO POP FROM EMPTY KEY STACK! "
|
||||
<< log;
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -55,6 +65,22 @@ void KeyStack::pop(const char* log, bool countDepth) {
|
||||
// Remove last subkey.
|
||||
m_key.erase(m_stack.back());
|
||||
m_stack.pop_back();
|
||||
dbgTrace(D_WAAP) << "KeyStack(" << m_name << ")::pop(): full_key='" <<
|
||||
std::string(m_key.data(), (int)m_key.size()) << "': pop_key=" << log << "'";
|
||||
dbgTrace(D_WAAP)
|
||||
<< "KeyStack("
|
||||
<< m_name
|
||||
<< ")::pop(): full_key='"
|
||||
<< std::string(m_key.data(), (int)m_key.size())
|
||||
<< "': pop_key="
|
||||
<< log
|
||||
<< "'";
|
||||
}
|
||||
|
||||
void KeyStack::print(std::ostream &os) const
|
||||
{
|
||||
os
|
||||
<< "KeyStack("
|
||||
<< m_name
|
||||
<< ")::show(): full_key='"
|
||||
<< std::string(m_key.data(), (int)m_key.size())
|
||||
<< "'";
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ public:
|
||||
void pop(const char* log, bool countDepth=true);
|
||||
bool empty() const { return m_key.empty(); }
|
||||
void clear() { m_key.clear(); m_stack.clear(); }
|
||||
void print(std::ostream &os) const;
|
||||
size_t depth() const { return m_nameDepth; }
|
||||
size_t size() const {
|
||||
return str().size();
|
||||
|
||||
@@ -111,8 +111,7 @@ int BufferedReceiver::onKvDone()
|
||||
// This must be called even if m_value is empty in order to signal the BUFFERED_RECEIVER_F_LAST flag to the
|
||||
// receiver!
|
||||
dbgTrace(D_WAAP_PARSER)
|
||||
<< " Call onKv on the remainder of the buffer not yet pushed to the receiver "
|
||||
<< "calling onKv()";
|
||||
<< " Call onKv on the remainder of the buffer not yet pushed to the receiver calling onKv()";
|
||||
int rc = onKv(m_key.data(), m_key.size(), m_value.data(), m_value.size(), m_flags, m_parser_depth);
|
||||
|
||||
// Reset the object's state to allow reuse for other parsers
|
||||
|
||||
@@ -21,6 +21,7 @@ USE_DEBUG_FLAG(D_WAAP);
|
||||
|
||||
const std::string ParserPDF::m_parserName = "ParserPDF";
|
||||
const char* PDF_TAIL = "%%EOF";
|
||||
const size_t PDF_TAIL_LEN = 5;
|
||||
|
||||
ParserPDF::ParserPDF(
|
||||
IParserStreamReceiver &receiver,
|
||||
@@ -44,16 +45,21 @@ ParserPDF::push(const char *buf, size_t len)
|
||||
<< "' len="
|
||||
<< len;
|
||||
|
||||
const char *c;
|
||||
|
||||
if (m_state == s_error) {
|
||||
return 0;
|
||||
}
|
||||
if (len == 0)
|
||||
{
|
||||
dbgTrace(D_WAAP_PARSER_PDF) << "ParserPDF::push(): end of stream. m_state=" << m_state;
|
||||
|
||||
if (m_state == s_end) {
|
||||
if (len == 0) {
|
||||
dbgTrace(D_WAAP_PARSER_PDF) << "ParserPDF::push(): end of stream. m_state=" << m_state;
|
||||
if (m_state == s_body && m_tailOffset >= PDF_TAIL_LEN) {
|
||||
if (m_receiver.onKey("PDF", 3) != 0) {
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
if (m_receiver.onValue("", 0) != 0) {
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
m_receiver.onKvDone();
|
||||
} else {
|
||||
m_state = s_error;
|
||||
@@ -61,38 +67,43 @@ ParserPDF::push(const char *buf, size_t len)
|
||||
return 0;
|
||||
}
|
||||
|
||||
size_t start = (len > MAX_PDF_TAIL_LOOKUP) ? len - MAX_PDF_TAIL_LOOKUP : 0;
|
||||
switch (m_state) {
|
||||
case s_start:
|
||||
m_state = s_body;
|
||||
CP_FALL_THROUGH;
|
||||
case s_body:
|
||||
{
|
||||
size_t tail_lookup_offset = (len > MAX_PDF_TAIL_LOOKUP) ? len - MAX_PDF_TAIL_LOOKUP : 0;
|
||||
c = strstr(buf + tail_lookup_offset, PDF_TAIL);
|
||||
for (size_t i = start; i < len; i++) {
|
||||
dbgTrace(D_WAAP_PARSER_PDF)
|
||||
<< "string to search: " << std::string(buf + tail_lookup_offset)
|
||||
<< " c=" << c;
|
||||
if (c) {
|
||||
m_state = s_end;
|
||||
CP_FALL_THROUGH;
|
||||
<< "ParserPDF::push(): m_tailOffset="
|
||||
<< m_tailOffset
|
||||
<< " buf[i]="
|
||||
<< buf[i];
|
||||
if (m_tailOffset <= PDF_TAIL_LEN - 1) {
|
||||
if (buf[i] == PDF_TAIL[m_tailOffset]) {
|
||||
m_tailOffset++;
|
||||
} else {
|
||||
m_tailOffset = 0;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
if (buf[i] == '\r' || buf[i] == '\n' || buf[i] == ' ' || buf[i] == 0) {
|
||||
m_tailOffset++;
|
||||
} else {
|
||||
m_tailOffset = 0;
|
||||
i--;
|
||||
}
|
||||
}
|
||||
}
|
||||
case s_end:
|
||||
if (m_receiver.onKey("PDF", 3) != 0) {
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
if (m_receiver.onValue("", 0) != 0) {
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
dbgTrace(D_WAAP_PARSER_PDF)
|
||||
<< "ParserPDF::push()->s_body: m_tailOffset="
|
||||
<< m_tailOffset;
|
||||
break;
|
||||
case s_error:
|
||||
break;
|
||||
default:
|
||||
dbgTrace(D_WAAP_PARSER_PDF) << "ParserPDF::push(): unknown state: " << m_state;
|
||||
dbgTrace(D_WAAP_PARSER_PDF)
|
||||
<< "ParserPDF::push(): unknown state: "
|
||||
<< m_state;
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -34,7 +34,6 @@ private:
|
||||
enum state {
|
||||
s_start,
|
||||
s_body,
|
||||
s_end,
|
||||
s_error
|
||||
};
|
||||
|
||||
@@ -42,6 +41,7 @@ private:
|
||||
enum state m_state;
|
||||
static const std::string m_parserName;
|
||||
size_t m_parser_depth;
|
||||
size_t m_tailOffset = 0;
|
||||
};
|
||||
|
||||
#endif // __PARSER_PDF_H__
|
||||
|
||||
158
components/security_apps/waap/waap_clib/RequestsMonitor.cc
Normal file
158
components/security_apps/waap/waap_clib/RequestsMonitor.cc
Normal file
@@ -0,0 +1,158 @@
|
||||
#include "RequestsMonitor.h"
|
||||
#include "waap.h"
|
||||
#include "SyncLearningNotification.h"
|
||||
#include "report_messaging.h"
|
||||
#include "customized_cereal_map.h"
|
||||
|
||||
USE_DEBUG_FLAG(D_WAAP_CONFIDENCE_CALCULATOR);
|
||||
using namespace std;
|
||||
|
||||
SourcesRequestMonitor::SourcesRequestMonitor(
|
||||
const string& filePath,
|
||||
const string& remotePath,
|
||||
const string& assetId,
|
||||
const string& owner) :
|
||||
SerializeToLocalAndRemoteSyncBase(
|
||||
chrono::minutes(10),
|
||||
chrono::seconds(30),
|
||||
filePath,
|
||||
remotePath != "" ? remotePath + "/Monitor" : remotePath,
|
||||
assetId,
|
||||
owner
|
||||
), m_sourcesRequests()
|
||||
{
|
||||
}
|
||||
|
||||
SourcesRequestMonitor::~SourcesRequestMonitor()
|
||||
{
|
||||
}
|
||||
|
||||
void SourcesRequestMonitor::syncWorker()
|
||||
{
|
||||
dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR) << "Running the sync worker for assetId='" << m_assetId << "', owner='" <<
|
||||
m_owner << "'";
|
||||
incrementIntervalsCount();
|
||||
OrchestrationMode mode = Singleton::exists<I_AgentDetails>() ?
|
||||
Singleton::Consume<I_AgentDetails>::by<WaapComponent>()->getOrchestrationMode() : OrchestrationMode::ONLINE;
|
||||
|
||||
bool enabled = getProfileAgentSettingWithDefault<bool>(false, "appsec.sourceRequestsMonitor.enabled");
|
||||
|
||||
if (mode == OrchestrationMode::OFFLINE || !enabled || isBase() || !postData()) {
|
||||
dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR)
|
||||
<< "Did not report data. for asset: "
|
||||
<< m_assetId
|
||||
<< " Remote URL: "
|
||||
<< m_remotePath
|
||||
<< " is enabled: "
|
||||
<< to_string(enabled)
|
||||
<< ", mode: " << int(mode);
|
||||
return;
|
||||
}
|
||||
|
||||
dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Waiting for all agents to post their data";
|
||||
waitSync();
|
||||
|
||||
if (mode == OrchestrationMode::HYBRID) {
|
||||
dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "detected running in standalone mode. not sending sync notification";
|
||||
} else {
|
||||
SyncLearningNotificationObject syncNotification(m_assetId, "Monitor", getWindowId());
|
||||
|
||||
dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "sending sync notification: " << syncNotification;
|
||||
|
||||
ReportMessaging(
|
||||
"sync notification for '" + m_assetId + "'",
|
||||
ReportIS::AudienceTeam::WAAP,
|
||||
syncNotification,
|
||||
MessageCategory::GENERIC,
|
||||
ReportIS::Tags::WAF,
|
||||
ReportIS::Notification::SYNC_LEARNING
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
void SourcesRequestMonitor::logSourceHit(const string& source)
|
||||
{
|
||||
m_sourcesRequests[chrono::duration_cast<chrono::minutes>(
|
||||
Singleton::Consume<I_TimeGet>::by<WaapComponent>()->getWalltime()
|
||||
).count()][source]++;
|
||||
}
|
||||
|
||||
// LCOV_EXCL_START Reason: internal functions not used
|
||||
|
||||
void SourcesRequestMonitor::pullData(const vector<string> &data)
|
||||
{
|
||||
// not used. report only
|
||||
}
|
||||
|
||||
void SourcesRequestMonitor::processData()
|
||||
{
|
||||
// not used. report only
|
||||
}
|
||||
|
||||
void SourcesRequestMonitor::postProcessedData()
|
||||
{
|
||||
// not used. report only
|
||||
}
|
||||
|
||||
void SourcesRequestMonitor::pullProcessedData(const vector<string> &data)
|
||||
{
|
||||
// not used. report only
|
||||
}
|
||||
|
||||
void SourcesRequestMonitor::updateState(const vector<string> &data)
|
||||
{
|
||||
// not used. report only
|
||||
}
|
||||
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
typedef map<string, map<string, size_t>> MonitorJsonData;
|
||||
|
||||
class SourcesRequestsReport : public RestGetFile
|
||||
{
|
||||
public:
|
||||
SourcesRequestsReport(MonitorData& _sourcesRequests, const string& _agentId)
|
||||
: sourcesRequests(), agentId(_agentId)
|
||||
{
|
||||
MonitorJsonData montiorData;
|
||||
for (const auto& window : _sourcesRequests) {
|
||||
for (const auto& source : window.second) {
|
||||
montiorData[to_string(window.first)][source.first] = source.second;
|
||||
}
|
||||
}
|
||||
sourcesRequests = montiorData;
|
||||
}
|
||||
private:
|
||||
C2S_PARAM(MonitorJsonData, sourcesRequests);
|
||||
C2S_PARAM(string, agentId);
|
||||
};
|
||||
|
||||
bool SourcesRequestMonitor::postData()
|
||||
{
|
||||
dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR) << "Sending the data to remote";
|
||||
// send collected data to remote and clear the local data
|
||||
string url = getPostDataUrl();
|
||||
string agentId = Singleton::Consume<I_AgentDetails>::by<WaapComponent>()->getAgentId();
|
||||
SourcesRequestsReport currentWindow(m_sourcesRequests, agentId);
|
||||
bool ok = sendNoReplyObjectWithRetry(currentWindow,
|
||||
HTTPMethod::PUT,
|
||||
url);
|
||||
if (!ok) {
|
||||
dbgError(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to post collected data to: " << url;
|
||||
}
|
||||
dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR) << "Data sent to remote: " << ok;
|
||||
m_sourcesRequests.clear();
|
||||
return ok;
|
||||
}
|
||||
|
||||
void SourcesRequestMonitor::serialize(ostream& stream)
|
||||
{
|
||||
cereal::JSONOutputArchive archive(stream);
|
||||
archive(m_sourcesRequests);
|
||||
}
|
||||
|
||||
void SourcesRequestMonitor::deserialize(istream& stream)
|
||||
{
|
||||
cereal::JSONInputArchive archive(stream);
|
||||
archive(m_sourcesRequests);
|
||||
}
|
||||
33
components/security_apps/waap/waap_clib/RequestsMonitor.h
Normal file
33
components/security_apps/waap/waap_clib/RequestsMonitor.h
Normal file
@@ -0,0 +1,33 @@
|
||||
#ifndef __REQUESTS_MONITOR_H__
|
||||
#define __REQUESTS_MONITOR_H__
|
||||
#include "i_serialize.h"
|
||||
|
||||
typedef std::map<uint64_t, std::map<std::string, size_t>> MonitorData;
|
||||
|
||||
class SourcesRequestMonitor : public SerializeToLocalAndRemoteSyncBase
|
||||
{
|
||||
public:
|
||||
SourcesRequestMonitor(
|
||||
const std::string& filePath,
|
||||
const std::string& remotePath,
|
||||
const std::string& assetId,
|
||||
const std::string& owner);
|
||||
virtual ~SourcesRequestMonitor();
|
||||
virtual void syncWorker() override;
|
||||
void logSourceHit(const std::string& source);
|
||||
protected:
|
||||
virtual void pullData(const std::vector<std::string> &data) override;
|
||||
virtual void processData() override;
|
||||
virtual void postProcessedData() override;
|
||||
virtual void pullProcessedData(const std::vector<std::string> &data) override;
|
||||
virtual void updateState(const std::vector<std::string> &data) override;
|
||||
virtual bool postData() override;
|
||||
|
||||
void serialize(std::ostream& stream);
|
||||
void deserialize(std::istream& stream);
|
||||
private:
|
||||
// map of sources and their requests per minute (UNIX)
|
||||
MonitorData m_sourcesRequests;
|
||||
};
|
||||
|
||||
#endif // __REQUESTS_MONITOR_H__
|
||||
@@ -194,6 +194,10 @@ void SerializeToFileBase::saveData()
|
||||
dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to gzip data";
|
||||
} else {
|
||||
ss.str(string((const char *)res.output, res.num_output_bytes));
|
||||
// free the memory allocated by compressData
|
||||
if (res.output) free(res.output);
|
||||
res.output = nullptr;
|
||||
res.num_output_bytes = 0;
|
||||
}
|
||||
if (res.output) free(res.output);
|
||||
res.output = nullptr;
|
||||
@@ -403,6 +407,7 @@ SerializeToLocalAndRemoteSyncBase::SerializeToLocalAndRemoteSyncBase(
|
||||
m_remotePath(replaceAllCopy(remotePath, "//", "/")),
|
||||
m_interval(0),
|
||||
m_owner(owner),
|
||||
m_assetId(replaceAllCopy(assetId, "/", "")),
|
||||
m_pMainLoop(nullptr),
|
||||
m_waitForSync(waitForSync),
|
||||
m_workerRoutineId(0),
|
||||
@@ -410,7 +415,6 @@ SerializeToLocalAndRemoteSyncBase::SerializeToLocalAndRemoteSyncBase(
|
||||
m_windowsCount(0),
|
||||
m_intervalsCounter(0),
|
||||
m_remoteSyncEnabled(true),
|
||||
m_assetId(replaceAllCopy(assetId, "/", "")),
|
||||
m_isAssetIdUuid(Waap::Util::isUuid(assetId)),
|
||||
m_shared_storage_host(genError("not set")),
|
||||
m_learning_host(genError("not set"))
|
||||
@@ -465,6 +469,15 @@ bool SerializeToLocalAndRemoteSyncBase::isBase()
|
||||
return m_remotePath == "";
|
||||
}
|
||||
|
||||
void SerializeToLocalAndRemoteSyncBase::waitSync()
|
||||
{
|
||||
if (m_pMainLoop == nullptr)
|
||||
{
|
||||
return;
|
||||
}
|
||||
m_pMainLoop->yield(m_waitForSync);
|
||||
}
|
||||
|
||||
string SerializeToLocalAndRemoteSyncBase::getUri()
|
||||
{
|
||||
static const string hybridModeUri = "/api";
|
||||
@@ -480,6 +493,11 @@ size_t SerializeToLocalAndRemoteSyncBase::getIntervalsCount()
|
||||
return m_intervalsCounter;
|
||||
}
|
||||
|
||||
void SerializeToLocalAndRemoteSyncBase::incrementIntervalsCount()
|
||||
{
|
||||
m_intervalsCounter++;
|
||||
}
|
||||
|
||||
SerializeToLocalAndRemoteSyncBase::~SerializeToLocalAndRemoteSyncBase()
|
||||
{
|
||||
|
||||
@@ -599,6 +617,17 @@ void SerializeToLocalAndRemoteSyncBase::setInterval(ch::seconds newInterval)
|
||||
|
||||
bool SerializeToLocalAndRemoteSyncBase::localSyncAndProcess()
|
||||
{
|
||||
bool isBackupSyncEnabled = getProfileAgentSettingWithDefault<bool>(
|
||||
true,
|
||||
"appsecLearningSettings.backupLocalSync");
|
||||
|
||||
if (!isBackupSyncEnabled) {
|
||||
dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR) << "Local sync is disabled";
|
||||
processData();
|
||||
saveData();
|
||||
return true;
|
||||
}
|
||||
|
||||
RemoteFilesList rawDataFiles;
|
||||
|
||||
dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Getting files of all agents";
|
||||
@@ -655,7 +684,7 @@ void SerializeToLocalAndRemoteSyncBase::syncWorker()
|
||||
{
|
||||
dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR) << "Running the sync worker for assetId='" << m_assetId << "', owner='" <<
|
||||
m_owner << "'" << " last modified state: " << m_lastProcessedModified;
|
||||
m_intervalsCounter++;
|
||||
incrementIntervalsCount();
|
||||
OrchestrationMode mode = Singleton::exists<I_AgentDetails>() ?
|
||||
Singleton::Consume<I_AgentDetails>::by<WaapComponent>()->getOrchestrationMode() : OrchestrationMode::ONLINE;
|
||||
|
||||
@@ -674,7 +703,7 @@ void SerializeToLocalAndRemoteSyncBase::syncWorker()
|
||||
}
|
||||
|
||||
dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Waiting for all agents to post their data";
|
||||
m_pMainLoop->yield(m_waitForSync);
|
||||
waitSync();
|
||||
// check if learning service is operational
|
||||
if (m_lastProcessedModified == "")
|
||||
{
|
||||
|
||||
@@ -33,6 +33,7 @@ WaapTelemetryBase::sendLog(const LogRest &metric_client_rest) const
|
||||
OrchestrationMode mode = Singleton::Consume<I_AgentDetails>::by<GenericMetric>()->getOrchestrationMode();
|
||||
|
||||
GenericMetric::sendLog(metric_client_rest);
|
||||
dbgTrace(D_WAAP) << "Waap telemetry log sent: " << metric_client_rest.genJson().unpack();
|
||||
|
||||
if (mode == OrchestrationMode::ONLINE) {
|
||||
return;
|
||||
@@ -79,7 +80,16 @@ void
|
||||
WaapTelemetrics::updateMetrics(const string &asset_id, const DecisionTelemetryData &data)
|
||||
{
|
||||
initMetrics();
|
||||
requests.report(1);
|
||||
|
||||
auto is_keep_alive_ctx = Singleton::Consume<I_Environment>::by<GenericMetric>()->get<bool>(
|
||||
"keep_alive_request_ctx"
|
||||
);
|
||||
if (!is_keep_alive_ctx.ok() || !*is_keep_alive_ctx) {
|
||||
requests.report(1);
|
||||
} else {
|
||||
dbgTrace(D_WAAP) << "Not increasing the number of requests due to keep alive";
|
||||
}
|
||||
|
||||
if (sources_seen.find(data.source) == sources_seen.end()) {
|
||||
if (sources.getCounter() == 0) sources_seen.clear();
|
||||
sources_seen.insert(data.source);
|
||||
@@ -274,7 +284,9 @@ WaapMetricWrapper::upon(const WaapTelemetryEvent &event)
|
||||
ReportIS::IssuingEngine::AGENT_CORE,
|
||||
chrono::minutes(LOGGING_INTERVAL_IN_MINUTES),
|
||||
true,
|
||||
ReportIS::Audience::INTERNAL
|
||||
ReportIS::Audience::INTERNAL,
|
||||
false,
|
||||
asset_id
|
||||
);
|
||||
metrics[asset_id]->registerListener();
|
||||
}
|
||||
@@ -286,7 +298,9 @@ WaapMetricWrapper::upon(const WaapTelemetryEvent &event)
|
||||
ReportIS::IssuingEngine::AGENT_CORE,
|
||||
chrono::minutes(LOGGING_INTERVAL_IN_MINUTES),
|
||||
true,
|
||||
ReportIS::Audience::INTERNAL
|
||||
ReportIS::Audience::INTERNAL,
|
||||
false,
|
||||
asset_id
|
||||
);
|
||||
attack_types[asset_id]->registerListener();
|
||||
}
|
||||
|
||||
@@ -135,6 +135,7 @@ WaapAssetState::WaapAssetState(std::shared_ptr<Signatures> signatures,
|
||||
m_Signatures(signatures),
|
||||
m_waapDataFileName(waapDataFileName),
|
||||
m_assetId(assetId),
|
||||
m_requestsMonitor(nullptr),
|
||||
scoreBuilder(this),
|
||||
m_rateLimitingState(nullptr),
|
||||
m_errorLimitingState(nullptr),
|
||||
@@ -152,10 +153,14 @@ WaapAssetState::WaapAssetState(std::shared_ptr<Signatures> signatures,
|
||||
I_AgentDetails* agentDetails = Singleton::Consume<I_AgentDetails>::by<WaapComponent>();
|
||||
std::string path = agentDetails->getTenantId() + "/" + assetId;
|
||||
m_filtersMngr = std::make_shared<IndicatorsFiltersManager>(path, assetId, this);
|
||||
m_requestsMonitor = std::make_shared<SourcesRequestMonitor>
|
||||
(getWaapDataDir() + "/monitor.data", path, assetId, "State");
|
||||
}
|
||||
else
|
||||
{
|
||||
m_filtersMngr = std::make_shared<IndicatorsFiltersManager>("", "", this);
|
||||
m_requestsMonitor = std::make_shared<SourcesRequestMonitor>
|
||||
(getWaapDataDir() + "/monitor.data", "", assetId, "State");
|
||||
}
|
||||
// Load keyword scores - copy from ScoreBuilder
|
||||
updateScores();
|
||||
@@ -419,6 +424,8 @@ WaapAssetState::WaapAssetState(std::shared_ptr<Signatures> signatures,
|
||||
|
||||
std::string unescape(const std::string & s) {
|
||||
std::string text = s;
|
||||
size_t orig_size = text.size();
|
||||
size_t orig_capacity = text.capacity();
|
||||
dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (0) '" << text << "'";
|
||||
|
||||
fixBreakingSpace(text);
|
||||
@@ -428,7 +435,17 @@ WaapAssetState::WaapAssetState(std::shared_ptr<Signatures> signatures,
|
||||
filterUnicode(text);
|
||||
dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (1) '" << text << "'";
|
||||
|
||||
// inplace unescaping must result in a string of the same size or smaller
|
||||
dbgAssertOpt(text.size() <= orig_size && text.size() <= text.capacity() && text.capacity() <= orig_capacity)
|
||||
<< AlertInfo(AlertTeam::CORE, "WAAP sample processing")
|
||||
<< "unescape: original size=" << orig_size << " capacity=" << orig_capacity
|
||||
<< " new size=" << text.size() << " capacity=" << text.capacity()
|
||||
<< " text='" << text << "'";
|
||||
|
||||
text = filterUTF7(text);
|
||||
// update orig_size and orig_capacity after string copy
|
||||
orig_size = text.size();
|
||||
orig_capacity = text.capacity();
|
||||
dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (1) (after filterUTF7) '" << text << "'";
|
||||
|
||||
// 2. Replace %xx sequences by their single-character equivalents.
|
||||
@@ -507,6 +524,14 @@ WaapAssetState::WaapAssetState(std::shared_ptr<Signatures> signatures,
|
||||
}
|
||||
|
||||
dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (12) '" << text << "'";
|
||||
|
||||
// inplace unescaping must result in a string of the same size or smaller
|
||||
dbgAssertOpt(text.size() <= orig_size && text.size() <= text.capacity() && text.capacity() <= orig_capacity)
|
||||
<< AlertInfo(AlertTeam::CORE, "WAAP sample processing")
|
||||
<< "unescape: original size=" << orig_size << " capacity=" << orig_capacity
|
||||
<< " new size=" << text.size() << " capacity=" << text.capacity()
|
||||
<< " text='" << text << "'";
|
||||
|
||||
return text;
|
||||
}
|
||||
|
||||
|
||||
@@ -33,6 +33,7 @@
|
||||
#include "KeywordTypeValidator.h"
|
||||
#include "ScanResult.h"
|
||||
#include "WaapSampleValue.h"
|
||||
#include "RequestsMonitor.h"
|
||||
|
||||
enum space_stage {SPACE_SYNBOL, BR_SYMBOL, BN_SYMBOL, BRN_SEQUENCE, BNR_SEQUENCE, NO_SPACES};
|
||||
|
||||
@@ -67,6 +68,8 @@ public:
|
||||
|
||||
const std::string m_assetId;
|
||||
|
||||
std::shared_ptr<SourcesRequestMonitor> m_requestsMonitor;
|
||||
|
||||
ScoreBuilder scoreBuilder;
|
||||
std::shared_ptr<Waap::RateLimiting::State> m_rateLimitingState;
|
||||
std::shared_ptr<Waap::RateLimiting::State> m_errorLimitingState;
|
||||
@@ -90,6 +93,7 @@ public:
|
||||
void logIndicatorsInFilters(const std::string ¶m, Waap::Keywords::KeywordsSet& keywords,
|
||||
IWaf2Transaction* pTransaction);
|
||||
void logParamHit(Waf2ScanResult& res, IWaf2Transaction* pTransaction);
|
||||
void logSourceHit(const std::string& source);
|
||||
void filterKeywords(const std::string ¶m, Waap::Keywords::KeywordsSet& keywords,
|
||||
std::vector<std::string>& filteredKeywords);
|
||||
void clearFilterVerbose();
|
||||
|
||||
@@ -329,14 +329,37 @@ const std::string& WaapConfigBase::get_AssetName() const
|
||||
return m_assetName;
|
||||
}
|
||||
|
||||
const std::string& WaapConfigBase::get_PracticeId() const
|
||||
const std::string& WaapConfigBase::get_PracticeIdByPactice(DecisionType practiceType) const
|
||||
{
|
||||
return m_practiceId;
|
||||
|
||||
switch (practiceType)
|
||||
{
|
||||
case DecisionType::AUTONOMOUS_SECURITY_DECISION:
|
||||
return m_practiceId;
|
||||
default:
|
||||
dbgError(D_WAAP)
|
||||
<< "Can't find practice type for practice ID by practice: "
|
||||
<< practiceType
|
||||
<< ", return web app practice ID";
|
||||
return m_practiceId;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
const std::string& WaapConfigBase::get_PracticeName() const
|
||||
const std::string& WaapConfigBase::get_PracticeNameByPactice(DecisionType practiceType) const
|
||||
{
|
||||
return m_practiceName;
|
||||
switch (practiceType)
|
||||
{
|
||||
case DecisionType::AUTONOMOUS_SECURITY_DECISION:
|
||||
return m_practiceName;
|
||||
default:
|
||||
dbgError(D_WAAP)
|
||||
<< "Can't find practice type for practice name by practice: "
|
||||
<< practiceType
|
||||
<< ", return web app practice name";
|
||||
return m_practiceName;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
const std::string& WaapConfigBase::get_RuleId() const
|
||||
|
||||
@@ -39,8 +39,8 @@ public:
|
||||
virtual const std::string& get_AssetId() const;
|
||||
virtual const std::string& get_AssetName() const;
|
||||
virtual const BlockingLevel& get_BlockingLevel() const;
|
||||
virtual const std::string& get_PracticeId() const;
|
||||
virtual const std::string& get_PracticeName() const;
|
||||
virtual const std::string& get_PracticeIdByPactice(DecisionType practiceType) const;
|
||||
virtual const std::string& get_PracticeNameByPactice(DecisionType practiceType) const;
|
||||
virtual const std::string& get_RuleId() const;
|
||||
virtual const std::string& get_RuleName() const;
|
||||
virtual const bool& get_WebAttackMitigation() const;
|
||||
|
||||
@@ -89,7 +89,7 @@ bool WaapOverrideFunctor::operator()(
|
||||
}
|
||||
else if (tagLower == "url") {
|
||||
for (const auto &rx : rxes) {
|
||||
if (W2T_REGX_MATCH(getUriStr)) return true;
|
||||
if (W2T_REGX_MATCH(getUri)) return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ ResponseInjectReasons::ResponseInjectReasons()
|
||||
:
|
||||
csrf(false),
|
||||
antibot(false),
|
||||
captcha(false),
|
||||
securityHeaders(false)
|
||||
{
|
||||
}
|
||||
@@ -53,6 +54,13 @@ ResponseInjectReasons::setAntibot(bool flag)
|
||||
antibot = flag;
|
||||
}
|
||||
|
||||
void
|
||||
ResponseInjectReasons::setCaptcha(bool flag)
|
||||
{
|
||||
dbgTrace(D_WAAP) << "Change ResponseInjectReasons(Captcha) " << captcha << " to " << flag;
|
||||
captcha = flag;
|
||||
}
|
||||
|
||||
void
|
||||
ResponseInjectReasons::setCsrf(bool flag)
|
||||
{
|
||||
@@ -74,6 +82,13 @@ ResponseInjectReasons::shouldInjectAntibot() const
|
||||
return antibot;
|
||||
}
|
||||
|
||||
bool
|
||||
ResponseInjectReasons::shouldInjectCaptcha() const
|
||||
{
|
||||
dbgTrace(D_WAAP) << "shouldInjectCaptcha():: " << captcha;
|
||||
return captcha;
|
||||
}
|
||||
|
||||
bool
|
||||
ResponseInjectReasons::shouldInjectCsrf() const
|
||||
{
|
||||
|
||||
@@ -21,14 +21,17 @@ public:
|
||||
void clear();
|
||||
bool shouldInject() const;
|
||||
void setAntibot(bool flag);
|
||||
void setCaptcha(bool flag);
|
||||
void setCsrf(bool flag);
|
||||
void setSecurityHeaders(bool flag);
|
||||
bool shouldInjectAntibot() const;
|
||||
bool shouldInjectCaptcha() const;
|
||||
bool shouldInjectCsrf() const;
|
||||
bool shouldInjectSecurityHeaders() const;
|
||||
private:
|
||||
bool csrf;
|
||||
bool antibot;
|
||||
bool captcha;
|
||||
bool securityHeaders;
|
||||
};
|
||||
|
||||
|
||||
@@ -112,20 +112,6 @@ double Waap::Scanner::getScoreData(Waf2ScanResult& res, const std::string &poolN
|
||||
}
|
||||
double res_score = getScoreFromPool(res, newKeywords, poolName);
|
||||
|
||||
std::string other_pool_name = Waap::Scores::getOtherScorePoolName();
|
||||
Waap::Scores::ModelLoggingSettings modelLoggingSettings = Waap::Scores::getModelLoggingSettings();
|
||||
|
||||
if (applyLearning && poolName != other_pool_name &&
|
||||
modelLoggingSettings.logLevel != Waap::Scores::ModelLogLevel::OFF) {
|
||||
double other_score = getScoreFromPool(res, newKeywords, other_pool_name);
|
||||
dbgDebug(D_WAAP_SCANNER) << "Comparing score from pool " << poolName << ": " << res_score
|
||||
<< ", vs. pool " << other_pool_name << ": " << other_score
|
||||
<< ", score difference: " << res_score - other_score
|
||||
<< ", sample: " << res.unescaped_line;
|
||||
res.other_model_score = other_score;
|
||||
} else {
|
||||
res.other_model_score = res_score;
|
||||
}
|
||||
return res_score;
|
||||
}
|
||||
|
||||
|
||||
@@ -97,7 +97,9 @@ calcIndividualKeywords(
|
||||
std::sort(keywords.begin(), keywords.end());
|
||||
|
||||
for (auto pKeyword = keywords.begin(); pKeyword != keywords.end(); ++pKeyword) {
|
||||
addKeywordScore(scoreBuilder, poolName, *pKeyword, 2.0f, 0.3f, scoresArray, coefArray);
|
||||
addKeywordScore(
|
||||
scoreBuilder, poolName, *pKeyword, DEFAULT_KEYWORD_SCORE, DEFAULT_KEYWORD_COEF, scoresArray, coefArray
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -112,8 +114,6 @@ calcCombinations(
|
||||
std::vector<std::string>& keyword_combinations)
|
||||
{
|
||||
keyword_combinations.clear();
|
||||
static const double max_combi_score = 1.0f;
|
||||
double default_coef = 0.8f;
|
||||
|
||||
for (size_t i = 0; i < keyword_matches.size(); ++i) {
|
||||
std::vector<std::string> combinations;
|
||||
@@ -137,8 +137,10 @@ calcCombinations(
|
||||
default_score += scoreBuilder.getSnapshotKeywordScore(*it, 0.0f, poolName);
|
||||
}
|
||||
// set default combination score to be the sum of its keywords, bounded by 1
|
||||
default_score = std::min(default_score, max_combi_score);
|
||||
addKeywordScore(scoreBuilder, poolName, combination, default_score, default_coef, scoresArray, coefArray);
|
||||
default_score = std::min(default_score, DEFAULT_COMBI_SCORE);
|
||||
addKeywordScore(
|
||||
scoreBuilder, poolName, combination, default_score, DEFAULT_COMBI_COEF, scoresArray, coefArray
|
||||
);
|
||||
keyword_combinations.push_back(combination);
|
||||
}
|
||||
}
|
||||
@@ -155,7 +157,7 @@ calcArrayScore(std::vector<double>& scoreArray)
|
||||
// *pScore is always positive and there's a +10 offset
|
||||
score = 10.0f - left * 10.0f / divisor;
|
||||
}
|
||||
dbgTrace(D_WAAP_SCORE_BUILDER) << "calculated score: " << score;
|
||||
dbgDebug(D_WAAP_SCORE_BUILDER) << "calculated score: " << score;
|
||||
return score;
|
||||
}
|
||||
|
||||
@@ -171,7 +173,9 @@ calcLogisticRegressionScore(std::vector<double> &coefArray, double intercept, do
|
||||
}
|
||||
// Apply the expit function to the log-odds to obtain the probability,
|
||||
// and multiply by 10 to obtain a 'score' in the range [0, 10]
|
||||
return 1.0f / (1.0f + exp(-log_odds)) * 10.0f;
|
||||
double score = 1.0f / (1.0f + exp(-log_odds)) * 10.0f;
|
||||
dbgDebug(D_WAAP_SCORE_BUILDER) << "calculated score (log_odds): " << score << " (" << log_odds << ")";
|
||||
return score;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -32,6 +32,11 @@ struct ModelLoggingSettings {
|
||||
bool logToStream;
|
||||
};
|
||||
|
||||
static const double DEFAULT_KEYWORD_COEF = 0.3f;
|
||||
static const double DEFAULT_KEYWORD_SCORE = 2.0f;
|
||||
static const double DEFAULT_COMBI_COEF = 0.8f;
|
||||
static const double DEFAULT_COMBI_SCORE = 1.0f;
|
||||
|
||||
std::string getScorePoolNameByLocation(const std::string &location);
|
||||
std::string getOtherScorePoolName();
|
||||
ModelLoggingSettings getModelLoggingSettings();
|
||||
|
||||
@@ -40,6 +40,7 @@
|
||||
#include "WaapOpenRedirectPolicy.h"
|
||||
#include "WaapErrorDisclosurePolicy.h"
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <boost/regex.hpp>
|
||||
#include "generic_rulebase/parameters_config.h"
|
||||
#include <iostream>
|
||||
#include "ParserDelimiter.h"
|
||||
@@ -1098,6 +1099,7 @@ void Waf2Transaction::end_request_hdrs() {
|
||||
// but the State itself is not needed now
|
||||
Waap::Override::State overrideState = getOverrideState(m_siteConfig);
|
||||
}
|
||||
m_pWaapAssetState->m_requestsMonitor->logSourceHit(m_source_identifier);
|
||||
IdentifiersEvent ids(m_source_identifier, m_pWaapAssetState->m_assetId);
|
||||
ids.notify();
|
||||
// Read relevant headers and extract meta information such as host name
|
||||
@@ -1389,6 +1391,20 @@ Waf2Transaction::findHtmlTagToInject(const char* data, int data_len, int& pos)
|
||||
size_t tagHistPosCheck = m_tagHistPos;
|
||||
for (size_t i=0; i < tagSize; ++i) {
|
||||
if (tag[i] != ::tolower(m_tagHist[tagHistPosCheck])) {
|
||||
if (i == tagSize - 1 && m_tagHist[tagHistPosCheck] == ' ') {
|
||||
// match regex on head element with attributes
|
||||
string dataStr = Waap::Util::charToString(data + pos, data_len - pos);
|
||||
dataStr = dataStr.substr(0, dataStr.find('>')+1);
|
||||
tagMatches = NGEN::Regex::regexMatch(
|
||||
__FILE__,
|
||||
__LINE__,
|
||||
dataStr,
|
||||
boost::regex("(?:\\s+[a-zA-Z_:][-a-zA-Z0-9_:.]*(?:\\s*=\\s*(\"[^\"]*\"|'[^']*'|[^\\s\"'>]*))?)*\\s*>")
|
||||
);
|
||||
pos += dataStr.length() - 1;
|
||||
dbgTrace(D_WAAP_BOT_PROTECTION) << "matching head element with attributes: " << dataStr << ". match: " << tagMatches;
|
||||
break;
|
||||
}
|
||||
tagMatches = false;
|
||||
break;
|
||||
}
|
||||
@@ -1402,12 +1418,8 @@ Waf2Transaction::findHtmlTagToInject(const char* data, int data_len, int& pos)
|
||||
}
|
||||
}
|
||||
|
||||
if(!headFound)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
dbgTrace(D_WAAP_BOT_PROTECTION) << "head element tag found: " << headFound;
|
||||
return headFound;
|
||||
}
|
||||
|
||||
void
|
||||
@@ -1421,6 +1433,15 @@ Waf2Transaction::completeInjectionResponseBody(std::string& strInjection)
|
||||
m_responseInjectReasons.setAntibot(false);
|
||||
}
|
||||
|
||||
if(m_responseInjectReasons.shouldInjectCaptcha()) {
|
||||
dbgTrace(D_WAAP_BOT_PROTECTION) <<
|
||||
"Waf2Transaction::completeInjectionResponseBody(): Injecting data (captcha)";
|
||||
//todo add captcha script
|
||||
strInjection += "<script src=\"cp-cp.js\"></script>";
|
||||
// No need to inject more than once
|
||||
m_responseInjectReasons.setCaptcha(false);
|
||||
}
|
||||
|
||||
if (m_responseInjectReasons.shouldInjectCsrf()) {
|
||||
dbgTrace(D_WAAP) << "Waf2Transaction::completeInjectionResponseBody(): Injecting data (csrf)";
|
||||
strInjection += "<script src=\"cp-csrf.js\"></script>";
|
||||
@@ -1568,6 +1589,8 @@ Waf2Transaction::decideFinal(
|
||||
sitePolicy = &ngenAPIConfig;
|
||||
m_overrideState = getOverrideState(sitePolicy);
|
||||
|
||||
// User limits
|
||||
shouldBlock = (getUserLimitVerdict() == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP);
|
||||
}
|
||||
else if (WaapConfigApplication::getWaapSiteConfig(ngenSiteConfig)) {
|
||||
dbgTrace(D_WAAP) << "Waf2Transaction::decideFinal(): got relevant Application configuration from the I/S";
|
||||
@@ -1646,7 +1669,9 @@ void Waf2Transaction::appendCommonLogFields(LogGen& waapLog,
|
||||
const std::shared_ptr<Waap::Trigger::Log> &triggerLog,
|
||||
bool shouldBlock,
|
||||
const std::string& logOverride,
|
||||
const std::string& incidentType) const
|
||||
const std::string& incidentType,
|
||||
const std::string& practiceID,
|
||||
const std::string& practiceName) const
|
||||
{
|
||||
auto env = Singleton::Consume<I_Environment>::by<WaapComponent>();
|
||||
auto active_id = env->get<std::string>("ActiveTenantId");
|
||||
@@ -1737,8 +1762,8 @@ void Waf2Transaction::appendCommonLogFields(LogGen& waapLog,
|
||||
waapLog << LogField("practiceType", "Threat Prevention");
|
||||
waapLog << LogField("practiceSubType", m_siteConfig->get_PracticeSubType());
|
||||
waapLog << LogField("ruleName", m_siteConfig->get_RuleName());
|
||||
waapLog << LogField("practiceId", m_siteConfig->get_PracticeId());
|
||||
waapLog << LogField("practiceName", m_siteConfig->get_PracticeName());
|
||||
waapLog << LogField("practiceId", practiceID);
|
||||
waapLog << LogField("practiceName", practiceName);
|
||||
waapLog << LogField("waapIncidentType", incidentType);
|
||||
|
||||
// Registering this value would append the list of matched override IDs to the unified log
|
||||
@@ -1805,8 +1830,8 @@ Waf2Transaction::sendLog()
|
||||
|
||||
telemetryData.source = getSourceIdentifier();
|
||||
telemetryData.assetName = m_siteConfig->get_AssetName();
|
||||
telemetryData.practiceId = m_siteConfig->get_PracticeId();
|
||||
telemetryData.practiceName = m_siteConfig->get_PracticeName();
|
||||
telemetryData.practiceId = m_siteConfig->get_PracticeIdByPactice(AUTONOMOUS_SECURITY_DECISION);
|
||||
telemetryData.practiceName = m_siteConfig->get_PracticeNameByPactice(AUTONOMOUS_SECURITY_DECISION);
|
||||
if (m_scanResult) {
|
||||
telemetryData.attackTypes = m_scanResult->attack_types;
|
||||
}
|
||||
@@ -1947,7 +1972,11 @@ Waf2Transaction::sendLog()
|
||||
shouldBlock);
|
||||
|
||||
LogGen& waap_log = logGenWrapper.getLogGen();
|
||||
appendCommonLogFields(waap_log, triggerLog, shouldBlock, logOverride, incidentType);
|
||||
appendCommonLogFields(
|
||||
waap_log, triggerLog, shouldBlock, logOverride, incidentType,
|
||||
m_siteConfig->get_PracticeIdByPactice(AUTONOMOUS_SECURITY_DECISION),
|
||||
m_siteConfig->get_PracticeNameByPactice(AUTONOMOUS_SECURITY_DECISION)
|
||||
);
|
||||
waap_log << LogField("waapIncidentDetails", incidentDetails);
|
||||
waap_log << LogField("eventConfidence", "High");
|
||||
break;
|
||||
@@ -1980,7 +2009,11 @@ Waf2Transaction::sendLog()
|
||||
waap_log << LogField("waapFoundIndicators", getKeywordMatchesStr(), LogFieldOption::XORANDB64);
|
||||
}
|
||||
|
||||
appendCommonLogFields(waap_log, triggerLog, shouldBlock, logOverride, incidentType);
|
||||
appendCommonLogFields(
|
||||
waap_log, triggerLog, shouldBlock, logOverride, incidentType,
|
||||
m_siteConfig->get_PracticeIdByPactice(AUTONOMOUS_SECURITY_DECISION),
|
||||
m_siteConfig->get_PracticeNameByPactice(AUTONOMOUS_SECURITY_DECISION)
|
||||
);
|
||||
|
||||
waap_log << LogField("waapIncidentDetails", incidentDetails);
|
||||
break;
|
||||
@@ -1996,7 +2029,11 @@ Waf2Transaction::sendLog()
|
||||
shouldBlock);
|
||||
|
||||
LogGen& waap_log = logGenWrapper.getLogGen();
|
||||
appendCommonLogFields(waap_log, triggerLog, shouldBlock, logOverride, "Cross Site Request Forgery");
|
||||
appendCommonLogFields(
|
||||
waap_log, triggerLog, shouldBlock, logOverride, "Cross Site Request Forgery",
|
||||
m_siteConfig->get_PracticeIdByPactice(AUTONOMOUS_SECURITY_DECISION),
|
||||
m_siteConfig->get_PracticeNameByPactice(AUTONOMOUS_SECURITY_DECISION)
|
||||
);
|
||||
waap_log << LogField("waapIncidentDetails", "CSRF Attack discovered.");
|
||||
break;
|
||||
}
|
||||
@@ -2177,14 +2214,13 @@ Waf2Transaction::decideAutonomousSecurity(
|
||||
" effective overrides count: " << m_effectiveOverrideIds.size() <<
|
||||
" learned overrides count: " << m_exceptionLearned.size();
|
||||
|
||||
|
||||
|
||||
bool log_all = false;
|
||||
const std::shared_ptr<Waap::Trigger::Policy> triggerPolicy = sitePolicy.get_TriggerPolicy();
|
||||
if (triggerPolicy) {
|
||||
const std::shared_ptr<Waap::Trigger::Log> triggerLog = getTriggerLog(triggerPolicy);
|
||||
if (triggerLog && triggerLog->webRequests) log_all = true;
|
||||
}
|
||||
|
||||
if(decision->getThreatLevel() <= ThreatLevel::THREAT_INFO && !log_all) {
|
||||
decision->setLog(false);
|
||||
} else {
|
||||
@@ -2299,10 +2335,11 @@ bool Waf2Transaction::decideResponse()
|
||||
|
||||
bool
|
||||
Waf2Transaction::reportScanResult(const Waf2ScanResult &res) {
|
||||
if (get_ignoreScore() || (res.score >= SCORE_THRESHOLD &&
|
||||
(m_scanResult == nullptr || res.score > m_scanResult->score)))
|
||||
if ((get_ignoreScore() || res.score >= SCORE_THRESHOLD) &&
|
||||
(m_scanResult == nullptr || res.score > m_scanResult->score))
|
||||
{
|
||||
// Forget any previous scan result and replace with new
|
||||
dbgTrace(D_WAAP) << "Setting scan result. New score: " << res.score;
|
||||
// Forget any previous scan result and replace wit, h new
|
||||
delete m_scanResult;
|
||||
m_scanResult = new Waf2ScanResult(res);
|
||||
return true;
|
||||
|
||||
@@ -247,7 +247,9 @@ private:
|
||||
const std::shared_ptr<Waap::Trigger::Log> &triggerLog,
|
||||
bool shouldBlock,
|
||||
const std::string& logOverride,
|
||||
const std::string& incidentType) const;
|
||||
const std::string& incidentType,
|
||||
const std::string& practiceID,
|
||||
const std::string& practiceName) const;
|
||||
std::string getUserReputationStr(double relativeReputation) const;
|
||||
bool isTrustedSource() const;
|
||||
|
||||
|
||||
@@ -381,7 +381,11 @@ void Waf2Transaction::sendAutonomousSecurityLog(
|
||||
waap_log << LogField("eventConfidence", confidence);
|
||||
}
|
||||
|
||||
appendCommonLogFields(waap_log, triggerLog, shouldBlock, logOverride, attackTypes);
|
||||
appendCommonLogFields(
|
||||
waap_log, triggerLog, shouldBlock, logOverride, attackTypes,
|
||||
m_siteConfig->get_PracticeIdByPactice(AUTONOMOUS_SECURITY_DECISION),
|
||||
m_siteConfig->get_PracticeNameByPactice(AUTONOMOUS_SECURITY_DECISION)
|
||||
);
|
||||
|
||||
std::string sampleString = getSample();
|
||||
if (sampleString.length() > MAX_LOG_FIELD_SIZE) {
|
||||
|
||||
@@ -952,6 +952,145 @@ string filterUTF7(const string& text) {
|
||||
return result;
|
||||
}
|
||||
|
||||
// Decides the status of a Base64 decoded string based on various parameters.
|
||||
// @param decoded The decoded string.
|
||||
// @param entropy The entropy of the original encoded string.
|
||||
// @param decoded_entropy The entropy of the decoded string.
|
||||
// @param spacer_count The number of spacer characters in the decoded string.
|
||||
// @param nonPrintableCharsCount The count of non-printable characters in the decoded string.
|
||||
// @param clear_on_error Flag indicating whether to clear the decoded string on error.
|
||||
// @param terminatorCharsSeen The number of terminator characters seen.
|
||||
// @param called_with_prefix Flag indicating if the function was called with a prefix.
|
||||
// @return The status of the Base64 decoding process.
|
||||
//
|
||||
// Idea:
|
||||
// Check if input chunk should be replaced by decoded, suspected to be checked both as encoded and decoded
|
||||
// or cleaned as binary data. Additional case - define as not base64 encoded.
|
||||
// - in case decoded size less 5 - return invalid
|
||||
// - check entropy delta based on that base64 encoded data has higher entropy than decoded, usually delta = 0.25
|
||||
// - this check should rize suspect but cannot work vice versa
|
||||
// check if decoded chunk has more than 10% of non-printable characters - this is supect for binary data encoded
|
||||
// - if no suspect for binary data and entropy is suspected, check empiric conditions to decide if this binary data
|
||||
// or invalid decoding
|
||||
// - if suspect for binary data, first check is we have entropy suspection
|
||||
// - if entropy is suspected and chunk is short and it have more than 25% of nonprintables, return invalid
|
||||
// since this is not base64 encoded data
|
||||
// - if entropy is not suspected and chunk is short and it have more than 50% of nonprintables, return invalid
|
||||
// since this is not base64 encoded data
|
||||
// - if entropy is suspected and chunk size is between 64-1024, perform additional empiric test
|
||||
// This test will define if returm value should be treated as suspected or as binary data(cleared)
|
||||
|
||||
base64_decode_status decideStatusBase64Decoded(
|
||||
string& decoded,
|
||||
double entropy,
|
||||
double decoded_entropy,
|
||||
size_t spacer_count,
|
||||
size_t nonPrintableCharsCount,
|
||||
bool clear_on_error,
|
||||
double terminatorCharsSeen,
|
||||
bool called_with_prefix
|
||||
)
|
||||
{
|
||||
base64_decode_status tmp_status = B64_DECODE_OK;
|
||||
if (entropy - decoded_entropy + terminatorCharsSeen < BASE64_ENTROPY_THRESHOLD_DELTA) {
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "The chunk is under suspect to be base64,"
|
||||
<< "use dual processing because entropy delta is too low";
|
||||
tmp_status = B64_DECODE_SUSPECTED;
|
||||
}
|
||||
|
||||
bool empiric_condition = false;
|
||||
if (decoded.size() >= 5) {
|
||||
if (spacer_count > 1) {
|
||||
nonPrintableCharsCount = nonPrintableCharsCount - spacer_count + 1;
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "(before test for unprintables): decoded.size="
|
||||
<< decoded.size()
|
||||
<< ", nonPrintableCharsCount="
|
||||
<< nonPrintableCharsCount
|
||||
<< ", clear_on_error="
|
||||
<< clear_on_error
|
||||
<< ", called_with_prefix="
|
||||
<< called_with_prefix;
|
||||
if (nonPrintableCharsCount * 10 < decoded.size()) {
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "(decode/replace due to small amount of nonprintables): will decide based on entropy values";
|
||||
} else { // more than 10% of non-printable characters
|
||||
dbgTrace(D_WAAP_BASE64) << "large amount of nonporintables";
|
||||
if (tmp_status == B64_DECODE_SUSPECTED) {
|
||||
// entropy - decoded_entropy + terminatorCharsSeen < 0.25
|
||||
if (decoded.size() < 16 && nonPrintableCharsCount * 4 > decoded.size()) {
|
||||
decoded.clear();
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "(large amount of nonporintables + entropy suspect), check emprirics because decoded."
|
||||
<< " terminatorCharsSeen="
|
||||
<< terminatorCharsSeen;
|
||||
// empiric test based on investigation of real payloads
|
||||
empiric_condition = entropy < decoded_entropy
|
||||
&& entropy > BASE64_ENTROPY_BASE_THRESHOLD
|
||||
&& decoded_entropy > BASE64_ENTROPY_DECODED_THRESHOLD
|
||||
&& !called_with_prefix
|
||||
&& decoded.size() > BASE64_MIN_SIZE_LIMIT
|
||||
&& decoded.size() < BASE64_MAX_SIZE_LIMIT
|
||||
&& terminatorCharsSeen != 0;
|
||||
if (!empiric_condition) {
|
||||
if (clear_on_error) decoded.clear();
|
||||
return B64_DECODE_SUSPECTED;
|
||||
} else {
|
||||
if (clear_on_error) decoded.clear();
|
||||
tmp_status = B64_DECODE_OK;
|
||||
}
|
||||
} else { // entropy - decoded_entropy + terminatorCharsSeen >= 0.25
|
||||
// one more empiric based on uT and real payloads
|
||||
if (decoded.size() < 16
|
||||
&& nonPrintableCharsCount * 2 > decoded.size()
|
||||
&& terminatorCharsSeen == 0) {
|
||||
decoded.clear();
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "(delete as binary content) because decoded. Return B64_DECODE_INCOMPLETE";
|
||||
if (clear_on_error) decoded.clear();
|
||||
return B64_DECODE_INCOMPLETE;
|
||||
}
|
||||
} // less than 10% of non-printable characters
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "After handling unprintables checking status";
|
||||
if (tmp_status == B64_DECODE_OK) {
|
||||
dbgTrace(D_WAAP_BASE64) << "replacing with decoded data, return B64_DECODE_OK";
|
||||
return B64_DECODE_OK;
|
||||
} else { // tmp_status == B64_DECODE_SUSPECTED, entropy - decoded_entropy + terminatorCharsSeen < 0.25
|
||||
dbgTrace(D_WAAP_BASE64) << "Suspected due to entropy, making empiric test";
|
||||
// and one more empiric test based on investigation of real payloads
|
||||
empiric_condition = entropy < decoded_entropy
|
||||
&& entropy > BASE64_ENTROPY_BASE_THRESHOLD
|
||||
&& decoded_entropy > BASE64_ENTROPY_DECODED_THRESHOLD
|
||||
&& !called_with_prefix
|
||||
&& decoded.size() > BASE64_MIN_SIZE_LIMIT
|
||||
&& decoded.size() < BASE64_MAX_SIZE_LIMIT;
|
||||
if (empiric_condition) {
|
||||
dbgTrace(D_WAAP_BASE64) << "Empiric test failed, non-base64 chunk, return B64_DECODE_INVALID";
|
||||
decoded.clear();
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64) << "Empiric test passed, return B64_DECODE_SUSPECTED";
|
||||
return B64_DECODE_SUSPECTED;
|
||||
}
|
||||
return B64_DECODE_OK; // successfully decoded. Returns decoded data in "decoded" parameter
|
||||
}
|
||||
|
||||
// If decoded size is too small - leave the encoded value (return false)
|
||||
decoded.clear(); // discard partial data
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "(leave as-is) because decoded too small. decoded.size="
|
||||
<< decoded.size();
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
|
||||
// Attempts to validate and decode base64-encoded chunk.
|
||||
// Value is the full value inside which potential base64-encoded chunk was found,
|
||||
// it and end point to start and end of that chunk.
|
||||
@@ -980,18 +1119,28 @@ base64_decode_status decodeBase64Chunk(
|
||||
uint32_t spacer_count = 0;
|
||||
uint32_t length = end - it;
|
||||
|
||||
dbgTrace(D_WAAP) << "decodeBase64Chunk: value='" << value << "' match='" << string(it, end) << "'";
|
||||
dbgTrace(D_WAAP)
|
||||
<< "value='"
|
||||
<< value
|
||||
<< "' match='"
|
||||
<< string(it, end)
|
||||
<< "' clear_on_error='"
|
||||
<< clear_on_error
|
||||
<< "' called_with_prefix='"
|
||||
<< called_with_prefix
|
||||
<< "'";
|
||||
string::const_iterator begin = it;
|
||||
|
||||
// The encoded data length (without the "base64," prefix) should be exactly divisible by 4
|
||||
// len % 4 is not 0 i.e. this is not base64
|
||||
if ((end - it) % 4 != 0) {
|
||||
dbgTrace(D_WAAP_BASE64) <<
|
||||
"b64DecodeChunk: (leave as-is) because encoded data length should be exactly divisible by 4.";
|
||||
if ((end - it) % 4 == 1) {
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "(leave as-is) because encoded data length should not be <4*x + 1>.";
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
std::unordered_map<char, double> frequency;
|
||||
std::unordered_map<char, double> original_occurences_counter;
|
||||
std::unordered_map<char, double> decoded_occurences_counter;
|
||||
|
||||
while (it != end) {
|
||||
unsigned char c = *it;
|
||||
@@ -999,9 +1148,8 @@ base64_decode_status decodeBase64Chunk(
|
||||
if (terminatorCharsSeen) {
|
||||
// terminator characters must all be '=', until end of match.
|
||||
if (c != '=') {
|
||||
dbgTrace(D_WAAP_BASE64) <<
|
||||
"decodeBase64Chunk: (leave as-is) because terminator characters must all be '='," <<
|
||||
"until end of match.";
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "(leave as-is) because terminator characters must all be '=' until end of match.";
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
@@ -1009,13 +1157,13 @@ base64_decode_status decodeBase64Chunk(
|
||||
terminatorCharsSeen++;
|
||||
|
||||
if (terminatorCharsSeen > 2) {
|
||||
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (leave as-is) because terminatorCharsSeen > 2";
|
||||
dbgTrace(D_WAAP_BASE64) << "(leave as-is) because terminatorCharsSeen > 2";
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
// allow for more terminator characters
|
||||
it++;
|
||||
frequency[c]++;
|
||||
original_occurences_counter[c]++;
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -1040,12 +1188,18 @@ base64_decode_status decodeBase64Chunk(
|
||||
// Start tracking terminator characters
|
||||
terminatorCharsSeen++;
|
||||
it++;
|
||||
frequency[c]++;
|
||||
original_occurences_counter[c]++;
|
||||
continue;
|
||||
}
|
||||
else {
|
||||
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (leave as-is) because of non-base64 character ('" <<
|
||||
c << "', ASCII " << (unsigned int)c << ", offset " << (it-begin) << ")";
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "(leave as-is) because of non-base64 character ('"
|
||||
<< c
|
||||
<< "', ASCII "
|
||||
<< (unsigned int)c
|
||||
<< ", offset "
|
||||
<< (it-begin)
|
||||
<< ")";
|
||||
return B64_DECODE_INVALID; // non-base64 character
|
||||
}
|
||||
|
||||
@@ -1068,18 +1222,19 @@ base64_decode_status decodeBase64Chunk(
|
||||
}
|
||||
|
||||
decoded += (char)code;
|
||||
decoded_occurences_counter[(char)code]++;
|
||||
}
|
||||
|
||||
it++;
|
||||
frequency[c]++;
|
||||
original_occurences_counter[c]++;
|
||||
}
|
||||
|
||||
// end of encoded sequence decoded.
|
||||
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "decodeBase64Chunk: decoded.size="
|
||||
<< "decoding done: decoded.size="
|
||||
<< decoded.size()
|
||||
<< ", nonPrintableCharsCount="
|
||||
<< ", uncorrected nonPrintableCharsCount="
|
||||
<< nonPrintableCharsCount
|
||||
<< ", spacer_count = "
|
||||
<< spacer_count
|
||||
@@ -1088,56 +1243,42 @@ base64_decode_status decodeBase64Chunk(
|
||||
<< "; decoded='"
|
||||
<< decoded << "'";
|
||||
|
||||
// Check if entropy is correlates with b64 threshold (initially > 4.5)
|
||||
if (!called_with_prefix) {
|
||||
double entropy = 0;
|
||||
double p = 0;
|
||||
for (const auto& pair : frequency) {
|
||||
p = pair.second / length;
|
||||
entropy -= p * std::log2(p);
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: base entropy = " << entropy << "length = " << length;
|
||||
// Add short payload factor
|
||||
if (length < 16)
|
||||
entropy = entropy * 16 / length;
|
||||
// Enforce tailoring '=' characters
|
||||
entropy+=terminatorCharsSeen;
|
||||
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: corrected entropy = " << entropy << "length = " << length;
|
||||
if (entropy <= base64_entropy_threshold) {
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
double entropy = 0;
|
||||
double p = 0;
|
||||
double decoded_entropy = 0;
|
||||
for (const auto& pair : original_occurences_counter) {
|
||||
p = pair.second / length;
|
||||
entropy -= p * std::log2(p);
|
||||
}
|
||||
|
||||
// Return success only if decoded.size>=5 and there are less than 10% of non-printable
|
||||
// characters in output.
|
||||
if (decoded.size() >= 5) {
|
||||
if (spacer_count > 1) {
|
||||
nonPrintableCharsCount = nonPrintableCharsCount - spacer_count + 1;
|
||||
}
|
||||
if (nonPrintableCharsCount * 10 < decoded.size()) {
|
||||
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (decode/replace) decoded.size=" << decoded.size() <<
|
||||
", nonPrintableCharsCount=" << nonPrintableCharsCount << ": replacing with decoded data";
|
||||
}
|
||||
else {
|
||||
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (delete) because decoded.size=" << decoded.size() <<
|
||||
", nonPrintableCharsCount=" << nonPrintableCharsCount <<
|
||||
", clear_on_error=" << clear_on_error;
|
||||
if (clear_on_error) decoded.clear();
|
||||
return B64_DECODE_INCOMPLETE;
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64) << "returning true: successfully decoded."
|
||||
<< " Returns decoded data in \"decoded\" parameter";
|
||||
return B64_DECODE_OK; // successfully decoded. Returns decoded data in "decoded" parameter
|
||||
for (const auto &pair : decoded_occurences_counter) {
|
||||
p = pair.second / decoded.size();
|
||||
decoded_entropy -= p * std::log2(p);
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "Base entropy = "
|
||||
<< entropy
|
||||
<< " Decoded_entropy = "
|
||||
<< decoded_entropy
|
||||
<< "length = "
|
||||
<< length;
|
||||
|
||||
base64_decode_status return_status = decideStatusBase64Decoded(
|
||||
decoded,
|
||||
entropy,
|
||||
decoded_entropy,
|
||||
spacer_count,
|
||||
nonPrintableCharsCount,
|
||||
clear_on_error,
|
||||
terminatorCharsSeen,
|
||||
called_with_prefix
|
||||
);
|
||||
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< "After decideStatusBase64Decoded return_status="
|
||||
<< return_status;
|
||||
|
||||
return return_status;
|
||||
|
||||
// If decoded size is too small - leave the encoded value (return false)
|
||||
decoded.clear(); // discard partial data
|
||||
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (leave as-is) because decoded too small. decoded.size=" <<
|
||||
decoded.size() <<
|
||||
", nonPrintableCharsCount=" << nonPrintableCharsCount <<
|
||||
", clear_on_error=" << clear_on_error;
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
// Attempts to detect and validate base64 chunk.
|
||||
@@ -1180,8 +1321,9 @@ b64DecodeChunk(
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return decodeBase64Chunk(value, it, end, decoded) != B64_DECODE_INVALID;
|
||||
base64_decode_status status = decodeBase64Chunk(value, it, end, decoded);
|
||||
dbgTrace(D_WAAP_BASE64) << "b64DecodeChunk: status = " << status;
|
||||
return status != B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
vector<string> split(const string& s, char delim) {
|
||||
@@ -1281,6 +1423,7 @@ static void b64TestChunk(const string &s,
|
||||
int &deletedCount,
|
||||
string &outStr)
|
||||
{
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64TestChunk===: starting with = '" << s << "'";
|
||||
size_t chunkLen = (chunkEnd - chunkStart);
|
||||
|
||||
if ((chunkEnd - chunkStart) > static_cast<int>(b64_prefix.size()) &&
|
||||
@@ -1289,11 +1432,9 @@ static void b64TestChunk(const string &s,
|
||||
chunkLen -= b64_prefix.size();
|
||||
}
|
||||
|
||||
size_t chunkRem = chunkLen % 4;
|
||||
|
||||
// Only match chunk whose length is divisible by 4
|
||||
string repl;
|
||||
if (chunkRem == 0 && cb(s, chunkStart, chunkEnd, repl)) {
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64TestChunk===: chunkLen = " << chunkLen;
|
||||
if (cb(s, chunkStart, chunkEnd, repl)) {
|
||||
// Succesfully matched b64 chunk
|
||||
if (!repl.empty()) {
|
||||
outStr += repl;
|
||||
@@ -1340,9 +1481,7 @@ bool detectBase64Chunk(
|
||||
dbgTrace(D_WAAP_BASE64) << " ===detectBase64Chunk===: isB64AlphaChar = true, '" << *it << "'";
|
||||
start = it;
|
||||
end = s.end();
|
||||
if ((end - start) % 4 == 0) {
|
||||
return true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
// non base64 before supposed chunk - will not process
|
||||
return false;
|
||||
@@ -1381,17 +1520,31 @@ bool isBase64PrefixProcessingOK (
|
||||
if (detectBase64Chunk(s, start, end)) {
|
||||
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: chunk detected";
|
||||
if ((start != s.end()) && (end == s.end())) {
|
||||
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: chunk detected but not complete";
|
||||
retVal = processDecodedChunk(s, start, end, value, binaryFileType, true);
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< " ===isBase64PrefixProcessingOK===: after processDecodedChunk retVal = "
|
||||
<< retVal
|
||||
<< " binaryFileType = "
|
||||
<< binaryFileType;
|
||||
}
|
||||
} else if (start != s.end()) {
|
||||
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: chunk not detected."
|
||||
" searching for known file header only";
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< " ===isBase64PrefixProcessingOK===: chunk not detected. searching for known file header only";
|
||||
end = (start + MAX_HEADER_LOOKUP < s.end()) ? start + MAX_HEADER_LOOKUP : s.end();
|
||||
processDecodedChunk(s, start, end, value, binaryFileType);
|
||||
value.clear();
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< " ===isBase64PrefixProcessingOK===: after processDecodedChunk binaryFileType = "
|
||||
<< binaryFileType;
|
||||
return binaryFileType != Waap::Util::BinaryFileType::FILE_TYPE_NONE;
|
||||
}
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< " ===isBase64PrefixProcessingOK===: retVal = "
|
||||
<< retVal
|
||||
<< " binaryFileType = "
|
||||
<< binaryFileType;
|
||||
return retVal != B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
@@ -1399,23 +1552,31 @@ base64_variants b64Test (
|
||||
const string &s,
|
||||
string &key,
|
||||
string &value,
|
||||
BinaryFileType &binaryFileType)
|
||||
BinaryFileType &binaryFileType,
|
||||
const size_t offset)
|
||||
{
|
||||
|
||||
key.clear();
|
||||
bool retVal;
|
||||
binaryFileType = Waap::Util::BinaryFileType::FILE_TYPE_NONE;
|
||||
auto begin = s.begin() + offset;
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< " ===b64Test===: string = "
|
||||
<< s
|
||||
<< " key = "
|
||||
<< key
|
||||
<< " value = "
|
||||
<< value
|
||||
<< " offset = "
|
||||
<< offset;
|
||||
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: string = " << s
|
||||
<< " key = " << key << " value = " << value;
|
||||
// Minimal length
|
||||
if (s.size() < 8) {
|
||||
if (s.size() < 8 + offset) {
|
||||
return CONTINUE_AS_IS;
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: minimal lenght test passed";
|
||||
|
||||
std::string prefix_decoded_val;
|
||||
string::const_iterator it = s.begin();
|
||||
auto it = begin;
|
||||
|
||||
// 1st check if we have key candidate
|
||||
if (base64_key_value_detector_re.hasMatch(s)) {
|
||||
@@ -1433,7 +1594,7 @@ base64_variants b64Test (
|
||||
break;
|
||||
case EQUAL:
|
||||
if (*it == '=') {
|
||||
it = s.begin();
|
||||
it = begin;
|
||||
state=MISDETECT;
|
||||
continue;
|
||||
}
|
||||
@@ -1455,7 +1616,7 @@ base64_variants b64Test (
|
||||
if (it == s.end() || state == MISDETECT) {
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: detected *it = s.end()" << *it;
|
||||
if (key.size() > 0) {
|
||||
it = s.begin();
|
||||
it = begin;
|
||||
key.clear();
|
||||
}
|
||||
} else {
|
||||
@@ -1479,7 +1640,7 @@ base64_variants b64Test (
|
||||
}
|
||||
}
|
||||
|
||||
string::const_iterator start = s.end();
|
||||
auto start = s.end();
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: B64 itself = " << *it << " =======";
|
||||
bool isB64AlphaChar = Waap::Util::isAlphaAsciiFast(*it) || isdigit(*it) || *it=='/' || *it=='+';
|
||||
if (isB64AlphaChar) {
|
||||
@@ -1487,11 +1648,6 @@ base64_variants b64Test (
|
||||
dbgTrace(D_WAAP_BASE64) <<
|
||||
" ===b64Test===: Start tracking potential b64 chunk = " << *it << " =======";
|
||||
start = it;
|
||||
if ((s.end() - start) % 4 != 0) {
|
||||
key.clear();
|
||||
value.clear();
|
||||
return CONTINUE_AS_IS;
|
||||
}
|
||||
}
|
||||
else {
|
||||
dbgTrace(D_WAAP_BASE64) <<
|
||||
@@ -1512,17 +1668,37 @@ base64_variants b64Test (
|
||||
key.pop_back();
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: FINAL key = '" << key << "'";
|
||||
}
|
||||
retVal = decodeBase64Chunk(s, start, s.end(), value) != B64_DECODE_INVALID;
|
||||
base64_decode_status decode_chunk_status = decodeBase64Chunk(s, start, s.end(), value);
|
||||
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: After testing and conversion value = "
|
||||
<< value << "retVal = '" << retVal <<"'";
|
||||
if (!retVal) {
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< " ===b64Test===: After testing and conversion value = "
|
||||
<< value
|
||||
<< "decode_chunk_status = '"
|
||||
<< decode_chunk_status
|
||||
<<"'";
|
||||
if (decode_chunk_status == B64_DECODE_INVALID) {
|
||||
key.clear();
|
||||
value.clear();
|
||||
return CONTINUE_AS_IS;
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: After tpassed retVal check = "
|
||||
<< value << "retVal = '" << retVal <<"'" << "key = '" << key << "'";
|
||||
|
||||
if (decode_chunk_status == B64_DECODE_INCOMPLETE) {
|
||||
value.clear();
|
||||
}
|
||||
|
||||
if (decode_chunk_status == B64_DECODE_SUSPECTED) {
|
||||
return CONTINUE_DUAL_SCAN;
|
||||
}
|
||||
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< " ===b64Test===: After tpassed retVal check = "
|
||||
<< value
|
||||
<< "decode_chunk_status = '"
|
||||
<< decode_chunk_status
|
||||
<<"'"
|
||||
<< "key = '"
|
||||
<< key
|
||||
<< "'";
|
||||
if (key.empty()) {
|
||||
return SINGLE_B64_CHUNK_CONVERT;
|
||||
} else {
|
||||
@@ -1548,7 +1724,7 @@ void b64Decode(
|
||||
deletedCount = 0;
|
||||
outStr = "";
|
||||
int offsetFix = 0;
|
||||
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Decode===: starting with = '" << s << "'";
|
||||
string::const_iterator it = s.begin();
|
||||
|
||||
// Minimal length
|
||||
@@ -1596,6 +1772,11 @@ void b64Decode(
|
||||
}
|
||||
|
||||
// Decode and add chunk
|
||||
dbgTrace(D_WAAP_BASE64)
|
||||
<< " ===b64Decode===: chunkStart = "
|
||||
<< *chunkStart
|
||||
<< " it = "
|
||||
<< *it;
|
||||
b64TestChunk(s, chunkStart, it, cb, decodedCount, deletedCount, outStr);
|
||||
|
||||
// stop tracking b64 chunk
|
||||
@@ -1607,6 +1788,7 @@ void b64Decode(
|
||||
}
|
||||
|
||||
if (chunkStart != s.end()) {
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Decode===: chunkStart = " << *chunkStart;
|
||||
b64TestChunk(s, chunkStart, it, cb, decodedCount, deletedCount, outStr);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,9 +32,15 @@
|
||||
|
||||
#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
|
||||
|
||||
enum base64_variants {SINGLE_B64_CHUNK_CONVERT, KEY_VALUE_B64_PAIR, CONTINUE_AS_IS};
|
||||
enum base64_variants {SINGLE_B64_CHUNK_CONVERT, KEY_VALUE_B64_PAIR, CONTINUE_AS_IS, CONTINUE_DUAL_SCAN};
|
||||
enum base64_stage {BEFORE_EQUAL, EQUAL, DONE, MISDETECT};
|
||||
enum base64_decode_status {B64_DECODE_INVALID, B64_DECODE_OK, B64_DECODE_INCOMPLETE};
|
||||
enum base64_decode_status {B64_DECODE_INVALID, B64_DECODE_OK, B64_DECODE_INCOMPLETE, B64_DECODE_SUSPECTED};
|
||||
|
||||
#define BASE64_ENTROPY_BASE_THRESHOLD 5.0
|
||||
#define BASE64_ENTROPY_DECODED_THRESHOLD 5.4
|
||||
#define BASE64_ENTROPY_THRESHOLD_DELTA 0.25
|
||||
#define BASE64_MIN_SIZE_LIMIT 16
|
||||
#define BASE64_MAX_SIZE_LIMIT 1024
|
||||
|
||||
// This is portable version of stricmp(), which is non-standard function (not even in C).
|
||||
// Contrary to stricmp(), for a slight optimization, s2 is ASSUMED to be already in lowercase.
|
||||
@@ -221,59 +227,66 @@ inline bool isHexDigit(const char ch) {
|
||||
|
||||
template<class _IT>
|
||||
_IT escape_backslashes(_IT first, _IT last) {
|
||||
_IT result = first;
|
||||
_IT src = first;
|
||||
_IT dst = first;
|
||||
_IT mark = first;
|
||||
|
||||
enum { STATE_COPY, STATE_ESCAPE, STATE_OCTAL, STATE_HEX } state = STATE_COPY;
|
||||
unsigned char accVal = 0;
|
||||
unsigned char digitsCount = 0;
|
||||
_IT mark = first;
|
||||
|
||||
for (; first != last; ++first) {
|
||||
for (; src != last && dst < last; ++src) {
|
||||
switch (state) {
|
||||
case STATE_COPY:
|
||||
if (*first == '\\') {
|
||||
mark = first;
|
||||
if (*src == '\\') {
|
||||
mark = src;
|
||||
state = STATE_ESCAPE;
|
||||
}
|
||||
else {
|
||||
*result++ = *first;
|
||||
} else {
|
||||
*dst++ = *src;
|
||||
}
|
||||
break;
|
||||
case STATE_ESCAPE: {
|
||||
if (*first >= '0' && *first <= '7') {
|
||||
accVal = *first - '0';
|
||||
if (*src >= '0' && *src <= '7') {
|
||||
accVal = *src - '0';
|
||||
digitsCount = 1;
|
||||
state = STATE_OCTAL;
|
||||
break;
|
||||
} else if (*first == 'x') {
|
||||
} else if (*src == 'x') {
|
||||
accVal = 0;
|
||||
digitsCount = 0;
|
||||
state = STATE_HEX;
|
||||
break;
|
||||
}
|
||||
else {
|
||||
switch (*first) {
|
||||
case 'a': *result++ = 7; break; // BELL
|
||||
case 'b': *result++ = 8; break; // BACKSPACE
|
||||
case 't': *result++ = 9; break; // HORIZONTAL TAB
|
||||
case 'n': *result++ = 10; break; // LINEFEED
|
||||
case 'v': *result++ = 11; break; // VERTICAL TAB
|
||||
case 'f': *result++ = 12; break; // FORMFEED
|
||||
case 'r': *result++ = 13; break; // CARRIAGE RETURN
|
||||
case '\\': *result++ = '\\'; break; // upon seeing double backslash - output only one
|
||||
case '\"': *result++ = '"'; break; // backslash followed by '"' - output only '"'
|
||||
} else {
|
||||
switch (*src) {
|
||||
// Copy a matching character without the backslash before it
|
||||
case 'a': *dst++ = 7; break; // BELL
|
||||
case 'b': *dst++ = 8; break; // BACKSPACE
|
||||
case 'e': *dst++ = 27; break; // ESCAPE
|
||||
case 't': *dst++ = 9; break; // HORIZONTAL TAB
|
||||
case 'n': *dst++ = 10; break; // LINEFEED
|
||||
case 'v': *dst++ = 11; break; // VERTICAL TAB
|
||||
case 'f': *dst++ = 12; break; // FORMFEED
|
||||
case 'r': *dst++ = 13; break; // CARRIAGE RETURN
|
||||
case '\?': *dst++ = '\?'; break; // QUESTION MARK
|
||||
case '\\': *dst++ = '\\'; break; // upon seeing double backslash - output only one
|
||||
case '\"': *dst++ = '\"'; break; // DOUBLE QUOTE
|
||||
case '\'': *dst++ = '\''; break; // SINGLE QUOTE
|
||||
default:
|
||||
// invalid escape sequence - do not replace it (return original characters)
|
||||
// Copy from back-track, not including current character, and continue
|
||||
while (mark < first) {
|
||||
*result++ = *mark++;
|
||||
while (dst <= mark && mark < src) {
|
||||
*dst++ = *mark++;
|
||||
}
|
||||
|
||||
// Copy current (terminator) character which is not "escape" and return to copy state
|
||||
// If current character is escape - stay is "escape" state
|
||||
if (*first != '\\') {
|
||||
*result++ = *mark++;
|
||||
if (*src != '\\') {
|
||||
*dst++ = *src;
|
||||
state = STATE_COPY;
|
||||
} else {
|
||||
mark = src;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
state = STATE_COPY;
|
||||
@@ -282,28 +295,26 @@ _IT escape_backslashes(_IT first, _IT last) {
|
||||
break;
|
||||
}
|
||||
case STATE_OCTAL: {
|
||||
if (*first >='0' && *first<='7') {
|
||||
accVal = (accVal << 3) | (*first - '0');
|
||||
if (*src >= '0' && *src <= '7') {
|
||||
accVal = (accVal << 3) | (*src - '0');
|
||||
digitsCount++;
|
||||
|
||||
// Up to 3 octal digits imposed by C standard, so after 3 digits accumulation stops.
|
||||
if (digitsCount == 3) {
|
||||
*result++ = accVal; // output character corresponding to collected accumulated value
|
||||
*dst++ = accVal; // output character corresponding to collected accumulated value
|
||||
digitsCount = 0;
|
||||
state = STATE_COPY;
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
// invalid octal digit stops the accumulation
|
||||
*result++ = accVal; // output character corresponding to collected accumulated value
|
||||
*dst++ = accVal; // output character corresponding to collected accumulated value
|
||||
digitsCount = 0;
|
||||
if (*first != '\\') {
|
||||
if (*src != '\\') {
|
||||
// If terminating character is not backslash output the terminating character
|
||||
*result++ = *first;
|
||||
*dst++ = *src;
|
||||
state = STATE_COPY;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
// If terminating character is backslash start next escape sequence
|
||||
mark = src;
|
||||
state = STATE_ESCAPE;
|
||||
}
|
||||
}
|
||||
@@ -311,36 +322,33 @@ _IT escape_backslashes(_IT first, _IT last) {
|
||||
break;
|
||||
}
|
||||
case STATE_HEX: {
|
||||
if (!isHexDigit(*first)) {
|
||||
// Copy from back-track, not including current character (which is absent), and continue
|
||||
while (mark < first) {
|
||||
*result++ = *mark++;
|
||||
if (!isHexDigit(*src)) {
|
||||
// Copy from back-track, not including *src character (which is absent), and continue
|
||||
while (dst <= mark && mark < src) {
|
||||
*dst++ = *mark++;
|
||||
}
|
||||
if (*first != '\\') {
|
||||
if (*src != '\\') {
|
||||
// If terminating character is not backslash output the terminating character
|
||||
*result++ = *first;
|
||||
*dst++ = *src;
|
||||
state = STATE_COPY;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
// If terminating character is backslash start next escape sequence
|
||||
mark = src;
|
||||
state = STATE_ESCAPE;
|
||||
}
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
accVal = accVal << 4;
|
||||
if (isdigit(*first)) {
|
||||
accVal += *first - '0';
|
||||
}
|
||||
else if (*first >= 'a' && *first <= 'f') {
|
||||
accVal += *first - 'a' + 10;
|
||||
}
|
||||
else if (*first >= 'A' && *first <= 'F') {
|
||||
accVal += *first - 'A' + 10;
|
||||
if (isdigit(*src)) {
|
||||
accVal += *src - '0';
|
||||
} else if (*src >= 'a' && *src <= 'f') {
|
||||
accVal += *src - 'a' + 10;
|
||||
} else if (*src >= 'A' && *src <= 'F') {
|
||||
accVal += *src - 'A' + 10;
|
||||
}
|
||||
digitsCount++;
|
||||
// exactly 2 hex digits are anticipated, so after 2 digits accumulation stops.
|
||||
if (digitsCount == 2) {
|
||||
*result++ = accVal; // output character corresponding to collected accumulated value
|
||||
*dst++ = accVal; // output character corresponding to collected accumulated value
|
||||
digitsCount = 0;
|
||||
state = STATE_COPY;
|
||||
}
|
||||
@@ -350,34 +358,36 @@ _IT escape_backslashes(_IT first, _IT last) {
|
||||
}
|
||||
}
|
||||
|
||||
// Handle state at end of input
|
||||
bool copyBackTrack = true;
|
||||
switch (state) {
|
||||
case STATE_HEX:
|
||||
// this can only happen on this sequence '\xH' where H is a single hex digit.
|
||||
// in this case the sequence is considered invalid and should be copied verbatim (copyBackTrack=true)
|
||||
break;
|
||||
case STATE_OCTAL:
|
||||
// this can only happen when less than 3 octal digits are found at the value end, like '\1' or '\12'
|
||||
*result++ = accVal; // output character corresponding to collected accumulated value
|
||||
copyBackTrack = false;
|
||||
break;
|
||||
case STATE_COPY:
|
||||
copyBackTrack = false;
|
||||
break;
|
||||
case STATE_ESCAPE:
|
||||
break;
|
||||
}
|
||||
if (dst < last) {
|
||||
// Handle state at end of input
|
||||
bool copyBackTrack = true;
|
||||
switch (state) {
|
||||
case STATE_HEX:
|
||||
// this can only happen on this sequence '\xH' where H is a single hex digit.
|
||||
// in this case the sequence is considered invalid and should be copied verbatim (copyBackTrack=true)
|
||||
break;
|
||||
case STATE_OCTAL:
|
||||
// this can only happen when less than 3 octal digits are found at the value end, like '\1' or '\12'
|
||||
*dst++ = accVal; // output character corresponding to collected accumulated value
|
||||
copyBackTrack = false;
|
||||
break;
|
||||
case STATE_COPY:
|
||||
copyBackTrack = false;
|
||||
break;
|
||||
case STATE_ESCAPE:
|
||||
break;
|
||||
}
|
||||
|
||||
if (copyBackTrack) {
|
||||
// invalid escape sequence - do not replace it (return original characters)
|
||||
// Copy from back-track
|
||||
while (mark < first) {
|
||||
*result++ = *mark++;
|
||||
if (copyBackTrack) {
|
||||
// invalid escape sequence - do not replace it (return original characters)
|
||||
// Copy from back-track
|
||||
while (dst <= mark && mark < src) {
|
||||
*dst++ = *mark++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
return dst;
|
||||
}
|
||||
|
||||
inline bool str_contains(const std::string &haystack, const std::string &needle)
|
||||
@@ -395,7 +405,8 @@ extern const size_t g_htmlEntitiesCount;
|
||||
|
||||
template<class _IT>
|
||||
_IT escape_html(_IT first, _IT last) {
|
||||
_IT result = first;
|
||||
_IT dst = first;
|
||||
_IT src = first;
|
||||
enum {
|
||||
STATE_COPY,
|
||||
STATE_ESCAPE,
|
||||
@@ -408,26 +419,26 @@ _IT escape_html(_IT first, _IT last) {
|
||||
std::list<size_t> potentialMatchIndices;
|
||||
size_t matchLength = 0;
|
||||
size_t lastKnownMatchIndex = -1;
|
||||
_IT mark = first;
|
||||
_IT mark = src;
|
||||
|
||||
for (; first != last; ++first) {
|
||||
for (; src != last && dst < last; ++src) {
|
||||
switch (state) {
|
||||
case STATE_COPY:
|
||||
if (*first == '&') {
|
||||
mark = first;
|
||||
if (*src == '&') {
|
||||
mark = src;
|
||||
state = STATE_ESCAPE;
|
||||
}
|
||||
else {
|
||||
*result++ = *first;
|
||||
*dst++ = *src;
|
||||
}
|
||||
break;
|
||||
case STATE_ESCAPE:
|
||||
if (isalpha(*first)) {
|
||||
if (isalpha(*src)) {
|
||||
// initialize potential matches list
|
||||
potentialMatchIndices.clear();
|
||||
|
||||
for (size_t index = 0; index < g_htmlEntitiesCount; ++index) {
|
||||
if (*first == g_htmlEntities[index].name[0]) {
|
||||
if (*src == g_htmlEntities[index].name[0]) {
|
||||
potentialMatchIndices.push_back(index);
|
||||
lastKnownMatchIndex = index;
|
||||
}
|
||||
@@ -435,8 +446,8 @@ _IT escape_html(_IT first, _IT last) {
|
||||
|
||||
// No potential matches - send ampersand and current character to output
|
||||
if (potentialMatchIndices.size() == 0) {
|
||||
*result++ = '&';
|
||||
*result++ = *first;
|
||||
*dst++ = '&';
|
||||
*dst++ = *src;
|
||||
state = STATE_COPY;
|
||||
break;
|
||||
}
|
||||
@@ -445,7 +456,7 @@ _IT escape_html(_IT first, _IT last) {
|
||||
matchLength = 1;
|
||||
state = STATE_NAMED_CHARACTER_REFERENCE;
|
||||
}
|
||||
else if (*first == '#') {
|
||||
else if (*src == '#') {
|
||||
digitsSeen = 0;
|
||||
accVal = 0;
|
||||
state = STATE_NUMERIC_START;
|
||||
@@ -453,8 +464,8 @@ _IT escape_html(_IT first, _IT last) {
|
||||
else {
|
||||
// not isalpha and not '#' - this is invalid character reference - do not replace it
|
||||
// (return original characters)
|
||||
*result++ = '&';
|
||||
*result++ = *first;
|
||||
*dst++ = '&';
|
||||
*dst++ = *src;
|
||||
state = STATE_COPY;
|
||||
}
|
||||
break;
|
||||
@@ -473,7 +484,7 @@ _IT escape_html(_IT first, _IT last) {
|
||||
|
||||
// If there are no more characters in the potntial match name,
|
||||
// or the next tested character doesn't match - kill the match
|
||||
if ((matchName[matchLength] == '\0') || (matchName[matchLength] != *first)) {
|
||||
if ((matchName[matchLength] == '\0') || (matchName[matchLength] != *src)) {
|
||||
// remove current element from the list of potential matches
|
||||
pPotentialMatchIndex = potentialMatchIndices.erase(pPotentialMatchIndex);
|
||||
}
|
||||
@@ -489,15 +500,15 @@ _IT escape_html(_IT first, _IT last) {
|
||||
// No more potential matches: unsuccesful match -> flush all consumed characters back to output stream
|
||||
if (potentialMatchIndices.size() == 0) {
|
||||
// Send consumed ampersand to the output
|
||||
*result++ = '&';
|
||||
*dst++ = '&';
|
||||
|
||||
// Send those matched characters (these are the same that we consumed) - to the output
|
||||
for (size_t i = 0; i < matchLength; i++) {
|
||||
*result++ = g_htmlEntities[lastKnownMatchIndex].name[i];
|
||||
*dst++ = g_htmlEntities[lastKnownMatchIndex].name[i];
|
||||
}
|
||||
|
||||
// Send the character that terminated our search for possible matches
|
||||
*result++ = *first;
|
||||
*dst++ = *src;
|
||||
|
||||
// Continue copying text verbatim
|
||||
state = STATE_COPY;
|
||||
@@ -505,23 +516,23 @@ _IT escape_html(_IT first, _IT last) {
|
||||
}
|
||||
|
||||
// There are still potential matches and ';' is hit
|
||||
if (*first == ';') {
|
||||
if (*src == ';') {
|
||||
// longest match found for the named character reference.
|
||||
// translate it into output character(s) and we're done.
|
||||
unsigned short value = g_htmlEntities[lastKnownMatchIndex].value;
|
||||
|
||||
// Encode UTF code point as UTF-8 bytes
|
||||
if (value < 0x80) {
|
||||
*result++ = value;
|
||||
*dst++ = value;
|
||||
}
|
||||
else if (value < 0x800 ) {
|
||||
*result++ = (value >> 6) | 0xC0;
|
||||
*result++ = (value & 0x3F) | 0x80;
|
||||
*dst++ = (value >> 6) | 0xC0;
|
||||
*dst++ = (value & 0x3F) | 0x80;
|
||||
}
|
||||
else { // (value <= 0xFFFF : always true because value type is unsigned short which is 16-bit
|
||||
*result++ = (value >> 12) | 0xE0;
|
||||
*result++ = ((value >> 6) & 0x3F) | 0x80;
|
||||
*result++ = (value & 0x3F) | 0x80;
|
||||
*dst++ = (value >> 12) | 0xE0;
|
||||
*dst++ = ((value >> 6) & 0x3F) | 0x80;
|
||||
*dst++ = (value & 0x3F) | 0x80;
|
||||
}
|
||||
|
||||
// Continue copying text verbatim
|
||||
@@ -532,178 +543,179 @@ _IT escape_html(_IT first, _IT last) {
|
||||
case STATE_NUMERIC_START:
|
||||
digitsSeen = false;
|
||||
accVal = 0;
|
||||
if (*first == 'x' || *first == 'X') {
|
||||
if (*src == 'x' || *src == 'X') {
|
||||
state = STATE_HEX;
|
||||
}
|
||||
else if (isdigit(*first)) {
|
||||
else if (isdigit(*src)) {
|
||||
digitsSeen = true;
|
||||
accVal = *first - '0';
|
||||
accVal = *src - '0';
|
||||
state = STATE_NUMERIC;
|
||||
}
|
||||
else {
|
||||
// Sequence started with these two characters: '&#', and here is the third, non-digit character
|
||||
|
||||
// Copy from back-track, not including current character, and continue
|
||||
while (mark < first) {
|
||||
*result++ = *mark++;
|
||||
while (dst <= mark && mark < src) {
|
||||
*dst++ = *mark++;
|
||||
}
|
||||
|
||||
if (*first == '&') {
|
||||
if (*src == '&') {
|
||||
// Terminator is also start of next escape sequence
|
||||
mark = first;
|
||||
mark = src;
|
||||
state = STATE_ESCAPE;
|
||||
break;
|
||||
}
|
||||
else {
|
||||
// Copy the terminating character too
|
||||
*result++ = *first;
|
||||
*dst++ = *src;
|
||||
}
|
||||
state = STATE_COPY;
|
||||
}
|
||||
break;
|
||||
case STATE_NUMERIC:
|
||||
if (!isdigit(*first)) {
|
||||
if (!isdigit(*src)) {
|
||||
if (digitsSeen) {
|
||||
// Encode UTF code point as UTF-8 bytes
|
||||
if (accVal < 0x80) {
|
||||
*result++ = accVal;
|
||||
*dst++ = accVal;
|
||||
}
|
||||
else if (accVal < 0x800 ) {
|
||||
*result++ = (accVal >> 6) | 0xC0;
|
||||
*result++ = (accVal & 0x3F) | 0x80;
|
||||
*dst++ = (accVal >> 6) | 0xC0;
|
||||
*dst++ = (accVal & 0x3F) | 0x80;
|
||||
}
|
||||
else { // (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit
|
||||
*result++ = (accVal >> 12) | 0xE0;
|
||||
*result++ = ((accVal >> 6) & 0x3F) | 0x80;
|
||||
*result++ = (accVal & 0x3F) | 0x80;
|
||||
*dst++ = (accVal >> 12) | 0xE0;
|
||||
*dst++ = ((accVal >> 6) & 0x3F) | 0x80;
|
||||
*dst++ = (accVal & 0x3F) | 0x80;
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Copy from back-track, not including current character (which is absent), and continue
|
||||
while (mark < first) {
|
||||
*result++ = *mark++;
|
||||
while (dst <= mark && mark < src) {
|
||||
*dst++ = *mark++;
|
||||
}
|
||||
}
|
||||
|
||||
if (*first == '&') {
|
||||
if (*src == '&') {
|
||||
// Terminator is also start of next escape sequence
|
||||
mark = first;
|
||||
mark = src;
|
||||
state = STATE_ESCAPE;
|
||||
break;
|
||||
}
|
||||
else if (!digitsSeen || *first != ';') {
|
||||
else if (!digitsSeen || *src != ';') {
|
||||
// Do not copy the ';' but do copy any other terminator
|
||||
// Note: the ';' should remain in the output if there were no digits seen.
|
||||
*result++ = *first;
|
||||
*dst++ = *src;
|
||||
}
|
||||
state = STATE_COPY;
|
||||
}
|
||||
else {
|
||||
digitsSeen = true;
|
||||
accVal = accVal * 10 + *first - '0'; // TODO:: beware of integer overflow?
|
||||
accVal = accVal * 10 + *src - '0'; // TODO:: beware of integer overflow?
|
||||
}
|
||||
break;
|
||||
case STATE_HEX:
|
||||
if (!isHexDigit(*first)) {
|
||||
if (!isHexDigit(*src)) {
|
||||
if (digitsSeen) {
|
||||
// Encode UTF code point as UTF-8 bytes
|
||||
if (accVal < 0x80) {
|
||||
*result++ = accVal;
|
||||
*dst++ = accVal;
|
||||
}
|
||||
else if (accVal < 0x800 ) {
|
||||
*result++ = (accVal >> 6) | 0xC0;
|
||||
*result++ = (accVal & 0x3F) | 0x80;
|
||||
*dst++ = (accVal >> 6) | 0xC0;
|
||||
*dst++ = (accVal & 0x3F) | 0x80;
|
||||
}
|
||||
else { // (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit
|
||||
*result++ = (accVal >> 12) | 0xE0;
|
||||
*result++ = ((accVal >> 6) & 0x3F) | 0x80;
|
||||
*result++ = (accVal & 0x3F) | 0x80;
|
||||
*dst++ = (accVal >> 12) | 0xE0;
|
||||
*dst++ = ((accVal >> 6) & 0x3F) | 0x80;
|
||||
*dst++ = (accVal & 0x3F) | 0x80;
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Copy from back-track, not including current character (which is absent), and continue
|
||||
while (mark < first) {
|
||||
*result++ = *mark++;
|
||||
while (dst <= mark && mark < src) {
|
||||
*dst++ = *mark++;
|
||||
}
|
||||
}
|
||||
|
||||
if (*first == '&') {
|
||||
if (*src == '&') {
|
||||
// Terminator is also start of next escape sequence
|
||||
mark = first;
|
||||
mark = src;
|
||||
state = STATE_ESCAPE;
|
||||
break;
|
||||
}
|
||||
else if (!digitsSeen || *first != ';') {
|
||||
else if (!digitsSeen || *src != ';') {
|
||||
// Do not copy the ';' but do copy any other terminator
|
||||
// Note: the ';' should remain in the output if there were no digits seen.
|
||||
*result++ = *first;
|
||||
*dst++ = *src;
|
||||
}
|
||||
state = STATE_COPY;
|
||||
}
|
||||
else {
|
||||
digitsSeen = true;
|
||||
accVal = accVal << 4;
|
||||
if (isdigit(*first)) {
|
||||
accVal += *first - '0';
|
||||
if (isdigit(*src)) {
|
||||
accVal += *src - '0';
|
||||
}
|
||||
else if (*first >= 'a' && *first <= 'f') {
|
||||
accVal += *first - 'a' + 10;
|
||||
else if (*src >= 'a' && *src <= 'f') {
|
||||
accVal += *src - 'a' + 10;
|
||||
}
|
||||
else if (*first >= 'A' && *first <= 'F') {
|
||||
accVal += *first - 'A' + 10;
|
||||
else if (*src >= 'A' && *src <= 'F') {
|
||||
accVal += *src - 'A' + 10;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (state == STATE_ESCAPE) {
|
||||
*result++ = '&';
|
||||
if (state == STATE_ESCAPE && dst < last) {
|
||||
*dst++ = '&';
|
||||
}
|
||||
else if (state == STATE_NAMED_CHARACTER_REFERENCE && potentialMatchIndices.size() > 0) {
|
||||
else if (state == STATE_NAMED_CHARACTER_REFERENCE && potentialMatchIndices.size() > 0 && dst < last) {
|
||||
// Send consumed ampersand to the output
|
||||
*result++ = '&';
|
||||
*dst++ = '&';
|
||||
|
||||
// Send those matched characters (these are the same that we consumed) - to the output
|
||||
for (size_t i = 0; i < matchLength; i++) {
|
||||
for (size_t i = 0; i < matchLength && dst < last; i++) {
|
||||
// Even if there are multiple potential matches, all of them start with the same
|
||||
// matchLength characters that we consumed!
|
||||
*result++ = g_htmlEntities[lastKnownMatchIndex].name[i];
|
||||
*dst++ = g_htmlEntities[lastKnownMatchIndex].name[i];
|
||||
}
|
||||
}
|
||||
if (state == STATE_HEX && !digitsSeen) { // Special case of "&#x"
|
||||
// Copy from back-track, not including current character (which is absent), and continue
|
||||
while (mark < first) {
|
||||
*result++ = *mark++;
|
||||
while (dst <= mark && mark < src) {
|
||||
*dst++ = *mark++;
|
||||
}
|
||||
state = STATE_COPY;
|
||||
}
|
||||
else if (state == STATE_HEX || state == STATE_NUMERIC || state == STATE_NUMERIC_START) {
|
||||
if (digitsSeen) {
|
||||
if (digitsSeen && dst < last) {
|
||||
// Encode UTF code point as UTF-8 bytes
|
||||
if (accVal < 0x80) {
|
||||
*result++ = accVal;
|
||||
*dst++ = accVal;
|
||||
}
|
||||
else if (accVal < 0x800 ) {
|
||||
*result++ = (accVal >> 6) | 0xC0;
|
||||
*result++ = (accVal & 0x3F) | 0x80;
|
||||
else if (accVal < 0x800 && std::distance(dst, last) >= 2) {
|
||||
*dst++ = (accVal >> 6) | 0xC0;
|
||||
*dst++ = (accVal & 0x3F) | 0x80;
|
||||
}
|
||||
else { // (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit
|
||||
*result++ = (accVal >> 12) | 0xE0;
|
||||
*result++ = ((accVal >> 6) & 0x3F) | 0x80;
|
||||
*result++ = (accVal & 0x3F) | 0x80;
|
||||
// (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit
|
||||
else if (std::distance(dst, last) >= 3) {
|
||||
*dst++ = (accVal >> 12) | 0xE0;
|
||||
*dst++ = ((accVal >> 6) & 0x3F) | 0x80;
|
||||
*dst++ = (accVal & 0x3F) | 0x80;
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Copy from back-track, not including current character (which is absent), and continue
|
||||
while (mark < first) {
|
||||
*result++ = *mark++;
|
||||
while (dst <= mark && mark < src) {
|
||||
*dst++ = *mark++;
|
||||
}
|
||||
state = STATE_COPY;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
return dst;
|
||||
}
|
||||
|
||||
// Compare two buffers, case insensitive. Return true if they are equal (case-insensitive)
|
||||
@@ -865,6 +877,17 @@ void unescapeUnicode(std::string &text);
|
||||
// Try to find and decode UTF7 chunks
|
||||
std::string filterUTF7(const std::string &text);
|
||||
|
||||
base64_decode_status
|
||||
decideStatusBase64Decoded(
|
||||
std::string& decoded,
|
||||
double entropy,
|
||||
double decoded_entropy,
|
||||
size_t spacer_count,
|
||||
size_t nonPrintableCharsCount,
|
||||
bool clear_on_error,
|
||||
double terminatorCharsSeen,
|
||||
bool called_with_prefix);
|
||||
|
||||
base64_decode_status
|
||||
decodeBase64Chunk(
|
||||
const std::string &value,
|
||||
@@ -926,7 +949,8 @@ namespace Util {
|
||||
const std::string &s,
|
||||
std::string &key,
|
||||
std::string &value,
|
||||
BinaryFileType &binaryFileType);
|
||||
BinaryFileType &binaryFileType,
|
||||
size_t offset = 0);
|
||||
|
||||
// The original stdlib implementation of isalpha() supports locale settings which we do not really need.
|
||||
// It is also proven to contribute to slow performance in some of the algorithms using it.
|
||||
|
||||
@@ -43,6 +43,7 @@
|
||||
#include "agent_core_utilities.h"
|
||||
|
||||
#define stack_trace_max_len 64
|
||||
#define STACK_SIZE (1024 * 1024) // 1 MB stack size
|
||||
|
||||
using namespace std;
|
||||
using namespace ReportIS;
|
||||
@@ -57,6 +58,12 @@ public:
|
||||
{
|
||||
if (out_trace_file_fd != -1) close(out_trace_file_fd);
|
||||
out_trace_file_fd = -1;
|
||||
|
||||
if (alt_stack.ss_sp != nullptr) {
|
||||
free(alt_stack.ss_sp);
|
||||
alt_stack.ss_sp = nullptr;
|
||||
alt_stack_initialized = false;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
@@ -69,6 +76,7 @@ public:
|
||||
void
|
||||
init()
|
||||
{
|
||||
alt_stack.ss_sp = nullptr;
|
||||
addSignalHandlerRoutine();
|
||||
addReloadConfigurationRoutine();
|
||||
}
|
||||
@@ -244,6 +252,28 @@ private:
|
||||
setHandlerPerSignalNum();
|
||||
}
|
||||
|
||||
bool
|
||||
setupAlternateSignalStack()
|
||||
{
|
||||
if (alt_stack_initialized) return true;
|
||||
alt_stack.ss_sp = malloc(STACK_SIZE);
|
||||
if (alt_stack.ss_sp == nullptr) {
|
||||
dbgWarning(D_SIGNAL_HANDLER) << "Failed to allocate alternate stack";
|
||||
return false;
|
||||
}
|
||||
alt_stack.ss_size = STACK_SIZE;
|
||||
alt_stack.ss_flags = 0;
|
||||
|
||||
if (sigaltstack(&alt_stack, nullptr) == -1) {
|
||||
dbgWarning(D_SIGNAL_HANDLER) << "Failed to set up alternate stack";
|
||||
free(alt_stack.ss_sp);
|
||||
return false;
|
||||
}
|
||||
dbgInfo(D_SIGNAL_HANDLER) << "Alternate stack allocated successfully. Allocated size: " << STACK_SIZE;
|
||||
alt_stack_initialized = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
setHandlerPerSignalNum()
|
||||
{
|
||||
@@ -261,8 +291,29 @@ private:
|
||||
SIGUSR2
|
||||
};
|
||||
|
||||
if (!setupAlternateSignalStack()) {
|
||||
dbgWarning(D_SIGNAL_HANDLER) << "Failed to set up alternate signal stack";
|
||||
for (int sig : signals) {
|
||||
signal(sig, signalHandlerCB);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
struct sigaction sa;
|
||||
memset(&sa, 0, sizeof(sa));
|
||||
sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
|
||||
sa.sa_sigaction = signalActionHandlerCB;
|
||||
|
||||
sigemptyset(&sa.sa_mask);
|
||||
|
||||
for (int sig : signals) {
|
||||
signal(sig, signalHandlerCB);
|
||||
if (sig == SIGKILL || sig == SIGSTOP) {
|
||||
signal(sig, signalHandlerCB);
|
||||
continue;
|
||||
}
|
||||
if (sigaction(sig, &sa, nullptr) == -1) {
|
||||
dbgError(D_SIGNAL_HANDLER) << "Failed to set signal handler for signal " << sig;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -284,55 +335,30 @@ private:
|
||||
static void
|
||||
signalHandlerCB(int _signal)
|
||||
{
|
||||
const char *signal_name = "";
|
||||
const char *signal_name = strsignal(_signal);
|
||||
char signal_num[3];
|
||||
snprintf(signal_num, sizeof(signal_num), "%d", _signal);
|
||||
|
||||
if (out_trace_file_fd == -1) exit(_signal);
|
||||
|
||||
reset_signal_handler = true;
|
||||
|
||||
switch(_signal) {
|
||||
case SIGABRT: {
|
||||
signal_name = "SIGABRT";
|
||||
fini_signal_flag = true;
|
||||
return;
|
||||
}
|
||||
case SIGKILL: {
|
||||
signal_name = "SIGKILL";
|
||||
fini_signal_flag = true;
|
||||
return;
|
||||
}
|
||||
case SIGQUIT: {
|
||||
signal_name = "SIGQUIT";
|
||||
fini_signal_flag = true;
|
||||
return;
|
||||
}
|
||||
case SIGINT: {
|
||||
signal_name = "SIGINT";
|
||||
fini_signal_flag = true;
|
||||
return;
|
||||
}
|
||||
case SIGABRT:
|
||||
case SIGKILL:
|
||||
case SIGQUIT:
|
||||
case SIGINT:
|
||||
case SIGTERM: {
|
||||
signal_name = "SIGTERM";
|
||||
fini_signal_flag = true;
|
||||
return;
|
||||
}
|
||||
case SIGSEGV: {
|
||||
signal_name = "SIGSEGV";
|
||||
break;
|
||||
}
|
||||
case SIGBUS: {
|
||||
signal_name = "SIGBUS";
|
||||
break;
|
||||
}
|
||||
case SIGILL: {
|
||||
signal_name = "SIGILL";
|
||||
break;
|
||||
}
|
||||
case SIGSEGV:
|
||||
case SIGBUS:
|
||||
case SIGILL:
|
||||
case SIGFPE: {
|
||||
signal_name = "SIGFPE";
|
||||
break;
|
||||
}
|
||||
case SIGPIPE: {
|
||||
signal_name = "SIGPIPE";
|
||||
return;
|
||||
}
|
||||
case SIGUSR2: {
|
||||
@@ -341,13 +367,6 @@ private:
|
||||
}
|
||||
}
|
||||
|
||||
if (out_trace_file_fd == -1) exit(_signal);
|
||||
|
||||
for (uint i = 0; i < sizeof(signal_num); ++i) {
|
||||
uint placement = sizeof(signal_num) - 1 - i;
|
||||
signal_num[placement] = _signal%10 + '0';
|
||||
_signal /= 10;
|
||||
}
|
||||
const char *signal_error_prefix = "Caught signal ";
|
||||
writeData(signal_error_prefix, strlen(signal_error_prefix));
|
||||
writeData(signal_num, sizeof(signal_num));
|
||||
@@ -367,6 +386,12 @@ private:
|
||||
exit(_signal);
|
||||
}
|
||||
|
||||
static void
|
||||
signalActionHandlerCB(int signum, siginfo_t *, void *)
|
||||
{
|
||||
signalHandlerCB(signum);
|
||||
}
|
||||
|
||||
static void
|
||||
printStackTrace()
|
||||
{
|
||||
@@ -391,16 +416,22 @@ private:
|
||||
for (uint i = 0 ; i < stack_trace_max_len ; i++) {
|
||||
unw_get_reg(&cursor, UNW_REG_IP, &ip);
|
||||
unw_get_reg(&cursor, UNW_REG_SP, &sp);
|
||||
|
||||
if (unw_get_proc_name(&cursor, name, sizeof(name), &off) == 0) {
|
||||
int procNameRc = unw_get_proc_name(&cursor, name, sizeof(name), &off);
|
||||
if (procNameRc == 0 || procNameRc == -UNW_ENOMEM) {
|
||||
const char *open_braces = "<";
|
||||
writeData(open_braces, strlen(open_braces));
|
||||
writeData(name, strlen(name));
|
||||
writeData(name, strnlen(name, sizeof(name)));
|
||||
if (procNameRc != 0) {
|
||||
const char *dots = "...";
|
||||
writeData(dots, strlen(dots));
|
||||
}
|
||||
const char *close_braces = ">\n";
|
||||
writeData(close_braces, strlen(close_braces));
|
||||
} else {
|
||||
const char *error = " -- error: unable to obtain symbol name for this frame\n";
|
||||
writeData(error, strlen(error));
|
||||
}
|
||||
|
||||
|
||||
if (unw_step(&cursor) <= 0) return;
|
||||
}
|
||||
|
||||
@@ -444,12 +475,16 @@ private:
|
||||
static bool reload_settings_flag;
|
||||
static bool reset_signal_handler;
|
||||
static int out_trace_file_fd;
|
||||
static stack_t alt_stack;
|
||||
static bool alt_stack_initialized;
|
||||
};
|
||||
|
||||
string SignalHandler::Impl::trace_file_path;
|
||||
bool SignalHandler::Impl::reload_settings_flag = false;
|
||||
bool SignalHandler::Impl::reset_signal_handler = false;
|
||||
int SignalHandler::Impl::out_trace_file_fd = -1;
|
||||
stack_t SignalHandler::Impl::alt_stack;
|
||||
bool SignalHandler::Impl::alt_stack_initialized = false;
|
||||
|
||||
SignalHandler::SignalHandler() : Component("SignalHandler"), pimpl(make_unique<Impl>()) {}
|
||||
SignalHandler::~SignalHandler() {}
|
||||
|
||||
@@ -5,3 +5,5 @@ add_subdirectory(ip_utilities)
|
||||
add_subdirectory(keywords)
|
||||
add_subdirectory(pm)
|
||||
add_subdirectory(service_health_status)
|
||||
add_subdirectory(nginx_utils)
|
||||
add_subdirectory(utilities)
|
||||
|
||||
1
components/utils/nginx_utils/CMakeLists.txt
Executable file
1
components/utils/nginx_utils/CMakeLists.txt
Executable file
@@ -0,0 +1 @@
|
||||
add_library(nginx_utils nginx_utils.cc)
|
||||
282
components/utils/nginx_utils/nginx_utils.cc
Executable file
282
components/utils/nginx_utils/nginx_utils.cc
Executable file
@@ -0,0 +1,282 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "nginx_utils.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <dirent.h>
|
||||
#include <boost/regex.hpp>
|
||||
#include <algorithm>
|
||||
|
||||
#include "debug.h"
|
||||
#include "maybe_res.h"
|
||||
#include "config.h"
|
||||
#include "agent_core_utilities.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_NGINX_MANAGER);
|
||||
|
||||
NginxConfCollector::NginxConfCollector(const string &input_path, const string &output_path)
|
||||
:
|
||||
main_conf_input_path(input_path),
|
||||
main_conf_output_path(output_path)
|
||||
{
|
||||
main_conf_directory_path = main_conf_input_path.substr(0, main_conf_input_path.find_last_of('/'));
|
||||
}
|
||||
|
||||
vector<string>
|
||||
NginxConfCollector::expandIncludes(const string &include_pattern) const {
|
||||
vector<string> matching_files;
|
||||
string absolute_include_pattern = include_pattern;
|
||||
string maybe_directory = include_pattern.substr(0, include_pattern.find_last_of('/'));
|
||||
if (!maybe_directory.empty() && maybe_directory.front() != '/') {
|
||||
dbgTrace(D_NGINX_MANAGER) << "Include pattern is a relative path: " << include_pattern;
|
||||
maybe_directory = main_conf_directory_path + '/' + maybe_directory;
|
||||
absolute_include_pattern = main_conf_directory_path + '/' + include_pattern;
|
||||
}
|
||||
|
||||
if (!NGEN::Filesystem::exists(maybe_directory)) {
|
||||
dbgTrace(D_NGINX_MANAGER) << "Include pattern directory/file does not exist: " << maybe_directory;
|
||||
return matching_files;
|
||||
}
|
||||
|
||||
string filename_pattern = absolute_include_pattern.substr(absolute_include_pattern.find_last_of('/') + 1);
|
||||
boost::regex wildcard_regex("\\*");
|
||||
boost::regex pattern(
|
||||
NGEN::Regex::regexReplace(__FILE__, __LINE__, filename_pattern, wildcard_regex, string("[^/]*"))
|
||||
);
|
||||
|
||||
if (!NGEN::Filesystem::isDirectory(maybe_directory)) {
|
||||
dbgTrace(D_NGINX_MANAGER) << "Include pattern is a file: " << absolute_include_pattern;
|
||||
matching_files.push_back(absolute_include_pattern);
|
||||
return matching_files;
|
||||
}
|
||||
|
||||
DIR* dir = opendir(maybe_directory.c_str());
|
||||
if (!dir) {
|
||||
dbgTrace(D_NGINX_MANAGER) << "Could not open directory: " << maybe_directory;
|
||||
return matching_files;
|
||||
}
|
||||
|
||||
struct dirent *entry;
|
||||
while ((entry = readdir(dir)) != nullptr) {
|
||||
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) continue;
|
||||
if (NGEN::Regex::regexMatch(__FILE__, __LINE__, entry->d_name, pattern)) {
|
||||
matching_files.push_back(maybe_directory + "/" + entry->d_name);
|
||||
dbgTrace(D_NGINX_MANAGER) << "Matched file: " << maybe_directory << '/' << entry->d_name;
|
||||
}
|
||||
}
|
||||
closedir(dir);
|
||||
sort(matching_files.begin(), matching_files.end());
|
||||
|
||||
return matching_files;
|
||||
}
|
||||
|
||||
void
|
||||
NginxConfCollector::processConfigFile(const string &path, ostringstream &conf_output, vector<string> &errors) const
|
||||
{
|
||||
ifstream file(path);
|
||||
if (!file.is_open()) return;
|
||||
|
||||
string content((istreambuf_iterator<char>(file)), istreambuf_iterator<char>());
|
||||
file.close();
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER) << "Processing file: " << path;
|
||||
|
||||
if (content.empty()) return;
|
||||
|
||||
try {
|
||||
boost::regex include_regex(R"(^\s*include\s+([^;]+);)");
|
||||
boost::smatch match;
|
||||
|
||||
while (NGEN::Regex::regexSearch(__FILE__, __LINE__, content, match, include_regex)) {
|
||||
string include_pattern = match[1].str();
|
||||
include_pattern = NGEN::Strings::trim(include_pattern);
|
||||
dbgTrace(D_NGINX_MANAGER) << "Include pattern: " << include_pattern;
|
||||
|
||||
vector<string> included_files = expandIncludes(include_pattern);
|
||||
if (included_files.empty()) {
|
||||
dbgTrace(D_NGINX_MANAGER) << "No files matched the include pattern: " << include_pattern;
|
||||
content.replace(match.position(), match.length(), "");
|
||||
continue;
|
||||
}
|
||||
|
||||
ostringstream included_content;
|
||||
for (const string &included_file : included_files) {
|
||||
dbgTrace(D_NGINX_MANAGER) << "Processing included file: " << included_file;
|
||||
processConfigFile(included_file, included_content, errors);
|
||||
}
|
||||
content.replace(match.position(), match.length(), included_content.str());
|
||||
}
|
||||
} catch (const boost::regex_error &e) {
|
||||
errors.emplace_back(e.what());
|
||||
return;
|
||||
} catch (const exception &e) {
|
||||
errors.emplace_back(e.what());
|
||||
return;
|
||||
}
|
||||
|
||||
conf_output << content;
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
NginxConfCollector::generateFullNginxConf() const
|
||||
{
|
||||
if (!NGEN::Filesystem::exists(main_conf_input_path)) {
|
||||
return genError("Input file does not exist: " + main_conf_input_path);
|
||||
}
|
||||
|
||||
ostringstream conf_output;
|
||||
vector<string> errors;
|
||||
processConfigFile(main_conf_input_path, conf_output, errors);
|
||||
|
||||
if (!errors.empty()) {
|
||||
for (const string &error : errors) dbgWarning(D_NGINX_MANAGER) << error;
|
||||
return genError("Errors occurred while processing configuration files");
|
||||
}
|
||||
|
||||
ofstream single_nginx_conf_file(main_conf_output_path);
|
||||
if (!single_nginx_conf_file.is_open()) return genError("Could not create output file: " + main_conf_output_path);
|
||||
|
||||
single_nginx_conf_file << conf_output.str();
|
||||
single_nginx_conf_file.close();
|
||||
|
||||
return NGEN::Filesystem::resolveFullPath(main_conf_output_path);
|
||||
}
|
||||
|
||||
string
|
||||
NginxUtils::getMainNginxConfPath()
|
||||
{
|
||||
static string main_nginx_conf_path;
|
||||
if (!main_nginx_conf_path.empty()) return main_nginx_conf_path;
|
||||
|
||||
auto main_nginx_conf_path_setting = getProfileAgentSetting<string>("centralNginxManagement.mainConfPath");
|
||||
if (main_nginx_conf_path_setting.ok()) {
|
||||
main_nginx_conf_path = main_nginx_conf_path_setting.unpack();
|
||||
return main_nginx_conf_path;
|
||||
}
|
||||
|
||||
string default_main_nginx_conf_path = "/etc/nginx/nginx.conf";
|
||||
string command = "nginx -V 2>&1";
|
||||
auto result = Singleton::Consume<I_ShellCmd>::by<NginxUtils>()->getExecOutputAndCode(command);
|
||||
if (!result.ok()) return default_main_nginx_conf_path;
|
||||
|
||||
string output = result.unpack().first;
|
||||
boost::regex conf_regex(R"(--conf-path=([^ ]+))");
|
||||
boost::smatch match;
|
||||
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, output, match, conf_regex)) {
|
||||
main_nginx_conf_path = default_main_nginx_conf_path;
|
||||
return main_nginx_conf_path;
|
||||
}
|
||||
|
||||
string conf_path = match[1].str();
|
||||
conf_path = NGEN::Strings::trim(conf_path);
|
||||
if (conf_path.empty()) {
|
||||
main_nginx_conf_path = default_main_nginx_conf_path;
|
||||
return main_nginx_conf_path;
|
||||
}
|
||||
|
||||
main_nginx_conf_path = conf_path;
|
||||
return main_nginx_conf_path;
|
||||
}
|
||||
|
||||
string
|
||||
NginxUtils::getModulesPath()
|
||||
{
|
||||
static string main_modules_path;
|
||||
if (!main_modules_path.empty()) return main_modules_path;
|
||||
|
||||
auto modules_path_setting = getProfileAgentSetting<string>("centralNginxManagement.modulesPath");
|
||||
if (modules_path_setting.ok()) {
|
||||
main_modules_path = modules_path_setting.unpack();
|
||||
return main_modules_path;
|
||||
}
|
||||
|
||||
string default_modules_path = "/usr/share/nginx/modules";
|
||||
string command = "nginx -V 2>&1";
|
||||
auto result = Singleton::Consume<I_ShellCmd>::by<NginxUtils>()->getExecOutputAndCode(command);
|
||||
if (!result.ok()) return default_modules_path;
|
||||
|
||||
string output = result.unpack().first;
|
||||
boost::regex modules_regex(R"(--modules-path=([^ ]+))");
|
||||
boost::smatch match;
|
||||
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, output, match, modules_regex)) {
|
||||
main_modules_path = default_modules_path;
|
||||
return main_modules_path;
|
||||
}
|
||||
|
||||
string modules_path = match[1].str();
|
||||
modules_path = NGEN::Strings::trim(modules_path);
|
||||
if (modules_path.empty()) {
|
||||
main_modules_path = default_modules_path;
|
||||
return main_modules_path;
|
||||
}
|
||||
|
||||
main_modules_path = modules_path;
|
||||
return modules_path;
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
NginxUtils::validateNginxConf(const string &nginx_conf_path)
|
||||
{
|
||||
dbgTrace(D_NGINX_MANAGER) << "Validating NGINX configuration file: " << nginx_conf_path;
|
||||
if (!NGEN::Filesystem::exists(nginx_conf_path)) return genError("Nginx configuration file does not exist");
|
||||
|
||||
string command = "nginx -t -c " + nginx_conf_path + " 2>&1";
|
||||
auto result = Singleton::Consume<I_ShellCmd>::by<NginxUtils>()->getExecOutputAndCode(command);
|
||||
if (!result.ok()) return genError(result.getErr());
|
||||
if (result.unpack().second != 0) return genError(result.unpack().first);
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER) << "NGINX configuration file is valid";
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
NginxUtils::reloadNginx(const string &nginx_conf_path)
|
||||
{
|
||||
dbgTrace(D_NGINX_MANAGER) << "Applying and reloading new NGINX configuration file: " << nginx_conf_path;
|
||||
string main_nginx_conf_path = getMainNginxConfPath();
|
||||
|
||||
string backup_conf_path = main_nginx_conf_path + ".bak";
|
||||
if (
|
||||
NGEN::Filesystem::exists(main_nginx_conf_path)
|
||||
&& !NGEN::Filesystem::copyFile(main_nginx_conf_path, backup_conf_path, true)
|
||||
) {
|
||||
return genError("Could not create backup of NGINX configuration file");
|
||||
}
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER) << "Copying new NGINX configuration file to: " << main_nginx_conf_path;
|
||||
if (!NGEN::Filesystem::copyFile(nginx_conf_path, main_nginx_conf_path, true)) {
|
||||
return genError("Could not copy new NGINX configuration file");
|
||||
}
|
||||
|
||||
string command = "nginx -s reload 2>&1";
|
||||
auto result = Singleton::Consume<I_ShellCmd>::by<NginxUtils>()->getExecOutputAndCode(command);
|
||||
if (!result.ok() || result.unpack().second != 0) {
|
||||
if (!NGEN::Filesystem::copyFile(backup_conf_path, main_nginx_conf_path, true)) {
|
||||
return genError("Could not restore backup of NGINX configuration file");
|
||||
}
|
||||
dbgTrace(D_NGINX_MANAGER) << "Successfully restored backup of NGINX configuration file";
|
||||
return result.ok() ? genError(result.unpack().first) : genError(result.getErr());
|
||||
}
|
||||
|
||||
dbgInfo(D_NGINX_MANAGER) << "Successfully reloaded NGINX configuration file";
|
||||
|
||||
return {};
|
||||
}
|
||||
@@ -46,7 +46,7 @@ panicCFmt(const string &func, uint line, const char *fmt, ...)
|
||||
{
|
||||
va_list va;
|
||||
va_start(va, fmt);
|
||||
Debug("PM", func, line).getStreamAggr() << CFmtPrinter(fmt, va);
|
||||
Debug("PM", func, line, true).getStreamAggr() << CFmtPrinter(fmt, va);
|
||||
va_end(va);
|
||||
}
|
||||
|
||||
|
||||
1
components/utils/utilities/CMakeLists.txt
Executable file
1
components/utils/utilities/CMakeLists.txt
Executable file
@@ -0,0 +1 @@
|
||||
add_subdirectory(nginx_conf_collector)
|
||||
37
components/utils/utilities/nginx_conf_collector/CMakeLists.txt
Executable file
37
components/utils/utilities/nginx_conf_collector/CMakeLists.txt
Executable file
@@ -0,0 +1,37 @@
|
||||
include_directories(${PROJECT_SOURCE_DIR}/core/include/)
|
||||
|
||||
link_directories(${Boost_LIBRARY_DIRS})
|
||||
link_directories(${ZLIB_ROOT}/lib)
|
||||
|
||||
link_directories(${ZLIB_ROOT}/lib)
|
||||
link_directories(${CMAKE_BINARY_DIR}/core)
|
||||
link_directories(${CMAKE_BINARY_DIR}/core/compression)
|
||||
|
||||
SET(EXECUTABLE_NAME "nginx_conf_collector_bin")
|
||||
add_executable(${EXECUTABLE_NAME} nginx_conf_collector.cc)
|
||||
target_compile_definitions(${EXECUTABLE_NAME} PRIVATE "NGINX_CONF_COLLECTOR_VERSION=\"$ENV{CI_PIPELINE_ID}\"")
|
||||
|
||||
target_link_libraries(${EXECUTABLE_NAME}
|
||||
shell_cmd
|
||||
mainloop
|
||||
messaging
|
||||
event_is
|
||||
metric
|
||||
compression_utils
|
||||
z
|
||||
nginx_utils
|
||||
time_proxy
|
||||
debug_is
|
||||
version
|
||||
report
|
||||
config
|
||||
environment
|
||||
singleton
|
||||
rest
|
||||
boost_context
|
||||
boost_regex
|
||||
pthread
|
||||
)
|
||||
|
||||
install(TARGETS ${EXECUTABLE_NAME} DESTINATION bin)
|
||||
install(PROGRAMS ${EXECUTABLE_NAME} DESTINATION central_nginx_manager/bin RENAME cp-nano-nginx-conf-collector)
|
||||
148
components/utils/utilities/nginx_conf_collector/nginx_conf_collector.cc
Executable file
148
components/utils/utilities/nginx_conf_collector/nginx_conf_collector.cc
Executable file
@@ -0,0 +1,148 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <iostream>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "agent_core_utilities.h"
|
||||
#include "debug.h"
|
||||
#include "internal/shell_cmd.h"
|
||||
#include "mainloop.h"
|
||||
#include "nginx_utils.h"
|
||||
#include "time_proxy.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_NGINX_MANAGER);
|
||||
|
||||
class MainComponent
|
||||
{
|
||||
public:
|
||||
MainComponent()
|
||||
{
|
||||
time_proxy.init();
|
||||
environment.init();
|
||||
mainloop.init();
|
||||
shell_cmd.init();
|
||||
}
|
||||
|
||||
~MainComponent()
|
||||
{
|
||||
shell_cmd.fini();
|
||||
mainloop.fini();
|
||||
environment.fini();
|
||||
time_proxy.fini();
|
||||
}
|
||||
private:
|
||||
ShellCmd shell_cmd;
|
||||
MainloopComponent mainloop;
|
||||
Environment environment;
|
||||
TimeProxyComponent time_proxy;
|
||||
};
|
||||
|
||||
void
|
||||
printVersion()
|
||||
{
|
||||
#ifdef NGINX_CONF_COLLECTOR_VERSION
|
||||
cout << "Check Point NGINX configuration collector version: " << NGINX_CONF_COLLECTOR_VERSION << '\n';
|
||||
#else
|
||||
cout << "Check Point NGINX configuration collector version: Private" << '\n';
|
||||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
printUsage(const char *prog_name)
|
||||
{
|
||||
cout << "Usage: " << prog_name << " [-v] [-i /path/to/nginx.conf] [-o /path/to/output.conf]" << '\n';
|
||||
cout << " -V Print version" << '\n';
|
||||
cout << " -v Enable verbose output" << '\n';
|
||||
cout << " -i input_file Specify input file (default is /etc/nginx/nginx.conf)" << '\n';
|
||||
cout << " -o output_file Specify output file (default is ./full_nginx.conf)" << '\n';
|
||||
cout << " -h Print this help message" << '\n';
|
||||
}
|
||||
|
||||
int
|
||||
main(int argc, char *argv[])
|
||||
{
|
||||
string nginx_input_file = "/etc/nginx/nginx.conf";
|
||||
string nginx_output_file = "full_nginx.conf";
|
||||
|
||||
int opt;
|
||||
while ((opt = getopt(argc, argv, "Vvhi:o:h")) != -1) {
|
||||
switch (opt) {
|
||||
case 'V':
|
||||
printVersion();
|
||||
return 0;
|
||||
case 'v':
|
||||
Debug::setUnitTestFlag(D_NGINX_MANAGER, Debug::DebugLevel::TRACE);
|
||||
break;
|
||||
case 'i':
|
||||
nginx_input_file = optarg;
|
||||
break;
|
||||
case 'o':
|
||||
nginx_output_file = optarg;
|
||||
break;
|
||||
case 'h':
|
||||
printUsage(argv[0]);
|
||||
return 0;
|
||||
default:
|
||||
printUsage(argv[0]);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = optind; i < argc;) {
|
||||
cerr << "Unknown argument: " << argv[i] << '\n';
|
||||
printUsage(argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
dbgTrace(D_NGINX_MANAGER) << "Starting nginx configuration collector";
|
||||
|
||||
MainComponent main_component;
|
||||
auto validation_result = NginxUtils::validateNginxConf(nginx_input_file);
|
||||
if (!validation_result.ok()) {
|
||||
cerr
|
||||
<< "Could not validate nginx configuration file: "
|
||||
<< nginx_input_file
|
||||
<< '\n'
|
||||
<< validation_result.getErr();
|
||||
return 1;
|
||||
}
|
||||
|
||||
NginxConfCollector nginx_collector(nginx_input_file, nginx_output_file);
|
||||
auto result = nginx_collector.generateFullNginxConf();
|
||||
if (!result.ok()) {
|
||||
cerr << "Could not generate full nginx configuration file, error: " << result.getErr() << '\n';
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (result.unpack().empty() || !NGEN::Filesystem::exists(result.unpack())) {
|
||||
cerr << "Generated nginx configuration file does not exist: " << result.unpack() << '\n';
|
||||
return 1;
|
||||
}
|
||||
|
||||
validation_result = NginxUtils::validateNginxConf(result.unpack());
|
||||
if (!validation_result.ok()) {
|
||||
cerr
|
||||
<< "Could not validate generated nginx configuration file: "
|
||||
<< nginx_output_file
|
||||
<< '\n'
|
||||
<< validation_result.getErr();
|
||||
return 1;
|
||||
}
|
||||
|
||||
cout << "Full nginx configuration file was successfully generated: " << result.unpack() << '\n';
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
Enter file contents hereapiVersion: apiextensions.k8s.io/v1
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata :
|
||||
name : customresponses.openappsec.io
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user