Mirror of https://github.com/openappsec/openappsec.git (synced 2025-11-16 01:12:18 +03:00)

Compare commits: orianelou- ... 1.1.14
11 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 57ea5c72c5 | |
| | 962bd31d46 | |
| | 01770475ec | |
| | 78b114a274 | |
| | 81b1aec487 | |
| | fd5d093b24 | |
| | d6debf8d8d | |
| | 395b754575 | |
| | dc000372c4 | |
| | 941c641174 | |
| | fdc148aa9b | |
@@ -1,7 +1,7 @@
cmake_minimum_required (VERSION 2.8.4)
project (ngen)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-terminate")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2 -fPIC -Wall -Wno-terminate")

execute_process(COMMAND grep -c "Alpine Linux" /etc/os-release OUTPUT_VARIABLE IS_ALPINE)
if(NOT IS_ALPINE EQUAL "0")
@@ -11,6 +11,7 @@ var_fog_address=
|
||||
var_proxy=
|
||||
var_mode=
|
||||
var_token=
|
||||
var_ignore=
|
||||
init=
|
||||
|
||||
if [ ! -f /nano-service-installers/$ORCHESTRATION_INSTALLATION_SCRIPT ]; then
|
||||
@@ -33,6 +34,8 @@ while true; do
|
||||
var_proxy="$1"
|
||||
elif [ "$1" == "--hybrid-mode" ] || [ "$1" == "--standalone" ]; then
|
||||
var_mode="--hybrid_mode"
|
||||
elif [ "$1" == "--no-upgrade" ]; then
|
||||
var_ignore="--ignore all"
|
||||
elif [ "$1" == "--token" ]; then
|
||||
shift
|
||||
var_token="$1"
|
||||
@@ -60,6 +63,9 @@ fi
|
||||
if [ ! -z $var_mode ]; then
|
||||
orchestration_service_installation_flags="$orchestration_service_installation_flags $var_mode"
|
||||
fi
|
||||
if [ ! -z "$var_ignore" ]; then
|
||||
orchestration_service_installation_flags="$orchestration_service_installation_flags $var_ignore"
|
||||
fi
|
||||
|
||||
|
||||
/nano-service-installers/$ORCHESTRATION_INSTALLATION_SCRIPT --install $orchestration_service_installation_flags
|
||||
|
||||
@@ -4,7 +4,6 @@ add_subdirectory(signal_handler)
add_subdirectory(gradual_deployment)
add_subdirectory(packet)
add_subdirectory(pending_key)
add_subdirectory(health_check_manager)

add_subdirectory(utils)
add_subdirectory(attachment-intakers)
@@ -1698,7 +1698,7 @@ private:
}
};
mainloop->addFileRoutine(
I_MainLoop::RoutineType::RealTime,
I_MainLoop::RoutineType::System,
server_sock,
accept_attachment_routine,
"Nginx Attachment registration listener",
@@ -306,17 +306,20 @@ UsersAllIdentifiersConfig::parseXForwardedFor(const string &str) const
void
UsersAllIdentifiersConfig::setXFFValuesToOpaqueCtx(const HttpHeader &header, ExtractType type) const
{
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
if (!i_transaction_table || !i_transaction_table->hasState<NginxAttachmentOpaque>()) {
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table";
return;
}
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
opaque.setSavedData(HttpTransactionData::xff_vals_ctx, header.getValue());
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "xff found, value from header: " << static_cast<string>(header.getValue());
auto value = parseXForwardedFor(header.getValue());
if (!value.ok()) {
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Could not extract source identifier from X-Forwarded-For header";
return;
};
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
if (!i_transaction_table || !i_transaction_table->hasState<NginxAttachmentOpaque>()) {
dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table";
return;
}
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();

if (type == ExtractType::SOURCEIDENTIFIER) {
opaque.setSourceIdentifier(header.getKey(), value.unpack());
dbgDebug(D_NGINX_ATTACHMENT_PARSER)
@@ -1,8 +0,0 @@
include_directories(${CMAKE_SOURCE_DIR}/components/include)
link_directories(${BOOST_ROOT}/lib)

add_unit_test(
health_check_manager_ut
"health_check_manager_ut.cc"
"singleton;messaging;mainloop;health_check_manager;event_is;metric;-lboost_regex"
)
@@ -15,7 +15,8 @@ class HttpGeoFilter
public Component,
Singleton::Consume<I_MainLoop>,
Singleton::Consume<I_GeoLocation>,
Singleton::Consume<I_GenericRulebase>
Singleton::Consume<I_GenericRulebase>,
Singleton::Consume<I_Environment>
{
public:
HttpGeoFilter();
@@ -136,6 +136,7 @@ public:
static const std::string req_body;
static const std::string source_identifier;
static const std::string proxy_ip_ctx;
static const std::string xff_vals_ctx;

static const CompressionType default_response_content_encoding;
@@ -31,6 +31,7 @@ public:
virtual bool isReverseProxy() = 0;
virtual bool isCloudStorageEnabled() = 0;
virtual Maybe<std::tuple<std::string, std::string, std::string>> parseNginxMetadata() = 0;
virtual Maybe<std::tuple<std::string, std::string, std::string, std::string, std::string>> readCloudMetadata() = 0;
virtual std::map<std::string, std::string> getResolvedDetails() = 0;
#if defined(gaia) || defined(smb)
virtual bool compareCheckpointVersion(int cp_version, std::function<bool(int, int)> compare_operator) const = 0;
@@ -64,7 +64,7 @@ public:
const std::string &service_id
) = 0;

virtual std::map<std::string, PortNumber> getServiceToPortMap() = 0;
virtual std::map<std::string, std::vector<PortNumber>> getServiceToPortMap() = 0;

protected:
virtual ~I_ServiceController() {}
@@ -36,7 +36,6 @@ public:
title,
audience_team,
obj,
false,
MessageCategory::GENERIC,
std::forward<Args>(args)...
)

@@ -48,26 +47,6 @@ public:
const std::string &title,
const ReportIS::AudienceTeam &audience_team,
const T &obj,
bool is_async_message,
Args ...args)
:
ReportMessaging(
title,
audience_team,
obj,
is_async_message,
MessageCategory::GENERIC,
std::forward<Args>(args)...
)
{
}

template <typename ...Args, typename T>
ReportMessaging(
const std::string &title,
const ReportIS::AudienceTeam &audience_team,
const T &obj,
bool is_async_message,
const MessageCategory &message_type,
Args ...args)
:

@@ -77,7 +56,6 @@ public:
ReportIS::Severity::INFO,
ReportIS::Priority::LOW,
obj,
is_async_message,
message_type,
std::forward<Args>(args)...
)

@@ -99,7 +77,6 @@ public:
severity,
priority,
obj,
false,
MessageCategory::GENERIC,
std::forward<Args>(args)...
)

@@ -114,7 +91,6 @@ public:
const ReportIS::Severity &severity,
const ReportIS::Priority &priority,
const T &obj,
bool _is_async_message,
const MessageCategory &message_type,
Args ...args)
:

@@ -131,7 +107,6 @@ public:
std::chrono::seconds(0),
std::forward<Args>(args)...
),
is_async_message(_is_async_message),
message_type_tag(message_type)
{
report << LogField("eventObject", obj);

@@ -141,11 +116,13 @@ public:

ReportMessaging & operator<<(const LogField &field);

Maybe<void, HTTPResponse> sendReportSynchronously();

void setForceBuffering(bool _force_buffering);

private:
Report report;
bool is_async_message;
bool is_async_message = true;
bool force_buffering = false;
MessageCategory message_type_tag;
};
@@ -24,6 +24,7 @@ static const string url = "/api/v1/agents/events";
ReportMessaging::~ReportMessaging()
{
if (!Singleton::exists<I_Messaging>()) return;
if (!is_async_message) return;

LogRest log_rest(report);

@@ -47,6 +48,25 @@ ReportMessaging::operator<<(const LogField &field)
return *this;
}

class LogRestWithReply : public LogRest
{
public:
LogRestWithReply(const Report &report) : LogRest(report) {}

bool loadJson(const string &) const { return true; }
};

Maybe<void, HTTPResponse>
ReportMessaging::sendReportSynchronously()
{
is_async_message = false;

LogRestWithReply log_rest(report);

auto messaging = Singleton::Consume<I_Messaging>::by<ReportMessaging>();
return messaging->sendSyncMessage(HTTPMethod::POST, url, log_rest, message_type_tag);
}

void
ReportMessaging::setForceBuffering(bool _force_buffering)
{
@@ -103,7 +103,48 @@ TEST_F(ReportMessagingTest, title_only)
_
)
).Times(1);
ReportMessaging("test", ReportIS::AudienceTeam::AGENT_CORE, 1, true, ReportIS::Tags::ACCESS_CONTROL);
ReportMessaging("test", ReportIS::AudienceTeam::AGENT_CORE, 1, ReportIS::Tags::ACCESS_CONTROL);
}

TEST_F(ReportMessagingTest, sync_sending)
{
EXPECT_CALL(
mock_messaging,
sendSyncMessage(
_,
_,
"{\n"
" \"log\": {\n"
" \"eventTime\": \"Best Time ever\",\n"
" \"eventName\": \"test\",\n"
" \"eventSeverity\": \"Info\",\n"
" \"eventPriority\": \"Low\",\n"
" \"eventType\": \"Event Driven\",\n"
" \"eventLevel\": \"Log\",\n"
" \"eventLogLevel\": \"info\",\n"
" \"eventAudience\": \"Internal\",\n"
" \"eventAudienceTeam\": \"Agent Core\",\n"
" \"eventFrequency\": 0,\n"
" \"eventTags\": [\n"
" \"Access Control\"\n"
" ],\n"
" \"eventSource\": {\n"
" \"eventTraceId\": \"\",\n"
" \"eventSpanId\": \"\",\n"
" \"issuingEngineVersion\": \"\",\n"
" \"serviceName\": \"Unnamed Nano Service\"\n"
" },\n"
" \"eventData\": {\n"
" \"eventObject\": 1\n"
" }\n"
" }\n"
"}",
_,
_
)
).WillOnce(Return(HTTPResponse(HTTPStatusCode::HTTP_OK, "response!!")));
ReportMessaging report("test", ReportIS::AudienceTeam::AGENT_CORE, 1, ReportIS::Tags::ACCESS_CONTROL);
EXPECT_TRUE(report.sendReportSynchronously().ok());
}

TEST_F(ReportMessagingTest, with_buffering)

@@ -144,7 +185,7 @@ TEST_F(ReportMessagingTest, with_buffering)
true
)
).Times(1);
ReportMessaging report("test", ReportIS::AudienceTeam::AGENT_CORE, 1, true, ReportIS::Tags::ACCESS_CONTROL);
ReportMessaging report("test", ReportIS::AudienceTeam::AGENT_CORE, 1, ReportIS::Tags::ACCESS_CONTROL);
report.setForceBuffering(true);
}
@@ -1 +1,5 @@
include_directories(../waap/include)
include_directories(../waap/waap_clib)
include_directories(../../attachment-intakers/nginx_attachment)

add_library(http_geo_filter http_geo_filter.cc)
@@ -4,10 +4,16 @@
#include <unistd.h>
#include <stddef.h>
#include <algorithm>
#include <sstream>
#include <string>
#include <vector>
#include <boost/algorithm/string.hpp>

#include "cidrs_data.h"
#include "generic_rulebase/generic_rulebase.h"
#include "generic_rulebase/parameters_config.h"
#include "generic_rulebase/triggers_config.h"
#include "user_identifiers_config.h"
#include "debug.h"
#include "config.h"
#include "rest.h"

@@ -21,9 +27,10 @@ USE_DEBUG_FLAG(D_GEO_FILTER);

static const LogTriggerConf default_triger;

class HttpGeoFilter::Impl : public Listener<NewHttpTransactionEvent>
class HttpGeoFilter::Impl : public Listener<HttpRequestHeaderEvent>
{
public:

void
init()
{

@@ -55,32 +62,42 @@ public:
}

EventVerdict
respond(const NewHttpTransactionEvent &event) override
respond(const HttpRequestHeaderEvent &event) override
{
dbgTrace(D_GEO_FILTER) << getListenerName() << " new transaction event";

if (!ParameterException::isGeoLocationExceptionExists() &&
!getConfiguration<GeoConfig>("rulebase", "httpGeoFilter").ok()
) {
dbgTrace(D_GEO_FILTER) << "No geo location practice nor exception was found. Returning default verdict";
if (!event.isLastHeader()) return EventVerdict(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT);
std::set<std::string> xff_set;
auto env = Singleton::Consume<I_Environment>::by<HttpGeoFilter>();
auto maybe_xff = env->get<std::string>(HttpTransactionData::xff_vals_ctx);
if (!maybe_xff.ok()) {
dbgTrace(D_GEO_FILTER) << "failed to get xff vals from env";
} else {
xff_set = split(maybe_xff.unpack(), ',');
}
dbgDebug(D_GEO_FILTER) << getListenerName() << " last header, start lookup";

if (xff_set.size() > 0) {
removeTrustedIpsFromXff(xff_set);
} else {
dbgDebug(D_GEO_FILTER) << "xff not found in headers";
}

auto maybe_source_ip = env->get<IPAddr>(HttpTransactionData::client_ip_ctx);
if (!maybe_source_ip.ok()) {
dbgWarning(D_GEO_FILTER) << "failed to get source ip from env";
return EventVerdict(default_action);
}

I_GeoLocation *i_geo_location = Singleton::Consume<I_GeoLocation>::by<HttpGeoFilter>();
auto asset_location = i_geo_location->lookupLocation(event.getSourceIP());
if (!asset_location.ok()) {
dbgTrace(D_GEO_FILTER) << "Lookup location failed, Error: " << asset_location.getErr();
return EventVerdict(default_action);
}
auto source_ip = convertIpAddrToString(maybe_source_ip.unpack());
xff_set.insert(source_ip);

EnumArray<I_GeoLocation::GeoLocationField, std::string> geo_location_data = asset_location.unpack();

ngx_http_cp_verdict_e exception_verdict = getExceptionVerdict(event, geo_location_data);
ngx_http_cp_verdict_e exception_verdict = getExceptionVerdict(xff_set);
if (exception_verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT) {
return EventVerdict(exception_verdict);
}

ngx_http_cp_verdict_e geo_lookup_verdict = getGeoLookupVerdict(event, geo_location_data);
ngx_http_cp_verdict_e geo_lookup_verdict = getGeoLookupVerdict(xff_set);
if (geo_lookup_verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT) {
return EventVerdict(geo_lookup_verdict);
}

@@ -88,6 +105,73 @@ public:
}

private:
std::set<std::string>
split(const std::string& s, char delim) {
std::set<std::string> elems;
std::stringstream ss(s);
std::string value;
while (std::getline(ss, value, delim)) {
elems.insert(trim(value));
}
return elems;
}
// trim from start
static inline std::string &ltrim(std::string &s) {
s.erase(s.begin(), std::find_if(s.begin(), s.end(),
[] (char c) { return !std::isspace(c); }));
return s;
}
// trim from end
static inline std::string &rtrim(std::string &s) {
s.erase(std::find_if(s.rbegin(), s.rend(),
[] (char c) { return !std::isspace(c); }).base(), s.end());
return s;
}

// trim from both ends
static inline std::string &trim(std::string &s) {
return ltrim(rtrim(s));
}

void
removeTrustedIpsFromXff(std::set<std::string> &xff_set)
{
auto identify_config = getConfiguration<UsersAllIdentifiersConfig>(
"rulebase",
"usersIdentifiers"
);
if (!identify_config.ok()) {
dbgDebug(D_GEO_FILTER) << "did not find users identifiers definition in policy";
} else {
auto trusted_ips = (*identify_config).getHeaderValuesFromConfig("x-forwarded-for");
for (auto it = xff_set.begin(); it != xff_set.end();) {
if (isIpTrusted(*it, trusted_ips)) {
dbgTrace(D_GEO_FILTER) << "xff value is in trusted ips: " << *it;
it = xff_set.erase(it);
} else {
dbgTrace(D_GEO_FILTER) << "xff value is not in trusted ips: " << *it;
++it;
}
}
}
}

bool
isIpTrusted(const string &ip, const vector<string> &trusted_ips)
{
for (const auto &trusted_ip : trusted_ips) {
CIDRSData cidr_data(trusted_ip);
if (
ip == trusted_ip ||
(cidr_data.contains(ip))
) {
return true;
}
}
return false;
}

string
convertIpAddrToString(const IPAddr &ip_to_convert)
{

@@ -117,54 +201,75 @@ private:
}

ngx_http_cp_verdict_e
getGeoLookupVerdict(
const NewHttpTransactionEvent &event,
const EnumArray<I_GeoLocation::GeoLocationField, std::string> &geo_location_data)
getGeoLookupVerdict(const std::set<std::string> &sources)
{
auto maybe_geo_config = getConfiguration<GeoConfig>("rulebase", "httpGeoFilter");
if (!maybe_geo_config.ok()) {
dbgWarning(D_GEO_FILTER) << "Failed to load HTTP Geo Filter config. Error:" << maybe_geo_config.getErr();
dbgTrace(D_GEO_FILTER) << "Failed to load HTTP Geo Filter config. Error:" << maybe_geo_config.getErr();
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT;
}
GeoConfig geo_config = maybe_geo_config.unpack();
string country_code = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_CODE];
EnumArray<I_GeoLocation::GeoLocationField, std::string> geo_location_data;
I_GeoLocation *i_geo_location = Singleton::Consume<I_GeoLocation>::by<HttpGeoFilter>();

if (geo_config.isAllowedCountry(country_code)) {
dbgTrace(D_GEO_FILTER)
<< "geo verdict ACCEPT, practice id: "
<< geo_config.getId()
<< ", country code: "
<< country_code;
generateVerdictLog(
ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT,
event,
geo_config.getId(),
true,
geo_location_data
);
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
}
if (geo_config.isBlockedCountry(country_code)) {
dbgTrace(D_GEO_FILTER)
<< "geo verdict DROP, practice id: "
<< geo_config.getId()
<< ", country code: "
<< country_code;
generateVerdictLog(
ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP,
event,
geo_config.getId(),
true,
geo_location_data
);
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
for (const std::string& source : sources) {
Maybe<IPAddr> maybe_source_ip = IPAddr::createIPAddr(source);
if (!maybe_source_ip.ok()){
dbgWarning(D_GEO_FILTER) <<
"create ip address failed for source: " <<
source <<
", Error: " <<
maybe_source_ip.getErr();
continue;
}
auto asset_location = i_geo_location->lookupLocation(maybe_source_ip.unpack());
if (!asset_location.ok()) {
dbgWarning(D_GEO_FILTER) <<
"Lookup location failed for source: " <<
source <<
", Error: " <<
asset_location.getErr();
continue;
}

geo_location_data = asset_location.unpack();

string country_code = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_CODE];

if (geo_config.isAllowedCountry(country_code)) {
dbgTrace(D_GEO_FILTER)
<< "geo verdict ACCEPT, practice id: "
<< geo_config.getId()
<< ", country code: "
<< country_code;
generateVerdictLog(
ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT,
geo_config.getId(),
true,
geo_location_data
);
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
}
if (geo_config.isBlockedCountry(country_code)) {
dbgTrace(D_GEO_FILTER)
<< "geo verdict DROP, practice id: "
<< geo_config.getId()
<< ", country code: "
<< country_code;
generateVerdictLog(
ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP,
geo_config.getId(),
true,
geo_location_data
);
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
}
}
dbgTrace(D_GEO_FILTER)
<< "No matched practice. Returned default action: "
<< geo_config.getDefaultAction();
generateVerdictLog(
convertActionToVerdict(geo_config.getDefaultAction()),
event,
geo_config.getId(),
true,
geo_location_data,

@@ -176,7 +281,6 @@ private:
Maybe<pair<ngx_http_cp_verdict_e, string>>
getBehaviorsVerdict(
const unordered_map<string, set<string>> &behaviors_map_to_search,
const NewHttpTransactionEvent &event,
EnumArray<I_GeoLocation::GeoLocationField, std::string> geo_location_data)
{
bool is_matched = false;

@@ -193,7 +297,6 @@ private:
dbgTrace(D_GEO_FILTER) << "behavior verdict: DROP, exception id: " << behavior.getId();
generateVerdictLog(
matched_verdict,
event,
behavior.getId(),
false,
geo_location_data

@@ -218,63 +321,74 @@ private:
}

ngx_http_cp_verdict_e
getExceptionVerdict(
const NewHttpTransactionEvent &event,
EnumArray<I_GeoLocation::GeoLocationField, std::string> geo_location_data
){
string country_code = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_CODE];
string country_name = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_NAME];
string source_ip = convertIpAddrToString(event.getSourceIP());
getExceptionVerdict(const std::set<std::string> &sources) {

pair<ngx_http_cp_verdict_e, string> curr_matched_behavior;
ngx_http_cp_verdict_e verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT;
I_GeoLocation *i_geo_location = Singleton::Consume<I_GeoLocation>::by<HttpGeoFilter>();
EnumArray<I_GeoLocation::GeoLocationField, std::string> geo_location_data;

dbgTrace(D_GEO_FILTER)
for (const std::string& source : sources) {

Maybe<IPAddr> maybe_source_ip = IPAddr::createIPAddr(source);
if (!maybe_source_ip.ok()){
dbgWarning(D_GEO_FILTER) <<
"create ip address failed for source: " <<
source <<
", Error: " <<
maybe_source_ip.getErr();
continue;
}

auto asset_location = i_geo_location->lookupLocation(maybe_source_ip.unpack());
if (!asset_location.ok()) {
dbgWarning(D_GEO_FILTER) << "Lookup location failed for source: " <<
source <<
", Error: " <<
asset_location.getErr();
continue;
}
geo_location_data = asset_location.unpack();
string country_code = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_CODE];
string country_name = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_NAME];
dbgTrace(D_GEO_FILTER)
<< "Get exception verdict. "
<< "country code: "
<< country_code
<< ", country name: "
<< country_name
<< ", source ip address: "
<< source_ip;
<< source;

unordered_map<string, set<string>> exception_value_source_ip = {{"sourceIP", {source_ip}}};
auto matched_behavior_maybe = getBehaviorsVerdict(exception_value_source_ip, event, geo_location_data);
if (matched_behavior_maybe.ok()) {
curr_matched_behavior = matched_behavior_maybe.unpack();
verdict = curr_matched_behavior.first;
if (verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP) {
return verdict;
unordered_map<string, set<string>> exception_value_country_code = {
{"countryCode", {country_code}}
};
auto matched_behavior_maybe = getBehaviorsVerdict(exception_value_country_code, geo_location_data);
if (matched_behavior_maybe.ok()) {
curr_matched_behavior = matched_behavior_maybe.unpack();
verdict = curr_matched_behavior.first;
if (verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP) {
return verdict;
}
}

unordered_map<string, set<string>> exception_value_country_name = {
{"countryName", {country_name}}
};
matched_behavior_maybe = getBehaviorsVerdict(exception_value_country_name, geo_location_data);
if (matched_behavior_maybe.ok()) {
curr_matched_behavior = matched_behavior_maybe.unpack();
verdict = curr_matched_behavior.first;
if (verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP) {
return verdict;
}
}
}

unordered_map<string, set<string>> exception_value_country_code = {
{"countryCode", {country_code}}
};
matched_behavior_maybe = getBehaviorsVerdict(exception_value_country_code, event, geo_location_data);
if (matched_behavior_maybe.ok()) {
curr_matched_behavior = matched_behavior_maybe.unpack();
verdict = curr_matched_behavior.first;
if (verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP) {
return verdict;
}
}

unordered_map<string, set<string>> exception_value_country_name = {
{"countryName", {country_name}}
};
matched_behavior_maybe = getBehaviorsVerdict(exception_value_country_name, event, geo_location_data);
if (matched_behavior_maybe.ok()) {
curr_matched_behavior = matched_behavior_maybe.unpack();
verdict = curr_matched_behavior.first;
if (verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP) {
return verdict;
}
}
if (verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT) {
generateVerdictLog(
verdict,
event,
curr_matched_behavior.second,
false,
geo_location_data

@@ -286,7 +400,6 @@ private:
void
generateVerdictLog(
const ngx_http_cp_verdict_e &verdict,
const NewHttpTransactionEvent &event,
const string &matched_id,
bool is_geo_filter,
const EnumArray<I_GeoLocation::GeoLocationField, std::string> geo_location_data,

@@ -307,14 +420,27 @@ private:
LogField(matched_on, matched_id),
ReportIS::Tags::HTTP_GEO_FILTER
);
log
<< LogField("sourceIP", convertIpAddrToString(event.getSourceIP()))
<< LogField("sourcePort", event.getSourcePort())
<< LogField("hostName", event.getDestinationHost())
<< LogField("httpMethod", event.getHttpMethod())
<< LogField("securityAction", is_prevent ? "Prevent" : "Detect");
auto env = Singleton::Consume<I_Environment>::by<HttpGeoFilter>();
auto source_ip = env->get<IPAddr>(HttpTransactionData::client_ip_ctx);
if (source_ip.ok()) log << LogField("sourceIP", convertIpAddrToString(source_ip.unpack()));

auto source_identifier = env->get<string>(HttpTransactionData::source_identifier);
if (source_identifier.ok()) log << LogField("httpSourceId", source_identifier.unpack());

auto source_port = env->get<string>(HttpTransactionData::client_port_ctx);
if (source_port.ok()) log << LogField("sourcePort", source_port.unpack());

auto host_name = env->get<string>(HttpTransactionData::host_name_ctx);
if (host_name.ok()) log << LogField("hostName", host_name.unpack());

auto method = env->get<string>(HttpTransactionData::method_ctx);
if (method.ok()) log << LogField("httpMethod", method.unpack());

log << LogField("securityAction", is_prevent ? "Prevent" : "Detect");

if (is_default_action) log << LogField("isDefaultSecurityAction", true);
auto xff = env->get<string>(HttpTransactionData::xff_vals_ctx);
if (xff.ok()) log << LogField("proxyIP", xff.unpack());

log
<< LogField("sourceCountryCode", geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_CODE])
@@ -142,6 +142,13 @@ string disabled_settings =
"}"
"],\n";

string local_intelligence =
"\"intelligence\":{"
" \"local intelligence server ip\":\"127.0.0.1\","
" \"local intelligence server primary port\":9090"
"}\n,";

string policy =
"\"rulebase\": {"
"\"usersIdentifiers\": ["

@@ -259,7 +266,7 @@ Layer7AccessControlTest::verifyReport(

TEST_F(Layer7AccessControlTest, ReturnAcceptVerdict)
{
stringstream ss_conf(prevent_settings + policy);
stringstream ss_conf(prevent_settings + local_intelligence + policy);
Singleton::Consume<Config::I_Config>::from(config)->loadConfiguration(ss_conf);

string intelligence_response_ok = loadIntelligenceResponse("data/ok_intelligence_response.json");

@@ -305,7 +312,7 @@ TEST_F(Layer7AccessControlTest, ReturnAcceptVerdict)

TEST_F(Layer7AccessControlTest, ReturnDropVerdictOnMaliciousReputation)
{
stringstream ss_conf(prevent_settings + policy);
stringstream ss_conf(prevent_settings + local_intelligence + policy);
Singleton::Consume<Config::I_Config>::from(config)->loadConfiguration(ss_conf);

string malicious_intelligence_response = loadIntelligenceResponse("data/malicious_intelligence_response.json");

@@ -351,7 +358,7 @@ TEST_F(Layer7AccessControlTest, ReturnDropVerdictOnMaliciousReputation)

TEST_F(Layer7AccessControlTest, ReturnDropVerdictCacheBased)
{
stringstream ss_conf(prevent_settings + policy);
stringstream ss_conf(prevent_settings + local_intelligence + policy);
Singleton::Consume<Config::I_Config>::from(config)->loadConfiguration(ss_conf);

string malicious_intelligence_response = loadIntelligenceResponse("data/malicious_intelligence_response.json");

@@ -403,7 +410,7 @@ TEST_F(Layer7AccessControlTest, ReturnDropVerdictCacheBased)

TEST_F(Layer7AccessControlTest, AcceptOnDetect)
{
stringstream ss_conf(detect_settings + policy);
stringstream ss_conf(detect_settings + local_intelligence + policy);
Singleton::Consume<Config::I_Config>::from(config)->loadConfiguration(ss_conf);

string malicious_intelligence_response = loadIntelligenceResponse("data/malicious_intelligence_response.json");

@@ -449,7 +456,7 @@ TEST_F(Layer7AccessControlTest, AcceptOnDetect)

TEST_F(Layer7AccessControlTest, FallbackToSourceIPAndDrop)
{
stringstream ss_conf(prevent_settings + policy);
stringstream ss_conf(prevent_settings + local_intelligence + policy);
Singleton::Consume<Config::I_Config>::from(config)->loadConfiguration(ss_conf);

string malicious_intelligence_response = loadIntelligenceResponse("data/malicious_intelligence_response.json");
@@ -132,7 +132,7 @@ void
NewLoggingService::load(cereal::JSONInputArchive &archive_in)
{
parseAppsecJSONKey<string>("address", address, archive_in);
parseAppsecJSONKey<string>("proto", proto, archive_in);
parseAppsecJSONKey<string>("proto", proto, archive_in, "tcp");
if (valid_protocols.count(proto) == 0) {
dbgWarning(D_LOCAL_POLICY) << "AppSec Logging Service - proto invalid: " << proto;
throw PolicyGenException("AppSec Logging Service - proto invalid: " + proto);
@@ -12,6 +12,8 @@ add_subdirectory(manifest_controller)
add_subdirectory(update_communication)
add_subdirectory(details_resolver)
add_subdirectory(health_check)
add_subdirectory(health_check_manager)
add_subdirectory(updates_process_reporter)
add_subdirectory(env_details)

#add_subdirectory(orchestration_ut)
@@ -45,6 +45,7 @@ public:
bool isVersionAboveR8110() override;
bool isReverseProxy() override;
bool isCloudStorageEnabled() override;
Maybe<tuple<string, string, string, string, string>> readCloudMetadata() override;
Maybe<tuple<string, string, string>> parseNginxMetadata() override;
#if defined(gaia) || defined(smb)
bool compareCheckpointVersion(int cp_version, std::function<bool(int, int)> compare_operator) const override;

@@ -151,6 +152,7 @@ DetailsResolver::Impl::isCloudStorageEnabled()
bool
DetailsResolver::Impl::isKernelVersion3OrHigher()
{
#if defined(gaia) || defined(smb)
static const string cmd =
"clish -c 'show version os kernel' | awk '{print $4}' "
"| cut -d '.' -f 1 | awk -F: '{ if ( $1 >= 3 ) {print 1} else {print 0}}'";

@@ -159,12 +161,14 @@ DetailsResolver::Impl::isKernelVersion3OrHigher()
if (is_gogo.ok() && !is_gogo.unpack().empty()) {
return is_gogo.unpack().front() == '1';
}
#endif
return false;
}

bool
DetailsResolver::Impl::isGwNotVsx()
{
#if defined(gaia) || defined(smb)
static const string is_gw_cmd = "cpprod_util FwIsFirewallModule";
static const string is_vsx_cmd = "cpprod_util FWisVSX";
auto is_gw = DetailsResolvingHanlder::getCommandOutput(is_gw_cmd);

@@ -172,6 +176,7 @@ DetailsResolver::Impl::isGwNotVsx()
if (is_gw.ok() && is_vsx.ok() && !is_gw.unpack().empty() && !is_vsx.unpack().empty()) {
return is_gw.unpack().front() == '1' && is_vsx.unpack().front() == '0';
}
#endif
return false;
}

@@ -188,17 +193,16 @@ DetailsResolver::Impl::getCheckpointVersion() const
{
#ifdef gaia
static const string cmd =
"echo $CPDIR | awk -F'-' '{print $NF}' | cut -c 2- |"
" awk -F'.' '{ if( NF == 1 ) {print $1\"00\"} else {print $1$2} }'";
"echo $CPDIR | awk '{sub(/.*-R/,\"\"); sub(/\\/.*/,\"\")}/^[0-9]*$/{$0=$0\".00\"}{sub(/\\./, \"\"); print}'";
#else // smb
static const string cmd = "sqlcmd 'select major,minor from cpver' |"
"awk '{if ($1 == \"major\") v += (substr($3,2) * 100);"
" if ($1 == \"minor\") v += $3; } END { print v}'";

#endif // gaia
auto version_out = DetailsResolvingHanlder::getCommandOutput(cmd);
int cp_version = 0;
if (version_out.ok()) {
dbgTrace(D_ORCHESTRATOR) << "Identified version " << version_out.unpack();
stringstream version_stream(version_out.unpack());
version_stream >> cp_version;
}

@@ -300,6 +304,67 @@ DetailsResolver::Impl::parseNginxMetadata()
return make_tuple(config_opt, cc_opt, nginx_version);
}

Maybe<tuple<string, string, string, string, string>>
DetailsResolver::Impl::readCloudMetadata()
{
auto env_read_cloud_metadata = []() -> Maybe<tuple<string, string, string, string, string>> {
string account_id = getenv("CLOUD_ACCOUNT_ID") ? getenv("CLOUD_ACCOUNT_ID") : "";
string vpc_id = getenv("CLOUD_VPC_ID") ? getenv("CLOUD_VPC_ID") : "";
string instance_id = getenv("CLOUD_INSTANCE_ID") ? getenv("CLOUD_INSTANCE_ID") : "";
string instance_local_ip = getenv("CLOUD_INSTANCE_LOCAL_IP") ? getenv("CLOUD_INSTANCE_LOCAL_IP") : "";
string region = getenv("CLOUD_REGION") ? getenv("CLOUD_REGION") : "";

if (
account_id.empty() ||
vpc_id.empty() ||
instance_id.empty() ||
instance_local_ip.empty() ||
region.empty()) {
return genError("Could not read cloud metadata");
}

return make_tuple(account_id, vpc_id, instance_id, instance_local_ip, region);
};

auto cloud_metadata = env_read_cloud_metadata();
if (!cloud_metadata.ok()) {
const string cmd = getFilesystemPathConfig() + "/scripts/get-cloud-metadata.sh";
dbgTrace(D_ORCHESTRATOR) << cloud_metadata.getErr() << ", trying to fetch it via cmd: " << cmd;

auto result = DetailsResolvingHanlder::getCommandOutput(cmd);
if (result.ok()) {
istringstream iss(result.unpack());
string line;
while (getline(iss, line)) {
size_t pos = line.find('=');
if (pos != string::npos) {
string key = line.substr(0, pos);
string value = line.substr(pos + 1);
if (!key.empty() && !value.empty()) setenv(key.c_str(), value.c_str(), 1);
}
}
cloud_metadata = env_read_cloud_metadata();
} else {
dbgWarning(D_ORCHESTRATOR) << "Could not fetch cloud metadata from cmd: " << result.getErr();
}
}

if (!cloud_metadata.ok()) {
dbgWarning(D_ORCHESTRATOR) << cloud_metadata.getErr();
return genError("Failed to fetch cloud metadata");
}

dbgTrace(D_ORCHESTRATOR)
<< "Successfully fetched cloud metadata: "
<< ::get<0>(cloud_metadata.unpack()) << ", "
<< ::get<1>(cloud_metadata.unpack()) << ", "
<< ::get<2>(cloud_metadata.unpack()) << ", "
<< ::get<3>(cloud_metadata.unpack()) << ", "
<< ::get<4>(cloud_metadata.unpack());

return cloud_metadata;
}

DetailsResolver::DetailsResolver() : Component("DetailsResolver"), pimpl(make_unique<Impl>()) {}

DetailsResolver::~DetailsResolver() {}
@@ -15,21 +15,25 @@
#define __CHECKPOINT_PRODUCT_HANDLERS_H__

#include <algorithm>
#include <regex>
#include <boost/regex.hpp>
#include <boost/algorithm/string.hpp>

#if defined(gaia)

Maybe<string>
checkSAMLSupportedBlade(const string &command_output)
{
string supportedBlades[3] = {"identityServer", "vpn", "cvpn"};
// uncomment when vpn will support SAML authentication
// string supportedBlades[3] = {"identityServer", "vpn", "cvpn"};
string supportedBlades[1] = {"identityServer"};
for(const string &blade : supportedBlades) {
if (command_output.find(blade) != string::npos) {
return string("true");
}
}

return genError("Current host does not have SAML capability");
return string("false");
}

Maybe<string>

@@ -40,7 +44,7 @@ checkIDABlade(const string &command_output)
return string("true");
}

return genError("Current host does not have IDA installed");
return string("false");
}

Maybe<string>

@@ -50,23 +54,26 @@ checkSAMLPortal(const string &command_output)
return string("true");
}

return genError("Current host does not have SAML Portal configured");
return string("false");
}

Maybe<string>
checkPepIdaIdnStatus(const string &command_output)
{
if (command_output.find("ida_idn_nano_service_enabled=1") != string::npos) {
if (command_output.find("nac_pep_scaled_sharing_enabled = 1") != string::npos) {
return string("true");
}

return genError("Current host does not have PEP control IDA IDN enabled");
return string("false");
}

Maybe<string>
getIDAGaiaPackages(const string &command_output)
{
return string("idaSaml_gaia;idaIdn_gaia;idaIdnBg_gaia;");
string result = "idaSaml_gaia;idaIdn_gaia;idaIdnBg_gaia;";
if (command_output.find("nac_pep_scaled_sharing_enabled = 1") != string::npos) {
result += "agentIntelligenceService_gaia;";
}
return result;
}

Maybe<string>

@@ -82,7 +89,7 @@ checkIDP(shared_ptr<istream> file_stream)
}
}

return genError("Identity Provider was not found");
return string("false");
}

#endif // gaia

@@ -324,6 +331,34 @@ getSmbGWIPSecVPNBlade(const string &command_output)
{
return getSmbBlade(command_output, "IPSec VPN Blade was not found");
}

Maybe<string>
extractManagements(const string &command_output)
{
size_t start_pos = command_output.find(":masters(");
if (start_pos == string::npos) {
return genError("Starting pattern \":masters(\" not found.");
}
size_t end_pos = command_output.find("))):", start_pos);
if (end_pos == string::npos) {
return genError("Ending pattern \"))):\" not found.");
}
string input_string = command_output.substr(start_pos, end_pos - start_pos + 3);
string json_output = "[";
regex pattern("\\(ReferenceObject\\:Uid\\(\"\\{([\\w-]+)\\}\"\\)\\:Name\\(([^\\)]+)\\)\\:Table\\(([^\\)]+)\\)\\)");
smatch matches;
auto words_begin = sregex_iterator(input_string.begin(), input_string.end(), pattern);
auto words_end = sregex_iterator();
for (sregex_iterator i = words_begin; i != words_end; ++i) {
const smatch& match = *i;
string uid = boost::algorithm::to_lower_copy(match[1].str());
string name = match[2].str();
if (json_output.back() != '[') json_output += ",";
json_output += "{\"Uid\":\"" + uid + "\",\"Name\":\"" + name + "\"}";
}
json_output += "]";
return json_output;
}
#endif // gaia || smb

#if defined(gaia)
@@ -43,12 +43,15 @@ SHELL_PRE_CMD("gunzip local.cfg", "gunzip -c $FWDIR/state/local/FW1/local.cfg.gz
#if defined(gaia) || defined(smb)
SHELL_CMD_HANDLER("cpProductIntegrationMgmtObjectType", "cpprod_util CPPROD_IsMgmtMachine", getMgmtObjType)
SHELL_CMD_HANDLER("prerequisitesForHorizonTelemetry",
"[ -f /var/log/nano_agent/cp-nano-horizon-telemetry-prerequisites.log ] "
"&& head -1 /var/log/nano_agent/cp-nano-horizon-telemetry-prerequisites.log || echo ''",
"FS_PATH=<FILESYSTEM-PREFIX>; [ -f ${FS_PATH}/cp-nano-horizon-telemetry-prerequisites.log ] "
"&& head -1 ${FS_PATH}/cp-nano-horizon-telemetry-prerequisites.log || echo ''",
checkIsInstallHorizonTelemetrySucceeded)
SHELL_CMD_HANDLER("QUID", "[ -d /opt/CPquid ] "
"&& python3 /opt/CPquid/Quid_Api.py -i /opt/CPotelcol/quid_api/get_global_id.json | jq -r .message || echo ''",
getQUID)
SHELL_CMD_HANDLER("SMO_QUID", "[ -d /opt/CPquid ] "
"&& python3 /opt/CPquid/Quid_Api.py -i /opt/CPotelcol/quid_api/get_smo_quid.json | jq -r .message || echo ''",
getQUID)
SHELL_CMD_HANDLER("hasSDWan", "[ -f $FWDIR/bin/sdwan_steering ] && echo '1' || echo '0'", checkHasSDWan)
SHELL_CMD_HANDLER(
"canUpdateSDWanData",

@@ -99,8 +102,8 @@ SHELL_CMD_HANDLER(
SHELL_CMD_HANDLER("hasSAMLSupportedBlade", "enabled_blades", checkSAMLSupportedBlade)
SHELL_CMD_HANDLER("hasIDABlade", "enabled_blades", checkIDABlade)
SHELL_CMD_HANDLER("hasSAMLPortal", "mpclient status nac", checkSAMLPortal)
SHELL_CMD_HANDLER("hasIdaIdnEnabled", "pep control IDN_nano_Srv_support status", checkPepIdaIdnStatus)
SHELL_CMD_HANDLER("requiredNanoServices", "ida_packages", getIDAGaiaPackages)
SHELL_CMD_HANDLER("hasIdaIdnEnabled", "fw ctl get int nac_pep_scaled_sharing_enabled", checkPepIdaIdnStatus)
SHELL_CMD_HANDLER("requiredNanoServices", "fw ctl get int nac_pep_scaled_sharing_enabled", getIDAGaiaPackages)
SHELL_CMD_HANDLER(
"cpProductIntegrationMgmtParentObjectName",
"cat $FWDIR/database/myself_objects.C "

@@ -149,6 +152,12 @@ SHELL_CMD_HANDLER(
"| awk -F '[:()]' '/:masters/ {found=1; next} found && /:Name/ {print $3; exit}'",
getSMCBasedMgmtName
)
SHELL_CMD_HANDLER(
"managements",
"sed -n '/:masters (/,$p' $FWDIR/database/myself_objects.C |"
" sed -e ':a' -e 'N' -e '$!ba' -e 's/\\n//g' -e 's/\t//g' -e 's/ //g' | sed 's/))):.*/)))):/'",
extractManagements
)
#endif //gaia

#if defined(smb)

@@ -199,6 +208,13 @@ SHELL_CMD_HANDLER(
"| awk -F '[:()]' '/:masters/ {found=1; next} found && /:Name/ {print $3; exit}'",
getSMCBasedMgmtName
)

SHELL_CMD_HANDLER(
"managements",
"sed -n '/:masters (/,$p' /tmp/local.cfg |"
" sed -e ':a' -e 'N' -e '$!ba' -e 's/\\n//g' -e 's/\t//g' -e 's/ //g' | sed 's/))):.*/)))):/'",
extractManagements
)
#endif//smb

SHELL_CMD_OUTPUT("kernel_version", "uname -r")
@@ -77,14 +77,22 @@ void
DetailsResolvingHanlder::Impl::init()
{
string actual_filesystem_prefix = getFilesystemPathConfig();
size_t place_holder_size = filesystem_place_holder.size();

for (auto &file_handler : file_content_handlers) {
string &path = file_handler.second.first;
size_t place_holder_size = filesystem_place_holder.size();
if (path.substr(0, place_holder_size) == filesystem_place_holder) {
path = actual_filesystem_prefix + path.substr(place_holder_size);
}
}

for (auto &cmd_handler_pair : shell_command_handlers) {
string &cmd_str = cmd_handler_pair.second.first;
size_t fs_pos = cmd_str.find(filesystem_place_holder);
if (fs_pos != string::npos) {
cmd_str.replace(fs_pos, place_holder_size, actual_filesystem_prefix);
}
}
}

map<string, string>
@@ -246,7 +246,7 @@ private:
}
}
routine_id = i_mainloop->addFileRoutine(
I_MainLoop::RoutineType::RealTime,
I_MainLoop::RoutineType::System,
server_sock,
[this] () { handleConnection(); },
"Health check probe server",

@@ -344,7 +344,7 @@ private:
dbgDebug(D_HEALTH_CHECK) << "Successfully accepted client, client fd: " << new_client_socket;
open_connections_counter++;
auto curr_routine = i_mainloop->addOneTimeRoutine(
I_MainLoop::RoutineType::RealTime,
I_MainLoop::RoutineType::System,
[this] ()
{
auto curr_routine_id = i_mainloop->getCurrentRoutineId().unpack();
@@ -3,5 +3,5 @@ link_directories(${BOOST_ROOT}/lib)
add_unit_test(
health_check_ut
"health_check_ut.cc"
"health_check;messaging;mainloop;singleton;agent_details;config;logging;metric;event_is;health_check_manager;-lboost_regex;-lboost_system"
"health_check;updates_process_reporter;messaging;mainloop;singleton;agent_details;config;logging;metric;event_is;health_check_manager;-lboost_regex;-lboost_system"
)
@@ -111,12 +111,12 @@ TEST_F(HealthCheckerTest, clientConnection)

EXPECT_CALL(
mock_mainloop,
addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true)
addFileRoutine(I_MainLoop::RoutineType::System, _, _, _, true)
).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0)));

EXPECT_CALL(
mock_mainloop,
addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Health check probe connection handler", true)
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Health check probe connection handler", true)
).WillOnce(DoAll(SaveArg<1>(&connection_handler_routine), Return(0)));

int socket = 1;

@@ -145,7 +145,7 @@ TEST_F(HealthCheckerTest, loadFromDynamicConfiguration)

EXPECT_CALL(
mock_mainloop,
addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true)
addFileRoutine(I_MainLoop::RoutineType::System, _, _, _, true)
).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0)));

health_checker.init();

@@ -183,7 +183,7 @@ TEST_F(HealthCheckerTest, connectionsLimit)

EXPECT_CALL(
mock_mainloop,
addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true)
addFileRoutine(I_MainLoop::RoutineType::System, _, _, _, true)
).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0)));

EXPECT_CALL(mock_mainloop, doesRoutineExist(_)).WillRepeatedly(Return(false));

@@ -218,12 +218,12 @@ TEST_F(HealthCheckerTest, disablingAfterEnabled)

EXPECT_CALL(
mock_mainloop,
addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true)
addFileRoutine(I_MainLoop::RoutineType::System, _, _, _, true)
).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0)));

EXPECT_CALL(
mock_mainloop,
addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Health check probe connection handler", true)
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Health check probe connection handler", true)
).WillOnce(DoAll(SaveArg<1>(&connection_handler_routine), Return(0)));

int socket = 1;

@@ -273,12 +273,12 @@ TEST_F(HealthCheckerTest, changePortIpConfig)

EXPECT_CALL(
mock_mainloop,
addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true)
addFileRoutine(I_MainLoop::RoutineType::System, _, _, _, true)
).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0)));

EXPECT_CALL(
mock_mainloop,
addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Health check probe connection handler", true)
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Health check probe connection handler", true)
).WillOnce(DoAll(SaveArg<1>(&connection_handler_routine), Return(0)));

int socket = 1;

@@ -321,12 +321,12 @@ TEST_F(HealthCheckerTest, FailedHealthCheck)

EXPECT_CALL(
mock_mainloop,
addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true)
addFileRoutine(I_MainLoop::RoutineType::System, _, _, _, true)
).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0)));

EXPECT_CALL(
mock_mainloop,
addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Health check probe connection handler", true)
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Health check probe connection handler", true)
).WillOnce(DoAll(SaveArg<1>(&connection_handler_routine), Return(0)));

int socket = 1;
@@ -1,3 +1 @@
add_library(health_check_manager health_check_manager.cc)

add_subdirectory(health_check_manager_ut)
@@ -21,6 +21,7 @@
|
||||
#include "config.h"
|
||||
#include "cereal/archives/json.hpp"
|
||||
#include "customized_cereal_map.h"
|
||||
#include "updates_process_event.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
@@ -79,19 +80,22 @@ class HealthCheckValue
|
||||
public:
|
||||
HealthCheckValue() = default;
|
||||
|
||||
HealthCheckValue(HealthCheckStatus raw_status, const map<string, HealthCheckStatusReply> &descriptions)
|
||||
HealthCheckValue(HealthCheckStatus raw_status, const HealthCheckStatusReply &description)
|
||||
:
|
||||
status(raw_status)
|
||||
{
|
||||
for (const auto &single_stat : descriptions) {
|
||||
if (single_stat.second.getStatus() == HealthCheckStatus::HEALTHY) {
|
||||
dbgTrace(D_HEALTH_CHECK_MANAGER) << "Ignoring healthy status reply. Comp name: " << single_stat.first;
|
||||
continue;
|
||||
}
|
||||
if (description.getStatus() == HealthCheckStatus::HEALTHY) {
|
||||
dbgTrace(D_HEALTH_CHECK_MANAGER)
|
||||
<< "Ignoring healthy status reply. Comp name: "
|
||||
<< description.getCompName();
|
||||
return;
|
||||
}
|
||||
|
||||
for (const auto &status : single_stat.second.getExtendedStatus()) {
|
||||
errors.push_back(HealthCheckError(single_stat.first + " " + status.first, status.second));
|
||||
}
|
||||
for (const auto &extended_status : description.getExtendedStatus()) {
|
||||
errors.push_back(
|
||||
HealthCheckError(description.getCompName() + " " + extended_status.first,
|
||||
extended_status.second
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -113,9 +117,9 @@ private:
|
||||
class HealthCheckPatch : public ClientRest
|
||||
{
|
||||
public:
|
||||
HealthCheckPatch(HealthCheckStatus raw_status, const map<string, HealthCheckStatusReply> &descriptions)
|
||||
HealthCheckPatch(HealthCheckStatus raw_status, const HealthCheckStatusReply &description)
|
||||
{
|
||||
health_check = HealthCheckValue(raw_status, descriptions);
|
||||
health_check = HealthCheckValue(raw_status, description);
|
||||
}
|
||||
|
||||
C2S_LABEL_PARAM(HealthCheckValue, health_check, "healthCheck");
|
||||
@@ -123,7 +127,8 @@ public:
|
||||
|
||||
class HealthCheckManager::Impl
|
||||
:
|
||||
Singleton::Provide<I_Health_Check_Manager>::From<HealthCheckManager>
|
||||
Singleton::Provide<I_Health_Check_Manager>::From<HealthCheckManager>,
|
||||
public Listener<UpdatesProcessEvent>
|
||||
{
|
||||
public:
|
||||
void
|
||||
@@ -132,6 +137,7 @@ public:
|
||||
auto rest = Singleton::Consume<I_RestApi>::by<HealthCheckManager>();
|
||||
rest->addRestCall<HealthCheckOnDemand>(RestAction::SHOW, "health-check-on-demand");
|
||||
|
||||
registerListener();
|
||||
int interval_in_seconds =
|
||||
getProfileAgentSettingWithDefault<int>(30, "agent.healthCheck.intervalInSeconds");
|
||||
|
||||
@@ -157,9 +163,62 @@ public:
|
||||
void
|
||||
printRepliesHealthStatus(ofstream &oputput_file)
|
||||
{
|
||||
getRegisteredComponentsHealthStatus();
|
||||
cereal::JSONOutputArchive ar(oputput_file);
|
||||
ar(cereal::make_nvp("allComponentsHealthCheckReplies", all_comps_health_status));
|
||||
ar(cereal::make_nvp(health_check_reply.getCompName(), health_check_reply));
|
||||
}
|
||||
|
||||
void
|
||||
upon(const UpdatesProcessEvent &event)
|
||||
{
|
||||
|
||||
OrchestrationStatusFieldType status_field_type = event.getStatusFieldType();
|
||||
HealthCheckStatus _status = convertResultToHealthCheckStatus(event.getResult());
|
||||
string status_field_type_str = convertOrchestrationStatusFieldTypeToStr(status_field_type);
|
||||
|
||||
extended_status[status_field_type_str] =
|
||||
_status == HealthCheckStatus::HEALTHY ?
|
||||
"Success" :
|
||||
event.parseDescription();
|
||||
field_types_status[status_field_type_str] = _status;
|
||||
|
||||
switch(_status) {
|
||||
case HealthCheckStatus::UNHEALTHY: {
|
||||
general_health_aggregated_status = HealthCheckStatus::UNHEALTHY;
|
||||
break;
|
||||
}
|
||||
case HealthCheckStatus::DEGRADED: {
|
||||
for (const auto &type_status : field_types_status) {
|
||||
if ((type_status.first != status_field_type_str)
|
||||
&& (type_status.second == HealthCheckStatus::UNHEALTHY))
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
general_health_aggregated_status = HealthCheckStatus::DEGRADED;
|
||||
break;
|
||||
}
|
||||
case HealthCheckStatus::HEALTHY: {
|
||||
for (const auto &type_status : field_types_status) {
|
||||
if ((type_status.first != status_field_type_str)
|
||||
&& (type_status.second == HealthCheckStatus::UNHEALTHY
|
||||
|| type_status.second == HealthCheckStatus::DEGRADED)
|
||||
)
|
||||
{
|
||||
break;
|
||||
}
|
||||
general_health_aggregated_status = HealthCheckStatus::HEALTHY;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case HealthCheckStatus::IGNORED: {
|
||||
break;
|
||||
}
|
||||
}
|
||||
health_check_reply = HealthCheckStatusReply(
|
||||
"Orchestration",
|
||||
general_health_aggregated_status,
|
||||
extended_status
|
||||
);
|
||||
}
|
||||
|
||||
private:
|
||||
@@ -168,9 +227,10 @@ private:
|
||||
{
|
||||
dbgFlow(D_HEALTH_CHECK_MANAGER) << "Sending a health check patch";
|
||||
|
||||
HealthCheckPatch patch_to_send(general_health_aggregated_status, all_comps_health_status);
|
||||
auto messaging = Singleton::Consume<I_Messaging>::by<HealthCheckManager>();
|
||||
return messaging->sendSyncMessageWithoutResponse(
|
||||
HealthCheckPatch patch_to_send(general_health_aggregated_status, health_check_reply);
|
||||
extended_status.clear();
|
||||
field_types_status.clear();
|
||||
return Singleton::Consume<I_Messaging>::by<HealthCheckManager>()->sendSyncMessageWithoutResponse(
|
||||
HTTPMethod::PATCH,
|
||||
"/agents",
|
||||
patch_to_send,
|
||||
@@ -178,59 +238,11 @@ private:
|
||||
);
|
||||
}
|
||||
|
||||

    void
    getRegisteredComponentsHealthStatus()
    {
        vector<HealthCheckStatusReply> health_check_event_reply = HealthCheckStatusEvent().query();
        all_comps_health_status.clear();
        for (const auto &reply : health_check_event_reply) {
            if (reply.getStatus() != HealthCheckStatus::IGNORED) {
                all_comps_health_status.emplace(reply.getCompName(), reply);
            }
        }
    }

    void
    calcGeneralHealthAggregatedStatus()
    {
        general_health_aggregated_status = HealthCheckStatus::HEALTHY;

        for (const auto &reply : all_comps_health_status) {
            HealthCheckStatus status = reply.second.getStatus();

            dbgTrace(D_HEALTH_CHECK_MANAGER)
                << "Current aggregated status is: "
                << HealthCheckStatusReply::convertHealthCheckStatusToStr(
                    general_health_aggregated_status
                )
                << ". Got health status: "
                << HealthCheckStatusReply::convertHealthCheckStatusToStr(status)
                << " for component: "
                << reply.first;

            switch (status) {
                case HealthCheckStatus::UNHEALTHY : {
                    general_health_aggregated_status = HealthCheckStatus::UNHEALTHY;
                    return;
                }
                case HealthCheckStatus::DEGRADED : {
                    general_health_aggregated_status = HealthCheckStatus::DEGRADED;
                    break;
                }
                case HealthCheckStatus::IGNORED : break;
                case HealthCheckStatus::HEALTHY : break;
            }
        }
    }

    void
    executeHealthCheck()
    {
        dbgFlow(D_HEALTH_CHECK_MANAGER) << "Collecting health status from all registered components.";

        getRegisteredComponentsHealthStatus();
        calcGeneralHealthAggregatedStatus();

        dbgTrace(D_HEALTH_CHECK_MANAGER)
            << "Aggregated status: "
            << HealthCheckStatusReply::convertHealthCheckStatusToStr(general_health_aggregated_status);
@@ -244,9 +256,43 @@ private:
        };
    }

    HealthCheckStatus general_health_aggregated_status;
    map<string, HealthCheckStatusReply> all_comps_health_status;
    string
    convertOrchestrationStatusFieldTypeToStr(OrchestrationStatusFieldType type)
    {
        switch (type) {
            case OrchestrationStatusFieldType::REGISTRATION : return "Registration";
            case OrchestrationStatusFieldType::MANIFEST : return "Manifest";
            case OrchestrationStatusFieldType::LAST_UPDATE : return "Last Update";
            case OrchestrationStatusFieldType::COUNT : return "Count";
        }

        dbgAssert(false) << "Trying to convert unknown orchestration status field to string.";
        return "";
    }

    HealthCheckStatus
    convertResultToHealthCheckStatus(UpdatesProcessResult result)
    {
        switch (result) {
            case UpdatesProcessResult::SUCCESS : return HealthCheckStatus::HEALTHY;
            case UpdatesProcessResult::UNSET : return HealthCheckStatus::IGNORED;
            case UpdatesProcessResult::FAILED : return HealthCheckStatus::UNHEALTHY;
            case UpdatesProcessResult::DEGRADED : return HealthCheckStatus::DEGRADED;
        }

        dbgAssert(false) << "Trying to convert unknown update process result field to health check status.";
        return HealthCheckStatus::IGNORED;
    }

    HealthCheckStatus general_health_aggregated_status = HealthCheckStatus::HEALTHY;
    HealthCheckStatusReply health_check_reply = HealthCheckStatusReply(
        "Orchestration",
        HealthCheckStatus::HEALTHY,
        {}
    );
    bool should_patch_report;
    map<string, string> extended_status;
    map<string, HealthCheckStatus> field_types_status;
};

HealthCheckManager::HealthCheckManager() : Component("HealthCheckManager"), pimpl(make_unique<Impl>()) {}
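For context, the aggregation code above collects per-component replies through the HealthCheckStatusEvent query. A component exposes its own health by registering a listener shaped roughly like the sketch below; the component name and the fixed status are illustrative, only the reply interface comes from this code base:

// Minimal sketch of a component-side health responder (illustrative names).
class MyComponentHealthListener : public Listener<HealthCheckStatusEvent>
{
public:
    void upon(const HealthCheckStatusEvent &) override {}

    // Answers HealthCheckStatusEvent().query() issued by getRegisteredComponentsHealthStatus().
    HealthCheckStatusReply
    respond(const HealthCheckStatusEvent &) override
    {
        return HealthCheckStatusReply("MyComponent", HealthCheckStatus::HEALTHY, {});
    }

    std::string getListenerName() const override { return "MyComponentHealthListener"; }
};
// The reply is picked up only after registerListener() is called on an instance.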
@@ -13,42 +13,13 @@
|
||||
#include "mock/mock_mainloop.h"
|
||||
#include "mock/mock_messaging.h"
|
||||
#include "mock/mock_rest_api.h"
|
||||
#include "updates_process_event.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace testing;
|
||||
|
||||
USE_DEBUG_FLAG(D_HEALTH_CHECK);
|
||||
|
||||
class TestHealthCheckStatusListener : public Listener<HealthCheckStatusEvent>
|
||||
{
|
||||
public:
|
||||
void upon(const HealthCheckStatusEvent &) override {}
|
||||
|
||||
HealthCheckStatusReply
|
||||
respond(const HealthCheckStatusEvent &) override
|
||||
{
|
||||
map<string, string> extended_status;
|
||||
extended_status["team"] = team;
|
||||
extended_status["city"] = city;
|
||||
HealthCheckStatusReply reply(comp_name, status, extended_status);
|
||||
return reply;
|
||||
}
|
||||
|
||||
void setStatus(HealthCheckStatus new_status) { status = new_status; }
|
||||
|
||||
string getListenerName() const { return "TestHealthCheckStatusListener"; }
|
||||
|
||||
private:
|
||||
static const string comp_name;
|
||||
HealthCheckStatus status = HealthCheckStatus::HEALTHY;
|
||||
static const string team;
|
||||
static const string city;
|
||||
};
|
||||
|
||||
const string TestHealthCheckStatusListener::comp_name = "Test";
|
||||
const string TestHealthCheckStatusListener::team = "Hapoel";
|
||||
const string TestHealthCheckStatusListener::city = "Tel-Aviv";
|
||||
|
||||
class TestEnd {};
|
||||
|
||||
class HealthCheckManagerTest : public Test
|
||||
@@ -56,8 +27,7 @@ class HealthCheckManagerTest : public Test
|
||||
public:
|
||||
HealthCheckManagerTest()
|
||||
{
|
||||
Debug::setNewDefaultStdout(&debug_output);
|
||||
Debug::setUnitTestFlag(D_HEALTH_CHECK, Debug::DebugLevel::INFO);
|
||||
Debug::setUnitTestFlag(D_HEALTH_CHECK, Debug::DebugLevel::NOISE);
|
||||
|
||||
EXPECT_CALL(mock_ml, addRecurringRoutine(_, _, _, _, _)).WillRepeatedly(
|
||||
DoAll(SaveArg<2>(&health_check_periodic_routine), Return(1))
|
||||
@@ -70,7 +40,6 @@ public:
|
||||
);
|
||||
|
||||
env.preload();
|
||||
event_listener.registerListener();
|
||||
|
||||
env.init();
|
||||
|
||||
@@ -98,14 +67,12 @@ public:
|
||||
StrictMock<MockMainLoop> mock_ml;
|
||||
StrictMock<MockRestApi> mock_rest;
|
||||
StrictMock<MockMessaging> mock_message;
|
||||
stringstream debug_output;
|
||||
ConfigComponent config;
|
||||
Config::I_Config *i_config = nullptr;
|
||||
::Environment env;
|
||||
HealthCheckManager health_check_manager;
|
||||
I_Health_Check_Manager *i_health_check_manager;
|
||||
unique_ptr<ServerRest> health_check_server;
|
||||
TestHealthCheckStatusListener event_listener;
|
||||
};
|
||||
|
||||
TEST_F(HealthCheckManagerTest, runPeriodicHealthCheckTest)
|
||||
@@ -142,7 +109,20 @@ TEST_F(HealthCheckManagerTest, runPeriodicHealthCheckTest)
|
||||
EXPECT_EQ(actual_body, expected_healthy_body);
|
||||
EXPECT_EQ("Healthy", aggregated_status_str);
|
||||
|
||||
event_listener.setStatus(HealthCheckStatus::DEGRADED);
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::DEGRADED,
|
||||
UpdatesConfigType::SETTINGS,
|
||||
UpdatesFailureReason::DOWNLOAD_FILE,
|
||||
"setting.json",
|
||||
"File not found"
|
||||
).notify();
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::DEGRADED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::DOWNLOAD_FILE,
|
||||
"manifest.json",
|
||||
"File not found"
|
||||
).notify();
|
||||
try {
|
||||
health_check_periodic_routine();
|
||||
} catch (const TestEnd &t) {}
|
||||
@@ -156,16 +136,16 @@ TEST_F(HealthCheckManagerTest, runPeriodicHealthCheckTest)
|
||||
" \"status\": \"Degraded\",\n"
|
||||
" \"errors\": [\n"
|
||||
" {\n"
|
||||
" \"code\": \"Test city\",\n"
|
||||
" \"code\": \"Orchestration Last Update\",\n"
|
||||
" \"message\": [\n"
|
||||
" \"Tel-Aviv\"\n"
|
||||
" \"Failed to download the file setting.json. Error: File not found\"\n"
|
||||
" ],\n"
|
||||
" \"internal\": true\n"
|
||||
" },\n"
|
||||
" {\n"
|
||||
" \"code\": \"Test team\",\n"
|
||||
" \"code\": \"Orchestration Manifest\",\n"
|
||||
" \"message\": [\n"
|
||||
" \"Hapoel\"\n"
|
||||
" \"Failed to download the file manifest.json. Error: File not found\"\n"
|
||||
" ],\n"
|
||||
" \"internal\": true\n"
|
||||
" }\n"
|
||||
@@ -196,19 +176,24 @@ TEST_F(HealthCheckManagerTest, runOnDemandHealthCheckTest)
|
||||
config.preload();
|
||||
Singleton::Consume<Config::I_Config>::from(config)->loadConfiguration(ss);
|
||||
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::DOWNLOAD_FILE,
|
||||
"manifest.json",
|
||||
"File not found"
|
||||
).notify();
|
||||
|
||||
stringstream is;
|
||||
is << "{}";
|
||||
health_check_server->performRestCall(is);
|
||||
|
||||
string expected_status =
|
||||
"{\n"
|
||||
" \"allComponentsHealthCheckReplies\": {\n"
|
||||
" \"Test\": {\n"
|
||||
" \"status\": \"Healthy\",\n"
|
||||
" \"extendedStatus\": {\n"
|
||||
" \"city\": \"Tel-Aviv\",\n"
|
||||
" \"team\": \"Hapoel\"\n"
|
||||
" }\n"
|
||||
" \"Orchestration\": {\n"
|
||||
" \"status\": \"Unhealthy\",\n"
|
||||
" \"extendedStatus\": {\n"
|
||||
" \"Manifest\": \"Failed to download the file manifest.json. Error: File not found\"\n"
|
||||
" }\n"
|
||||
" }\n"
|
||||
"}";
|
||||
@@ -51,6 +51,7 @@ public:
|
||||
|
||||
private:
|
||||
I_DeclarativePolicy *i_declarative_policy = nullptr;
|
||||
std::string profile_mode;
|
||||
};
|
||||
|
||||
#endif // __FOG_COMMUNICATION_H__
|
||||
|
||||
@@ -26,6 +26,13 @@ operator<<(std::ostream &os, const Maybe<std::tuple<std::string, std::string, st
|
||||
return os;
|
||||
}
|
||||
|
||||
std::ostream &
|
||||
operator<<(
|
||||
std::ostream &os, const Maybe<std::tuple<std::string, std::string, std::string, std::string, std::string>> &)
|
||||
{
|
||||
return os;
|
||||
}
|
||||
|
||||
class MockDetailsResolver
|
||||
:
|
||||
public Singleton::Provide<I_DetailsResolver>::From<MockProvider<I_DetailsResolver>>
|
||||
@@ -42,6 +49,8 @@ public:
|
||||
MOCK_METHOD0(getResolvedDetails, std::map<std::string, std::string>());
|
||||
MOCK_METHOD0(isVersionAboveR8110, bool());
|
||||
MOCK_METHOD0(parseNginxMetadata, Maybe<std::tuple<std::string, std::string, std::string>>());
|
||||
MOCK_METHOD0(
|
||||
readCloudMetadata, Maybe<std::tuple<std::string, std::string, std::string, std::string, std::string>>());
|
||||
};
|
||||
|
||||
#endif // __MOCK_DETAILS_RESOLVER_H__
|
||||
|
||||
@@ -64,7 +64,7 @@ public:
|
||||
)
|
||||
);
|
||||
|
||||
typedef std::map<std::string, PortNumber> ServicePortMap;
|
||||
typedef std::map<std::string, std::vector<PortNumber>> ServicePortMap;
|
||||
MOCK_METHOD0(getServiceToPortMap, ServicePortMap());
|
||||
MOCK_METHOD3(updateReconfStatus, void(int id, const std::string &service_name, ReconfStatus status));
|
||||
MOCK_METHOD4(
|
||||
|
||||
@@ -0,0 +1,130 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __UPDATES_PROCESS_EVENT_H__
#define __UPDATES_PROCESS_EVENT_H__

#include "event.h"
#include "singleton.h"
#include "config.h"
#include "debug.h"
#include "i_orchestration_status.h"
#include "health_check_status/health_check_status.h"
#include "customized_cereal_map.h"

USE_DEBUG_FLAG(D_UPDATES_PROCESS_REPORTER);

enum class UpdatesFailureReason {
    CHECK_UPDATE,
    REGISTRATION,
    ORCHESTRATION_SELF_UPDATE,
    GET_UPDATE_REQUEST,
    DOWNLOAD_FILE,
    HANDLE_FILE,
    INSTALLATION_QUEUE,
    INSTALL_PACKAGE,
    CHECKSUM_UNMATCHED,
    POLICY_CONFIGURATION,
    POLICY_FOG_CONFIGURATION,
    NONE
};

enum class UpdatesConfigType { MANIFEST, POLICY, SETTINGS, DATA, GENERAL };
enum class UpdatesProcessResult { UNSET, SUCCESS, FAILED, DEGRADED };

static inline std::string
convertUpdatesFailureReasonToStr(UpdatesFailureReason reason)
{
    switch (reason) {
        case UpdatesFailureReason::CHECK_UPDATE : return "CHECK_UPDATE";
        case UpdatesFailureReason::REGISTRATION : return "REGISTRATION";
        case UpdatesFailureReason::ORCHESTRATION_SELF_UPDATE : return "ORCHESTRATION_SELF_UPDATE";
        case UpdatesFailureReason::GET_UPDATE_REQUEST : return "GET_UPDATE_REQUEST";
        case UpdatesFailureReason::DOWNLOAD_FILE : return "DOWNLOAD_FILE";
        case UpdatesFailureReason::HANDLE_FILE : return "HANDLE_FILE";
        case UpdatesFailureReason::INSTALLATION_QUEUE : return "INSTALLATION_QUEUE";
        case UpdatesFailureReason::INSTALL_PACKAGE : return "INSTALL_PACKAGE";
        case UpdatesFailureReason::CHECKSUM_UNMATCHED : return "CHECKSUM_UNMATCHED";
        case UpdatesFailureReason::POLICY_CONFIGURATION : return "POLICY_CONFIGURATION";
        case UpdatesFailureReason::POLICY_FOG_CONFIGURATION : return "POLICY_FOG_CONFIGURATION";
        case UpdatesFailureReason::NONE : return "NONE";
    }

    dbgWarning(D_UPDATES_PROCESS_REPORTER) << "Trying to convert unknown updates failure reason to string.";
    return "";
}

static inline std::string
convertUpdatesConfigTypeToStr(UpdatesConfigType type)
{
    switch (type) {
        case UpdatesConfigType::MANIFEST : return "MANIFEST";
        case UpdatesConfigType::POLICY : return "POLICY";
        case UpdatesConfigType::SETTINGS : return "SETTINGS";
        case UpdatesConfigType::DATA : return "DATA";
        case UpdatesConfigType::GENERAL : return "GENERAL";
    }

    dbgWarning(D_UPDATES_PROCESS_REPORTER) << "Trying to convert unknown updates config type to string.";
    return "";
}

static inline std::string
convertUpdateProcessResultToStr(UpdatesProcessResult result)
{
    switch (result) {
        case UpdatesProcessResult::SUCCESS : return "SUCCESS";
        case UpdatesProcessResult::UNSET : return "UNSET";
        case UpdatesProcessResult::FAILED : return "FAILURE";
        case UpdatesProcessResult::DEGRADED : return "DEGRADED";
    }

    dbgWarning(D_UPDATES_PROCESS_REPORTER) << "Trying to convert unknown updates process result to string.";
    return "";
}

class UpdatesProcessEvent : public Event<UpdatesProcessEvent>
{
public:
    UpdatesProcessEvent() {}
    UpdatesProcessEvent(
        UpdatesProcessResult _result,
        UpdatesConfigType _type,
        UpdatesFailureReason _reason = UpdatesFailureReason::NONE,
        const std::string &_detail = "",
        const std::string &_description = "");

    ~UpdatesProcessEvent() {}

    UpdatesProcessResult getResult() const { return result; }
    UpdatesConfigType getType() const { return type; }
    UpdatesFailureReason getReason() const { return reason; }
    std::string getDetail() const { return detail; }
    std::string getDescription() const { return description; }

    OrchestrationStatusFieldType getStatusFieldType() const;
    OrchestrationStatusResult getOrchestrationStatusResult() const;

    std::string parseDescription() const;

private:
    UpdatesProcessResult result;
    UpdatesConfigType type;
    UpdatesFailureReason reason;
    std::string detail;
    std::string description;
};

#endif // __UPDATES_PROCESS_EVENT_H__
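To make the new event flow concrete, here is a minimal sketch of raising and consuming an UpdatesProcessEvent. Only the event interface and the notify() pattern come from this change set; the logging listener below is a hypothetical illustration:

// Hypothetical listener, for illustration only.
#include "updates_process_event.h"

class UpdatesEventLogger : public Listener<UpdatesProcessEvent>
{
public:
    void
    upon(const UpdatesProcessEvent &event) override
    {
        // Logs every update-process event using the conversion helpers declared above.
        dbgWarning(D_UPDATES_PROCESS_REPORTER)
            << convertUpdateProcessResultToStr(event.getResult()) << " | "
            << convertUpdatesConfigTypeToStr(event.getType()) << " | "
            << convertUpdatesFailureReasonToStr(event.getReason()) << " | "
            << event.getDescription();
    }

    std::string getListenerName() const override { return "UpdatesEventLogger"; }
};

// Raising an event, as done throughout this change set:
// UpdatesProcessEvent(
//     UpdatesProcessResult::FAILED,
//     UpdatesConfigType::MANIFEST,
//     UpdatesFailureReason::DOWNLOAD_FILE,
//     "manifest.json",
//     "File not found"
// ).notify();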
@@ -0,0 +1,61 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __UPDATES_PROCESS_REPORT_H__
#define __UPDATES_PROCESS_REPORT_H__

#include <sstream>
#include <string>

#include "singleton.h"
#include "i_time_get.h"
#include "updates_process_event.h"

class UpdatesProcessReport : Singleton::Consume<I_TimeGet>
{
public:
    UpdatesProcessReport(
        UpdatesProcessResult result,
        UpdatesConfigType type,
        UpdatesFailureReason reason,
        const std::string &description)
            :
        result(result), type(type), reason(reason), description(description)
    {
        time_stamp = Singleton::Consume<I_TimeGet>::by<UpdatesProcessReport>()->getWalltimeStr();
    }

    std::string
    toString() const
    {
        std::stringstream report;
        report
            << "["
            << time_stamp << "] - "
            << convertUpdateProcessResultToStr(result) << " | "
            << convertUpdatesConfigTypeToStr(type) << " | "
            << convertUpdatesFailureReasonToStr(reason) << " | "
            << description;

        return report.str();
    }

private:
    UpdatesProcessResult result;
    UpdatesConfigType type;
    UpdatesFailureReason reason;
    std::string description;
    std::string time_stamp;
};

#endif // __UPDATES_PROCESS_REPORT_H__
@@ -0,0 +1,39 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __UPDATES_PROCESS_REPORTER_H__
#define __UPDATES_PROCESS_REPORTER_H__

#include <string>
#include <vector>

#include "event.h"
#include "singleton.h"
#include "config.h"
#include "debug.h"
#include "i_orchestration_status.h"
#include "health_check_status/health_check_status.h"
#include "updates_process_event.h"
#include "updates_process_report.h"

class UpdatesProcessReporter : public Listener<UpdatesProcessEvent>
{
public:
    void upon(const UpdatesProcessEvent &event) override;

private:
    void sendReoprt();

    static std::vector<UpdatesProcessReport> reports;
};

#endif // __UPDATES_PROCESS_REPORTER_H__
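For orientation, a sketch of how a listener such as UpdatesProcessReporter could turn an incoming event into an UpdatesProcessReport. The reporter's real implementation is not part of this hunk, so the body below is an assumption built only from the interfaces declared above:

// Assumed usage only; not the actual UpdatesProcessReporter implementation.
void
handleUpdateEventSketch(const UpdatesProcessEvent &event)
{
    UpdatesProcessReport report(
        event.getResult(),
        event.getType(),
        event.getReason(),
        event.parseDescription()
    );
    // toString() renders e.g. "[<wall time>] - FAILURE | MANIFEST | DOWNLOAD_FILE | <description>"
    dbgWarning(D_UPDATES_PROCESS_REPORTER) << report.toString();
}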
@@ -21,6 +21,7 @@
|
||||
#include "version.h"
|
||||
#include "log_generator.h"
|
||||
#include "orchestration_comp.h"
|
||||
#include "updates_process_event.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace ReportIS;
|
||||
@@ -219,6 +220,13 @@ ManifestController::Impl::updateManifest(const string &new_manifest_file)
|
||||
if (isIgnoreFile(new_manifest_file)) {
|
||||
if (!orchestration_tools->copyFile(new_manifest_file, manifest_file_path)) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to copy a new manifest file";
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::HANDLE_FILE,
|
||||
new_manifest_file,
|
||||
"Failed to copy a new manifest file"
|
||||
).notify();
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@@ -237,6 +245,13 @@ ManifestController::Impl::updateManifest(const string &new_manifest_file)
|
||||
|
||||
if (!orchestration_tools->copyFile(new_manifest_file, manifest_file_path)) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to copy a new manifest file";
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::HANDLE_FILE,
|
||||
new_manifest_file,
|
||||
"Failed to copy a new manifest file"
|
||||
).notify();
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@@ -245,6 +260,13 @@ ManifestController::Impl::updateManifest(const string &new_manifest_file)
|
||||
Maybe<map<string, Package>> parsed_manifest = orchestration_tools->loadPackagesFromJson(new_manifest_file);
|
||||
if (!parsed_manifest.ok()) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to parse the new manifest file. File: " << new_manifest_file;
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::HANDLE_FILE,
|
||||
new_manifest_file,
|
||||
"Failed to parse the new manifest file"
|
||||
).notify();
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -332,6 +354,13 @@ ManifestController::Impl::updateManifest(const string &new_manifest_file)
|
||||
dbgWarning(D_ORCHESTRATOR)
|
||||
<< "Failed building installation queue. Error: "
|
||||
<< installation_queue_res.getErr();
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::INSTALLATION_QUEUE,
|
||||
"",
|
||||
installation_queue_res.getErr()
|
||||
).notify();
|
||||
return false;
|
||||
}
|
||||
const vector<Package> &installation_queue = installation_queue_res.unpack();
|
||||
@@ -447,11 +476,25 @@ ManifestController::Impl::changeManifestFile(const string &new_manifest_file)
|
||||
dbgDebug(D_ORCHESTRATOR) << "Writing new manifest to file";
|
||||
if (!orchestration_tools->copyFile(new_manifest_file, manifest_file_path)) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to write new manifest to file";
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::HANDLE_FILE,
|
||||
new_manifest_file,
"Failed to write new manifest to file"
|
||||
).notify();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!orchestration_tools->isNonEmptyFile(manifest_file_path)) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to get manifest file data";
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::HANDLE_FILE,
|
||||
manifest_file_path,
|
||||
"Failed to get manifest file data"
|
||||
).notify();
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@@ -281,13 +281,7 @@ TEST_F(ManifestControllerTest, badChecksum)
|
||||
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my/my")).WillOnce(Return(false));
|
||||
|
||||
string hostname = "hostname";
|
||||
string empty_err;
|
||||
EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(empty_err));
|
||||
EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return( Maybe<string>(hostname)));
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _)
|
||||
);
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -710,10 +704,6 @@ TEST_F(ManifestControllerTest, selfUpdateWithOldCopyWithError)
|
||||
string hostname = "hostname";
|
||||
string empty_err;
|
||||
EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(empty_err));
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _)
|
||||
);
|
||||
load(manifest, new_services);
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools,
|
||||
@@ -932,10 +922,6 @@ TEST_F(ManifestControllerTest, badInstall)
|
||||
string empty_err;
|
||||
EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(empty_err));
|
||||
EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return( Maybe<string>(hostname)));
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _)
|
||||
);
|
||||
|
||||
string corrupted_packages_manifest =
|
||||
"{"
|
||||
@@ -1008,12 +994,6 @@ TEST_F(ManifestControllerTest, failToDownloadWithselfUpdate)
|
||||
doesFileExist("/etc/cp/packages/orchestration/orchestration")
|
||||
).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(string("hostname")));
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _)
|
||||
);
|
||||
string not_error;
|
||||
EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(not_error));
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
|
||||
@@ -1404,12 +1384,6 @@ TEST_F(ManifestControllerTest, failureOnDownloadSharedObject)
|
||||
).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(string("hostname")));
|
||||
EXPECT_CALL(mock_orchestration_tools, removeFile("/tmp/temp_file1")).WillOnce(Return(true));
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _)
|
||||
);
|
||||
string not_error;
|
||||
EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(not_error));
|
||||
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(file_name));
|
||||
}
|
||||
@@ -2538,12 +2512,6 @@ TEST_F(ManifestDownloadTest, download_relative_path)
|
||||
doesFileExist("/etc/cp/packages/orchestration/orchestration")
|
||||
).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(string("hostname")));
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _)
|
||||
);
|
||||
string not_error;
|
||||
EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(not_error));
|
||||
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(manifest_file.fname));
|
||||
}
|
||||
@@ -2589,8 +2557,6 @@ TEST_F(ManifestDownloadTest, download_relative_path_no_fog_domain)
|
||||
mock_orchestration_tools,
|
||||
doesFileExist("/etc/cp/packages/orchestration/orchestration")
|
||||
).WillOnce(Return(false));
|
||||
string not_error;
|
||||
EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(not_error));
|
||||
|
||||
checkIfFileExistsCall(new_packages.at("orchestration"));
|
||||
|
||||
@@ -2604,10 +2570,6 @@ TEST_F(ManifestDownloadTest, download_relative_path_no_fog_domain)
|
||||
)
|
||||
).WillOnce(Return(downloaded_package));
|
||||
EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(string("hostname")));
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _)
|
||||
);
|
||||
|
||||
EXPECT_FALSE(i_manifest_controller->updateManifest(manifest_file.fname));
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
#include "config.h"
|
||||
#include "agent_details.h"
|
||||
#include "orchestration_comp.h"
|
||||
#include "updates_process_event.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
@@ -174,14 +175,13 @@ ManifestHandler::downloadPackages(const map<string, Package> &new_packages_to_do
|
||||
" software update failed. Agent is running previous software. Contact Check Point support.";
|
||||
}
|
||||
|
||||
auto orchestration_status = Singleton::Consume<I_OrchestrationStatus>::by<ManifestHandler>();
|
||||
if (orchestration_status->getManifestError().find("Gateway was not fully deployed") == string::npos) {
|
||||
orchestration_status->setFieldStatus(
|
||||
OrchestrationStatusFieldType::MANIFEST,
|
||||
OrchestrationStatusResult::FAILED,
|
||||
install_error
|
||||
);
|
||||
}
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::DOWNLOAD_FILE,
|
||||
package.getName(),
|
||||
install_error
|
||||
).notify();
|
||||
return genError(
|
||||
"Failed to download installation package. Package: " +
|
||||
package.getName() +
|
||||
@@ -219,11 +219,13 @@ ManifestHandler::installPackage(
|
||||
err_hostname +
|
||||
" software update failed. Agent is running previous software. Contact Check Point support.";
|
||||
if (orchestration_status->getManifestError().find("Gateway was not fully deployed") == string::npos) {
|
||||
orchestration_status->setFieldStatus(
|
||||
OrchestrationStatusFieldType::MANIFEST,
|
||||
OrchestrationStatusResult::FAILED,
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::INSTALL_PACKAGE,
|
||||
package_name,
|
||||
install_error
|
||||
);
|
||||
).notify();
|
||||
}
|
||||
}
|
||||
return self_update_status;
|
||||
@@ -289,11 +291,13 @@ ManifestHandler::installPackage(
|
||||
|
||||
auto orchestration_status = Singleton::Consume<I_OrchestrationStatus>::by<ManifestHandler>();
|
||||
if (orchestration_status->getManifestError().find("Gateway was not fully deployed") == string::npos) {
|
||||
orchestration_status->setFieldStatus(
|
||||
OrchestrationStatusFieldType::MANIFEST,
|
||||
OrchestrationStatusResult::FAILED,
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::INSTALL_PACKAGE,
|
||||
package_name,
|
||||
install_error
|
||||
);
|
||||
).notify();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include "mock/mock_agent_details.h"
|
||||
#include "mock/mock_mainloop.h"
|
||||
#include "mock/mock_rest_api.h"
|
||||
#include "updates_process_event.h"
|
||||
|
||||
using namespace testing;
|
||||
using namespace std;
|
||||
@@ -200,6 +201,19 @@ TEST_F(OrchestrationStatusTest, checkUpdateStatus)
|
||||
auto result = orchestrationStatusFileToString();
|
||||
EXPECT_EQ(buildOrchestrationStatusJSON("attempt time", "Succeeded ", "current time"), result);
|
||||
}
|
||||
TEST_F(OrchestrationStatusTest, checkUpdateStatusByRaiseEvent)
|
||||
{
|
||||
init();
|
||||
EXPECT_CALL(time, getLocalTimeStr())
|
||||
.WillOnce(Return(string("attempt time")))
|
||||
.WillOnce(Return(string("current time")));
|
||||
|
||||
i_orchestration_status->setLastUpdateAttempt();
|
||||
|
||||
UpdatesProcessEvent(UpdatesProcessResult::SUCCESS, UpdatesConfigType::GENERAL).notify();
|
||||
auto result = orchestrationStatusFileToString();
|
||||
EXPECT_EQ(buildOrchestrationStatusJSON("attempt time", "Succeeded ", "current time"), result);
|
||||
}
|
||||
|
||||
TEST_F(OrchestrationStatusTest, recoveryFields)
|
||||
{
|
||||
@@ -482,3 +496,69 @@ TEST_F(OrchestrationStatusTest, setAllFields)
|
||||
EXPECT_EQ(i_orchestration_status->getServiceSettings(), service_map_a);
|
||||
EXPECT_EQ(i_orchestration_status->getRegistrationDetails(), agent_details);
|
||||
}
|
||||
|
||||
TEST_F(OrchestrationStatusTest, checkErrorByRaiseEvent)
|
||||
{
|
||||
init();
|
||||
string fog_address = "http://fog.address";
|
||||
string registar_error = "Fail to register";
|
||||
string manifest_error = "Fail to achieve manifest";
|
||||
string last_update_error = "Fail to update";
|
||||
|
||||
EXPECT_CALL(time, getLocalTimeStr()).Times(3).WillRepeatedly(Return(string("Time")));
|
||||
|
||||
UpdatesProcessEvent(UpdatesProcessResult::SUCCESS, UpdatesConfigType::GENERAL).notify();
|
||||
i_orchestration_status->setIsConfigurationUpdated(
|
||||
EnumArray<OrchestrationStatusConfigType, bool>(true, true, true)
|
||||
);
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::GENERAL,
|
||||
UpdatesFailureReason::NONE,
|
||||
"",
|
||||
last_update_error
|
||||
).notify();
|
||||
i_orchestration_status->setIsConfigurationUpdated(
|
||||
EnumArray<OrchestrationStatusConfigType, bool>(false, false, false)
|
||||
);
|
||||
|
||||
i_orchestration_status->setUpgradeMode("Online upgrades");
|
||||
i_orchestration_status->setFogAddress(fog_address);
|
||||
|
||||
i_orchestration_status->setUpgradeMode("Online upgrades");
|
||||
i_orchestration_status->setFogAddress(fog_address);
|
||||
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::GENERAL,
|
||||
UpdatesFailureReason::REGISTRATION,
|
||||
"",
|
||||
registar_error
|
||||
).notify();
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::NONE,
|
||||
"",
|
||||
manifest_error
|
||||
).notify();
|
||||
EXPECT_EQ(i_orchestration_status->getManifestError(), manifest_error);
|
||||
|
||||
auto result = orchestrationStatusFileToString();
|
||||
EXPECT_EQ(
|
||||
buildOrchestrationStatusJSON(
|
||||
"None",
|
||||
"Failed. Reason: " + last_update_error,
|
||||
"Time",
|
||||
"Time",
|
||||
"",
|
||||
"Time",
|
||||
"Time",
|
||||
"Online upgrades",
|
||||
fog_address,
|
||||
"Failed. Reason: Registration failed. Error: " + registar_error,
|
||||
"Failed. Reason: " + manifest_error
|
||||
),
|
||||
result
|
||||
);
|
||||
}
|
||||
|
||||
@@ -19,6 +19,8 @@
|
||||
|
||||
#include "debug.h"
|
||||
#include "config.h"
|
||||
#include "updates_process_event.h"
|
||||
#include "health_check_status/health_check_status.h"
|
||||
|
||||
using namespace cereal;
|
||||
using namespace std;
|
||||
@@ -383,7 +385,10 @@ private:
|
||||
map<string, string> service_settings;
|
||||
};
|
||||
|
||||
class OrchestrationStatus::Impl : Singleton::Provide<I_OrchestrationStatus>::From<OrchestrationStatus>
|
||||
class OrchestrationStatus::Impl
|
||||
:
|
||||
Singleton::Provide<I_OrchestrationStatus>::From<OrchestrationStatus>,
|
||||
public Listener<UpdatesProcessEvent>
|
||||
{
|
||||
public:
|
||||
void
|
||||
@@ -462,6 +467,13 @@ public:
|
||||
},
|
||||
"Write Orchestration status file"
|
||||
);
|
||||
registerListener();
|
||||
}
|
||||
|
||||
void
|
||||
upon(const UpdatesProcessEvent &event) override
|
||||
{
|
||||
setFieldStatus(event.getStatusFieldType(), event.getOrchestrationStatusResult(), event.parseDescription());
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
@@ -42,6 +42,8 @@
|
||||
#include "hybrid_communication.h"
|
||||
#include "agent_core_utilities.h"
|
||||
#include "fog_communication.h"
|
||||
#include "updates_process_event.h"
|
||||
#include "updates_process_reporter.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace chrono;
|
||||
@@ -53,85 +55,6 @@ USE_DEBUG_FLAG(D_ORCHESTRATOR);
|
||||
static string fw_last_update_time = "";
|
||||
#endif // gaia || smb
|
||||
|
||||
class HealthCheckStatusListener : public Listener<HealthCheckStatusEvent>
|
||||
{
|
||||
public:
|
||||
void upon(const HealthCheckStatusEvent &) override {}
|
||||
|
||||
HealthCheckStatusReply
|
||||
respond(const HealthCheckStatusEvent &) override
|
||||
{
|
||||
return HealthCheckStatusReply(comp_name, status, extended_status);
|
||||
}
|
||||
|
||||
string getListenerName() const override { return "HealthCheckStatusListener"; }
|
||||
|
||||
void
|
||||
setStatus(
|
||||
HealthCheckStatus _status,
|
||||
OrchestrationStatusFieldType _status_field_type,
|
||||
const string &_status_description = "Success")
|
||||
{
|
||||
string status_field_type_str = convertOrchestrationStatusFieldTypeToStr(_status_field_type);
|
||||
extended_status[status_field_type_str] = _status_description;
|
||||
field_types_status[status_field_type_str] = _status;
|
||||
|
||||
switch(_status) {
|
||||
case HealthCheckStatus::UNHEALTHY: {
|
||||
status = HealthCheckStatus::UNHEALTHY;
|
||||
return;
|
||||
}
|
||||
case HealthCheckStatus::DEGRADED: {
|
||||
for (const auto &type_status : field_types_status) {
|
||||
if ((type_status.first != status_field_type_str)
|
||||
&& (type_status.second == HealthCheckStatus::UNHEALTHY))
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
status = HealthCheckStatus::DEGRADED;
|
||||
return;
|
||||
}
|
||||
case HealthCheckStatus::HEALTHY: {
|
||||
for (const auto &type_status : field_types_status) {
|
||||
if ((type_status.first != status_field_type_str)
|
||||
&& (type_status.second == HealthCheckStatus::UNHEALTHY
|
||||
|| type_status.second == HealthCheckStatus::DEGRADED)
|
||||
)
|
||||
{
|
||||
return;
|
||||
}
|
||||
status = HealthCheckStatus::HEALTHY;
|
||||
}
|
||||
return;
|
||||
}
|
||||
case HealthCheckStatus::IGNORED: {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
string
|
||||
convertOrchestrationStatusFieldTypeToStr(OrchestrationStatusFieldType type)
|
||||
{
|
||||
switch (type) {
|
||||
case OrchestrationStatusFieldType::REGISTRATION : return "Registration";
|
||||
case OrchestrationStatusFieldType::MANIFEST : return "Manifest";
|
||||
case OrchestrationStatusFieldType::LAST_UPDATE : return "Last Update";
|
||||
case OrchestrationStatusFieldType::COUNT : return "Count";
|
||||
}
|
||||
|
||||
dbgError(D_ORCHESTRATOR) << "Trying to convert unknown orchestration status field to string.";
|
||||
return "";
|
||||
}
|
||||
|
||||
string comp_name = "Orchestration";
|
||||
HealthCheckStatus status = HealthCheckStatus::IGNORED;
|
||||
map<string, string> extended_status;
|
||||
map<string, HealthCheckStatus> field_types_status;
|
||||
};
|
||||
|
||||
class SetAgentUninstall
|
||||
:
|
||||
public ServerRest,
|
||||
@@ -203,7 +126,7 @@ public:
|
||||
loadFogAddress();
|
||||
|
||||
Singleton::Consume<I_MainLoop>::by<OrchestrationComp>()->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::RealTime,
|
||||
I_MainLoop::RoutineType::System,
|
||||
[this] () { run(); },
|
||||
"Orchestration runner",
|
||||
true
|
||||
@@ -257,6 +180,13 @@ private:
|
||||
<< "Failed to load Orchestration Policy. Error: "
|
||||
<< maybe_policy.getErr()
|
||||
<< "Trying to load from backup.";
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::POLICY,
|
||||
UpdatesFailureReason::POLICY_CONFIGURATION,
|
||||
orchestration_policy_file,
|
||||
maybe_policy.getErr()
|
||||
).notify();
|
||||
return loadOrchestrationPolicyFromBackup();
|
||||
}
|
||||
|
||||
@@ -280,6 +210,13 @@ private:
|
||||
return maybe_policy;
|
||||
}
|
||||
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::POLICY,
|
||||
UpdatesFailureReason::POLICY_CONFIGURATION,
|
||||
orchestration_policy_file + backup_ext,
|
||||
maybe_policy.getErr()
|
||||
).notify();
|
||||
return genError("Failed to load Orchestration policy from backup.");
|
||||
}
|
||||
|
||||
@@ -337,17 +274,13 @@ private:
|
||||
<< new_manifest_file.getErr()
|
||||
<< " Presenting the next message to the user: "
|
||||
<< install_error;
|
||||
i_orchestration_status->setFieldStatus(
|
||||
OrchestrationStatusFieldType::MANIFEST,
|
||||
OrchestrationStatusResult::FAILED,
|
||||
install_error
|
||||
);
|
||||
|
||||
health_check_status_listener.setStatus(
|
||||
HealthCheckStatus::UNHEALTHY,
|
||||
OrchestrationStatusFieldType::MANIFEST,
|
||||
install_error
|
||||
);
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::DOWNLOAD_FILE,
|
||||
resource_file.getFileName(),
|
||||
new_manifest_file.getErr()
|
||||
).notify();
|
||||
|
||||
return genError(install_error);
|
||||
}
|
||||
@@ -372,23 +305,12 @@ private:
|
||||
<< "Manifest failed to be updated. Presenting the next message to the user: "
|
||||
<< install_error;
|
||||
|
||||
health_check_status_listener.setStatus(
|
||||
HealthCheckStatus::UNHEALTHY,
|
||||
OrchestrationStatusFieldType::MANIFEST,
|
||||
install_error
|
||||
);
|
||||
|
||||
return genError(install_error);
|
||||
}
|
||||
|
||||
i_orchestration_status->setFieldStatus(
|
||||
OrchestrationStatusFieldType::MANIFEST,
|
||||
OrchestrationStatusResult::SUCCESS
|
||||
);
|
||||
health_check_status_listener.setStatus(
|
||||
HealthCheckStatus::HEALTHY,
|
||||
OrchestrationStatusFieldType::MANIFEST
|
||||
);
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::MANIFEST
|
||||
).notify();
|
||||
|
||||
ifstream restart_watchdog_orch(filesystem_prefix + "/orchestration/restart_watchdog");
|
||||
if (restart_watchdog_orch.good()) {
|
||||
@@ -473,6 +395,13 @@ private:
|
||||
if (!updateFogAddress(policy.getFogAddress())) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to restore the old Fog address.";
|
||||
}
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::POLICY,
|
||||
UpdatesFailureReason::POLICY_FOG_CONFIGURATION,
|
||||
orchestration_policy.getFogAddress(),
|
||||
"Failed to update the new Fog address."
|
||||
).notify();
|
||||
return "";
|
||||
}
|
||||
|
||||
@@ -499,13 +428,19 @@ private:
|
||||
// Handling policy update.
|
||||
dbgInfo(D_ORCHESTRATOR) << "There is a new policy file.";
|
||||
GetResourceFile resource_file(GetResourceFile::ResourceFileType::POLICY);
|
||||
Maybe<string> new_policy_file =
|
||||
Singleton::Consume<I_Downloader>::by<OrchestrationComp>()->downloadFile(
|
||||
Maybe<string> new_policy_file = Singleton::Consume<I_Downloader>::by<OrchestrationComp>()->downloadFile(
|
||||
new_policy.unpack(),
|
||||
I_OrchestrationTools::SELECTED_CHECKSUM_TYPE,
|
||||
resource_file
|
||||
);
|
||||
if (!new_policy_file.ok()) {
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::POLICY,
|
||||
UpdatesFailureReason::DOWNLOAD_FILE,
|
||||
resource_file.getFileName(),
|
||||
new_policy_file.getErr()
|
||||
).notify();
|
||||
return genError("Failed to download the new policy file. Error: " + new_policy_file.getErr());
|
||||
}
|
||||
|
||||
@@ -564,6 +499,13 @@ private:
|
||||
<< LogField("policyVersion", updated_policy_version)
|
||||
<< LogField("previousPolicyVersion", old_policy_version);
|
||||
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::POLICY,
|
||||
UpdatesFailureReason::POLICY_CONFIGURATION,
|
||||
updated_policy_version,
|
||||
res.getErr()
|
||||
).notify();
|
||||
return genError(error_str);
|
||||
}
|
||||
i_service_controller->moveChangedPolicies();
|
||||
@@ -648,6 +590,11 @@ private:
|
||||
"Send policy update report"
|
||||
);
|
||||
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::POLICY
|
||||
).notify();
|
||||
|
||||
dbgInfo(D_ORCHESTRATOR) << "Policy update report was successfully sent to fog";
|
||||
|
||||
return Maybe<void>();
|
||||
@@ -683,10 +630,24 @@ private:
|
||||
);
|
||||
|
||||
if (!new_data_files.ok()) {
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::DATA,
|
||||
UpdatesFailureReason::DOWNLOAD_FILE,
|
||||
resource_file.getFileName(),
|
||||
new_data_files.getErr()
|
||||
).notify();
|
||||
return genError("Failed to download new data file, Error: " + new_data_files.getErr());
|
||||
}
|
||||
auto new_data_file_input = i_orchestration_tools->readFile(new_data_files.unpack());
|
||||
if (!new_data_file_input.ok()) {
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::DATA,
|
||||
UpdatesFailureReason::HANDLE_FILE,
|
||||
resource_file.getFileName(),
|
||||
"Failed to read new data file, Error: " + new_data_file_input.getErr()
|
||||
).notify();
|
||||
return genError("Failed to read new data file, Error: " + new_data_file_input.getErr());
|
||||
}
|
||||
|
||||
@@ -702,21 +663,35 @@ private:
|
||||
<< e.what()
|
||||
<< ". Content: "
|
||||
<< new_data_files.unpack();
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::DATA,
|
||||
UpdatesFailureReason::HANDLE_FILE,
|
||||
new_data_files.unpack(),
|
||||
string("Failed to load data from JSON file, Error: ") + e.what()
|
||||
).notify();
|
||||
return genError(e.what());
|
||||
}
|
||||
|
||||
for (const auto &data_file : parsed_data) {
|
||||
const string data_file_save_path = getPolicyConfigPath(data_file.first, Config::ConfigFileType::Data);
|
||||
Maybe<string> new_data_file =
|
||||
Singleton::Consume<I_Downloader>::by<OrchestrationComp>()->downloadFileFromURL(
|
||||
data_file.second.getDownloadPath(),
|
||||
data_file.second.getChecksum(),
|
||||
I_OrchestrationTools::SELECTED_CHECKSUM_TYPE,
|
||||
"data_" + data_file.first
|
||||
);
|
||||
Singleton::Consume<I_Downloader>::by<OrchestrationComp>()->downloadFileFromURL(
|
||||
data_file.second.getDownloadPath(),
|
||||
data_file.second.getChecksum(),
|
||||
I_OrchestrationTools::SELECTED_CHECKSUM_TYPE,
|
||||
"data_" + data_file.first
|
||||
);
|
||||
|
||||
if (!new_data_file.ok()) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to download the " << data_file.first << " data file.";
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::DATA,
|
||||
UpdatesFailureReason::DOWNLOAD_FILE,
|
||||
data_file.first,
|
||||
new_data_file.getErr()
|
||||
).notify();
|
||||
return new_data_file.passErr();
|
||||
}
|
||||
auto data_new_checksum = getChecksum(new_data_file.unpack());
|
||||
@@ -729,6 +704,16 @@ private:
|
||||
<< data_new_checksum;
|
||||
|
||||
dbgWarning(D_ORCHESTRATOR) << current_error.str();
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::DATA,
|
||||
UpdatesFailureReason::CHECKSUM_UNMATCHED,
|
||||
data_file.first,
|
||||
" Expected checksum: " +
|
||||
data_file.second.getChecksum() +
|
||||
". Downloaded checksum: " +
|
||||
data_new_checksum
|
||||
).notify();
|
||||
return genError(current_error.str());
|
||||
}
|
||||
if (!i_orchestration_tools->copyFile(new_data_file.unpack(), data_file_save_path)) {
|
||||
@@ -741,6 +726,10 @@ private:
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to copy a new agents' data file to " << data_file_path;
|
||||
}
|
||||
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::DATA
|
||||
).notify();
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
@@ -751,8 +740,7 @@ private:
|
||||
|
||||
dbgInfo(D_ORCHESTRATOR) << "There is a new settings file.";
|
||||
GetResourceFile resource_file(GetResourceFile::ResourceFileType::SETTINGS);
|
||||
Maybe<string> new_settings_file =
|
||||
Singleton::Consume<I_Downloader>::by<OrchestrationComp>()->downloadFile(
|
||||
Maybe<string> new_settings_file = Singleton::Consume<I_Downloader>::by<OrchestrationComp>()->downloadFile(
|
||||
orch_settings.unpack(),
|
||||
I_OrchestrationTools::SELECTED_CHECKSUM_TYPE,
|
||||
resource_file
|
||||
@@ -762,6 +750,13 @@ private:
|
||||
dbgWarning(D_ORCHESTRATOR)
|
||||
<< "Failed to download the new settings file. Error: "
|
||||
<< new_settings_file.getErr();
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::SETTINGS,
|
||||
UpdatesFailureReason::DOWNLOAD_FILE,
|
||||
resource_file.getFileName(),
|
||||
new_settings_file.getErr()
|
||||
).notify();
|
||||
return genError("Failed to download the new settings file. Error: " + new_settings_file.getErr());
|
||||
}
|
||||
|
||||
@@ -769,6 +764,10 @@ private:
|
||||
if (res.ok()) {
|
||||
settings_file_path = *res;
|
||||
reloadConfiguration();
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::SETTINGS
|
||||
).notify();
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
@@ -877,11 +876,13 @@ private:
|
||||
|
||||
if (!response.ok()) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to get the update. Error: " << response.getErr();
|
||||
i_orchestration_status->setFieldStatus(
|
||||
OrchestrationStatusFieldType::LAST_UPDATE,
|
||||
OrchestrationStatusResult::FAILED,
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::GENERAL,
|
||||
UpdatesFailureReason::GET_UPDATE_REQUEST,
|
||||
"",
|
||||
"Warning: Agent/Gateway failed during the update process. Contact Check Point support."
|
||||
);
|
||||
).notify();
|
||||
|
||||
return genError(response.getErr());
|
||||
}
|
||||
@@ -924,10 +925,10 @@ private:
|
||||
OrchSettings orch_settings = response.getSettings();
|
||||
OrchData orch_data = response.getData();
|
||||
|
||||
i_orchestration_status->setFieldStatus(
|
||||
OrchestrationStatusFieldType::LAST_UPDATE,
|
||||
OrchestrationStatusResult::SUCCESS
|
||||
);
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::GENERAL
|
||||
).notify();
|
||||
i_orchestration_status->setIsConfigurationUpdated(
|
||||
EnumArray<OrchestrationStatusConfigType, bool>(
|
||||
orch_manifest.ok(), orch_policy.ok(), orch_settings.ok(), orch_data.ok()
|
||||
@@ -1017,6 +1018,10 @@ private:
|
||||
}
|
||||
|
||||
if (maybe_errors != "") return genError(maybe_errors);
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::GENERAL
|
||||
).notify();
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
@@ -1196,6 +1201,13 @@ private:
|
||||
dbgTrace(D_ORCHESTRATOR) << "The settings directory is " << settings_file_path;
|
||||
if (!i_orchestration_tools->copyFile(new_settings_file, settings_file_path)) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to update the settings.";
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::SETTINGS,
|
||||
UpdatesFailureReason::HANDLE_FILE,
|
||||
settings_file_path,
|
||||
"Failed to update the settings"
|
||||
).notify();
|
||||
return genError("Failed to update the settings");
|
||||
}
|
||||
|
||||
@@ -1290,6 +1302,23 @@ private:
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
reportCloudMetadata(AgentDataReport &report)
|
||||
{
|
||||
I_DetailsResolver *i_details_resolver = Singleton::Consume<I_DetailsResolver>::by<OrchestrationComp>();
|
||||
auto cloud_metadata = i_details_resolver->readCloudMetadata();
|
||||
if (!cloud_metadata.ok()) {
|
||||
dbgDebug(D_ORCHESTRATOR) << cloud_metadata.getErr();
|
||||
return;
|
||||
}
|
||||
|
||||
report << make_pair("cloudAccountId", ::get<0>(cloud_metadata.unpack()));
|
||||
report << make_pair("cloudVpcId", ::get<1>(cloud_metadata.unpack()));
|
||||
report << make_pair("cloudInstanceId", ::get<2>(cloud_metadata.unpack()));
|
||||
report << make_pair("cloudInstanceLocalIp", ::get<3>(cloud_metadata.unpack()));
|
||||
report << make_pair("cloudRegion", ::get<4>(cloud_metadata.unpack()));
|
||||
}
|
||||
|
||||
void
|
||||
reportAgentDetailsMetaData()
|
||||
{
|
||||
@@ -1335,6 +1364,8 @@ private:
|
||||
agent_data_report << AgentReportFieldWithLabel("cloud_storage_service", "false");
|
||||
}
|
||||
|
||||
reportCloudMetadata(agent_data_report);
|
||||
|
||||
if (i_details_resolver->isKernelVersion3OrHigher()) {
|
||||
agent_data_report << AgentReportFieldWithLabel("isKernelVersion3OrHigher", "true");
|
||||
}
|
||||
@@ -1426,20 +1457,24 @@ private:
|
||||
<< check_update_result.getErr()
|
||||
<< ", new check will be every: "
|
||||
<< sleep_interval << " seconds";
|
||||
|
||||
health_check_status_listener.setStatus(
|
||||
HealthCheckStatus::UNHEALTHY,
|
||||
OrchestrationStatusFieldType::LAST_UPDATE,
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::GENERAL,
|
||||
UpdatesFailureReason::CHECK_UPDATE,
|
||||
"",
|
||||
"Failed during check update. Error: " + check_update_result.getErr()
|
||||
);
|
||||
).notify();
|
||||
return;
|
||||
}
|
||||
failure_count = 0;
|
||||
dbgDebug(D_ORCHESTRATOR) << "Check update process completed successfully";
|
||||
health_check_status_listener.setStatus(
|
||||
HealthCheckStatus::HEALTHY,
|
||||
OrchestrationStatusFieldType::LAST_UPDATE
|
||||
);
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::GENERAL,
|
||||
UpdatesFailureReason::CHECK_UPDATE,
"",
"Check update process succeeded!"
|
||||
).notify();
|
||||
sleep_interval = policy.getSleepInterval();
|
||||
if (!is_new_success) {
|
||||
dbgInfo(D_ORCHESTRATOR)
|
||||
@@ -1474,11 +1509,13 @@ private:
|
||||
sleep_interval = policy.getErrorSleepInterval();
|
||||
Maybe<void> registration_status(genError("Not running yet."));
|
||||
while (!(registration_status = registerToTheFog()).ok()) {
|
||||
health_check_status_listener.setStatus(
|
||||
HealthCheckStatus::UNHEALTHY,
|
||||
OrchestrationStatusFieldType::REGISTRATION,
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::GENERAL,
|
||||
UpdatesFailureReason::REGISTRATION,
|
||||
"",
|
||||
registration_status.getErr()
|
||||
);
|
||||
).notify();
|
||||
sleep_interval = getConfigurationWithDefault<int>(
|
||||
30,
|
||||
"orchestration",
|
||||
@@ -1498,10 +1535,11 @@ private:
|
||||
|
||||
Singleton::Consume<I_MainLoop>::by<OrchestrationComp>()->yield(chrono::seconds(1));
|
||||
|
||||
health_check_status_listener.setStatus(
|
||||
HealthCheckStatus::HEALTHY,
|
||||
OrchestrationStatusFieldType::REGISTRATION
|
||||
);
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::GENERAL,
|
||||
UpdatesFailureReason::REGISTRATION
|
||||
).notify();
|
||||
|
||||
LogGen(
|
||||
"Check Point Orchestration nano service successfully started",
|
||||
@@ -1535,16 +1573,18 @@ private:
|
||||
if (!Singleton::Consume<I_ManifestController>::by<OrchestrationComp>()->loadAfterSelfUpdate()) {
|
||||
// Should restore from backup
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to load Orchestration after self-update";
|
||||
health_check_status_listener.setStatus(
|
||||
HealthCheckStatus::UNHEALTHY,
|
||||
OrchestrationStatusFieldType::LAST_UPDATE,
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::GENERAL,
|
||||
UpdatesFailureReason::ORCHESTRATION_SELF_UPDATE,
|
||||
"",
|
||||
"Failed to load Orchestration after self-update"
|
||||
);
|
||||
).notify();
|
||||
} else {
|
||||
health_check_status_listener.setStatus(
|
||||
HealthCheckStatus::HEALTHY,
|
||||
OrchestrationStatusFieldType::MANIFEST
|
||||
);
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::SUCCESS,
|
||||
UpdatesConfigType::MANIFEST
|
||||
).notify();
|
||||
}
|
||||
|
||||
setUpgradeTime();
|
||||
@@ -1894,7 +1934,7 @@ private:
|
||||
ReportIS::Audience::INTERNAL
|
||||
);
|
||||
hybrid_mode_metric.registerListener();
|
||||
health_check_status_listener.registerListener();
|
||||
updates_process_reporter_listener.registerListener();
|
||||
}
|
||||
|
||||
void
|
||||
@@ -2007,7 +2047,7 @@ private:
|
||||
unsigned int sleep_interval = 0;
|
||||
bool is_new_success = false;
|
||||
OrchestrationPolicy policy;
|
||||
HealthCheckStatusListener health_check_status_listener;
|
||||
UpdatesProcessReporter updates_process_reporter_listener;
|
||||
HybridModeMetric hybrid_mode_metric;
|
||||
EnvDetails env_details;
|
||||
chrono::minutes upgrade_delay_time;
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include "cereal/types/set.hpp"
|
||||
#include "agent_core_utilities.h"
|
||||
#include "namespace_data.h"
|
||||
#include "updates_process_event.h"
|
||||
|
||||
#include <netdb.h>
|
||||
#include <arpa/inet.h>
|
||||
@@ -469,6 +470,13 @@ OrchestrationTools::Impl::packagesToJsonFile(const map<packageName, Package> &pa
|
||||
archive_out(cereal::make_nvp("packages", packges_vector));
|
||||
} catch (cereal::Exception &e) {
|
||||
dbgDebug(D_ORCHESTRATOR) << "Failed to write vector of packages to JSON file " << path << ", " << e.what();
|
||||
UpdatesProcessEvent(
|
||||
UpdatesProcessResult::FAILED,
|
||||
UpdatesConfigType::MANIFEST,
|
||||
UpdatesFailureReason::HANDLE_FILE,
|
||||
path,
|
||||
string("Failed to write vector of packages to JSON file. Error: ") + e.what()
|
||||
).notify();
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
||||
@@ -88,7 +88,7 @@ public:
|
||||
// This Holding the Main Routine of the Orchestration.
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Orchestration runner", true)
|
||||
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Orchestration runner", true)
|
||||
).WillOnce(DoAll(SaveArg<1>(&routine), Return(1)));
|
||||
|
||||
EXPECT_CALL(mock_shell_cmd, getExecOutput("openssl version -d | cut -d\" \" -f2 | cut -d\"\\\"\" -f2", _, _))
|
||||
@@ -143,6 +143,9 @@ public:
|
||||
|
||||
map<string, string> resolved_mgmt_details({{"kernel_version", "4.4.0-87-generic"}});
|
||||
EXPECT_CALL(mock_details_resolver, getResolvedDetails()).WillRepeatedly(Return(resolved_mgmt_details));
|
||||
EXPECT_CALL(mock_details_resolver, readCloudMetadata()).WillRepeatedly(
|
||||
Return(Maybe<tuple<string, string, string, string, string>>(genError("No cloud metadata")))
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -281,7 +284,7 @@ TEST_F(OrchestrationMultitenancyTest, handle_virtual_resource)
|
||||
EXPECT_CALL(mock_service_controller, getPolicyVersion())
|
||||
.Times(3).WillRepeatedly(ReturnRef(first_policy_version));
|
||||
|
||||
map<string, PortNumber> empty_service_to_port_map;
|
||||
map<string, vector<PortNumber>> empty_service_to_port_map;
|
||||
EXPECT_CALL(mock_service_controller, getServiceToPortMap()).WillRepeatedly(Return(empty_service_to_port_map));
|
||||
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#include "agent_details.h"
|
||||
#include "customized_cereal_map.h"
|
||||
#include "health_check_status/health_check_status.h"
|
||||
#include "updates_process_event.h"
|
||||
#include "declarative_policy_utils.h"
|
||||
|
||||
using namespace testing;
|
||||
@@ -79,7 +80,7 @@ public:
|
||||
EXPECT_CALL(mock_orchestration_tools, setClusterId());
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Orchestration runner", true)
|
||||
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Orchestration runner", true)
|
||||
).WillOnce(DoAll(SaveArg<1>(&routine), Return(1)));
|
||||
|
||||
EXPECT_CALL(
|
||||
@@ -99,7 +100,7 @@ public:
|
||||
)
|
||||
);
|
||||
|
||||
map<string, PortNumber> empty_service_to_port_map;
|
||||
map<string, vector<PortNumber>> empty_service_to_port_map;
|
||||
EXPECT_CALL(mock_service_controller, getServiceToPortMap()).WillRepeatedly(Return(empty_service_to_port_map));
|
||||
|
||||
EXPECT_CALL(rest, mockRestCall(RestAction::SHOW, "orchestration-status", _)).WillOnce(
|
||||
@@ -170,6 +171,9 @@ public:
|
||||
|
||||
map<string, string> resolved_mgmt_details({{"kernel_version", "4.4.0-87-generic"}});
|
||||
EXPECT_CALL(mock_details_resolver, getResolvedDetails()).WillRepeatedly(Return(resolved_mgmt_details));
|
||||
EXPECT_CALL(mock_details_resolver, readCloudMetadata()).WillRepeatedly(
|
||||
Return(Maybe<tuple<string, string, string, string, string>>(genError("No cloud metadata")))
|
||||
);
|
||||
}
|
||||
|
||||
string
|
||||
@@ -355,6 +359,7 @@ private:
|
||||
TEST_F(OrchestrationTest, hybridModeRegisterLocalAgentRoutine)
|
||||
{
|
||||
EXPECT_CALL(rest, mockRestCall(_, _, _)).WillRepeatedly(Return(true));
|
||||
|
||||
Singleton::Consume<Config::I_Config>::from(config_comp)->loadConfiguration(
|
||||
vector<string>{"--orchestration-mode=hybrid_mode"}
|
||||
);
|
||||
@@ -373,7 +378,6 @@ TEST_F(OrchestrationTest, hybridModeRegisterLocalAgentRoutine)
|
||||
expectDetailsResolver();
|
||||
EXPECT_CALL(mock_update_communication, getUpdate(_));
|
||||
EXPECT_CALL(mock_status, setLastUpdateAttempt());
|
||||
EXPECT_CALL(mock_status, setFieldStatus(_, _, _));
|
||||
EXPECT_CALL(mock_status, setIsConfigurationUpdated(_));
|
||||
|
||||
EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>()))
|
||||
@@ -581,7 +585,6 @@ TEST_F(OrchestrationTest, check_sending_registration_data)
|
||||
expectDetailsResolver();
|
||||
EXPECT_CALL(mock_update_communication, getUpdate(_));
|
||||
EXPECT_CALL(mock_status, setLastUpdateAttempt());
|
||||
EXPECT_CALL(mock_status, setFieldStatus(_, _, _));
|
||||
EXPECT_CALL(mock_status, setIsConfigurationUpdated(_));
|
||||
|
||||
EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>()))
|
||||
@@ -758,10 +761,6 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdatRollback)
|
||||
).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_update_communication, setAddressExtenesion("/test"));
|
||||
EXPECT_CALL(mock_status, setLastUpdateAttempt());
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "")
|
||||
);
|
||||
EXPECT_CALL(mock_status, setIsConfigurationUpdated(A<EnumArray<OrchestrationStatusConfigType, bool>>())
|
||||
).WillOnce(
|
||||
Invoke(
|
||||
@@ -937,10 +936,6 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdate)
|
||||
).WillOnce(Return(true));
|
||||
EXPECT_CALL(mock_update_communication, setAddressExtenesion("/test"));
|
||||
EXPECT_CALL(mock_status, setLastUpdateAttempt());
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "")
|
||||
);
|
||||
EXPECT_CALL(mock_status, setIsConfigurationUpdated(A<EnumArray<OrchestrationStatusConfigType, bool>>())
|
||||
).WillOnce(
|
||||
Invoke(
|
||||
@@ -990,7 +985,7 @@ TEST_F(OrchestrationTest, loadOrchestrationPolicyFromBackup)
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Orchestration runner", true)
|
||||
addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Orchestration runner", true)
|
||||
);
|
||||
|
||||
EXPECT_CALL(
|
||||
@@ -1010,7 +1005,7 @@ TEST_F(OrchestrationTest, loadOrchestrationPolicyFromBackup)
|
||||
)
|
||||
);
|
||||
|
||||
map<string, PortNumber> empty_service_to_port_map;
|
||||
map<string, vector<PortNumber>> empty_service_to_port_map;
|
||||
EXPECT_CALL(mock_service_controller, getServiceToPortMap()).WillRepeatedly(Return(empty_service_to_port_map));
|
||||
|
||||
EXPECT_CALL(rest, mockRestCall(RestAction::SHOW, "orchestration-status", _));
|
||||
@@ -1105,14 +1100,6 @@ TEST_F(OrchestrationTest, manifestUpdate)
|
||||
);
|
||||
|
||||
EXPECT_CALL(mock_status, setLastUpdateAttempt());
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "")
|
||||
);
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::SUCCESS, "")
|
||||
);
|
||||
EXPECT_CALL(mock_status, setIsConfigurationUpdated(A<EnumArray<OrchestrationStatusConfigType, bool>>())
|
||||
).WillOnce(
|
||||
Invoke(
|
||||
@@ -1234,10 +1221,6 @@ TEST_F(OrchestrationTest, getBadPolicyUpdate)
|
||||
.WillOnce(ReturnRef(second_val)
|
||||
);
|
||||
EXPECT_CALL(mock_status, setLastUpdateAttempt());
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "")
|
||||
);
|
||||
EXPECT_CALL(mock_status, setIsConfigurationUpdated(A<EnumArray<OrchestrationStatusConfigType, bool>>())
|
||||
).WillOnce(
|
||||
Invoke(
|
||||
@@ -1268,7 +1251,7 @@ TEST_F(OrchestrationTest, getBadPolicyUpdate)
|
||||
EXPECT_CALL(
|
||||
mock_service_controller,
|
||||
updateServiceConfiguration(string("policy path"), "", expected_data_types, "", "", _)
|
||||
).WillOnce(Return(Maybe<void>(genError(string("")))));
|
||||
).WillOnce(Return(Maybe<void>(genError(string("Fail to load policy")))));
|
||||
|
||||
EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>()))
|
||||
.WillOnce(
|
||||
@@ -1325,6 +1308,7 @@ TEST_F(OrchestrationTest, failedDownloadSettings)
|
||||
|
||||
EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
|
||||
expectDetailsResolver();
|
||||
|
||||
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
|
||||
EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path))
|
||||
.WillOnce(Return(manifest_checksum));
|
||||
@@ -1356,22 +1340,10 @@ TEST_F(OrchestrationTest, failedDownloadSettings)
|
||||
);
|
||||
|
||||
EXPECT_CALL(mock_status, setLastUpdateAttempt());
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "")
|
||||
).Times(1);
|
||||
|
||||
string manifest_err =
|
||||
"Critical Error: Agent/Gateway was not fully deployed on host 'hostname' "
|
||||
"and is not enforcing a security policy. Retry installation or contact Check Point support.";
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(
|
||||
OrchestrationStatusFieldType::MANIFEST,
|
||||
OrchestrationStatusResult::FAILED,
|
||||
manifest_err
|
||||
)
|
||||
).Times(1);
|
||||
EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(manifest_err));
|
||||
|
||||
EXPECT_CALL(mock_status, setIsConfigurationUpdated(A<EnumArray<OrchestrationStatusConfigType, bool>>())
|
||||
@@ -1472,10 +1444,6 @@ TEST_P(OrchestrationTest, orchestrationFirstRun)
|
||||
.WillOnce(Return(data_checksum));
|
||||
|
||||
EXPECT_CALL(mock_status, setLastUpdateAttempt());
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "")
|
||||
);
|
||||
EXPECT_CALL(mock_status, setIsConfigurationUpdated(A<EnumArray<OrchestrationStatusConfigType, bool>>())
|
||||
).WillOnce(
|
||||
Invoke(
|
||||
@@ -1534,23 +1502,6 @@ TEST_P(OrchestrationTest, orchestrationFirstRun)
|
||||
} catch (const invalid_argument& e) {}
|
||||
EXPECT_CALL(mock_status, writeStatusToFile());
|
||||
|
||||
vector<HealthCheckStatusReply> reply;
|
||||
bool is_named_query = GetParam();
|
||||
if (is_named_query) {
|
||||
auto all_comps_status_reply = HealthCheckStatusEvent().performNamedQuery();
|
||||
for (auto &elem : all_comps_status_reply) {
|
||||
reply.push_back(elem.second);
|
||||
}
|
||||
} else {
|
||||
reply = HealthCheckStatusEvent().query();
|
||||
}
|
||||
|
||||
ASSERT_EQ(reply.size(), 1);
|
||||
EXPECT_EQ(reply[0].getCompName(), "Orchestration");
|
||||
EXPECT_EQ(reply[0].getStatus(), HealthCheckStatus::HEALTHY);
|
||||
|
||||
HealthCheckStatusEvent().notify();
|
||||
|
||||
orchestration_comp.fini();
|
||||
}
|
||||
|
||||
@@ -1718,10 +1669,6 @@ TEST_F(OrchestrationTest, dataUpdate)
|
||||
);
|
||||
|
||||
EXPECT_CALL(mock_status, setLastUpdateAttempt());
|
||||
EXPECT_CALL(
|
||||
mock_status,
|
||||
setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "")
|
||||
);
|
||||
EXPECT_CALL(mock_status, setIsConfigurationUpdated(A<EnumArray<OrchestrationStatusConfigType, bool>>())
|
||||
).WillOnce(
|
||||
Invoke(
|
||||
|
||||
@@ -333,7 +333,7 @@ private:

ReconfStatus getUpdatedReconfStatus();
Maybe<ServiceDetails> getServiceDetails(const string &service_name);
map<string, PortNumber> getServiceToPortMap();
map<string, vector<PortNumber>> getServiceToPortMap();

template<class Archive>
void serializeRegisterServices(Archive &ar) { ar(pending_services); }
@@ -358,6 +358,7 @@ private:
string filesystem_prefix;
bool is_multi_tenant_env = false;
set<string> changed_policy_files;
ServiceDetails orchestration_service_details;

I_OrchestrationTools *orchestration_tools = nullptr;
I_MainLoop *mainloop = nullptr;
@@ -374,8 +375,13 @@ public:
for (auto const& entry: ports_map) {
string service = entry.first;
replace(service.begin(), service.end(), ' ', '-');
output << service << ":";
output << entry.second << ",";
output << service;
char delim = ':';
for (PortNumber port : entry.second) {
output << delim << port;
delim = ',';
}
output << ";";
}
ports_list = output.str();
}
@@ -500,6 +506,7 @@ ServiceController::Impl::loadRegisteredServicesFromFile()
stringstream ss(maybe_registered_services_str.unpack());
cereal::JSONInputArchive ar(ss);
ar(cereal::make_nvp("Registered Services", pending_services));
pending_services.erase("cp-nano-orchestration");

dbgInfo(D_SERVICE_CONTROLLER)
<< "Orchestration pending services loaded from file."
@@ -529,16 +536,24 @@ ServiceController::Impl::writeRegisteredServicesToFile()
"Orchestration registered services"
);

map<string, ServiceDetails> registered_services_with_orch = registered_services;
if (orchestration_service_details.getServiceID() != "") {
registered_services_with_orch.emplace(
orchestration_service_details.getServiceID(),
orchestration_service_details
);
}

ofstream ss(registered_services_file);
cereal::JSONOutputArchive ar(ss);
ar(cereal::make_nvp("Registered Services", registered_services));
ar(cereal::make_nvp("Registered Services", registered_services_with_orch));

dbgInfo(D_SERVICE_CONTROLLER)
<< "Orchestration registered services file has been updated. File: "
<< registered_services_file
<< ". Registered Services:";

for (const auto &id_service_pair : registered_services) {
for (const auto &id_service_pair : registered_services_with_orch) {
const auto &service = id_service_pair.second;
dbgInfo(D_SERVICE_CONTROLLER)
<< "Service name: "
@@ -591,20 +606,20 @@ ServiceController::Impl::cleanUpVirtualFiles()
}
}

map<string, PortNumber>
map<string, vector<PortNumber>>
ServiceController::Impl::getServiceToPortMap()
{
map<string, PortNumber> ports_map;
map<string, vector<PortNumber>> ports_map;
for (auto const& entry: registered_services) {
const string &service = entry.first;
const string &service = entry.second.getServiceName();
PortNumber port = entry.second.getPort();
ports_map[service] = port;
ports_map[service].push_back(port);
}

for (auto const& entry: pending_services) {
const string &service = entry.first;
const string &service = entry.second.getServiceName();
PortNumber port = entry.second.getPort();
ports_map[service] = port;
ports_map[service].push_back(port);
}

return ports_map;
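The two hunks above switch the service-to-port mapping from a single PortNumber per service to a vector of ports, and rework the string returned by the ports REST call into the form "service:port1,port2;". The following standalone sketch reproduces that formatting logic for illustration only; it is not repository code, and it assumes PortNumber is a 16-bit unsigned integer.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

using PortNumber = uint16_t; // assumption: PortNumber is a 16-bit unsigned integer

// Builds "service:port1,port2;other-service:port3;" as in the hunk above.
static std::string
formatPortsList(const std::map<std::string, std::vector<PortNumber>> &ports_map)
{
    std::ostringstream output;
    for (const auto &entry : ports_map) {
        std::string service = entry.first;
        std::replace(service.begin(), service.end(), ' ', '-'); // service names must not contain spaces
        output << service;
        char delim = ':';
        for (PortNumber port : entry.second) {
            output << delim << port;
            delim = ',';
        }
        output << ";";
    }
    return output.str();
}

int main()
{
    std::map<std::string, std::vector<PortNumber>> ports = {
        {"cp-nano-orchestration", {7777}},
        {"mock access control", {8888, 1111}}
    };
    // Prints: cp-nano-orchestration:7777;mock-access-control:8888,1111;
    std::cout << formatPortsList(ports) << std::endl;
    return 0;
}
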
@@ -624,6 +639,12 @@ ServiceController::Impl::registerServiceConfig(
|
||||
service_id
|
||||
);
|
||||
|
||||
if (service_name == "cp-nano-orchestration") {
|
||||
dbgTrace(D_SERVICE_CONTROLLER) << "Save the orchestration service details";
|
||||
orchestration_service_details = service_config;
|
||||
return;
|
||||
}
|
||||
|
||||
pending_services.erase(service_config.getServiceID());
|
||||
pending_services.insert({service_config.getServiceID(), service_config});
|
||||
refreshPendingServices();
|
||||
|
||||
@@ -178,16 +178,17 @@ public:
|
||||
void
|
||||
expectNewConfigRequest(const string &response)
|
||||
{
|
||||
Maybe<HTTPResponse, HTTPResponse> res = HTTPResponse(HTTPStatusCode::HTTP_OK, response);
|
||||
EXPECT_CALL(
|
||||
mock_message,
|
||||
sendSyncMessage(
|
||||
HTTPMethod::POST,
|
||||
"/set-new-configuration",
|
||||
HasSubstr("1.0.2"),
|
||||
_,
|
||||
_,
|
||||
_
|
||||
)
|
||||
).WillOnce(Return(HTTPResponse(HTTPStatusCode::HTTP_OK, response)));
|
||||
).WillOnce(DoAll(SaveArg<2>(&version_body), Return(res)));
|
||||
}
|
||||
|
||||
CPTestTempfile status_file;
|
||||
@@ -196,6 +197,7 @@ public:
|
||||
::Environment env;
|
||||
ConfigComponent config;
|
||||
DeclarativePolicyUtils declarative_policy_utils;
|
||||
string version_body;
|
||||
string configuration_dir;
|
||||
string policy_extension;
|
||||
string settings_extension;
|
||||
@@ -229,19 +231,21 @@ public:
|
||||
string old_version = "1.0.1";
|
||||
|
||||
string versions =
|
||||
"["
|
||||
" {"
|
||||
" \"id\": \"d8c3cc3c-f9df-83c8-f875-322dd8a0c161\","
|
||||
" \"name\": \"Linux Embedded Agents\","
|
||||
" \"version\": \"1.0.2\""
|
||||
" }"
|
||||
"[\n"
|
||||
" {\n"
|
||||
" \"id\": \"d8c3cc3c-f9df-83c8-f875-322dd8a0c161\",\n"
|
||||
" \"name\": \"Linux Embedded Agents\",\n"
|
||||
" \"version\": \"1.0.2\",\n"
|
||||
" \"profileType\": \"Embedded\"\n"
|
||||
" }\n"
|
||||
"]";
|
||||
string old_versions =
|
||||
"["
|
||||
" {"
|
||||
" \"id\": \"d8c3cc3c-f9df-83c8-f875-322dd8a0c161\","
|
||||
" \"name\": \"Linux Embedded Agents\","
|
||||
" \"version\": \"1.0.1\""
|
||||
" \"version\": \"1.0.1\","
|
||||
" \"profileType\": \"Embedded\""
|
||||
" }"
|
||||
"]";
|
||||
|
||||
@@ -338,6 +342,23 @@ TEST_F(ServiceControllerTest, UpdateConfiguration)
|
||||
EXPECT_EQ(i_service_controller->getPolicyVersion(), version_value);
|
||||
EXPECT_EQ(i_service_controller->getPolicyVersions(), versions);
|
||||
EXPECT_EQ(i_service_controller->getUpdatePolicyVersion(), version_value);
|
||||
|
||||
stringstream ver_ss;
|
||||
ver_ss
|
||||
<< "{\n"
|
||||
<< " \"id\": 1,\n"
|
||||
<< " \"policy_version\": \"1.0.2,[\\n"
|
||||
<< " {\\n"
|
||||
<< " \\\"id\\\": \\\"d8c3cc3c-f9df-83c8-f875-322dd8a0c161\\\",\\n"
|
||||
<< " \\\"name\\\": \\\"Linux Embedded Agents\\\",\\n"
|
||||
<< " \\\"version\\\": \\\"1.0.2\\\",\\n"
|
||||
<< " \\\"profileType\\\": \\\"Embedded\\\"\\n"
|
||||
<< " }\\n"
|
||||
<< "]\"\n}";
|
||||
EXPECT_EQ(
|
||||
version_body,
|
||||
ver_ss.str()
|
||||
);
|
||||
}
|
||||
|
||||
TEST_F(ServiceControllerTest, supportVersions)
|
||||
@@ -527,13 +548,13 @@ TEST_F(ServiceControllerTest, TimeOutUpdateConfiguration)
|
||||
TEST_F(ServiceControllerTest, readRegisteredServicesFromFile)
|
||||
{
|
||||
init();
|
||||
int family1_id3_port = 1111;
|
||||
uint16_t family1_id3_port = 1111;
|
||||
string registered_services_json = "{\n"
|
||||
" \"Registered Services\": {\n"
|
||||
" \"family1_id3\": {\n"
|
||||
" \"Service name\": \"mock access control\",\n"
|
||||
" \"Service ID\": \"family1_id3\",\n"
|
||||
" \"Service port\": 1111,\n"
|
||||
" \"Service port\": " + to_string(family1_id3_port) + ",\n"
|
||||
" \"Relevant configs\": [\n"
|
||||
" \"non updated capability\",\n"
|
||||
" \"l4_firewall\"\n"
|
||||
@@ -573,7 +594,8 @@ TEST_F(ServiceControllerTest, readRegisteredServicesFromFile)
|
||||
service_controller.init();
|
||||
|
||||
auto services_to_port_map = i_service_controller->getServiceToPortMap();
|
||||
EXPECT_EQ(services_to_port_map.find("family1_id3")->second, family1_id3_port);
|
||||
vector<PortNumber> ports = {l4_firewall_service_port, family1_id3_port};
|
||||
EXPECT_EQ(services_to_port_map.find("mock access control")->second, ports);
|
||||
}
|
||||
|
||||
TEST_F(ServiceControllerTest, noPolicyUpdate)
|
||||
@@ -1589,7 +1611,7 @@ TEST_F(ServiceControllerTest, testPortsRest)
|
||||
empty_json << "{}";
|
||||
auto res = get_services_ports->performRestCall(empty_json);
|
||||
ASSERT_TRUE(res.ok());
|
||||
EXPECT_THAT(res.unpack(), HasSubstr("family1_id2:8888"));
|
||||
EXPECT_THAT(res.unpack(), HasSubstr("mock-access-control:8888;"));
|
||||
}
|
||||
|
||||
TEST_F(ServiceControllerTest, testMultitenantConfFiles)
|
||||
|
||||
@@ -141,7 +141,7 @@ DeclarativePolicyUtils::sendUpdatesToFog(
|
||||
auto shell_cmd = Singleton::Consume<I_ShellCmd>::by<DeclarativePolicyUtils>();
|
||||
string exec_command =
|
||||
getFilesystemPathConfig()
|
||||
+ "/scripts/open-appsec-cloud-mgmt --upload_policy_only"
|
||||
+ "/scripts/open-appsec-cloud-mgmt --config-upload-only"
|
||||
+ " --access_token " + access_token
|
||||
+ " --tenant_id " + tenant_id
|
||||
+ " --profile_id " + profile_id;
|
||||
|
||||
@@ -179,6 +179,17 @@ FogAuthenticator::registerAgent(
|
||||
dbgDebug(D_ORCHESTRATOR) << nginx_data.getErr();
|
||||
}
|
||||
|
||||
auto cloud_metadata = details_resolver->readCloudMetadata();
|
||||
if (cloud_metadata.ok()) {
|
||||
request << make_pair("cloudAccountId", ::get<0>(cloud_metadata.unpack()));
|
||||
request << make_pair("cloudVpcId", ::get<1>(cloud_metadata.unpack()));
|
||||
request << make_pair("cloudInstanceId", ::get<2>(cloud_metadata.unpack()));
|
||||
request << make_pair("cloudInstanceLocalIp", ::get<3>(cloud_metadata.unpack()));
|
||||
request << make_pair("cloudRegion", ::get<4>(cloud_metadata.unpack()));
|
||||
} else {
|
||||
dbgDebug(D_ORCHESTRATOR) << cloud_metadata.getErr();
|
||||
}
|
||||
|
||||
for (const pair<string, string> details : details_resolver->getResolvedDetails()) {
|
||||
request << details;
|
||||
}
|
||||
@@ -450,9 +461,9 @@ getDeplymentType()
|
||||
auto deplyment_type = Singleton::Consume<I_EnvDetails>::by<FogAuthenticator>()->getEnvType();
|
||||
switch (deplyment_type) {
|
||||
case EnvType::LINUX: return "Embedded";
|
||||
case EnvType::DOCKER: return "Embedded";
|
||||
case EnvType::DOCKER: return "Docker";
|
||||
case EnvType::NON_CRD_K8S:
|
||||
case EnvType::K8S: return "Embedded";
|
||||
case EnvType::K8S: return "K8S";
|
||||
case EnvType::COUNT: break;
|
||||
}
|
||||
|
||||
@@ -579,7 +590,7 @@ FogAuthenticator::authenticateAgent()
|
||||
auto mainloop = Singleton::Consume<I_MainLoop>::by<FogAuthenticator>();
|
||||
if (!mainloop->doesRoutineExist(routine)) {
|
||||
routine = mainloop->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::RealTime,
|
||||
I_MainLoop::RoutineType::System,
|
||||
[this, min_expiration_time] ()
|
||||
{
|
||||
uint expiration_time;
|
||||
|
||||
@@ -32,6 +32,7 @@ FogCommunication::init()
|
||||
{
|
||||
FogAuthenticator::init();
|
||||
i_declarative_policy = Singleton::Consume<I_DeclarativePolicy>::from<DeclarativePolicyUtils>();
|
||||
profile_mode = getSettingWithDefault<string>("management", "profileManagedMode");
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
@@ -66,6 +67,16 @@ FogCommunication::getUpdate(CheckUpdateRequest &request)
|
||||
Maybe<string> maybe_new_data = request.getData();
|
||||
string data_checksum = maybe_new_data.ok() ? maybe_new_data.unpack() : "";
|
||||
|
||||
if (profile_mode != policy_mgmt_mode) {
|
||||
dbgTrace(D_ORCHESTRATOR)
|
||||
<< "The profile managed mode was changed from: "
|
||||
<< profile_mode
|
||||
<< " to: "
|
||||
<< policy_mgmt_mode;
|
||||
profile_mode = policy_mgmt_mode;
|
||||
i_declarative_policy->turnOnApplyPolicyFlag();
|
||||
}
|
||||
|
||||
if (i_declarative_policy->shouldApplyPolicy()) {
|
||||
string policy_response = i_declarative_policy->getUpdate(request);
|
||||
if (!policy_response.empty()) {
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_ORCHESTRATOR);
|
||||
|
||||
class UpdateCommunication::Impl
|
||||
:
|
||||
public ServerRest,
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
add_library(updates_process_reporter updates_process_event.cc updates_process_reporter.cc)
|
||||
@@ -0,0 +1,124 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "updates_process_event.h"
|
||||
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
|
||||
#include "debug.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_UPDATES_PROCESS_REPORTER);
|
||||
|
||||
UpdatesProcessEvent::UpdatesProcessEvent(
|
||||
UpdatesProcessResult _result,
|
||||
UpdatesConfigType _type,
|
||||
UpdatesFailureReason _reason,
|
||||
const std::string &_detail,
|
||||
const std::string &_description)
|
||||
:
|
||||
result(_result),
|
||||
type(_type),
|
||||
reason(_reason),
|
||||
detail(_detail),
|
||||
description(_description)
|
||||
{
|
||||
string report =
|
||||
"Result: " + convertUpdateProcessResultToStr(result) +
|
||||
", Reason: " + convertUpdatesFailureReasonToStr(reason) +
|
||||
", Type: " + convertUpdatesConfigTypeToStr(type) +
|
||||
", Detail: " + detail +
|
||||
", Description: " + description;
|
||||
dbgTrace(D_UPDATES_PROCESS_REPORTER) << "Updates process event: " << report;
|
||||
}
|
||||
|
||||
OrchestrationStatusFieldType
|
||||
UpdatesProcessEvent::getStatusFieldType() const
|
||||
{
|
||||
if (reason == UpdatesFailureReason::REGISTRATION) {
|
||||
return OrchestrationStatusFieldType::REGISTRATION;
|
||||
}
|
||||
if (type == UpdatesConfigType::MANIFEST) {
|
||||
return OrchestrationStatusFieldType::MANIFEST;
|
||||
}
|
||||
return OrchestrationStatusFieldType::LAST_UPDATE;
|
||||
}
|
||||
|
||||
OrchestrationStatusResult
|
||||
UpdatesProcessEvent::getOrchestrationStatusResult() const
|
||||
{
|
||||
return result == UpdatesProcessResult::SUCCESS ?
|
||||
OrchestrationStatusResult::SUCCESS :
|
||||
OrchestrationStatusResult::FAILED;
|
||||
}
|
||||
|
||||
string
|
||||
UpdatesProcessEvent::parseDescription() const
|
||||
{
|
||||
stringstream err;
|
||||
if (description.empty() || result == UpdatesProcessResult::SUCCESS) return "";
|
||||
|
||||
switch (reason) {
|
||||
case UpdatesFailureReason::CHECK_UPDATE: {
|
||||
err << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::REGISTRATION: {
|
||||
err << "Registration failed. Error: " << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::GET_UPDATE_REQUEST: {
|
||||
err << "Failed to get update request. Error: " << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::DOWNLOAD_FILE : {
|
||||
err << "Failed to download the file " << detail << ". Error: " << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::HANDLE_FILE : {
|
||||
err << "Failed to handle the file " << detail << ". " << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::INSTALLATION_QUEUE : {
|
||||
err << "Installation queue creation failed. Error: " << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::INSTALL_PACKAGE : {
|
||||
err << "Failed to install the package " << detail << ". Error: " << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::CHECKSUM_UNMATCHED : {
|
||||
err << "Checksums do not match for the file: " << detail << ". " << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::POLICY_CONFIGURATION : {
|
||||
err << "Failed to configure policy version: " << detail << ". Error: " << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::POLICY_FOG_CONFIGURATION : {
|
||||
err << "Failed to configure the fog address: " << detail << ". Error: " << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::ORCHESTRATION_SELF_UPDATE : {
|
||||
err << description;
|
||||
break;
|
||||
}
|
||||
case UpdatesFailureReason::NONE : {
|
||||
err << description;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return err.str();
|
||||
}
|
||||
@@ -0,0 +1,71 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "updates_process_reporter.h"

#include <sstream>
#include <string>

#include "debug.h"
#include "log_generator.h"

using namespace std;

USE_DEBUG_FLAG(D_UPDATES_PROCESS_REPORTER);

vector<UpdatesProcessReport> UpdatesProcessReporter::reports;

void
UpdatesProcessReporter::upon(const UpdatesProcessEvent &event)
{
if (event.getReason() == UpdatesFailureReason::CHECK_UPDATE) {
if (event.getResult() == UpdatesProcessResult::SUCCESS && reports.empty()) {
dbgTrace(D_UPDATES_PROCESS_REPORTER) << "Update proccess finished successfully";
return;
}
dbgTrace(D_UPDATES_PROCESS_REPORTER) << "Update proccess finished with errors";
reports.emplace_back(
UpdatesProcessReport(
event.getResult(),
event.getType(),
event.getReason(),
event.parseDescription()
)
);
sendReoprt();
return;
}
if (event.getResult() == UpdatesProcessResult::SUCCESS || event.getResult() == UpdatesProcessResult::UNSET) return;
reports.emplace_back(
UpdatesProcessReport(event.getResult(), event.getType(), event.getReason(), event.parseDescription())
);
}

void
UpdatesProcessReporter::sendReoprt()
{
stringstream all_reports;
all_reports << "Updates process reports:" << endl;
for (const auto &report : reports) {
all_reports << report.toString() << endl;
}
reports.clear();
dbgTrace(D_UPDATES_PROCESS_REPORTER) << "Sending updates process report: " << endl << all_reports.str();
LogGen(
"Updates process report",
ReportIS::Audience::INTERNAL,
ReportIS::Severity::HIGH,
ReportIS::Priority::HIGH,
ReportIS::Tags::ORCHESTRATOR
) << LogField("eventMessage", all_reports.str());
}
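The reporter above buffers failure reports from individual update steps in a static vector and flushes them as one aggregated log once the CHECK_UPDATE event closes the update cycle. A minimal standalone analogue of that accumulate-then-flush pattern is sketched below; the types are simplified stand-ins, not the repository classes.

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

enum class Result { SUCCESS, FAILED };

class SimpleReporter
{
public:
    // Called for every intermediate event; only failures are buffered.
    void onEvent(Result result, const std::string &description)
    {
        if (result == Result::FAILED) reports.push_back(description);
    }

    // Called when the update cycle ends; nothing is sent if the cycle was clean.
    void onCycleEnd(Result cycle_result)
    {
        if (cycle_result == Result::SUCCESS && reports.empty()) return;
        std::ostringstream all_reports;
        all_reports << "Updates process reports:\n";
        for (const auto &report : reports) all_reports << report << "\n";
        reports.clear();
        std::cout << all_reports.str(); // the real reporter emits a LogGen record instead
    }

private:
    std::vector<std::string> reports;
};

int main()
{
    SimpleReporter reporter;
    reporter.onEvent(Result::FAILED, "Failed to download the file manifest.json");
    reporter.onCycleEnd(Result::SUCCESS); // flushes the buffered failure as one report
    return 0;
}
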
@@ -87,6 +87,7 @@ add_library(waap_clib
|
||||
ParserPairs.cc
|
||||
Waf2Util2.cc
|
||||
ParserPDF.cc
|
||||
ParserBinaryFile.cc
|
||||
)
|
||||
|
||||
add_definitions("-Wno-unused-function")
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
#include "ParserPairs.h"
|
||||
#include "ParserDelimiter.h"
|
||||
#include "ParserPDF.h"
|
||||
#include "ParserBinaryFile.h"
|
||||
#include "WaapAssetState.h"
|
||||
#include "Waf2Regex.h"
|
||||
#include "Waf2Util.h"
|
||||
@@ -272,54 +273,58 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
|
||||
// Detect and decode potential base64 chunks in the value before further processing
|
||||
|
||||
bool base64ParamFound = false;
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << " ===Processing potential base64===";
|
||||
std::string decoded_val, decoded_key;
|
||||
base64_variants base64_status = Waap::Util::b64Test(cur_val, decoded_key, decoded_val);
|
||||
Waap::Util::BinaryFileType base64BinaryFileType = Waap::Util::BinaryFileType::FILE_TYPE_NONE;
|
||||
if (m_depth == 1 && flags == BUFFERED_RECEIVER_F_MIDDLE && m_key.depth() == 1 && m_key.first() != "#base64"){
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << " === will not check base64 since prev data block was not b64-encoded ===";
|
||||
} else {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << " ===Processing potential base64===";
|
||||
std::string decoded_val, decoded_key;
|
||||
base64_variants base64_status = Waap::Util::b64Test(cur_val, decoded_key, decoded_val, base64BinaryFileType);
|
||||
|
||||
dbgTrace(D_WAAP_DEEP_PARSER)
|
||||
<< " status = "
|
||||
<< base64_status
|
||||
<< " key = "
|
||||
<< decoded_key
|
||||
<< " value = "
|
||||
<< decoded_val;
|
||||
dbgTrace(D_WAAP_DEEP_PARSER)
|
||||
<< " status = "
|
||||
<< base64_status
|
||||
<< " key = "
|
||||
<< decoded_key
|
||||
<< " value = "
|
||||
<< decoded_val;
|
||||
|
||||
switch (base64_status) {
|
||||
case SINGLE_B64_CHUNK_CONVERT:
|
||||
cur_val = decoded_val;
|
||||
base64ParamFound = true;
|
||||
break;
|
||||
case KEY_VALUE_B64_PAIR:
|
||||
// going deep with new pair in case value is not empty
|
||||
if (decoded_val.size() > 0) {
|
||||
switch (base64_status) {
|
||||
case SINGLE_B64_CHUNK_CONVERT:
|
||||
cur_val = decoded_val;
|
||||
base64ParamFound = true;
|
||||
rc = onKv(
|
||||
decoded_key.c_str(),
|
||||
decoded_key.size(),
|
||||
cur_val.data(),
|
||||
cur_val.size(),
|
||||
flags,
|
||||
parser_depth
|
||||
break;
|
||||
case KEY_VALUE_B64_PAIR:
|
||||
// going deep with new pair in case value is not empty
|
||||
if (decoded_val.size() > 0) {
|
||||
cur_val = decoded_val;
|
||||
base64ParamFound = true;
|
||||
rc = onKv(
|
||||
decoded_key.c_str(),
|
||||
decoded_key.size(),
|
||||
cur_val.data(),
|
||||
cur_val.size(),
|
||||
flags,
|
||||
parser_depth
|
||||
);
|
||||
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << " rc = " << rc;
|
||||
if (rc != CONTINUE_PARSING) {
|
||||
return rc;
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << " rc = " << rc;
|
||||
if (rc != CONTINUE_PARSING) {
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
}
|
||||
break;
|
||||
case CONTINUE_AS_IS:
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case CONTINUE_AS_IS:
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (base64ParamFound) {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "DeepParser::onKv(): pushing #base64 prefix to the key.";
|
||||
m_key.push("#base64", 7, false);
|
||||
if (base64ParamFound) {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "DeepParser::onKv(): pushing #base64 prefix to the key.";
|
||||
m_key.push("#base64", 7, false);
|
||||
}
|
||||
}
|
||||
|
||||
// cur_val is later passed through some filters (such as urldecode) before JSON, XML or HTML is detected/decoded
|
||||
std::string orig_val = cur_val;
|
||||
|
||||
@@ -355,7 +360,8 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
|
||||
isUrlPayload,
|
||||
isUrlParamPayload,
|
||||
flags,
|
||||
parser_depth
|
||||
parser_depth,
|
||||
base64BinaryFileType
|
||||
);
|
||||
} else {
|
||||
offset = 0;
|
||||
@@ -425,7 +431,8 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
|
||||
isUrlParamPayload,
|
||||
flags,
|
||||
parser_depth,
|
||||
base64ParamFound
|
||||
base64ParamFound,
|
||||
base64BinaryFileType
|
||||
);
|
||||
if (rc != CONTINUE_PARSING) {
|
||||
return rc;
|
||||
@@ -468,19 +475,19 @@ DeepParser::onKv(const char *k, size_t k_len, const char *v, size_t v_len, int f
|
||||
if (rc != CONTINUE_PARSING) {
|
||||
return rc;
|
||||
}
|
||||
|
||||
if (Waap::Util::detectJSONasParameter(cur_val, decoded_key, decoded_val)) {
|
||||
std::string json_decoded_val, json_decoded_key;
|
||||
if (Waap::Util::detectJSONasParameter(cur_val, json_decoded_key, json_decoded_val)) {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER)
|
||||
<< " detectJSONasParameter was true: key = "
|
||||
<< decoded_key
|
||||
<< json_decoded_key
|
||||
<< " value = "
|
||||
<< decoded_val;
|
||||
<< json_decoded_val;
|
||||
|
||||
rc = onKv(
|
||||
decoded_key.c_str(),
|
||||
decoded_key.size(),
|
||||
decoded_val.data(),
|
||||
decoded_val.size(),
|
||||
json_decoded_key.c_str(),
|
||||
json_decoded_key.size(),
|
||||
json_decoded_val.data(),
|
||||
json_decoded_val.size(),
|
||||
flags,
|
||||
parser_depth
|
||||
);
|
||||
@@ -798,7 +805,8 @@ DeepParser::parseAfterMisleadingMultipartBoundaryCleaned(
|
||||
bool isUrlParamPayload,
|
||||
int flags,
|
||||
size_t parser_depth,
|
||||
bool base64ParamFound)
|
||||
bool base64ParamFound,
|
||||
Waap::Util::BinaryFileType b64FileType)
|
||||
{
|
||||
int offset = -1;
|
||||
int rc = 0;
|
||||
@@ -815,7 +823,8 @@ DeepParser::parseAfterMisleadingMultipartBoundaryCleaned(
|
||||
isUrlPayload,
|
||||
isUrlParamPayload,
|
||||
flags,
|
||||
parser_depth
|
||||
parser_depth,
|
||||
b64FileType
|
||||
);
|
||||
} else {
|
||||
offset = 0;
|
||||
@@ -919,7 +928,8 @@ DeepParser::createInternalParser(
|
||||
bool isUrlPayload,
|
||||
bool isUrlParamPayload,
|
||||
int flags,
|
||||
size_t parser_depth
|
||||
size_t parser_depth,
|
||||
Waap::Util::BinaryFileType b64FileType
|
||||
)
|
||||
{
|
||||
dbgTrace(D_WAAP_DEEP_PARSER)
|
||||
@@ -1152,10 +1162,25 @@ DeepParser::createInternalParser(
|
||||
m_parsersDeque.push_back(std::make_shared<BufferedParser<ParserPDF>>(*this, parser_depth + 1));
|
||||
offset = 0;
|
||||
} else {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "Starting to parse a binary file";
|
||||
m_parsersDeque.push_back(std::make_shared<BufferedParser<ParserBinary>>(*this, parser_depth + 1));
|
||||
offset = 0;
|
||||
Waap::Util::BinaryFileType fileType = ParserBinaryFile::detectBinaryFileHeader(cur_val);
|
||||
if (fileType != Waap::Util::BinaryFileType::FILE_TYPE_NONE) {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "Starting to parse a known binary file (type=" << fileType << ")";
|
||||
m_parsersDeque.push_back(
|
||||
std::make_shared<BufferedParser<ParserBinaryFile>>(*this, parser_depth + 1, false, fileType)
|
||||
);
|
||||
offset = 0;
|
||||
} else {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "Starting to parse a binary file";
|
||||
m_parsersDeque.push_back(std::make_shared<BufferedParser<ParserBinary>>(*this, parser_depth + 1));
|
||||
offset = 0;
|
||||
}
|
||||
}
|
||||
} else if (b64FileType != Waap::Util::BinaryFileType::FILE_TYPE_NONE) {
|
||||
dbgTrace(D_WAAP_DEEP_PARSER) << "Starting to parse a known binary file, base64 encoded";
|
||||
m_parsersDeque.push_back(
|
||||
std::make_shared<BufferedParser<ParserBinaryFile>>(*this, parser_depth + 1, true, b64FileType)
|
||||
);
|
||||
offset = 0;
|
||||
}
|
||||
}
|
||||
if (offset < 0) {
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#include "KeyStack.h"
|
||||
#include "WaapAssetState.h"
|
||||
#include "Waf2Regex.h"
|
||||
#include "Waf2Util.h"
|
||||
#include "maybe_res.h"
|
||||
#include <deque>
|
||||
|
||||
@@ -129,7 +130,8 @@ private:
|
||||
bool isUrlPayload,
|
||||
bool isUrlParamPayload,
|
||||
int flags,
|
||||
size_t parser_depth
|
||||
size_t parser_depth,
|
||||
Waap::Util::BinaryFileType b64FileType
|
||||
);
|
||||
|
||||
int createUrlParserForJson(
|
||||
@@ -160,7 +162,8 @@ private:
|
||||
bool isUrlParamPayload,
|
||||
int flags,
|
||||
size_t parser_depth,
|
||||
bool base64ParamFound
|
||||
bool base64ParamFound,
|
||||
Waap::Util::BinaryFileType b64FileType
|
||||
);
|
||||
int pushValueToTopParser(std::string &cur_val, int flags, bool base64ParamFound, int offset, size_t parser_depth);
|
||||
int parseBuffer(
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#define BUFFERED_RECEIVER_F_LAST 0x02
|
||||
#define BUFFERED_RECEIVER_F_BOTH (BUFFERED_RECEIVER_F_FIRST | BUFFERED_RECEIVER_F_LAST)
|
||||
#define BUFFERED_RECEIVER_F_UNNAMED 0x04
|
||||
#define BUFFERED_RECEIVER_F_MIDDLE 0x00
|
||||
|
||||
#if (DISTRO_centos6)
|
||||
// pre c++11 compiler doesn' support the "final" keyword
|
||||
|
||||
components/security_apps/waap/waap_clib/ParserBinaryFile.cc (new file, 199 lines)
@@ -0,0 +1,199 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "ParserBinaryFile.h"
|
||||
#include "Waf2Util.h"
|
||||
#include "debug.h"
|
||||
#include <string.h>
|
||||
#include <map>
|
||||
#include <tuple>
|
||||
|
||||
using namespace std;
|
||||
using Waap::Util::BinaryFileType;
|
||||
|
||||
USE_DEBUG_FLAG(D_WAAP_PARSER_BINARY_FILE);
|
||||
USE_DEBUG_FLAG(D_WAAP);
|
||||
|
||||
const string ParserBinaryFile::m_parserName = "ParserBinaryFile";
|
||||
|
||||
static const map<BinaryFileType, pair<string, string>> m_head_tail_map = {
|
||||
{BinaryFileType::FILE_TYPE_PNG,
|
||||
{string("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"), // PNG
|
||||
string("\x49\x45\x4e\x44\xae\x42\x60\x82")}}, // IEND
|
||||
{BinaryFileType::FILE_TYPE_JPEG,
|
||||
{string("\xff\xd8\xff"),
|
||||
string("\xff\xd9")}},
|
||||
{BinaryFileType::FILE_TYPE_PDF,
|
||||
{string("%PDF-"),
|
||||
string("%%EOF")}}
|
||||
};
|
||||
|
||||
ParserBinaryFile::ParserBinaryFile(
|
||||
IParserStreamReceiver &receiver,
|
||||
size_t parser_depth,
|
||||
bool is_base64,
|
||||
BinaryFileType file_type
|
||||
) :
|
||||
m_receiver(receiver),
|
||||
m_state(s_start),
|
||||
m_parser_depth(parser_depth),
|
||||
m_is_base64(is_base64),
|
||||
m_file_type(file_type)
|
||||
{}
|
||||
|
||||
ParserBinaryFile::~ParserBinaryFile()
|
||||
{}
|
||||
|
||||
BinaryFileType
|
||||
ParserBinaryFile::detectBinaryFileHeader(const string &buf)
|
||||
{
|
||||
if (buf.size() < MIN_HEADER_LOOKUP) {
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "Buffer size too small (" << buf.size() << ")";
|
||||
return BinaryFileType::FILE_TYPE_NONE;
|
||||
}
|
||||
const string searchStr = buf.substr(0, MAX_HEADER_LOOKUP);
|
||||
for (const auto &entry : m_head_tail_map) {
|
||||
const string &head = entry.second.first;
|
||||
size_t pos = searchStr.find(head);
|
||||
if (pos != string::npos) {
|
||||
if (buf.size() - pos >= MIN_HEADER_LOOKUP) {
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "Found. type=" << entry.first;
|
||||
return entry.first;
|
||||
} else {
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "Remaining size after header is too small";
|
||||
return BinaryFileType::FILE_TYPE_NONE;
|
||||
}
|
||||
}
|
||||
}
|
||||
return BinaryFileType::FILE_TYPE_NONE;
|
||||
}
|
||||
|
||||
|
||||
size_t
|
||||
ParserBinaryFile::push(const char *buf, size_t len)
|
||||
{
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE)
|
||||
<< "buf="
|
||||
<< buf
|
||||
<< "len="
|
||||
<< len;
|
||||
|
||||
const char *c;
|
||||
|
||||
if (m_state == s_error) {
|
||||
return 0;
|
||||
}
|
||||
if (len == 0) {
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "end of stream. m_state=" << m_state;
|
||||
|
||||
if (m_state == s_end) {
|
||||
m_receiver.onKvDone();
|
||||
} else if (m_is_base64) {
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "finished parsing";
|
||||
if (m_receiver.onKey("BinaryFileSkip", 14) != 0) {
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
if (m_receiver.onValue("", 0) != 0) {
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
m_receiver.onKvDone();
|
||||
} else {
|
||||
m_state = s_error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
if (m_head_tail_map.find(m_file_type) == m_head_tail_map.end()) {
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "unknown file type: " << m_file_type;
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
const string tail = m_head_tail_map.at(m_file_type).second;
|
||||
|
||||
switch (m_state) {
|
||||
case s_start:
|
||||
m_state = s_body;
|
||||
CP_FALL_THROUGH;
|
||||
case s_body:
|
||||
if (m_is_base64) {
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "parsing base64";
|
||||
bool keepParsing = true;
|
||||
for (size_t i = 0; i < len; i++) {
|
||||
bool isB64AlphaChar =
|
||||
Waap::Util::isAlphaAsciiFast(buf[i]) || isdigit(buf[i]) || buf[i] == '/' || buf[i] == '+';
|
||||
if (buf[i] == '=') {
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE)
|
||||
<< "base64 padding found (offset=" << i << "). end of stream.";
|
||||
m_state = s_end;
|
||||
keepParsing = false;
|
||||
break;
|
||||
} else if (!isB64AlphaChar) {
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE)
|
||||
<< "non-base64 char found (c=" << buf[i] << ",offset=" << i << "). return error";
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if (keepParsing) { // keep "parsing" on next call to push()
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "parsing binary. Searching for tail: " << tail;
|
||||
c = strstr(buf + len - tail.size(), tail.c_str());
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "search result: c=" << c;
|
||||
if (c) {
|
||||
m_state = s_end;
|
||||
} else { // keep "parsing" on next call to push()
|
||||
break;
|
||||
}
|
||||
}
|
||||
CP_FALL_THROUGH;
|
||||
case s_end:
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "finished parsing";
|
||||
if (m_receiver.onKey("BinaryFileSkip", 14) != 0) {
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
if (m_receiver.onValue("", 0) != 0) {
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
break;
|
||||
case s_error:
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "error detected";
|
||||
break;
|
||||
default:
|
||||
dbgTrace(D_WAAP_PARSER_BINARY_FILE) << "unknown state: " << m_state;
|
||||
m_state = s_error;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
|
||||
void ParserBinaryFile::finish()
|
||||
{
|
||||
push(NULL, 0);
|
||||
}
|
||||
|
||||
const string& ParserBinaryFile::name() const
|
||||
{
|
||||
return m_parserName;
|
||||
}
|
||||
|
||||
bool ParserBinaryFile::error() const
|
||||
{
|
||||
return m_state == s_error;
|
||||
}
|
||||
components/security_apps/waap/waap_clib/ParserBinaryFile.h (new file, 57 lines)
@@ -0,0 +1,57 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __PARSER_BINARY_FILE_H__
#define __PARSER_BINARY_FILE_H__

#include "ParserBase.h"
#include "Waf2Util.h"
#include <string.h>

#define MIN_HEADER_LOOKUP 16
#define MAX_HEADER_LOOKUP 64
#define MAX_TAIL_LOOKUP 5

class ParserBinaryFile : public ParserBase {
public:
static Waap::Util::BinaryFileType detectBinaryFileHeader(const std::string &buf);

ParserBinaryFile(
IParserStreamReceiver &receiver,
size_t parser_depth,
bool is_base64,
Waap::Util::BinaryFileType file_type);
virtual ~ParserBinaryFile();
virtual size_t push(const char *buf, size_t len);
virtual void finish();
virtual const std::string &name() const;
virtual bool error() const;
virtual size_t depth() { return 1; }

private:
enum state {
s_start,
s_body,
s_end,
s_error
};

IParserStreamReceiver &m_receiver;
enum state m_state;
static const std::string m_parserName;
size_t m_parser_depth;
bool m_is_base64;
Waap::Util::BinaryFileType m_file_type;
};

#endif // __PARSER_BINARY_FILE_H__
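The header above declares detectBinaryFileHeader(), which ParserBinaryFile.cc implements by scanning the first bytes of a buffer for a known magic-byte prefix (PNG, JPEG or PDF). A rough standalone sketch of that lookup follows; the names and the simplified size checks are illustrative, not the repository API.

#include <iostream>
#include <map>
#include <string>

enum class FileType { NONE, PNG, JPEG, PDF };

// Returns the file type whose magic-byte prefix appears near the start of the buffer.
static FileType detectHeader(const std::string &buf)
{
    static const std::map<FileType, std::string> heads = {
        {FileType::PNG,  std::string("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A", 8)}, // PNG signature
        {FileType::JPEG, std::string("\xFF\xD8\xFF", 3)},                     // JPEG SOI marker
        {FileType::PDF,  std::string("%PDF-")}
    };
    const std::string search = buf.substr(0, 64); // 64 mirrors MAX_HEADER_LOOKUP above
    for (const auto &entry : heads) {
        if (search.find(entry.second) != std::string::npos) return entry.first;
    }
    return FileType::NONE;
}

int main()
{
    const std::string pdf = "%PDF-1.7 ...binary payload... %%EOF";
    std::cout << (detectHeader(pdf) == FileType::PDF ? "PDF detected" : "unknown") << std::endl;
    return 0;
}
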
@@ -304,6 +304,7 @@ ParserJson::ParserJson(
|
||||
m_key.push("json", 4);
|
||||
}
|
||||
|
||||
|
||||
ParserJson::~ParserJson()
|
||||
{
|
||||
// Cleanup JSON
|
||||
|
||||
@@ -23,6 +23,7 @@ unescaped_line(),
|
||||
param_name(),
|
||||
location(),
|
||||
score(0.0f),
|
||||
scoreNoFilter(0.0f),
|
||||
scoreArray(),
|
||||
keywordCombinations(),
|
||||
attack_types(),
|
||||
@@ -40,6 +41,7 @@ void Waf2ScanResult::clear()
|
||||
param_name.clear();
|
||||
location.clear();
|
||||
score = 0;
|
||||
scoreNoFilter = 0;
|
||||
scoreArray.clear();
|
||||
keywordCombinations.clear();
|
||||
attack_types.clear();
|
||||
|
||||
@@ -29,6 +29,7 @@ struct Waf2ScanResult {
|
||||
std::string param_name;
|
||||
std::string location;
|
||||
double score;
|
||||
double scoreNoFilter;
|
||||
std::vector<double> scoreArray;
|
||||
std::vector<std::string> keywordCombinations;
|
||||
std::set<std::string> attack_types;
|
||||
|
||||
@@ -727,7 +727,6 @@ void SerializeToLocalAndRemoteSyncBase::syncWorker()
|
||||
"sync notification for '" + m_assetId + "'",
|
||||
ReportIS::AudienceTeam::WAAP,
|
||||
syncNotification,
|
||||
false,
|
||||
MessageCategory::GENERIC,
|
||||
ReportIS::Tags::WAF,
|
||||
ReportIS::Notification::SYNC_LEARNING
|
||||
|
||||
@@ -39,7 +39,7 @@ namespace Conversions {
|
||||
return HIGH_THREAT;
|
||||
}
|
||||
|
||||
bool shouldDoWafBlocking(const IWaapConfig* pWaapConfig, ThreatLevel threatLevel)
|
||||
bool shouldDoWafBlocking(const IWaapConfig* const pWaapConfig, ThreatLevel threatLevel)
|
||||
{
|
||||
if (pWaapConfig == NULL)
|
||||
{
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
namespace Waap {
|
||||
namespace Conversions {
|
||||
ThreatLevel convertFinalScoreToThreatLevel(double finalScore);
|
||||
bool shouldDoWafBlocking(const IWaapConfig* pSitePolicy, ThreatLevel threatLevel);
|
||||
bool shouldDoWafBlocking(const IWaapConfig* const pSitePolicy, ThreatLevel threatLevel);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -39,12 +39,19 @@ public:
|
||||
m_op = to_lower_copy(m_op);
|
||||
m_isCidr = false;
|
||||
m_value = "";
|
||||
m_isValid = true;
|
||||
|
||||
if (m_op == "basic") {
|
||||
// If op == "BASIC" - read numeric value
|
||||
ar(cereal::make_nvp("tag", m_tag));
|
||||
m_tag = to_lower_copy(m_tag);
|
||||
|
||||
if (m_tag != "sourceip" && m_tag != "sourceidentifier" && m_tag != "url" && m_tag != "hostname" &&
|
||||
m_tag != "keyword" && m_tag != "paramname" && m_tag != "paramvalue" && m_tag != "paramlocation" &&
|
||||
m_tag != "responsebody" && m_tag != "headername" && m_tag != "headervalue" ) {
|
||||
m_isValid = false;
|
||||
dbgDebug(D_WAAP_OVERRIDE) << "Invalid override tag: " << m_tag;
|
||||
}
|
||||
// The name "value" here is misleading. The real meaning is "regex pattern string"
|
||||
ar(cereal::make_nvp("value", m_value));
|
||||
|
||||
@@ -73,12 +80,14 @@ public:
|
||||
m_operand2 = std::make_shared<Match>();
|
||||
ar(cereal::make_nvp("operand2", *m_operand2));
|
||||
m_isOverrideResponse = m_operand1->m_isOverrideResponse || m_operand2->m_isOverrideResponse;
|
||||
m_isValid = m_operand1->m_isValid && m_operand2->m_isValid;
|
||||
}
|
||||
else if (m_op == "not") {
|
||||
// If op is "NOT" get one operand
|
||||
m_operand1 = std::make_shared<Match>();
|
||||
ar(cereal::make_nvp("operand1", *m_operand1));
|
||||
m_isOverrideResponse = m_operand1->m_isOverrideResponse;
|
||||
m_isValid = m_operand1->m_isValid;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -120,6 +129,10 @@ public:
|
||||
return m_isOverrideResponse;
|
||||
}
|
||||
|
||||
bool isValidMatch() const{
|
||||
return m_isValid;
|
||||
}
|
||||
|
||||
private:
|
||||
std::string m_op;
|
||||
std::shared_ptr<Match> m_operand1;
|
||||
@@ -130,6 +143,7 @@ private:
|
||||
Waap::Util::CIDRData m_cidr;
|
||||
bool m_isCidr;
|
||||
bool m_isOverrideResponse;
|
||||
bool m_isValid;
|
||||
};
|
||||
|
||||
class Behavior
|
||||
@@ -189,6 +203,9 @@ private:
|
||||
|
||||
class Rule {
|
||||
public:
|
||||
|
||||
Rule(): m_match(), m_isChangingRequestData(false), isValid(true){}
|
||||
|
||||
bool operator==(const Rule &other) const;
|
||||
|
||||
template <typename _A>
|
||||
@@ -202,6 +219,11 @@ public:
|
||||
m_id.clear();
|
||||
}
|
||||
ar(cereal::make_nvp("parsedMatch", m_match));
|
||||
if (!m_match.isValidMatch()) {
|
||||
dbgDebug(D_WAAP_OVERRIDE) << "An override rule was not load";
|
||||
isValid = false;
|
||||
}
|
||||
|
||||
ar(cereal::make_nvp("parsedBehavior", m_behaviors));
|
||||
|
||||
m_isChangingRequestData = false;
|
||||
@@ -242,6 +264,7 @@ public:
|
||||
dbgTrace(D_WAAP_OVERRIDE) << "Rule not matched";
|
||||
}
|
||||
|
||||
|
||||
bool isChangingRequestData() const {
|
||||
return m_isChangingRequestData;
|
||||
}
|
||||
@@ -253,11 +276,16 @@ public:
|
||||
return m_id;
|
||||
}
|
||||
|
||||
bool isValidRule() const {
|
||||
return isValid;
|
||||
}
|
||||
|
||||
private:
|
||||
Match m_match;
|
||||
bool m_isChangingRequestData;
|
||||
std::vector<Behavior> m_behaviors;
|
||||
std::string m_id;
|
||||
bool isValid;
|
||||
};
|
||||
|
||||
class Policy {
|
||||
@@ -270,6 +298,10 @@ public:
|
||||
|
||||
for (std::vector<Waap::Override::Rule>::const_iterator it = rules.begin(); it != rules.end(); ++it) {
|
||||
const Waap::Override::Rule& rule = *it;
|
||||
if (!rule.isValidRule()) {
|
||||
dbgWarning(D_WAAP_OVERRIDE) << "rule is not valid";
|
||||
continue;
|
||||
}
|
||||
if (rule.isChangingRequestData())
|
||||
{
|
||||
m_RequestOverrides.push_back(rule);
|
||||
|
||||
@@ -145,6 +145,6 @@ bool WaapOverrideFunctor::operator()(const std::string& tag, const boost::regex&
|
||||
}
|
||||
|
||||
// Unknown tag: should not occur
|
||||
dbgWarning(D_WAAP) << "Invalid override tag: " << tag;
|
||||
dbgDebug(D_WAAP) << "Invalid override tag: " << tag;
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ USE_DEBUG_FLAG(D_OA_SCHEMA_UPDATER);
|
||||
// id generated by xml parser for an entity attribute
|
||||
const std::string Waap::Scanner::xmlEntityAttributeId = "08a80340-06d3-11ea-9f87-0242ac11000f";
|
||||
|
||||
double Waap::Scanner::getScoreData(Waf2ScanResult& res, const std::string &poolName)
|
||||
double Waap::Scanner::getScoreData(Waf2ScanResult& res, const std::string &poolName, bool applyLearning)
|
||||
{
|
||||
std::string source = m_transaction->getSourceIdentifier();
|
||||
|
||||
@@ -33,21 +33,24 @@ double Waap::Scanner::getScoreData(Waf2ScanResult& res, const std::string &poolN
|
||||
Waap::Keywords::KeywordsSet keywordsSet;
|
||||
Waap::Keywords::computeKeywordsSet(keywordsSet, res.keyword_matches, res.found_patterns);
|
||||
|
||||
std::string param_name = IndicatorsFiltersManager::generateKey(res.location, res.param_name, m_transaction);
|
||||
dbgTrace(D_WAAP_SCANNER) << "filter processing for parameter: " << param_name;
|
||||
m_transaction->getAssetState()->logIndicatorsInFilters(param_name, keywordsSet, m_transaction);
|
||||
if (applyLearning) {
|
||||
std::string param_name = IndicatorsFiltersManager::generateKey(res.location, res.param_name, m_transaction);
|
||||
dbgTrace(D_WAAP_SCANNER) << "filter processing for parameter: " << param_name <<
|
||||
", indicators count: " << keywordsSet.size();
|
||||
m_transaction->getAssetState()->logIndicatorsInFilters(param_name, keywordsSet, m_transaction);
|
||||
|
||||
m_transaction->getAssetState()->filterKeywords(param_name, keywordsSet, res.filtered_keywords);
|
||||
if (m_transaction->getSiteConfig() != nullptr)
|
||||
{
|
||||
auto waapParams = m_transaction->getSiteConfig()->get_WaapParametersPolicy();
|
||||
if (waapParams != nullptr && waapParams->getParamVal("filtersVerbose", "false") == "true") {
|
||||
m_transaction->getAssetState()->filterVerbose(param_name, res.filtered_keywords);
|
||||
m_transaction->getAssetState()->filterKeywords(param_name, keywordsSet, res.filtered_keywords);
|
||||
if (m_transaction->getSiteConfig() != nullptr)
|
||||
{
|
||||
auto waapParams = m_transaction->getSiteConfig()->get_WaapParametersPolicy();
|
||||
if (waapParams != nullptr && waapParams->getParamVal("filtersVerbose", "false") == "true") {
|
||||
m_transaction->getAssetState()->filterVerbose(param_name, res.filtered_keywords);
|
||||
}
|
||||
}
|
||||
m_transaction->getAssetState()->filterKeywordsByParameters(res.param_name, keywordsSet);
|
||||
|
||||
dbgTrace(D_WAAP_SCANNER) << "post filtering indicators count: " << keywordsSet.size();
|
||||
}
|
||||
m_transaction->getAssetState()->filterKeywordsByParameters(res.param_name, keywordsSet);
|
||||
|
||||
|
||||
// The keywords are only removed in production, they are still used while building scores
|
||||
if (!m_transaction->get_ignoreScore()) {
|
||||
m_transaction->getAssetState()->removeKeywords(keywordsSet);
|
||||
@@ -148,9 +151,16 @@ bool Waap::Scanner::suspiciousHit(Waf2ScanResult& res, DeepParser &dp,
|
||||
// Select scores pool by location
|
||||
std::string poolName = Waap::Scores::getScorePoolNameByLocation(location);
|
||||
|
||||
Waf2ScanResult nonFilterRes = res;
|
||||
res.scoreNoFilter = getScoreData(nonFilterRes, poolName, false);
|
||||
|
||||
double score = getScoreData(res, poolName);
|
||||
|
||||
dbgTrace(D_WAAP_SCANNER) << "score: " << score;
|
||||
// call shouldIgnoreOverride post score calculation and filtering to evaluate ignore override effectivness
|
||||
res.score = score;
|
||||
m_transaction->shouldIgnoreOverride(res);
|
||||
|
||||
dbgTrace(D_WAAP_SCANNER) << "score: " << score << " should ignore: " << ignoreOverride;
|
||||
// Add record about scores to the notes[] log (also reported in logs)
|
||||
if (score > 1.0f) {
|
||||
DetectionEvent(location, res.keyword_matches).notify();
|
||||
@@ -166,6 +176,7 @@ bool Waap::Scanner::suspiciousHit(Waf2ScanResult& res, DeepParser &dp,
|
||||
if (isKeyCspReport(key, res, dp) || ignoreOverride) {
|
||||
dbgTrace(D_WAAP_SCANNER) << "Ignoring parameter key/value " << res.param_name <<
|
||||
" due to ignore action in override";
|
||||
res.score = 0;
|
||||
m_bIgnoreOverride = true;
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@ namespace Waap {
|
||||
|
||||
static const std::string xmlEntityAttributeId;
|
||||
private:
|
||||
double getScoreData(Waf2ScanResult& res, const std::string &poolName);
|
||||
double getScoreData(Waf2ScanResult& res, const std::string &poolName, bool applyLearning = true);
|
||||
bool shouldIgnoreOverride(const Waf2ScanResult &res);
|
||||
bool isKeyCspReport(const std::string &key, Waf2ScanResult &res, DeepParser &dp);
|
||||
|
||||
|
||||
@@ -329,6 +329,7 @@ Waf2Transaction::Waf2Transaction() :
|
||||
is_schema_validation(false),
|
||||
m_waf2TransactionFlags()
|
||||
{
|
||||
m_overrideOriginalMaxScore[OVERRIDE_ACCEPT] = 0;
|
||||
I_TimeGet *timeGet = Singleton::Consume<I_TimeGet>::by<Waf2Transaction>();
|
||||
m_entry_time = chrono::duration_cast<chrono::milliseconds>(timeGet->getMonotonicTime());
|
||||
}
|
||||
@@ -1516,6 +1517,7 @@ Waf2Transaction::decideAfterHeaders()
|
||||
return finalizeDecision(sitePolicy, shouldBlock);
|
||||
}
|
||||
|
||||
|
||||
// Note: the only user of the transactionResult structure filled by this method is waap_automation.
|
||||
// TODO: Consider removing this parameter (and provide access to this information by other means)
|
||||
int
|
||||
@@ -1728,6 +1730,11 @@ void Waf2Transaction::appendCommonLogFields(LogGen& waapLog,
|
||||
std::copy(m_effectiveOverrideIds.begin(), m_effectiveOverrideIds.end(), vEffectiveOverrideIds.begin());
|
||||
waapLog.addToOrigin(LogField("effectiveExceptionIdList", vEffectiveOverrideIds));
|
||||
}
|
||||
if (!m_exceptionLearned.empty()) {
|
||||
std::vector<std::string> vLearningAffected(m_exceptionLearned.size());
|
||||
std::copy(m_exceptionLearned.begin(), m_exceptionLearned.end(), vLearningAffected.begin());
|
||||
waapLog.addToOrigin(LogField("redundantExceptionIdList", vLearningAffected));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1808,12 +1815,6 @@ Waf2Transaction::sendLog()
|
||||
return;
|
||||
}
|
||||
|
||||
dbgTrace(D_WAAP) << "force exception: " << m_overrideState.bForceException <<
|
||||
" force block: " << m_overrideState.bForceBlock <<
|
||||
" matched overrides count: " << m_matchedOverrideIds.size() <<
|
||||
" effective overrides count: " << m_effectiveOverrideIds.size();
|
||||
|
||||
|
||||
bool shouldBlock = false;
|
||||
if (m_overrideState.bForceBlock) {
|
||||
// If override forces "reject" decision, mention it in the "override" log field.
|
||||
@@ -2090,7 +2091,30 @@ Waf2Transaction::decideAutonomousSecurity(
|
||||
transactionResult.threatLevel = threat;
|
||||
}
|
||||
|
||||
dbgTrace(D_WAAP_OVERRIDE) << "override ids count: " << m_matchedOverrideIds.size();
|
||||
// Apply overrides
|
||||
for (auto it = m_overridePostFilterMaxScore.begin(); it != m_overridePostFilterMaxScore.end(); it++) {
|
||||
const string id = it->first;
|
||||
if (m_overrideState.forceBlockIds.find(id) != m_overrideState.forceBlockIds.end()) {
    // blocked effectiveness is calculated later from the force block exception ids list
    continue;
}
ThreatLevel threat = Waap::Conversions::convertFinalScoreToThreatLevel(it->second);
bool shouldBlock = Waap::Conversions::shouldDoWafBlocking(m_siteConfig, threat);
dbgTrace(D_WAAP_OVERRIDE) << "checking effectiveness of override: " << id << ", should have blocked: " << shouldBlock
    << ", scores: " << m_overridePostFilterMaxScore[id] << ", " << m_overrideOriginalMaxScore[id];
|
||||
if (shouldBlock) {
|
||||
m_effectiveOverrideIds.insert(id);
|
||||
} else {
|
||||
ThreatLevel threatNoFilter = Waap::Conversions::convertFinalScoreToThreatLevel(
|
||||
m_overrideOriginalMaxScore[id]
|
||||
);
|
||||
if (Waap::Conversions::shouldDoWafBlocking(m_siteConfig, threatNoFilter)) {
|
||||
m_exceptionLearned.insert(id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (m_overrideState.bForceBlock) {
|
||||
dbgTrace(D_WAAP) << "decideAutonomousSecurity(): decision was " << decision->shouldBlock() <<
|
||||
" and override forces REJECT ...";
|
||||
@@ -2104,25 +2128,25 @@ Waf2Transaction::decideAutonomousSecurity(
|
||||
}
|
||||
}
|
||||
else if (m_overrideState.bForceException) {
|
||||
dbgTrace(D_WAAP) << "decideAutonomousSecurity(): decision was " << decision->shouldBlock() <<
|
||||
dbgTrace(D_WAAP) << "de cideAutonomousSecurity(): decision was " << decision->shouldBlock() <<
|
||||
" and override forces ALLOW ...";
|
||||
if (m_scanResult) {
|
||||
// on accept exception the decision is not set and needs to be calculated to determine effectiveness
|
||||
ThreatLevel threat = Waap::Conversions::convertFinalScoreToThreatLevel(m_scanResult->score);
|
||||
bool shouldBlock = Waap::Conversions::shouldDoWafBlocking(&sitePolicy, threat);
|
||||
if (shouldBlock) {
|
||||
m_effectiveOverrideIds.insert(
|
||||
m_overrideState.forceExceptionIds.begin(), m_overrideState.forceExceptionIds.end()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
decision->setBlock(false);
|
||||
if (!m_overrideState.bIgnoreLog)
|
||||
{
|
||||
decision->setOverridesLog(true);
|
||||
}
|
||||
} else if (!m_matchedOverrideIds.empty()) {
|
||||
if (!m_overrideState.bIgnoreLog)
|
||||
{
|
||||
decision->setOverridesLog(true);
|
||||
}
|
||||
}
|
||||
dbgTrace(D_WAAP_OVERRIDE) << "force exception: " << m_overrideState.bForceException <<
|
||||
" force block: " << m_overrideState.bForceBlock <<
|
||||
" matched overrides count: " << m_matchedOverrideIds.size() <<
|
||||
" effective overrides count: " << m_effectiveOverrideIds.size() <<
|
||||
" learned overrides count: " << m_exceptionLearned.size();
|
||||
|
||||
|
||||
|
||||
bool log_all = false;
|
||||
@@ -2261,7 +2285,7 @@ bool
|
||||
Waf2Transaction::shouldIgnoreOverride(const Waf2ScanResult &res) {
|
||||
auto exceptions = getConfiguration<ParameterException>("rulebase", "exception");
|
||||
if (!exceptions.ok()) {
|
||||
dbgTrace(D_WAAP_OVERRIDE) << "matching exceptions error:" << exceptions.getErr();
|
||||
dbgTrace(D_WAAP_OVERRIDE) << "matching exceptions error: " << exceptions.getErr();
|
||||
return false;
|
||||
}
|
||||
dbgTrace(D_WAAP_OVERRIDE) << "matching exceptions";
|
||||
@@ -2304,6 +2328,24 @@ Waf2Transaction::shouldIgnoreOverride(const Waf2ScanResult &res) {
|
||||
auto behaviors = exceptions.unpack().getBehavior(exceptions_dict,
|
||||
getAssetState()->m_filtersMngr->getMatchedOverrideKeywords());
|
||||
for (const auto &behavior : behaviors) {
|
||||
if (!res.filtered_keywords.empty() || res.score > 0) {
|
||||
dbgTrace(D_WAAP_OVERRIDE) << "matched exceptions for " << res.param_name << " with filtered indicators";
|
||||
std::string overrideId = behavior.getId();
|
||||
if (m_overrideOriginalMaxScore.find(overrideId) == m_overrideOriginalMaxScore.end()){
|
||||
m_overrideOriginalMaxScore[overrideId] = res.scoreNoFilter;
|
||||
m_overridePostFilterMaxScore[overrideId] = res.score;
|
||||
} else {
|
||||
if (res.scoreNoFilter > m_overrideOriginalMaxScore[overrideId]) {
|
||||
m_overrideOriginalMaxScore[overrideId] = res.scoreNoFilter;
|
||||
}
|
||||
if (res.score > m_overridePostFilterMaxScore[overrideId]) {
|
||||
m_overridePostFilterMaxScore[overrideId] = res.score;
|
||||
}
|
||||
}
|
||||
if (res.scoreNoFilter > m_overrideOriginalMaxScore[OVERRIDE_ACCEPT]) {
|
||||
m_overrideOriginalMaxScore[OVERRIDE_ACCEPT] = res.scoreNoFilter;
|
||||
}
|
||||
}
|
||||
if (behavior == action_ignore)
|
||||
{
|
||||
dbgTrace(D_WAAP_OVERRIDE) << "matched exceptions for " << res.param_name << " should ignore.";
|
||||
@@ -2311,12 +2353,6 @@ Waf2Transaction::shouldIgnoreOverride(const Waf2ScanResult &res) {
|
||||
if (!overrideId.empty()) {
|
||||
m_matchedOverrideIds.insert(overrideId);
|
||||
}
|
||||
if (!res.keyword_matches.empty() || res.unescaped_line == Waap::Scanner::xmlEntityAttributeId)
|
||||
{
|
||||
if (!overrideId.empty()) {
|
||||
m_effectiveOverrideIds.insert(overrideId);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -293,6 +293,9 @@ private:
|
||||
// Matched override IDs
|
||||
std::set<std::string> m_matchedOverrideIds;
|
||||
std::set<std::string> m_effectiveOverrideIds;
|
||||
std::set<std::string> m_exceptionLearned;
|
||||
std::map<std::string, double> m_overrideOriginalMaxScore;
|
||||
std::map<std::string, double> m_overridePostFilterMaxScore;
|
||||
|
||||
//csrf state
|
||||
Waap::CSRF::State m_csrfState;
|
||||
|
||||
@@ -459,9 +459,15 @@ Waf2Transaction::getUserLimitVerdict()
|
||||
}
|
||||
else if (mode == AttackMitigationMode::PREVENT) {
|
||||
decision->setLog(true);
|
||||
decision->setBlock(true);
|
||||
dbgInfo(D_WAAP_ULIMITS) << msg << "BLOCK" << reason;
|
||||
verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
|
||||
if (!m_overrideState.bForceException) {
|
||||
decision->setBlock(true);
|
||||
dbgInfo(D_WAAP_ULIMITS) << msg << "BLOCK" << reason;
|
||||
verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
|
||||
} else {
|
||||
decision->setBlock(true);
|
||||
dbgInfo(D_WAAP_ULIMITS) << msg << "Override Accept" << reason;
|
||||
verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
||||
}
|
||||
}
|
||||
|
||||
return verdict;
|
||||
@@ -567,12 +573,11 @@ Waap::Override::State Waf2Transaction::getOverrideState(IWaapConfig* sitePolicy)
|
||||
|
||||
extractEnvSourceIdentifier();
|
||||
|
||||
Waap::Override::State overrideStateResponse;
|
||||
if (overridePolicy) { // later we will run response overrides
|
||||
overrideStateResponse.applyOverride(*overridePolicy, WaapOverrideFunctor(*this), m_matchedOverrideIds, false);
|
||||
m_overrideState.applyOverride(*overridePolicy, WaapOverrideFunctor(*this), m_matchedOverrideIds, false);
|
||||
}
|
||||
m_isHeaderOverrideScanRequired = false;
|
||||
return overrideStateResponse;
|
||||
return m_overrideState;
|
||||
}
|
||||
|
||||
Waf2TransactionFlags &Waf2Transaction::getTransactionFlags()
|
||||
|
||||
@@ -34,6 +34,7 @@
|
||||
#include "generic_rulebase/rulebase_config.h"
|
||||
#include "user_identifiers_config.h"
|
||||
#include "Waf2Regex.h"
|
||||
#include "ParserBinaryFile.h"
|
||||
|
||||
using boost::algorithm::to_lower_copy;
|
||||
using namespace std;
|
||||
@@ -960,11 +961,12 @@ string filterUTF7(const string& text) {
|
||||
// 4. percent of non-printable characters (!isprint())
|
||||
// in decoded data is less than 10% (statistical garbage detection).
|
||||
// Returns false above checks fail.
|
||||
bool decodeBase64Chunk(
|
||||
base64_decode_status decodeBase64Chunk(
|
||||
const string& value,
|
||||
string::const_iterator it,
|
||||
string::const_iterator end,
|
||||
string& decoded)
|
||||
string& decoded,
|
||||
bool clear_on_error)
|
||||
{
|
||||
decoded.clear();
|
||||
uint32_t acc = 0;
|
||||
@@ -974,13 +976,14 @@ bool decodeBase64Chunk(
|
||||
uint32_t spacer_count = 0;
|
||||
|
||||
dbgTrace(D_WAAP) << "decodeBase64Chunk: value='" << value << "' match='" << string(it, end) << "'";
|
||||
string::const_iterator begin = it;
|
||||
|
||||
// The encoded data length (without the "base64," prefix) should be exactly divisible by 4
|
||||
// len % 4 is not 0 i.e. this is not base64
|
||||
if ((end - it) % 4 != 0) {
|
||||
dbgTrace(D_WAAP_BASE64) <<
|
||||
"b64DecodeChunk: (leave as-is) because encoded data length should be exactly divisible by 4.";
|
||||
return false;
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
while (it != end) {
|
||||
@@ -992,7 +995,7 @@ bool decodeBase64Chunk(
|
||||
dbgTrace(D_WAAP_BASE64) <<
|
||||
"decodeBase64Chunk: (leave as-is) because terminator characters must all be '='," <<
|
||||
"until end of match.";
|
||||
return false;
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
// We should see 0, 1 or 2 (no more) terminator characters
|
||||
@@ -1000,7 +1003,7 @@ bool decodeBase64Chunk(
|
||||
|
||||
if (terminatorCharsSeen > 2) {
|
||||
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (leave as-is) because terminatorCharsSeen > 2";
|
||||
return false;
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
// allow for more terminator characters
|
||||
@@ -1033,8 +1036,8 @@ bool decodeBase64Chunk(
|
||||
}
|
||||
else {
|
||||
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (leave as-is) because of non-base64 character ('" <<
|
||||
c << "', ASCII " << (unsigned int)c << ")";
|
||||
return false; // non-base64 character
|
||||
c << "', ASCII " << (unsigned int)c << ", offset " << (it-begin) << ")";
|
||||
return B64_DECODE_INVALID; // non-base64 character
|
||||
}
|
||||
|
||||
acc = (acc << 6) | val;
|
||||
@@ -1087,20 +1090,23 @@ bool decodeBase64Chunk(
|
||||
}
|
||||
else {
|
||||
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (delete) because decoded.size=" << decoded.size() <<
|
||||
", nonPrintableCharsCount=" << nonPrintableCharsCount;
|
||||
decoded.clear();
|
||||
", nonPrintableCharsCount=" << nonPrintableCharsCount <<
|
||||
", clear_on_error=" << clear_on_error;
|
||||
if (clear_on_error) decoded.clear();
|
||||
return B64_DECODE_INCOMPLETE;
|
||||
}
|
||||
dbgTrace(D_WAAP_BASE64) << "returning true: successfully decoded."
|
||||
<< " Returns decoded data in \"decoded\" parameter";
|
||||
return true; // successfully decoded. Returns decoded data in "decoded" parameter
|
||||
return B64_DECODE_OK; // successfully decoded. Returns decoded data in "decoded" parameter
|
||||
}
|
||||
|
||||
// If decoded size is too small - leave the encoded value (return false)
|
||||
decoded.clear(); // discard partial data
|
||||
dbgTrace(D_WAAP_BASE64) << "decodeBase64Chunk: (leave as-is) because decoded too small. decoded.size=" <<
|
||||
decoded.size() <<
|
||||
", nonPrintableCharsCount=" << nonPrintableCharsCount;
|
||||
return false;
|
||||
", nonPrintableCharsCount=" << nonPrintableCharsCount <<
|
||||
", clear_on_error=" << clear_on_error;
|
||||
return B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
// Attempts to detect and validate base64 chunk.
|
||||
@@ -1144,7 +1150,7 @@ b64DecodeChunk(
|
||||
}
|
||||
}
|
||||
|
||||
return decodeBase64Chunk(value, it, end, decoded);
|
||||
return decodeBase64Chunk(value, it, end, decoded) != B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
vector<string> split(const string& s, char delim) {
|
||||
@@ -1281,6 +1287,8 @@ bool detectBase64Chunk(
|
||||
{
|
||||
dbgTrace(D_WAAP_BASE64) << " ===detectBase64Chunk===: starting with = '" << s << "'";
|
||||
string::const_iterator it = s.begin();
|
||||
start = s.end();
|
||||
end = s.end();
|
||||
|
||||
//detect "base64," prefix to start search after this
|
||||
for (; it != s.end()-7; it++) {
|
||||
@@ -1309,33 +1317,62 @@ bool detectBase64Chunk(
|
||||
return false;
|
||||
}
|
||||
|
||||
base64_decode_status
|
||||
processDecodedChunk(
|
||||
const string &s,
|
||||
string::const_iterator start,
|
||||
string::const_iterator end,
|
||||
string &value,
|
||||
BinaryFileType &binaryFileType
|
||||
)
|
||||
{
|
||||
base64_decode_status retVal = decodeBase64Chunk(s, start, end, value, false);
|
||||
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: after decode. retVal=" << retVal
|
||||
<< " value.size()=" << value.size();
|
||||
if (retVal != B64_DECODE_INVALID && !value.empty()) {
|
||||
binaryFileType = ParserBinaryFile::detectBinaryFileHeader(value);
|
||||
if (retVal == B64_DECODE_INCOMPLETE) value.clear();
|
||||
}
|
||||
return retVal;
|
||||
}
|
||||
|
||||
bool isBase64PrefixProcessingOK (
|
||||
const string &s,
|
||||
string &value)
|
||||
string &value,
|
||||
BinaryFileType &binaryFileType)
|
||||
{
|
||||
string::const_iterator start, end;
|
||||
bool retVal = false;
|
||||
base64_decode_status retVal = B64_DECODE_INVALID;
|
||||
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: before regex for prefix for string '" << s << "'";
|
||||
if (base64_prefix_detector_re.hasMatch(s)) {
|
||||
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: prefix detected on string '" << s << "'";
|
||||
if (detectBase64Chunk(s, start, end)) {
|
||||
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: chunk detected";
|
||||
if ((start != s.end()) && (end == s.end())) {
|
||||
retVal = decodeBase64Chunk(s, start, end, value);
|
||||
retVal = processDecodedChunk(s, start, end, value, binaryFileType);
|
||||
}
|
||||
} else if (start != s.end()) {
|
||||
dbgTrace(D_WAAP_BASE64) << " ===isBase64PrefixProcessingOK===: chunk not detected."
|
||||
" searching for known file header only";
|
||||
end = (start + MAX_HEADER_LOOKUP < s.end()) ? start + MAX_HEADER_LOOKUP : s.end();
|
||||
processDecodedChunk(s, start, end, value, binaryFileType);
|
||||
value.clear();
|
||||
return binaryFileType != Waap::Util::BinaryFileType::FILE_TYPE_NONE;
|
||||
}
|
||||
}
|
||||
return retVal;
|
||||
return retVal != B64_DECODE_INVALID;
|
||||
}
|
||||
|
||||
base64_variants b64Test (
|
||||
const string &s,
|
||||
string &key,
|
||||
string &value)
|
||||
string &value,
|
||||
BinaryFileType &binaryFileType)
|
||||
{
|
||||
|
||||
key.clear();
|
||||
bool retVal;
|
||||
binaryFileType = Waap::Util::BinaryFileType::FILE_TYPE_NONE;
|
||||
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: string = " << s
|
||||
<< " key = " << key << " value = " << value;
|
||||
@@ -1397,7 +1434,7 @@ base64_variants b64Test (
|
||||
}
|
||||
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: after processing key = '" << key << "'";
|
||||
bool found = isBase64PrefixProcessingOK(s, prefix_decoded_val);
|
||||
bool found = isBase64PrefixProcessingOK(s, prefix_decoded_val, binaryFileType);
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: after prefix test found = "
|
||||
<< found << " new value is '" << prefix_decoded_val << "' - done";
|
||||
if (found) {
|
||||
@@ -1421,7 +1458,7 @@ base64_variants b64Test (
|
||||
if ((s.end() - start) % 4 != 0) {
|
||||
key.clear();
|
||||
value.clear();
|
||||
return CONTINUE_AS_IS;;
|
||||
return CONTINUE_AS_IS;
|
||||
}
|
||||
}
|
||||
else {
|
||||
@@ -1443,7 +1480,7 @@ base64_variants b64Test (
|
||||
key.pop_back();
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: FINAL key = '" << key << "'";
|
||||
}
|
||||
retVal = decodeBase64Chunk(s, start, s.end(), value);
|
||||
retVal = decodeBase64Chunk(s, start, s.end(), value) != B64_DECODE_INVALID;
|
||||
|
||||
dbgTrace(D_WAAP_BASE64) << " ===b64Test===: After testing and conversion value = "
|
||||
<< value << "retVal = '" << retVal <<"'";
|
||||
|
||||
@@ -34,6 +34,7 @@
|
||||
|
||||
enum base64_variants {SINGLE_B64_CHUNK_CONVERT, KEY_VALUE_B64_PAIR, CONTINUE_AS_IS};
|
||||
enum base64_stage {BEFORE_EQUAL, EQUAL, DONE, MISDETECT};
|
||||
enum base64_decode_status {B64_DECODE_INVALID, B64_DECODE_OK, B64_DECODE_INCOMPLETE};
|
||||
|
||||
// This is portable version of stricmp(), which is non-standard function (not even in C).
|
||||
// Contrary to stricmp(), for a slight optimization, s2 is ASSUMED to be already in lowercase.
|
||||
@@ -858,12 +859,13 @@ void unescapeUnicode(std::string &text);
|
||||
// Try to find and decode UTF7 chunks
|
||||
std::string filterUTF7(const std::string &text);
|
||||
|
||||
bool
|
||||
base64_decode_status
|
||||
decodeBase64Chunk(
|
||||
const std::string &value,
|
||||
std::string::const_iterator it,
|
||||
std::string::const_iterator end,
|
||||
std::string &decoded);
|
||||
std::string &decoded,
|
||||
bool clear_on_error = true);
|
||||
|
||||
bool
|
||||
b64DecodeChunk(
|
||||
@@ -889,6 +891,13 @@ namespace Util {
|
||||
std::string &key,
|
||||
std::string &value);
|
||||
|
||||
enum BinaryFileType {
|
||||
FILE_TYPE_NONE,
|
||||
FILE_TYPE_PNG,
|
||||
FILE_TYPE_JPEG,
|
||||
FILE_TYPE_PDF
|
||||
};
|
||||
|
||||
void b64Decode(
|
||||
const std::string &s,
|
||||
RegexSubCallback_f cb,
|
||||
@@ -899,7 +908,8 @@ namespace Util {
|
||||
base64_variants b64Test (
|
||||
const std::string &s,
|
||||
std::string &key,
|
||||
std::string &value);
|
||||
std::string &value,
|
||||
BinaryFileType &binaryFileType);
|
||||
|
||||
// The original stdlib implementation of isalpha() supports locale settings which we do not really need.
|
||||
// It is also proven to contribute to slow performance in some of the algorithms using it.
|
||||
|
||||
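For orientation, a minimal hypothetical caller (not part of this changeset) showing how the new tri-state base64_decode_status and the clear_on_error flag are meant to be consumed; the ParserBinaryFile::detectBinaryFileHeader() call mirrors the processDecodedChunk() logic above, and namespace qualifiers are omitted for brevity.

// Sketch only - assumes the declarations above are in scope.
void inspectPossibleBase64(const std::string &input)
{
    std::string decoded;
    // clear_on_error = false keeps best-effort output even when the printable-chars check fails
    base64_decode_status status = decodeBase64Chunk(input, input.begin(), input.end(), decoded, false);

    if (status == B64_DECODE_INVALID) {
        return; // not base64 at all - leave the original value untouched
    }
    if (status == B64_DECODE_INCOMPLETE) {
        // partially usable output: only check for a known binary file header (PNG/JPEG/PDF)
        BinaryFileType file_type = ParserBinaryFile::detectBinaryFileHeader(decoded);
        (void)file_type;
        return;
    }
    // B64_DECODE_OK: decoded now holds the full cleartext and can be scanned further
}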
@@ -14,7 +14,8 @@ enum SchemaKeyType
|
||||
StartObjectKeyName,
|
||||
StartArrayKeyName,
|
||||
EndArrayKeyName,
|
||||
OtherKey
|
||||
OtherKey,
|
||||
JsonFailure
|
||||
};
|
||||
|
||||
#endif // __OA_SCHEMA_UPDATER_KEYS_H__
|
||||
|
||||
@@ -42,6 +42,7 @@ using namespace std;
|
||||
USE_DEBUG_FLAG(D_WAAP);
|
||||
USE_DEBUG_FLAG(D_WAAP_ULIMITS);
|
||||
USE_DEBUG_FLAG(D_OA_SCHEMA_UPDATER);
|
||||
USE_DEBUG_FLAG(D_NGINX_EVENTS);
|
||||
|
||||
WaapComponent::Impl::Impl() :
|
||||
pending_response(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT),
|
||||
@@ -124,7 +125,7 @@ WaapComponent::Impl::getListenerName() const
|
||||
EventVerdict
|
||||
WaapComponent::Impl::respond(const NewHttpTransactionEvent &event)
|
||||
{
|
||||
dbgTrace(D_WAAP) << " * \e[32mNGEN_EVENT: NewTransactionEvent\e[0m";
|
||||
dbgTrace(D_NGINX_EVENTS) << " * \e[32mNGEN_EVENT: NewTransactionEvent\e[0m";
|
||||
|
||||
if (waapStateTable->hasState<Waf2Transaction>()) {
|
||||
dbgWarning(D_WAAP) << " * \e[31 -- NewTransactionEvent called twice on same entry \e[0m";
|
||||
@@ -202,7 +203,7 @@ WaapComponent::Impl::respond(const HttpRequestHeaderEvent &event)
|
||||
auto &header_name = event.getKey();
|
||||
auto &header_value = event.getValue();
|
||||
|
||||
dbgTrace(D_WAAP)
|
||||
dbgTrace(D_NGINX_EVENTS)
|
||||
<< " * \e[32mNGEN_EVENT: HttpHeaderRequest event: "
|
||||
<< string(header_name)
|
||||
<< ": "
|
||||
@@ -210,7 +211,7 @@ WaapComponent::Impl::respond(const HttpRequestHeaderEvent &event)
|
||||
<< "\e[0m";
|
||||
|
||||
if (!waapStateTable->hasState<Waf2Transaction>()) {
|
||||
dbgWarning(D_WAAP)
|
||||
dbgWarning(D_NGINX_EVENTS)
|
||||
<< " * \e[31mNGEN_EVENT: http_header - "
|
||||
<< "failed to get waf2 transaction, state not exist\e[0m";
|
||||
|
||||
@@ -257,10 +258,10 @@ WaapComponent::Impl::respond(const HttpRequestHeaderEvent &event)
|
||||
EventVerdict
|
||||
WaapComponent::Impl::respond(const HttpRequestBodyEvent &event)
|
||||
{
|
||||
dbgTrace(D_WAAP) << " * \e[32mNGEN_EVENT: HttpBodyRequest data buffer event\e[0m";
|
||||
dbgTrace(D_NGINX_EVENTS) << " * \e[32mNGEN_EVENT: HttpBodyRequest data buffer event\e[0m";
|
||||
|
||||
if (!waapStateTable->hasState<Waf2Transaction>()) {
|
||||
dbgWarning(D_WAAP) <<
|
||||
dbgWarning(D_NGINX_EVENTS) <<
|
||||
" * \e[31mNGEN_EVENT: data buffer - failed to get waf2 transaction, state not exist\e[0m";
|
||||
return drop_response;
|
||||
}
|
||||
@@ -295,10 +296,10 @@ WaapComponent::Impl::respond(const HttpRequestBodyEvent &event)
|
||||
EventVerdict
|
||||
WaapComponent::Impl::respond(const EndRequestEvent &)
|
||||
{
|
||||
dbgTrace(D_WAAP) << " * \e[32mNGEN_EVENT: endRequest event\e[0m";
|
||||
dbgTrace(D_NGINX_EVENTS) << " * \e[32mNGEN_EVENT: endRequest event\e[0m";
|
||||
|
||||
if (!waapStateTable->hasState<Waf2Transaction>()) {
|
||||
dbgWarning(D_WAAP)
|
||||
dbgWarning(D_NGINX_EVENTS)
|
||||
<< "* \e[31mNGEN_EVENT: endRequest - failed to get waf2 transaction, state does not exist\e[0m";
|
||||
return drop_response;
|
||||
}
|
||||
@@ -333,13 +334,13 @@ WaapComponent::Impl::respond(const EndRequestEvent &)
|
||||
EventVerdict
|
||||
WaapComponent::Impl::respond(const ResponseCodeEvent &event)
|
||||
{
|
||||
dbgTrace(D_WAAP)
|
||||
dbgTrace(D_NGINX_EVENTS)
|
||||
<< " * \e[32mNGEN_EVENT: ResponseCodeTransactionEvent event code = "
|
||||
<< event.getResponseCode()
|
||||
<< "\e[0m";
|
||||
|
||||
if (!waapStateTable->hasState<Waf2Transaction>()) {
|
||||
dbgWarning(D_WAAP)
|
||||
dbgWarning(D_NGINX_EVENTS)
|
||||
<< " * \e[31mNGEN_EVENT: ResponseCodeTransactionEvent - failed to get waf2 transaction, "
|
||||
<< "state does not exist\e[0m";
|
||||
return drop_response;
|
||||
@@ -385,7 +386,7 @@ WaapComponent::Impl::respond(const HttpResponseHeaderEvent &event)
|
||||
auto &header_name = event.getKey();
|
||||
auto &header_value = event.getValue();
|
||||
|
||||
dbgTrace(D_WAAP)
|
||||
dbgTrace(D_NGINX_EVENTS)
|
||||
<< " * \e[32mNGEN_EVENT: HttpHeaderResponse event: "
|
||||
<< string(header_name)
|
||||
<< ": "
|
||||
@@ -393,7 +394,7 @@ WaapComponent::Impl::respond(const HttpResponseHeaderEvent &event)
|
||||
<< "\e[0m";
|
||||
|
||||
if (!waapStateTable->hasState<Waf2Transaction>()) {
|
||||
dbgWarning(D_WAAP)
|
||||
dbgWarning(D_NGINX_EVENTS)
|
||||
<< " * \e[31mNGEN_EVENT: HttpHeaderResponse - "
|
||||
<< "failed to get waf2 transaction, state does not exist\e[0m";
|
||||
return drop_response;
|
||||
@@ -491,10 +492,10 @@ WaapComponent::Impl::respond(const HttpResponseHeaderEvent &event)
|
||||
EventVerdict
|
||||
WaapComponent::Impl::respond(const HttpResponseBodyEvent &event)
|
||||
{
|
||||
dbgTrace(D_WAAP) << " * \e[32mNGEN_EVENT: HttpBodyResponse data buffer event\e[0m";
|
||||
dbgTrace(D_NGINX_EVENTS) << " * \e[32mNGEN_EVENT: HttpBodyResponse data buffer event\e[0m";
|
||||
|
||||
if (!waapStateTable->hasState<Waf2Transaction>()) {
|
||||
dbgWarning(D_WAAP) <<
|
||||
dbgWarning(D_NGINX_EVENTS) <<
|
||||
" * \e[31mNGEN_EVENT: HttpBodyResponse - failed to get waf2 transaction, state does not exist\e[0m";
|
||||
return drop_response;
|
||||
}
|
||||
@@ -591,7 +592,7 @@ EventVerdict
|
||||
WaapComponent::Impl::respond(const EndTransactionEvent &)
|
||||
{
|
||||
if (!waapStateTable->hasState<Waf2Transaction>()) {
|
||||
dbgWarning(D_WAAP) <<
|
||||
dbgWarning(D_NGINX_EVENTS) <<
|
||||
" * \e[31mNGEN_EVENT: endTransaction - failed to get waf2 transaction, state does not exist\e[0m";
|
||||
return EventVerdict(drop_response);
|
||||
}
|
||||
|
||||
@@ -52,6 +52,7 @@ const string HttpTransactionData::req_headers = "transaction_request_head
|
||||
const string HttpTransactionData::req_body = "transaction_request_body";
|
||||
const string HttpTransactionData::source_identifier = "sourceIdentifiers";
|
||||
const string HttpTransactionData::proxy_ip_ctx = "proxy_ip";
|
||||
const string HttpTransactionData::xff_vals_ctx = "xff_vals";
|
||||
|
||||
const CompressionType HttpTransactionData::default_response_content_encoding = CompressionType::NO_COMPRESSION;
|
||||
|
||||
|
||||
@@ -175,7 +175,7 @@ copyFile(const string &src, const string &dest, bool overide_if_exists, mode_t p
|
||||
struct stat stat_buf;
|
||||
int source_fd = open(src.c_str(), O_RDONLY);
|
||||
fstat(source_fd, &stat_buf);
|
||||
int dest_fd = open(dest.c_str(), O_WRONLY | O_CREAT, permission);
|
||||
int dest_fd = open(dest.c_str(), O_WRONLY | O_CREAT | O_TRUNC, permission);
|
||||
int bytes_copied = 1;
|
||||
while (bytes_copied > 0) {
|
||||
static const int buf_size = 4096*1000;
|
||||
|
||||
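The O_TRUNC change above matters when the destination already exists and is longer than the source: without truncation the copy leaves the old file's trailing bytes after the newly written data, which is what the copy-over-existing-file case in the unit test below exercises. A standalone sketch of the corrected open call (illustrative only):

// Sketch only: truncate any previous content before writing the new data.
#include <fcntl.h>
#include <unistd.h>

void overwriteDest(const char *dest)
{
    int fd = open(dest, O_WRONLY | O_CREAT | O_TRUNC, 0644); // O_TRUNC discards the old tail
    if (fd < 0) return;
    write(fd, "short", 5);
    close(fd);
}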
@@ -37,8 +37,11 @@ TEST_F(AgentCoreUtilUT, filesTest)
|
||||
EXPECT_FALSE(NGEN::Filesystem::exists("/i/am/not/a/real/path"));
|
||||
|
||||
const vector<string> lines{"i am a line in the text file", "i am iron man"};
|
||||
const vector<string> lines_b{"i am a line 2 in the text file", "i am iron man 2", "hello again"};
|
||||
CPTestTempfile test_file(lines);
|
||||
CPTestTempfile test_file_b(lines_b);
|
||||
ASSERT_TRUE(NGEN::Filesystem::exists(test_file.fname));
|
||||
ASSERT_TRUE(NGEN::Filesystem::exists(test_file_b.fname));
|
||||
|
||||
string output_orig = test_file.readFile();
|
||||
string new_path = test_file.fname + ".new";
|
||||
@@ -46,6 +49,7 @@ TEST_F(AgentCoreUtilUT, filesTest)
|
||||
ASSERT_TRUE(NGEN::Filesystem::exists(new_path));
|
||||
ASSERT_FALSE(NGEN::Filesystem::copyFile(test_file.fname, new_path, false));
|
||||
ASSERT_TRUE(NGEN::Filesystem::copyFile(test_file.fname, new_path, true));
|
||||
ASSERT_TRUE(NGEN::Filesystem::copyFile(test_file.fname, test_file_b.fname, true));
|
||||
string output_new;
|
||||
{
|
||||
ifstream new_file_stream(new_path);
|
||||
@@ -55,11 +59,20 @@ TEST_F(AgentCoreUtilUT, filesTest)
|
||||
output_new = buffer.str();
|
||||
}
|
||||
|
||||
string output_test_b;
|
||||
ifstream new_file_stream(test_file_b.fname);
|
||||
ASSERT_TRUE(new_file_stream.good());
|
||||
stringstream buffer;
|
||||
buffer << new_file_stream.rdbuf();
|
||||
output_test_b = buffer.str();
|
||||
|
||||
EXPECT_EQ(output_orig, output_new);
|
||||
EXPECT_EQ(output_orig, output_test_b);
|
||||
EXPECT_THAT(output_new, HasSubstr("i am a line in the text file"));
|
||||
EXPECT_THAT(output_new, HasSubstr("i am iron man"));
|
||||
EXPECT_TRUE(NGEN::Filesystem::deleteFile(test_file.fname));
|
||||
EXPECT_TRUE(NGEN::Filesystem::deleteFile(new_path));
|
||||
EXPECT_TRUE(NGEN::Filesystem::deleteFile(test_file_b.fname));
|
||||
EXPECT_FALSE(NGEN::Filesystem::exists(test_file.fname));
|
||||
EXPECT_FALSE(NGEN::Filesystem::exists(new_path));
|
||||
}
|
||||
|
||||
@@ -348,14 +348,12 @@ ConfigComponent::Impl::init()
|
||||
if (!Singleton::exists<I_MainLoop>()) return;
|
||||
auto mainloop = Singleton::Consume<I_MainLoop>::by<ConfigComponent>();
|
||||
|
||||
if (executable_name != "cp-nano-orchestration") {
|
||||
mainloop->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::System,
|
||||
[this] () { periodicRegistrationRefresh(); },
|
||||
"Configuration update registration",
|
||||
false
|
||||
);
|
||||
}
|
||||
mainloop->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::System,
|
||||
[this] () { periodicRegistrationRefresh(); },
|
||||
"Configuration update registration",
|
||||
false
|
||||
);
|
||||
}
|
||||
|
||||
static bool
|
||||
|
||||
@@ -124,7 +124,7 @@ cptestGenerateHex(const std::vector<u_char> &vec, bool print_offsets)
|
||||
std::string
|
||||
cptestFnameInExeDir(const std::string &name)
|
||||
{
|
||||
auto const &bin_path = ::testing::internal::GetArgvs()[0]; // Internal ugly API.
|
||||
auto bin_path = ::testing::internal::GetArgvs()[0]; // Internal ugly API.
|
||||
auto slash = bin_path.rfind('/');
|
||||
if (slash==string::npos) {
|
||||
// bin_path contains no dir. So return name with no dir
|
||||
|
||||
@@ -234,6 +234,55 @@ public:
|
||||
|
||||
static DebugConfiguration default_config;
|
||||
|
||||
class ChangeDebugConfiguration : public ServerRest
|
||||
{
|
||||
public:
|
||||
void
|
||||
doCall() override
|
||||
{
|
||||
string debug_config_file_name = Debug::getExecutableName();
|
||||
if (debug_config_file_name == "") {
|
||||
output = "Error. Cannot get debug config file path";
|
||||
return;
|
||||
}
|
||||
string debug_config_file_path = getConfigurationWithDefault<string>(
|
||||
getFilesystemPathConfig() + "/conf/" + debug_config_file_name + "-debug-conf.json",
|
||||
"Debug I/S",
|
||||
"Debug conf file path"
|
||||
);
|
||||
try {
|
||||
ifstream input_stream(debug_config_file_path);
|
||||
if (!input_stream) {
|
||||
output = "Error. Cannot open the debug conf file: " + debug_config_file_path;
|
||||
return;
|
||||
}
|
||||
cereal::JSONInputArchive ar(input_stream);
|
||||
Debug::prepareConfig();
|
||||
vector<DebugConfiguration> debug_config;
|
||||
try {
|
||||
ar(cereal::make_nvp("Debug", debug_config));
|
||||
} catch (cereal::Exception &e) {
|
||||
output = "Error. Failed loading debug conf file, error: " + string(e.what());
|
||||
Debug::abortConfig();
|
||||
return;
|
||||
}
|
||||
input_stream.close();
|
||||
setConfiguration(debug_config[0], "Debug");
|
||||
Debug::commitConfig();
|
||||
output = "New debug configuration set succesfully";
|
||||
} catch (ifstream::failure &f) {
|
||||
output =
|
||||
"Error. Cannot open the debug conf file " +
|
||||
debug_config_file_path +
|
||||
", error: " +
|
||||
string(f.what());
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
S2C_PARAM(string, output);
|
||||
};
|
||||
|
||||
// LCOV_EXCL_START - function is covered in unit-test, but not detected bt gcov
|
||||
Debug::Debug(
|
||||
const string &file_name,
|
||||
@@ -446,6 +495,7 @@ Debug::preload()
|
||||
{
|
||||
registerExpectedConfiguration<DebugConfiguration>("Debug");
|
||||
registerExpectedConfiguration<string>("Debug I/S", "Fog Debug URI");
|
||||
registerExpectedConfiguration<string>("Debug I/S", "Debug conf file path");
|
||||
registerExpectedConfiguration<bool>("Debug I/S", "Enable bulk of debugs");
|
||||
registerExpectedConfiguration<uint>("Debug I/S", "Debug bulk size");
|
||||
registerExpectedConfiguration<uint>("Debug I/S", "Debug bulk sending interval in msec");
|
||||
@@ -467,22 +517,18 @@ Debug::init()
|
||||
mainloop = Singleton::Consume<I_MainLoop>::by<Debug>();
|
||||
env = Singleton::Consume<I_Environment>::by<Debug>();
|
||||
|
||||
auto executable = env->get<string>("Executable Name");
|
||||
|
||||
if (executable.ok() && *executable != "") {
|
||||
string default_debug_output_file_path = *executable;
|
||||
auto file_path_end = default_debug_output_file_path.find_last_of("/");
|
||||
if (file_path_end != string::npos) {
|
||||
default_debug_file_stream_path = default_debug_output_file_path.substr(file_path_end + 1);
|
||||
}
|
||||
auto file_sufix_start = default_debug_file_stream_path.find_first_of(".");
|
||||
if (file_sufix_start != string::npos) {
|
||||
default_debug_file_stream_path = default_debug_file_stream_path.substr(0, file_sufix_start);
|
||||
}
|
||||
|
||||
default_debug_file_stream_path = getExecutableName();
|
||||
if (default_debug_file_stream_path != "") {
|
||||
string log_files_prefix = getLogFilesPathConfig();
|
||||
default_debug_file_stream_path = log_files_prefix + "/nano_agent/" + default_debug_file_stream_path + ".dbg";
|
||||
}
|
||||
|
||||
if (Singleton::exists<I_RestApi>()) {
|
||||
Singleton::Consume<I_RestApi>::by<Debug>()->addRestCall<ChangeDebugConfiguration>(
|
||||
RestAction::SET,
|
||||
"change-debug-config"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
@@ -706,6 +752,27 @@ Debug::findDebugFilePrefix(const string &file_name)
|
||||
return "";
|
||||
}
|
||||
|
||||
string
|
||||
Debug::getExecutableName()
|
||||
{
|
||||
auto executable = env->get<string>("Executable Name");
|
||||
if (!executable.ok() || *executable == "") {
|
||||
return "";
|
||||
}
|
||||
|
||||
string executable_name = *executable;
|
||||
auto file_path_end = executable_name.find_last_of("/");
|
||||
if (file_path_end != string::npos) {
|
||||
executable_name = executable_name.substr(file_path_end + 1);
|
||||
}
|
||||
auto file_sufix_start = executable_name.find_first_of(".");
|
||||
if (file_sufix_start != string::npos) {
|
||||
executable_name = executable_name.substr(0, file_sufix_start);
|
||||
}
|
||||
|
||||
return executable_name;
|
||||
}
|
||||
|
||||
void
|
||||
Debug::addActiveStream(const string &name)
|
||||
{
|
||||
|
||||
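The getExecutableName() helper added above centralizes the name derivation that init() previously did inline; roughly, it behaves like this standalone sketch (illustrative, with a hypothetical input path):

// e.g. "/usr/sbin/cp-nano-orchestration.bin" -> "cp-nano-orchestration"
std::string deriveExecutableName(const std::string &executable_path)
{
    std::string name = executable_path;
    auto file_path_end = name.find_last_of("/");
    if (file_path_end != std::string::npos) name = name.substr(file_path_end + 1);
    auto file_suffix_start = name.find_first_of(".");
    if (file_suffix_start != std::string::npos) name = name.substr(0, file_suffix_start);
    return name;
}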
@@ -21,6 +21,7 @@
|
||||
#include "report/report.h"
|
||||
#include "i_agent_details.h"
|
||||
#include "i_environment.h"
|
||||
#include "i_rest_api.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "report/report_bulks.h"
|
||||
|
||||
|
||||
core/debug_is/debug_is_ut/debug-ut-debug-conf.json (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"Debug": [
|
||||
{
|
||||
"Streams": [
|
||||
{
|
||||
"D_ALL": "Info",
|
||||
"D_FW": "Debug",
|
||||
"Output": "/var/log/nano_agent/cp-nano-orchestration.dbg"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -14,6 +14,7 @@
|
||||
#include "mock/mock_messaging.h"
|
||||
#include "mock/mock_mainloop.h"
|
||||
#include "mock/mock_environment.h"
|
||||
#include "mock/mock_rest_api.h"
|
||||
#include "mock/mock_instance_awareness.h"
|
||||
|
||||
using namespace std;
|
||||
@@ -551,7 +552,6 @@ public:
|
||||
Debug::setNewDefaultStdout(&capture_debug);
|
||||
}
|
||||
|
||||
|
||||
~DebugConfigTest()
|
||||
{
|
||||
loadConfiguration("");
|
||||
@@ -583,10 +583,19 @@ public:
|
||||
Singleton::Consume<Config::I_Config>::from(conf)->loadConfiguration(configuration);
|
||||
}
|
||||
|
||||
bool
|
||||
setDebugConfig(const unique_ptr<RestInit> &p)
|
||||
{
|
||||
set_debug_config = p->getRest();
|
||||
return true;
|
||||
}
|
||||
|
||||
ConfigComponent conf;
|
||||
::Environment env;
|
||||
stringstream capture_debug;
|
||||
StrictMock<MockAgentDetails> mock_agent_details;
|
||||
NiceMock<MockRestApi> mock_rest;
|
||||
unique_ptr<ServerRest> set_debug_config;
|
||||
};
|
||||
|
||||
TEST_F(DebugConfigTest, basic_configuration)
|
||||
@@ -786,6 +795,54 @@ TEST_F(DebugConfigTest, fail_configuration)
|
||||
EXPECT_FALSE(loadConfiguration(debug_config));
|
||||
}
|
||||
|
||||
TEST_F(DebugConfigTest, testSetConfig)
|
||||
{
|
||||
NiceMock<MockMainLoop> mock_mainloop;
|
||||
StrictMock<MockTimeGet> mock_time;
|
||||
Debug::setUnitTestFlag(D_FW, Debug::DebugLevel::WARNING);
|
||||
EXPECT_TRUE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::ERROR));
|
||||
EXPECT_FALSE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::INFO));
|
||||
EXPECT_FALSE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::DEBUG));
|
||||
EXPECT_FALSE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::TRACE));
|
||||
|
||||
EXPECT_CALL(mock_rest, mockRestCall(RestAction::SET, "change-debug-config", _))
|
||||
.WillOnce(WithArg<2>(Invoke(this, &DebugConfigTest::setDebugConfig))
|
||||
);
|
||||
Maybe<I_MainLoop::RoutineID> error_id = genError("no id");
|
||||
EXPECT_CALL(mock_mainloop, getCurrentRoutineId()).WillRepeatedly(Return(error_id));
|
||||
EXPECT_CALL(mock_time, getWalltimeStr()).WillRepeatedly(Return(string("")));
|
||||
|
||||
EXPECT_CALL(mock_rest, mockRestCall(RestAction::ADD, "declare-boolean-variable", _)).WillOnce(Return(true));
|
||||
|
||||
env.preload();
|
||||
Singleton::Consume<I_Environment>::from(env)->registerValue<string>("Executable Name", "debug-ut");
|
||||
env.init();
|
||||
|
||||
Debug::init();
|
||||
|
||||
setConfiguration(
|
||||
cptestFnameInSrcDir(string("debug-ut-debug-conf.json")),
|
||||
string("Debug I/S"),
|
||||
string("Debug conf file path")
|
||||
);
|
||||
|
||||
stringstream ss("{}");
|
||||
Maybe<string> maybe_res = set_debug_config->performRestCall(ss);
|
||||
ASSERT_TRUE(maybe_res.ok());
|
||||
EXPECT_EQ(maybe_res.unpack(), "{\n \"output\": \"New debug configuration set successfully\"\n}");
|
||||
|
||||
EXPECT_TRUE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::ERROR));
|
||||
EXPECT_TRUE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::INFO));
|
||||
EXPECT_TRUE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::DEBUG));
|
||||
EXPECT_FALSE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::TRACE));
|
||||
Debug::setUnitTestFlag(D_FW, Debug::DebugLevel::WARNING); // Reset debug level so it won't affect other tests
|
||||
EXPECT_TRUE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::ERROR));
|
||||
EXPECT_FALSE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::INFO));
|
||||
EXPECT_FALSE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::DEBUG));
|
||||
EXPECT_FALSE(Debug::isFlagAtleastLevel(D_FW, Debug::DebugLevel::TRACE));
|
||||
Debug::fini();
|
||||
}
|
||||
|
||||
ACTION(InvokeMainLoopCB)
|
||||
{
|
||||
arg1();
|
||||
|
||||
@@ -58,7 +58,6 @@ public:
|
||||
|
||||
string getCurrentTrace() const override;
|
||||
string getCurrentSpan() const override;
|
||||
string getCurrentHeaders() override;
|
||||
map<string, string> getCurrentHeadersMap() override;
|
||||
|
||||
void startNewTrace(bool new_span, const string &_trace_id) override;
|
||||
@@ -239,34 +238,6 @@ Environment::Impl::getCurrentSpan() const
|
||||
return "";
|
||||
}
|
||||
|
||||
string
|
||||
Environment::Impl::getCurrentHeaders()
|
||||
{
|
||||
string tracing_headers;
|
||||
auto trace_id = getCurrentTrace();
|
||||
if (!trace_id.empty()) {
|
||||
tracing_headers += "X-Trace-Id: " + trace_id + "\r\n";
|
||||
} else {
|
||||
string correlation_id_string = "00000000-0000-0000-0000-000000000000";
|
||||
try {
|
||||
boost::uuids::random_generator uuid_random_gen;
|
||||
correlation_id_string = boost::uuids::to_string(uuid_random_gen());
|
||||
} catch (const boost::uuids::entropy_error &e) {
|
||||
dbgTrace(D_ENVIRONMENT)
|
||||
<< "Failed to generate random correlation id - entropy exception. Exception: "
|
||||
<< e.what();
|
||||
tracing_status = TracingStatus::DISABLED;
|
||||
}
|
||||
tracing_headers += "X-Trace-Id: " + correlation_id_string + "\r\n";
|
||||
}
|
||||
|
||||
auto span_id = getCurrentSpan();
|
||||
if (!span_id.empty()) {
|
||||
tracing_headers += "X-Span-Id: " + span_id + "\r\n";
|
||||
}
|
||||
return tracing_headers;
|
||||
}
|
||||
|
||||
map<string, string>
|
||||
Environment::Impl::getCurrentHeadersMap()
|
||||
{
|
||||
@@ -292,6 +263,21 @@ Environment::Impl::getCurrentHeadersMap()
|
||||
if (!span_id.empty()) {
|
||||
tracing_headers["X-Span-Id"] = span_id;
|
||||
}
|
||||
|
||||
auto exec_name = get<string>("Executable Name");
|
||||
if (exec_name.ok() && *exec_name != "") {
|
||||
string executable_name = *exec_name;
|
||||
auto file_path_end = executable_name.find_last_of("/");
|
||||
if (file_path_end != string::npos) {
|
||||
executable_name = executable_name.substr(file_path_end + 1);
|
||||
}
|
||||
auto file_sufix_start = executable_name.find_first_of(".");
|
||||
if (file_sufix_start != string::npos) {
|
||||
executable_name = executable_name.substr(0, file_sufix_start);
|
||||
}
|
||||
tracing_headers["X-Calling-Service"] = executable_name;
|
||||
}
|
||||
|
||||
return tracing_headers;
|
||||
}
|
||||
|
||||
|
||||
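With the change above, getCurrentHeadersMap() also reports the calling service, derived from the registered "Executable Name" the same way Debug::getExecutableName() does. A hypothetical expectation (values assumed for illustration):

// Assuming "Executable Name" was registered as "/usr/sbin/cp-nano-http-transaction-handler.bin"
auto headers = i_env->getCurrentHeadersMap();
// headers["X-Trace-Id"] / headers["X-Span-Id"]  - populated as before when tracing is active
// headers["X-Calling-Service"] == "cp-nano-http-transaction-handler"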
@@ -345,12 +345,16 @@ public:
|
||||
TEST_F(TracingCompRoutinesTest, 2SpansDifFlow)
|
||||
{
|
||||
I_MainLoop::Routine routine = [&] () {
|
||||
string service_name = "test-service-name";
|
||||
i_env->registerValue("Executable Name", service_name);
|
||||
|
||||
i_env->startNewTrace(true, "a687b388-1108-4083-9852-07c33b1074e9");
|
||||
trace_id = i_env->getCurrentTrace();
|
||||
span_id = i_env->getCurrentSpan();
|
||||
string headers = i_env->getCurrentHeaders();
|
||||
EXPECT_THAT(headers, HasSubstr("X-Trace-Id: " + trace_id));
|
||||
EXPECT_THAT(headers, HasSubstr("X-Span-Id: " + span_id));
|
||||
auto headers = i_env->getCurrentHeadersMap();
|
||||
EXPECT_THAT(headers["X-Trace-Id"], trace_id);
|
||||
EXPECT_THAT(headers["X-Span-Id"], span_id);
|
||||
EXPECT_THAT(headers["X-Calling-Service"], service_name);
|
||||
|
||||
EXPECT_EQ(trace_id, "a687b388-1108-4083-9852-07c33b1074e9");
|
||||
EXPECT_NE("", i_env->getCurrentSpan());
|
||||
|
||||
@@ -30,6 +30,7 @@ class I_Environment;
|
||||
class I_InstanceAwareness;
|
||||
class I_Encryptor;
|
||||
class I_AgentDetails;
|
||||
class I_RestApi;
|
||||
class I_SignalHandler;
|
||||
|
||||
namespace Config { enum class Errors; }
|
||||
@@ -44,6 +45,7 @@ class Debug
|
||||
Singleton::Consume<I_Environment>,
|
||||
Singleton::Consume<I_Encryptor>,
|
||||
Singleton::Consume<I_AgentDetails>,
|
||||
Singleton::Consume<I_RestApi>,
|
||||
Singleton::Consume<I_SignalHandler>
|
||||
{
|
||||
public:
|
||||
@@ -178,6 +180,7 @@ public:
|
||||
static void setUnitTestFlag(DebugFlags flag, DebugLevel level);
|
||||
|
||||
static std::string findDebugFilePrefix(const std::string &file_name);
|
||||
static std::string getExecutableName();
|
||||
|
||||
private:
|
||||
template <typename T, typename... Args>
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include "i_time_get.h"
|
||||
#include "i_encryptor.h"
|
||||
#include "i_shell_cmd.h"
|
||||
#include "i_rest_api.h"
|
||||
#include "i_instance_awareness.h"
|
||||
|
||||
#include "config.h"
|
||||
@@ -41,6 +42,7 @@ class Messaging
|
||||
Singleton::Consume<I_TimeGet>,
|
||||
Singleton::Consume<I_ShellCmd>,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_RestApi>,
|
||||
Singleton::Consume<I_InstanceAwareness>
|
||||
{
|
||||
public:
|
||||
|
||||
@@ -79,7 +79,6 @@ public:
|
||||
|
||||
virtual std::string getCurrentTrace() const = 0;
|
||||
virtual std::string getCurrentSpan() const = 0;
|
||||
virtual std::string getCurrentHeaders() = 0;
|
||||
virtual std::map<std::string, std::string> getCurrentHeadersMap() = 0;
|
||||
virtual void startNewTrace(bool new_span = true, const std::string &_trace_id = std::string()) = 0;
|
||||
virtual void startNewSpan(
|
||||
|
||||
@@ -46,11 +46,17 @@ public:
|
||||
const std::vector<QueryRequest> &query_requests,
|
||||
bool is_pretty,
|
||||
bool is_bulk,
|
||||
bool is_proxy,
|
||||
const MessageMetadata &req_md
|
||||
) const = 0;
|
||||
|
||||
virtual Maybe<Intelligence::Response>
|
||||
getResponse(const QueryRequest &query_request, bool is_pretty, const MessageMetadata &req_md) const = 0;
|
||||
getResponse(
|
||||
const QueryRequest &query_request,
|
||||
bool is_pretty,
|
||||
bool is_proxy,
|
||||
const MessageMetadata &req_md
|
||||
) const = 0;
|
||||
|
||||
template<typename Data>
|
||||
Maybe<std::vector<AssetReply<Data>>>
|
||||
@@ -58,6 +64,7 @@ public:
|
||||
QueryRequest &query_request,
|
||||
bool ignore_in_progress = false,
|
||||
bool is_pretty = true,
|
||||
bool is_proxy = false,
|
||||
MessageMetadata req_md = MessageMetadata("", 0)
|
||||
);
|
||||
|
||||
@@ -66,6 +73,7 @@ public:
|
||||
queryIntelligence(
|
||||
std::vector<QueryRequest> &query_requests,
|
||||
bool is_pretty = true,
|
||||
bool is_proxy = false,
|
||||
MessageMetadata req_md = MessageMetadata("", 0)
|
||||
);
|
||||
|
||||
|
||||
@@ -24,10 +24,11 @@ I_Intelligence_IS_V2::queryIntelligence(
|
||||
QueryRequest &query_request,
|
||||
bool ignore_in_progress,
|
||||
bool is_pretty,
|
||||
bool is_proxy,
|
||||
MessageMetadata req_md
|
||||
)
|
||||
{
|
||||
auto response = getResponse(query_request, is_pretty, req_md);
|
||||
auto response = getResponse(query_request, is_pretty, is_proxy, req_md);
|
||||
|
||||
if (!response.ok()) return response.passErr();
|
||||
auto serializable_response = response->getSerializableResponse<Data>();
|
||||
@@ -36,7 +37,9 @@ I_Intelligence_IS_V2::queryIntelligence(
|
||||
query_request.setCursor(Intelligence_IS_V2::CursorState::DONE, "");
|
||||
} else {
|
||||
query_request.setCursor(Intelligence_IS_V2::CursorState::IN_PROGRESS, response->getCursor());
|
||||
if (ignore_in_progress) return genError("Query intelligence response with InProgress status");
|
||||
if (ignore_in_progress && response->getResponseStatus() == Intelligence_IS_V2::ResponseStatus::IN_PROGRESS) {
|
||||
return genError("Query intelligence response with InProgress status");
|
||||
}
|
||||
}
|
||||
|
||||
return serializable_response.getData();
|
||||
@@ -47,10 +50,11 @@ Maybe<std::vector<Maybe<std::vector<AssetReply<Data>>>>>
|
||||
I_Intelligence_IS_V2::queryIntelligence(
|
||||
std::vector<QueryRequest> &query_requests,
|
||||
bool is_pretty,
|
||||
bool is_proxy,
|
||||
MessageMetadata req_md
|
||||
)
|
||||
{
|
||||
auto res = getResponse(query_requests, is_pretty, true, req_md);
|
||||
auto res = getResponse(query_requests, is_pretty, true, is_proxy, req_md);
|
||||
if (!res.ok()) return res.passErr();
|
||||
|
||||
return res->getBulkData<Data>();
|
||||
|
||||
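Existing call sites stay source-compatible because is_proxy defaults to false; a hypothetical caller opting into proxying could look like this (MyComponent, MyAssetData and buildQuery() are placeholders, not part of the diff):

QueryRequest request = buildQuery(); // hypothetical helper producing a populated query
auto intelligence = Singleton::Consume<I_Intelligence_IS_V2>::by<MyComponent>();
auto assets = intelligence->queryIntelligence<MyAssetData>(
    request,
    false, // ignore_in_progress
    true,  // is_pretty
    true   // is_proxy: route the query through the configured proxy
);
if (assets.ok()) { /* use *assets */ }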
@@ -73,6 +73,7 @@ public:
|
||||
SerializableQueryFilter(Condition condition_type, const std::string &key, const std::string &value);
|
||||
SerializableQueryFilter(Condition condition_type, const std::string &key, const int64_t &value);
|
||||
SerializableQueryFilter(Condition condition_type, const std::string &key, const std::vector<std::string> &value);
|
||||
SerializableQueryFilter(const SerializableQueryCondition &condition);
|
||||
|
||||
void save(cereal::JSONOutputArchive &ar) const;
|
||||
|
||||
|
||||
@@ -28,7 +28,6 @@ public:
|
||||
|
||||
MOCK_CONST_METHOD0(getCurrentTrace, std::string());
|
||||
MOCK_CONST_METHOD0(getCurrentSpan, std::string());
|
||||
MOCK_METHOD0(getCurrentHeaders, std::string());
|
||||
MOCK_METHOD0(getCurrentHeadersMap, std::map<std::string, std::string>());
|
||||
MOCK_METHOD2(startNewTrace, void(bool, const std::string &));
|
||||
MOCK_METHOD3(startNewSpan, void(Span::ContextType, const std::string &, const std::string &));
|
||||
|
||||
@@ -26,21 +26,26 @@ public:
|
||||
MOCK_CONST_METHOD1(sendInvalidation, bool(const Invalidation &invalidation));
|
||||
MOCK_METHOD2(registerInvalidation, Maybe<uint>(const Invalidation &invalidation, const InvalidationCb &callback));
|
||||
MOCK_METHOD1(unregisterInvalidation, void(uint id));
|
||||
MOCK_CONST_METHOD4(
|
||||
MOCK_CONST_METHOD5(
|
||||
getResponse,
|
||||
Maybe<Response>(
|
||||
const std::vector<QueryRequest> &query_requests,
|
||||
bool is_pretty,
|
||||
bool is_bulk,
|
||||
bool is_proxy,
|
||||
const MessageMetadata &req_md
|
||||
)
|
||||
);
|
||||
MOCK_CONST_METHOD3(
|
||||
MOCK_CONST_METHOD4(
|
||||
getResponse,
|
||||
Maybe<Response>(const QueryRequest &query_request, bool is_pretty, const MessageMetadata &req_md)
|
||||
Maybe<Response>(
|
||||
const QueryRequest &query_request,
|
||||
bool is_pretty,
|
||||
bool is_proxy,
|
||||
const MessageMetadata &req_md
|
||||
)
|
||||
);
|
||||
MOCK_CONST_METHOD0(getIsOfflineOnly, bool(void));
|
||||
MOCK_CONST_METHOD1(getOfflineInfoString, Maybe<std::string>(const SerializableQueryFilter &query));
|
||||
};
|
||||
|
||||
#endif // __MOCK_INTELLIGENCE_H__
|
||||
|
||||
@@ -209,13 +209,13 @@ class ComponentListCore
|
||||
ShellCmd,
|
||||
GenericMetric,
|
||||
Messaging,
|
||||
MainloopComponent,
|
||||
ConfigComponent,
|
||||
InstanceAwareness,
|
||||
IntelligenceComponentV2,
|
||||
AgentDetails,
|
||||
LoggingComp,
|
||||
TimeProxyComponent,
|
||||
MainloopComponent,
|
||||
SignalHandler,
|
||||
RestServer,
|
||||
Encryptor,
|
||||
|
||||
@@ -61,27 +61,34 @@ DEFINE_FLAG(D_COMPONENT, D_ALL)
|
||||
DEFINE_FLAG(D_STREAMING, D_COMPONENT)
|
||||
DEFINE_FLAG(D_STREAMING_DATA, D_STREAMING)
|
||||
DEFINE_FLAG(D_CHECKSUM, D_STREAMING)
|
||||
DEFINE_FLAG(D_WAAP, D_COMPONENT)
|
||||
DEFINE_FLAG(D_OA_SCHEMA_UPDATER, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_API, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_AUTOMATION, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_REGEX, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_SAMPLE_PREPROCESS, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_SAMPLE_SCAN, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_EVASIONS, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_ASSET_STATE, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_CONFIDENCE_CALCULATOR, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_REPUTATION, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_SCORE_BUILDER, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_ULIMITS, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_SCANNER, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_DEEP_PARSER, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_BASE64, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_JSON, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_BOT_PROTECTION, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_STREAMING_PARSING, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_HEADERS, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_PARSER, D_WAAP)
|
||||
|
||||
DEFINE_FLAG(D_WAAP_GLOBAL, D_COMPONENT)
|
||||
DEFINE_FLAG(D_WAAP, D_WAAP_GLOBAL)
|
||||
DEFINE_FLAG(D_NGINX_EVENTS, D_WAAP)
|
||||
DEFINE_FLAG(D_OA_SCHEMA_UPDATER, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_API, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_AUTOMATION, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_REGEX, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_SAMPLE_SCAN, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_ASSET_STATE, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_CONFIDENCE_CALCULATOR, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_REPUTATION, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_SCORE_BUILDER, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_ULIMITS, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_SCANNER, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_DEEP_PARSER, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_BASE64, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_JSON, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_BOT_PROTECTION, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_STREAMING_PARSING, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_HEADERS, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_OVERRIDE, D_WAAP)
|
||||
|
||||
DEFINE_FLAG(D_WAAP_SAMPLE_HANDLING, D_WAAP_GLOBAL)
|
||||
DEFINE_FLAG(D_WAAP_SAMPLE_PREPROCESS, D_WAAP_SAMPLE_HANDLING)
|
||||
DEFINE_FLAG(D_WAAP_EVASIONS, D_WAAP_SAMPLE_HANDLING)
|
||||
|
||||
DEFINE_FLAG(D_WAAP_PARSER, D_WAAP_GLOBAL)
|
||||
DEFINE_FLAG(D_WAAP_PARSER_XML, D_WAAP_PARSER)
|
||||
DEFINE_FLAG(D_WAAP_PARSER_HTML, D_WAAP_PARSER)
|
||||
DEFINE_FLAG(D_WAAP_PARSER_BINARY, D_WAAP_PARSER)
|
||||
@@ -98,7 +105,7 @@ DEFINE_FLAG(D_COMPONENT, D_ALL)
|
||||
DEFINE_FLAG(D_WAAP_PARSER_PERCENT, D_WAAP_PARSER)
|
||||
DEFINE_FLAG(D_WAAP_PARSER_PAIRS, D_WAAP_PARSER)
|
||||
DEFINE_FLAG(D_WAAP_PARSER_PDF, D_WAAP_PARSER)
|
||||
DEFINE_FLAG(D_WAAP_OVERRIDE, D_WAAP)
|
||||
DEFINE_FLAG(D_WAAP_PARSER_BINARY_FILE, D_WAAP_PARSER)
|
||||
|
||||
DEFINE_FLAG(D_IPS, D_COMPONENT)
|
||||
DEFINE_FLAG(D_FILE_UPLOAD, D_COMPONENT)
|
||||
@@ -135,6 +142,7 @@ DEFINE_FLAG(D_COMPONENT, D_ALL)
|
||||
DEFINE_FLAG(D_LOCAL_POLICY, D_ORCHESTRATOR)
|
||||
DEFINE_FLAG(D_NGINX_POLICY, D_ORCHESTRATOR)
|
||||
DEFINE_FLAG(D_SERVICE_CONTROLLER, D_ORCHESTRATOR)
|
||||
DEFINE_FLAG(D_UPDATES_PROCESS_REPORTER, D_ORCHESTRATOR)
|
||||
|
||||
DEFINE_FLAG(D_GRADUAL_DEPLOYMENT, D_COMPONENT)
|
||||
DEFINE_FLAG(D_SDWAN, D_COMPONENT)
|
||||
|
||||
@@ -89,11 +89,4 @@ private:
|
||||
std::map<std::string, std::string> extended_status = {};
|
||||
};
|
||||
|
||||
class HealthCheckStatusEvent : public Event<HealthCheckStatusEvent, HealthCheckStatusReply>
|
||||
{
|
||||
public:
|
||||
HealthCheckStatusEvent() {}
|
||||
~HealthCheckStatusEvent() {}
|
||||
};
|
||||
|
||||
#endif // __HEALTH_CHECK_STATUS_H__
|
||||
|
||||
@@ -69,6 +69,8 @@ enum class Tags {
|
||||
NGINX_PROXY_MANAGER,
|
||||
WEB_SERVER_APISIX,
|
||||
DEPLOYMENT_DOCKER,
|
||||
WEB_SERVER_SWAG,
|
||||
WEB_SERVER_NGINX_UNIFIED,
|
||||
|
||||
COUNT
|
||||
};
|
||||
|
||||
@@ -29,10 +29,11 @@ public:
|
||||
const std::vector<QueryRequest> &queries,
|
||||
bool is_pretty,
|
||||
bool is_bulk,
|
||||
bool _is_proxy,
|
||||
const MessageMetadata &req_md
|
||||
)
|
||||
:
|
||||
queries(queries), is_pretty(is_pretty), is_bulk(is_bulk), req_md(req_md)
|
||||
queries(queries), is_pretty(is_pretty), is_bulk(is_bulk), is_proxy(_is_proxy), req_md(req_md)
|
||||
{}
|
||||
|
||||
Maybe<void> checkAssetsLimit() const;
|
||||
@@ -51,6 +52,7 @@ private:
|
||||
const std::vector<QueryRequest> &queries;
|
||||
bool is_pretty = true;
|
||||
bool is_bulk = false;
|
||||
bool is_proxy = false;
|
||||
Maybe<std::string> response_from_fog = genError("Uninitialized");
|
||||
const MessageMetadata &req_md;
|
||||
};
|
||||
|
||||
@@ -20,7 +20,6 @@
|
||||
#include "intelligence_invalidation.h"
|
||||
#include "intelligence_is_v2/intelligence_response.h"
|
||||
#include "intelligence_request.h"
|
||||
#include "intelligence_server.h"
|
||||
|
||||
using namespace std;
|
||||
using namespace chrono;
|
||||
@@ -33,6 +32,8 @@ static const string primary_port_setting = "local intelligence server primary po
|
||||
static const string secondary_port_setting = "local intelligence server secondary port";
|
||||
static const string invalidation_uri = "/api/v2/intelligence/invalidation";
|
||||
static const string registration_uri = "/api/v2/intelligence/invalidation/register";
|
||||
static const string query_uri = "/api/v2/intelligence/assets/query";
|
||||
static const string queries_uri = "/api/v2/intelligence/assets/queries";
|
||||
|
||||
class I_InvalidationCallBack
|
||||
{
|
||||
@@ -245,6 +246,51 @@ private:
|
||||
C2S_OPTIONAL_PARAM(string, invalidationType);
|
||||
};
|
||||
|
||||
class PagingController
|
||||
{
|
||||
public:
|
||||
PagingController()
|
||||
{
|
||||
uint request_overall_timeout_conf = getConfigurationWithDefault<uint>(
|
||||
20,
|
||||
"intelligence",
|
||||
"request overall timeout"
|
||||
);
|
||||
|
||||
timer = Singleton::Consume<I_TimeGet>::by<IntelligenceComponentV2>();
|
||||
mainloop = Singleton::Consume<I_MainLoop>::by<IntelligenceComponentV2>();
|
||||
|
||||
paging_timeout = timer->getMonotonicTime() + chrono::microseconds(request_overall_timeout_conf * 1000000);
|
||||
}
|
||||
|
||||
bool
|
||||
isMoreResponses(const Maybe<Response> &res, const IntelligenceRequest &req)
|
||||
{
|
||||
response = res;
|
||||
if (!res.ok() || req.getPagingStatus().ok()) return false;
|
||||
if (res->getResponseStatus() != ResponseStatus::IN_PROGRESS) return false;
|
||||
dbgTrace(D_INTELLIGENCE) << "Intelligence paging response is in progress";
|
||||
mainloop->yield(true);
|
||||
return hasTimeoutRemaining();
|
||||
}
|
||||
|
||||
Maybe<Response> getResponse() const { return response; }
|
||||
|
||||
private:
|
||||
bool
|
||||
hasTimeoutRemaining() const
|
||||
{
|
||||
if (timer->getMonotonicTime() < paging_timeout) return true;
|
||||
dbgDebug(D_INTELLIGENCE) << "Intelligence paging response reached timeout";
|
||||
return false;
|
||||
}
|
||||
|
||||
chrono::microseconds paging_timeout;
|
||||
Maybe<Response> response = genError("Paging response is uninitialized");
|
||||
I_TimeGet *timer;
|
||||
I_MainLoop *mainloop;
|
||||
};
|
||||
|
||||
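PagingController encapsulates the overall-timeout bookkeeping for paged queries; an assumed usage pattern (the actual call site is outside this hunk) would be a resend loop along these lines:

// Sketch only - assumes a sender that issues one page of the request per call.
Maybe<Response> getPagedResponse(const IntelligenceRequest &request) const
{
    PagingController pager;
    Maybe<Response> res = sendIntelligenceRequest(request);
    while (pager.isMoreResponses(res, request)) {
        res = sendIntelligenceRequest(request);
    }
    return pager.getResponse();
}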
class IntelligenceComponentV2::Impl
|
||||
:
|
||||
Singleton::Provide<I_Intelligence_IS_V2>::From<IntelligenceComponentV2>
|
||||
@@ -255,13 +301,12 @@ public:
|
||||
init()
|
||||
{
|
||||
message = Singleton::Consume<I_Messaging>::by<IntelligenceComponentV2>();
|
||||
timer = Singleton::Consume<I_TimeGet>::by<IntelligenceComponentV2>();
|
||||
mainloop = Singleton::Consume<I_MainLoop>::by<IntelligenceComponentV2>();
|
||||
|
||||
mainloop->addRecurringRoutine(
|
||||
I_MainLoop::RoutineType::System,
|
||||
chrono::minutes(12),
|
||||
[this] () { sendReccurringInvalidationRegistration(); },
|
||||
[this] () { sendRecurringInvalidationRegistration(); },
|
||||
"Sending intelligence invalidation"
|
||||
);
|
||||
|
||||
@@ -272,12 +317,7 @@ public:
|
||||
bool
|
||||
sendInvalidation(const Invalidation &invalidation) const override
|
||||
{
|
||||
if (hasLocalInvalidationSupport()) {
|
||||
return sendLocalInvalidation(invalidation);
|
||||
}
|
||||
else {
|
||||
return sendGlobalInvalidation(invalidation);
|
||||
}
|
||||
return sendIntelligence(invalidation).ok();
|
||||
}
|
||||
|
||||
Maybe<uint>
|
||||
@@ -285,7 +325,7 @@ public:
|
||||
{
|
||||
if (!invalidation.isLegalInvalidation()) return genError("Attempting to register invalid invalidation");
|
||||
auto res = invalidations.emplace(invalidation, cb);
|
||||
sendReccurringInvalidationRegistration();
|
||||
sendRecurringInvalidationRegistration();
|
||||
return res;
|
||||
}
|
||||
|
||||
@@ -300,10 +340,11 @@ public:
|
||||
const vector<QueryRequest> &query_requests,
|
||||
bool is_pretty,
|
||||
bool is_bulk,
|
||||
bool is_proxy,
|
||||
const MessageMetadata &req_md
|
||||
) const override
|
||||
{
|
||||
IntelligenceRequest intelligence_req(query_requests, is_pretty, is_bulk, req_md);
|
||||
IntelligenceRequest intelligence_req(query_requests, is_pretty, is_bulk, is_proxy, req_md);
|
||||
if (!intelligence_req.checkAssetsLimit().ok()) return intelligence_req.checkAssetsLimit().passErr();
|
||||
if (!intelligence_req.checkMinConfidence().ok()) return intelligence_req.checkMinConfidence().passErr();
|
||||
if (intelligence_req.isPagingActivated()) {
|
||||
@@ -312,100 +353,181 @@ public:
|
||||
return genError("Paging is activated and already finished. No need for more queries.");
|
||||
}
|
||||
}
|
||||
Sender intelligence_server(intelligence_req);
|
||||
auto response = intelligence_server.sendIntelligenceRequest();
|
||||
auto response = sendIntelligenceRequest(intelligence_req);
|
||||
return response;
|
||||
}
|
||||
|
||||
Maybe<Intelligence::Response>
|
||||
getResponse(const QueryRequest &query_request, bool is_pretty, const MessageMetadata &req_md) const override
|
||||
getResponse(
|
||||
const QueryRequest &query_request,
|
||||
bool is_pretty,
|
||||
bool is_proxy,
|
||||
const MessageMetadata &req_md
|
||||
) const override
|
||||
{
|
||||
vector<QueryRequest> queries = {query_request};
|
||||
return getResponse(queries, is_pretty, false, req_md);
|
||||
return getResponse(queries, is_pretty, false, is_proxy, req_md);
|
||||
}
|
||||
|
||||
private:
|
||||
bool
|
||||
hasLocalInvalidationSupport() const
|
||||
hasLocalIntelligenceSupport() const
|
||||
{
|
||||
auto is_supported = getProfileAgentSettingWithDefault<bool>(false, "agent.config.useLocalIntelligence");
|
||||
if (getProfileAgentSettingWithDefault<bool>(false, "agent.config.useLocalIntelligence")) return true;
|
||||
|
||||
if (!is_supported) {
|
||||
is_supported = getProfileAgentSettingWithDefault<bool>(false, "agent.config.supportInvalidation");
|
||||
auto crowsec_env = getenv("CROWDSEC_ENABLED");
|
||||
bool crowdsec_enabled = crowsec_env != nullptr && string(crowsec_env) == "true";
|
||||
|
||||
if (getProfileAgentSettingWithDefault<bool>(crowdsec_enabled, "layer7AccessControl.crowdsec.enabled")) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!is_supported) {
|
||||
is_supported = getConfigurationWithDefault(false, "intelligence", "support Invalidation");
|
||||
if (getProfileAgentSettingWithDefault<bool>(false, "agent.config.supportInvalidation")) return true;
|
||||
dbgTrace(D_INTELLIGENCE) << "Local intelligence not supported";
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
template <typename IntelligenceRest>
|
||||
Maybe<Response>
|
||||
sendIntelligence(const IntelligenceRest &rest_req) const
|
||||
{
|
||||
dbgFlow(D_INTELLIGENCE) << "Sending intelligence request";
|
||||
auto res = sendLocalIntelligenceToLocalServer(rest_req);
|
||||
if (res.ok()) return res;
|
||||
return sendGlobalIntelligence(rest_req);
|
||||
}
|
||||
|
||||
template <typename IntelligenceRest>
|
||||
Maybe<Response>
|
||||
sendLocalIntelligenceToLocalServer(const IntelligenceRest &rest_req) const
|
||||
{
|
||||
dbgFlow(D_INTELLIGENCE) << "Sending local intelligence request";
|
||||
if (!hasLocalIntelligenceSupport()) {
|
||||
dbgDebug(D_INTELLIGENCE) << "Local intelligence not supported";
|
||||
return genError("Local intelligence not configured");
|
||||
}
|
||||
|
||||
return is_supported;
|
||||
}
|
||||
|
||||
bool
|
||||
sendLocalInvalidation(const Invalidation &invalidation) const
|
||||
{
|
||||
dbgFlow(D_INTELLIGENCE) << "Starting local invalidation";
|
||||
return sendLocalInvalidationImpl(invalidation) || sendGlobalInvalidation(invalidation);
|
||||
}
|
||||
|
||||
bool
|
||||
sendLocalInvalidationImpl(const Invalidation &invalidation) const
|
||||
{
|
||||
auto server = getSetting<string>("intelligence", "local intelligence server ip");
|
||||
if (!server.ok()) {
|
||||
dbgWarning(D_INTELLIGENCE) << "Local intelligence server not configured";
|
||||
return false;
|
||||
dbgWarning(D_INTELLIGENCE) << "Local intelligence server ip not configured";
|
||||
return genError("Local intelligence server ip not configured");
|
||||
}
|
||||
|
||||
return
|
||||
sendLocalInvalidationImpl(invalidation, *server, primary_port_setting) ||
|
||||
sendLocalInvalidationImpl(invalidation, *server, secondary_port_setting);
|
||||
auto res = sendLocalIntelligenceToLocalServer(rest_req, *server, primary_port_setting);
|
||||
if (res.ok()) return res;
|
||||
return sendLocalIntelligenceToLocalServer(rest_req, *server, secondary_port_setting);
|
||||
}
|
||||
|
||||
bool
|
||||
sendLocalInvalidationImpl(const Invalidation &invalidation, const string &server, const string &port_setting) const
|
||||
template <typename IntelligenceRest>
|
||||
Maybe<Response>
|
||||
sendLocalIntelligenceToLocalServer(
|
||||
const IntelligenceRest &rest_req,
|
||||
const string &server,
|
||||
const string &port_setting
|
||||
) const
|
||||
{
|
||||
dbgFlow(D_INTELLIGENCE) << "Sending to local intelligence";
|
||||
|
||||
auto port = getSetting<uint>("intelligence", port_setting);
|
||||
if (!port.ok()) {
|
||||
dbgWarning(D_INTELLIGENCE) << "Could not resolve port for " << port_setting;
|
||||
return false;
|
||||
dbgWarning(D_INTELLIGENCE) << "Could not resolve port for " + port_setting;
|
||||
return genError("Could not resolve port for " + port_setting);
|
||||
}
|
||||
|
||||
dbgTrace(D_INTELLIGENCE)
|
||||
<< "Invalidation value: "
|
||||
<< (invalidation.genJson().ok() ? invalidation.genJson().unpack() : invalidation.genJson().getErr());
|
||||
<< "Intelligence rest request value: "
|
||||
<< (rest_req.genJson().ok() ? rest_req.genJson().unpack() : rest_req.genJson().getErr());
|
||||
|
||||
MessageMetadata invalidation_req_md(server, *port);
|
||||
invalidation_req_md.insertHeaders(getHTTPHeaders());
|
||||
invalidation_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
|
||||
return message->sendSyncMessageWithoutResponse(
|
||||
HTTPMethod::POST,
|
||||
invalidation_uri,
|
||||
invalidation,
|
||||
MessageCategory::INTELLIGENCE,
|
||||
invalidation_req_md
|
||||
);
|
||||
MessageMetadata req_md(server, *port);
|
||||
req_md.insertHeaders(getHTTPHeaders());
|
||||
req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
|
||||
return sendIntelligenceRequestImpl(rest_req, req_md);
|
||||
}
|
||||
|
||||
bool
|
||||
sendGlobalInvalidation(const Invalidation &invalidation) const
|
||||
template <typename IntelligenceRest>
|
||||
Maybe<Response>
|
||||
sendGlobalIntelligence(const IntelligenceRest &rest_req) const
|
||||
{
|
||||
dbgFlow(D_INTELLIGENCE) << "Starting global invalidation";
|
||||
dbgFlow(D_INTELLIGENCE) << "Sending global intelligence request";
|
||||
|
||||
dbgTrace(D_INTELLIGENCE)
|
||||
<< "Invalidation value: "
|
||||
<< (invalidation.genJson().ok() ? invalidation.genJson().unpack() : invalidation.genJson().getErr());
|
||||
MessageMetadata global_invalidation_req_md;
|
||||
global_invalidation_req_md.insertHeaders(getHTTPHeaders());
|
||||
return message->sendSyncMessageWithoutResponse(
|
||||
<< "Intelligence rest value: "
|
||||
<< (rest_req.genJson().ok() ? rest_req.genJson().unpack() : rest_req.genJson().getErr());
|
||||
MessageMetadata global_req_md;
|
||||
global_req_md.insertHeaders(getHTTPHeaders());
|
||||
return sendIntelligenceRequestImpl(rest_req, global_req_md);
|
||||
}
|
||||
|
||||
Maybe<Response>
|
||||
createResponse(const string &response_body, const IntelligenceRequest &query_request) const
|
||||
{
|
||||
Response response(response_body, query_request.getSize(), query_request.isBulk());
|
||||
auto load_status = response.load();
|
||||
if (load_status.ok()) return response;
|
||||
dbgWarning(D_INTELLIGENCE) << "Could not create intelligence response.";
|
||||
return load_status.passErr();
|
||||
}
|
||||
|
||||
Maybe<Response>
|
||||
sendIntelligenceRequestImpl(const Invalidation &invalidation, const MessageMetadata &local_req_md) const
|
||||
{
|
||||
dbgFlow(D_INTELLIGENCE) << "Sending intelligence invalidation";
|
||||
auto res = message->sendSyncMessageWithoutResponse(
|
||||
HTTPMethod::POST,
|
||||
invalidation_uri,
|
||||
invalidation,
|
||||
MessageCategory::INTELLIGENCE,
|
||||
global_invalidation_req_md
|
||||
local_req_md
|
||||
);
|
||||
if (res) return Response();
|
||||
dbgWarning(D_INTELLIGENCE) << "Could not send local intelligence invalidation.";
|
||||
return genError("Could not send local intelligence invalidation");
|
||||
}
|
||||
|
||||
Maybe<Response>
|
||||
sendIntelligenceRequestImpl(
|
||||
const InvalidationRegistration::RestCall ®istration,
|
||||
const MessageMetadata ®istration_req_md
|
||||
) const
|
||||
{
|
||||
dbgFlow(D_INTELLIGENCE) << "Sending intelligence invalidation registration";
|
||||
auto res = message->sendSyncMessageWithoutResponse(
|
||||
HTTPMethod::POST,
|
||||
registration_uri,
|
||||
registration,
|
||||
MessageCategory::INTELLIGENCE,
|
||||
registration_req_md
|
||||
);
|
||||
if (res) return Response();
|
||||
dbgWarning(D_INTELLIGENCE) << "Could not send intelligence invalidation registration.";
|
||||
return genError("Could not send intelligence invalidation registration");
|
||||
}
|
||||
|
||||
Maybe<Response>
|
||||
sendIntelligenceRequestImpl(const IntelligenceRequest &query_request, const MessageMetadata &global_req_md) const
|
||||
{
|
||||
dbgFlow(D_INTELLIGENCE) << "Sending intelligence query";
|
||||
auto json_body = query_request.genJson();
|
||||
if (!json_body.ok()) return json_body.passErr();
|
||||
auto req_data = message->sendSyncMessage(
|
||||
HTTPMethod::POST,
|
||||
query_request.isBulk() ? queries_uri : query_uri,
|
||||
*json_body,
|
||||
MessageCategory::INTELLIGENCE,
|
||||
global_req_md
|
||||
);
|
||||
if (!req_data.ok()) {
|
||||
auto response_error = req_data.getErr().toString();
|
||||
dbgWarning(D_INTELLIGENCE)
|
||||
<< "Could not send intelligence query. "
|
||||
<< req_data.getErr().getBody()
|
||||
<< " "
|
||||
<< req_data.getErr().toString();
|
||||
return genError("Could not send intelligence query.");
|
||||
} else if (req_data->getHTTPStatusCode() != HTTPStatusCode::HTTP_OK) {
|
||||
dbgWarning(D_INTELLIGENCE) << "Invalid intelligence response: " << req_data->toString();
|
||||
return genError(req_data->toString());
|
||||
}
|
||||
|
||||
return createResponse(req_data->getBody(), query_request);
|
||||
}
|
||||
|
||||
map<string, string>
|
||||
@@ -423,66 +545,26 @@ private:
|
||||
return headers;
|
||||
}
|
||||
|
||||
bool
|
||||
sendRegistration(const Invalidation &invalidation) const
|
||||
{
|
||||
InvalidationRegistration registration;
|
||||
registration.addInvalidation(invalidation);
|
||||
|
||||
return sendLocalRegistrationImpl(registration.genJson());
|
||||
}
|
||||
|
||||
bool
|
||||
sendLocalRegistrationImpl(const InvalidationRegistration::RestCall ®istration) const
|
||||
{
|
||||
auto server = getSetting<string>("intelligence", "local intelligence server ip");
|
||||
if (!server.ok()) {
|
||||
dbgWarning(D_INTELLIGENCE) << "Local intelligence server not configured";
|
||||
return false;
|
||||
}
|
||||
return
|
||||
sendLocalRegistrationImpl(registration, *server, primary_port_setting) ||
|
||||
sendLocalRegistrationImpl(registration, *server, secondary_port_setting);
|
||||
}
|
||||
|
||||
bool
|
||||
sendLocalRegistrationImpl(
|
||||
const InvalidationRegistration::RestCall ®istration,
|
||||
const string &server,
|
||||
const string &port_setting
|
||||
) const
|
||||
{
|
||||
dbgFlow(D_INTELLIGENCE) << "Sending to local registration";
|
||||
|
||||
auto port = getSetting<uint>("intelligence", port_setting);
|
||||
if (!port.ok()) {
|
||||
dbgWarning(D_INTELLIGENCE) << "Could not resolve port for " << port_setting;
|
||||
return false;
|
||||
}
|
||||
|
||||
dbgTrace(D_INTELLIGENCE) << "Invalidation value: " << registration.genJson();
|
||||
MessageMetadata registration_req_md(server, *port);
|
||||
registration_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
|
||||
return message->sendSyncMessageWithoutResponse(
|
||||
HTTPMethod::POST,
|
||||
registration_uri,
|
||||
registration,
|
||||
MessageCategory::INTELLIGENCE,
|
||||
registration_req_md
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
sendReccurringInvalidationRegistration() const
|
||||
sendRecurringInvalidationRegistration() const
|
||||
{
|
||||
if (!hasLocalInvalidationSupport() || invalidations.empty()) return;
|
||||
if (invalidations.empty()) return;
|
||||
|
||||
sendLocalRegistrationImpl(invalidations.getRegistration());
|
||||
sendLocalIntelligenceToLocalServer(invalidations.getRegistration());
|
||||
}
|
||||
|
||||
Maybe<Response>
|
||||
sendIntelligenceRequest(const IntelligenceRequest& req) const
|
||||
{
|
||||
PagingController paging;
|
||||
|
||||
while (paging.isMoreResponses(sendIntelligence(req), req));
|
||||
|
||||
return paging.getResponse();
|
||||
}
|
||||
|
||||
InvalidationCallBack invalidations;
|
||||
I_Messaging *message = nullptr;
|
||||
I_TimeGet *timer = nullptr;
|
||||
I_MainLoop *mainloop = nullptr;
|
||||
};
|
||||
|
||||
|
||||
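
For readers skimming the diff, the standalone sketch below (not part of the commit) illustrates the control flow introduced by the new PagingController and sendIntelligenceRequest: keep re-issuing the query while the server answers IN_PROGRESS, bounded by an overall deadline, and return the last response received. The names Response, sendOnce, and PagingSketch, the fixed 20-second deadline, and the use of std::optional/steady_clock are simplified stand-ins for the repository's Maybe, sendIntelligence(), the "request overall timeout" setting, and I_TimeGet; it is a sketch of the pattern, not the project's actual types.

// Standalone C++17 sketch of the paging loop added in this diff (assumed names, not repo code).
#include <chrono>
#include <iostream>
#include <optional>
#include <string>

enum class ResponseStatus { IN_PROGRESS, DONE };

struct Response {
    ResponseStatus status;
    std::string body;
};

// Stand-in for sendIntelligence(): reports IN_PROGRESS for the first few calls,
// then a final DONE response, mimicking a paged intelligence query.
std::optional<Response> sendOnce(int attempt) {
    if (attempt < 3) return Response{ResponseStatus::IN_PROGRESS, "partial page"};
    return Response{ResponseStatus::DONE, "final page"};
}

// Mirrors PagingController: remember the last response, keep polling while the
// server reports IN_PROGRESS, and stop once the overall deadline has passed.
class PagingSketch {
public:
    explicit PagingSketch(std::chrono::seconds overall_timeout)
        : deadline(std::chrono::steady_clock::now() + overall_timeout) {}

    bool isMoreResponses(const std::optional<Response> &res) {
        last = res;
        if (!res) return false;                                       // transport error: stop
        if (res->status != ResponseStatus::IN_PROGRESS) return false; // final page: stop
        return std::chrono::steady_clock::now() < deadline;           // keep paging until deadline
    }

    std::optional<Response> getResponse() const { return last; }

private:
    std::chrono::steady_clock::time_point deadline;
    std::optional<Response> last;
};

int main() {
    PagingSketch paging(std::chrono::seconds(20)); // 20s mirrors the default "request overall timeout"
    int attempt = 0;
    while (paging.isMoreResponses(sendOnce(attempt++))) {
        // keep polling; same shape as "while (paging.isMoreResponses(sendIntelligence(req), req));"
    }

    if (auto res = paging.getResponse()) std::cout << res->body << '\n';
    return 0;
}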