Compare commits


3 Commits

Author   SHA1         Message                  Date
wiaamm   1f80b79bd0   fix watchdog restarts    2025-06-09 15:35:21 +03:00
wiaamm   59da201d5c   fix restarting agent     2025-06-05 21:11:23 +03:00
wiaamm   8edb695346   don't exit               2025-05-28 14:54:37 +03:00
31 changed files with 25 additions and 1081 deletions

View File

@@ -6,8 +6,6 @@ HTTP_TRANSACTION_HANDLER_SERVICE="install-cp-nano-service-http-transaction-handl
ATTACHMENT_REGISTRATION_SERVICE="install-cp-nano-attachment-registration-manager.sh"
ORCHESTRATION_INSTALLATION_SCRIPT="install-cp-nano-agent.sh"
CACHE_INSTALLATION_SCRIPT="install-cp-nano-agent-cache.sh"
PROMETHEUS_INSTALLATION_SCRIPT="install-cp-nano-service-prometheus.sh"
NGINX_CENTRAL_MANAGER_INSTALLATION_SCRIPT="install-cp-nano-central-nginx-manager.sh"
var_fog_address=
var_proxy=
@@ -83,14 +81,6 @@ fi
/nano-service-installers/$CACHE_INSTALLATION_SCRIPT --install
/nano-service-installers/$HTTP_TRANSACTION_HANDLER_SERVICE --install
if [ "$PROMETHEUS" == "true" ]; then
/nano-service-installers/$PROMETHEUS_INSTALLATION_SCRIPT --install
fi
if [ "$CENTRAL_NGINX_MANAGER" == "true" ]; then
/nano-service-installers/$NGINX_CENTRAL_MANAGER_INSTALLATION_SCRIPT --install
fi
if [ "$CROWDSEC_ENABLED" == "true" ]; then
/nano-service-installers/$INTELLIGENCE_INSTALLATION_SCRIPT --install
/nano-service-installers/$CROWDSEC_INSTALLATION_SCRIPT --install

View File

@@ -28,7 +28,6 @@ USE_DEBUG_FLAG(D_NGINX_ATTACHMENT_PARSER);
Buffer NginxParser::tenant_header_key = Buffer();
static const Buffer proxy_ip_header_key("X-Forwarded-For", 15, Buffer::MemoryType::STATIC);
static const Buffer waf_tag_key("x-waf-tag", 9, Buffer::MemoryType::STATIC);
static const Buffer source_ip("sourceip", 8, Buffer::MemoryType::STATIC);
bool is_keep_alive_ctx = getenv("SAAS_KEEP_ALIVE_HDR_NAME") != nullptr;
@@ -232,20 +231,17 @@ NginxParser::parseRequestHeaders(const Buffer &data, const unordered_set<string>
static_cast<string>(header.getKey()) + ": " + static_cast<string>(header.getValue()) + "\r\n"
);
const auto &header_key = header.getKey();
if (NginxParser::tenant_header_key == header_key) {
if (NginxParser::tenant_header_key == header.getKey()) {
dbgDebug(D_NGINX_ATTACHMENT_PARSER)
<< "Identified active tenant header. Key: "
<< dumpHex(header_key)
<< dumpHex(header.getKey())
<< ", Value: "
<< dumpHex(header.getValue());
auto active_tenant_and_profile = getActivetenantAndProfile(header.getValue());
opaque.setSessionTenantAndProfile(active_tenant_and_profile[0], active_tenant_and_profile[1]);
} else if (proxy_ip_header_key == header_key) {
} else if (proxy_ip_header_key == header.getKey()) {
source_identifiers.setXFFValuesToOpaqueCtx(header, UsersAllIdentifiersConfig::ExtractType::PROXYIP);
} else if (waf_tag_key == header_key) {
source_identifiers.setWafTagValuesToOpaqueCtx(header);
}
}
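
The rendered hunk above interleaves both sides of the change without +/- markers, so the following is a hedged reconstruction of the two header-dispatch variants it contains (identifiers are taken from the hunk; the surrounding debug logging is omitted, so treat this as orientation rather than the exact source):

    // Variant with a cached key reference and a waf-tag branch
    const auto &header_key = header.getKey();
    if (NginxParser::tenant_header_key == header_key) {
        auto active_tenant_and_profile = getActivetenantAndProfile(header.getValue());
        opaque.setSessionTenantAndProfile(active_tenant_and_profile[0], active_tenant_and_profile[1]);
    } else if (proxy_ip_header_key == header_key) {
        source_identifiers.setXFFValuesToOpaqueCtx(header, UsersAllIdentifiersConfig::ExtractType::PROXYIP);
    } else if (waf_tag_key == header_key) {
        source_identifiers.setWafTagValuesToOpaqueCtx(header);
    }

    // Variant that calls header.getKey() directly and has no waf-tag handling
    if (NginxParser::tenant_header_key == header.getKey()) {
        auto active_tenant_and_profile = getActivetenantAndProfile(header.getValue());
        opaque.setSessionTenantAndProfile(active_tenant_and_profile[0], active_tenant_and_profile[1]);
    } else if (proxy_ip_header_key == header.getKey()) {
        source_identifiers.setXFFValuesToOpaqueCtx(header, UsersAllIdentifiersConfig::ExtractType::PROXYIP);
    }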

View File

@@ -366,24 +366,6 @@ UsersAllIdentifiersConfig::setCustomHeaderToOpaqueCtx(const HttpHeader &header)
return;
}
void
UsersAllIdentifiersConfig::setWafTagValuesToOpaqueCtx(const HttpHeader &header) const
{
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
if (!i_transaction_table || !i_transaction_table->hasState<NginxAttachmentOpaque>()) {
dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table";
return;
}
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
opaque.setSavedData(HttpTransactionData::waf_tag_ctx, static_cast<string>(header.getValue()));
dbgDebug(D_NGINX_ATTACHMENT_PARSER)
<< "Added waf tag to context: "
<< static_cast<string>(header.getValue());
return;
}
Maybe<string>
UsersAllIdentifiersConfig::parseCookieElement(
const string::const_iterator &start,

View File

@@ -45,19 +45,6 @@ private:
std::string host;
};
class EqualWafTag : public EnvironmentEvaluator<bool>, Singleton::Consume<I_Environment>
{
public:
EqualWafTag(const std::vector<std::string> &params);
static std::string getName() { return "EqualWafTag"; }
Maybe<bool, Context::Error> evalVariable() const override;
private:
std::string waf_tag;
};
class EqualListeningIP : public EnvironmentEvaluator<bool>, Singleton::Consume<I_Environment>
{
public:

View File

@@ -137,7 +137,6 @@ public:
static const std::string source_identifier;
static const std::string proxy_ip_ctx;
static const std::string xff_vals_ctx;
static const std::string waf_tag_ctx;
static const CompressionType default_response_content_encoding;

View File

@@ -1,30 +0,0 @@
#ifndef __PROMETHEUS_COMP_H__
#define __PROMETHEUS_COMP_H__
#include <memory>
#include "component.h"
#include "singleton.h"
#include "i_rest_api.h"
#include "i_messaging.h"
#include "generic_metric.h"
class PrometheusComp
:
public Component,
Singleton::Consume<I_RestApi>,
Singleton::Consume<I_Messaging>
{
public:
PrometheusComp();
~PrometheusComp();
void init() override;
private:
class Impl;
std::unique_ptr<Impl> pimpl;
};
#endif // __PROMETHEUS_COMP_H__

View File

@@ -30,7 +30,6 @@ public:
void parseRequestHeaders(const HttpHeader &header) const;
std::vector<std::string> getHeaderValuesFromConfig(const std::string &header_key) const;
void setXFFValuesToOpaqueCtx(const HttpHeader &header, ExtractType type) const;
void setWafTagValuesToOpaqueCtx(const HttpHeader &header) const;
private:
class UsersIdentifiersConfig

View File

@@ -50,28 +50,6 @@ static const boost::regex error_log_regex(
", (upstream: \".+?\")"
", (host: \".+?\")$"
);
// Generic regexes for fallback parsing
static const boost::regex generic_crit_log_regex(
"("
+ syslog_regex_string + ") "
+ "(?:\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2} )?" // Optional nginx timestamp
+ "\\[crit\\] (.+)$"
);
static const boost::regex generic_emerg_log_regex(
"("
+ syslog_regex_string + ") "
+ "(?:\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2} )?" // Optional nginx timestamp
+ "\\[emerg\\] (.+)$"
);
// Generic regex to extract time, log level and message for fallback parsing
static const boost::regex generic_fallback_log_regex(
"("
+ syslog_regex_string + ") "
+ "(?:\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2} )?" // Optional nginx timestamp
+ "\\[(\\w+)\\] (.+)$"
);
static const boost::regex server_regex("(\\d+\\.\\d+\\.\\d+\\.\\d+)|(\\w+\\.\\w+)");
static const boost::regex uri_regex("^/");
@@ -79,9 +57,6 @@ static const boost::regex port_regex("\\d+");
static const boost::regex response_code_regex("[0-9]{3}");
static const boost::regex http_method_regex("[A-Za-z]+");
static const string central_nginx_manager = "Central NGINX Manager";
static const string reverse_proxe = "Reverse Proxy";
class NginxMessageReader::Impl
{
public:
@@ -89,16 +64,6 @@ public:
init()
{
dbgFlow(D_NGINX_MESSAGE_READER);
if (Singleton::exists<I_Environment>()) {
auto name = Singleton::Consume<I_Environment>::by<Report>()->get<string>("Service Name");
if (name.ok())
{
dbgInfo(D_NGINX_MESSAGE_READER) << "Service name: " << *name;
service_name = *name;
}
}
I_MainLoop *mainloop = Singleton::Consume<I_MainLoop>::by<NginxMessageReader>();
mainloop->addOneTimeRoutine(
I_MainLoop::RoutineType::System,
@@ -152,12 +117,6 @@ private:
COUNT
};
struct GenericLogInfo {
string timestamp;
string severity;
string message;
};
void
initSyslogServerSocket()
{
@@ -216,10 +175,10 @@ private:
bool log_sent;
if (isAccessLog(log)) {
log_sent = sendAccessLog(log);
} else if (isAlertErrorLog(log) || isErrorLog(log) || isCritErrorLog(log) || isEmergErrorLog(log)) {
} else if (isAlertErrorLog(log) || isErrorLog(log)) {
log_sent = sendErrorLog(log);
} else {
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format for message: "<< log;
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
continue;
}
if (!log_sent) {
@@ -263,22 +222,13 @@ private:
{
dbgFlow(D_NGINX_MESSAGE_READER) << "Error log" << log;
Maybe<EnumArray<LogInfo, string>> log_info = parseErrorLog(log);
if (log_info.ok()) {
return sendLog(log_info.unpack());
if (!log_info.ok()) {
dbgWarning(D_NGINX_MESSAGE_READER)
<< "Failed parsing the NGINX logs. Error: "
<< log_info.getErr();
return false;
}
if (service_name == central_nginx_manager) {
dbgDebug(D_NGINX_MESSAGE_READER) << "Detailed parsing failed, trying generic parsing";
Maybe<GenericLogInfo> generic_log = parseGenericErrorLog(log);
if (generic_log.ok()) {
return sendGenericLog(generic_log.unpack());
}
}
dbgWarning(D_NGINX_MESSAGE_READER)
<< "Failed parsing the NGINX logs. Error: "
<< log_info.getErr();
return false;
return sendLog(log_info.unpack());
}
bool
@@ -303,45 +253,7 @@ private:
}
bool
isCritErrorLog(const string &log) const
{
dbgFlow(D_NGINX_MESSAGE_READER) << "Check if log is of type 'crit log'. Log: " << log;
return log.find("[crit]") != string::npos;
}
bool
isEmergErrorLog(const string &log) const
{
dbgFlow(D_NGINX_MESSAGE_READER) << "Check if log is of type 'emerg log'. Log: " << log;
return log.find("[emerg]") != string::npos;
}
string
getCNMEventName(const EnumArray<LogInfo, string> &log_info) const
{
dbgFlow(D_NGINX_MESSAGE_READER);
string event_name;
switch (log_info[LogInfo::RESPONSE_CODE][0]) {
case '4': {
event_name = "NGINX Proxy Error: Invalid request or incorrect NGINX configuration - Request dropped."
" Please check the reverse proxy configuration of your relevant assets";
break;
}
case '5': {
event_name = "NGINX Proxy Error: Request failed! Please verify your proxy configuration."
"If the issue persists please contact open-appsec support";
break;
}
default: {
dbgError(D_NGINX_MESSAGE_READER) << "Irrelevant status code";
return "";
}
}
return event_name;
}
string
getRPMEventName(const EnumArray<LogInfo, string> &log_info) const
sendLog(const EnumArray<LogInfo, string> &log_info)
{
dbgFlow(D_NGINX_MESSAGE_READER);
string event_name;
@@ -359,45 +271,9 @@ private:
}
default: {
dbgError(D_NGINX_MESSAGE_READER) << "Irrelevant status code";
return "";
return false;
}
}
return event_name;
}
string
getServiceName()
{
string service_name = "Unnamed Nano Service";
if (Singleton::exists<I_Environment>()) {
auto name = Singleton::Consume<I_Environment>::by<Report>()->get<string>("Service Name");
if (name.ok()) return *name;
}
return service_name;
}
string getEventName(const EnumArray<LogInfo, string> &log_info)
{
if (service_name == central_nginx_manager)
{
return getCNMEventName(log_info);
}
if (service_name != reverse_proxe)
{
dbgWarning(D_NGINX_MESSAGE_READER)
<< "Unknown service name: "
<< service_name
<< " Response will be sent as RPM";
}
return getRPMEventName(log_info);
}
bool
sendLog(const EnumArray<LogInfo, string> &log_info)
{
dbgFlow(D_NGINX_MESSAGE_READER);
string event_name = getEventName(log_info);
dbgTrace(D_NGINX_MESSAGE_READER)
<< "Nginx log's event name and response code: "
@@ -407,11 +283,9 @@ private:
LogGen log(
event_name,
ReportIS::Audience::SECURITY,
ReportIS::Severity::HIGH,
ReportIS::Severity::INFO,
ReportIS::Priority::LOW,
service_name == central_nginx_manager ?
ReportIS::Tags::CENTRAL_NGINX_MANAGER :
ReportIS::Tags::REVERSE_PROXY
ReportIS::Tags::REVERSE_PROXY
);
log << LogField("eventConfidence", "High");
@@ -439,47 +313,6 @@ private:
return true;
}
bool
sendGenericLog(const GenericLogInfo &log_info)
{
dbgFlow(D_NGINX_MESSAGE_READER) << "Sending generic log";
// check with christoper
string event_name = "NGINX Proxy Error: Request failed! Please verify your proxy configuration."
"If the issue persists please contact open-appsec support";
// Convert string severity to ReportIS::Severity
ReportIS::Severity severity = ReportIS::Severity::MEDIUM;
ReportIS::Priority priority = ReportIS::Priority::MEDIUM;
if (log_info.severity == "emerg" || log_info.severity == "crit") {
severity = ReportIS::Severity::CRITICAL;
priority = ReportIS::Priority::URGENT;
} else if (log_info.severity == "error" || log_info.severity == "alert") {
severity = ReportIS::Severity::HIGH;
priority = ReportIS::Priority::HIGH;
}
LogGen log(
event_name,
ReportIS::Audience::SECURITY,
severity,
priority,
ReportIS::Tags::CENTRAL_NGINX_MANAGER
);
log << LogField("eventConfidence", "High");
log << LogField("timestamp", log_info.timestamp);
log << LogField("httpResponseBody", formatGenericLogMessage(log_info));
return true;
}
string
formatGenericLogMessage(const GenericLogInfo &log_info)
{
return "[" + log_info.severity + "] " + log_info.message;
}
bool
sendRateLimitLog(const EnumArray<LogInfo, string> &log_info)
{
@@ -698,48 +531,6 @@ private:
log_info[LogInfo::RULE_NAME] = context.getRuleName();
}
Maybe<GenericLogInfo>
parseGenericErrorLog(const string &log_line)
{
dbgFlow(D_NGINX_MESSAGE_READER) << "Parsing generic error log: " << log_line;
boost::smatch matcher;
GenericLogInfo generic_log;
if (isCritErrorLog(log_line)) {
if (NGEN::Regex::regexSearch(__FILE__, __LINE__, log_line, matcher, generic_crit_log_regex)) {
const int timestamp_index = 2; // Timestamp from within syslog_regex_string
const int message_index = 5; // The captured message after [crit]
generic_log.timestamp = string(matcher[timestamp_index].first, matcher[timestamp_index].second);
generic_log.severity = "crit";
generic_log.message = string(matcher[message_index].first, matcher[message_index].second);
return generic_log;
}
} else if (isEmergErrorLog(log_line)) {
if (NGEN::Regex::regexSearch(__FILE__, __LINE__, log_line, matcher, generic_emerg_log_regex)) {
const int timestamp_index = 2; // Timestamp from within syslog_regex_string
const int message_index = 5; // The captured message after [emerg]
generic_log.timestamp = string(matcher[timestamp_index].first, matcher[timestamp_index].second);
generic_log.severity = "emerg";
generic_log.message = string(matcher[message_index].first, matcher[message_index].second);
return generic_log;
}
}
if (NGEN::Regex::regexSearch(__FILE__, __LINE__, log_line, matcher, generic_fallback_log_regex)) {
const int timestamp_index = 2; // Timestamp from within syslog_regex_string
const int severity_index = 5; // The captured severity level
const int message_index = 6; // The captured message
generic_log.timestamp = string(matcher[timestamp_index].first, matcher[timestamp_index].second);
generic_log.severity = string(matcher[severity_index].first, matcher[severity_index].second);
generic_log.message = string(matcher[message_index].first, matcher[message_index].second);
return generic_log;
}
dbgWarning(D_NGINX_MESSAGE_READER) << "Could not parse log with generic method: " << log_line;
return genError("Could not parse log with generic method");
}
Maybe<EnumArray<LogInfo, string>>
parseErrorLog(const string &log_line)
{
@@ -749,29 +540,17 @@ private:
boost::smatch matcher;
vector<string> result;
boost::regex selected_regex;
// Select appropriate regex based on log type
if (isAlertErrorLog(log_line)) {
selected_regex = alert_log_regex;
} else if (isErrorLog(log_line)) {
selected_regex = error_log_regex;
} else {
dbgWarning(D_NGINX_MESSAGE_READER) << "No matching log type found for log: " << log_line;
return genError("No matching log type found");
}
if (
!NGEN::Regex::regexSearch(
__FILE__,
__LINE__,
log_line,
matcher,
selected_regex
isAlertErrorLog(log_line) ? alert_log_regex : error_log_regex
)
) {
dbgWarning(D_NGINX_MESSAGE_READER) << "Detailed regex parsing failed for log: " << log_line;
return genError("Detailed regex parsing failed");
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
return genError("Unexpected nginx log format");
}
const int event_message_index = 6;
@@ -812,8 +591,8 @@ private:
addContextFieldsToLogInfo(log_info);
if (!validateLog(log_info)) {
dbgWarning(D_NGINX_MESSAGE_READER) << "Log validation failed for detailed parsing";
return genError("Log validation failed for detailed parsing");
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
return genError("Unexpected nginx log format");
}
return log_info;
@@ -931,7 +710,6 @@ private:
I_Socket::socketFd syslog_server_socket = -1;
string rate_limit_status_code = "429";
string service_name = "Unnamed Nano Service";
};
NginxMessageReader::NginxMessageReader() : Component("NginxMessageReader"), pimpl(make_unique<Impl>()) {}
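
Since this file's hunks also interleave both sides without +/- markers, here is a hedged reconstruction of the two sendErrorLog() flows visible above (types and helpers as named in the hunk; debug statements abbreviated):

    // Flow with a generic fallback for the Central NGINX Manager
    bool sendErrorLog(const string &log)
    {
        Maybe<EnumArray<LogInfo, string>> log_info = parseErrorLog(log);
        if (log_info.ok()) return sendLog(log_info.unpack());
        if (service_name == central_nginx_manager) {
            Maybe<GenericLogInfo> generic_log = parseGenericErrorLog(log);
            if (generic_log.ok()) return sendGenericLog(generic_log.unpack());
        }
        dbgWarning(D_NGINX_MESSAGE_READER) << "Failed parsing the NGINX logs. Error: " << log_info.getErr();
        return false;
    }

    // Flow without the fallback: a failed detailed parse simply fails the log
    bool sendErrorLog(const string &log)
    {
        Maybe<EnumArray<LogInfo, string>> log_info = parseErrorLog(log);
        if (!log_info.ok()) {
            dbgWarning(D_NGINX_MESSAGE_READER) << "Failed parsing the NGINX logs. Error: " << log_info.getErr();
            return false;
        }
        return sendLog(log_info.unpack());
    }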

View File

@@ -3,7 +3,6 @@ add_subdirectory(ips)
add_subdirectory(layer_7_access_control)
add_subdirectory(local_policy_mgmt_gen)
add_subdirectory(orchestration)
add_subdirectory(prometheus)
add_subdirectory(rate_limit)
add_subdirectory(waap)
add_subdirectory(central_nginx_manager)

View File

@@ -179,10 +179,10 @@ private:
Maybe<void>
configureSyslog()
{
// if (!getProfileAgentSettingWithDefault<bool>(false, "centralNginxManagement.syslogEnabled")) {
// dbgTrace(D_NGINX_MANAGER) << "Syslog is disabled via settings";
// return {};
// }
if (!getProfileAgentSettingWithDefault<bool>(false, "centralNginxManagement.syslogEnabled")) {
dbgTrace(D_NGINX_MANAGER) << "Syslog is disabled via settings";
return {};
}
string syslog_directive = "error_log syslog:server=127.0.0.1:1514 warn;";
auto load_shared_directive_result = loadSharedDirective(syslog_directive);

View File

@@ -1,2 +0,0 @@
add_library(prometheus_comp prometheus_comp.cc)
add_subdirectory(prometheus_ut)

View File

@@ -1,200 +0,0 @@
#include "prometheus_comp.h"
#include <string>
#include <map>
#include <vector>
#include <cereal/archives/json.hpp>
#include <cereal/types/map.hpp>
#include <cereal/types/vector.hpp>
#include <cereal/types/string.hpp>
#include <iostream>
#include <fstream>
#include "common.h"
#include "report/base_field.h"
#include "report/report_enums.h"
#include "log_generator.h"
#include "debug.h"
#include "rest.h"
#include "customized_cereal_map.h"
#include "i_messaging.h"
#include "prometheus_metric_names.h"
USE_DEBUG_FLAG(D_PROMETHEUS);
using namespace std;
using namespace ReportIS;
struct ServiceData
{
template <typename Archive>
void
serialize(Archive &ar)
{
ar(cereal::make_nvp("Service port", service_port));
}
int service_port;
};
class PrometheusMetricData
{
public:
PrometheusMetricData(const string &n, const string &t, const string &d) : name(n), type(t), description(d) {}
void
addElement(const string &labels, const string &value)
{
metric_labels_to_values[labels] = value;
}
ostream &
print(ostream &os)
{
if (metric_labels_to_values.empty()) return os;
string representative_name = "";
if (!name.empty()) {
auto metric_name = convertMetricName(name);
!metric_name.empty() ? representative_name = metric_name : representative_name = name;
}
if (!description.empty()) os << "# HELP " << representative_name << ' ' << description << '\n';
if (!name.empty()) os << "# TYPE " << representative_name << ' ' << type << '\n';
for (auto &entry : metric_labels_to_values) {
os << representative_name << entry.first << ' ' << entry.second << '\n';
}
os << '\n';
metric_labels_to_values.clear();
return os;
}
private:
string name;
string type;
string description;
map<string, string> metric_labels_to_values;
};
static ostream & operator<<(ostream &os, PrometheusMetricData &metric) { return metric.print(os); }
class PrometheusComp::Impl
{
public:
void
init()
{
Singleton::Consume<I_RestApi>::by<PrometheusComp>()->addGetCall(
"metrics",
[&] () { return getFormatedPrometheusMetrics(); }
);
}
void
addMetrics(const vector<PrometheusData> &metrics)
{
for(auto &metric : metrics) {
auto &metric_object = getDataObject(
metric.name,
metric.type,
metric.description
);
metric_object.addElement(metric.label, metric.value);
}
}
private:
PrometheusMetricData &
getDataObject(const string &name, const string &type, const string &description)
{
auto elem = prometheus_metrics.find(name);
if (elem == prometheus_metrics.end()) {
elem = prometheus_metrics.emplace(name, PrometheusMetricData(name, type, description)).first;
}
return elem->second;
}
map<string, ServiceData>
getServiceDetails()
{
map<string, ServiceData> registeredServices;
auto registered_services_file = getConfigurationWithDefault<string>(
getFilesystemPathConfig() + "/conf/orchestrations_registered_services.json",
"orchestration",
"Orchestration registered services"
);
ifstream file(registered_services_file);
if (!file.is_open()) {
dbgWarning(D_PROMETHEUS) << "Failed to open file: " << registered_services_file;
return registeredServices;
}
stringstream buffer;
buffer << file.rdbuf();
try {
cereal::JSONInputArchive archive(buffer);
archive(cereal::make_nvp("Registered Services", registeredServices));
} catch (const exception& e) {
dbgWarning(D_PROMETHEUS) << "Error parsing Registered Services JSON file: " << e.what();
}
return registeredServices;
}
void
getServicesMetrics()
{
dbgTrace(D_PROMETHEUS) << "Get all registered services metrics";
map<string, ServiceData> service_names_to_ports = getServiceDetails();
for (const auto &service : service_names_to_ports) {
I_Messaging *messaging = Singleton::Consume<I_Messaging>::by<PrometheusComp>();
MessageMetadata servie_metric_req_md("127.0.0.1", service.second.service_port);
servie_metric_req_md.setConnectioFlag(MessageConnectionConfig::ONE_TIME_CONN);
servie_metric_req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
auto res = messaging->sendSyncMessage(
HTTPMethod::GET,
"/service-metrics",
string(""),
MessageCategory::GENERIC,
servie_metric_req_md
);
if (!res.ok()) {
dbgWarning(D_PROMETHEUS) << "Failed to get service metrics. Service: " << service.first;
continue;
}
stringstream buffer;
buffer << res.unpack().getBody();
cereal::JSONInputArchive archive(buffer);
vector<PrometheusData> metrics;
archive(cereal::make_nvp("metrics", metrics));
addMetrics(metrics);
}
}
string
getFormatedPrometheusMetrics()
{
MetricScrapeEvent().notify();
getServicesMetrics();
stringstream result;
for (auto &metric : prometheus_metrics) {
result << metric.second;
}
dbgTrace(D_PROMETHEUS) << "Prometheus metrics: " << result.str();
return result.str();
}
map<string, PrometheusMetricData> prometheus_metrics;
};
PrometheusComp::PrometheusComp() : Component("Prometheus"), pimpl(make_unique<Impl>()) {}
PrometheusComp::~PrometheusComp() {}
void
PrometheusComp::init()
{
pimpl->init();
}
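
For reference, a minimal standalone sketch (not project code) of the Prometheus text exposition format that PrometheusMetricData::print() produces above, using the sample values from the unit test further down:

    #include <iostream>
    #include <map>
    #include <string>

    int main()
    {
        // Representative name after convertMetricName(), the metric type,
        // and one label set mapped to its value.
        std::string name = "nano_service_restarts_counter";
        std::string type = "counter";
        std::map<std::string, std::string> labels_to_values = {
            {"{method=\"post\",code=\"200\"}", "1534"}
        };

        // print() emits optional "# HELP"/"# TYPE" headers, then "<name><labels> <value>"
        // per entry, followed by a blank line (the description is empty here, so no HELP).
        std::cout << "# TYPE " << name << ' ' << type << '\n';
        for (const auto &entry : labels_to_values) {
            std::cout << name << entry.first << ' ' << entry.second << '\n';
        }
        std::cout << '\n';
        // Output matches the string expected in prometheus_ut:
        //   # TYPE nano_service_restarts_counter counter
        //   nano_service_restarts_counter{method="post",code="200"} 1534
        return 0;
    }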

View File

@@ -1,143 +0,0 @@
#ifndef __PROMETHEUS_METRIC_NAMES_H__
#define __PROMETHEUS_METRIC_NAMES_H__
#include <string>
#include <unordered_map>
#include "debug.h"
USE_DEBUG_FLAG(D_PROMETHEUS);
std::string
convertMetricName(const std::string &original_metric_name)
{
static const std::unordered_map<std::string, std::string> original_to_representative_names = {
// HybridModeMetric
{"watchdogProcessStartupEventsSum", "nano_service_restarts_counter"},
// nginxAttachmentMetric
{"inspectVerdictSum", "traffic_inspection_verdict_inspect_counter"},
{"acceptVeridctSum", "traffic_inspection_verdict_accept_counter"},
{"dropVerdictSum", "traffic_inspection_verdict_drop_counter"},
{"injectVerdictSum", "traffic_inspection_verdict_inject_counter"},
{"irrelevantVerdictSum", "traffic_inspection_verdict_irrelevant_counter"},
{"irrelevantVerdictSum", "traffic_inspection_verdict_irrelevant_counter"},
{"reconfVerdictSum", "traffic_inspection_verdict_reconf_counter"},
{"responseInspection", "response_body_inspection_counter"},
// nginxIntakerMetric
{"successfullInspectionTransactionsSum", "successful_Inspection_counter"},
{"failopenTransactionsSum", "fail_open_Inspection_counter"},
{"failcloseTransactionsSum", "fail_close_Inspection_counter"},
{"transparentModeTransactionsSum", "transparent_mode_counter"},
{"totalTimeInTransparentModeSum", "total_time_in_transparent_mode_counter"},
{"reachInspectVerdictSum", "inspect_verdict_counter"},
{"reachAcceptVerdictSum", "accept_verdict_counter"},
{"reachDropVerdictSum", "drop_verdict_counter"},
{"reachInjectVerdictSum", "inject_verdict_counter"},
{"reachIrrelevantVerdictSum", "irrelevant_verdict_counter"},
{"reachReconfVerdictSum", "reconf_verdict_counter"},
{"requestCompressionFailureSum", "failed_requests_compression_counter"},
{"responseCompressionFailureSum", "failed_response_compression_counter"},
{"requestDecompressionFailureSum", "failed_requests_decompression_counter"},
{"responseDecompressionFailureSum", "failed_response_decompression_counter"},
{"requestCompressionSuccessSum", "successful_request_compression_counter"},
{"responseCompressionSuccessSum", "successful_response_compression_counter"},
{"requestDecompressionSuccessSum", "successful_request_decompression_counter"},
{"responseDecompressionSuccessSum", "successful_response_decompression_counter"},
{"skippedSessionsUponCorruptedZipSum", "corrupted_zip_skipped_session_counter"},
{"attachmentThreadReachedTimeoutSum", "thread_exceeded_processing_time_counter"},
{"registrationThreadReachedTimeoutSum", "failed_registration_thread_counter"},
{"requestHeaderThreadReachedTimeoutSum", "request_headers_processing_thread_timeouts_counter"},
{"requestBodyThreadReachedTimeoutSum", "request_body_processing_thread_timeouts_counter"},
{"respondHeaderThreadReachedTimeoutSum", "response_headers_processing_thread_timeouts_counter"},
{"respondBodyThreadReachedTimeoutSum", "response_body_processing_thread_timeouts_counter"},
{"attachmentThreadFailureSum", "thread_failures_counter"},
{"httpRequestProcessingReachedTimeoutSum", "request_processing_timeouts_counter"},
{"httpRequestsSizeSum", "requests_total_size_counter"},
{"httpResponsesSizeSum", "response_total_size_counter"},
{"httpRequestFailedToReachWebServerUpstreamSum", "requests_failed_reach_upstram_counter"},
{"overallSessionProcessTimeToVerdictAvgSample", "overall_processing_time_until_verdict_average"},
{"overallSessionProcessTimeToVerdictMaxSample", "overall_processing_time_until_verdict_max"},
{"overallSessionProcessTimeToVerdictMinSample", "overall_processing_time_until_verdict_min"},
{"requestProcessTimeToVerdictAvgSample", "requests_processing_time_until_verdict_average"},
{"requestProcessTimeToVerdictMaxSample", "requests_processing_time_until_verdict_max"},
{"requestProcessTimeToVerdictMinSample", "requests_processing_time_until_verdict_min"},
{"responseProcessTimeToVerdictAvgSample", "response_processing_time_until_verdict_average"},
{"responseProcessTimeToVerdictMaxSample", "response_processing_time_until_verdict_max"},
{"responseProcessTimeToVerdictMinSample", "response_processing_time_until_verdict_min"},
{"requestBodySizeUponTimeoutAvgSample", "request_body_size_average"},
{"requestBodySizeUponTimeoutMaxSample", "request_body_size_max"},
{"requestBodySizeUponTimeoutMinSample", "request_body_size_min"},
{"responseBodySizeUponTimeoutAvgSample", "response_body_size_average"},
{"responseBodySizeUponTimeoutMaxSample", "response_body_size_max"},
{"responseBodySizeUponTimeoutMinSample", "response_body_size_min"},
// WaapTelemetrics
{"reservedNgenA", "total_requests_counter"},
{"reservedNgenB", "unique_sources_counter"},
{"reservedNgenC", "requests_blocked_by_force_and_exception_counter"},
{"reservedNgenD", "requests_blocked_by_waf_counter"},
{"reservedNgenE", "requests_blocked_by_open_api_counter"},
{"reservedNgenF", "requests_blocked_by_bot_protection_counter"},
{"reservedNgenG", "requests_threat_level_info_and_no_threat_counter"},
{"reservedNgenH", "requests_threat_level_low_counter"},
{"reservedNgenI", "requests_threat_level_medium_counter"},
{"reservedNgenJ", "requests_threat_level_high_counter"},
// WaapTrafficTelemetrics
{"reservedNgenA", "post_requests_counter"},
{"reservedNgenB", "get_requests_counter"},
{"reservedNgenC", "put_requests_counter"},
{"reservedNgenD", "patch_requests_counter"},
{"reservedNgenE", "delete_requests_counter"},
{"reservedNgenF", "other_requests_counter"},
{"reservedNgenG", "2xx_status_code_responses_counter"},
{"reservedNgenH", "4xx_status_code_responses_counter"},
{"reservedNgenI", "5xx_status_code_responses_counter"},
{"reservedNgenJ", "requests_time_latency_average"},
// WaapAttackTypesMetrics
{"reservedNgenA", "sql_injection_attacks_type_counter"},
{"reservedNgenB", "vulnerability_scanning_attacks_type_counter"},
{"reservedNgenC", "path_traversal_attacks_type_counter"},
{"reservedNgenD", "ldap_injection_attacks_type_counter"},
{"reservedNgenE", "evasion_techniques_attacks_type_counter"},
{"reservedNgenF", "remote_code_execution_attacks_type_counter"},
{"reservedNgenG", "xml_extern_entity_attacks_type_counter"},
{"reservedNgenH", "cross_site_scripting_attacks_type_counter"},
{"reservedNgenI", "general_attacks_type_counter"},
// AssetsMetric
{"numberOfProtectedApiAssetsSample", "api_assets_counter"},
{"numberOfProtectedWebAppAssetsSample", "web_api_assets_counter"},
{"numberOfProtectedAssetsSample", "all_assets_counter"},
// IPSMetric
{"preventEngineMatchesSample", "prevent_action_matches_counter"},
{"detectEngineMatchesSample", "detect_action_matches_counter"},
{"ignoreEngineMatchesSample", "ignore_action_matches_counter"},
// CPUMetric
{"cpuMaxSample", "cpu_usage_percentage_max"},
{"cpuAvgSample", "cpu_usage_percentage_average"},
{"cpuSample", "cpu_usage_percentage_last_value"},
// LogMetric
{"logQueueMaxSizeSample", "logs_queue_size_max"},
{"logQueueAvgSizeSample", "logs_queue_size_average"},
{"logQueueCurrentSizeSample", "logs_queue_size_last_value"},
{"sentLogsSum", "logs_sent_counter"},
{"sentLogsBulksSum", "bulk_logs_sent_counter"},
// MemoryMetric
{"serviceVirtualMemorySizeMaxSample", "service_virtual_memory_size_kb_max"},
{"serviceVirtualMemorySizeMinSample", "service_virtual_memory_size_kb_min"},
{"serviceVirtualMemorySizeAvgSample", "service_virtual_memory_size_kb_average"},
{"serviceRssMemorySizeMaxSample", "service_physical_memory_size_kb_max"},
{"serviceRssMemorySizeMinSample", "service_physical_memory_size_kb_min"},
{"serviceRssMemorySizeAvgSample", "service_physical_memory_size_kb_average"},
{"generalTotalMemorySizeMaxSample", "general_total_used_memory_max"},
{"generalTotalMemorySizeMinSample", "general_total_used_memory_min"},
{"generalTotalMemorySizeAvgSample", "general_total_used_memory_average"},
};
auto metric_names = original_to_representative_names.find(original_metric_name);
if (metric_names != original_to_representative_names.end()) return metric_names->second;
dbgDebug(D_PROMETHEUS)
<< "Metric don't have a representative name, originl name: "
<< original_metric_name;
return "";
}
#endif // __PROMETHEUS_METRIC_NAMES_H__

View File

@@ -1,8 +0,0 @@
link_directories(${BOOST_ROOT}/lib)
link_directories(${BOOST_ROOT}/lib ${CMAKE_BINARY_DIR}/core/shmem_ipc)
add_unit_test(
prometheus_ut
"prometheus_ut.cc"
"prometheus_comp;logging;agent_details;waap_clib;table;singleton;time_proxy;metric;event_is;connkey;http_transaction_data;generic_rulebase;generic_rulebase_evaluators;ip_utilities;intelligence_is_v2;-lboost_regex;messaging;"
)

View File

@@ -1,79 +0,0 @@
#include "prometheus_comp.h"
#include <sstream>
#include <fstream>
#include <vector>
#include "cmock.h"
#include "cptest.h"
#include "maybe_res.h"
#include "debug.h"
#include "config.h"
#include "environment.h"
#include "config_component.h"
#include "agent_details.h"
#include "time_proxy.h"
#include "mock/mock_mainloop.h"
#include "mock/mock_rest_api.h"
#include "mock/mock_messaging.h"
using namespace std;
using namespace testing;
USE_DEBUG_FLAG(D_PROMETHEUS);
class PrometheusCompTest : public Test
{
public:
PrometheusCompTest()
{
EXPECT_CALL(mock_rest, mockRestCall(_, "declare-boolean-variable", _)).WillOnce(Return(false));
env.preload();
config.preload();
env.init();
EXPECT_CALL(
mock_rest,
addGetCall("metrics", _)
).WillOnce(DoAll(SaveArg<1>(&get_metrics_func), Return(true)));
prometheus_comp.init();
}
::Environment env;
ConfigComponent config;
PrometheusComp prometheus_comp;
StrictMock<MockRestApi> mock_rest;
StrictMock<MockMainLoop> mock_ml;
NiceMock<MockMessaging> mock_messaging;
unique_ptr<ServerRest> agent_uninstall;
function<string()> get_metrics_func;
CPTestTempfile status_file;
string registered_services_file_path;
};
TEST_F(PrometheusCompTest, checkAddingMetric)
{
registered_services_file_path = cptestFnameInSrcDir(string("registered_services.json"));
setConfiguration(registered_services_file_path, "orchestration", "Orchestration registered services");
string metric_body = "{\n"
" \"metrics\": [\n"
" {\n"
" \"metric_name\": \"watchdogProcessStartupEventsSum\",\n"
" \"metric_type\": \"counter\",\n"
" \"metric_description\": \"\",\n"
" \"labels\": \"{method=\\\"post\\\",code=\\\"200\\\"}\",\n"
" \"value\": \"1534\"\n"
" }\n"
" ]\n"
"}";
string message_body;
EXPECT_CALL(mock_messaging, sendSyncMessage(_, "/service-metrics", _, _, _))
.Times(2).WillRepeatedly(Return(HTTPResponse(HTTPStatusCode::HTTP_OK, metric_body)));
string metric_str = "# TYPE nano_service_restarts_counter counter\n"
"nano_service_restarts_counter{method=\"post\",code=\"200\"} 1534\n\n";
EXPECT_EQ(metric_str, get_metrics_func());
}

View File

@@ -1,32 +0,0 @@
{
"Registered Services": {
"cp-nano-orchestration": {
"Service name": "cp-nano-orchestration",
"Service ID": "cp-nano-orchestration",
"Service port": 7777,
"Relevant configs": [
"zones",
"triggers",
"rules",
"registration-data",
"parameters",
"orchestration",
"exceptions",
"agent-intelligence"
]
},
"cp-nano-prometheus": {
"Service name": "cp-nano-prometheus",
"Service ID": "cp-nano-prometheus",
"Service port": 7465,
"Relevant configs": [
"zones",
"triggers",
"rules",
"parameters",
"exceptions",
"agent-intelligence"
]
}
}
}

View File

@@ -103,35 +103,6 @@ WildcardHost::evalVariable() const
return lower_host_ctx == lower_host;
}
EqualWafTag::EqualWafTag(const vector<string> &params)
{
if (params.size() != 1) reportWrongNumberOfParams("EqualWafTag", params.size(), 1, 1);
waf_tag = params[0];
}
Maybe<bool, Context::Error>
EqualWafTag::evalVariable() const
{
I_Environment *env = Singleton::Consume<I_Environment>::by<EqualWafTag>();
auto maybe_waf_tag_ctx = env->get<string>(HttpTransactionData::waf_tag_ctx);
if (!maybe_waf_tag_ctx.ok())
{
dbgTrace(D_RULEBASE_CONFIG) << "didnt find waf tag in current context";
return false;
}
auto waf_tag_ctx = maybe_waf_tag_ctx.unpack();
dbgTrace(D_RULEBASE_CONFIG)
<< "trying to match waf tag context with its corresponding waf tag: "
<< waf_tag_ctx
<< ". Matcher waf tag: "
<< waf_tag;
return waf_tag_ctx == waf_tag;
}
EqualListeningIP::EqualListeningIP(const vector<string> &params)
{
if (params.size() != 1) reportWrongNumberOfParams("EqualListeningIP", params.size(), 1, 1);

View File

@@ -80,7 +80,6 @@ GenericRulebase::Impl::preload()
addMatcher<IpProtocolMatcher>();
addMatcher<UrlMatcher>();
addMatcher<EqualHost>();
addMatcher<EqualWafTag>();
addMatcher<WildcardHost>();
addMatcher<EqualListeningIP>();
addMatcher<EqualListeningPort>();

View File

@@ -53,7 +53,6 @@ const string HttpTransactionData::req_body = "transaction_request_body
const string HttpTransactionData::source_identifier = "sourceIdentifiers";
const string HttpTransactionData::proxy_ip_ctx = "proxy_ip";
const string HttpTransactionData::xff_vals_ctx = "xff_vals";
const string HttpTransactionData::waf_tag_ctx = "waf_tag";
const CompressionType HttpTransactionData::default_response_content_encoding = CompressionType::NO_COMPRESSION;

View File

@@ -71,7 +71,6 @@ enum class Tags {
DEPLOYMENT_DOCKER,
WEB_SERVER_SWAG,
WEB_SERVER_NGINX_UNIFIED,
CENTRAL_NGINX_MANAGER,
COUNT
};

View File

@@ -332,17 +332,7 @@ vector<PrometheusData>
GenericMetric::getPromMetricsData()
{
vector<PrometheusData> all_metrics;
bool enable_prometheus = false;
auto prometheus_settings = getProfileAgentSetting<bool>("prometheus");
if (prometheus_settings.ok()) {
enable_prometheus = prometheus_settings.unpack();
} else {
const char *prometheus_env = getenv("PROMETHEUS");
if (prometheus_env != nullptr) {
enable_prometheus = string(prometheus_env) == "true";
}
}
if (!enable_prometheus) return all_metrics;
if (!getProfileAgentSettingWithDefault(false, "prometheus")) return all_metrics;
dbgTrace(D_METRICS) << "Get prometheus metrics";
for (auto &calc : prometheus_calcs) {

View File

@@ -11,7 +11,6 @@ set(COMMON_LIBRARIES "-lngen_core;-lcompression_utils;-lssl;-lcrypto;-lz;-lboost
include(packaging.cmake)
add_subdirectory(orchestration)
add_subdirectory(prometheus)
add_subdirectory(agent_cache)
add_subdirectory(http_transaction_handler)
add_subdirectory(attachment_registration_manager)

View File

@@ -29,5 +29,4 @@ cpview_metric_provider="cpviewMetricProvider 8282"
hello_world="hello_world"
crowdsec_aux="crowdsecAux 8081"
central_nginx_manager="centralNginxManager 7555"
prometheus="prometheus 7465"
# ## Please do not remove this comment - newline at end of file required.

View File

@@ -647,9 +647,6 @@ install_watchdog()
echo "ExecStart=ip netns exec CTX0000${VS_ID} ${FILESYSTEM_PATH}/${WATCHDOG_PATH}/cp-nano-watchdog" >> /etc/systemd/system/${NANO_AGENT_SERVICE_FILE}
fi
echo "Environment=\"FILESYSTEM_PATH=${FILESYSTEM_PATH}\"" >> /etc/systemd/system/${NANO_AGENT_SERVICE_FILE}
if [ -n "${PROMETHEUS}" ] ; then
echo "Environment=\"PROMETHEUS=${PROMETHEUS}\"" >> /etc/systemd/system/${NANO_AGENT_SERVICE_FILE}
fi
cp_exec "systemctl daemon-reload"
cp_exec "systemctl enable nano_agent"

View File

@@ -1,30 +0,0 @@
add_subdirectory(package)
add_executable(prometheus main.cc)
target_link_libraries(prometheus
-Wl,--start-group
${COMMON_LIBRARIES}
generic_rulebase
generic_rulebase_evaluators
ip_utilities
version
signal_handler
prometheus_comp
http_transaction_data
-Wl,--end-group
)
add_dependencies(prometheus ngen_core)
install(TARGETS prometheus DESTINATION bin)
install(TARGETS prometheus DESTINATION prometheus_service/bin)
gen_package(
install-cp-nano-service-prometheus.sh
prometheus_service
./install-cp-nano-prometheus.sh
Check Point Prometheus Agent Version ${PACKAGE_VERSION} Install Package
)

View File

@@ -1,15 +0,0 @@
#include "components_list.h"
#include "prometheus_comp.h"
int
main(int argc, char **argv)
{
NodeComponents<PrometheusComp> comps;
comps.registerGlobalValue<bool>("Is Rest primary routine", true);
comps.registerGlobalValue<uint>("Nano service API Port Primary", 7465);
comps.registerGlobalValue<uint>("Nano service API Port Alternative", 7466);
comps.registerGlobalValue<bool>("Nano service API Allow Get From External IP", true);
return comps.run("Prometheus Service", argc, argv);
}

View File

@@ -1,4 +0,0 @@
install(FILES install-cp-nano-prometheus.sh DESTINATION prometheus_service/ PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ)
install(FILES cp-nano-prometheus.cfg DESTINATION prometheus_service/conf PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ)
install(FILES cp-nano-prometheus-conf.json DESTINATION prometheus_service/conf PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ)
install(FILES cp-nano-prometheus-debug-conf.json DESTINATION prometheus_service/conf PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ)

View File

@@ -1,21 +0,0 @@
{
"connection": {
"Nano service API Port Primary": [
{
"value": 7465
}
],
"Nano service API Port Alternative": [
{
"value": 7466
}
]
},
"message": {
"Connection timeout": [
{
"value": 10000000
}
]
}
}

View File

@@ -1,11 +0,0 @@
{
"Debug": [
{
"Streams": [
{
"Output": "nano_agent/cp-nano-prometheus.dbg"
}
]
}
]
}

View File

@@ -1,164 +0,0 @@
#!/bin/sh
#Nano Service Details
NANO_SERVICE_NAME="prometheus"
NANO_SERVICE_BIN_NAME="cp-nano-prometheus"
NANO_SERVICE_INSTALLATION_FOLDER="prometheus"
ATTACHMENT_BIN_NAME="cp-nano-prometheus"
#Installable Names
CFG_FILE_NAME="cp-nano-prometheus.cfg"
DBG_CONF_FILE_NAME="cp-nano-prometheus-debug-conf.json"
SERVICE_CONF_FILE_NAME="cp-nano-prometheus-conf.json"
NANO_SERVICE_BIN="prometheus"
ATTACHMENT_BIN="prometheus_attachment"
#Const variables
FORCE_STDOUT=true
INSTALLATION_TIME=$(date)
CP_NANO_LOG_PATH="/var/log/nano_agent"
CP_NANO_CONF_PATH="/etc/cp/conf"
NANO_SERVICE_INSTALLATION_PATH="/etc/cp/${NANO_SERVICE_INSTALLATION_FOLDER}"
NANO_SERVICE_BIN_PATH=${NANO_SERVICE_INSTALLATION_PATH}/${NANO_SERVICE_BIN_NAME}
NANO_SERVICE_CFG_PATH=${NANO_SERVICE_BIN_PATH}.cfg
ATTACHMENT_BIN_PATH=${NANO_SERVICE_INSTALLATION_PATH}/${ATTACHMENT_BIN_NAME}
DBG_CONF_PATH=${CP_NANO_CONF_PATH}/${NANO_SERVICE_BIN_NAME}-debug-conf.json
SERVICE_CONF_PATH=${CP_NANO_CONF_PATH}/${NANO_SERVICE_BIN_NAME}-conf.json
DBG_FILE_PATH=${CP_NANO_LOG_PATH}/${NANO_SERVICE_BIN_NAME}.dbg
INSTALLATION_LOG_FILE=${CP_NANO_LOG_PATH}/${NANO_SERVICE_BIN_NAME}-install.log
mkdir -p ${CP_NANO_LOG_PATH}
touch ${DBG_FILE_PATH}
cp_print()
{
var_text=$1
var_std_out=$2
touch $INSTALLATION_LOG_FILE
if [ -n "$var_std_out" ]; then
if [ "$var_std_out" = "true" ]; then
printf "%b\n" "$var_text"
fi
fi
printf "%b\n" "$var_text" >> $INSTALLATION_LOG_FILE
}
cp_exec()
{
var_cmd=$1
var_std_out=$2
# Send exec output to RES
RES=$($var_cmd 2>&1)
if [ -n "$RES" ]; then
cp_print "$RES" "$var_std_out"
fi
}
set_configuration()
{
cp_exec "cp -n conf/${DBG_CONF_FILE_NAME} $DBG_CONF_PATH"
cp_exec "/etc/cp/scripts/cpnano_debug --default --service prometheus"
cp_exec "cp -n conf/${SERVICE_CONF_FILE_NAME} $SERVICE_CONF_PATH"
}
run_installation()
{
cp_print "Starting installation of Check Point ${NANO_SERVICE_NAME} Nano service [$INSTALLATION_TIME]\n" $FORCE_STDOUT
cp_exec "/etc/cp/watchdog/cp-nano-watchdog --un-register ${ATTACHMENT_BIN_PATH}"
cp_exec "/etc/cp/watchdog/cp-nano-watchdog --un-register ${NANO_SERVICE_BIN_PATH}"
att_path=$ATTACHMENT_BIN_PATH
cmd_pid_att=$(ps -eo pid,cmd,args | awk -v srv=${att_path} '{if($2 ~ srv || $3 ~ srv) print $1}')
srv_path=$NANO_SERVICE_BIN_NAME
cmd_pid_srv=$(ps -eo pid,cmd,args | awk -v srv=${srv_path} '{if($2 ~ srv || $3 ~ srv) print $1}')
if [ -n "$cmd_pid_att" ]; then
cp_print "Killing running instance(pid=$cmd_pid_att) of the prometheus attachment on installation"
kill -9 "$cmd_pid_att"
fi
if [ -n "$cmd_pid_srv" ]; then
cp_print "Killing running instance(pid=$cmd_pid_srv) of the prometheus service on installation"
kill -9 "$cmd_pid_srv"
fi
cp_exec "mkdir -p ${NANO_SERVICE_INSTALLATION_PATH}"
cp_exec "cp -f bin/${NANO_SERVICE_BIN} ${NANO_SERVICE_BIN_PATH}"
cp_exec "chmod +x ${NANO_SERVICE_BIN_PATH}"
cp_exec "cp -f conf/${CFG_FILE_NAME} ${NANO_SERVICE_CFG_PATH}"
cp_exec "chmod 600 ${NANO_SERVICE_CFG_PATH}"
set_configuration
cp_exec "/etc/cp/watchdog/cp-nano-watchdog --register ${NANO_SERVICE_BIN_PATH}"
cp_exec "/etc/cp/watchdog/cp-nano-watchdog --register ${ATTACHMENT_BIN_PATH}"
cp_print "Installation completed successfully." $FORCE_STDOUT
}
usage()
{
echo "Check Point: available flags are"
echo "--install : install ${NANO_SERVICE_NAME} Nano Service"
echo "--uninstall : remove ${NANO_SERVICE_NAME} Nano Service"
echo "--pre_install_test : run Pre-installation test for ${NANO_SERVICE_NAME} Nano Service install package"
echo "--post_install_test : run Post-installation test for ${NANO_SERVICE_NAME} Nano Service install package"
exit 255
}
run_uninstall()
{
cp_exec "/etc/cp/watchdog/cp-nano-watchdog --un-register ${ATTACHMENT_BIN_PATH}"
cp_exec "/etc/cp/watchdog/cp-nano-watchdog --un-register ${NANO_SERVICE_BIN_PATH}"
cp_exec "rm -rf ${NANO_SERVICE_INSTALLATION_PATH}"
cp_exec "rm -rf ${NANO_SERVICE_CONF_DIR}"
}
run_pre_install_test()
{
cp_print "Starting Pre-installation test of Check Point ${NANO_SERVICE_NAME} Nano service installation package [$INSTALLATION_TIME]\n" $FORCE_STDOUT
cp_print "Successfully finished pre-installation test for Check Point ${NANO_SERVICE_NAME} Nano service installation package [$INSTALLATION_TIME]\n" $FORCE_STDOUT
exit 0
}
run_post_install_test()
{
cp_print "Starting Post-installation test of Check Point ${NANO_SERVICE_NAME} Nano service installation package [$INSTALLATION_TIME]\n" $FORCE_STDOUT
if ! cat /etc/cp/watchdog/wd.services | grep -q ${NANO_SERVICE_BIN_PATH}; then
cp_print "Failed to register ${NANO_SERVICE_NAME} Nano service to the watchdog\n" $FORCE_STDOUT
exit 255
fi
cp_print "Successfully finished post-installation test for Check Point ${NANO_SERVICE_NAME} Nano service installation package [$INSTALLATION_TIME]\n" $FORCE_STDOUT
exit 0
}
run()
{
if [ '--install' = "$1" ]; then
run_installation "${@}"
elif [ '--uninstall' = "$1" ]; then
run_uninstall
elif [ '--pre_install_test' = "$1" ]; then
run_pre_install_test
elif [ '--post_install_test' = "$1" ]; then
run_post_install_test
else
usage
exit 1
fi
}
if [ "$(id -u)" != "0" ]; then
echo "Administrative privileges required for this Package (use su or sudo)"
exit 1
fi
shift
run "${@}"
exit 0