Mirror of https://github.com/openappsec/openappsec.git (synced 2025-11-16 17:31:52 +03:00)

Compare commits
88 Commits
| SHA1 |
|---|
| 7b3320ce10 |
| 25cc2d66e7 |
| 66e2112afb |
| ba7c9afd52 |
| 2aa0993d7e |
| 0cdfc9df90 |
| 010814d656 |
| 3779dd360d |
| 0e7dc2133d |
| c9095acbef |
| e47e29321d |
| 25a66e77df |
| 6eea40f165 |
| cee6ed511a |
| 4f145fd74f |
| 3fe5c5b36f |
| 7542a85ddb |
| fae4534e5c |
| 923a8a804b |
| b1731237d1 |
| 3d3d6e73b9 |
| 3f80127ec5 |
| abdee954bb |
| 9a516899e8 |
| 4fd2aa6c6b |
| 0db666ac4f |
| 493d9a6627 |
| 6db87fc7fe |
| d2b9bc8c9c |
| 886a5befe1 |
| 1f2502f9e4 |
| 9e4c5014ce |
| 024423cce9 |
| dc4b546bd1 |
| a86aca13b4 |
| 87b34590d4 |
| e0198a1a95 |
| d024ad5845 |
| 46d42c8fa3 |
| f6c36f3363 |
| 63541a4c3c |
| d14fa7a468 |
| ae0de5bf14 |
| d39919f348 |
| 4f215e1409 |
| f05b5f8cee |
| 949b656b13 |
| bbe293d215 |
| 35b2df729f |
| 7600b6218f |
| 20e8e65e14 |
| 414130a789 |
| 9d704455e8 |
| 602442fed4 |
| 4e9a90db01 |
| 20f92afbc2 |
| ee7adc37d0 |
| c0b3e9c0d0 |
| f1f4b13327 |
| 4354a98d37 |
| 09fa11516c |
| 446b043128 |
| 91bcadf930 |
| 0824cf4b23 |
| 108abdb35e |
| 64ebf013eb |
| 2c91793f08 |
| 72a263d25a |
| 4e14ff9a58 |
| 1fb28e14d6 |
| e38bb9525c |
| 63b8bb22c2 |
| 11c97330f5 |
| e56fb0bc1a |
| 4571d563f4 |
| 02c1db01f6 |
| c557affd9b |
| 8889c3c054 |
| f67eff87bc |
| fa6a2e4233 |
| b7e2efbf7e |
| 96ce290e5f |
| de8e2d9970 |
| 0048708af1 |
| 4fe0f44e88 |
| 5f139d13d7 |
| 919d775a73 |
| ac8e353598 |
@@ -74,7 +74,7 @@ For Linux, if you’ve built your own package use the following commands:
 ```bash
 $ install-cp-nano-agent.sh --install --hybrid_mode
-$ install-cp-nano-service-http-transaction-handler.sh –install
+$ install-cp-nano-service-http-transaction-handler.sh --install
 $ install-cp-nano-attachment-registration-manager.sh --install
 ```
 You can add the ```--token <token>``` and ```--email <email address>``` options to the first command. To get a token, follow the [documentation](https://docs.openappsec.io/getting-started/using-the-web-ui-saas/connect-deployed-agents-to-saas-management-k8s-and-linux).
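As noted in the README text above, the first command also accepts registration flags; a minimal illustrative invocation with placeholder values (the token and email below are not real and must be replaced with your own):

```bash
# Placeholders only: obtain a real token from the open-appsec web UI (see the documentation link above)
$ install-cp-nano-agent.sh --install --hybrid_mode --token <token> --email <email address>
```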
@@ -173,6 +173,12 @@ getReqBodySizeTrigger()
     return conf_data.getNumericalValue("body_size_trigger");
 }
 
+unsigned int
+getRemoveResServerHeader()
+{
+    return conf_data.getNumericalValue("remove_server_header");
+}
+
 int
 isIPAddress(c_str ip_str)
 {
@@ -66,7 +66,8 @@ TEST_F(HttpAttachmentUtilTest, GetValidAttachmentConfiguration)
         "\"static_resources_path\": \"" + static_resources_path + "\",\n"
         "\"min_retries_for_verdict\": 1,\n"
         "\"max_retries_for_verdict\": 3,\n"
-        "\"body_size_trigger\": 777\n"
+        "\"body_size_trigger\": 777,\n"
+        "\"remove_server_header\": 1\n"
         "}\n";
     ofstream valid_configuration_file(attachment_configuration_file_name);
     valid_configuration_file << valid_configuration;
@@ -95,6 +96,7 @@ TEST_F(HttpAttachmentUtilTest, GetValidAttachmentConfiguration)
     EXPECT_EQ(getReqBodySizeTrigger(), 777u);
     EXPECT_EQ(getWaitingForVerdictThreadTimeout(), 75u);
     EXPECT_EQ(getInspectionMode(), ngx_http_inspection_mode::BLOCKING_THREAD);
+    EXPECT_EQ(getRemoveResServerHeader(), 1u);
 
     EXPECT_EQ(isDebugContext("1.2.3.4", "5.6.7.8", 80, "GET", "test", "/abc"), 1);
     EXPECT_EQ(isDebugContext("1.2.3.9", "5.6.7.8", 80, "GET", "test", "/abc"), 0);
@@ -7,3 +7,4 @@ add_subdirectory(pending_key)
 add_subdirectory(utils)
 add_subdirectory(attachment-intakers)
 add_subdirectory(security_apps)
+add_subdirectory(nginx_message_reader)
@@ -203,6 +203,13 @@ HttpAttachmentConfig::setFailOpenTimeout()
         "NGINX wait thread timeout msec"
     ));
 
+    conf_data.setNumericalValue("remove_server_header", getAttachmentConf<uint>(
+        0,
+        "agent.removeServerHeader.nginxModule",
+        "HTTP manager",
+        "Response server header removal"
+    ));
+
     uint inspection_mode = getAttachmentConf<uint>(
         static_cast<uint>(ngx_http_inspection_mode_e::NON_BLOCKING_THREAD),
         "agent.inspectionMode.nginxModule",
@@ -18,7 +18,9 @@
 #include <sys/stat.h>
 #include <climits>
 #include <unordered_map>
+#include <unordered_set>
 #include <boost/range/iterator_range.hpp>
+#include <boost/algorithm/string.hpp>
 #include <fstream>
 #include <algorithm>
 
@@ -28,6 +30,7 @@
 #include "http_manager_opaque.h"
 #include "log_generator.h"
 #include "http_inspection_events.h"
+#include "agent_core_utilities.h"
 
 USE_DEBUG_FLAG(D_HTTP_MANAGER);
 
@@ -66,6 +69,22 @@ public:
         i_transaction_table = Singleton::Consume<I_Table>::by<HttpManager>();
 
         Singleton::Consume<I_Logging>::by<HttpManager>()->addGeneralModifier(compressAppSecLogs);
+
+        const char* ignored_headers_env = getenv("SAAS_IGNORED_UPSTREAM_HEADERS");
+        if (ignored_headers_env) {
+            string ignored_headers_str = ignored_headers_env;
+            ignored_headers_str = NGEN::Strings::removeTrailingWhitespaces(ignored_headers_str);
+
+            if (!ignored_headers_str.empty()) {
+                dbgInfo(D_HTTP_MANAGER)
+                    << "Ignoring SAAS_IGNORED_UPSTREAM_HEADERS environment variable: "
+                    << ignored_headers_str;
+
+                vector<string> ignored_headers_vec;
+                boost::split(ignored_headers_vec, ignored_headers_str, boost::is_any_of(";"));
+                for (const string &header : ignored_headers_vec) ignored_headers.insert(header);
+            }
+        }
     }
 
     FilterVerdict
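The block added above reads a semicolon-separated list of upstream header names from the SAAS_IGNORED_UPSTREAM_HEADERS environment variable and stores them in `ignored_headers`. A minimal sketch of setting it for the agent process; the header names are illustrative only:

```bash
# Illustrative names: the value is split on ';' exactly as in the boost::split call above
export SAAS_IGNORED_UPSTREAM_HEADERS="X-Forwarded-Host;X-Internal-Trace-Id"
```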
@@ -90,6 +109,14 @@ public:
             return FilterVerdict(default_verdict);
         }
 
+        if (is_request && ignored_headers.find(static_cast<string>(event.getKey())) != ignored_headers.end()) {
+            dbgTrace(D_HTTP_MANAGER)
+                << "Ignoring header key - "
+                << static_cast<string>(event.getKey())
+                << " - as it is in the ignored headers list";
+            return FilterVerdict(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT);
+        }
+
         ScopedContext ctx;
         ctx.registerValue(app_sec_marker_key, i_transaction_table->keyToString(), EnvKeyAttr::LogSection::MARKER);
 
@@ -394,6 +421,7 @@ private:
     I_Table *i_transaction_table;
     static const ngx_http_cp_verdict_e default_verdict;
    static const string app_sec_marker_key;
+    unordered_set<string> ignored_headers;
 };
 
 const ngx_http_cp_verdict_e HttpManager::Impl::default_verdict(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP);
components/include/central_nginx_manager.h (new executable file, 45 lines)

// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __CENTRAL_NGINX_MANAGER_H__
#define __CENTRAL_NGINX_MANAGER_H__

#include "component.h"
#include "singleton.h"
#include "i_messaging.h"
#include "i_rest_api.h"
#include "i_mainloop.h"
#include "i_agent_details.h"

class CentralNginxManager
        :
        public Component,
        Singleton::Consume<I_RestApi>,
        Singleton::Consume<I_Messaging>,
        Singleton::Consume<I_MainLoop>,
        Singleton::Consume<I_AgentDetails>
{
public:
    CentralNginxManager();
    ~CentralNginxManager();

    void preload() override;
    void init() override;
    void fini() override;

private:
    class Impl;
    std::unique_ptr<Impl> pimpl;
};

#endif // __CENTRAL_NGINX_MANAGER_H__
components/include/nginx_message_reader.h (new executable file, 28 lines)

#ifndef __NGINX_MESSAGE_READER_H__
#define __NGINX_MESSAGE_READER_H__

#include "singleton.h"
#include "i_mainloop.h"
#include "i_socket_is.h"
#include "component.h"

class NginxMessageReader
        :
        public Component,
        Singleton::Consume<I_MainLoop>,
        Singleton::Consume<I_Socket>
{
public:
    NginxMessageReader();
    ~NginxMessageReader();

    void init() override;
    void fini() override;
    void preload() override;

private:
    class Impl;
    std::unique_ptr<Impl> pimpl;
};

#endif //__NGINX_MESSAGE_READER_H__
components/include/nginx_utils.h (new executable file, 51 lines)

// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __NGINX_UTILS_H__
#define __NGINX_UTILS_H__

#include <string>

#include "maybe_res.h"
#include "singleton.h"
#include "i_shell_cmd.h"

class NginxConfCollector
{
public:
    NginxConfCollector(const std::string &nginx_conf_input_path, const std::string &nginx_conf_output_path);
    Maybe<std::string> generateFullNginxConf() const;

private:
    std::vector<std::string> expandIncludes(const std::string &includePattern) const;
    void processConfigFile(
        const std::string &path,
        std::ostringstream &conf_output,
        std::vector<std::string> &errors
    ) const;

    std::string main_conf_input_path;
    std::string main_conf_output_path;
    std::string main_conf_directory_path;
};

class NginxUtils : Singleton::Consume<I_ShellCmd>
{
public:
    static std::string getModulesPath();
    static std::string getMainNginxConfPath();
    static Maybe<void> validateNginxConf(const std::string &nginx_conf_path);
    static Maybe<void> reloadNginx(const std::string &nginx_conf_path);
};

#endif // __NGINX_UTILS_H__
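NginxUtils consumes I_ShellCmd, so configuration validation and reload are presumably delegated to the nginx binary itself; the header does not show the exact commands, so the following is only a hedged sketch of the equivalent manual steps (the configuration path shown is the default download path used elsewhere in this change set):

```bash
# Hedged sketch: roughly what validateNginxConf/reloadNginx are expected to achieve, not their literal implementation
nginx -t -c /tmp/central_nginx.conf   # validate a candidate configuration file
nginx -s reload                       # signal the running NGINX master to reload its configuration
```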
@@ -7,15 +7,21 @@
 #include "singleton.h"
 #include "i_mainloop.h"
 #include "i_environment.h"
+#include "i_geo_location.h"
 #include "i_generic_rulebase.h"
+#include "i_shell_cmd.h"
+#include "i_env_details.h"
 
 class RateLimit
         :
         public Component,
         Singleton::Consume<I_MainLoop>,
         Singleton::Consume<I_TimeGet>,
+        Singleton::Consume<I_GeoLocation>,
         Singleton::Consume<I_Environment>,
-        Singleton::Consume<I_GenericRulebase>
+        Singleton::Consume<I_GenericRulebase>,
+        Singleton::Consume<I_ShellCmd>,
+        Singleton::Consume<I_EnvDetails>
 {
 public:
     RateLimit();
components/nginx_message_reader/CMakeLists.txt (new executable file, 3 lines)

link_directories(${BOOST_ROOT}/lib)

add_library(nginx_message_reader nginx_message_reader.cc)
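Since add_library defines a target named nginx_message_reader, the new component can be built on its own; a hedged sketch, assuming an out-of-source build directory named build that has already been configured for this repository:

```bash
# Assumes the repository has already been configured into ./build with the project's usual CMake options
cmake --build build --target nginx_message_reader
```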
components/nginx_message_reader/nginx_message_reader.cc (new executable file, 735 lines)
|
#include "nginx_message_reader.h"
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <boost/regex.hpp>
|
||||||
|
#include <boost/algorithm/string.hpp>
|
||||||
|
#include <boost/algorithm/string/regex.hpp>
|
||||||
|
|
||||||
|
#include "config.h"
|
||||||
|
#include "singleton.h"
|
||||||
|
#include "i_mainloop.h"
|
||||||
|
#include "enum_array.h"
|
||||||
|
#include "log_generator.h"
|
||||||
|
#include "maybe_res.h"
|
||||||
|
#include "http_transaction_data.h"
|
||||||
|
#include "generic_rulebase/rulebase_config.h"
|
||||||
|
#include "generic_rulebase/evaluators/asset_eval.h"
|
||||||
|
#include "generic_rulebase/triggers_config.h"
|
||||||
|
#include "agent_core_utilities.h"
|
||||||
|
#include "rate_limit_config.h"
|
||||||
|
|
||||||
|
USE_DEBUG_FLAG(D_NGINX_MESSAGE_READER);
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
|
||||||
|
static const string syslog_regex_string = (
|
||||||
|
"<[0-9]+>([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}"
|
||||||
|
"[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(nginx:)"
|
||||||
|
);
|
||||||
|
|
||||||
|
static const boost::regex socket_address_regex("(\\d+\\.\\d+\\.\\d+\\.\\d+):(\\d+)");
|
||||||
|
static const boost::regex syslog_regex(syslog_regex_string);
|
||||||
|
static const boost::regex alert_log_regex(
|
||||||
|
"("
|
||||||
|
+ syslog_regex_string + ") "
|
||||||
|
+ "(.+?\\[alert\\] )(.+?)"
|
||||||
|
", (client: .+?)"
|
||||||
|
", (server: .+?)"
|
||||||
|
", (request: \".+?\")"
|
||||||
|
", (upstream: \".+?\")"
|
||||||
|
", (host: \".+?\")$"
|
||||||
|
);
|
||||||
|
|
||||||
|
static const boost::regex error_log_regex(
|
||||||
|
"("
|
||||||
|
+ syslog_regex_string + ") "
|
||||||
|
+ "(.+?\\[error\\] )(.+?)"
|
||||||
|
", (client: .+?)"
|
||||||
|
", (server: .+?)"
|
||||||
|
", (request: \".+?\")"
|
||||||
|
", (upstream: \".+?\")"
|
||||||
|
", (host: \".+?\")$"
|
||||||
|
);
|
||||||
|
|
||||||
|
static const boost::regex server_regex("(\\d+\\.\\d+\\.\\d+\\.\\d+)|(\\w+\\.\\w+)");
|
||||||
|
static const boost::regex uri_regex("^/");
|
||||||
|
static const boost::regex port_regex("\\d+");
|
||||||
|
static const boost::regex response_code_regex("[0-9]{3}");
|
||||||
|
static const boost::regex http_method_regex("[A-Za-z]+");
|
||||||
|
|
||||||
|
class NginxMessageReader::Impl
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
void
|
||||||
|
init()
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||||
|
I_MainLoop *mainloop = Singleton::Consume<I_MainLoop>::by<NginxMessageReader>();
|
||||||
|
mainloop->addOneTimeRoutine(
|
||||||
|
I_MainLoop::RoutineType::System,
|
||||||
|
[this] ()
|
||||||
|
{
|
||||||
|
initSyslogServerSocket();
|
||||||
|
handleNginxLogs();
|
||||||
|
},
|
||||||
|
"Initialize nginx syslog",
|
||||||
|
true
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
preload()
|
||||||
|
{
|
||||||
|
registerConfigLoadCb([this]() { loadNginxMessageReaderConfig(); });
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
fini()
|
||||||
|
{
|
||||||
|
I_Socket *i_socket = Singleton::Consume<I_Socket>::by<NginxMessageReader>();
|
||||||
|
i_socket->closeSocket(syslog_server_socket);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
loadNginxMessageReaderConfig()
|
||||||
|
{
|
||||||
|
rate_limit_status_code = getProfileAgentSettingWithDefault<string>(
|
||||||
|
"429",
|
||||||
|
"accessControl.rateLimit.returnCode"
|
||||||
|
);
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER) << "Selected rate-limit status code: " << rate_limit_status_code;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
enum class LogInfo {
|
||||||
|
HTTP_METHOD,
|
||||||
|
URI,
|
||||||
|
RESPONSE_CODE,
|
||||||
|
HOST,
|
||||||
|
SOURCE,
|
||||||
|
DESTINATION_IP,
|
||||||
|
DESTINATION_PORT,
|
||||||
|
EVENT_MESSAGE,
|
||||||
|
ASSET_ID,
|
||||||
|
ASSET_NAME,
|
||||||
|
RULE_NAME,
|
||||||
|
RULE_ID,
|
||||||
|
COUNT
|
||||||
|
};
|
||||||
|
|
||||||
|
void
|
||||||
|
initSyslogServerSocket()
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||||
|
I_MainLoop *mainloop = Singleton::Consume<I_MainLoop>::by<NginxMessageReader>();
|
||||||
|
I_Socket *i_socket = Singleton::Consume<I_Socket>::by<NginxMessageReader>();
|
||||||
|
string nginx_syslog_server_address = getProfileAgentSettingWithDefault<string>(
|
||||||
|
"127.0.0.1:1514",
|
||||||
|
"reverseProxy.nginx.syslogAddress"
|
||||||
|
);
|
||||||
|
dbgInfo(D_NGINX_MESSAGE_READER) << "Attempting to open a socket: " << nginx_syslog_server_address;
|
||||||
|
do {
|
||||||
|
Maybe<I_Socket::socketFd> new_socket = i_socket->genSocket(
|
||||||
|
I_Socket::SocketType::UDP,
|
||||||
|
false,
|
||||||
|
true,
|
||||||
|
nginx_syslog_server_address
|
||||||
|
);
|
||||||
|
if (!new_socket.ok()) {
|
||||||
|
dbgError(D_NGINX_MESSAGE_READER) << "Failed to open a socket. Error: " << new_socket.getErr();
|
||||||
|
mainloop->yield(chrono::milliseconds(500));
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (new_socket.unpack() < 0) {
|
||||||
|
dbgError(D_NGINX_MESSAGE_READER)<< "Generated socket is OK yet negative";
|
||||||
|
mainloop->yield(chrono::milliseconds(500));
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
syslog_server_socket = new_socket.unpack();
|
||||||
|
dbgInfo(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "Opened socket for nginx logs over syslog. Socket: "
|
||||||
|
<< syslog_server_socket;
|
||||||
|
} while (syslog_server_socket < 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
handleNginxLogs()
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||||
|
I_MainLoop::Routine read_logs =
|
||||||
|
[this] ()
|
||||||
|
{
|
||||||
|
Maybe<string> logs = getLogsFromSocket(syslog_server_socket);
|
||||||
|
|
||||||
|
if (!logs.ok()) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "Failed to get NGINX logs from the socket. Error: "
|
||||||
|
<< logs.getErr();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
string raw_logs_to_parse = logs.unpackMove();
|
||||||
|
vector<string> logs_to_parse = separateLogs(raw_logs_to_parse);
|
||||||
|
|
||||||
|
for (auto const &log: logs_to_parse) {
|
||||||
|
bool log_sent;
|
||||||
|
if (isAccessLog(log)) {
|
||||||
|
log_sent = sendAccessLog(log);
|
||||||
|
} else if (isAlertErrorLog(log) || isErrorLog(log)) {
|
||||||
|
log_sent = sendErrorLog(log);
|
||||||
|
} else {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (!log_sent) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER) << "Failed to send Log to Infinity Portal";
|
||||||
|
} else {
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER) << "Succesfully sent nginx log to Infinity Portal";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
I_MainLoop *mainloop = Singleton::Consume<I_MainLoop>::by<NginxMessageReader>();
|
||||||
|
mainloop->addFileRoutine(
|
||||||
|
I_MainLoop::RoutineType::RealTime,
|
||||||
|
syslog_server_socket,
|
||||||
|
read_logs,
|
||||||
|
"Process nginx logs",
|
||||||
|
true
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
sendAccessLog(const string &log)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "Access log" << log;
|
||||||
|
Maybe<EnumArray<LogInfo, string>> log_info = parseAccessLog(log);
|
||||||
|
if (!log_info.ok()) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "Failed parsing the NGINX logs. Error: "
|
||||||
|
<< log_info.getErr();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
auto unpacked_log_info = log_info.unpack();
|
||||||
|
|
||||||
|
if (unpacked_log_info[LogInfo::RESPONSE_CODE] == rate_limit_status_code) {
|
||||||
|
return sendRateLimitLog(unpacked_log_info);
|
||||||
|
}
|
||||||
|
return sendLog(unpacked_log_info);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
sendErrorLog(const string &log)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "Error log" << log;
|
||||||
|
Maybe<EnumArray<LogInfo, string>> log_info = parseErrorLog(log);
|
||||||
|
if (!log_info.ok()) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "Failed parsing the NGINX logs. Error: "
|
||||||
|
<< log_info.getErr();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return sendLog(log_info.unpack());
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
isAccessLog(const string &log) const
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "Chekck if string contains \"accessLog\"" << log;
|
||||||
|
return log.find("accessLog") != string::npos;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
isAlertErrorLog(const string &log) const
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "Check if log is of type 'error log'. Log: " << log;
|
||||||
|
return log.find("[alert]") != string::npos;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
isErrorLog(const string &log) const
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "Check if log is of type 'error log'. Log: " << log;
|
||||||
|
return log.find("[error]") != string::npos;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
sendLog(const EnumArray<LogInfo, string> &log_info)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||||
|
string event_name;
|
||||||
|
switch (log_info[LogInfo::RESPONSE_CODE][0]) {
|
||||||
|
case '4': {
|
||||||
|
event_name = "Invalid request or incorrect reverse proxy configuration - Request dropped."
|
||||||
|
" Please check the reverse proxy configuration of your relevant assets";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case '5': {
|
||||||
|
event_name = "AppSec Gateway reverse proxy error - Request dropped. "
|
||||||
|
"Please verify the reverse proxy configuration of your relevant assets. "
|
||||||
|
"If the issue persists please contact Check Point Support";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
default: {
|
||||||
|
dbgError(D_NGINX_MESSAGE_READER) << "Irrelevant status code";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "Nginx log's event name and response code: "
|
||||||
|
<< event_name
|
||||||
|
<< ", "
|
||||||
|
<< log_info[LogInfo::RESPONSE_CODE];
|
||||||
|
LogGen log(
|
||||||
|
event_name,
|
||||||
|
ReportIS::Audience::SECURITY,
|
||||||
|
ReportIS::Severity::INFO,
|
||||||
|
ReportIS::Priority::LOW,
|
||||||
|
ReportIS::Tags::REVERSE_PROXY
|
||||||
|
);
|
||||||
|
log << LogField("eventConfidence", "High");
|
||||||
|
|
||||||
|
for (LogInfo field : makeRange<LogInfo>()) {
|
||||||
|
Maybe<string> string_field = convertLogFieldToString(field);
|
||||||
|
if (!string_field.ok()) {
|
||||||
|
dbgDebug(D_NGINX_MESSAGE_READER) << "Enum field was not converted: " << string_field.getErr();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (field != LogInfo::DESTINATION_PORT) {
|
||||||
|
log << LogField(string_field.unpack(), log_info[field]);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
log << LogField(string_field.unpack(), stoi(log_info[field]));
|
||||||
|
} catch (const exception &e) {
|
||||||
|
dbgError(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "Unable to convert port to numeric value: "
|
||||||
|
<< e.what();
|
||||||
|
log << LogField(string_field.unpack(), 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
sendRateLimitLog(const EnumArray<LogInfo, string> &log_info)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "Getting rate-limit rules of asset ID: " << log_info[LogInfo::ASSET_ID];
|
||||||
|
|
||||||
|
ScopedContext rate_limit_ctx;
|
||||||
|
|
||||||
|
rate_limit_ctx.registerValue<GenericConfigId>(AssetMatcher::ctx_key, log_info[LogInfo::ASSET_ID]);
|
||||||
|
auto rate_limit_config = getConfiguration<RateLimitConfig>("rulebase", "rateLimit");
|
||||||
|
if (!rate_limit_config.ok()) {
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "Rate limit context does not match asset ID: " << log_info[LogInfo::ASSET_ID];
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
RateLimitConfig unpacked_rate_limit_config = rate_limit_config.unpack();
|
||||||
|
|
||||||
|
string nginx_uri = log_info[LogInfo::URI];
|
||||||
|
const LogTriggerConf &rate_limit_trigger = unpacked_rate_limit_config.getRateLimitTrigger(nginx_uri);
|
||||||
|
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER)<< "About to generate NGINX rate-limit log";
|
||||||
|
|
||||||
|
string event_name = "Rate limit";
|
||||||
|
string security_action = "Drop";
|
||||||
|
bool is_log_required = false;
|
||||||
|
|
||||||
|
// Prevent events checkbox (in triggers)
|
||||||
|
if (rate_limit_trigger.isPreventLogActive(LogTriggerConf::SecurityType::AccessControl)) {
|
||||||
|
is_log_required = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!is_log_required) {
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER) << "Not sending NGINX rate-limit log as it is not required";
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
ostringstream src_ip;
|
||||||
|
ostringstream dst_ip;
|
||||||
|
src_ip << log_info[LogInfo::SOURCE];
|
||||||
|
dst_ip << log_info[LogInfo::DESTINATION_IP];
|
||||||
|
|
||||||
|
ReportIS::Severity log_severity = ReportIS::Severity::MEDIUM;
|
||||||
|
ReportIS::Priority log_priority = ReportIS::Priority::MEDIUM;
|
||||||
|
|
||||||
|
LogGen log = rate_limit_trigger(
|
||||||
|
event_name,
|
||||||
|
LogTriggerConf::SecurityType::AccessControl,
|
||||||
|
log_severity,
|
||||||
|
log_priority,
|
||||||
|
true, // is drop
|
||||||
|
LogField("practiceType", "Rate Limit"),
|
||||||
|
ReportIS::Tags::RATE_LIMIT
|
||||||
|
);
|
||||||
|
|
||||||
|
for (LogInfo field : makeRange<LogInfo>()) {
|
||||||
|
Maybe<string> string_field = convertLogFieldToString(field);
|
||||||
|
if (!string_field.ok()) {
|
||||||
|
dbgDebug(D_NGINX_MESSAGE_READER) << "Enum field was not converted: " << string_field.getErr();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
field == LogInfo::HOST ||
|
||||||
|
field == LogInfo::URI ||
|
||||||
|
field == LogInfo::HTTP_METHOD ||
|
||||||
|
field == LogInfo::SOURCE ||
|
||||||
|
field == LogInfo::DESTINATION_IP ||
|
||||||
|
field == LogInfo::ASSET_ID ||
|
||||||
|
field == LogInfo::ASSET_NAME ||
|
||||||
|
field == LogInfo::RESPONSE_CODE
|
||||||
|
) {
|
||||||
|
if (!log_info[field].empty()) {
|
||||||
|
log << LogField(string_field.unpack(), log_info[field]);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (field == LogInfo::DESTINATION_PORT) {
|
||||||
|
try {
|
||||||
|
int numeric_dst_port = stoi(log_info[field]);
|
||||||
|
log << LogField(string_field.unpack(), numeric_dst_port);
|
||||||
|
} catch (const exception &e) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "Unable to convert dst port: "
|
||||||
|
<< log_info[field]
|
||||||
|
<< " to numberic value. Error: "
|
||||||
|
<< e.what();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
Maybe<string>
|
||||||
|
convertLogFieldToString(LogInfo field)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||||
|
switch (field) {
|
||||||
|
case LogInfo::HTTP_METHOD:
|
||||||
|
return string("httpMethod");
|
||||||
|
case LogInfo::URI:
|
||||||
|
return string("httpUriPath");
|
||||||
|
case LogInfo::RESPONSE_CODE:
|
||||||
|
return string("httpResponseCode");
|
||||||
|
case LogInfo::HOST:
|
||||||
|
return string("httpHostName");
|
||||||
|
case LogInfo::SOURCE:
|
||||||
|
return string("httpSourceId");
|
||||||
|
case LogInfo::DESTINATION_IP:
|
||||||
|
return string("destinationIp");
|
||||||
|
case LogInfo::DESTINATION_PORT:
|
||||||
|
return string("destinationPort");
|
||||||
|
case LogInfo::ASSET_ID:
|
||||||
|
return string("assetId");
|
||||||
|
case LogInfo::ASSET_NAME:
|
||||||
|
return string("assetName");
|
||||||
|
case LogInfo::EVENT_MESSAGE:
|
||||||
|
return string("httpResponseBody");
|
||||||
|
case LogInfo::RULE_ID:
|
||||||
|
return string("ruleId");
|
||||||
|
case LogInfo::RULE_NAME:
|
||||||
|
return string("ruleName");
|
||||||
|
case LogInfo::COUNT:
|
||||||
|
dbgError(D_NGINX_MESSAGE_READER) << "LogInfo::COUNT is not allowed";
|
||||||
|
return genError("LogInfo::COUNT is not allowed");
|
||||||
|
}
|
||||||
|
dbgError(D_NGINX_MESSAGE_READER) << "No Enum found, int value: " << static_cast<int>(field);
|
||||||
|
return genError("No Enum found");
|
||||||
|
}
|
||||||
|
|
||||||
|
static vector<string>
|
||||||
|
separateLogs(const string &raw_logs_to_parse)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "separating logs. logs: " << raw_logs_to_parse;
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER) << "separateLogs start of function. Logs to parse: " << raw_logs_to_parse;
|
||||||
|
boost::smatch matcher;
|
||||||
|
vector<string> logs;
|
||||||
|
|
||||||
|
if (raw_logs_to_parse.empty()) return logs;
|
||||||
|
|
||||||
|
size_t pos = 0;
|
||||||
|
while (NGEN::Regex::regexSearch(__FILE__, __LINE__, raw_logs_to_parse.substr(pos), matcher, syslog_regex)) {
|
||||||
|
if (pos == 0) {
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER) << "separateLogs pos = 0";
|
||||||
|
pos++;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
auto log_length = matcher.position();
|
||||||
|
logs.push_back(raw_logs_to_parse.substr(pos - 1, log_length));
|
||||||
|
|
||||||
|
pos += log_length + 1;
|
||||||
|
}
|
||||||
|
logs.push_back(raw_logs_to_parse.substr(pos - 1));
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER) << "separateLogs end of function";
|
||||||
|
|
||||||
|
return logs;
|
||||||
|
}
|
||||||
|
|
||||||
|
static pair<string, string>
|
||||||
|
parseErrorLogRequestField(const string &request)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "parsing request field. request: " << request;
|
||||||
|
string formatted_request = request;
|
||||||
|
vector<string> result;
|
||||||
|
boost::erase_all(formatted_request, "\"");
|
||||||
|
boost::erase_all(formatted_request, "\n");
|
||||||
|
boost::split(result, formatted_request, boost::is_any_of(" "), boost::token_compress_on);
|
||||||
|
|
||||||
|
const int http_method_index = 1;
|
||||||
|
const int uri_index = 2;
|
||||||
|
return pair<string, string>(result[http_method_index], result[uri_index]);
|
||||||
|
}
|
||||||
|
|
||||||
|
static string
|
||||||
|
parseErrorLogField(const string &field)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "parsing error log field " << field;
|
||||||
|
string formatted_field = field;
|
||||||
|
vector<string> result;
|
||||||
|
boost::erase_all(formatted_field, "\"");
|
||||||
|
boost::erase_all(formatted_field, "\n");
|
||||||
|
boost::split(result, formatted_field, boost::is_any_of(" "), boost::token_compress_on);
|
||||||
|
|
||||||
|
const int field_index = 1;
|
||||||
|
return result[field_index];
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
addContextFieldsToLogInfo(EnumArray<LogInfo, string> &log_info)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||||
|
ScopedContext ctx;
|
||||||
|
|
||||||
|
try {
|
||||||
|
ctx.registerValue<uint16_t>(
|
||||||
|
HttpTransactionData::listening_port_ctx,
|
||||||
|
static_cast<uint16_t>(stoi(log_info[LogInfo::DESTINATION_PORT]))
|
||||||
|
);
|
||||||
|
} catch (const exception &e) {
|
||||||
|
dbgError(D_NGINX_MESSAGE_READER) << "Failed register values for context " << e.what();
|
||||||
|
}
|
||||||
|
ctx.registerValue<string>(HttpTransactionData::host_name_ctx, log_info[LogInfo::HOST]);
|
||||||
|
ctx.registerValue<string>(HttpTransactionData::uri_ctx, log_info[LogInfo::URI]);
|
||||||
|
auto rule_by_ctx = getConfiguration<BasicRuleConfig>("rulebase", "rulesConfig");
|
||||||
|
if (!rule_by_ctx.ok()) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "AssetId was not found by the given context. Reason: "
|
||||||
|
<< rule_by_ctx.getErr();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
BasicRuleConfig context = rule_by_ctx.unpack();
|
||||||
|
log_info[LogInfo::ASSET_ID] = context.getAssetId();
|
||||||
|
log_info[LogInfo::ASSET_NAME] = context.getAssetName();
|
||||||
|
log_info[LogInfo::RULE_ID] = context.getRuleId();
|
||||||
|
log_info[LogInfo::RULE_NAME] = context.getRuleName();
|
||||||
|
}
|
||||||
|
|
||||||
|
Maybe<EnumArray<LogInfo, string>>
|
||||||
|
parseErrorLog(const string &log_line)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "Handling log line:" << log_line;
|
||||||
|
string port;
|
||||||
|
EnumArray<LogInfo, string> log_info(EnumArray<LogInfo, string>::Fill(), string(""));
|
||||||
|
|
||||||
|
boost::smatch matcher;
|
||||||
|
vector<string> result;
|
||||||
|
if (
|
||||||
|
!NGEN::Regex::regexSearch(
|
||||||
|
__FILE__,
|
||||||
|
__LINE__,
|
||||||
|
log_line,
|
||||||
|
matcher,
|
||||||
|
isAlertErrorLog(log_line) ? alert_log_regex : error_log_regex
|
||||||
|
)
|
||||||
|
) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||||
|
return genError("Unexpected nginx log format");
|
||||||
|
}
|
||||||
|
|
||||||
|
const int event_message_index = 6;
|
||||||
|
const int source_index = 7;
|
||||||
|
const int request_index = 9;
|
||||||
|
const int host_index = 11;
|
||||||
|
string host = string(matcher[host_index].first, matcher[host_index].second);
|
||||||
|
string source = string(matcher[source_index].first, matcher[source_index].second);
|
||||||
|
string event_message = string(matcher[event_message_index].first, matcher[event_message_index].second);
|
||||||
|
string request = string(matcher[request_index].first, matcher[request_index].second);
|
||||||
|
|
||||||
|
host = parseErrorLogField(host);
|
||||||
|
source = parseErrorLogField(source);
|
||||||
|
pair<string, string> parsed_request = parseErrorLogRequestField(request);
|
||||||
|
string http_method = parsed_request.first;
|
||||||
|
string uri = parsed_request.second;
|
||||||
|
|
||||||
|
if (NGEN::Regex::regexSearch(__FILE__, __LINE__, host, matcher, socket_address_regex)) {
|
||||||
|
int host_index = 1;
|
||||||
|
int port_index = 2;
|
||||||
|
host = string(matcher[host_index].first, matcher[host_index].second);
|
||||||
|
port = string(matcher[port_index].first, matcher[port_index].second);
|
||||||
|
} else if (NGEN::Regex::regexSearch(__FILE__, __LINE__, host, matcher, boost::regex("https://"))) {
|
||||||
|
port = "443";
|
||||||
|
} else {
|
||||||
|
port = "80";
|
||||||
|
}
|
||||||
|
|
||||||
|
log_info[LogInfo::HOST] = host;
|
||||||
|
log_info[LogInfo::URI] = uri;
|
||||||
|
log_info[LogInfo::RESPONSE_CODE] = "500";
|
||||||
|
log_info[LogInfo::HTTP_METHOD] = http_method;
|
||||||
|
log_info[LogInfo::SOURCE] = source;
|
||||||
|
log_info[LogInfo::DESTINATION_IP] = host;
|
||||||
|
log_info[LogInfo::DESTINATION_PORT] = port;
|
||||||
|
log_info[LogInfo::EVENT_MESSAGE] = event_message;
|
||||||
|
|
||||||
|
addContextFieldsToLogInfo(log_info);
|
||||||
|
|
||||||
|
if (!validateLog(log_info)) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||||
|
return genError("Unexpected nginx log format");
|
||||||
|
}
|
||||||
|
|
||||||
|
return log_info;
|
||||||
|
}
|
||||||
|
|
||||||
|
Maybe<EnumArray<LogInfo, string>>
|
||||||
|
parseAccessLog(const string &log_line)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "Parsing log line: " << log_line;
|
||||||
|
string formatted_log = log_line;
|
||||||
|
EnumArray<LogInfo, string> log_info(EnumArray<LogInfo, string>::Fill(), string(""));
|
||||||
|
vector<string> result;
|
||||||
|
boost::erase_all(formatted_log, "\"");
|
||||||
|
boost::erase_all(formatted_log, "\n");
|
||||||
|
boost::split(result, formatted_log, boost::is_any_of(" "), boost::token_compress_on);
|
||||||
|
|
||||||
|
const int valid_log_size = 20;
|
||||||
|
|
||||||
|
if (result.size() < valid_log_size) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||||
|
return genError("Unexpected nginx log format");
|
||||||
|
}
|
||||||
|
|
||||||
|
const int host_index = 6;
|
||||||
|
const int host_port_index = 7;
|
||||||
|
const int http_method_index = 13;
|
||||||
|
const int uri_index = 14;
|
||||||
|
const int response_cod_index = 16;
|
||||||
|
const int source_index = 8;
|
||||||
|
|
||||||
|
log_info[LogInfo::HOST] = result[host_index];
|
||||||
|
log_info[LogInfo::URI] = result[uri_index];
|
||||||
|
log_info[LogInfo::RESPONSE_CODE] = result[response_cod_index];
|
||||||
|
log_info[LogInfo::HTTP_METHOD] = result[http_method_index];
|
||||||
|
log_info[LogInfo::SOURCE] = result[source_index];
|
||||||
|
log_info[LogInfo::DESTINATION_IP] = result[host_index];
|
||||||
|
log_info[LogInfo::DESTINATION_PORT] = result[host_port_index];
|
||||||
|
log_info[LogInfo::EVENT_MESSAGE] = "Invalid request or incorrect reverse proxy configuration - "
|
||||||
|
"Request dropped. Please check the reverse proxy configuration of your relevant assets";
|
||||||
|
|
||||||
|
addContextFieldsToLogInfo(log_info);
|
||||||
|
|
||||||
|
if (!validateLog(log_info)) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER) << "Unexpected nginx log format";
|
||||||
|
return genError("Unexpected nginx log format");
|
||||||
|
}
|
||||||
|
return log_info;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool
|
||||||
|
validateLog(const EnumArray<LogInfo, string> &log_info)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER);
|
||||||
|
|
||||||
|
boost::smatch matcher;
|
||||||
|
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::HOST], matcher, server_regex)) {
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate server (Host): " << log_info[LogInfo::HOST];
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::URI], matcher, uri_regex)) {
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate Uri: " << log_info[LogInfo::URI];
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
!NGEN::Regex::regexSearch(
|
||||||
|
__FILE__,
|
||||||
|
__LINE__,
|
||||||
|
log_info[LogInfo::RESPONSE_CODE],
|
||||||
|
matcher, response_code_regex
|
||||||
|
)
|
||||||
|
) {
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "Could not validate response code: "
|
||||||
|
<< log_info[LogInfo::RESPONSE_CODE];
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::HTTP_METHOD], matcher, http_method_regex)
|
||||||
|
) {
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate HTTP method: " << log_info[LogInfo::HTTP_METHOD];
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::DESTINATION_PORT], matcher, port_regex)) {
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER)
|
||||||
|
<< "Could not validate destination port : "
|
||||||
|
<< log_info[LogInfo::DESTINATION_PORT];
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, log_info[LogInfo::SOURCE], matcher, server_regex)) {
|
||||||
|
dbgTrace(D_NGINX_MESSAGE_READER) << "Could not validate source : " << log_info[LogInfo::SOURCE];
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
Maybe<string>
|
||||||
|
getLogsFromSocket(const I_Socket::socketFd &client_socket) const
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MESSAGE_READER) << "Reading logs from socket. fd: " << client_socket;
|
||||||
|
I_Socket *i_socket = Singleton::Consume<I_Socket>::by<NginxMessageReader>();
|
||||||
|
Maybe<vector<char>> raw_log_data = i_socket->receiveData(client_socket, 0, false);
|
||||||
|
if (!raw_log_data.ok()) {
|
||||||
|
dbgWarning(D_NGINX_MESSAGE_READER) << "Error receiving data from socket";
|
||||||
|
return genError("Error receiving data from socket");
|
||||||
|
}
|
||||||
|
|
||||||
|
string raw_log(raw_log_data.unpack().begin(), raw_log_data.unpack().end());
|
||||||
|
return move(raw_log);
|
||||||
|
}
|
||||||
|
|
||||||
|
I_Socket::socketFd syslog_server_socket = -1;
|
||||||
|
string rate_limit_status_code = "429";
|
||||||
|
};
|
||||||
|
|
||||||
|
NginxMessageReader::NginxMessageReader() : Component("NginxMessageReader"), pimpl(make_unique<Impl>()) {}
|
||||||
|
|
||||||
|
NginxMessageReader::~NginxMessageReader() {}
|
||||||
|
|
||||||
|
void
|
||||||
|
NginxMessageReader::init()
|
||||||
|
{
|
||||||
|
pimpl->init();
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
NginxMessageReader::preload()
|
||||||
|
{
|
||||||
|
pimpl->preload();
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
NginxMessageReader::fini()
|
||||||
|
{
|
||||||
|
pimpl->fini();
|
||||||
|
}
|
||||||
@@ -5,3 +5,4 @@ add_subdirectory(local_policy_mgmt_gen)
 add_subdirectory(orchestration)
 add_subdirectory(rate_limit)
 add_subdirectory(waap)
+add_subdirectory(central_nginx_manager)
components/security_apps/central_nginx_manager/CMakeLists.txt (new executable file, 3 lines)

include_directories(include)

add_library(central_nginx_manager central_nginx_manager.cc lets_encrypt_listener.cc)
components/security_apps/central_nginx_manager/central_nginx_manager.cc (new executable file, 390 lines)
|
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||||
|
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "central_nginx_manager.h"
|
||||||
|
#include "lets_encrypt_listener.h"
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <cereal/external/base64.hpp>
|
||||||
|
|
||||||
|
#include "debug.h"
|
||||||
|
#include "config.h"
|
||||||
|
#include "rest.h"
|
||||||
|
#include "log_generator.h"
|
||||||
|
#include "nginx_utils.h"
|
||||||
|
#include "agent_core_utilities.h"
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
|
||||||
|
USE_DEBUG_FLAG(D_NGINX_MANAGER);
|
||||||
|
|
||||||
|
class CentralNginxConfig
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
void load(cereal::JSONInputArchive &ar)
|
||||||
|
{
|
||||||
|
try {
|
||||||
|
string nginx_conf_base64;
|
||||||
|
ar(cereal::make_nvp("id", file_id));
|
||||||
|
ar(cereal::make_nvp("name", file_name));
|
||||||
|
ar(cereal::make_nvp("data", nginx_conf_base64));
|
||||||
|
nginx_conf_content = cereal::base64::decode(nginx_conf_base64);
|
||||||
|
central_nginx_conf_path = getCentralNginxConfPath();
|
||||||
|
shared_config_path = getSharedConfigPath();
|
||||||
|
if (!nginx_conf_content.empty()) configureCentralNginx();
|
||||||
|
} catch (const cereal::Exception &e) {
|
||||||
|
dbgDebug(D_NGINX_MANAGER) << "Could not load Central Management Config JSON. Error: " << e.what();
|
||||||
|
ar.setNextName(nullptr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const string & getFileId() const { return file_id; }
|
||||||
|
const string & getFileName() const { return file_name; }
|
||||||
|
const string & getFileContent() const { return nginx_conf_content; }
|
||||||
|
|
||||||
|
static string
|
||||||
|
getCentralNginxConfPath()
|
||||||
|
{
|
||||||
|
string central_nginx_conf_path = getProfileAgentSettingWithDefault<string>(
|
||||||
|
string("/tmp/central_nginx.conf"),
|
||||||
|
"centralNginxManagement.confDownloadPath"
|
||||||
|
);
|
||||||
|
dbgInfo(D_NGINX_MANAGER) << "Central NGINX configuration path: " << central_nginx_conf_path;
|
||||||
|
|
||||||
|
return central_nginx_conf_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
static string
|
||||||
|
getSharedConfigPath()
|
||||||
|
{
|
||||||
|
string central_shared_conf_path = getConfigurationWithDefault<string>(
|
||||||
|
"/etc/cp/conf",
|
||||||
|
"Config Component",
|
||||||
|
"configuration path"
|
||||||
|
);
|
||||||
|
central_shared_conf_path += "/centralNginxManager/shared/central_nginx_shared.conf";
|
||||||
|
dbgInfo(D_NGINX_MANAGER) << "Shared NGINX configuration path: " << central_shared_conf_path;
|
||||||
|
|
||||||
|
return central_shared_conf_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
void
|
||||||
|
loadAttachmentModule()
|
||||||
|
{
|
||||||
|
string attachment_module_path = NginxUtils::getModulesPath() + "/ngx_cp_attachment_module.so";
|
||||||
|
if (!NGEN::Filesystem::exists(attachment_module_path)) {
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Attachment module " << attachment_module_path << " does not exist";
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
string attachment_module_conf = "load_module " + attachment_module_path + ";";
|
||||||
|
if (nginx_conf_content.find(attachment_module_conf) != string::npos) {
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Attachment module " << attachment_module_path << " already loaded";
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
nginx_conf_content = attachment_module_conf + "\n" + nginx_conf_content;
|
||||||
|
}
|
||||||
|
|
||||||
|
Maybe<void>
|
||||||
|
loadSharedDirective(const string &directive)
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MANAGER) << "Loading shared directive into the servers " << directive;
|
||||||
|
|
||||||
|
if (!NGEN::Filesystem::copyFile(shared_config_path, shared_config_path + ".bak", true)) {
|
||||||
|
return genError("Could not create a backup of the shared NGINX configuration file");
|
||||||
|
}
|
||||||
|
|
||||||
|
ifstream shared_config(shared_config_path);
|
||||||
|
if (!shared_config.is_open()) {
|
||||||
|
return genError("Could not open shared NGINX configuration file");
|
||||||
|
}
|
||||||
|
|
||||||
|
string shared_config_content((istreambuf_iterator<char>(shared_config)), istreambuf_iterator<char>());
|
||||||
|
shared_config.close();
|
||||||
|
|
||||||
|
if (shared_config_content.find(directive) != string::npos) {
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Shared directive " << directive << " already loaded";
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
ofstream new_shared_config(shared_config_path, ios::app);
|
||||||
|
if (!new_shared_config.is_open()) {
|
||||||
|
return genError("Could not open shared NGINX configuration file");
|
||||||
|
}
|
||||||
|
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Adding shared directive " << directive;
|
||||||
|
new_shared_config << directive << "\n";
|
||||||
|
new_shared_config.close();
|
||||||
|
|
||||||
|
auto validation = NginxUtils::validateNginxConf(central_nginx_conf_path);
|
||||||
|
if (!validation.ok()) {
|
||||||
|
if (!NGEN::Filesystem::copyFile(shared_config_path + ".bak", shared_config_path, true)) {
|
||||||
|
return genError("Could not restore the shared NGINX configuration file");
|
||||||
|
}
|
||||||
|
return genError("Could not validate shared NGINX configuration file. Error: " + validation.getErr());
|
||||||
|
}
|
||||||
|
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
Maybe<void>
|
||||||
|
loadSharedConfig()
|
||||||
|
{
|
||||||
|
dbgFlow(D_NGINX_MANAGER) << "Loading shared configuration into the servers";
|
||||||
|
|
||||||
|
ofstream shared_config(shared_config_path);
|
||||||
|
if (!shared_config.is_open()) {
|
||||||
|
return genError("Could not create shared NGINX configuration file");
|
||||||
|
}
|
||||||
|
shared_config.close();
|
||||||
|
|
||||||
|
string shared_config_directive = "include " + shared_config_path + ";\n";
|
||||||
|
boost::regex server_regex("server\\s*\\{");
|
||||||
|
nginx_conf_content = NGEN::Regex::regexReplace(
|
||||||
|
__FILE__,
|
||||||
|
__LINE__,
|
||||||
|
nginx_conf_content,
|
||||||
|
server_regex,
|
||||||
|
"server {\n" + shared_config_directive
|
||||||
|
);
|
||||||
|
|
||||||
|
ofstream nginx_conf_file(central_nginx_conf_path);
|
||||||
|
if (!nginx_conf_file.is_open()) {
|
||||||
|
return genError("Could not open a temporary central NGINX configuration file");
|
||||||
|
}
|
||||||
|
nginx_conf_file << nginx_conf_content;
|
||||||
|
nginx_conf_file.close();
|
||||||
|
|
||||||
|
auto validation = NginxUtils::validateNginxConf(central_nginx_conf_path);
|
||||||
|
if (!validation.ok()) {
|
||||||
|
return genError("Could not validate central NGINX configuration file. Error: " + validation.getErr());
|
||||||
|
}
|
||||||
|
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
Maybe<void>
|
||||||
|
configureSyslog()
|
    {
        if (!getProfileAgentSettingWithDefault<bool>(true, "centralNginxManagement.syslogEnabled")) {
            dbgTrace(D_NGINX_MANAGER) << "Syslog is disabled via settings";
            return {};
        }

        string syslog_directive = "error_log syslog:server=127.0.0.1:1514 warn;";
        auto load_shared_directive_result = loadSharedDirective(syslog_directive);
        if (!load_shared_directive_result.ok()) {
            return genError("Could not configure syslog directive, error: " + load_shared_directive_result.getErr());
        }

        return {};
    }

    Maybe<void>
    saveBaseCentralNginxConf()
    {
        ofstream central_nginx_conf_base_file(central_nginx_conf_path + ".base");
        if (!central_nginx_conf_base_file.is_open()) {
            return genError("Could not open a temporary central NGINX configuration file");
        }
        central_nginx_conf_base_file << nginx_conf_content;
        central_nginx_conf_base_file.close();

        return {};
    }

    void
    configureCentralNginx()
    {
        loadAttachmentModule();
        auto save_base_nginx_conf = saveBaseCentralNginxConf();
        if (!save_base_nginx_conf.ok()) {
            dbgWarning(D_NGINX_MANAGER)
                << "Could not save base NGINX configuration. Error: "
                << save_base_nginx_conf.getErr();
            return;
        }

        string nginx_conf_content_backup = nginx_conf_content;
        auto shared_config_result = loadSharedConfig();
        if (!shared_config_result.ok()) {
            dbgWarning(D_NGINX_MANAGER)
                << "Could not load shared configuration. Error: "
                << shared_config_result.getErr();
            nginx_conf_content = nginx_conf_content_backup;
            return;
        }

        auto syslog_result = configureSyslog();
        if (!syslog_result.ok()) {
            dbgWarning(D_NGINX_MANAGER) << "Could not configure syslog. Error: " << syslog_result.getErr();
        }
    }

    string file_id;
    string file_name;
    string nginx_conf_content;
    string central_nginx_conf_path;
    string shared_config_path;
};

class CentralNginxManager::Impl
{
public:
    void
    init()
    {
        dbgInfo(D_NGINX_MANAGER) << "Starting Central NGINX Manager";

        string main_nginx_conf_path = NginxUtils::getMainNginxConfPath();
        if (
            NGEN::Filesystem::exists(main_nginx_conf_path)
            && !NGEN::Filesystem::exists(main_nginx_conf_path + ".orig")
        ) {
            dbgInfo(D_NGINX_MANAGER) << "Creating a backup of the original main NGINX configuration file";
            NGEN::Filesystem::copyFile(main_nginx_conf_path, main_nginx_conf_path + ".orig", true);
        }

        i_mainloop = Singleton::Consume<I_MainLoop>::by<CentralNginxManager>();
        if (!lets_encrypt_listener.init()) {
            dbgWarning(D_NGINX_MANAGER) << "Could not start Lets Encrypt Listener, scheduling retry";
            i_mainloop->addOneTimeRoutine(
                I_MainLoop::RoutineType::System,
                [this] ()
                {
                    while(!lets_encrypt_listener.init()) {
                        dbgWarning(D_NGINX_MANAGER) << "Could not start Lets Encrypt Listener, will retry";
                        i_mainloop->yield(chrono::seconds(5));
                    }
                },
                "Lets Encrypt Listener initializer",
                false
            );
        }
    }

    void
    loadPolicy()
    {
        auto central_nginx_config = getSetting<vector<CentralNginxConfig>>("centralNginxManagement");
        if (!central_nginx_config.ok() || central_nginx_config.unpack().empty()) {
            dbgWarning(D_NGINX_MANAGER)
                << "Could not load Central NGINX Management settings. Error: "
                << central_nginx_config.getErr();
            return;
        }

        auto &config = central_nginx_config.unpack().front();
        if (config.getFileContent().empty()) {
            dbgWarning(D_NGINX_MANAGER) << "Empty NGINX configuration file";
            return;
        }

        dbgTrace(D_NGINX_MANAGER)
            << "Handling Central NGINX Management settings: "
            << config.getFileId()
            << ", "
            << config.getFileName()
            << ", "
            << config.getFileContent();

        string central_nginx_conf_path = config.getCentralNginxConfPath();
        ofstream central_nginx_conf_file(central_nginx_conf_path);
        if (!central_nginx_conf_file.is_open()) {
            dbgWarning(D_NGINX_MANAGER)
                << "Could not open central NGINX configuration file: "
                << central_nginx_conf_path;
            return;
        }
        central_nginx_conf_file << config.getFileContent();
        central_nginx_conf_file.close();

        auto validation_result = NginxUtils::validateNginxConf(central_nginx_conf_path);
        if (!validation_result.ok()) {
            dbgWarning(D_NGINX_MANAGER)
                << "Could not validate central NGINX configuration file. Error: "
                << validation_result.getErr();
            logError(validation_result.getErr());
            return;
        }

        dbgTrace(D_NGINX_MANAGER) << "Validated central NGINX configuration file";

        auto reload_result = NginxUtils::reloadNginx(central_nginx_conf_path);
        if (!reload_result.ok()) {
            dbgWarning(D_NGINX_MANAGER)
                << "Could not reload central NGINX configuration. Error: "
                << reload_result.getErr();
            logError("Could not reload central NGINX configuration. Error: " + reload_result.getErr());
            return;
        }
    }

    void
    fini()
    {
        string central_nginx_base_path = CentralNginxConfig::getCentralNginxConfPath() + ".base";
        if (!NGEN::Filesystem::exists(central_nginx_base_path)) {
            dbgWarning(D_NGINX_MANAGER) << "Could not find base NGINX configuration file: " << central_nginx_base_path;
            return;
        }

        NginxUtils::reloadNginx(central_nginx_base_path);
    }

private:
    void
    logError(const string &error)
    {
        LogGen log(
            error,
            ReportIS::Audience::SECURITY,
            ReportIS::Severity::CRITICAL,
            ReportIS::Priority::HIGH,
            ReportIS::Tags::POLICY_INSTALLATION
        );
    }

    I_MainLoop *i_mainloop = nullptr;
    LetsEncryptListener lets_encrypt_listener;
};

CentralNginxManager::CentralNginxManager()
        :
    Component("Central NGINX Manager"),
    pimpl(make_unique<CentralNginxManager::Impl>()) {}

CentralNginxManager::~CentralNginxManager() {}

void
CentralNginxManager::init()
{
    pimpl->init();
}

void
CentralNginxManager::fini()
{
    pimpl->fini();
}

void
CentralNginxManager::preload()
{
    registerExpectedSetting<vector<CentralNginxConfig>>("centralNginxManagement");
    registerExpectedConfiguration<string>("Config Component", "configuration path");
    registerConfigLoadCb([this]() { pimpl->loadPolicy(); });
}
@@ -0,0 +1,30 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __LETS_ENCRYPT_HANDLER_H__
#define __LETS_ENCRYPT_HANDLER_H__

#include <string>

#include "maybe_res.h"

class LetsEncryptListener
{
public:
    bool init();

private:
    Maybe<std::string> getChallengeValue(const std::string &uri) const;
};

#endif // __LETS_ENCRYPT_HANDLER_H__
components/security_apps/central_nginx_manager/lets_encrypt_listener.cc (new executable file, 76 lines)
@@ -0,0 +1,76 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lets_encrypt_listener.h"

#include <string>

#include "central_nginx_manager.h"
#include "debug.h"

using namespace std;

USE_DEBUG_FLAG(D_NGINX_MANAGER);

bool
LetsEncryptListener::init()
{
    dbgInfo(D_NGINX_MANAGER) << "Starting Lets Encrypt Listener";
    return Singleton::Consume<I_RestApi>::by<CentralNginxManager>()->addWildcardGetCall(
        ".well-known/acme-challenge/",
        [&] (const string &uri) -> string
        {
            Maybe<string> maybe_challenge_value = getChallengeValue(uri);
            if (!maybe_challenge_value.ok()) {
                dbgWarning(D_NGINX_MANAGER)
                    << "Could not get challenge value for uri: "
                    << uri
                    << ", error: "
                    << maybe_challenge_value.getErr();
                return string{""};
            };

            dbgTrace(D_NGINX_MANAGER) << "Got challenge value: " << maybe_challenge_value.unpack();
            return maybe_challenge_value.unpack();
        }
    );
}

Maybe<string>
LetsEncryptListener::getChallengeValue(const string &uri) const
{
    string challenge_key = uri.substr(uri.find_last_of('/') + 1);
    string api_query = "/api/lets-encrypt-challenge?http_challenge_key=" + challenge_key;

    dbgInfo(D_NGINX_MANAGER) << "Getting challenge value via: " << api_query;

    MessageMetadata md;
    md.insertHeader("X-Tenant-Id", Singleton::Consume<I_AgentDetails>::by<CentralNginxManager>()->getTenantId());
    Maybe<HTTPResponse, HTTPResponse> maybe_http_challenge_value =
        Singleton::Consume<I_Messaging>::by<CentralNginxManager>()->sendSyncMessage(
            HTTPMethod::GET,
            api_query,
            string("{}"),
            MessageCategory::GENERIC,
            md
        );

    if (!maybe_http_challenge_value.ok()) return genError(maybe_http_challenge_value.getErr().getBody());

    string challenge_value = maybe_http_challenge_value.unpack().getBody();
    if (!challenge_value.empty() && challenge_value.front() == '"' && challenge_value.back() == '"') {
        challenge_value = challenge_value.substr(1, challenge_value.size() - 2);
    }

    return challenge_value;
}
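To make the listener's role concrete, here is a rough sketch of the ACME HTTP-01 exchange it serves; the hostname and token value are made up, and the internal API path is the one built by getChallengeValue above.

```bash
# 1. The ACME CA probes the protected site for the challenge token
#    (hostname and token are hypothetical).
curl http://example.com/.well-known/acme-challenge/k3yT0ken

# 2. The wildcard GET handler registered above extracts "k3yT0ken" from the URI
#    and resolves it through the management API with the tenant id header, roughly:
#    GET /api/lets-encrypt-challenge?http_challenge_key=k3yT0ken
#    The response body (surrounding quotes stripped) is returned to the CA as the
#    challenge value.
```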
@@ -88,9 +88,17 @@ public:
            dbgWarning(D_GEO_FILTER) << "failed to get source ip from env";
            return EventVerdict(default_action);
        }

        auto source_ip = convertIpAddrToString(maybe_source_ip.unpack());

        // saas profile setting
        bool ignore_source_ip =
            getProfileAgentSettingWithDefault<bool>(false, "agent.geoProtaction.ignoreSourceIP");
        if (ignore_source_ip){
            dbgDebug(D_GEO_FILTER) << "Geo protection ignoring source ip: " << source_ip;
        } else {
            ip_set.insert(convertIpAddrToString(maybe_source_ip.unpack()));
        }

        ngx_http_cp_verdict_e exception_verdict = getExceptionVerdict(ip_set);
        if (exception_verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT) {
@@ -343,7 +351,7 @@ private:

        auto asset_location = i_geo_location->lookupLocation(maybe_source_ip.unpack());
        if (!asset_location.ok()) {
            dbgDebug(D_GEO_FILTER) << "Lookup location failed for source: " <<
                source <<
                ", Error: " <<
                asset_location.getErr();
@@ -336,9 +336,16 @@
        return metadata.getYear();
    }

    bool
    isOk() const
    {
        return is_loaded;
    }

private:
    IPSSignatureMetaData metadata;
    std::shared_ptr<BaseSignature> rule;
    bool is_loaded;
};

/// \class SignatureAndAction
@@ -219,10 +219,16 @@ IPSSignatureMetaData::getYear() const
void
CompleteSignature::load(cereal::JSONInputArchive &ar)
{
    try {
        ar(cereal::make_nvp("protectionMetadata", metadata));
        RuleDetection rule_detection(metadata.getName());
        ar(cereal::make_nvp("detectionRules", rule_detection));
        rule = rule_detection.getRule();
        is_loaded = true;
    } catch (cereal::Exception &e) {
        is_loaded = false;
        dbgWarning(D_IPS) << "Failed to load signature: " << e.what();
    }
}

MatchType
@@ -367,7 +373,16 @@ SignatureAndAction::matchSilent(const Buffer &sample) const
    if (method.ok()) log << LogField("httpMethod", method.unpack());

    auto path = env->get<Buffer>("HTTP_PATH_DECODED");
    if (path.ok()) {
        log << LogField("httpUriPath", getSubString(path, 1536), LogFieldOption::XORANDB64);
    } else {
        auto transaction_path = env->get<string>(HttpTransactionData::uri_path_decoded);
        if (transaction_path.ok()) {
            auto uri_path = transaction_path.unpack();
            auto question_mark = uri_path.find('?');
            log << LogField("httpUriPath", uri_path.substr(0, question_mark), LogFieldOption::XORANDB64);
        }
    }

    auto req_header = ips_state.getTransactionData(IPSCommonTypes::requests_header_for_log);
    if (req_header.ok()) log << LogField("httpRequestHeaders", getSubString(req_header), LogFieldOption::XORANDB64);
@@ -485,13 +500,30 @@ SignatureAndAction::isMatchedPrevent(const Buffer &context_buffer, const set<PMP
    auto method = env->get<string>(HttpTransactionData::method_ctx);
    if (method.ok()) log << LogField("httpMethod", method.unpack());
    uint max_size = getConfigurationWithDefault<uint>(1536, "IPS", "Max Field Size");

    if (trigger.isWebLogFieldActive(url_path)) {
        auto path = env->get<Buffer>("HTTP_PATH_DECODED");
        if (path.ok()) {
            log << LogField("httpUriPath", getSubString(path, max_size), LogFieldOption::XORANDB64);
        } else {
            auto transaction_path = env->get<string>(HttpTransactionData::uri_path_decoded);
            if (transaction_path.ok()) {
                auto uri_path = transaction_path.unpack();
                auto question_mark = uri_path.find('?');
                log << LogField("httpUriPath", uri_path.substr(0, question_mark), LogFieldOption::XORANDB64);
            }
        }
    }
    if (trigger.isWebLogFieldActive(url_query)) {
        auto query = env->get<Buffer>("HTTP_QUERY_DECODED");
        if (query.ok()) {
            log << LogField("httpUriQuery", getSubString(query, max_size), LogFieldOption::XORANDB64);
        } else {
            auto transaction_query = env->get<string>(HttpTransactionData::uri_query_decoded);
            if (transaction_query.ok()) {
                log << LogField("httpUriQuery", transaction_query.unpack());
            }
        }
    }

    auto res_code = env->get<Buffer>("HTTP_RESPONSE_CODE");
@@ -533,7 +565,9 @@ IPSSignaturesResource::load(cereal::JSONInputArchive &ar)

    all_signatures.reserve(sigs.size());
    for (auto &sig : sigs) {
        if (sig.isOk()) {
            all_signatures.emplace_back(make_shared<CompleteSignature>(move(sig)));
        }
    }
}
@@ -104,6 +104,12 @@ public:
            cereal::JSONInputArchive ar(ss);
            high_medium_confidance_signatures.load(ar);
        }
        {
            stringstream ss;
            ss << "[" << signature_performance_high << ", " << signature_broken << "]";
            cereal::JSONInputArchive ar(ss);
            single_broken_signature.load(ar);
        }
    }

    ~SignatureTest()
@@ -250,6 +256,7 @@ public:
    IPSSignaturesResource performance_signatures1;
    IPSSignaturesResource performance_signatures2;
    IPSSignaturesResource performance_signatures3;
    IPSSignaturesResource single_broken_signature;
    NiceMock<MockTable> table;
    MockAgg mock_agg;

@@ -483,6 +490,26 @@ private:
            "\"context\": [\"HTTP_REQUEST_BODY\", \"HTTP_RESPONSE_BODY\"]"
            "}"
        "}";

    string signature_broken =
        "{"
            "\"protectionMetadata\": {"
                "\"protectionName\": \"BrokenTest\","
                "\"maintrainId\": \"101\","
                "\"severity\": \"Medium High\","
                "\"confidenceLevel\": \"Low\","
                "\"performanceImpact\": \"High\","
                "\"lastUpdate\": \"20210420\","
                "\"tags\": [],"
                "\"cveList\": []"
            "},"
            "\"detectionRules\": {"
                "\"type\": \"simple\","
                "\"SSM\": \"\","
                "\"keywosrds\": \"data: \\\"www\\\";\","
                "\"context\": [\"HTTP_REQUEST_BODY\", \"HTTP_RESPONSE_BODY\"]"
            "}"
        "}";
};

TEST_F(SignatureTest, basic_load_of_signatures)
@@ -665,3 +692,14 @@ TEST_F(SignatureTest, high_confidance_signatures_matching)
    expectLog("\"protectionId\": \"Test4\"", "\"matchedSignatureConfidence\": \"Medium\"");
    EXPECT_FALSE(checkData("mmm"));
}

TEST_F(SignatureTest, broken_signature)
{
    load(single_broken_signature, "Low or above", "Low");
    EXPECT_FALSE(checkData("ggg"));

    expectLog("\"matchedSignaturePerformance\": \"High\"");
    EXPECT_TRUE(checkData("fff"));

    EXPECT_FALSE(checkData("www"));
}
@@ -22,4 +22,5 @@ add_library(local_policy_mgmt_gen
    access_control_practice.cc
    configmaps.cc
    reverse_proxy_section.cc
    policy_activation_data.cc
)
@@ -48,7 +48,7 @@ public:
    void init();

    std::tuple<std::map<std::string, AppsecLinuxPolicy>, std::map<std::string, V1beta2AppsecLinuxPolicy>>
    createAppsecPolicies();
    void getClusterId() const;

private:
@@ -101,12 +101,18 @@ private:
    ) const;

    template<class T, class K>
    void createPolicyFromIngress(
        T &appsec_policy,
        std::map<std::string, T> &policies,
        std::map<AnnotationKeys, std::string> &annotations_values,
        const SingleIngressData &item) const;

    template<class T, class K>
    void createPolicyFromActivation(
        T &appsec_policy,
        std::map<std::string, T> &policies,
        const EnabledPolicy &policy) const;

    std::tuple<Maybe<AppsecLinuxPolicy>, Maybe<V1beta2AppsecLinuxPolicy>> createAppsecPolicyK8s(
        const std::string &policy_name,
        const std::string &ingress_mode
@@ -0,0 +1,90 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __POLICY_ACTIVATION_DATA_H__
#define __POLICY_ACTIVATION_DATA_H__

#include <vector>
#include <map>

#include "config.h"
#include "debug.h"
#include "rest.h"
#include "cereal/archives/json.hpp"
#include <cereal/types/map.hpp>
#include "customized_cereal_map.h"

#include "local_policy_common.h"

class PolicyActivationMetadata
{
public:
    void load(cereal::JSONInputArchive &archive_in);

private:
    std::string name;
};

class EnabledPolicy
{
public:
    void load(cereal::JSONInputArchive &archive_in);

    const std::string & getName() const;
    const std::vector<std::string> & getHosts() const;

private:
    std::string name;
    std::string mode;
    std::vector<std::string> hosts;
};

class PolicyActivationSpec
{
public:
    void load(cereal::JSONInputArchive &archive_in);

    const std::vector<EnabledPolicy> & getPolicies() const;

private:
    std::string appsec_class_name;
    std::vector<EnabledPolicy> policies;
};

class SinglePolicyActivationData
{
public:
    void load(cereal::JSONInputArchive &archive_in);

    const PolicyActivationSpec & getSpec() const;

private:
    std::string api_version;
    std::string kind;
    PolicyActivationMetadata metadata;
    PolicyActivationSpec spec;
};

class PolicyActivationData : public ClientRest
{
public:
    bool loadJson(const std::string &json);

    const std::vector<SinglePolicyActivationData> & getItems() const;

private:
    std::string api_version;
    std::vector<SinglePolicyActivationData> items;
};

#endif // __POLICY_ACTIVATION_DATA_H__
@@ -32,6 +32,7 @@
#include "i_messaging.h"
#include "appsec_practice_section.h"
#include "ingress_data.h"
#include "policy_activation_data.h"
#include "settings_section.h"
#include "triggers_section.h"
#include "local_policy_common.h"
@@ -577,7 +577,7 @@ K8sPolicyUtils::createAppsecPolicyK8s(const string &policy_name, const string &i

template<class T, class K>
void
K8sPolicyUtils::createPolicyFromIngress(
    T &appsec_policy,
    map<std::string, T> &policies,
    map<AnnotationKeys, string> &annotations_values,
@@ -615,10 +615,35 @@ K8sPolicyUtils::createPolicy(
    }
}

template<class T, class K>
void
K8sPolicyUtils::createPolicyFromActivation(
    T &appsec_policy,
    map<std::string, T> &policies,
    const EnabledPolicy &policy) const
{
    if (policies.find(policy.getName()) == policies.end()) {
        policies[policy.getName()] = appsec_policy;
    }
    auto default_mode = appsec_policy.getAppsecPolicySpec().getDefaultRule().getMode();

    for (const string &host : policy.getHosts()) {
        if (!appsec_policy.getAppsecPolicySpec().isAssetHostExist(host)) {
            dbgTrace(D_LOCAL_POLICY)
                << "Inserting Host data to the specific asset set:"
                << "URL: '"
                << host
                << "'";
            K ingress_rule = K(host, default_mode);
            policies[policy.getName()].addSpecificRule(ingress_rule);
        }
    }
}

std::tuple<map<string, AppsecLinuxPolicy>, map<string, V1beta2AppsecLinuxPolicy>>
K8sPolicyUtils::createAppsecPolicies()
{
    dbgFlow(D_LOCAL_POLICY) << "Getting all policy object from Ingresses and PolicyActivation";
    map<string, AppsecLinuxPolicy> v1bet1_policies;
    map<string, V1beta2AppsecLinuxPolicy> v1bet2_policies;
    auto maybe_ingress = getObjectFromCluster<IngressData>("/apis/networking.k8s.io/v1/ingresses");
@@ -628,7 +653,7 @@ K8sPolicyUtils::createAppsecPoliciesFromIngresses()
        dbgWarning(D_LOCAL_POLICY)
            << "Failed to retrieve K8S Ingress configurations. Error: "
            << maybe_ingress.getErr();
        maybe_ingress = IngressData{};
    }

@@ -658,19 +683,50 @@ K8sPolicyUtils::createAppsecPoliciesFromIngresses()

        if (!std::get<0>(maybe_appsec_policy).ok()) {
            auto appsec_policy=std::get<1>(maybe_appsec_policy).unpack();
            createPolicyFromIngress<V1beta2AppsecLinuxPolicy, NewParsedRule>(
                appsec_policy,
                v1bet2_policies,
                annotations_values,
                item);
        } else {
            auto appsec_policy=std::get<0>(maybe_appsec_policy).unpack();
            createPolicyFromIngress<AppsecLinuxPolicy, ParsedRule>(
                appsec_policy,
                v1bet1_policies,
                annotations_values,
                item);
        }
    }

    auto maybe_policy_activation =
        getObjectFromCluster<PolicyActivationData>("/apis/openappsec.io/v1beta2/policyactivations");

    if (!maybe_policy_activation.ok()) {
        dbgWarning(D_LOCAL_POLICY)
            << "Failed to retrieve K8S PolicyActivation configurations. Error: "
            << maybe_policy_activation.getErr();
        return make_tuple(v1bet1_policies, v1bet2_policies);
    }

    PolicyActivationData policy_activation = maybe_policy_activation.unpack();
    for (const SinglePolicyActivationData &item : policy_activation.getItems()) {
        for (const auto &policy : item.getSpec().getPolicies()) {
            auto maybe_appsec_policy = createAppsecPolicyK8s(policy.getName(), "");

            if (!std::get<1>(maybe_appsec_policy).ok()) {
                dbgWarning(D_LOCAL_POLICY)
                    << "Failed to create appsec policy. v1beta2 Error: "
                    << std::get<1>(maybe_appsec_policy).getErr();
                continue;
            } else {
                auto appsec_policy=std::get<1>(maybe_appsec_policy).unpack();
                createPolicyFromActivation<V1beta2AppsecLinuxPolicy, NewParsedRule>(
                    appsec_policy,
                    v1bet2_policies,
                    policy);
            }
        }
    }

    return make_tuple(v1bet1_policies, v1bet2_policies);
}
@@ -36,6 +36,7 @@
#include "customized_cereal_map.h"
#include "include/appsec_practice_section.h"
#include "include/ingress_data.h"
#include "include/policy_activation_data.h"
#include "include/settings_section.h"
#include "include/triggers_section.h"
#include "include/local_policy_common.h"
@@ -85,7 +86,7 @@ public:
        K8sPolicyUtils k8s_policy_utils;
        k8s_policy_utils.init();

        auto appsec_policies = k8s_policy_utils.createAppsecPolicies();
        if (!std::get<0>(appsec_policies).empty()) {
            return policy_maker_utils.proccesMultipleAppsecPolicies<AppsecLinuxPolicy, ParsedRule>(
                std::get<0>(appsec_policies),
|||||||
116
components/security_apps/local_policy_mgmt_gen/policy_activation_data.cc
Executable file
116
components/security_apps/local_policy_mgmt_gen/policy_activation_data.cc
Executable file
@@ -0,0 +1,116 @@
|
|||||||
|
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||||
|
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "policy_activation_data.h"
|
||||||
|
#include "customized_cereal_map.h"
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
|
||||||
|
USE_DEBUG_FLAG(D_LOCAL_POLICY);
|
||||||
|
|
||||||
|
static const set<string> valid_modes = {
|
||||||
|
"prevent-learn",
|
||||||
|
"detect-learn",
|
||||||
|
"prevent",
|
||||||
|
"detect",
|
||||||
|
"inactive"
|
||||||
|
};
|
||||||
|
|
||||||
|
void
|
||||||
|
PolicyActivationMetadata::load(cereal::JSONInputArchive &archive_in)
|
||||||
|
{
|
||||||
|
dbgTrace(D_LOCAL_POLICY) << "PolicyActivationMetadata load";
|
||||||
|
parseAppsecJSONKey<string>("name", name, archive_in);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
EnabledPolicy::load(cereal::JSONInputArchive &archive_in)
|
||||||
|
{
|
||||||
|
dbgTrace(D_LOCAL_POLICY) << "Loading policyActivation enabled policy";
|
||||||
|
parseMandatoryAppsecJSONKey<vector<string>>("hosts", hosts, archive_in);
|
||||||
|
parseAppsecJSONKey<string>("name", name, archive_in);
|
||||||
|
parseAppsecJSONKey<string>("mode", mode, archive_in, "detect");
|
||||||
|
if (valid_modes.count(mode) == 0) {
|
||||||
|
dbgWarning(D_LOCAL_POLICY) << "AppSec policy activation mode invalid: " << mode;
|
||||||
|
mode = "detect";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const string &
|
||||||
|
EnabledPolicy::getName() const
|
||||||
|
{
|
||||||
|
return name;
|
||||||
|
}
|
||||||
|
|
||||||
|
const vector<string> &
|
||||||
|
EnabledPolicy::getHosts() const
|
||||||
|
{
|
||||||
|
return hosts;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
PolicyActivationSpec::load(cereal::JSONInputArchive &archive_in)
|
||||||
|
{
|
||||||
|
dbgTrace(D_LOCAL_POLICY) << "PolicyActivationSpec load";
|
||||||
|
parseAppsecJSONKey<string>("appsecClassName", appsec_class_name, archive_in);
|
||||||
|
parseMandatoryAppsecJSONKey<vector<EnabledPolicy>>("enabledPolicies", policies, archive_in);
|
||||||
|
}
|
||||||
|
|
||||||
|
const vector<EnabledPolicy> &
|
||||||
|
PolicyActivationSpec::getPolicies() const
|
||||||
|
{
|
||||||
|
return policies;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
SinglePolicyActivationData::load(cereal::JSONInputArchive &archive_in)
|
||||||
|
{
|
||||||
|
dbgTrace(D_LOCAL_POLICY) << "Loading single policy activation data";
|
||||||
|
parseAppsecJSONKey<string>("apiVersion", api_version, archive_in);
|
||||||
|
parseAppsecJSONKey<string>("kind", kind, archive_in);
|
||||||
|
parseAppsecJSONKey<PolicyActivationMetadata>("metadata", metadata, archive_in);
|
||||||
|
parseAppsecJSONKey<PolicyActivationSpec>("spec", spec, archive_in);
|
||||||
|
}
|
||||||
|
|
||||||
|
const PolicyActivationSpec &
|
||||||
|
SinglePolicyActivationData::getSpec() const
|
||||||
|
{
|
||||||
|
return spec;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool
|
||||||
|
PolicyActivationData::loadJson(const string &json)
|
||||||
|
{
|
||||||
|
string modified_json = json;
|
||||||
|
modified_json.pop_back();
|
||||||
|
stringstream in;
|
||||||
|
in.str(modified_json);
|
||||||
|
dbgTrace(D_LOCAL_POLICY) << "Loading policy activations data";
|
||||||
|
try {
|
||||||
|
cereal::JSONInputArchive in_ar(in);
|
||||||
|
in_ar(
|
||||||
|
cereal::make_nvp("apiVersion", api_version),
|
||||||
|
cereal::make_nvp("items", items)
|
||||||
|
);
|
||||||
|
} catch (cereal::Exception &e) {
|
||||||
|
dbgError(D_LOCAL_POLICY) << "Failed to load policy activations data JSON. Error: " << e.what();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
const vector<SinglePolicyActivationData> &
|
||||||
|
PolicyActivationData::getItems() const
|
||||||
|
{
|
||||||
|
return items;
|
||||||
|
}
|
||||||
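For orientation, the following is a hypothetical PolicyActivation resource that the parsers above would accept; the field names mirror the keys parsed here (apiVersion, kind, metadata.name, spec.appsecClassName, spec.enabledPolicies with name, mode, and hosts), while the kind string and all values are assumptions made only for illustration.

```bash
kubectl apply -f - <<EOF
apiVersion: openappsec.io/v1beta2
kind: PolicyActivation
metadata:
  name: example-policy-activation
spec:
  appsecClassName: ""
  enabledPolicies:
    - name: example-appsec-policy
      mode: detect
      hosts:
        - "example.com"
EOF
```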
@@ -14,7 +14,6 @@ add_subdirectory(details_resolver)
add_subdirectory(health_check)
add_subdirectory(health_check_manager)
add_subdirectory(updates_process_reporter)
add_subdirectory(external_sdk_server)

#add_subdirectory(orchestration_ut)
@@ -29,7 +29,7 @@
// shell command execution output as its input

#ifdef SHELL_PRE_CMD
#if defined(gaia) || defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
SHELL_PRE_CMD("read sdwan data",
    "(cpsdwan get_data > /tmp/cpsdwan_getdata_orch.json~) "
    "&& (mv /tmp/cpsdwan_getdata_orch.json~ /tmp/cpsdwan_getdata_orch.json)")
@@ -40,7 +40,7 @@ SHELL_PRE_CMD("gunzip local.cfg", "gunzip -c $FWDIR/state/local/FW1/local.cfg.gz
#endif

#ifdef SHELL_CMD_HANDLER
#if defined(gaia) || defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
SHELL_CMD_HANDLER("cpProductIntegrationMgmtObjectType", "cpprod_util CPPROD_IsMgmtMachine", getMgmtObjType)
SHELL_CMD_HANDLER(
    "cpProductIntegrationMgmtObjectUid",
@@ -51,6 +51,14 @@ SHELL_CMD_HANDLER("prerequisitesForHorizonTelemetry",
    "FS_PATH=<FILESYSTEM-PREFIX>; [ -f ${FS_PATH}/cp-nano-horizon-telemetry-prerequisites.log ] "
    "&& head -1 ${FS_PATH}/cp-nano-horizon-telemetry-prerequisites.log || echo ''",
    checkIsInstallHorizonTelemetrySucceeded)
SHELL_CMD_HANDLER(
    "IS_AIOPS_RUNNING",
    "FS_PATH=<FILESYSTEM-PREFIX>; "
    "PID=$(ps auxf | grep -v grep | grep -E ${FS_PATH}.*cp-nano-horizon-telemetry | awk -F' ' '{printf $2}'); "
    "[ -z \"${PID}\" ] && echo 'false' || echo 'true'",
    getIsAiopsRunning)
#endif
#if defined(gaia)
SHELL_CMD_HANDLER("GLOBAL_QUID", "[ -d /opt/CPquid ] "
    "&& python3 /opt/CPquid/Quid_Api.py -i /opt/CPotelcol/quid_api/get_global_id.json | jq -r .message || echo ''",
    getQUID)
@@ -76,12 +84,21 @@ SHELL_CMD_HANDLER("MGMT_QUID", "[ -d /opt/CPquid ] "
SHELL_CMD_HANDLER("AIOPS_AGENT_ROLE", "[ -d /opt/CPOtlpAgent/custom_scripts ] "
    "&& ENV_NO_FORMAT=1 /opt/CPOtlpAgent/custom_scripts/agent_role.sh",
    getOtlpAgentGaiaOsRole)
#endif
#if defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
SHELL_CMD_HANDLER("GLOBAL_QUID",
    "cat $FWDIR/database/myown.C "
    "| awk -F'[()]' '/:name/ { found=1; next } found && /:uuid/ { uid=tolower($2); print uid; exit }'",
    getQUID)
SHELL_CMD_HANDLER("QUID",
    "cat $FWDIR/database/myown.C "
    "| awk -F'[()]' '/:name/ { found=1; next } found && /:uuid/ { uid=tolower($2); print uid; exit }'",
    getQUID)
SHELL_CMD_HANDLER("SMO_QUID", "echo ''", getQUID)
SHELL_CMD_HANDLER("MGMT_QUID", "echo ''", getQUID)
SHELL_CMD_HANDLER("AIOPS_AGENT_ROLE", "echo 'SMB'", getOtlpAgentGaiaOsRole)
#endif
#if defined(gaia) || defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
SHELL_CMD_HANDLER("hasSDWan", "[ -f $FWDIR/bin/sdwan_steering ] && echo '1' || echo '0'", checkHasSDWan)
SHELL_CMD_HANDLER(
    "canUpdateSDWanData",
@@ -194,7 +211,7 @@ SHELL_CMD_HANDLER(
)
#endif //gaia

#if defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
SHELL_CMD_HANDLER(
    "cpProductIntegrationMgmtParentObjectName",
    "jq -r .cluster_name /tmp/cpsdwan_getdata_orch.json",
@@ -252,7 +269,6 @@ SHELL_CMD_HANDLER(

SHELL_CMD_OUTPUT("kernel_version", "uname -r")
SHELL_CMD_OUTPUT("helloWorld", "cat /tmp/agentHelloWorld 2>/dev/null")
#endif // SHELL_CMD_OUTPUT

@@ -282,7 +298,7 @@ FILE_CONTENT_HANDLER("AppSecModelVersion", "<FILESYSTEM-PREFIX>/conf/waap/waap.d
#endif // FILE_CONTENT_HANDLER

#ifdef SHELL_POST_CMD
#if defined(smb) || defined(smb_thx_v3) || defined(smb_sve_v2) || defined(smb_mrv_v1)
SHELL_POST_CMD("remove local.cfg", "rm -rf /tmp/local.cfg")
#endif //smb
#endif
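As a worked example of the SMB UUID handlers added above, the awk program pulls the first :uuid value that follows a :name entry and lowercases it; the myown.C excerpt below is invented to show the shape of the input, not taken from a real gateway.

```bash
# Hypothetical excerpt of $FWDIR/database/myown.C:
#     :name (my-smb-gateway)
#     :uuid (41E821DC-8CEF-4DE2-8E2F-0123456789AB)
awk -F'[()]' '/:name/ { found=1; next } found && /:uuid/ { uid=tolower($2); print uid; exit }' myown.C
# prints: 41e821dc-8cef-4de2-8e2f-0123456789ab
```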
@@ -266,10 +266,10 @@ private:
        case OrchestrationStatusFieldType::COUNT : return "Count";
    }

    dbgAssertOpt(false)
        << AlertInfo(AlertTeam::CORE, "orchestration health")
        << "Trying to convert unknown orchestration status field to string.";
    return "Unknown Field";
}

HealthCheckStatus
@@ -282,7 +282,7 @@ private:
        case UpdatesProcessResult::DEGRADED : return HealthCheckStatus::DEGRADED;
    }

    dbgAssertOpt(false)
        << AlertInfo(AlertTeam::CORE, "orchestration health")
        << "Trying to convert unknown update process result field to health check status.";
    return HealthCheckStatus::IGNORED;
@@ -429,7 +429,7 @@ public:
            status.insertServiceSetting(service_name, path);
            return;
        case OrchestrationStatusConfigType::MANIFEST:
            dbgAssertOpt(false)
                << AlertInfo(AlertTeam::CORE, "sesrvice configuration")
                << "Manifest is not a service configuration file type";
            break;
@@ -438,7 +438,9 @@ public:
        case OrchestrationStatusConfigType::COUNT:
            break;
    }
    dbgAssertOpt(false)
        << AlertInfo(AlertTeam::CORE, "service configuration")
        << "Unknown configuration file type";
}

void
@@ -1587,6 +1587,7 @@ private:
        }

        setDelayedUpgradeTime();

        while (true) {
            Singleton::Consume<I_Environment>::by<OrchestrationComp>()->startNewTrace(false);
            if (shouldReportAgentDetailsMetadata()) {
@@ -1630,7 +1631,7 @@ private:

        string server_name = getAttribute("registered-server", "registered_server");
        auto server = TagAndEnumManagement::convertStringToTag(server_name);
        if (server_name == "'SWAG'" || server_name == "'SWAG Server'") server = Tags::WEB_SERVER_SWAG;
        if (server.ok()) tags.insert(*server);

        if (getAttribute("no-setting", "CROWDSEC_ENABLED") == "true") tags.insert(Tags::CROWDSEC);
@@ -1695,13 +1696,19 @@ private:
        auto backup_installation_file = current_installation_file + backup_ext;
        auto temp_ext = getConfigurationWithDefault<string>("_temp", "orchestration", "Temp file extension");

        if (!i_orchestration_tools->doesFileExist(backup_installation_file)) {
            dbgAssertOpt(false)
                << AlertInfo(AlertTeam::CORE, "orchestration backup")
                << "There is no backup installation package";
            return;
        }

        if (!i_orchestration_tools->copyFile(backup_installation_file, current_installation_file)) {
            dbgAssertOpt(false)
                << AlertInfo(AlertTeam::CORE, "orchestration backup")
                << "Failed to copy backup installation package";
            return;
        }

        // Copy the backup manifest file to the default manifest file path.
        auto manifest_file_path = getConfigurationWithDefault<string>(
@@ -1716,12 +1723,18 @@ private:

        auto package_handler = Singleton::Consume<I_PackageHandler>::by<OrchestrationComp>();
        // Install the backup orchestration service installation package.
        if (!package_handler->preInstallPackage(service_name, current_installation_file)) {
            dbgAssertOpt(false)
                << AlertInfo(AlertTeam::CORE, "orchestration backup")
                << "Failed to restore from backup, pre install test failed";
            return;
        }
        if (!package_handler->installPackage(service_name, current_installation_file, true)) {
            dbgAssertOpt(false)
                << AlertInfo(AlertTeam::CORE, "orchestration backup")
                << "Failed to restore from backup, installation failed";
            return;
        }
    }
    // LCOV_EXCL_STOP

@@ -2055,7 +2068,6 @@ private:
    OrchestrationPolicy policy;
    UpdatesProcessReporter updates_process_reporter_listener;
    HybridModeMetric hybrid_mode_metric;
    chrono::minutes upgrade_delay_time;

    string filesystem_prefix = "";
@@ -386,7 +386,7 @@ OrchestrationTools::Impl::calculateChecksum(Package::ChecksumTypes checksum_type
        return genError("Error while reading file " + path + ", " + e.what());
    }

    dbgAssertOpt(false)
        << AlertInfo(AlertTeam::CORE, "service configuration")
        << "Checksum type is not supported. Checksum type: "
        << static_cast<unsigned int>(checksum_type);
@@ -28,6 +28,7 @@ std::ostream & operator<<(std::ostream &os, const Package &) { return os; }
#include "health_check_status/health_check_status.h"
#include "updates_process_event.h"
#include "declarative_policy_utils.h"
#include "mock/mock_env_details.h"

using namespace testing;
using namespace std;
@@ -324,6 +325,7 @@ public:
    StrictMock<MockOrchestrationTools> mock_orchestration_tools;
    StrictMock<MockDownloader> mock_downloader;
    StrictMock<MockShellCmd> mock_shell_cmd;
    StrictMock<EnvDetailsMocker> mock_env_details;
    StrictMock<MockMessaging> mock_message;
    StrictMock<MockRestApi> rest;
    StrictMock<MockServiceController> mock_service_controller;
@@ -583,6 +585,8 @@ TEST_F(OrchestrationTest, check_sending_registration_data)
    env.init();
    init();

    EXPECT_CALL(mock_env_details, getEnvType()).WillRepeatedly(Return(EnvType::LINUX));

    EXPECT_CALL(mock_service_controller, updateServiceConfiguration(_, _, _, _, _, _))
        .WillOnce(Return(Maybe<void>()));
    EXPECT_CALL(mock_orchestration_tools, calculateChecksum(_, _)).WillRepeatedly(Return(string()));
@@ -141,11 +141,11 @@ packageHandlerActionsToString(PackageHandlerActions action)
        }
    }

    dbgAssertOpt(false)
        << AlertInfo(AlertTeam::CORE, "service configuration")
        << "Package handler action is not supported. Action: "
        << static_cast<unsigned int>(action);
    return string("--UNSUPPORTED");
}

void
@@ -467,9 +467,9 @@ getDeplymentType()
        case EnvType::COUNT: break;
    }

    dbgAssertOpt(false)
        << AlertInfo(AlertTeam::CORE, "fog communication")
        << "Failed to get a legitimate deployment type: "
        << static_cast<uint>(deplyment_type);
    return "Embedded";
}
@@ -246,6 +246,27 @@ public:
        return matched_rule;
    }

    void
    fetchReplicaCount()
    {
        string curl_cmd =
            "curl -H \"Authorization: Bearer " + kubernetes_token + "\" "
            "https://kubernetes.default.svc.cluster.local/apis/apps/v1/namespaces/" + kubernetes_namespace +
            "/deployments/${AGENT_DEPLOYMENT_NAME} -k -s | jq .status.replicas";
        auto maybe_replicas = i_shell_cmd->getExecOutput(curl_cmd);
        if (maybe_replicas.ok()) {
            try {
                replicas = std::stoi(maybe_replicas.unpack());
            } catch (const std::exception &e) {
                dbgWarning(D_RATE_LIMIT) << "error while converting replicas: " << e.what();
            }
        }
        if (replicas == 0) {
            dbgWarning(D_RATE_LIMIT) << "replicas is set to 0, setting replicas to 1";
            replicas = 1;
        }
    }

    EventVerdict
    respond(const HttpRequestHeaderEvent &event) override
    {
@@ -271,10 +292,72 @@ public:
        dbgDebug(D_RATE_LIMIT) << "source identifier value: " << source_identifier;

        auto maybe_source_ip = env->get<IPAddr>(HttpTransactionData::client_ip_ctx);
        set<string> ip_set;
        string source_ip = "";
        if (maybe_source_ip.ok()) {
            source_ip = ipAddrToStr(maybe_source_ip.unpack());

            if (getProfileAgentSettingWithDefault<bool>(false, "agent.rateLimit.ignoreSourceIP")) {
                dbgDebug(D_RATE_LIMIT) << "Rate limit ignoring source ip: " << source_ip;
            } else {
                ip_set.insert(source_ip);
            }
        }

        auto maybe_xff = env->get<string>(HttpTransactionData::xff_vals_ctx);
        if (!maybe_xff.ok()) {
            dbgTrace(D_RATE_LIMIT) << "Rate limit failed to get xff vals from env";
        } else {
            auto ips = split(maybe_xff.unpack(), ',');
            ip_set.insert(ips.begin(), ips.end());
        }

        EnumArray<I_GeoLocation::GeoLocationField, string> geo_location_data;
        set<string> country_codes;
        set<string> country_names;
        for (const string& source : ip_set) {
            Maybe<IPAddr> maybe_source_ip = IPAddr::createIPAddr(source);
            if (!maybe_source_ip.ok()){
                dbgWarning(D_RATE_LIMIT)
                    << "Rate limit failed to create ip address from source: "
                    << source
                    << ", Error: "
                    << maybe_source_ip.getErr();
                continue;
            }
            auto asset_location =
                Singleton::Consume<I_GeoLocation>::by<RateLimit>()->lookupLocation(maybe_source_ip.unpack());
            if (!asset_location.ok()) {
                dbgWarning(D_RATE_LIMIT)
                    << "Rate limit lookup location failed for source: "
                    << source_ip
                    << ", Error: "
                    << asset_location.getErr();
                continue;
            }
            geo_location_data = asset_location.unpack();
            auto code = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_CODE];
            auto name = geo_location_data[I_GeoLocation::GeoLocationField::COUNTRY_NAME];
            country_codes.insert(code);
            country_names.insert(name);
            dbgTrace(D_RATE_LIMIT)
                << "Rate limit found "
                << "country code: "
                << code
                << ", country name: "
                << name
                << ", source ip address: "
                << source;
        }

        unordered_map<string, set<string>> condition_map = createConditionMap(
            uri,
            source_ip,
            source_identifier,
            country_codes,
            country_names
        );
        if (shouldApplyException(condition_map)) {
            dbgDebug(D_RATE_LIMIT) << "found accept exception, not enforcing rate limit on this URI: " << uri;
            return ACCEPT;
@@ -293,11 +376,6 @@ public:
            return ACCEPT;
        }

        burst = static_cast<float>(rule.getRateLimit()) / replicas;
        limit = static_cast<float>(calcRuleLimit(rule)) / replicas;

@@ -476,10 +554,18 @@ public:
    }

    unordered_map<string, set<string>>
    createConditionMap(
        const string &uri,
        const string &source_ip,
        const string &source_identifier,
        const set<string> &country_codes,
        const set<string> &country_names
    )
    {
        unordered_map<string, set<string>> condition_map;
        if (!source_ip.empty()) condition_map["sourceIP"].insert(source_ip);
        if (!country_codes.empty()) condition_map["countryCode"].insert(country_codes.begin(), country_codes.end());
        if (!country_names.empty()) condition_map["countryName"].insert(country_names.begin(), country_names.end());
        condition_map["sourceIdentifier"].insert(source_identifier);
        condition_map["url"].insert(uri);

@@ -616,6 +702,21 @@ public:
|
|||||||
"Initialize rate limit component",
|
"Initialize rate limit component",
|
||||||
false
|
false
|
||||||
);
|
);
|
||||||
|
|
||||||
|
i_shell_cmd = Singleton::Consume<I_ShellCmd>::by<RateLimit>();
|
||||||
|
i_env_details = Singleton::Consume<I_EnvDetails>::by<RateLimit>();
|
||||||
|
env_type = i_env_details->getEnvType();
|
||||||
|
if (env_type == EnvType::K8S) {
|
||||||
|
kubernetes_token = i_env_details->getToken();
|
||||||
|
kubernetes_namespace = i_env_details->getNameSpace();
|
||||||
|
fetchReplicaCount();
|
||||||
|
Singleton::Consume<I_MainLoop>::by<RateLimit>()->addRecurringRoutine(
|
||||||
|
I_MainLoop::RoutineType::Offline,
|
||||||
|
chrono::seconds(120),
|
||||||
|
[this]() { fetchReplicaCount(); },
|
||||||
|
"Fetch current replica count from the Kubernetes cluster"
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@@ -624,6 +725,9 @@ public:
|
|||||||
disconnectRedis();
|
disconnectRedis();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
I_ShellCmd *i_shell_cmd = nullptr;
|
||||||
|
I_EnvDetails* i_env_details = nullptr;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
static constexpr auto DROP = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
|
static constexpr auto DROP = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
|
||||||
static constexpr auto ACCEPT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
static constexpr auto ACCEPT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
||||||
@@ -634,6 +738,10 @@ private:
|
|||||||
int burst;
|
int burst;
|
||||||
float limit;
|
float limit;
|
||||||
redisContext* redis = nullptr;
|
redisContext* redis = nullptr;
|
||||||
|
int replicas = 1;
|
||||||
|
EnvType env_type;
|
||||||
|
string kubernetes_namespace = "";
|
||||||
|
string kubernetes_token = "";
|
||||||
};
|
};
|
||||||
|
|
||||||
RateLimit::RateLimit() : Component("RateLimit"), pimpl(make_unique<Impl>()) {}
|
RateLimit::RateLimit() : Component("RateLimit"), pimpl(make_unique<Impl>()) {}
|
||||||
|
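For context on the replica-aware change above: each agent pod enforces its share of the policy independently, so the configured limit and burst are divided by the deployment's replica count, keeping the aggregate across all pods close to the configured value. A minimal sketch of that arithmetic, using hypothetical names (`configured_limit`, `replica_count`) that are not part of the diff:

```cpp
#include <algorithm>
#include <iostream>

// Illustration only: per-replica share of a cluster-wide rate limit.
// The component above obtains the replica count from the Kubernetes API
// (fetchReplicaCount()) and refreshes it every 120 seconds.
static float
perReplicaLimit(int configured_limit, int replica_count)
{
    int replicas = std::max(replica_count, 1); // guard against 0, as the diff does
    return static_cast<float>(configured_limit) / replicas;
}

int
main()
{
    std::cout << perReplicaLimit(100, 4) << '\n'; // 25 requests per unit, per pod
    return 0;
}
```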
@@ -194,6 +194,10 @@ void SerializeToFileBase::saveData()
            dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to gzip data";
        } else {
            ss.str(string((const char *)res.output, res.num_output_bytes));
+            // free the memory allocated by compressData
+            if (res.output) free(res.output);
+            res.output = nullptr;
+            res.num_output_bytes = 0;
        }
        if (res.output) free(res.output);
        res.output = nullptr;
@@ -112,20 +112,6 @@ double Waap::Scanner::getScoreData(Waf2ScanResult& res, const std::string &poolName)
    }
    double res_score = getScoreFromPool(res, newKeywords, poolName);

-    std::string other_pool_name = Waap::Scores::getOtherScorePoolName();
-    Waap::Scores::ModelLoggingSettings modelLoggingSettings = Waap::Scores::getModelLoggingSettings();
-
-    if (applyLearning && poolName != other_pool_name &&
-        modelLoggingSettings.logLevel != Waap::Scores::ModelLogLevel::OFF) {
-        double other_score = getScoreFromPool(res, newKeywords, other_pool_name);
-        dbgDebug(D_WAAP_SCANNER) << "Comparing score from pool " << poolName << ": " << res_score
-            << ", vs. pool " << other_pool_name << ": " << other_score
-            << ", score difference: " << res_score - other_score
-            << ", sample: " << res.unescaped_line;
-        res.other_model_score = other_score;
-    } else {
-        res.other_model_score = res_score;
-    }
    return res_score;
}

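A short note on the memory fix above: the compressed bytes are copied into the stringstream and the buffer is released immediately, so the later unconditional free becomes a no-op once the pointer is nulled. A minimal sketch of the same ownership pattern, assuming a hypothetical result struct that mirrors the shape used in the diff (not the real compression_utils API):

```cpp
#include <cstdlib>
#include <sstream>
#include <string>

// Hypothetical result type: a malloc'd buffer plus its length.
struct CompressionResult {
    unsigned char *output = nullptr;
    size_t num_output_bytes = 0;
};

void
consumeCompressed(std::stringstream &ss, CompressionResult &res)
{
    // Copy the compressed bytes out, then release the buffer right away so a
    // later cleanup path cannot double-free or leak it.
    ss.str(std::string(reinterpret_cast<const char *>(res.output), res.num_output_bytes));
    if (res.output) free(res.output);
    res.output = nullptr;
    res.num_output_bytes = 0;
}
```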
@@ -5,3 +5,5 @@ add_subdirectory(ip_utilities)
 add_subdirectory(keywords)
 add_subdirectory(pm)
 add_subdirectory(service_health_status)
+add_subdirectory(nginx_utils)
+add_subdirectory(utilities)
components/utils/nginx_utils/CMakeLists.txt (new executable file, 1 line)
@@ -0,0 +1 @@
+add_library(nginx_utils nginx_utils.cc)
281
components/utils/nginx_utils/nginx_utils.cc
Executable file
281
components/utils/nginx_utils/nginx_utils.cc
Executable file
@@ -0,0 +1,281 @@
|
|||||||
|
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||||
|
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include "nginx_utils.h"
|
||||||
|
|
||||||
|
#include <iostream>
|
||||||
|
#include <fstream>
|
||||||
|
#include <sstream>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <dirent.h>
|
||||||
|
#include <boost/regex.hpp>
|
||||||
|
|
||||||
|
#include "debug.h"
|
||||||
|
#include "maybe_res.h"
|
||||||
|
#include "config.h"
|
||||||
|
#include "agent_core_utilities.h"
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
|
||||||
|
USE_DEBUG_FLAG(D_NGINX_MANAGER);
|
||||||
|
|
||||||
|
NginxConfCollector::NginxConfCollector(const string &input_path, const string &output_path)
|
||||||
|
:
|
||||||
|
main_conf_input_path(input_path),
|
||||||
|
main_conf_output_path(output_path)
|
||||||
|
{
|
||||||
|
main_conf_directory_path = main_conf_input_path.substr(0, main_conf_input_path.find_last_of('/'));
|
||||||
|
}
|
||||||
|
|
||||||
|
vector<string>
|
||||||
|
NginxConfCollector::expandIncludes(const string &include_pattern) const {
|
||||||
|
vector<string> matching_files;
|
||||||
|
string absolute_include_pattern = include_pattern;
|
||||||
|
string maybe_directory = include_pattern.substr(0, include_pattern.find_last_of('/'));
|
||||||
|
if (!maybe_directory.empty() && maybe_directory.front() != '/') {
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Include pattern is a relative path: " << include_pattern;
|
||||||
|
maybe_directory = main_conf_directory_path + '/' + maybe_directory;
|
||||||
|
absolute_include_pattern = main_conf_directory_path + '/' + include_pattern;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!NGEN::Filesystem::exists(maybe_directory)) {
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Include pattern directory/file does not exist: " << maybe_directory;
|
||||||
|
return matching_files;
|
||||||
|
}
|
||||||
|
|
||||||
|
string filename_pattern = absolute_include_pattern.substr(absolute_include_pattern.find_last_of('/') + 1);
|
||||||
|
boost::regex wildcard_regex("\\*");
|
||||||
|
boost::regex pattern(
|
||||||
|
NGEN::Regex::regexReplace(__FILE__, __LINE__, filename_pattern, wildcard_regex, string("[^/]*"))
|
||||||
|
);
|
||||||
|
|
||||||
|
if (!NGEN::Filesystem::isDirectory(maybe_directory)) {
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Include pattern is a file: " << absolute_include_pattern;
|
||||||
|
matching_files.push_back(absolute_include_pattern);
|
||||||
|
return matching_files;
|
||||||
|
}
|
||||||
|
|
||||||
|
DIR* dir = opendir(maybe_directory.c_str());
|
||||||
|
if (!dir) {
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Could not open directory: " << maybe_directory;
|
||||||
|
return matching_files;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct dirent *entry;
|
||||||
|
while ((entry = readdir(dir)) != nullptr) {
|
||||||
|
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) continue;
|
||||||
|
|
||||||
|
if (NGEN::Regex::regexMatch(__FILE__, __LINE__, entry->d_name, pattern)) {
|
||||||
|
matching_files.push_back(maybe_directory + "/" + entry->d_name);
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Matched file: " << maybe_directory << '/' << entry->d_name;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
closedir(dir);
|
||||||
|
|
||||||
|
return matching_files;
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
NginxConfCollector::processConfigFile(const string &path, ostringstream &conf_output, vector<string> &errors) const
|
||||||
|
{
|
||||||
|
ifstream file(path);
|
||||||
|
if (!file.is_open()) return;
|
||||||
|
|
||||||
|
string content((istreambuf_iterator<char>(file)), istreambuf_iterator<char>());
|
||||||
|
file.close();
|
||||||
|
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Processing file: " << path;
|
||||||
|
|
||||||
|
if (content.empty()) return;
|
||||||
|
|
||||||
|
try {
|
||||||
|
boost::regex include_regex(R"(^\s*include\s+([^;]+);)");
|
||||||
|
boost::smatch match;
|
||||||
|
|
||||||
|
while (NGEN::Regex::regexSearch(__FILE__, __LINE__, content, match, include_regex)) {
|
||||||
|
string include_pattern = match[1].str();
|
||||||
|
include_pattern = NGEN::Strings::trim(include_pattern);
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Include pattern: " << include_pattern;
|
||||||
|
|
||||||
|
vector<string> included_files = expandIncludes(include_pattern);
|
||||||
|
if (included_files.empty()) {
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "No files matched the include pattern: " << include_pattern;
|
||||||
|
content.replace(match.position(), match.length(), "");
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
ostringstream included_content;
|
||||||
|
for (const string &included_file : included_files) {
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Processing included file: " << included_file;
|
||||||
|
processConfigFile(included_file, included_content, errors);
|
||||||
|
}
|
||||||
|
content.replace(match.position(), match.length(), included_content.str());
|
||||||
|
}
|
||||||
|
} catch (const boost::regex_error &e) {
|
||||||
|
errors.emplace_back(e.what());
|
||||||
|
return;
|
||||||
|
} catch (const exception &e) {
|
||||||
|
errors.emplace_back(e.what());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
conf_output << content;
|
||||||
|
}
|
||||||
|
|
||||||
|
Maybe<string>
|
||||||
|
NginxConfCollector::generateFullNginxConf() const
|
||||||
|
{
|
||||||
|
if (!NGEN::Filesystem::exists(main_conf_input_path)) {
|
||||||
|
return genError("Input file does not exist: " + main_conf_input_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
ostringstream conf_output;
|
||||||
|
vector<string> errors;
|
||||||
|
processConfigFile(main_conf_input_path, conf_output, errors);
|
||||||
|
|
||||||
|
if (!errors.empty()) {
|
||||||
|
for (const string &error : errors) dbgWarning(D_NGINX_MANAGER) << error;
|
||||||
|
return genError("Errors occurred while processing configuration files");
|
||||||
|
}
|
||||||
|
|
||||||
|
ofstream single_nginx_conf_file(main_conf_output_path);
|
||||||
|
if (!single_nginx_conf_file.is_open()) return genError("Could not create output file: " + main_conf_output_path);
|
||||||
|
|
||||||
|
single_nginx_conf_file << conf_output.str();
|
||||||
|
single_nginx_conf_file.close();
|
||||||
|
|
||||||
|
return NGEN::Filesystem::resolveFullPath(main_conf_output_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
string
|
||||||
|
NginxUtils::getMainNginxConfPath()
|
||||||
|
{
|
||||||
|
static string main_nginx_conf_path;
|
||||||
|
if (!main_nginx_conf_path.empty()) return main_nginx_conf_path;
|
||||||
|
|
||||||
|
auto main_nginx_conf_path_setting = getProfileAgentSetting<string>("centralNginxManagement.mainConfPath");
|
||||||
|
if (main_nginx_conf_path_setting.ok()) {
|
||||||
|
main_nginx_conf_path = main_nginx_conf_path_setting.unpack();
|
||||||
|
return main_nginx_conf_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
string default_main_nginx_conf_path = "/etc/nginx/nginx.conf";
|
||||||
|
string command = "nginx -V 2>&1";
|
||||||
|
auto result = Singleton::Consume<I_ShellCmd>::by<NginxUtils>()->getExecOutputAndCode(command);
|
||||||
|
if (!result.ok()) return default_main_nginx_conf_path;
|
||||||
|
|
||||||
|
string output = result.unpack().first;
|
||||||
|
boost::regex conf_regex(R"(--conf-path=([^ ]+))");
|
||||||
|
boost::smatch match;
|
||||||
|
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, output, match, conf_regex)) {
|
||||||
|
main_nginx_conf_path = default_main_nginx_conf_path;
|
||||||
|
return main_nginx_conf_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
string conf_path = match[1].str();
|
||||||
|
conf_path = NGEN::Strings::trim(conf_path);
|
||||||
|
if (conf_path.empty()) {
|
||||||
|
main_nginx_conf_path = default_main_nginx_conf_path;
|
||||||
|
return main_nginx_conf_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
main_nginx_conf_path = conf_path;
|
||||||
|
return main_nginx_conf_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
string
|
||||||
|
NginxUtils::getModulesPath()
|
||||||
|
{
|
||||||
|
static string main_modules_path;
|
||||||
|
if (!main_modules_path.empty()) return main_modules_path;
|
||||||
|
|
||||||
|
auto modules_path_setting = getProfileAgentSetting<string>("centralNginxManagement.modulesPath");
|
||||||
|
if (modules_path_setting.ok()) {
|
||||||
|
main_modules_path = modules_path_setting.unpack();
|
||||||
|
return main_modules_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
string default_modules_path = "/usr/share/nginx/modules";
|
||||||
|
string command = "nginx -V 2>&1";
|
||||||
|
auto result = Singleton::Consume<I_ShellCmd>::by<NginxUtils>()->getExecOutputAndCode(command);
|
||||||
|
if (!result.ok()) return default_modules_path;
|
||||||
|
|
||||||
|
string output = result.unpack().first;
|
||||||
|
boost::regex modules_regex(R"(--modules-path=([^ ]+))");
|
||||||
|
boost::smatch match;
|
||||||
|
if (!NGEN::Regex::regexSearch(__FILE__, __LINE__, output, match, modules_regex)) {
|
||||||
|
main_modules_path = default_modules_path;
|
||||||
|
return main_modules_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
string modules_path = match[1].str();
|
||||||
|
modules_path = NGEN::Strings::trim(modules_path);
|
||||||
|
if (modules_path.empty()) {
|
||||||
|
main_modules_path = default_modules_path;
|
||||||
|
return main_modules_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
main_modules_path = modules_path;
|
||||||
|
return modules_path;
|
||||||
|
}
|
||||||
|
|
||||||
|
Maybe<void>
|
||||||
|
NginxUtils::validateNginxConf(const string &nginx_conf_path)
|
||||||
|
{
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Validating NGINX configuration file: " << nginx_conf_path;
|
||||||
|
if (!NGEN::Filesystem::exists(nginx_conf_path)) return genError("Nginx configuration file does not exist");
|
||||||
|
|
||||||
|
string command = "nginx -t -c " + nginx_conf_path + " 2>&1";
|
||||||
|
auto result = Singleton::Consume<I_ShellCmd>::by<NginxUtils>()->getExecOutputAndCode(command);
|
||||||
|
if (!result.ok()) return genError(result.getErr());
|
||||||
|
if (result.unpack().second != 0) return genError(result.unpack().first);
|
||||||
|
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "NGINX configuration file is valid";
|
||||||
|
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
Maybe<void>
|
||||||
|
NginxUtils::reloadNginx(const string &nginx_conf_path)
|
||||||
|
{
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Applying and reloading new NGINX configuration file: " << nginx_conf_path;
|
||||||
|
string main_nginx_conf_path = getMainNginxConfPath();
|
||||||
|
|
||||||
|
string backup_conf_path = main_nginx_conf_path + ".bak";
|
||||||
|
if (
|
||||||
|
NGEN::Filesystem::exists(main_nginx_conf_path)
|
||||||
|
&& !NGEN::Filesystem::copyFile(main_nginx_conf_path, backup_conf_path, true)
|
||||||
|
) {
|
||||||
|
return genError("Could not create backup of NGINX configuration file");
|
||||||
|
}
|
||||||
|
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Copying new NGINX configuration file to: " << main_nginx_conf_path;
|
||||||
|
if (!NGEN::Filesystem::copyFile(nginx_conf_path, main_nginx_conf_path, true)) {
|
||||||
|
return genError("Could not copy new NGINX configuration file");
|
||||||
|
}
|
||||||
|
|
||||||
|
string command = "nginx -s reload 2>&1";
|
||||||
|
auto result = Singleton::Consume<I_ShellCmd>::by<NginxUtils>()->getExecOutputAndCode(command);
|
||||||
|
if (!result.ok() || result.unpack().second != 0) {
|
||||||
|
if (!NGEN::Filesystem::copyFile(backup_conf_path, main_nginx_conf_path, true)) {
|
||||||
|
return genError("Could not restore backup of NGINX configuration file");
|
||||||
|
}
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Successfully restored backup of NGINX configuration file";
|
||||||
|
return result.ok() ? genError(result.unpack().first) : genError(result.getErr());
|
||||||
|
}
|
||||||
|
|
||||||
|
dbgInfo(D_NGINX_MANAGER) << "Successfully reloaded NGINX configuration file";
|
||||||
|
|
||||||
|
return {};
|
||||||
|
}
|
||||||
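As a usage sketch of the NginxConfCollector introduced above: the collector is constructed with an input and an output path, expands nested `include` directives, and returns the resolved output path wrapped in the agent's Maybe<> type. The paths below are placeholders, and the snippet assumes the repository's headers; it mirrors the flow of the standalone collector utility that appears further down.

```cpp
#include <iostream>
#include "nginx_utils.h"

int
collectConf()
{
    NginxConfCollector collector("/etc/nginx/nginx.conf", "/tmp/full_nginx.conf");
    auto full_conf = collector.generateFullNginxConf();   // expands include directives recursively
    if (!full_conf.ok()) {
        std::cerr << full_conf.getErr() << '\n';
        return 1;
    }
    // Validates the flattened file with `nginx -t` before it is used.
    auto validation = NginxUtils::validateNginxConf(full_conf.unpack());
    return validation.ok() ? 0 : 1;
}
```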
@@ -46,7 +46,7 @@ panicCFmt(const string &func, uint line, const char *fmt, ...)
 {
    va_list va;
    va_start(va, fmt);
-    Debug("PM", func, line).getStreamAggr() << CFmtPrinter(fmt, va);
+    Debug("PM", func, line, true).getStreamAggr() << CFmtPrinter(fmt, va);
    va_end(va);
}

components/utils/utilities/CMakeLists.txt (new executable file, 1 line)
@@ -0,0 +1 @@
+add_subdirectory(nginx_conf_collector)
components/utils/utilities/nginx_conf_collector/CMakeLists.txt (new executable file, 37 lines)
@@ -0,0 +1,37 @@
+include_directories(${PROJECT_SOURCE_DIR}/core/include/)
+
+link_directories(${Boost_LIBRARY_DIRS})
+link_directories(${ZLIB_ROOT}/lib)
+
+link_directories(${ZLIB_ROOT}/lib)
+link_directories(${CMAKE_BINARY_DIR}/core)
+link_directories(${CMAKE_BINARY_DIR}/core/compression)
+
+SET(EXECUTABLE_NAME "nginx_conf_collector_bin")
+add_executable(${EXECUTABLE_NAME} nginx_conf_collector.cc)
+target_compile_definitions(${EXECUTABLE_NAME} PRIVATE "NGINX_CONF_COLLECTOR_VERSION=\"$ENV{CI_PIPELINE_ID}\"")
+
+target_link_libraries(${EXECUTABLE_NAME}
+    shell_cmd
+    mainloop
+    messaging
+    event_is
+    metric
+    compression_utils
+    z
+    nginx_utils
+    time_proxy
+    debug_is
+    version
+    report
+    config
+    environment
+    singleton
+    rest
+    boost_context
+    boost_regex
+    pthread
+)
+
+install(TARGETS ${EXECUTABLE_NAME} DESTINATION bin)
+install(PROGRAMS ${EXECUTABLE_NAME} DESTINATION central_nginx_manager/bin RENAME cp-nano-nginx-conf-collector)
148
components/utils/utilities/nginx_conf_collector/nginx_conf_collector.cc
Executable file
148
components/utils/utilities/nginx_conf_collector/nginx_conf_collector.cc
Executable file
@@ -0,0 +1,148 @@
|
|||||||
|
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||||
|
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#include <iostream>
|
||||||
|
#include <unistd.h>
|
||||||
|
|
||||||
|
#include "agent_core_utilities.h"
|
||||||
|
#include "debug.h"
|
||||||
|
#include "internal/shell_cmd.h"
|
||||||
|
#include "mainloop.h"
|
||||||
|
#include "nginx_utils.h"
|
||||||
|
#include "time_proxy.h"
|
||||||
|
|
||||||
|
using namespace std;
|
||||||
|
|
||||||
|
USE_DEBUG_FLAG(D_NGINX_MANAGER);
|
||||||
|
|
||||||
|
class MainComponent
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
MainComponent()
|
||||||
|
{
|
||||||
|
time_proxy.init();
|
||||||
|
environment.init();
|
||||||
|
mainloop.init();
|
||||||
|
shell_cmd.init();
|
||||||
|
}
|
||||||
|
|
||||||
|
~MainComponent()
|
||||||
|
{
|
||||||
|
shell_cmd.fini();
|
||||||
|
mainloop.fini();
|
||||||
|
environment.fini();
|
||||||
|
time_proxy.fini();
|
||||||
|
}
|
||||||
|
private:
|
||||||
|
ShellCmd shell_cmd;
|
||||||
|
MainloopComponent mainloop;
|
||||||
|
Environment environment;
|
||||||
|
TimeProxyComponent time_proxy;
|
||||||
|
};
|
||||||
|
|
||||||
|
void
|
||||||
|
printVersion()
|
||||||
|
{
|
||||||
|
#ifdef NGINX_CONF_COLLECTOR_VERSION
|
||||||
|
cout << "Check Point NGINX configuration collector version: " << NGINX_CONF_COLLECTOR_VERSION << '\n';
|
||||||
|
#else
|
||||||
|
cout << "Check Point NGINX configuration collector version: Private" << '\n';
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
printUsage(const char *prog_name)
|
||||||
|
{
|
||||||
|
cout << "Usage: " << prog_name << " [-v] [-i /path/to/nginx.conf] [-o /path/to/output.conf]" << '\n';
|
||||||
|
cout << " -V Print version" << '\n';
|
||||||
|
cout << " -v Enable verbose output" << '\n';
|
||||||
|
cout << " -i input_file Specify input file (default is /etc/nginx/nginx.conf)" << '\n';
|
||||||
|
cout << " -o output_file Specify output file (default is ./full_nginx.conf)" << '\n';
|
||||||
|
cout << " -h Print this help message" << '\n';
|
||||||
|
}
|
||||||
|
|
||||||
|
int
|
||||||
|
main(int argc, char *argv[])
|
||||||
|
{
|
||||||
|
string nginx_input_file = "/etc/nginx/nginx.conf";
|
||||||
|
string nginx_output_file = "full_nginx.conf";
|
||||||
|
|
||||||
|
int opt;
|
||||||
|
while ((opt = getopt(argc, argv, "Vvhi:o:h")) != -1) {
|
||||||
|
switch (opt) {
|
||||||
|
case 'V':
|
||||||
|
printVersion();
|
||||||
|
return 0;
|
||||||
|
case 'v':
|
||||||
|
Debug::setUnitTestFlag(D_NGINX_MANAGER, Debug::DebugLevel::TRACE);
|
||||||
|
break;
|
||||||
|
case 'i':
|
||||||
|
nginx_input_file = optarg;
|
||||||
|
break;
|
||||||
|
case 'o':
|
||||||
|
nginx_output_file = optarg;
|
||||||
|
break;
|
||||||
|
case 'h':
|
||||||
|
printUsage(argv[0]);
|
||||||
|
return 0;
|
||||||
|
default:
|
||||||
|
printUsage(argv[0]);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int i = optind; i < argc;) {
|
||||||
|
cerr << "Unknown argument: " << argv[i] << '\n';
|
||||||
|
printUsage(argv[0]);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
dbgTrace(D_NGINX_MANAGER) << "Starting nginx configuration collector";
|
||||||
|
|
||||||
|
MainComponent main_component;
|
||||||
|
auto validation_result = NginxUtils::validateNginxConf(nginx_input_file);
|
||||||
|
if (!validation_result.ok()) {
|
||||||
|
cerr
|
||||||
|
<< "Could not validate nginx configuration file: "
|
||||||
|
<< nginx_input_file
|
||||||
|
<< '\n'
|
||||||
|
<< validation_result.getErr();
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
NginxConfCollector nginx_collector(nginx_input_file, nginx_output_file);
|
||||||
|
auto result = nginx_collector.generateFullNginxConf();
|
||||||
|
if (!result.ok()) {
|
||||||
|
cerr << "Could not generate full nginx configuration file, error: " << result.getErr() << '\n';
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result.unpack().empty() || !NGEN::Filesystem::exists(result.unpack())) {
|
||||||
|
cerr << "Generated nginx configuration file does not exist: " << result.unpack() << '\n';
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
validation_result = NginxUtils::validateNginxConf(result.unpack());
|
||||||
|
if (!validation_result.ok()) {
|
||||||
|
cerr
|
||||||
|
<< "Could not validate generated nginx configuration file: "
|
||||||
|
<< nginx_output_file
|
||||||
|
<< '\n'
|
||||||
|
<< validation_result.getErr();
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
cout << "Full nginx configuration file was successfully generated: " << result.unpack() << '\n';
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
@@ -138,6 +138,8 @@ spec:
            items:
              type: object
              properties:
+                name:
+                  type: string
                host:
                  type: string
                mode:
@@ -182,142 +184,6 @@ spec:
|
|||||||
---
|
---
|
||||||
apiVersion: apiextensions.k8s.io/v1
|
apiVersion: apiextensions.k8s.io/v1
|
||||||
kind: CustomResourceDefinition
|
kind: CustomResourceDefinition
|
||||||
metadata :
|
|
||||||
name : practices.openappsec.io
|
|
||||||
|
|
||||||
spec:
|
|
||||||
group: openappsec.io
|
|
||||||
versions:
|
|
||||||
- name: v1beta1
|
|
||||||
served: true
|
|
||||||
storage: true
|
|
||||||
schema:
|
|
||||||
openAPIV3Schema:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
spec:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
web-attacks:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
override-mode:
|
|
||||||
type: string
|
|
||||||
enum:
|
|
||||||
- prevent-learn
|
|
||||||
- detect-learn
|
|
||||||
- prevent
|
|
||||||
- detect
|
|
||||||
- inactive
|
|
||||||
minimum-confidence:
|
|
||||||
type: string
|
|
||||||
enum:
|
|
||||||
- medium
|
|
||||||
- high
|
|
||||||
- critical
|
|
||||||
max-url-size-bytes:
|
|
||||||
type: integer
|
|
||||||
max-object-depth:
|
|
||||||
type: integer
|
|
||||||
max-body-size-kb:
|
|
||||||
type: integer
|
|
||||||
max-header-size-bytes:
|
|
||||||
type: integer
|
|
||||||
protections:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
csrf-enabled:
|
|
||||||
type: string
|
|
||||||
enum:
|
|
||||||
- prevent-learn
|
|
||||||
- detect-learn
|
|
||||||
- prevent
|
|
||||||
- detect
|
|
||||||
- inactive
|
|
||||||
error-disclosure-enabled:
|
|
||||||
type: string
|
|
||||||
enum:
|
|
||||||
- prevent-learn
|
|
||||||
- detect-learn
|
|
||||||
- prevent
|
|
||||||
- detect
|
|
||||||
- inactive
|
|
||||||
open-redirect-enabled:
|
|
||||||
type: string
|
|
||||||
enum:
|
|
||||||
- prevent-learn
|
|
||||||
- detect-learn
|
|
||||||
- prevent
|
|
||||||
- detect
|
|
||||||
- inactive
|
|
||||||
non-valid-http-methods:
|
|
||||||
type: boolean
|
|
||||||
anti-bot:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
override-mode:
|
|
||||||
type: string
|
|
||||||
enum:
|
|
||||||
- prevent-learn
|
|
||||||
- detect-learn
|
|
||||||
- prevent
|
|
||||||
- detect
|
|
||||||
- inactive
|
|
||||||
injected-URIs:
|
|
||||||
type: array
|
|
||||||
items:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
uri:
|
|
||||||
type: string
|
|
||||||
validated-URIs:
|
|
||||||
type: array
|
|
||||||
items:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
uri:
|
|
||||||
type: string
|
|
||||||
snort-signatures:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
override-mode:
|
|
||||||
type: string
|
|
||||||
enum:
|
|
||||||
- prevent-learn
|
|
||||||
- detect-learn
|
|
||||||
- prevent
|
|
||||||
- detect
|
|
||||||
- inactive
|
|
||||||
configmap:
|
|
||||||
type: array
|
|
||||||
items:
|
|
||||||
type: string
|
|
||||||
openapi-schema-validation:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
override-mode:
|
|
||||||
type: string
|
|
||||||
enum:
|
|
||||||
- prevent-learn
|
|
||||||
- detect-learn
|
|
||||||
- prevent
|
|
||||||
- detect
|
|
||||||
- inactive
|
|
||||||
configmap:
|
|
||||||
type: array
|
|
||||||
items:
|
|
||||||
type: string
|
|
||||||
|
|
||||||
scope: Cluster
|
|
||||||
names:
|
|
||||||
plural: practices
|
|
||||||
singular: practice
|
|
||||||
kind: Practice
|
|
||||||
shortNames:
|
|
||||||
- practice
|
|
||||||
---
|
|
||||||
apiVersion: apiextensions.k8s.io/v1
|
|
||||||
kind: CustomResourceDefinition
|
|
||||||
metadata :
|
metadata :
|
||||||
name : accesscontrolpractices.openappsec.io
|
name : accesscontrolpractices.openappsec.io
|
||||||
creationTimestamp: null
|
creationTimestamp: null
|
||||||
@@ -338,8 +204,6 @@ spec:
          properties:
            appsecClassName:
              type: string
-            name:
-              type: string
            practiceMode:
              type: string
              enum:
@@ -431,7 +295,7 @@ spec:
              type: string
              enum:
                - block-page
-                #- redirect
+                - redirect
                - response-code-only
            message-title:
              type: string
@@ -455,8 +319,6 @@ spec:
          properties:
            appsecClassName:
              type: string
-            name:
-              type: string
            mode:
              type: string
              enum:
@@ -569,8 +431,6 @@ spec:
          properties:
            appsecClassName:
              type: string
-            name:
-              type: string
            action:
              type: string
              enum:
@@ -718,8 +578,6 @@ spec:
          properties:
            appsecClassName:
              type: string
-            name:
-              type: string
            accessControlLogging:
              type: object
              properties:
@@ -801,8 +659,8 @@ spec:
                    - json
                    - json-formatted
                  default: json
-            k8s-service:
-              type: boolean # Default value is dependant on the environment type
+            local-tuning:
+              type: boolean
            cefService:
              type: array
              items:
@@ -873,8 +731,6 @@ spec:
          properties:
            appsecClassName:
              type: string
-            name:
-              type: string
            sourcesIdentifiers: # required, minItems: 1
              type: array
              items:
@@ -929,8 +785,6 @@ spec:
          properties:
            appsecClassName:
              type: string
-            name:
-              type: string
            practiceMode:
              type: string
              enum:
@@ -1078,6 +932,8 @@ spec:
                - inactive
                - inherited #inherited from threatPreventionPractice mode set in policy
              default: inactive
+            enforcementLevel:
+              type: string
            configmap:
              type: array
              items:
@@ -1303,8 +1159,6 @@ spec:
          properties:
            appsecClassName:
              type: string
-            name:
-              type: string
            minNumOfSources:
              type: integer
              default: 3
@@ -14,7 +14,7 @@ spec:
    - default-threat-prevention-practice
    accessControlPractices:
    - default-access-control-practice
-    customResponses: default-web-user-response
+    customResponse: default-web-user-response
    triggers:
    - default-log-trigger
  specificRules:
@@ -62,7 +62,7 @@ spec:
      files: []
      # relevant for docker and linux embedded deployments
      # 0 or 1 files supported in array
-    openapiSchemaValidation: # schema validation requires "Premium Edition"
+    schemaValidation: # schema validation requires "Premium Edition"
      overrideMode: inherited
      configmap: []
      # relevant for deployments on kubernetes
|
|||||||
@@ -0,0 +1,163 @@
|
|||||||
|
apiVersion: openappsec.io/v1beta2
|
||||||
|
kind: AccessControlPractice
|
||||||
|
metadata:
|
||||||
|
name: access-control-practice-example
|
||||||
|
spec:
|
||||||
|
practiceMode: prevent
|
||||||
|
rateLimit:
|
||||||
|
overrideMode: inherited
|
||||||
|
rules:
|
||||||
|
- action: prevent
|
||||||
|
comment: Limiting access to the resource
|
||||||
|
limit: 100
|
||||||
|
triggers:
|
||||||
|
- log-trigger-example
|
||||||
|
unit: minute
|
||||||
|
uri: /api/resource
|
||||||
|
- action: inherited
|
||||||
|
comment: Rate limiting for authentication requests
|
||||||
|
limit: 50
|
||||||
|
triggers:
|
||||||
|
- log-trigger-example
|
||||||
|
unit: second
|
||||||
|
uri: /api/auth
|
||||||
|
|
||||||
|
---
|
||||||
|
apiVersion: openappsec.io/v1beta2
|
||||||
|
kind: CustomResponse
|
||||||
|
metadata:
|
||||||
|
name: custom-response-block-page-example
|
||||||
|
spec:
|
||||||
|
mode: block-page
|
||||||
|
messageTitle: "Access Denied"
|
||||||
|
messageBody: "Your request was blocked for security reasons."
|
||||||
|
httpResponseCode: 403
|
||||||
|
|
||||||
|
---
|
||||||
|
apiVersion: openappsec.io/v1beta2
|
||||||
|
kind: Exception
|
||||||
|
metadata:
|
||||||
|
name: exception-example
|
||||||
|
spec:
|
||||||
|
action: accept
|
||||||
|
condition:
|
||||||
|
- key: countryCode
|
||||||
|
value: US
|
||||||
|
|
||||||
|
---
|
||||||
|
apiVersion: openappsec.io/v1beta2
|
||||||
|
kind: LogTrigger
|
||||||
|
metadata:
|
||||||
|
name: log-trigger-example
|
||||||
|
spec:
|
||||||
|
accessControlLogging:
|
||||||
|
allowEvents: false
|
||||||
|
dropEvents: true
|
||||||
|
appsecLogging:
|
||||||
|
detectEvents: true
|
||||||
|
preventEvents: true
|
||||||
|
allWebRequests: false
|
||||||
|
additionalSuspiciousEventsLogging:
|
||||||
|
enabled: true
|
||||||
|
minSeverity: high # {high|critical}
|
||||||
|
responseBody: false
|
||||||
|
responseCode: true
|
||||||
|
extendedLogging:
|
||||||
|
urlPath: true
|
||||||
|
urlQuery: true
|
||||||
|
httpHeaders: false
|
||||||
|
requestBody: false
|
||||||
|
logDestination:
|
||||||
|
cloud: true
|
||||||
|
logToAgent: true
|
||||||
|
stdout:
|
||||||
|
format: json-formatted
|
||||||
|
|
||||||
|
---
|
||||||
|
apiVersion: openappsec.io/v1beta2
|
||||||
|
kind: Policy
|
||||||
|
metadata:
|
||||||
|
name: policy-example
|
||||||
|
spec:
|
||||||
|
default:
|
||||||
|
mode: prevent-learn
|
||||||
|
accessControlPractices: [access-control-practice-example]
|
||||||
|
threatPreventionPractices: [threat-prevention-practice-example]
|
||||||
|
triggers: [log-trigger-example]
|
||||||
|
customResponse: custom-response-block-page-example
|
||||||
|
sourceIdentifiers: sources-identifier-example
|
||||||
|
trustedSources: trusted-sources-example
|
||||||
|
exceptions:
|
||||||
|
- exception-example
|
||||||
|
---
|
||||||
|
apiVersion: openappsec.io/v1beta2
|
||||||
|
kind: ThreatPreventionPractice
|
||||||
|
metadata:
|
||||||
|
name: threat-prevention-practice-example
|
||||||
|
spec:
|
||||||
|
practiceMode: inherited
|
||||||
|
webAttacks:
|
||||||
|
overrideMode: inherited
|
||||||
|
minimumConfidence: high
|
||||||
|
intrusionPrevention:
|
||||||
|
# intrusion prevention (IPS) requires "Premium Edition"
|
||||||
|
overrideMode: inherited
|
||||||
|
maxPerformanceImpact: medium
|
||||||
|
minSeverityLevel: medium
|
||||||
|
minCveYear: 2016
|
||||||
|
highConfidenceEventAction: inherited
|
||||||
|
mediumConfidenceEventAction: inherited
|
||||||
|
lowConfidenceEventAction: detect
|
||||||
|
fileSecurity:
|
||||||
|
# file security requires "Premium Edition"
|
||||||
|
overrideMode: inherited
|
||||||
|
minSeverityLevel: medium
|
||||||
|
highConfidenceEventAction: inherited
|
||||||
|
mediumConfidenceEventAction: inherited
|
||||||
|
lowConfidenceEventAction: detect
|
||||||
|
snortSignatures:
|
||||||
|
# you must specify snort signatures in configmap or file to activate snort inspection
|
||||||
|
overrideMode: inherited
|
||||||
|
configmap: []
|
||||||
|
# relevant for deployments on kubernetes
|
||||||
|
# 0 or 1 configmaps supported in array
|
||||||
|
files: []
|
||||||
|
# relevant for docker and linux embedded deployments
|
||||||
|
# 0 or 1 files supported in array
|
||||||
|
schemaValidation: # schema validation requires "Premium Edition"
|
||||||
|
overrideMode: inherited
|
||||||
|
configmap: []
|
||||||
|
# relevant for deployments on kubernetes
|
||||||
|
# 0 or 1 configmaps supported in array
|
||||||
|
files: []
|
||||||
|
# relevant for docker and linux embedded deployments
|
||||||
|
# 0 or 1 files supported in array
|
||||||
|
antiBot: # antibot requires "Premium Edition"
|
||||||
|
overrideMode: inherited
|
||||||
|
injectedUris: []
|
||||||
|
validatedUris: []
|
||||||
|
|
||||||
|
---
|
||||||
|
apiVersion: openappsec.io/v1beta2
|
||||||
|
kind: TrustedSource
|
||||||
|
metadata:
|
||||||
|
name: trusted-sources-example
|
||||||
|
spec:
|
||||||
|
minNumOfSources: 3
|
||||||
|
sourcesIdentifiers:
|
||||||
|
- 1.0.0.27
|
||||||
|
- 1.0.0.28
|
||||||
|
- 1.0.0.29
|
||||||
|
|
||||||
|
---
|
||||||
|
apiVersion: openappsec.io/v1beta2
|
||||||
|
kind: SourcesIdentifier
|
||||||
|
metadata:
|
||||||
|
name: sources-identifier-example
|
||||||
|
spec:
|
||||||
|
sourcesIdentifiers:
|
||||||
|
- identifier: sourceip
|
||||||
|
value:
|
||||||
|
- "192.168.1.1"
|
||||||
|
- "10.0.0.1"
|
||||||
|
|
||||||
@@ -14,7 +14,7 @@ spec:
    - default-threat-prevention-practice
    accessControlPractices:
    - default-access-control-practice
-    customResponses: default-web-user-response
+    customResponse: default-web-user-response
    triggers:
    - default-log-trigger
  specificRules:
@@ -62,7 +62,7 @@ spec:
      files: []
      # relevant for docker and linux embedded deployments
      # 0 or 1 files supported in array
-    openapiSchemaValidation: # schema validation requires "Premium Edition"
+    schemaValidation: # schema validation requires "Premium Edition"
      overrideMode: inherited
      configmap: []
      # relevant for deployments on kubernetes
@@ -12,17 +12,17 @@ practices:
  - name: webapp-default-practice
    openapi-schema-validation:
      configmap: []
-      override-mode: detect-learn
+      override-mode: as-top-level
    snort-signatures:
      configmap: []
-      override-mode: detect-learn
+      override-mode: as-top-level
    web-attacks:
      max-body-size-kb: 1000000
      max-header-size-bytes: 102400
      max-object-depth: 40
      max-url-size-bytes: 32768
      minimum-confidence: critical
-      override-mode: detect-learn
+      override-mode: as-top-level
    protections:
      csrf-protection: inactive
      error-disclosure: inactive
@@ -31,7 +31,7 @@ practices:
    anti-bot:
      injected-URIs: []
      validated-URIs: []
-      override-mode: detect-learn
+      override-mode: as-top-level

log-triggers:
  - name: appsec-default-log-trigger
@@ -12,17 +12,17 @@ practices:
  - name: webapp-default-practice
    openapi-schema-validation:
      configmap: []
-      override-mode: prevent-learn
+      override-mode: as-top-level
    snort-signatures:
      configmap: []
-      override-mode: prevent-learn
+      override-mode: as-top-level
    web-attacks:
      max-body-size-kb: 1000000
      max-header-size-bytes: 102400
      max-object-depth: 40
      max-url-size-bytes: 32768
      minimum-confidence: critical
-      override-mode: prevent-learn
+      override-mode: as-top-level
    protections:
      csrf-protection: inactive
      error-disclosure: inactive
@@ -31,7 +31,7 @@ practices:
    anti-bot:
      injected-URIs: []
      validated-URIs: []
-      override-mode: prevent-learn
+      override-mode: as-top-level

log-triggers:
  - name: appsec-default-log-trigger
@@ -31,6 +31,7 @@ add_subdirectory(tenant_manager)
 add_subdirectory(compression)
 add_subdirectory(attachments)
 add_subdirectory(report_messaging)
+add_subdirectory(env_details)

add_library(ngen_core SHARED ".")
target_link_libraries(
@@ -39,7 +40,7 @@ target_link_libraries(
    "table;debug_is;shell_cmd;metric;tenant_manager;messaging;encryptor;time_proxy;singleton;mainloop;environment;logging;report;rest"
    "compression_utils;-lz;config;intelligence_is_v2;event_is;memory_consumption;connkey"
    "instance_awareness;socket_is;agent_details;agent_details_reporter;buffers;cpu;agent_core_utilities"
-    "report_messaging"
+    "report_messaging;env_details;version"
    -Wl,-no-whole-archive
)

@@ -203,6 +203,18 @@ deleteFile(const string &path)
    return true;
}

+string
+resolveFullPath(const string &input_path) {
+    dbgTrace(D_INFRA_UTILS) << "Resolving absolute path: " << input_path;
+    char resolved_path[PATH_MAX];
+    if (!realpath(input_path.c_str(), resolved_path)) {
+        dbgWarning(D_INFRA_UTILS) << "Error resolving path: " << input_path << ", errno: " << errno;
+        return "";
+    }
+
+    return string(resolved_path);
+}
+
bool
deleteDirectory(const string &path, bool delete_content)
{
@@ -510,6 +522,23 @@ removeTrailingWhitespaces(string str)
    return str;
}

+string
+removeLeadingWhitespaces(string str)
+{
+    str.erase(
+        str.begin(),
+        find_if(str.begin(), str.end(), [] (char c) { return !isspace(c); })
+    );
+
+    return str;
+}
+
+string
+trim(string str)
+{
+    return removeLeadingWhitespaces(removeTrailingWhitespaces(str));
+}
+
} // namespace Strings

} // namespace NGEN
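A minimal usage sketch of the string helpers added above, consistent with the unit tests introduced in the next hunk (the include path is the repository's own utilities header):

```cpp
#include <cassert>
#include <string>
#include "agent_core_utilities.h"  // declares NGEN::Strings::trim per the change above

void
trimExample()
{
    std::string noisy = "\n\r  value  \r\n";
    assert(NGEN::Strings::trim(noisy) == "value");  // whitespace stripped from both ends
}
```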
@@ -184,3 +184,27 @@ TEST_F(AgentCoreUtilUT, removeTrailingWhitespacesTest)
    string str_with_trailing_whitespace = "str_with_trailing_whitespace\n\n\n\r \n\n\r";
    EXPECT_EQ(NGEN::Strings::removeTrailingWhitespaces(str_with_trailing_whitespace), "str_with_trailing_whitespace");
}
+
+TEST_F(AgentCoreUtilUT, removeLeadingWhitespacesTest)
+{
+    string str_with_leading_whitespace = "\n\n\n\r \n\n\rstr_with_leading_whitespace";
+    EXPECT_EQ(NGEN::Strings::removeLeadingWhitespaces(str_with_leading_whitespace), "str_with_leading_whitespace");
+}
+
+TEST_F(AgentCoreUtilUT, trimTest)
+{
+    string str_with_leading_and_trailing_whitespace = "\n\n \r \rstr_with_whitespace\n\r \n\n\r";
+    EXPECT_EQ(NGEN::Strings::trim(str_with_leading_and_trailing_whitespace), "str_with_whitespace");
+}
+
+TEST_F(AgentCoreUtilUT, resolveFullPathTest)
+{
+    string working_dir = cptestFnameInExeDir("");
+    ofstream file(working_dir + "test.txt");
+    ASSERT_TRUE(file.is_open());
+    file.close();
+    string relative_path = "test.txt";
+    string full_path = NGEN::Filesystem::resolveFullPath(relative_path);
+    EXPECT_EQ(full_path, working_dir + "test.txt");
+    ASSERT_TRUE(NGEN::Filesystem::deleteFile(working_dir + "test.txt"));
+}
@@ -388,8 +388,9 @@ AgentDetails::convertProxyProtocolToString(ProxyProtocol proto) const
        case ProxyProtocol::HTTP: return "http";
        case ProxyProtocol::HTTPS: return "https";
    }
-    dbgAssert(false) << alert << "Unsupported Proxy Protocol " << static_cast<int>(proto);
-    return "";
+    dbgAssertOpt(false) << alert << "Unsupported Proxy Protocol " << static_cast<int>(proto);
+    dbgWarning(D_ORCHESTRATOR) << "Using https proxy as default";
+    return "https";
}

Maybe<void>
@@ -475,11 +476,14 @@
AgentDetails::loadProxyType(ProxyProtocol protocol)
{
    dbgFlow(D_ORCHESTRATOR) << "Loading proxy type: " << convertProxyProtocolToString(protocol);
-    dbgAssert(protocol == ProxyProtocol::HTTP || protocol == ProxyProtocol::HTTPS)
-        << alert
-        << "Unsupported Proxy Protocol "
-        << static_cast<int>(protocol);
+    if (!(protocol == ProxyProtocol::HTTP || protocol == ProxyProtocol::HTTPS)) {
+        dbgAssertOpt(false)
+            << alert
+            << "Unsupported Proxy Protocol "
+            << static_cast<int>(protocol);
+        protocol = ProxyProtocol::HTTPS;
+        dbgWarning(D_ORCHESTRATOR) << "Using https proxy as default";
+    }
    static const map<ProxyProtocol, string> env_var_name = {
        {ProxyProtocol::HTTPS, "https_proxy"},
        {ProxyProtocol::HTTP, "http_proxy"}
@@ -111,7 +111,8 @@ HttpAttachmentConfiguration::save(cereal::JSONOutputArchive &archive) const
        cereal::make_nvp("keep_alive_interval_msec", getNumericalValue("keep_alive_interval_msec")),
        cereal::make_nvp("min_retries_for_verdict", getNumericalValue("min_retries_for_verdict")),
        cereal::make_nvp("max_retries_for_verdict", getNumericalValue("max_retries_for_verdict")),
-        cereal::make_nvp("body_size_trigger", getNumericalValue("body_size_trigger"))
+        cereal::make_nvp("body_size_trigger", getNumericalValue("body_size_trigger")),
+        cereal::make_nvp("remove_server_header", getNumericalValue("remove_server_header"))
    );
}

@@ -167,6 +168,7 @@ HttpAttachmentConfiguration::load(cereal::JSONInputArchive &archive)
    loadNumericalValue(archive, "min_retries_for_verdict", 3);
    loadNumericalValue(archive, "max_retries_for_verdict", 15);
    loadNumericalValue(archive, "body_size_trigger", 200000);
+    loadNumericalValue(archive, "remove_server_header", 0);
}

bool
@@ -63,7 +63,8 @@ TEST_F(HttpAttachmentUtilTest, GetValidAttachmentConfiguration)
        "\"waiting_for_verdict_thread_timeout_msec\": 60,\n"
        "\"req_header_thread_timeout_msec\": 10,\n"
        "\"ip_ranges\": " + createIPRangesString(ip_ranges) + ",\n"
-        "\"static_resources_path\": \"" + static_resources_path + "\""
+        "\"static_resources_path\": \"" + static_resources_path + "\",\n"
+        "\"remove_server_header\": 0"
        "}\n";
    ofstream valid_configuration_file(attachment_configuration_file_name);
    valid_configuration_file << valid_configuration;
@@ -89,6 +90,7 @@ TEST_F(HttpAttachmentUtilTest, GetValidAttachmentConfiguration)
    EXPECT_EQ(conf_data_out.getNumericalValue("res_body_thread_timeout_msec"), 80u);
    EXPECT_EQ(conf_data_out.getNumericalValue("waiting_for_verdict_thread_timeout_msec"), 60u);
    EXPECT_EQ(conf_data_out.getNumericalValue("nginx_inspection_mode"), 1u);
+    EXPECT_EQ(conf_data_out.getNumericalValue("remove_server_header"), 0u);
}

TEST_F(HttpAttachmentUtilTest, GetMalformedAttachmentConfiguration)
@@ -144,8 +144,8 @@ Buffer::operator+(const Buffer &other) const
 Buffer
 Buffer::getSubBuffer(uint start, uint end) const
 {
-dbgAssert(start<=end && end<=len) << alert << "Buffer::getSubBuffer() returned: Illegal scoping of buffer";
-if (start == end) return Buffer();
+dbgAssertOpt(start<=end && end<=len) << alert << "Buffer::getSubBuffer() returned: Illegal scoping of buffer";
+if (start >= end || end > len) return Buffer();

 Buffer res;
 uint offset = 0;
@@ -178,8 +178,12 @@ Buffer::getSubBuffer(uint start, uint end) const
 Maybe<uint>
 Buffer::findFirstOf(char ch, uint start) const
 {
-dbgAssert(start <= len) << alert << "Buffer::findFirstOf() returned: Cannot set a start point after buffer's end";
+if (start > len) {
+dbgAssertOpt(start <= len)
+<< alert
+<< "Buffer::findFirstOf() returned: Cannot set a start point after buffer's end";
+return genError("Cannot set a start point after buffer's end");
+}
 for (; start < len; ++start) {
 if ((*this)[start] == ch) return start;
 }
@@ -189,8 +193,12 @@ Buffer::findFirstOf(char ch, uint start) const
 Maybe<uint>
 Buffer::findFirstOf(const Buffer &buf, uint start) const
 {
-dbgAssert(start <= len) << alert << "Buffer::findFirstOf() returned: Cannot set a start point after buffer's end";
+if (start > len) {
+dbgAssertOpt(start <= len)
+<< alert
+<< "Buffer::findFirstOf() returned: Cannot set a start point after buffer's end";
+return genError("Cannot set a start point after buffer's end");
+}
 for (; start + buf.size() <= len; ++start) {
 auto sub_buffer = getSubBuffer(start, start + buf.size());
 if (sub_buffer == buf) return start;
@@ -201,9 +209,13 @@ Buffer::findFirstOf(const Buffer &buf, uint start) const
 Maybe<uint>
 Buffer::findFirstNotOf(char ch, uint start) const
 {
-dbgAssert(start <= len)
-<< alert
-<< "Buffer::findFirstNotOf() returned: Cannot set a start point after buffer's end";
+if (start > len) {
+dbgAssertOpt(start <= len)
+<< alert
+<< "Buffer::findFirstNotOf() returned: Cannot set a start point after buffer's end";
+return genError("Cannot set a start point after buffer's end");
+}

 for (; start < len; ++start) {
 if ((*this)[start] != ch) return start;
 }
@@ -213,7 +225,12 @@ Buffer::findFirstNotOf(char ch, uint start) const
 Maybe<uint>
 Buffer::findLastOf(char ch, uint start) const
 {
-dbgAssert(start <= len) << alert << "Buffer::findLastOf() returned: Cannot set a start point after buffer's end";
+if (start > len) {
+dbgAssertOpt(start <= len)
+<< alert
+<< "Buffer::findLastOf() returned: Cannot set a start point after buffer's end";
+return genError("Cannot set a start point after buffer's end");
+}
 for (; 0 < start; --start) {
 if ((*this)[start - 1] == ch) return start - 1;
 }
@@ -223,9 +240,12 @@ Buffer::findLastOf(char ch, uint start) const
 Maybe<uint>
 Buffer::findLastNotOf(char ch, uint start) const
 {
-dbgAssert(start <= len)
-<< alert
-<< "Buffer::findLastNotOf() returned: Cannot set a start point after buffer's end";
+if (start > len) {
+dbgAssertOpt(start <= len)
+<< alert
+<< "Buffer::findLastNotOf() returned: Cannot set a start point after buffer's end";
+return genError("Cannot set a start point after buffer's end");
+}
 for (; 0 < start; --start) {
 if ((*this)[start - 1] != ch) return start - 1;
 }
@@ -235,8 +255,8 @@ Buffer::findLastNotOf(char ch, uint start) const
 void
 Buffer::truncateHead(uint size)
 {
-dbgAssert(size <= len) << alert << "Cannot set a new start of buffer after the buffer's end";
-if (size == 0) return;
+dbgAssertOpt(size <= len) << alert << "Cannot set a new start of buffer after the buffer's end";
+if (size == 0 || size > len) return;
 if (size == len) {
 clear();
 return;
@@ -261,8 +281,8 @@ Buffer::truncateHead(uint size)
 void
 Buffer::truncateTail(uint size)
 {
-dbgAssert(size <= len) << alert << "Cannot set a new end of buffer after the buffer's end";
-if (size == 0) return;
+dbgAssertOpt(size <= len) << alert << "Cannot set a new end of buffer after the buffer's end";
+if (size == 0 || size > len) return;
 if (size == len) {
 clear();
 return;
@@ -285,14 +305,20 @@ Buffer::truncateTail(uint size)
 void
 Buffer::keepHead(uint size)
 {
-dbgAssert(size <= len) << alert << "Cannot set a new end of buffer before the buffer's start";
+if (size > len) {
+dbgAssertOpt(size <= len) << alert << "Cannot set a new end of buffer before the buffer's start";
+return;
+}
 truncateTail(len - size);
 }

 void
 Buffer::keepTail(uint size)
 {
-dbgAssert(size <= len) << alert << "Cannot set a new start of buffer after the buffer's end";
+if (size > len) {
+dbgAssertOpt(size <= len) << alert << "Cannot set a new start of buffer after the buffer's end";
+return;
+}
 truncateHead(len - size);
 }

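The Buffer hunks above swap hard `dbgAssert` aborts for `dbgAssertOpt` plus a recoverable error: an out-of-range search now yields a `genError` result rather than terminating the process. A minimal caller-side sketch of the pattern, assuming the repo's `Buffer`/`Maybe` headers; the helper function itself is hypothetical:

```cpp
// Sketch only: consuming the Maybe<uint> returned by findFirstOf() now that a
// bad start offset (or a missing match) produces an error instead of an abort.
#include "buffer.h"

// Hypothetical helper: everything before the first ':' in 'line', or the whole
// buffer when no separator exists.
Buffer
keyPart(const Buffer &line)
{
    Maybe<uint> pos = line.findFirstOf(':', 0);
    if (!pos.ok()) return line;                    // error path, no process abort
    return line.getSubBuffer(0, pos.unpack());     // safe: the unpacked index is within the buffer
}
```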
@@ -2,8 +2,11 @@ include_directories(${ng_module_osrc_zlib_path}/include)
 add_definitions(-DZLIB_CONST)

 add_library(compression_utils SHARED compression_utils.cc)
+add_library(static_compression_utils compression_utils.cc)

 add_subdirectory(compression_utils_ut)

 install(TARGETS compression_utils DESTINATION lib)
 install(TARGETS compression_utils DESTINATION http_transaction_handler_service/lib)

+install(TARGETS static_compression_utils DESTINATION lib)
@@ -64,12 +64,12 @@ IPAddr::print(ostream &os) const
 switch (type) {
 case IPType::V4: {
 formatted_addr = inet_ntop(AF_INET, &v4, buf, sizeof(buf));
-dbgAssert(formatted_addr == buf) << alert("conversion error") << "Failed to convert an IPv4 address";
+dbgAssertOpt(formatted_addr == buf) << alert("conversion error") << "Failed to convert an IPv4 address";
 break;
 }
 case IPType::V6: {
 formatted_addr = inet_ntop(AF_INET6, &v6, buf, sizeof(buf));
-dbgAssert(formatted_addr == buf) << alert("conversion error") << "Failed to convert an IPv6 address";
+dbgAssertOpt(formatted_addr == buf) << alert("conversion error") << "Failed to convert an IPv6 address";
 break;
 }
 case IPType::UNINITIALIZED: {
@@ -116,7 +116,7 @@ ConnKey::reverse()
 size_t
 ConnKey::hash() const
 {
-dbgAssert(src.type != IPType::UNINITIALIZED)
+dbgAssertOpt(src.type != IPType::UNINITIALIZED)
 << alert("hashing")
 << "ConnKey::hash was called on an uninitialized object";
 size_t seed = 0;
@@ -27,6 +27,7 @@
|
|||||||
#include "i_instance_awareness.h"
|
#include "i_instance_awareness.h"
|
||||||
#include "i_signal_handler.h"
|
#include "i_signal_handler.h"
|
||||||
#include "hash_combine.h"
|
#include "hash_combine.h"
|
||||||
|
#include "version.h"
|
||||||
|
|
||||||
using namespace std;
|
using namespace std;
|
||||||
|
|
||||||
@@ -298,14 +299,19 @@ AlertInfo::evalParams()
|
|||||||
Debug::Debug(
|
Debug::Debug(
|
||||||
const string &file_name,
|
const string &file_name,
|
||||||
const string &func_name,
|
const string &func_name,
|
||||||
const uint &line)
|
const uint &line,
|
||||||
|
bool force_assert)
|
||||||
{
|
{
|
||||||
if (Singleton::exists<Config::I_Config>()) {
|
if (!force_assert && !should_assert_optional) {
|
||||||
do_assert = getConfigurationWithDefault<bool>(true, "Debug I/S", "Abort on assertion");
|
do_assert = false;
|
||||||
} else {
|
} else {
|
||||||
do_assert = true;
|
do_assert = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (Singleton::exists<Config::I_Config>()) {
|
||||||
|
do_assert = getConfigurationWithDefault<bool>(do_assert, "Debug I/S", "Abort on assertion");
|
||||||
|
}
|
||||||
|
|
||||||
auto current_configuration =
|
auto current_configuration =
|
||||||
Singleton::exists<Config::I_Config>() ? getConfigurationWithDefault(default_config, "Debug") : default_config;
|
Singleton::exists<Config::I_Config>() ? getConfigurationWithDefault(default_config, "Debug") : default_config;
|
||||||
|
|
||||||
@@ -519,6 +525,13 @@ Debug::preload()
|
|||||||
|
|
||||||
active_streams["STDOUT"] = make_shared<Debug::DebugStream>(&cout);
|
active_streams["STDOUT"] = make_shared<Debug::DebugStream>(&cout);
|
||||||
active_streams["FOG"] = make_shared<DebugFogStream>();
|
active_streams["FOG"] = make_shared<DebugFogStream>();
|
||||||
|
|
||||||
|
string branch = Version::getBranch();
|
||||||
|
if (branch == "open-source" || branch == "master" || branch.substr(0, 6) == "hotfix") {
|
||||||
|
should_assert_optional = false;
|
||||||
|
} else {
|
||||||
|
should_assert_optional = true;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
@@ -844,3 +857,4 @@ bool Debug::is_fail_open_mode = false;
|
|||||||
bool Debug::debug_override_exist = false;
|
bool Debug::debug_override_exist = false;
|
||||||
string Debug::default_debug_file_stream_path = "";
|
string Debug::default_debug_file_stream_path = "";
|
||||||
vector<string> Debug::streams_from_mgmt;
|
vector<string> Debug::streams_from_mgmt;
|
||||||
|
bool Debug::should_assert_optional = true;
|
||||||
|
@@ -396,14 +396,18 @@ LogLevel
 DebugFogStream::getLogLevel() const
 {
 switch (level) {
-case Debug::DebugLevel::NOISE: dbgAssert(false) << alert << "Impossible LogLevel 'Noise'"; break;
+case Debug::DebugLevel::NOISE:
+dbgAssertOpt(false) << alert << "Impossible LogLevel 'Noise'";
+return LogLevel::TRACE;
 case Debug::DebugLevel::TRACE: return LogLevel::TRACE;
 case Debug::DebugLevel::DEBUG: return LogLevel::DEBUG;
 case Debug::DebugLevel::WARNING: return LogLevel::WARNING;
 case Debug::DebugLevel::INFO: return LogLevel::INFO;
 case Debug::DebugLevel::ERROR: return LogLevel::ERROR;
 case Debug::DebugLevel::ASSERTION: return LogLevel::ERROR;
-case Debug::DebugLevel::NONE: dbgAssert(false) << alert << "Impossible LogLevel 'None'"; break;
+case Debug::DebugLevel::NONE:
+dbgAssertOpt(false) << alert << "Impossible LogLevel 'None'";
+return LogLevel::ERROR;
 }

 return LogLevel::INFO;
@@ -15,18 +15,32 @@

 #include "config.h"
 #include "debug.h"
-#include "orchestration_tools.h"
+#include <sys/stat.h>

 using namespace std;

 USE_DEBUG_FLAG(D_LOCAL_POLICY);

 static const string k8s_service_account = "/var/run/secrets/kubernetes.io/serviceaccount";
-// LCOV_EXCL_START Reason: can't use on the pipline environment
-EnvDetails::EnvDetails() : env_type(EnvType::LINUX)
+static bool
+checkExistence(const string &path, bool is_dir)
 {
-auto tools = Singleton::Consume<I_OrchestrationTools>::from<OrchestrationTools>();
-if (tools->doesFileExist("/.dockerenv")) env_type = EnvType::DOCKER;
+try {
+struct stat info;
+if (stat(path.c_str(), &info) != 0) return false;
+int flag = is_dir ? S_IFDIR : S_IFREG;
+return info.st_mode & flag;
+} catch (exception &e) {
+return false;
+}
+}
+
+// LCOV_EXCL_START Reason: can't use on the pipline environment
+EnvDetails::EnvDetails() : Component("EnvDetails")
+{
+if (doesFileExist("/.dockerenv")) env_type = EnvType::DOCKER;
 token = retrieveToken();
 agent_namespace = retrieveNamespace();
 if (!token.empty()) {
@@ -82,4 +96,10 @@ EnvDetails::readFileContent(const string &file_path)
 }
 }

+bool
+EnvDetails::doesFileExist(const string &file_path) const
+{
+return checkExistence(file_path, false);
+}
+
 // LCOV_EXCL_STOP
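The new `stat()`-based helper replaces the orchestration-tools dependency; its `is_dir` flag switches between `S_IFREG` and `S_IFDIR`. A small illustration, which would have to live in the same translation unit because the helper is `static`; the wrapper function name is made up:

```cpp
// Illustrative only: the same helper answers "regular file?" and "directory?".
static bool
runningInContainerWithServiceAccount()
{
    bool docker_marker = checkExistence("/.dockerenv", false);                                    // regular file
    bool service_account = checkExistence("/var/run/secrets/kubernetes.io/serviceaccount", true); // directory
    return docker_marker && service_account;
}
```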
@@ -60,10 +60,11 @@ Context::convertToString(MetaDataType type)
 case MetaDataType::Direction: return "direction";
 case MetaDataType::Email: return "email";
 case MetaDataType::COUNT:
-dbgAssert(false) << alert << "COUNT is not a valid meta data type";
+dbgAssertOpt(false) << alert << "COUNT is not a valid meta data type";
+return "invalid_count";
 }
-dbgAssert(false) << alert << "Reached impossible case with type=" << static_cast<int>(type);
-return "";
+dbgAssertOpt(false) << alert << "Reached impossible case with type=" << static_cast<int>(type);
+return "invalid_metadata_type";
 }

 map<string, uint64_t>
@@ -97,8 +97,8 @@ Span::convertSpanContextTypeToString(ContextType type)
 return "Follows from";
 }
 }
-dbgAssert(false) << AlertInfo(AlertTeam::CORE, "tracing") << "Span context not supported";
-return string();
+dbgAssertOpt(false) << AlertInfo(AlertTeam::CORE, "tracing") << "Span context not supported";
+return "Invalid context type";
 }

 SpanWrapper::SpanWrapper(string _trace_id, Span::ContextType _type, string _prev_span)
@@ -57,6 +57,7 @@ unsigned int getResBodyThreadTimeout();
 unsigned int getMinRetriesForVerdict();
 unsigned int getMaxRetriesForVerdict();
 unsigned int getReqBodySizeTrigger();
+unsigned int getRemoveResServerHeader();

 unsigned int getWaitingForVerdictThreadTimeout();

@@ -159,7 +159,8 @@ public:
 Debug(
 const std::string &file_name,
 const std::string &func_name,
-const uint &line
+const uint &line,
+bool force_assert
 );

 Debug(
@@ -273,6 +274,7 @@ private:
 static bool debug_override_exist;
 static std::string default_debug_file_stream_path;
 static std::vector<std::string> streams_from_mgmt;
+static bool should_assert_optional;

 bool do_assert;
 bool is_communication = false;
@@ -328,7 +330,11 @@ getBaseName(const char *iter, const char *base)

 #define dbgAssert(cond) \
 if (CP_LIKELY(cond)) { \
-} else Debug::DebugAlert(__FILENAME__, __FUNCTION__, __LINE__).getStreamAggr()
+} else Debug::DebugAlert(__FILENAME__, __FUNCTION__, __LINE__, true).getStreamAggr()

+#define dbgAssertOpt(cond) \
+if (CP_LIKELY(cond)) { \
+} else Debug::DebugAlert(__FILENAME__, __FUNCTION__, __LINE__, false).getStreamAggr()
+
 // Macros to allow simple debug messaging
 #define DBG_GENERIC(level, ...) \
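`dbgAssertOpt` expands exactly like `dbgAssert` except that it passes `force_assert = false` to `Debug::DebugAlert`, so aborting becomes conditional on `should_assert_optional` (derived from the build branch in `Debug::preload()`) and on the "Abort on assertion" configuration. A hedged sketch of the intended call-site split; the function, its values, and the "example" alert tag are hypothetical:

```cpp
// Sketch only, assuming the debug.h from this change is included.
#include "debug.h"

int
pickSlot(int requested, int max_slots)
{
    // Hard invariant: always asserts (force_assert == true in the expansion above).
    dbgAssert(max_slots > 0) << AlertInfo(AlertTeam::CORE, "example") << "empty slot table";

    // Recoverable misuse: reports the assertion, aborts only when the branch/configuration
    // says so, and otherwise falls through to the clamped value below.
    if (requested >= max_slots) {
        dbgAssertOpt(requested < max_slots)
            << AlertInfo(AlertTeam::CORE, "example")
            << "requested slot is out of range";
        requested = max_slots - 1;
    }
    return requested;
}
```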
@@ -49,6 +49,10 @@ public:
 }

 virtual bool addGetCall(const std::string &uri, const std::function<std::string()> &callback) = 0;
+virtual bool addWildcardGetCall(
+const std::string &uri,
+const std::function<std::string(const std::string &)> &callback
+) = 0;

 virtual uint16_t getListeningPort() const = 0;

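Unlike `addGetCall`, the new `addWildcardGetCall` hands the requested URI to the callback, so one registration can answer a whole family of paths. A sketch of a possible registration; the URI prefix, the response text, and the wrapper function are illustrative, only the interface call itself comes from the hunk above:

```cpp
// Illustrative registration through the I_RestApi interface extended above.
#include <string>

void
registerWildcardEndpoint(I_RestApi *rest)
{
    rest->addWildcardGetCall(
        "/show-status/",
        [] (const std::string &uri)
        {
            // The handler receives the full requested URI and can route on it.
            return std::string("requested resource: ") + uri;
        }
    );
}
```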
@@ -228,6 +228,18 @@ public:
 return sni_host_name;
 }

+void
+setDnHostName(const std::string &_dn_host_name)
+{
+dn_host_name = _dn_host_name;
+}
+
+Maybe<std::string>
+getDnHostName() const
+{
+return dn_host_name;
+}
+
 void
 setRateLimitBlock(uint block_time)
 {
@@ -273,6 +285,7 @@ public:
 private:
 std::string host_name = "";
 Maybe<std::string> sni_host_name = genError("SNI host name not set");
+Maybe<std::string> dn_host_name = genError("DN host name not set");
 std::string ca_path = "";
 std::string client_cert_path = "";
 std::string client_key_path = "";
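As with the SNI field above it, the DN host name is a `Maybe<std::string>` that starts out holding an error, so callers must check it before unpacking. A short sketch of the access pattern; the template wrapper is hypothetical:

```cpp
// Sketch: reading the new DN host name accessors added above.
#include <string>

template <typename TransactionData>
std::string
dnHostNameOrDefault(const TransactionData &data)
{
    Maybe<std::string> dn = data.getDnHostName();
    if (!dn.ok()) return "";      // still the genError("DN host name not set") default
    return dn.unpack();
}
```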
@@ -10,6 +10,11 @@ class MockRestApi : public Singleton::Provide<I_RestApi>::From<MockProvider<I_Re
 public:
 MOCK_CONST_METHOD0(getListeningPort, uint16_t());
 MOCK_METHOD2(addGetCall, bool(const std::string &, const std::function<std::string()> &));
+MOCK_METHOD2(
+addWildcardGetCall,
+bool(const std::string &, const std::function<std::string(const std::string &)> &)
+);

 // You can't mock a function with an R-value reference. So mock a slightly different one
 MOCK_METHOD3(mockRestCall, bool(RestAction, const std::string &, const std::unique_ptr<RestInit> &));

@@ -47,6 +47,7 @@
 #include "buffer.h"
 #include "intelligence_comp_v2.h"
 #include "messaging.h"
+#include "env_details.h"

 USE_DEBUG_FLAG(D_COMP_IS);

@@ -233,6 +234,7 @@ class ComponentListCore
 MemoryCalculator,
 TenantManager,
 GenericRulebase,
+EnvDetails,
 Components...
 >
 {
@@ -114,6 +114,7 @@ DEFINE_FLAG(D_COMPONENT, D_ALL)
 DEFINE_FLAG(D_FILE_UPLOAD, D_COMPONENT)
 DEFINE_FLAG(D_RATE_LIMIT, D_COMPONENT)
 DEFINE_FLAG(D_ROLLBACK_TESTING, D_COMPONENT)
+DEFINE_FLAG(D_NGINX_MANAGER, D_COMPONENT)

 DEFINE_FLAG(D_PARSER, D_COMPONENT)
 DEFINE_FLAG(D_WS, D_COMPONENT)
@@ -21,8 +21,12 @@
 #include "i_env_details.h"
 #include "singleton.h"
 #include "debug.h"
+#include "component.h"

-class EnvDetails : Singleton::Provide<I_EnvDetails>::SelfInterface
+class EnvDetails
+:
+public Component,
+Singleton::Provide<I_EnvDetails>::SelfInterface
 {
 public:
 EnvDetails();
@@ -35,10 +39,11 @@ private:
 std::string retrieveToken();
 std::string retrieveNamespace();
 std::string readFileContent(const std::string &file_path);
+bool doesFileExist(const std::string &file_path) const;

 std::string token;
 std::string agent_namespace;
-EnvType env_type;
+EnvType env_type = EnvType::LINUX;
 };

 #endif // __ENV_DETAILS_H__
@@ -162,7 +162,7 @@ class LogField : Singleton::Consume<I_Environment>
 void
 addFields(const LogField &)
 {
-dbgAssert(false)
+dbgAssertOpt(false)
 << AlertInfo(AlertTeam::CORE, "report i/s")
 << "Trying to add a log field to a 'type'ed field";
 }
@@ -26,7 +26,10 @@ public:
 void
 setBulkSize(uint size)
 {
-dbgAssert(size > 0) << AlertInfo(AlertTeam::CORE, "report i/s") << "Bulk size must be larger than 0";
+if (size <= 0) {
+dbgAssertOpt(size > 0) << AlertInfo(AlertTeam::CORE, "report i/s") << "Bulk size must be larger than 0";
+size = 100;
+}
 dbgDebug(D_REPORT_BULK) << "Bulk size is set to " << size;
 bulk_size = size;
 }
@@ -33,6 +33,7 @@ bool makeDir(const std::string &path, mode_t permission = S_IRWXU);
 bool makeDirRecursive(const std::string &path, mode_t permission = S_IRWXU);
 bool deleteDirectory(const std::string &path, bool delete_content = false);
 bool touchFile(const std::string &path);
+std::string resolveFullPath(const std::string &input_path);

 bool
 copyFile(
@@ -43,11 +44,8 @@ copyFile(
 );

 bool deleteFile(const std::string &path);

 std::string convertToHumanReadable(uint64_t size_in_bytes);

 std::string getFileName(const std::string &path);

 bool copyDirectory(const std::string &src_dir_path, const std::string &dst_dir_path);

 }// namespace Filesystem
@@ -85,6 +83,8 @@ namespace Strings
 {

 std::string removeTrailingWhitespaces(std::string str);
+std::string removeLeadingWhitespaces(std::string str);
+std::string trim(std::string str);

 } // namespace Strings

@@ -87,9 +87,12 @@ public:
 bool
 operator==(const IPAddr &other) const
 {
-dbgAssert(type!=IPType::UNINITIALIZED && other.type!=IPType::UNINITIALIZED)
-<< AlertInfo(AlertTeam::CORE, "connkey")
-<< "Called on an uninitialized IPType object";
+if (type == IPType::UNINITIALIZED || other.type == IPType::UNINITIALIZED) {
+dbgAssertOpt(type!=IPType::UNINITIALIZED && other.type!=IPType::UNINITIALIZED)
+<< AlertInfo(AlertTeam::CORE, "connkey")
+<< "Called on an uninitialized IPType object";
+return false;
+}
 // Always compairing as if IPv6, in case of Ipv4 the rest of the address is zeroed out.
 int ip_len = (other.type == IPType::V4) ? sizeof(v4.s_addr) : sizeof(v6.s6_addr);
 return (type == other.type) && (memcmp(v6.s6_addr, other.v6.s6_addr, ip_len) == 0);
@@ -308,9 +311,12 @@ public:
 IPType
 getType() const
 {
-dbgAssert(src.type == dst.type)
-<< AlertInfo(AlertTeam::CORE, "connkey")
-<< "Mismatch in connection types (Src and Dst types are not identical)";
+if (src.type != dst.type) {
+dbgAssertOpt(src.type == dst.type)
+<< AlertInfo(AlertTeam::CORE, "connkey")
+<< "Mismatch in connection types (Src and Dst types are not identical)";
+return IPType::V6;
+}
 return src.type;
 }

@@ -18,12 +18,14 @@
 #include "i_mainloop.h"
 #include "singleton.h"
 #include "component.h"
+#include "i_time_get.h"

 class SocketIS
 :
 public Component,
 Singleton::Provide<I_Socket>,
-Singleton::Consume<I_MainLoop>
+Singleton::Consume<I_MainLoop>,
+Singleton::Consume<I_TimeGet>
 {
 public:
 SocketIS();
@@ -1,3 +1,3 @@
-add_library(logging logging.cc log_generator.cc debug_stream.cc file_stream.cc fog_stream.cc syslog_stream.cc cef_stream.cc k8s_svc_stream.cc)
+add_library(logging logging.cc log_generator.cc debug_stream.cc file_stream.cc fog_stream.cc syslog_stream.cc cef_stream.cc k8s_svc_stream.cc log_connector.cc)

 add_subdirectory(logging_ut)
@@ -24,22 +24,21 @@ USE_DEBUG_FLAG(D_REPORT);
 static string lookup_cmd = "nslookup ";
 static string line_selection_cmd = "| grep Address | sed -n 2p";
 static string parsing_cmd = "| cut -f2 -d' ' | tr -d '\n'";
+static string CEF_NAME = "CEF";

 CefStream::CefStream(const string &_address, int _port, I_Socket::SocketType _protocol)
 :
-i_socket(Singleton::Consume<I_Socket>::by<LoggingComp>()),
-address(_address),
-port(_port),
-protocol(_protocol)
+LogStreamConnector(_address, _port, _protocol, CEF_NAME)
 {
-connect();
-if (!socket.ok()) {
-dbgWarning(D_REPORT) << "Failed to connect to the CEF server";
-}
+init();
+socket = genError("Not set yet");
 }

 CefStream::~CefStream()
 {
+sendAllLogs();
+if (mainloop != nullptr && mainloop->doesRoutineExist(connecting_routine)) mainloop->stop(connecting_routine);
+
 if (socket.ok()) {
 i_socket->closeSocket(const_cast<int &>(*socket));
 socket = genError("Closed socket");
@@ -49,50 +48,67 @@ CefStream::~CefStream()
 void
 CefStream::sendLog(const Report &log)
 {
-if (!socket.ok()) {
-connect();
-if (!socket.ok()) {
-dbgWarning(D_REPORT) << "Failed to connect to the CEF server, log will not be sent.";
-return;
-}
-}
-dbgTrace(D_REPORT) << "Connected to socket.";
 string cef_report = log.getCef();
 if (protocol == I_Socket::SocketType::TCP) {
 cef_report = to_string(cef_report.length()) + " " + cef_report;
 }
 vector<char> data(cef_report.begin(), cef_report.end());
-for (size_t tries = 0; tries < 3; tries++) {
-if (i_socket->writeData(socket.unpack(), data)) {
-dbgTrace(D_REPORT) << "log was sent to CEF server";
-return;
-} else {
-dbgWarning(D_REPORT) << "Failed to send log to CEF server";
-}
-}
-}
+sendLogWithQueue(data);
+}

+void
+CefStream::init() {
+updateSettings();
+mainloop->addOneTimeRoutine(
+I_MainLoop::RoutineType::Offline,
+[this] ()
+{
+dbgTrace(D_REPORT) << FIRST_CEF_CONNECT_NAME;
+},
+FIRST_CEF_CONNECT_NAME
+);
+
+auto ceflog_retry_interval = getProfileAgentSettingWithDefault<uint>(
+RETRY_CONNECT_INTERVAL,
+"agent.config.log.cefServer.connect_retry_interval");
+dbgTrace(D_REPORT) << "retry interval: " << ceflog_retry_interval;
+chrono::seconds connect_retry_interval = chrono::seconds(ceflog_retry_interval);
+connecting_routine = mainloop->addRecurringRoutine(
+I_MainLoop::RoutineType::Offline,
+connect_retry_interval,
+[this] ()
+{
+dbgTrace(D_REPORT) << CEF_CONNECT_NAME;
+maintainConnection();
+},
+CEF_CONNECT_NAME
+);
 }

 void
 CefStream::connect()
 {
-auto cef_address = getProfileAgentSettingWithDefault<string>(address, "agent.config.log.cefServer.IP");
-auto cef_port = getProfileAgentSettingWithDefault<uint>(port, "agent.config.log.cefServer.port");
-if (cef_address.empty()) {
+dbgDebug(D_REPORT)
+<< "Connecting to CEF server"
+<< " Address: "
+<< address
+<< " Port: "
+<< port;
+if (address.empty()) {
 dbgWarning(D_REPORT) << "Cannot connect to CEF server, IP/Domain is not configured.";
 return;
 }

 struct in_addr addr;
-if (inet_pton(AF_INET, cef_address.data(), &addr) != 1) {
+if (inet_pton(AF_INET, address.data(), &addr) != 1) {
 I_ShellCmd *shell_cmd = Singleton::Consume<I_ShellCmd>::by<LoggingComp>();
-string host_cmd = lookup_cmd + cef_address + line_selection_cmd + parsing_cmd;
+string host_cmd = lookup_cmd + address + line_selection_cmd + parsing_cmd;
 Maybe<string> res = shell_cmd->getExecOutput(host_cmd, 500);
 if (!res.ok()) {
 dbgWarning(D_REPORT)
 << "Failed to execute domain lookup command. "
 << "CEF Domain: "
-<< cef_address
+<< address
 << "Error: "
 << res.getErr();
 return;
@@ -102,7 +118,7 @@ CefStream::connect()
 dbgWarning(D_REPORT)
 << "Got en empty ip address from lookup command. "
 << "CEF Domain: "
-<< cef_address
+<< address
 << "Got bad ip address: "
 << res.unpack();
 return;
@@ -113,19 +129,47 @@ CefStream::connect()
 dbgWarning(D_REPORT)
 << "Got a faulty ip address from lookup command. "
 << "CEF Domain: "
-<< cef_address
+<< address
 << "Got bad ip address: "
 << res.unpack();
 return;
 }

-cef_address = res.unpack();
+address = res.unpack();
 }

 socket = i_socket->genSocket(
 protocol,
 false,
 false,
-cef_address + ":" + to_string(cef_port)
+address + ":" + to_string(port)
 );
 }

+void
+CefStream::updateSettings()
+{
+max_logs_per_send = getProfileAgentSettingWithDefault<int>(
+NUMBER_OF_LOGS_PER_SEND,
+"agent.config.log.cefServer.MaxLogsPerSend"
+);
+if (max_logs_per_send < 0) {
+max_logs_per_send = NUMBER_OF_LOGS_PER_SEND;
+}
+address = getProfileAgentSettingWithDefault<string>(address, "agent.config.log.cefServer.IP");
+port = getProfileAgentSettingWithDefault<uint>(port, "agent.config.log.cefServer.port");
+max_data_in_queue = getProfileAgentSettingWithDefault<uint>(
+MAX_LOG_QUEUE,
+"agent.config.log.cefServer.MaxDataInQueue"
+);
+dbgTrace(D_REPORT)
+<< "CEF server settings updated. "
+<< "Address: "
+<< address
+<< " Port: "
+<< port
+<< " Max logs per send: "
+<< max_logs_per_send
+<< " Max data in queue: "
+<< max_data_in_queue;
+}
core/logging/log_connector.cc (new executable file, 131 lines)
@@ -0,0 +1,131 @@
+// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "log_streams.h"
+
+void
+LogStreamConnector::maintainConnection()
+{
+dbgTrace(D_REPORT)
+<< "Check if the connection is alive:"
+<< (socket.ok() ? " socket ok" : " socket not ok")
+<< (did_write_fail_in_this_window ? " previous write failed" : " previous write succeeded");
+if (!socket.ok() || did_write_fail_in_this_window) {
+dbgTrace(D_REPORT)
+<< (socket.ok() ? "" : "The current socket is not ok, trying to connect.");
+connect();
+did_write_fail_in_this_window = false;
+if (!socket.ok()) {
+dbgWarning(D_REPORT) << "Failed to connect to the server, logs will not be sent";
+return;
+}
+}
+}
+
+void
+LogStreamConnector::addLogToQueue(const std::vector<char> &data)
+{
+if (logs_in_queue.size() < max_data_in_queue) {
+dbgTrace(D_REPORT)
+<< "Adding log to queue, Amount of logs in queue: "
+<< logs_in_queue.size();
+logs_in_queue.push_back(data);
+} else {
+dbgWarning(D_REPORT) << "Queue is full, dropping log";
+}
+}
+
+void
+LogStreamConnector::writeFail()
+{
+if (!socket.ok()) {
+dbgTrace(D_REPORT) << "Socket is not ok, stopping the connect after write failure";
+return;
+}
+dbgTrace(D_REPORT) << (did_write_fail_in_this_window ? "Previous write failed" : "Previous write succeeded");
+if (!did_write_fail_in_this_window) {
+dbgTrace(D_REPORT)
+<< "First time in window that write failed, trying to reconnect to server";
+connect();
+}
+did_write_fail_in_this_window = true;
+}
+
+bool
+LogStreamConnector::basicWriteLog(const std::vector<char> &data)
+{
+for (size_t tries = 0; tries < 3; tries++) {
+if (socket.ok() && i_socket->writeData(socket.unpack(), data)) {
+dbgTrace(D_REPORT) << "log was sent to server";
+return true;
+} else {
+dbgTrace(D_REPORT) << "Failed to send log to server";
+writeFail();
+}
+}
+return false;
+}
+
+void
+LogStreamConnector::sendLogWithQueue(const std::vector<char> &data)
+{
+if (!socket.ok()) {
+dbgTrace(D_REPORT)
+<< "Socket not ok. Size of logs in queue: "
+<< logs_in_queue.size()
+<< ". Adding logs to the queue until the connection is established.";
+addLogToQueue(data);
+return;
+}
+
+if (logs_in_queue.empty() && basicWriteLog(data)) return;
+
+addLogToQueue(data);
+
+int write_iterations = 0;
+
+while (write_iterations < max_logs_per_send && !logs_in_queue.empty()) {
+dbgTrace(D_REPORT)
+<< " Iteration: "
+<< write_iterations
+<< " to try and write a log from queue to server"
+<< log_name;
+int i = 0;
+bool write_success = false;
+while (
+socket.ok() &&
+(i < 3) &&
+!(write_success = i_socket->writeData(socket.unpack(), logs_in_queue.front()))) {
+i++;
+}
+if (write_success) {
+dbgTrace(D_REPORT) << "log was written to " << log_name << " server";
+logs_in_queue.erase(logs_in_queue.begin());
+write_iterations++;
+} else {
+dbgTrace(D_REPORT) << "Failed to send log to " << log_name << " server";
+writeFail();
+return;
+}
+}
+}
+
+void
+LogStreamConnector::sendAllLogs()
+{
+dbgTrace(D_REPORT) << "Sending all logs from queue to server";
+for(auto &log : logs_in_queue) {
+basicWriteLog(log);
+}
+logs_in_queue.clear();
+}
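With the connector in place, a concrete stream only has to serialize the report and (re)open its socket; queuing, bounded retries, and the reconnect window are inherited. A sketch of a hypothetical additional stream built on it; the class name, log-name string, and the choice of `getCef()` as the serialized form are illustrative:

```cpp
// Hypothetical stream type layered on LogStreamConnector (sketch only).
#include "log_streams.h"

#include <string>
#include <vector>

class ExampleStream : public LogStreamConnector
{
public:
    ExampleStream(const std::string &address, int port, I_Socket::SocketType protocol)
            :
        LogStreamConnector(address, port, protocol, "Example")
    {
    }

    void
    sendLog(const Report &log) override
    {
        std::string serialized = log.getCef();                 // any textual form of the report
        std::vector<char> data(serialized.begin(), serialized.end());
        sendLogWithQueue(data);                                 // base class queues on failure and drains on reconnect
    }

protected:
    // Expected to open the socket, e.g. via i_socket->genSocket(...), as CefStream::connect() does.
    void connect() override {}

    // Expected to refresh address/port/queue limits from the profile agent settings.
    void updateSettings() override {}
};
```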
@@ -23,6 +23,15 @@
 #include "logging_metric.h"
 #include "i_logging.h"
 #include "i_socket_is.h"
+#include "logging_comp.h"
+
+static const int RETRY_CONNECT_INTERVAL = 120;
+static const std::string FIRST_SYSLOG_CONNECT_NAME = "first connecting to Syslog server";
+static const std::string SYSLOG_CONNECT_NAME = "connecting to Syslog server";
+static const std::string FIRST_CEF_CONNECT_NAME = "first connecting to CEF server";
+static const std::string CEF_CONNECT_NAME = "connecting to CEF server";
+static const int NUMBER_OF_LOGS_PER_SEND = 15;
+static size_t MAX_LOG_QUEUE = 1000;

 USE_DEBUG_FLAG(D_REPORT);

@@ -93,43 +102,77 @@ private:
 I_Messaging *i_msg = nullptr;
 };

-class SyslogStream : public Stream
+class LogStreamConnector : public Stream
+{
+public:
+LogStreamConnector(
+const std::string &_address,
+int _port,
+I_Socket::SocketType _protocol,
+const std::string &_log_name) :
+mainloop(Singleton::Consume<I_MainLoop>::by<LoggingComp>()),
+i_socket(Singleton::Consume<I_Socket>::by<LoggingComp>()),
+address(_address),
+port(_port),
+protocol(_protocol),
+logs_in_queue(),
+log_name(_log_name) {}
+virtual ~LogStreamConnector() {}
+
+protected:
+virtual void connect() = 0;
+virtual void updateSettings() = 0;
+
+void maintainConnection();
+void addLogToQueue(const std::vector<char> &data);
+void writeFail();
+bool basicWriteLog(const std::vector<char> &data);
+void sendLogWithQueue(const std::vector<char> &data);
+void sendAllLogs();
+
+I_MainLoop *mainloop = nullptr;
+I_Socket *i_socket = nullptr;
+std::string address;
+int port;
+I_Socket::SocketType protocol = I_Socket::SocketType::UDP;
+Maybe<I_Socket::socketFd> socket = genError("Not set yet");
+bool did_write_fail_in_this_window = false;
+std::vector<std::vector<char>> logs_in_queue;
+I_MainLoop::RoutineID connecting_routine = -1;
+int max_logs_per_send = NUMBER_OF_LOGS_PER_SEND;
+std::string log_name;
+uint max_data_in_queue = MAX_LOG_QUEUE;
+};
+
+class SyslogStream : public LogStreamConnector
 {
 public:
 SyslogStream(const std::string &_address, int _port, I_Socket::SocketType protocol);
 ~SyslogStream();

 void sendLog(const Report &log) override;

-private:
-void sendLog(const std::vector<char> &data);
-void connect();
+protected:
+void connect() override;
+void updateSettings() override;

-I_Socket *i_socket = nullptr;
-I_MainLoop *mainloop = nullptr;
-std::string address;
-int port;
-I_Socket::SocketType protocol = I_Socket::SocketType::UDP;
+private:
+void init();
+void sendLog(const std::vector<char> &data);
 I_MainLoop::RoutineID log_send_routine = -1;
-Maybe<I_Socket::socketFd> socket = genError("Not set yet");
 };

-class CefStream : public Stream
+class CefStream : public LogStreamConnector
 {
 public:
 CefStream(const std::string &_address, int _port, I_Socket::SocketType _protocol);
 ~CefStream();

 void sendLog(const Report &log) override;

+protected:
+void connect() override;
+void updateSettings() override;
 private:
-void connect();
+void init();

-I_Socket *i_socket = nullptr;
-std::string address;
-int port;
-I_Socket::SocketType protocol = I_Socket::SocketType::UDP;
-Maybe<I_Socket::socketFd> socket = genError("Not set yet");
 };

 #endif // __LOG_STREAMS_H__
@@ -171,6 +171,7 @@ public:
 } else {
 LogEventLogsSent(true).notify();
 for (auto &iter : streams) {
+dbgTrace(D_REPORT) << "Sending log to stream: " << TagAndEnumManagement::convertToString(iter.first);
 if (log.isStreamActive(iter.first)) iter.second->sendLog(log);
 }
 }
@@ -24,6 +24,7 @@
 #include "metric/all_metric_event.h"
 #include "mock/mock_shell_cmd.h"
 #include "version.h"
+#include "../log_streams.h"

 using namespace testing;
 using namespace std;
@@ -104,6 +105,7 @@ public:
 class LogTest : public testing::Test
 {
 public:
+const ::Report CreateReport(ReportIS::Tags &tag1, ReportIS::Tags &tag2);
 LogTest()
 :
 agent_details(),
@@ -132,6 +134,26 @@ public:
 DoAll(SaveArg<1>(&sysog_routine), Return(0))
 );

+EXPECT_CALL(
+mock_mainloop,
+addOneTimeRoutine(_, _, "first connecting to Syslog server", _)
+).WillRepeatedly(DoAll(SaveArg<1>(&first_connect_syslog_routine), Return(0)));
+
+EXPECT_CALL(
+mock_mainloop,
+addOneTimeRoutine(_, _, "first connecting to CEF server", _)
+).WillRepeatedly(DoAll(SaveArg<1>(&first_connect_cef_routine), Return(0)));
+
+EXPECT_CALL(
+mock_mainloop,
+addRecurringRoutine(_, _, _, "connecting to Syslog server", _)
+).WillRepeatedly(DoAll(SaveArg<2>(&connect_syslog_routine), Return(2)));
+
+EXPECT_CALL(
+mock_mainloop,
+addRecurringRoutine(_, _, _, "connecting to CEF server", _)
+).WillRepeatedly(DoAll(SaveArg<2>(&connect_cef_routine), Return(3)));
+
 EXPECT_CALL(mock_socket_is, writeData(1, _)).WillRepeatedly(
 WithArg<1>(
 Invoke(
@@ -291,6 +313,10 @@ public:
 ConfigComponent config;
 vector<string> capture_syslog_cef_data;
 I_MainLoop::Routine sysog_routine = nullptr;
+I_MainLoop::Routine first_connect_syslog_routine = nullptr;
+I_MainLoop::Routine first_connect_cef_routine = nullptr;
+I_MainLoop::Routine connect_syslog_routine = nullptr;
+I_MainLoop::Routine connect_cef_routine = nullptr;
 StrictMock<MockShellCmd> mock_shell_cmd;
 bool is_domain;

@@ -1469,98 +1495,156 @@ TEST_F(LogTest, BulkModification)
 EXPECT_EQ(local_body, str1);
 }

-TEST_F(LogTest, ObfuscationTest)
+const ::Report
+LogTest::CreateReport(Tags &tag1, Tags &tag2) {
+LogField origin("String", "Another string");
+
+const ::Report report(
+"String=\"Another string\"",
+chrono::microseconds(90000),
+Type::EVENT,
+Level::LOG,
+LogLevel::INFO,
+Audience::INTERNAL,
+AudienceTeam::AGENT_CORE,
+Severity::INFO,
+Priority::LOW,
+chrono::seconds(3600),
+origin,
+tag1,
+tag2,
+Notification::POLICY_UPDATE,
+IssuingEngine::AGENT_CORE
+);
+return report;
+}
+
+TEST_F(LogTest, ObfuscationCefSysLogTest)
 {
 loadFakeConfiguration(false);
 Tags tag1 = Tags::POLICY_INSTALLATION;
 Tags tag2 = Tags::ACCESS_CONTROL;
+std::string address = "172.28.1.6";
+int port = 514;
+I_Socket::SocketType protocol = I_Socket::SocketType::TCP;
+// for cef
+CefStream cef_stream(address, port, protocol);
+ASSERT_NE(first_connect_cef_routine, nullptr);
+first_connect_cef_routine();
+ASSERT_NE(connect_cef_routine, nullptr);
+connect_cef_routine();
+cef_stream.sendLog(CreateReport(tag1, tag2));
+EXPECT_EQ(capture_syslog_cef_data.size(), 1u);
+// for syslog activate send log
+SyslogStream syslog_stream(address, port, protocol);

-static const string expected_obfuscated_log(
-"{\n"
-" \"log\": {\n"
-" \"eventTime\": \"0:0:0\",\n"
-" \"eventName\": \"Install policy\",\n"
-" \"eventSeverity\": \"Info\",\n"
-" \"eventPriority\": \"Low\",\n"
-" \"eventType\": \"Event Driven\",\n"
-" \"eventLevel\": \"Log\",\n"
-" \"eventLogLevel\": \"info\",\n"
-" \"eventAudience\": \"Internal\",\n"
-" \"eventAudienceTeam\": \"\",\n"
-" \"eventFrequency\": 0,\n"
-" \"eventTags\": [\n"
-" \"Access Control\",\n"
-" \"Policy Installation\"\n"
-" ],\n"
-" \"eventSource\": {\n"
-" \"agentId\": \"Unknown\",\n"
-" \"eventTraceId\": \"\",\n"
-" \"eventSpanId\": \"\",\n"
-" \"issuingEngineVersion\": \"\",\n"
-" \"serviceName\": \"Unnamed Nano Service\"\n"
-" },\n"
-" \"eventData\": {\n"
-" \"logIndex\": 1,\n"
-" \"String\": \"{XORANDB64}:mocked field\"\n"
-" }\n"
-" }\n"
-"}"
-);
-StrictMock<MockEncryptor> mock_encrypt;
-EXPECT_CALL(mock_encrypt, base64Encode(_)).WillOnce(Return("mocked field"));
-
-static const string expected_clear_log(
-"{\n"
-" \"eventTime\": \"0:0:0\",\n"
-" \"eventName\": \"Install policy\",\n"
-" \"eventSeverity\": \"Info\",\n"
-" \"eventPriority\": \"Low\",\n"
-" \"eventType\": \"Event Driven\",\n"
-" \"eventLevel\": \"Log\",\n"
-" \"eventLogLevel\": \"info\",\n"
-" \"eventAudience\": \"Internal\",\n"
-" \"eventAudienceTeam\": \"\",\n"
-" \"eventFrequency\": 0,\n"
-" \"eventTags\": [\n"
-" \"Access Control\",\n"
-" \"Policy Installation\"\n"
-" ],\n"
-" \"eventSource\": {\n"
-" \"agentId\": \"Unknown\",\n"
-" \"eventTraceId\": \"\",\n"
-" \"eventSpanId\": \"\",\n"
-" \"issuingEngineVersion\": \"\",\n"
-" \"serviceName\": \"Unnamed Nano Service\"\n"
-" },\n"
-" \"eventData\": {\n"
-" \"logIndex\": 1,\n"
-" \"String\": \"Another string\"\n"
-" }\n"
-"}"
-);
-
-{
-LogGen log(
-"Install policy",
-Audience::INTERNAL,
-Severity::INFO,
-Priority::LOW,
-tag1,
-tag2,
-Enreachments::BEAUTIFY_OUTPUT
-);
-log << LogField("String", "Another string", LogFieldOption::XORANDB64);
-EXPECT_EQ(toJson(log), expected_clear_log);
-}
-
-EXPECT_THAT(getMessages(), HasSubstr(expected_clear_log));
-EXPECT_THAT(readLogFile(), HasSubstr(expected_clear_log));
-EXPECT_EQ(getBodyFogMessage(), expected_obfuscated_log);
+// connection to socket before send log
+ASSERT_NE(first_connect_syslog_routine, nullptr);
+first_connect_syslog_routine();
+ASSERT_NE(connect_syslog_routine, nullptr);
+connect_syslog_routine();
+
+syslog_stream.sendLog(CreateReport(tag1, tag2)); // send log in routine sysog_routine
+
 ASSERT_NE(sysog_routine, nullptr);
 sysog_routine();
-EXPECT_EQ(capture_syslog_cef_data.size(), 2u);
+EXPECT_EQ(capture_syslog_cef_data.size(), 2u); // 1 for CEF 1 for Syslog
 for (const string &str : capture_syslog_cef_data) {
-EXPECT_THAT(str, AnyOf(HasSubstr("String='Another string'"), HasSubstr("String=\"Another string\"")));
+EXPECT_THAT(str, AnyOf(
+HasSubstr("String='Another string'"),
+HasSubstr(R"(String="Another string")"),
+HasSubstr("String=\"Another string\"")));
+}
+}
+
+TEST_F(LogTest, SysLogWriteFailTest)
+{
+loadFakeConfiguration(false);
+capture_syslog_cef_data.clear();
+Tags tag1 = Tags::POLICY_INSTALLATION;
+Tags tag2 = Tags::ACCESS_CONTROL;
+
+// for syslog activate send log
+std::string address = "172.28.1.6";
+int port = 514;
+I_Socket::SocketType protocol = I_Socket::SocketType::TCP;
+SyslogStream syslog_stream(address, port, protocol);
+
+ASSERT_NE(first_connect_syslog_routine, nullptr);
+first_connect_syslog_routine();
+ASSERT_NE(connect_syslog_routine, nullptr);
+connect_syslog_routine();
+
+EXPECT_CALL(mock_socket_is, writeData(1, _))
+.WillOnce(Return(false))
+.WillOnce(Return(false))
+.WillOnce(Return(false))
+.WillRepeatedly(
+WithArg<1>(
+Invoke(
+[this](const vector<char> &data)
+{
+capture_syslog_cef_data.emplace_back(data.begin(), data.end());
+return true;
+}
+)
+)
+);
+
+syslog_stream.sendLog(CreateReport(tag1, tag2));
+ASSERT_NE(sysog_routine, nullptr);
+EXPECT_EQ(capture_syslog_cef_data.size(), 0u); //before write
+sysog_routine();
+EXPECT_EQ(capture_syslog_cef_data.size(), 1u);
+for (const string &str : capture_syslog_cef_data) {
+EXPECT_THAT(str, AnyOf(
+HasSubstr("String='Another string'"),
+HasSubstr(R"(String="Another string")"),
+HasSubstr("String=\"Another string\"")));
+}
+}
+
+TEST_F(LogTest, CefWriteFailTest)
+{
+loadFakeConfiguration(false);
+capture_syslog_cef_data.clear();
+Tags tag1 = Tags::POLICY_INSTALLATION;
+Tags tag2 = Tags::ACCESS_CONTROL;
+
+// for syslog activate send log
+std::string address = "172.28.1.6";
+int port = 514;
+I_Socket::SocketType protocol = I_Socket::SocketType::TCP;
+CefStream cef_stream(address, port, protocol);
+
+ASSERT_NE(first_connect_cef_routine, nullptr);
+first_connect_cef_routine();
+ASSERT_NE(connect_cef_routine, nullptr);
+connect_cef_routine();
+
+EXPECT_CALL(mock_socket_is, writeData(1, _))
+.WillOnce(Return(false))
+.WillOnce(Return(false))
+.WillOnce(Return(false))
+.WillRepeatedly(
+WithArg<1>(
+Invoke(
+[this](const vector<char> &data)
+{
+capture_syslog_cef_data.emplace_back(data.begin(), data.end());
+return true;
+}
+)
+)
+);
+EXPECT_EQ(capture_syslog_cef_data.size(), 0u); //before write
+cef_stream.sendLog(CreateReport(tag1, tag2));
+EXPECT_EQ(capture_syslog_cef_data.size(), 1u);
+for (const string &str : capture_syslog_cef_data) {
+EXPECT_THAT(str, AnyOf(
+HasSubstr("String='Another string'"),
+HasSubstr(R"(String="Another string")"),
+HasSubstr("String=\"Another string\"")));
 }
 }

```diff
@@ -23,24 +23,22 @@ USE_DEBUG_FLAG(D_REPORT);
 static string lookup_cmd = "nslookup ";
 static string line_selection_cmd = "| grep Address | sed -n 2p";
 static string parsing_cmd = "| cut -f2 -d' ' | tr -d '\n'";
+static string SYSLOG_NAME = "Syslog";
 
 SyslogStream::SyslogStream(const string &_address, int _port, I_Socket::SocketType _protocol)
         :
-        i_socket(Singleton::Consume<I_Socket>::by<LoggingComp>()),
-        mainloop(Singleton::Consume<I_MainLoop>::by<LoggingComp>()),
-        address(_address),
-        port(_port),
-        protocol(_protocol)
+        LogStreamConnector(_address, _port, _protocol, SYSLOG_NAME)
 {
-    connect();
-    if (!socket.ok()) {
-        dbgWarning(D_REPORT) << "Failed to connect to the syslog server";
-    }
+    socket = genError("Not set yet");
+    init();
 }
 
 SyslogStream::~SyslogStream()
 {
+    sendAllLogs();
     if (mainloop != nullptr && mainloop->doesRoutineExist(log_send_routine)) mainloop->stop(log_send_routine);
+    if (mainloop != nullptr && mainloop->doesRoutineExist(connecting_routine)) mainloop->stop(connecting_routine);
+
     if (socket.ok()) {
         i_socket->closeSocket(const_cast<int &>(*socket));
         socket = genError("Closed socket");
@@ -55,7 +53,7 @@ SyslogStream::sendLog(const Report &log)
         syslog_report = to_string(syslog_report.length()) + " " + syslog_report;
     }
     vector<char> data(syslog_report.begin(), syslog_report.end());
-    mainloop->addOneTimeRoutine(
+    log_send_routine = mainloop->addOneTimeRoutine(
         I_MainLoop::RoutineType::Offline,
         [this, data] () { sendLog(data); },
         "Logging Syslog stream messaging"
@@ -65,45 +63,64 @@ SyslogStream::sendLog(const Report &log)
 void
 SyslogStream::sendLog(const vector<char> &data)
 {
-    for (int tries = 0; tries < 3; ++tries) {
-        if (!socket.ok()) {
-            connect();
-            if (!socket.ok()) {
-                dbgWarning(D_REPORT) << "Failed to connect to the syslog server, Log will not be sent.";
-                return;
-            }
-            dbgTrace(D_REPORT) << "Successfully connect to the syslog server";
-        }
-
-        if (i_socket->writeData(socket.unpack(), data)) {
-            dbgTrace(D_REPORT) << "log was sent to syslog server";
-            return;
-        }
-    }
-    dbgWarning(D_REPORT) << "Failed to send log to syslog server";
+    dbgTrace(D_REPORT) << "Sending Syslog log." << " Max logs per send: " << max_logs_per_send;
+    sendLogWithQueue(data);
+}
+
+void
+SyslogStream::init() {
+    updateSettings();
+    mainloop->addOneTimeRoutine(
+        I_MainLoop::RoutineType::Offline,
+        [this] ()
+        {
+            dbgTrace(D_REPORT) << FIRST_SYSLOG_CONNECT_NAME;
+        },
+        FIRST_SYSLOG_CONNECT_NAME
+    );
+
+    auto syslog_retry_interval = getProfileAgentSettingWithDefault<uint>(
+        RETRY_CONNECT_INTERVAL,
+        "agent.config.log.syslogServer.connect_retry_interval");
+    chrono::seconds connect_retry_interval = chrono::seconds(syslog_retry_interval);
+    connecting_routine = mainloop->addRecurringRoutine(
+        I_MainLoop::RoutineType::Offline,
+        connect_retry_interval,
+        [this] ()
+        {
+            dbgTrace(D_REPORT) << SYSLOG_CONNECT_NAME;
+            maintainConnection();
+        },
+        SYSLOG_CONNECT_NAME
+    );
 }
 
 void
 SyslogStream::connect()
 {
-    auto syslog_address = getProfileAgentSettingWithDefault<string>(address, "agent.config.log.syslogServer.IP");
-    auto syslog_port = getProfileAgentSettingWithDefault<uint>(port, "agent.config.log.syslogServer.port");
+    dbgDebug(D_REPORT)
+        << "Connecting to Syslog server"
+        << " Address: "
+        << address
+        << " Port: "
+        << port;
 
-    if (syslog_address.empty()) {
+    if (address.empty()) {
         dbgWarning(D_REPORT) << "Cannot connect to Syslog server, Address IP/Domain not configured.";
         return;
     }
 
     struct in_addr addr;
-    if (inet_pton(AF_INET, syslog_address.data(), &addr) != 1) {
+    if (inet_pton(AF_INET, address.data(), &addr) != 1) {
         I_ShellCmd *shell_cmd = Singleton::Consume<I_ShellCmd>::by<LoggingComp>();
-        string host_cmd = lookup_cmd + syslog_address + line_selection_cmd + parsing_cmd;
+        string host_cmd = lookup_cmd + address + line_selection_cmd + parsing_cmd;
         Maybe<string> res = shell_cmd->getExecOutput(host_cmd, 500);
         if (!res.ok()) {
             dbgWarning(D_REPORT)
                 << "Failed to execute domain lookup command. "
                 << "SYSLOG Domain: "
-                << syslog_address
+                << address
                 << "Error: "
                 << res.getErr();
             return;
@@ -113,7 +130,7 @@ SyslogStream::connect()
             dbgWarning(D_REPORT)
                 << "Got en empty ip address from lookup command. "
                 << "SYSLOG Domain: "
-                << syslog_address
+                << address
                 << "Got bad ip address: "
                 << res.unpack();
             return;
@@ -124,19 +141,46 @@ SyslogStream::connect()
             dbgWarning(D_REPORT)
                 << "Got a faulty ip address from lookup command. "
                 << "SYSLOG Domain: "
-                << syslog_address
+                << address
                 << "Got bad ip address: "
                 << res.unpack();
             return;
         }
 
-        syslog_address = res.unpack();
+        address = res.unpack();
     }
 
     socket = i_socket->genSocket(
         protocol,
         false,
         false,
-        syslog_address + ":" + to_string(syslog_port)
+        address + ":" + to_string(port)
     );
 }
+
+void
+SyslogStream::updateSettings()
+{
+    max_logs_per_send = getProfileAgentSettingWithDefault<int>(
+        NUMBER_OF_LOGS_PER_SEND,
+        "agent.config.log.syslogServer.MaxLogsPerSend"
+    );
+    if (max_logs_per_send < 0) {
+        max_logs_per_send = NUMBER_OF_LOGS_PER_SEND;
+    }
+    address = getProfileAgentSettingWithDefault<string>(address, "agent.config.log.syslogServer.IP");
+    port = getProfileAgentSettingWithDefault<uint>(port, "agent.config.log.syslogServer.port");
+    max_data_in_queue =
+        getProfileAgentSettingWithDefault<uint>(MAX_LOG_QUEUE, "agent.config.log.syslogServer.MaxLogQueue");
+
+    dbgTrace(D_REPORT)
+        << "Syslog server settings updated. "
+        << "Address: "
+        << address
+        << " Port: "
+        << port
+        << " Max logs per send: "
+        << max_logs_per_send
+        << " Max data in queue: "
+        << max_data_in_queue;
+}
```
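The SyslogStream rework above replaces connect-in-the-constructor with a queue-and-reconnect model: `sendLog` only hands data to a queue, a recurring routine retries the connection, and settings are re-read via `updateSettings`. Below is a minimal, self-contained sketch of that queue-and-retry idea; `BufferedStream`, `try_send`, and the drop-oldest policy are illustrative stand-ins, not code from this repository.

```cpp
#include <chrono>
#include <deque>
#include <iostream>
#include <string>

// Toy stand-in for the transport layer; fails twice before succeeding.
static bool try_send(const std::string &payload, int attempt)
{
    (void)payload;
    return attempt >= 2;
}

class BufferedStream
{
public:
    explicit BufferedStream(std::size_t max_queue) : max_queue(max_queue) {}

    // sendLog only enqueues; real transmission happens on the periodic flush.
    void sendLog(const std::string &log)
    {
        if (queue.size() >= max_queue) queue.pop_front(); // drop oldest when full
        queue.push_back(log);
    }

    // Called from a recurring routine (here: directly from main) every interval.
    void flush()
    {
        while (!queue.empty()) {
            if (!try_send(queue.front(), attempt++)) return; // keep it queued, retry next tick
            queue.pop_front();
        }
    }

    std::size_t pending() const { return queue.size(); }

private:
    std::deque<std::string> queue;
    std::size_t max_queue;
    int attempt = 0;
};

int main()
{
    BufferedStream stream(128);
    stream.sendLog("log-1");
    stream.sendLog("log-2");
    for (int tick = 0; tick < 3; ++tick) stream.flush(); // simulates the recurring routine
    std::cout << "pending after retries: " << stream.pending() << "\n";
    return 0;
}
```

The value of the pattern is that a failed send never blocks the caller: the log stays queued and the next tick retries it, which is exactly what the write-fail unit tests above exercise.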
```diff
@@ -76,10 +76,12 @@ RoutineWrapper::resume()
 void
 RoutineWrapper::invoke(pull_type &pull, I_MainLoop::Routine func)
 {
-    dbgAssert(active != nullptr)
-        << AlertInfo(AlertTeam::CORE, "mainloop i/s")
-        << "Trying to invoke without an active routine";
+    if (!active) {
+        dbgAssertOpt(active != nullptr)
+            << AlertInfo(AlertTeam::CORE, "mainloop i/s")
+            << "Trying to invoke without an active routine";
+        return;
+    }
     active->pull = move(pull); // First invokation (other invokaction will start inside `func`), set the `pull` object
     func();
 }
@@ -227,7 +227,10 @@ MainloopComponent::Impl::reportStartupEvent()
 void
 MainloopComponent::Impl::run()
 {
-    dbgAssert(!is_running) << alert << "MainloopComponent::Impl::run was called while it was already running";
+    if (is_running) {
+        dbgAssertOpt(!is_running) << alert << "MainloopComponent::Impl::run was called while it was already running";
+        return;
+    }
     is_running = true;
 
     bool has_primary_routines = true;
@@ -467,7 +470,10 @@ MainloopComponent::Impl::getCurrentRoutineId() const
 void
 MainloopComponent::Impl::yield(bool force)
 {
-    dbgAssert(curr_iter != routines.end()) << alert << "Calling 'yield' without a running current routine";
+    if (curr_iter == routines.end()) {
+        dbgAssertOpt(curr_iter != routines.end()) << alert << "Calling 'yield' without a running current routine";
+        return;
+    }
     if (do_stop) throw MainloopStop();
     if (!force && getTimer()->getMonotonicTime() < stop_time) return;
 
@@ -508,7 +514,10 @@ MainloopComponent::Impl::stopAll()
 void
 MainloopComponent::Impl::stop()
 {
-    dbgAssert(curr_iter != routines.end()) << alert << "Attempting to stop a routine when none is running";
+    if (curr_iter == routines.end()) {
+        dbgAssertOpt(curr_iter != routines.end()) << alert << "Attempting to stop a routine when none is running";
+        return;
+    }
     stop(curr_iter);
 }
 
@@ -526,7 +535,10 @@ MainloopComponent::Impl::stop(RoutineID id)
 void
 MainloopComponent::Impl::halt()
 {
-    dbgAssert(curr_iter != routines.end()) << alert << "Calling 'halt' without a running current routine";
+    if (curr_iter == routines.end()) {
+        dbgAssertOpt(curr_iter != routines.end()) << alert << "Calling 'halt' without a running current routine";
+        return;
+    }
     curr_iter->second.halt();
     yield(true);
 }
@@ -535,7 +547,10 @@ void
 MainloopComponent::Impl::halt(RoutineID id)
 {
     auto iter = routines.find(id);
-    dbgAssert(iter != routines.end()) << alert << "No routine " << id << " to halt";
+    if (iter == routines.end()) {
+        dbgAssertOpt(iter != routines.end()) << alert << "No routine " << id << " to halt";
+        return;
+    }
     iter->second.halt();
     if (iter == curr_iter) yield(true);
 }
@@ -544,7 +559,10 @@ void
 MainloopComponent::Impl::resume(RoutineID id)
 {
     auto iter = routines.find(id);
-    dbgAssert(iter != routines.end()) << alert << "No routine " << id << " to resume";
+    if (iter == routines.end()) {
+        dbgAssertOpt(iter != routines.end()) << alert << "No routine " << id << " to resume";
+        return;
+    }
     iter->second.resume();
 }
```
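The recurring change in the mainloop hunks above is swapping hard `dbgAssert` calls for `dbgAssertOpt` guarded by an early return, so an "impossible" state is reported instead of taking the agent down. The sketch below contrasts the two styles under stated assumptions: `SOFT_ASSERT` is a hypothetical stand-in for the project's reporting macro, and the `Routines` map is a toy model of the routine table.

```cpp
#include <cassert>
#include <iostream>
#include <map>
#include <string>

// Illustrative stand-in for dbgAssertOpt: report the violation but let the caller recover.
#define SOFT_ASSERT(cond, msg) \
    do { if (!(cond)) std::cerr << "assertion failed: " << (msg) << "\n"; } while (0)

using Routines = std::map<int, std::string>;

// Hard-assert style: a missing routine aborts the whole process.
void halt_hard(Routines &routines, int id)
{
    auto iter = routines.find(id);
    assert(iter != routines.end() && "No routine to halt");
    routines.erase(iter);
}

// Guarded style, as in the diff: log the problem and return instead of crashing.
void halt_soft(Routines &routines, int id)
{
    auto iter = routines.find(id);
    if (iter == routines.end()) {
        SOFT_ASSERT(false, "No routine " + std::to_string(id) + " to halt");
        return;
    }
    routines.erase(iter);
}

int main()
{
    Routines routines{{1, "poller"}};
    halt_soft(routines, 42);  // reports and keeps running
    halt_soft(routines, 1);   // removes the routine
    std::cout << "routines left: " << routines.size() << "\n";
    return 0;
}
```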
```diff
@@ -98,6 +98,8 @@ public:
         }
 
         sni_hostname = metadata.getSniHostName();
+        dn_host_name = metadata.getDnHostName();
+
     }
 
     void
@@ -328,17 +330,24 @@ private:
         SSL_set_hostflags(ssl_socket, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
 
         auto host = key.getHostName().c_str();
-        if (!SSL_set1_host(ssl_socket, host)) {
-            return genError("Failed to set host name verification. Host: " + string(host));
+        auto dn = host;
+        auto sni = host;
+
+        if (dn_host_name.ok()) {
+            dn = dn_host_name->c_str();
         }
+
+        dbgDebug(D_CONNECTION) << "Setting host DN: " << dn;
+        if (!SSL_set1_host(ssl_socket, dn)) {
+            return genError("Failed to set host name verification. Host: " + string(dn));
+        }
         if (sni_hostname.ok()) {
-            host = sni_hostname->c_str();
+            sni = sni_hostname->c_str();
         }
 
-        dbgDebug(D_CONNECTION) << "Setting TLS host name extension. Host: " << host;
-        if (!SSL_set_tlsext_host_name(ssl_socket, host)) {
-            return genError("Failed to set TLS host name extension. Host: " + string(host));
+        dbgDebug(D_CONNECTION) << "Setting TLS host name extension. Host: " << sni;
+        if (!SSL_set_tlsext_host_name(ssl_socket, sni)) {
+            return genError("Failed to set TLS host name extension. Host: " + string(sni));
         }
 
         return Maybe<void>();
@@ -698,6 +707,8 @@ private:
     bool should_close_connection = false;
    bool is_dual_auth = false;
     Maybe<string> sni_hostname = genError<string>("Uninitialized");
+    Maybe<string> dn_host_name = genError<string>("Uninitialized");
+
 };
 
 Connection::Connection(const MessageConnectionKey &key, const MessageMetadata &metadata)
@@ -716,7 +727,7 @@ Connection::setProxySettings(const MessageProxySettings &settings)
     map<string, string> headers;
     auto i_encrypt = Singleton::Consume<I_Encryptor>::by<Messaging>();
     if (!settings.getProxyAuth().empty()) {
-        headers["Proxy-Authorization"] = i_encrypt->base64Encode(settings.getProxyAuth());
+        headers["Proxy-Authorization"] = "Basic " + i_encrypt->base64Encode(settings.getProxyAuth());
     }
 
     auto req = HTTPRequest::prepareRequest(*this, HTTPMethod::CONNECT, "", headers, "");
```
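One small but behavior-relevant fix above is prefixing the `Proxy-Authorization` value with `"Basic "`, which HTTP basic authentication expects before the base64-encoded credentials. A standalone sketch of building such a header follows; the inline encoder and the `user:password` pair are illustrative, not the repository's `I_Encryptor`.

```cpp
#include <iostream>
#include <string>

// Minimal Base64 encoder (standard alphabet, with padding) for the demo below.
static std::string base64_encode(const std::string &input)
{
    static const char table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    std::string out;
    int val = 0, bits = -6;
    for (unsigned char c : input) {
        val = (val << 8) + c;
        bits += 8;
        while (bits >= 0) {
            out.push_back(table[(val >> bits) & 0x3F]);
            bits -= 6;
        }
    }
    if (bits > -6) out.push_back(table[((val << 8) >> (bits + 8)) & 0x3F]);
    while (out.size() % 4) out.push_back('=');
    return out;
}

int main()
{
    // "user:password" is a placeholder credential pair.
    std::string proxy_auth = "user:password";
    std::string header = "Proxy-Authorization: Basic " + base64_encode(proxy_auth);
    std::cout << header << "\n"; // Proxy-Authorization: Basic dXNlcjpwYXNzd29yZA==
    return 0;
}
```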
```diff
@@ -30,11 +30,15 @@ using namespace std;
 USE_DEBUG_FLAG(D_MESSAGING_BUFFER);
 
 #ifndef smb
-static constexpr uint buffer_max_size_MB = 100;
+static constexpr uint buffer_max_size_MB = 300;
 #else
 static constexpr uint buffer_max_size_MB = 3;
 #endif
 
+static const uint reservation_default_size = 32;
+static const uint memory_messages_max_size_default = 1024;
+static const uint additional_buffer_size_default = 128;
+
 static bool
 checkExistence(const string &path)
 {
@@ -74,7 +78,7 @@ private:
 
     void handleInMemoryMessages();
 
-    void writeToDisk(const BufferedMessage &message);
+    bool writeToDisk(const BufferedMessage &message);
 
     static Maybe<uint32_t> seekStartOfMessage(FILE *file);
     static bool readBytes(FILE *file, uint size_to_read, char *output_bytes);
@@ -94,6 +98,9 @@ private:
     string buffer_output;
     string buffer_root_path;
     uint max_size_on_disk_MB = 0;
+    uint memory_messages_max_size = 0;
+    uint additional_buffer_size = 0;
+    uint memory_messages_reserve_size = reservation_default_size;
     uint curr_no_retries = 0;
     I_ShellCmd *shell_cmd = nullptr;
     I_Encryptor *encryptor = nullptr;
@@ -105,6 +112,16 @@ void
 MessagingBufferComponent::Impl::init()
 {
     max_size_on_disk_MB = getProfileAgentSettingWithDefault<uint>(buffer_max_size_MB, "eventBuffer.maxSizeOnDiskInMB");
+    memory_messages_max_size =
+        getProfileAgentSettingWithDefault<uint>(
+            memory_messages_max_size_default,
+            "eventBuffer.maxMemoryMessagesToStore"
+        );
+    additional_buffer_size =
+        getProfileAgentSettingWithDefault<uint>(
+            additional_buffer_size_default,
+            "eventBuffer.additionalBufferSize"
+        );
     shell_cmd = Singleton::Consume<I_ShellCmd>::by<Messaging>();
     encryptor = Singleton::Consume<I_Encryptor>::by<Messaging>();
     mainloop = Singleton::Consume<I_MainLoop>::by<Messaging>();
@@ -121,7 +138,7 @@ MessagingBufferComponent::Impl::init()
     string unique_id = instance_awareness->getInstanceID().ok() ? instance_awareness->getInstanceID().unpack() : "";
     buffer_input = buffer_root_path + "/" + executable_name + unique_id + ".input";
     buffer_output = buffer_root_path + "/" + executable_name + unique_id + ".output";
-    memory_messages.reserve(32);
+    memory_messages.reserve(memory_messages_reserve_size);
 
     uint tmo = getConfigurationWithDefault<uint>(5, "message", "Send event retry in sec");
     mainloop->addRecurringRoutine(
@@ -138,6 +155,26 @@ MessagingBufferComponent::Impl::init()
         "Handling in-memory messages",
         false
     );
+
+    registerConfigLoadCb(
+        [this]() {
+            memory_messages_max_size =
+                getProfileAgentSettingWithDefault<uint>(
+                    1000,
+                    "eventBuffer.maxMemoryMessagesToStore"
+                );
+            max_size_on_disk_MB =
+                getProfileAgentSettingWithDefault<uint>(
+                    buffer_max_size_MB,
+                    "eventBuffer.maxSizeOnDiskInMB"
+                );
+            additional_buffer_size =
+                getProfileAgentSettingWithDefault<uint>(
+                    100,
+                    "eventBuffer.additionalBufferSize"
+                );
+        }
+    );
 }
 
 void
@@ -152,8 +189,10 @@ MessagingBufferComponent::Impl::pushNewBufferedMessage(
 {
     dbgTrace(D_MESSAGING_BUFFER) << "Pushing new message to buffer";
 
-    if (!force_immediate_writing) {
-        dbgDebug(D_MESSAGING_BUFFER) << "Holding message temporarily in memory";
+    if (!force_immediate_writing && memory_messages.size() < memory_messages_max_size + additional_buffer_size) {
+        dbgTrace(D_MESSAGING_BUFFER)
+            << "Holding message temporarily in memory. Memory messages size: "
+            << memory_messages.size();
         memory_messages.emplace_back(body, method, uri, category, message_metadata);
         return;
     }
@@ -328,30 +367,80 @@ void
 MessagingBufferComponent::Impl::handleInMemoryMessages()
 {
     auto messages = move(memory_messages);
-    memory_messages.reserve(32);
-
-    for (const auto &message : messages) {
-        if (sendMessage(message) != HTTPStatusCode::HTTP_OK) {
-            if (message.getMessageMetadata().shouldBufferMessage()) writeToDisk(message);
+    uint failed_messages = 0;
+    dbgDebug(D_MESSAGING_BUFFER) << "Handling " << to_string(messages.size()) <<" new in-memory messages";
+
+    memory_messages.reserve(memory_messages_reserve_size);
+    auto it = messages.begin();
+    for (; it != messages.end() && memory_messages.size() < memory_messages_max_size; ++it) {
+        if (sendMessage(*it) != HTTPStatusCode::HTTP_OK) {
+            if (it->getMessageMetadata().shouldBufferMessage()) {
+                if (!writeToDisk(*it)) ++failed_messages;
+            }
         }
+        dbgTrace(D_MESSAGING_BUFFER)
+            << "Processed "
+            << (it - messages.begin() + 1)
+            << " messages out of "
+            << messages.size();
         mainloop->yield();
     }
+    if (it == messages.end()) {
+        memory_messages_reserve_size = reservation_default_size;
+        if (failed_messages > 0) {
+            dbgDebug(D_MESSAGING_BUFFER)
+                << "Failed to handle "
+                << to_string(failed_messages)
+                << " messages out of "
+                << to_string(messages.size());
+        }
+        return;
+    }
+
+    memory_messages_reserve_size =
+        min(memory_messages_reserve_size * 2, memory_messages_max_size + additional_buffer_size);
+
+    dbgDebug(D_MESSAGING_BUFFER) << "Heap buffer is full. Storing messages to disk";
+    auto it2 = messages.end() - 1;
+    do {
+        if (it2->getMessageMetadata().shouldBufferMessage() && !writeToDisk(*it2)) {
+            failed_messages += it2 - it + 1;
+            break;
+        }
+    } while(it2-- != it);
+
+    if (failed_messages > 0) {
+        dbgDebug(D_MESSAGING_BUFFER)
+            << "Failed to handle "
+            << to_string(failed_messages)
+            << " messages out of "
+            << to_string(messages.size());
+    }
 }
 
-void
+bool
 MessagingBufferComponent::Impl::writeToDisk(const BufferedMessage &message)
 {
+    static uint full_buffer_failed_messages = 0;
     auto serialized_message = message.toString();
 
     if (!canWriteToDisk(serialized_message.size())) {
-        dbgWarning(D_MESSAGING_BUFFER) << "Buffer is full. Message will not be written to disk: " << message.getURI();
-        return;
+        full_buffer_failed_messages++;
+        if (full_buffer_failed_messages % 10 == 0) {
+            dbgWarning(D_MESSAGING_BUFFER)
+                << "Buffer is full. "
+                << full_buffer_failed_messages
+                << " messages will not be written to disk";
+        }
+        dbgDebug(D_MESSAGING_BUFFER) << "Buffer is full. Message will not be written to disk: " << message.getURI();
+        return false;
     }
+    full_buffer_failed_messages = 0;
 
     ofstream file(buffer_input, ios::app);
     if (!file.is_open()) {
         dbgWarning(D_MESSAGING_BUFFER) << "Failed to open file for writing. File: " << buffer_input;
-        return;
+        return false;
     }
 
     uint32_t size = serialized_message.size();
@@ -359,6 +448,8 @@ MessagingBufferComponent::Impl::writeToDisk(const BufferedMessage &message)
     file.write(reinterpret_cast<char *>(&size), sizeof(size));
     char type = 0;
     file.write(&type, 1);
+
+    return true;
 }
 
 Maybe<uint32_t>
@@ -538,7 +629,7 @@ MessagingBufferComponent::Impl::canWriteToDisk(size_t message_size) const
         return true;
     }
 
-    dbgWarning(D_MESSAGING_BUFFER)
+    dbgDebug(D_MESSAGING_BUFFER)
        << "Buffer size is full. Directry size: "
        << *maybe_directory_size
        << ", Message size: "
```
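The messaging-buffer changes above cap how many messages are held on the heap (`eventBuffer.maxMemoryMessagesToStore` plus an additional slack buffer) and spill the overflow to an on-disk file. Below is a minimal sketch of that cap-then-spill idea; `SpillBuffer` and the `/tmp` path are illustrative placeholders, not the component's real types or paths.

```cpp
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

// Minimal sketch of "hold in memory up to a cap, spill the rest to disk".
class SpillBuffer
{
public:
    SpillBuffer(std::size_t max_in_memory, std::string spill_path)
            : max_in_memory(max_in_memory), spill_path(std::move(spill_path)) {}

    void push(const std::string &message)
    {
        if (in_memory.size() < max_in_memory) {
            in_memory.push_back(message);
            return;
        }
        // Memory cap reached: append to the on-disk backlog instead of growing the heap.
        std::ofstream file(spill_path, std::ios::app);
        if (file.is_open()) file << message << '\n';
    }

    std::size_t memorySize() const { return in_memory.size(); }

private:
    std::vector<std::string> in_memory;
    std::size_t max_in_memory;
    std::string spill_path;
};

int main()
{
    SpillBuffer buffer(2, "/tmp/spill_buffer_demo.txt"); // path is only for the demo
    for (int i = 0; i < 5; ++i) buffer.push("message-" + std::to_string(i));
    std::cout << "kept in memory: " << buffer.memorySize() << "\n"; // prints 2
    return 0;
}
```

The overflow unit test that follows drives exactly this boundary: five messages fit the configured memory cap, the extras land on disk, and the disk queue is drained afterwards.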
```diff
@@ -282,3 +282,51 @@ TEST_F(TestMessagingBuffer, testRoutinInMemory)
     msg = buffer_provider->peekMessage();
     ASSERT_FALSE(msg.ok());
 }
+
+TEST_F(TestMessagingBuffer, testRoutinInMemoryOverflow)
+{
+    string config_json =
+        "{"
+        " \"agentSettings\": [\n"
+        " {\n"
+        " \"id\": \"123\",\n"
+        " \"key\": \"eventBuffer.maxMemoryMessagesToStore\",\n"
+        " \"value\": \"5\"\n"
+        " },\n"
+        " {\n"
+        " \"id\": \"123\",\n"
+        " \"key\": \"eventBuffer.additionalBufferSize\",\n"
+        " \"value\": \"1\"\n"
+        " }]\n"
+        "}";
+
+    istringstream ss(config_json);
+    Singleton::Consume<Config::I_Config>::from(config)->loadConfiguration(ss);
+
+    MessageCategory category = MessageCategory::GENERIC;
+    MessageMetadata message_metadata = MessageMetadata();
+    message_metadata.setShouldBufferMessage(true);
+    HTTPMethod method = HTTPMethod::POST;
+    HTTPResponse res(HTTPStatusCode::HTTP_OK, "");
+
+    for (int i = 0; i < 6; i++) {
+        string body = "body" + to_string(i);
+        buffer_provider->pushNewBufferedMessage(body, method, "/" + to_string(i), category, message_metadata, false);
+        EXPECT_CALL(mock_messaging, sendSyncMessage(method, "/" + to_string(i), body, _, _)).WillOnce(Return(res));
+    }
+
+    for (int i = 0; i < 2; i++) {
+        string body = "body" + to_string(i);
+        buffer_provider->pushNewBufferedMessage(body, method, "/" + to_string(i), category, message_metadata, false);
+    }
+
+    memory_routine();
+
+    for (int i = 0; i < 2; i++) {
+        auto msg = buffer_provider->peekMessage();
+        ASSERT_TRUE(msg.ok());
+        buffer_provider->popMessage();
+    }
+    auto msg = buffer_provider->peekMessage();
+    ASSERT_FALSE(msg.ok());
+}
```
```diff
@@ -94,10 +94,13 @@ string
 HTTPResponse::toString() const
 {
     auto code = status_code_to_string.find(status_code);
-    dbgAssert(code != status_code_to_string.end())
-        << AlertInfo(AlertTeam::CORE, "messaging i/s")
-        << "Unknown status code "
-        << int(status_code);
+    if (code == status_code_to_string.end()) {
+        dbgAssertOpt(code != status_code_to_string.end())
+            << AlertInfo(AlertTeam::CORE, "messaging i/s")
+            << "Unknown status code "
+            << int(status_code);
+        return "[Status-code]: 500 - HTTP_INTERNAL_SERVER_ERROR, [Body]: " + (body.empty() ? "{}" : body);
+    }
+
     return "[Status-code]: " + code->second + ", [Body]: " + (body.empty() ? "{}" : body);
 }
```
```diff
@@ -160,8 +160,8 @@ TagAndEnumManagement::convertToString(const StreamType &stream_type)
         case StreamType::COUNT: break;
     }
 
-    dbgAssert(false) << alert << "Unknown log stream type. Type: " << static_cast<int>(stream_type);
-    return "";
+    dbgAssertOpt(false) << alert << "Unknown log stream type. Type: " << static_cast<int>(stream_type);
+    return "Unknown stream";
 }
 
 string
@@ -175,8 +175,8 @@ TagAndEnumManagement::convertToString(const Severity &severity)
         case Severity::INFO: return "Info";
     }
 
-    dbgAssert(false) << alert << "Reached an impossible severity value of: " << static_cast<int>(severity);
-    return "";
+    dbgAssertOpt(false) << alert << "Reached an impossible severity value of: " << static_cast<int>(severity);
+    return "Unknown severity";
 }
 
 string
@@ -188,8 +188,8 @@ TagAndEnumManagement::convertToString(const Type &type)
         case Type::CODE: return "Code Related";
     }
 
-    dbgAssert(false) << alert << "Reached an impossible type value of: " << static_cast<int>(type);
-    return "";
+    dbgAssertOpt(false) << alert << "Reached an impossible type value of: " << static_cast<int>(type);
+    return "Unknown type";
 }
 
 string
@@ -203,8 +203,8 @@ TagAndEnumManagement::convertToString(const Level &level)
         case Level::CUSTOM: return "Custom";
     }
 
-    dbgAssert(false) << alert << "Reached an impossible type value of: " << static_cast<int>(level);
-    return "";
+    dbgAssertOpt(false) << alert << "Reached an impossible type value of: " << static_cast<int>(level);
+    return "Unknown Level";
 }
 
 string
@@ -218,8 +218,8 @@ TagAndEnumManagement::convertToString(const LogLevel &log_level)
         case LogLevel::ERROR: return "error";
     }
 
-    dbgAssert(false) << alert << "Reached an impossible type value of: " << static_cast<int>(log_level);
-    return "";
+    dbgAssertOpt(false) << alert << "Reached an impossible type value of: " << static_cast<int>(log_level);
+    return "Unknown log level";
 }
 
 string
@@ -230,8 +230,8 @@ TagAndEnumManagement::convertToString(const Audience &audience)
         case Audience::INTERNAL: return "Internal";
     }
 
-    dbgAssert(false) << alert << "Reached an impossible audience value of: " << static_cast<int>(audience);
-    return "";
+    dbgAssertOpt(false) << alert << "Reached an impossible audience value of: " << static_cast<int>(audience);
+    return "Unknown audience";
 }
 
 string
@@ -244,8 +244,8 @@ TagAndEnumManagement::convertToString(const Priority &priority)
         case Priority::LOW: return "Low";
     }
 
-    dbgAssert(false) << alert << "Reached impossible priority value of: " << static_cast<int>(priority);
-    return "";
+    dbgAssertOpt(false) << alert << "Reached impossible priority value of: " << static_cast<int>(priority);
+    return "Unknown priority";
 }
 
 string
@@ -263,8 +263,8 @@ TagAndEnumManagement::convertToString(const Notification &notification)
         case Notification::SDWAN_POLICY_WARNING_LOG: return "c58d490e-6aa0-43da-bfaa-7edad0a57b7a";
     }
 
-    dbgAssert(false) << alert << "Reached impossible notification value of: " << static_cast<int>(notification);
-    return "";
+    dbgAssertOpt(false) << alert << "Reached impossible notification value of: " << static_cast<int>(notification);
+    return "00000000-0000-0000-0000-000000000000";
 }
 
 string
@@ -281,8 +281,8 @@ TagAndEnumManagement::convertToString(const IssuingEngine &issuing_engine)
         case IssuingEngine::HORIZON_TELEMETRY_METRICS: return "horizonTelemetryMetrics";
     }
 
-    dbgAssert(false) << alert << "Reached impossible engine value of: " << static_cast<int>(issuing_engine);
-    return "";
+    dbgAssertOpt(false) << alert << "Reached impossible engine value of: " << static_cast<int>(issuing_engine);
+    return "Unknown Issuer";
 }
```
```diff
@@ -50,6 +50,7 @@ public:
     bool bindRestServerSocket(struct sockaddr_in6 &addr, vector<uint16_t> port_range);
     bool addRestCall(RestAction oper, const string &uri, unique_ptr<RestInit> &&init) override;
     bool addGetCall(const string &uri, const function<string()> &cb) override;
+    bool addWildcardGetCall(const string &uri, const function<string(const string &)> &callback);
     uint16_t getListeningPort() const override { return listening_port; }
     Maybe<string> getSchema(const string &uri) const override;
     Maybe<string> invokeRest(const string &uri, istream &in) const override;
@@ -67,6 +68,7 @@ private:
     I_MainLoop *mainloop;
     map<string, unique_ptr<RestInit>> rest_calls;
     map<string, function<string()>> get_calls;
+    map<string, function<string(const string &)>> wildcard_get_calls;
     uint16_t listening_port = 0;
     vector<uint16_t> port_range;
 };
@@ -128,11 +130,14 @@ RestServer::Impl::prepareConfiguration()
     } else {
         auto range_start = getPortConfig("Nano service API Port Range start");
         auto range_end = getPortConfig("Nano service API Port Range end");
-        dbgAssert(range_start.ok() && range_end.ok()) << alert << "Rest port configuration was not provided";
-        dbgAssert(*range_start < *range_end)
-            << alert
-            << "Rest port range corrupted (lower bound higher then upper bound)";
+        if (!(range_start.ok() && range_end.ok()) || !(*range_start < *range_end)) {
+            dbgAssertOpt(range_start.ok() && range_end.ok()) << alert << "Rest port configuration was not provided";
+            dbgAssertOpt(*range_start < *range_end)
+                << alert
+                << "Rest port range corrupted (lower bound higher then upper bound)";
+            range_start = 0;
+            range_end = 1;
+        }
         port_range.resize(*range_end - *range_start);
         for (uint16_t i = 0, port = *range_start; i < port_range.size(); i++, port++) {
             port_range[i] = port;
@@ -283,6 +288,13 @@ RestServer::Impl::addGetCall(const string &uri, const function<string()> &callba
     return get_calls.emplace(uri, callback).second;
 }
 
+bool
+RestServer::Impl::addWildcardGetCall(const string &uri, const function<string(const string&)> &callback)
+{
+    if (rest_calls.find(uri) != rest_calls.end()) return false;
+    return wildcard_get_calls.emplace(uri, callback).second;
+}
+
 Maybe<string>
 RestServer::Impl::getSchema(const string &uri) const
 {
@@ -307,14 +319,26 @@ RestServer::Impl::invokeRest(const string &uri, istream &in) const
 bool
 RestServer::Impl::isGetCall(const string &uri) const
 {
-    return get_calls.find(uri) != get_calls.end();
+    if (get_calls.find(uri) != get_calls.end()) return true;
+
+    for (const auto &wildcard : wildcard_get_calls) {
+        if (!uri.find(wildcard.first)) return true;
+    }
+
+    return false;
 }
 
 string
 RestServer::Impl::invokeGet(const string &uri) const
 {
     auto instance = get_calls.find(uri);
-    return instance != get_calls.end() ? instance->second() : "";
+    if (instance != get_calls.end()) return instance->second();
+
+    for (const auto &wildcard : wildcard_get_calls) {
+        if (!uri.find(wildcard.first)) return wildcard.second(uri);
+    }
+
+    return "";
 }
 
 string
@@ -334,8 +358,8 @@ RestServer::Impl::changeActionToString(RestAction oper)
             return "delete-";
         }
         default: {
-            dbgAssert(false) << alert << "Unknown REST action";
-            return "";
+            dbgAssertOpt(false) << alert << "Unknown REST action";
+            return "unknown-";
         }
     }
 }
```
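The REST server hunks above add `addWildcardGetCall`, a prefix-matched GET handler that is consulted only after exact matches fail (note that `!uri.find(prefix)` is just `uri.find(prefix) == 0`). A small self-contained sketch of that dispatch order follows; `GetDispatcher` and its method names are illustrative, not the component's real interface.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>

// Minimal sketch of exact-match GET handlers plus prefix ("wildcard") handlers.
class GetDispatcher
{
public:
    void addGet(const std::string &uri, std::function<std::string()> cb)
    {
        exact[uri] = std::move(cb);
    }

    void addWildcardGet(const std::string &prefix, std::function<std::string(const std::string &)> cb)
    {
        wildcard[prefix] = std::move(cb);
    }

    std::string invoke(const std::string &uri) const
    {
        auto it = exact.find(uri);
        if (it != exact.end()) return it->second();
        for (const auto &entry : wildcard) {
            // uri.find(prefix) == 0 means the URI starts with the registered prefix.
            if (uri.find(entry.first) == 0) return entry.second(uri);
        }
        return "";
    }

private:
    std::map<std::string, std::function<std::string()>> exact;
    std::map<std::string, std::function<std::string(const std::string &)>> wildcard;
};

int main()
{
    GetDispatcher dispatcher;
    dispatcher.addGet("stuff", [] { return std::string("blabla"); });
    dispatcher.addWildcardGet("api/", [] (const std::string &uri) {
        return uri.substr(uri.find_last_of('/') + 1); // echo the trailing path segment
    });
    std::cout << dispatcher.invoke("stuff") << "\n";   // blabla
    std::cout << dispatcher.invoke("api/123") << "\n"; // 123
    return 0;
}
```

The unit-test additions below exercise the same behavior end to end: a `GET /api/123` request is answered with `123`.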
```diff
@@ -4,5 +4,5 @@ link_directories(${ng_module_osrc_zlib_path}/lib)
 add_unit_test(
     rest_server_ut
     "rest_schema_ut.cc;rest_must_param_ut.cc;rest_config_ut.cc"
-    "singleton;messaging;tenant_manager;rest;environment;-lz;shell_cmd;-lboost_filesystem;instance_awareness;-lz;debug_is;time_proxy;mainloop;agent_details;encryptor;event_is;metric;-lboost_context;-lboost_regex;-lboost_system;-lssl;-lcrypto;connkey"
+    "singleton;messaging;tenant_manager;rest;environment;-lz;shell_cmd;-lboost_filesystem;instance_awareness;-lz;version;debug_is;time_proxy;mainloop;agent_details;encryptor;event_is;metric;-lboost_context;-lboost_regex;-lboost_system;-lssl;-lcrypto;connkey"
 )
```
```diff
@@ -171,11 +171,16 @@ TEST_F(RestConfigTest, basic_flow)
     auto i_rest = Singleton::Consume<I_RestApi>::from(rest_server);
     ASSERT_TRUE(i_rest->addRestCall<TestServer>(RestAction::ADD, "test"));
     ASSERT_TRUE(i_rest->addGetCall("stuff", [] () { return string("blabla"); }));
+    ASSERT_TRUE(
+        i_rest->addWildcardGetCall("api/", [] (const string &uri) { return uri.substr(uri.find_last_of('/') + 1); })
+    );
 
     int file_descriptor1 = socket(AF_INET, SOCK_STREAM, 0);
     EXPECT_NE(file_descriptor1, -1);
     int file_descriptor2 = socket(AF_INET, SOCK_STREAM, 0);
     EXPECT_NE(file_descriptor2, -1);
+    int file_descriptor3 = socket(AF_INET, SOCK_STREAM, 0);
+    EXPECT_NE(file_descriptor3, -1);
 
     auto primary_port = getConfiguration<uint>("connection", "Nano service API Port Alternative");
     struct sockaddr_in sa;
@@ -185,6 +190,7 @@ TEST_F(RestConfigTest, basic_flow)
     int socket_enable = 1;
     EXPECT_EQ(setsockopt(file_descriptor1, SOL_SOCKET, SO_REUSEADDR, &socket_enable, sizeof(int)), 0);
     EXPECT_EQ(setsockopt(file_descriptor2, SOL_SOCKET, SO_REUSEADDR, &socket_enable, sizeof(int)), 0);
+    EXPECT_EQ(setsockopt(file_descriptor3, SOL_SOCKET, SO_REUSEADDR, &socket_enable, sizeof(int)), 0);
 
     EXPECT_CALL(messaging, sendSyncMessage(_, _, _, _, _))
         .WillRepeatedly(Return(HTTPResponse(HTTPStatusCode::HTTP_OK, "")));
@@ -203,6 +209,11 @@ TEST_F(RestConfigTest, basic_flow)
         string msg2 = "POST /add-test HTTP/1.1\r\nContent-Length: 10\r\n\r\n{\"num\": 5}";
         EXPECT_EQ(write(file_descriptor2, msg2.data(), msg2.size()), static_cast<int>(msg2.size()));
 
+        EXPECT_EQ(connect(file_descriptor3, (struct sockaddr*)&sa, sizeof(struct sockaddr)), 0)
+            << "file_descriptor3 Error: "
+            << strerror(errno);
+        string msg3 = "GET /api/123 HTTP/1.1\r\n\r\n";
+        EXPECT_EQ(write(file_descriptor3, msg3.data(), msg3.size()), static_cast<int>(msg3.size()));
         while(!TestServer::g_num) {
             mainloop->yield(true);
         }
@@ -215,6 +226,14 @@ TEST_F(RestConfigTest, basic_flow)
             mainloop->yield(true);
         }
 
+        struct pollfd s_poll3;
+        s_poll3.fd = file_descriptor3;
+        s_poll3.events = POLLIN;
+        s_poll3.revents = 0;
+        while(poll(&s_poll3, 1, 0) <= 0) {
+            mainloop->yield(true);
+        }
+
         mainloop->stopAll();
     };
     mainloop->addOneTimeRoutine(
@@ -233,6 +252,11 @@ TEST_F(RestConfigTest, basic_flow)
         string(respose, 76),
         "HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: 6\r\n\r\nblabla"
     );
+    EXPECT_EQ(read(file_descriptor3, respose, 1000), 73);
+    EXPECT_EQ(
+        string(respose, 73),
+        "HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: 3\r\n\r\n123"
+    );
 }
 
 string
```
```diff
@@ -26,9 +26,16 @@
 #include <sys/stat.h>
 #include <errno.h>
 #include "debug.h"
+#include "config.h"
+#include "singleton.h"
+#include "i_time_get.h"
 
 static const uint udp_max_packet_size = 1024 * 64;
 static const AlertInfo alert(AlertTeam::CORE, "socket i/s");
+static const uint CONNECT_TIMEOUT_MICROSECOUNDS(10000000); //10 seconds
+static const uint CHECK_CONNECTION_INTERVAL_MICROSECONDS(250000); //0.25 seconds
+static const std::chrono::microseconds CHRONO_CHECK_CONNECTION_INTERVAL =
+    std::chrono::microseconds(CHECK_CONNECTION_INTERVAL_MICROSECONDS);
 
 USE_DEBUG_FLAG(D_SOCKET);
 
@@ -175,9 +182,11 @@ public:
     Maybe<unique_ptr<SocketInternal>>
     acceptConn(bool is_blocking, const string &authorized_ip = "")
     {
-        dbgAssert(is_server_socket) << alert << "Failed to accept new connections from a client socket";
-        dbgAssert(socket_int > 0) << alert << "Called with uninitialized server socket";
+        if (!(is_server_socket) || !(socket_int > 0)) {
+            dbgAssertOpt(is_server_socket) << alert << "Failed to accept new connections from a client socket";
+            dbgAssertOpt(socket_int > 0) << alert << "Called with uninitialized server socket";
+            return genError("Failed due to internal error");
+        }
         dbgDebug(D_SOCKET) << "Attempt to accept new socket. Server Socket FD: " << socket_int;
         int client_socket;
         if (!authorized_ip.empty()) {
@@ -235,9 +244,117 @@ private:
     }
 };
 
+int setNonBlocking(int socket) {
+    dbgTrace(D_SOCKET) << "Setting socket to non-blocking mode";
+    int flags = fcntl(socket, F_GETFL, 0);
+    if (flags == -1) return -1;
+    return fcntl(socket, F_SETFL, flags | O_NONBLOCK);
+}
+
+int setBlocking(int socket) {
+    dbgTrace(D_SOCKET) << "Setting socket to blocking mode";
+    int flags = fcntl(socket, F_GETFL, 0);
+    if (flags == -1) return -1;
+    return fcntl(socket, F_SETFL, flags & ~O_NONBLOCK);
+}
+
 class TCPSocket : public SocketInternal
 {
 public:
+    static Maybe<unique_ptr<TCPSocket>>
+    connectAsClient(unique_ptr<TCPSocket> &tcp_socket)
+    {
+        if (setNonBlocking(tcp_socket->getSocket()) < 0) {
+            dbgTrace(D_SOCKET) << "Failed to set the socket to non-blocking mode";
+            return genError("Failed to set the socket to non-blocking mode");
+        }
+
+        chrono::microseconds time_before_connect =
+            Singleton::Consume<I_TimeGet>::by<SocketIS>()->getWalltime();
+
+        if (connect(
+                tcp_socket->getSocket(),
+                reinterpret_cast<struct sockaddr *>(&tcp_socket->server),
+                sizeof(struct sockaddr_in)
+            ) >= 0
+        ) {
+            dbgTrace(D_SOCKET) << "Successfully connected to socket";
+            if (setBlocking(tcp_socket->getSocket()) < 0) {
+                dbgWarning(D_SOCKET) << "Failed to set the socket to blocking mode";
+                close(tcp_socket->getSocket());
+                return genError("Failed to set the socket to blocking mode");
+            }
+            return move(tcp_socket);
+        }
+
+        if (setBlocking(tcp_socket->getSocket()) < 0) {
+            dbgWarning(D_SOCKET) << "Failed to set the socket to blocking mode";
+            close(tcp_socket->getSocket());
+            return genError("Failed to set the socket to blocking mode");
+        }
+
+        auto connection_timeout_to_server = getProfileAgentSettingWithDefault<uint>(
+            CONNECT_TIMEOUT_MICROSECOUNDS,
+            "agent.config.log.TCP.connectTimeout");
+
+        dbgTrace(D_SOCKET)
+            << "Waiting for the socket connection to be established"
+            << " with a timeout of "
+            << connection_timeout_to_server
+            << " microseconds and each iteration in this timeout is "
+            << CHECK_CONNECTION_INTERVAL_MICROSECONDS
+            << " microseconds";
+
+        int ready_fds = 0; // parameters for select
+        int err;
+        socklen_t len;
+        fd_set writefds;
+        struct timeval timeout;
+        timeout.tv_sec = 0;
+        timeout.tv_usec = 0;
+
+        while (
+            Singleton::Consume<I_TimeGet>::by<SocketIS>()->getWalltime() - time_before_connect
+                < chrono::microseconds(connection_timeout_to_server))
+        {
+            dbgTrace(D_SOCKET) << "Iterating to check the connection status";
+            Singleton::Consume<I_MainLoop>::by<SocketIS>()->yield(CHRONO_CHECK_CONNECTION_INTERVAL);
+
+            FD_ZERO(&writefds);
+            FD_SET(tcp_socket->getSocket(), &writefds);
+
+            ready_fds = select(tcp_socket->getSocket() + 1, NULL, &writefds, NULL, &timeout);
+
+            if (ready_fds > 0) {
+                len = sizeof(err);
+                if (getsockopt(tcp_socket->getSocket(), SOL_SOCKET, SO_ERROR, &err, &len) >= 0) {
+                    if (err == 0) {
+                        dbgTrace(D_SOCKET) << "Connected to socket";
+                        return move(tcp_socket);
+                    }
+                }
+            }
+        }
+
+        if (ready_fds > 0) {
+            // there is at least one file descriptor ready for IO operation
+            if (getsockopt(tcp_socket->getSocket(), SOL_SOCKET, SO_ERROR, &err, &len) < 0) {
+                dbgWarning(D_SOCKET) << "Failed to get socket options";
+                close(tcp_socket->getSocket());
+                return genError("Failed to get socket options");
+            }
+            if (err != 0) {
+                dbgWarning(D_SOCKET) << "Failed to connect socket. Error number: " << err;
+                close(tcp_socket->getSocket());
+                return genError("Failed to connect socket");
+            }
+        }
+        dbgWarning(D_SOCKET) << "No file descriptor is ready for IO operation";
+        close(tcp_socket->getSocket());
+        return genError("Failed to connect socket");
+    }
+
     static Maybe<unique_ptr<TCPSocket>>
     connectSock(bool _is_blocking, bool _is_server, const string &_address)
     {
@@ -262,15 +379,7 @@ public:
         tcp_socket->server.sin_port = htons(port);
 
         if (!tcp_socket->isServerSock()) {
-            if (connect(
-                    tcp_socket->getSocket(),
-                    reinterpret_cast<struct sockaddr *>(&tcp_socket->server),
-                    sizeof(struct sockaddr_in)
-                ) == -1
-            ) {
-                return genError("Failed to connect socket");
-            }
-            return move(tcp_socket);
+            return tcp_socket->connectAsClient(tcp_socket);
         }
 
         static const int on = 1;
@@ -638,7 +747,10 @@ SocketIS::Impl::genSocket(
         socketTypeName = "UNIXDG";
     } else if (type == SocketType::TCP) {
         Maybe<unique_ptr<SocketInternal>> tcp_sock = TCPSocket::connectSock(is_blocking, is_server, address);
-        if (!tcp_sock.ok()) return tcp_sock.passErr();
+        if (!tcp_sock.ok()) {
+            dbgWarning(D_SOCKET) << "Failed to initialize TCP socket. Error: " << tcp_sock.getErr();
+            return tcp_sock.passErr();
+        }
         new_sock = tcp_sock.unpackMove();
         socketTypeName = "TCP";
     } else if (type == SocketType::UDP) {
```
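The socket changes above move client TCP connects to a non-blocking connect followed by a bounded wait: select on writability, then read `SO_ERROR` to learn the outcome, so an unreachable syslog/CEF target cannot stall the agent. Here is a plain POSIX sketch of the same technique, with the loopback address, port 514, and the 2-second timeout as placeholder values rather than project configuration.

```cpp
#include <arpa/inet.h>
#include <cerrno>
#include <cstdint>
#include <cstring>
#include <fcntl.h>
#include <iostream>
#include <netinet/in.h>
#include <string>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

// Minimal sketch of a TCP connect with a bounded wait; host/port are placeholders.
static int connect_with_timeout(const std::string &ip, uint16_t port, int timeout_sec)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0) return -1;

    int flags = fcntl(fd, F_GETFL, 0);
    fcntl(fd, F_SETFL, flags | O_NONBLOCK); // non-blocking so connect() returns immediately

    sockaddr_in addr{};
    addr.sin_family = AF_INET;
    addr.sin_port = htons(port);
    inet_pton(AF_INET, ip.c_str(), &addr.sin_addr);

    int res = connect(fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr));
    if (res < 0 && errno != EINPROGRESS) { close(fd); return -1; }

    if (res < 0) {
        fd_set writefds;
        FD_ZERO(&writefds);
        FD_SET(fd, &writefds);
        timeval timeout{timeout_sec, 0};
        if (select(fd + 1, nullptr, &writefds, nullptr, &timeout) <= 0) { close(fd); return -1; }

        int err = 0;
        socklen_t len = sizeof(err);
        if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0 || err != 0) { close(fd); return -1; }
    }

    fcntl(fd, F_SETFL, flags); // restore blocking mode for normal writes
    return fd;
}

int main()
{
    int fd = connect_with_timeout("127.0.0.1", 514, 2); // syslog-style target, placeholder
    std::cout << (fd >= 0 ? "connected" : "connect failed or timed out") << "\n";
    if (fd >= 0) close(fd);
    return 0;
}
```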
```diff
@@ -52,9 +52,12 @@ public:
     setMonotonicTime(microseconds new_time) override
     {
         if (is_monotomic_set) {
-            dbgAssert((new_time+monotonic_delta) >= monotonic_now)
-                << AlertInfo(AlertTeam::CORE, "time proxy")
-                << "Monotonic time must not go back!";
+            if ((new_time+monotonic_delta) < monotonic_now) {
+                dbgAssertOpt((new_time+monotonic_delta) >= monotonic_now)
+                    << AlertInfo(AlertTeam::CORE, "time proxy")
+                    << "Monotonic time must not go back!";
+                return;
+            }
         } else {
             // The first time that the monotonic time is been set, we take the current value to be the base line.
             // This is in order to avoid the clock going backwards.
```
Some files were not shown because too many files have changed in this diff.