Mirror of https://github.com/openappsec/openappsec.git (synced 2025-11-16 09:21:54 +03:00)

Compare commits: 1.1.8 ... Apr_21_202 (6 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4504138a4a | |
| | 66ed4a8d81 | |
| | 189c9209c9 | |
| | 1a1580081c | |
| | 942b2ef8b4 | |
| | 7a7f65a77a | |
@@ -47,6 +47,7 @@ fi
orchestration_service_installation_flags="--container_mode --skip_registration"
if [ ! -z $var_token ]; then
    export AGENT_TOKEN="$var_token"
    orchestration_service_installation_flags="$orchestration_service_installation_flags --token $var_token"
fi
if [ ! -z $var_fog_address ]; then
@@ -1,137 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/assets_config.h"

#include <string>
#include <algorithm>
#include <unordered_map>

#include "generic_rulebase/generic_rulebase_utils.h"
#include "config.h"
#include "debug.h"
#include "ip_utilities.h"

USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

using namespace std;

void
RuleAsset::load(cereal::JSONInputArchive &archive_in)
{
    archive_in(cereal::make_nvp("assetId", asset_id));
    archive_in(cereal::make_nvp("assetName", asset_name));
    archive_in(cereal::make_nvp("assetUrls", asset_urls));

    dbgWarning(D_RULEBASE_CONFIG) << "Adding asset with UID: " << asset_id;
}

void
RuleAsset::AssetUrl::load(cereal::JSONInputArchive &archive_in)
{
    archive_in(cereal::make_nvp("protocol", protocol));
    transform(protocol.begin(), protocol.end(), protocol.begin(), [](unsigned char c) { return tolower(c); });

    archive_in(cereal::make_nvp("ip", ip));
    archive_in(cereal::make_nvp("port", port));

    int value;
    if (protocol == "*") {
        is_any_proto = true;
    } else {
        is_any_proto = false;
        try {
            value = 0;
            if(protocol == "udp") value = IPPROTO_UDP;
            if(protocol == "tcp") value = IPPROTO_TCP;
            if(protocol == "dccp") value = IPPROTO_DCCP;
            if(protocol == "sctp") value = IPPROTO_SCTP;
            if(protocol == "icmp") value = IPPROTO_ICMP;
            if(protocol == "icmpv6") value = IPPROTO_ICMP;

            if (value > static_cast<int>(UINT8_MAX) || value < 0) {
                dbgWarning(D_RULEBASE_CONFIG)
                    << "provided value is not a legal IP protocol number. Value: "
                    << protocol;
            } else {
                parsed_proto = value;
            }
        } catch (...) {
            dbgWarning(D_RULEBASE_CONFIG) << "provided value is not a legal IP protocol. Value: " << protocol;
        }
    }

    if (port == "*") {
        is_any_port = true;
    } else {
        is_any_port = false;
        try {
            value = stoi(port);
            if (value > static_cast<int>(UINT16_MAX) || value < 0) {
                dbgWarning(D_RULEBASE_CONFIG) << "provided value is not a legal port number. Value: " << port;
            } else {
                parsed_port = value;
            }
        } catch (...) {
            dbgWarning(D_RULEBASE_CONFIG) << "provided value is not a legal port. Value: " << port;
        }
    }

    if (ip == "*") {
        is_any_ip = true;
    } else {
        is_any_ip = false;
        auto ip_addr = IPAddr::createIPAddr(ip);
        if (!ip_addr.ok()) {
            dbgWarning(D_RULEBASE_CONFIG) << "Could not create IP address. Error: " << ip_addr.getErr();
        } else {
            parsed_ip = ConvertToIpAddress(ip_addr.unpackMove());
        }
    }
}

IpAddress
RuleAsset::AssetUrl::ConvertToIpAddress(const IPAddr &addr)
{
    IpAddress address;
    switch (addr.getType()) {
        case IPType::UNINITIALIZED: {
            address.addr4_t = {0};
            address.ip_type = IP_VERSION_ANY;
            break;
        }
        case IPType::V4: {
            address.addr4_t = addr.getIPv4();
            address.ip_type = IP_VERSION_4;
            break;
        }
        case IPType::V6: {
            address.addr6_t = addr.getIPv6();
            address.ip_type = IP_VERSION_6;
            break;
        }
        default:
            address.addr4_t = {0};
            address.ip_type = IP_VERSION_ANY;
            dbgWarning(D_RULEBASE_CONFIG) << "Unsupported IP type: " << static_cast<int>(addr.getType());
    }
    return address;
}

const Assets Assets::empty_assets_config = Assets();

void
Assets::preload()
{
    registerExpectedSetting<Assets>("rulebase", "usedAssets");
}
@@ -1,52 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/evaluators/asset_eval.h"

#include <vector>
#include <string>

#include "generic_rulebase/assets_config.h"
#include "config.h"
#include "debug.h"

using namespace std;

USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

string AssetMatcher::ctx_key = "asset_id";

AssetMatcher::AssetMatcher(const vector<string> &params)
{
    if (params.size() != 1) reportWrongNumberOfParams(AssetMatcher::getName(), params.size(), 1, 1);
    asset_id = params[0];
}

Maybe<bool, Context::Error>
AssetMatcher::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<AssetMatcher>();
    auto bc_asset_id_ctx = env->get<GenericConfigId>(AssetMatcher::ctx_key);

    if (bc_asset_id_ctx.ok()) {
        dbgTrace(D_RULEBASE_CONFIG)
            << "Asset ID: "
            << asset_id
            << "; Current set assetId context: "
            << *bc_asset_id_ctx;
    } else {
        dbgTrace(D_RULEBASE_CONFIG) << "Asset ID: " << asset_id << ". Empty context";
    }

    return bc_asset_id_ctx.ok() && *bc_asset_id_ctx == asset_id;
}
@@ -1,299 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/evaluators/connection_eval.h"

#include <vector>
#include <string>

#include "generic_rulebase/rulebase_config.h"
#include "config.h"
#include "debug.h"
#include "ip_utilities.h"

using namespace std;
USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

string IpAddressMatcher::ctx_key = "ipAddress";
string SourceIpMatcher::ctx_key = "sourceIP";
string DestinationIpMatcher::ctx_key = "destinationIP";
string SourcePortMatcher::ctx_key = "sourcePort";
string ListeningPortMatcher::ctx_key = "listeningPort";
string IpProtocolMatcher::ctx_key = "ipProtocol";
string UrlMatcher::ctx_key = "url";

Maybe<IPAddr>
getIpAddrFromEnviroment(I_Environment *env, Context::MetaDataType enum_data_type, const string &str_data_type)
{
    auto ip_str = env->get<string>(enum_data_type);
    if (!ip_str.ok()) {
        dbgWarning(D_RULEBASE_CONFIG) << "Failed to get " << str_data_type << " from the enviroment.";
        return genError("Failed to get " + str_data_type + " from the enviroment.");
    }
    return IPAddr::createIPAddr(ip_str.unpack());
}

bool
checkIfIpInRangesVec(const vector<CustomRange<IPAddr>> &values, const IPAddr &ip_to_check)
{
    if (values.size() == 0) {
        dbgTrace(D_RULEBASE_CONFIG) << "Ip addersses vector empty. Match is true.";
        return true;
    }
    for (const CustomRange<IPAddr> &range : values) {
        if (range.contains(ip_to_check)) {
            dbgTrace(D_RULEBASE_CONFIG) << "Ip adderss matched: " << ip_to_check;
            return true;
        }
    }
    dbgTrace(D_RULEBASE_CONFIG) << "Ip adderss not match: " << ip_to_check;
    return false;
}

IpAddressMatcher::IpAddressMatcher(const vector<string> &params)
{
    for (const string &param : params) {
        Maybe<CustomRange<IPAddr>> ip_range = CustomRange<IPAddr>::createRange(param);
        if (!ip_range.ok()) {
            dbgWarning(D_RULEBASE_CONFIG) << "Failed to create ip. Error: " + ip_range.getErr();
            continue;
        }
        values.push_back(ip_range.unpack());
    }
}

Maybe<bool, Context::Error>
IpAddressMatcher::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<IpAddressMatcher>();
    Maybe<IPAddr> subject_ip = getIpAddrFromEnviroment(
        env,
        Context::MetaDataType::SubjectIpAddr,
        "subject ip address"
    );
    if (subject_ip.ok() && checkIfIpInRangesVec(values, subject_ip.unpack())) return true;

    Maybe<IPAddr> other_ip = getIpAddrFromEnviroment(
        env,
        Context::MetaDataType::OtherIpAddr,
        "other ip address"
    );
    if (other_ip.ok() && checkIfIpInRangesVec(values, other_ip.unpack())) return true;
    if (!subject_ip.ok() && !other_ip.ok()) {
        dbgWarning(D_RULEBASE_CONFIG) << "Error in getting subject ip and other ip from the enviroment";
        return false;
    }
    dbgTrace(D_RULEBASE_CONFIG) << "Ip adderss didn't match";
    return false;
}

SourceIpMatcher::SourceIpMatcher(const vector<string> &params)
{
    for (const string &param : params) {
        Maybe<CustomRange<IPAddr>> ip_range = CustomRange<IPAddr>::createRange(param);
        if (!ip_range.ok()) {
            dbgWarning(D_RULEBASE_CONFIG) << "Failed to create source ip. Error: " + ip_range.getErr();
            continue;
        }
        values.push_back(ip_range.unpack());
    }
}

Maybe<bool, Context::Error>
SourceIpMatcher::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<SourceIpMatcher>();
    auto direction_maybe = env->get<string>(Context::MetaDataType::Direction);
    if (!direction_maybe.ok()) {
        dbgWarning(D_RULEBASE_CONFIG) << "Failed to get direction from the enviroment.";
        return false;
    }
    string direction = direction_maybe.unpack();
    if (direction == "incoming") {
        Maybe<IPAddr> other_ip = getIpAddrFromEnviroment(
            env,
            Context::MetaDataType::OtherIpAddr,
            "other ip address"
        );
        return other_ip.ok() && checkIfIpInRangesVec(values, other_ip.unpack());
    } else if (direction == "outgoing") {
        Maybe<IPAddr> subject_ip = getIpAddrFromEnviroment(
            env,
            Context::MetaDataType::SubjectIpAddr,
            "subject ip address"
        );
        return subject_ip.ok() && checkIfIpInRangesVec(values, subject_ip.unpack());
    }
    dbgTrace(D_RULEBASE_CONFIG) << "Source ip adderss didn't match";
    return false;
}

DestinationIpMatcher::DestinationIpMatcher(const vector<string> &params)
{
    for (const string &param : params) {
        Maybe<CustomRange<IPAddr>> ip_range = CustomRange<IPAddr>::createRange(param);
        if (!ip_range.ok()) {
            dbgWarning(D_RULEBASE_CONFIG) << "Failed to create destination ip. Error: " + ip_range.getErr();
            continue;
        }
        values.push_back(ip_range.unpack());
    }
}

Maybe<bool, Context::Error>
DestinationIpMatcher::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<DestinationIpMatcher>();
    auto direction_maybe = env->get<string>(Context::MetaDataType::Direction);
    if (!direction_maybe.ok()) {
        dbgWarning(D_RULEBASE_CONFIG) << "Failed to get direction.";
        return false;
    }
    string direction = direction_maybe.unpack();
    if (direction == "outgoing") {
        Maybe<IPAddr> other_ip = getIpAddrFromEnviroment(
            env,
            Context::MetaDataType::OtherIpAddr,
            "other ip address"
        );
        return other_ip.ok() && checkIfIpInRangesVec(values, other_ip.unpack());
    } else if (direction == "incoming") {
        Maybe<IPAddr> subject_ip = getIpAddrFromEnviroment(
            env,
            Context::MetaDataType::SubjectIpAddr,
            "subject ip address"
        );
        return subject_ip.ok() && checkIfIpInRangesVec(values, subject_ip.unpack());
    }
    dbgTrace(D_RULEBASE_CONFIG) << "Destination ip adderss didn't match";
    return false;
}

SourcePortMatcher::SourcePortMatcher(const vector<string> &params)
{
    for (const string &param : params) {
        Maybe<CustomRange<PortNumber>> port_range = CustomRange<PortNumber>::createRange(param);
        if (!port_range.ok()) {
            dbgWarning(D_RULEBASE_CONFIG) << "Failed to create source port.";
            continue;
        }
        values.push_back(port_range.unpack());
    }
}

Maybe<bool, Context::Error>
SourcePortMatcher::evalVariable() const
{
    dbgTrace(D_RULEBASE_CONFIG) << "Source is not a match";
    return false;
}

ListeningPortMatcher::ListeningPortMatcher(const vector<string> &params)
{
    for (const string &param : params) {
        Maybe<CustomRange<PortNumber>> port_range = CustomRange<PortNumber>::createRange(param);
        if (!port_range.ok()) {
            dbgWarning(D_RULEBASE_CONFIG) << "Failed to create listening port range.";
            continue;
        }
        values.push_back(port_range.unpack());
    }
}

Maybe<bool, Context::Error>
ListeningPortMatcher::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<ListeningPortMatcher>();
    auto port_str = env->get<string>(Context::MetaDataType::Port);
    if (!port_str.ok()) {
        dbgWarning(D_RULEBASE_CONFIG) << "Failed to get port from the enviroment.";
        return false;
    }
    PortNumber port;
    if (ConnKeyUtil::fromString(port_str.unpack(), port)) {
        if (values.size() == 0) return true;
        for (const CustomRange<PortNumber> &port_range : values) {
            if (port_range.contains(port)) {
                dbgTrace(D_RULEBASE_CONFIG) << "Listening port is a match. Value: " << port_str.unpack();
                return true;
            }
        }
    }
    dbgTrace(D_RULEBASE_CONFIG) << "Listening port is not a match. Value: " << port_str.unpack();
    return false;
}

IpProtocolMatcher::IpProtocolMatcher(const vector<string> &params)
{
    for (const string &param : params) {
        Maybe<CustomRange<IPProto>> proto_range = CustomRange<IPProto>::createRange(param);
        if (!proto_range.ok()) {
            dbgWarning(D_RULEBASE_CONFIG) << "Failed to create ip protocol.";
            continue;
        }
        values.push_back(proto_range.unpack());
    }
}

Maybe<bool, Context::Error>
IpProtocolMatcher::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<IpProtocolMatcher>();
    auto proto_str = env->get<string>(Context::MetaDataType::Protocol);
    if (!proto_str.ok()) {
        dbgWarning(D_RULEBASE_CONFIG) << "Failed to get ip protocol from the enviroment.";
        return false;
    }
    IPProto protocol;
    if (ConnKeyUtil::fromString(proto_str.unpack(), protocol)) {
        if (values.size() == 0) return true;
        for (const CustomRange<IPProto> &proto_range : values) {
            if (proto_range.contains(protocol)) {
                dbgTrace(D_RULEBASE_CONFIG) << "Ip protocol is a match. Value: " << proto_str.unpack();
                return true;
            }
        }
    }
    dbgTrace(D_RULEBASE_CONFIG) << "Source port is not a match. Value: " << proto_str.unpack();
    return false;
}

UrlMatcher::UrlMatcher(const vector<string> &params) : values(params) {}

Maybe<bool, Context::Error>
UrlMatcher::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<UrlMatcher>();
    auto curr_url_ctx = env->get<string>(Context::MetaDataType::Url);
    if (!curr_url_ctx.ok()) {
        dbgWarning(D_RULEBASE_CONFIG) << "Failed to get URL from the enviroment.";
        return false;
    }

    if (values.size() == 0) {
        dbgTrace(D_RULEBASE_CONFIG) << "Matched URL on \"any\". Url: " << *curr_url_ctx;
        return true;
    }

    for (const string &url : values) {
        if (*curr_url_ctx == url) {
            dbgTrace(D_RULEBASE_CONFIG) << "Matched URL. Value: " << *curr_url_ctx;
            return true;
        }
    }

    dbgTrace(D_RULEBASE_CONFIG) << "URL is not a match. Value: " << *curr_url_ctx;
    return false;
}
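The matchers above share one convention worth noting: an empty list of configured ranges means "match anything" (see checkIfIpInRangesVec and the port/protocol loops). The following standalone sketch, which is not part of the repository, restates that convention with plain integer ranges.

```cpp
// Illustrative only: the "empty range list matches everything" convention,
// restated with simple inclusive [low, high] integer pairs.
#include <iostream>
#include <utility>
#include <vector>

static bool inAnyRange(const std::vector<std::pair<int, int>> &ranges, int value)
{
    if (ranges.empty()) return true;                      // no restriction configured
    for (const auto &range : ranges) {
        if (value >= range.first && value <= range.second) return true;
    }
    return false;
}

int main()
{
    std::vector<std::pair<int, int>> no_ranges;
    std::vector<std::pair<int, int>> web_ports = {{80, 80}, {443, 443}, {8080, 8090}};

    std::cout << std::boolalpha
              << inAnyRange(no_ranges, 22) << "\n"        // true: empty list matches all
              << inAnyRange(web_ports, 8081) << "\n"      // true: inside 8080-8090
              << inAnyRange(web_ports, 22) << "\n";       // false
    return 0;
}
```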
@@ -1,168 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/evaluators/http_transaction_data_eval.h"

#include <boost/lexical_cast.hpp>
#include <algorithm>

#include "http_transaction_data.h"
#include "environment/evaluator_templates.h"
#include "i_environment.h"
#include "singleton.h"
#include "debug.h"

USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

using namespace std;
using namespace EnvironmentHelper;

EqualHost::EqualHost(const vector<string> &params)
{
    if (params.size() != 1) reportWrongNumberOfParams("EqualHost", params.size(), 1, 1);
    host = params[0];
}

Maybe<bool, Context::Error>
EqualHost::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<EqualHost>();
    auto host_ctx = env->get<string>(HttpTransactionData::host_name_ctx);

    if (!host_ctx.ok())
    {
        return false;
    }

    std::string lower_host_ctx = host_ctx.unpack();
    std::transform(lower_host_ctx.begin(), lower_host_ctx.end(), lower_host_ctx.begin(), ::tolower);

    std::string lower_host = host;
    std::transform(lower_host.begin(), lower_host.end(), lower_host.begin(), ::tolower);

    if (lower_host_ctx == lower_host) return true;
    size_t pos = lower_host_ctx.find_last_of(':');
    if (pos == string::npos) return false;
    lower_host_ctx = string(lower_host_ctx.data(), pos);
    return lower_host_ctx == lower_host;
}

WildcardHost::WildcardHost(const vector<string> &params)
{
    if (params.size() != 1) reportWrongNumberOfParams("WildcardHost", params.size(), 1, 1);
    host = params[0];
}

Maybe<bool, Context::Error>
WildcardHost::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<WildcardHost>();
    auto host_ctx = env->get<string>(HttpTransactionData::host_name_ctx);

    if (!host_ctx.ok())
    {
        return false;
    }

    string lower_host_ctx = host_ctx.unpack();
    transform(lower_host_ctx.begin(), lower_host_ctx.end(), lower_host_ctx.begin(), ::tolower);

    dbgTrace(D_RULEBASE_CONFIG) << "found host in current context: " << lower_host_ctx;

    size_t pos = lower_host_ctx.find_first_of(".");
    if (pos == string::npos) {
        return false;
    }

    lower_host_ctx = "*" + lower_host_ctx.substr(pos, lower_host_ctx.length());

    string lower_host = host;
    transform(lower_host.begin(), lower_host.end(), lower_host.begin(), ::tolower);

    dbgTrace(D_RULEBASE_CONFIG)
        << "trying to match host context with its corresponding wildcard address: "
        << lower_host_ctx
        << ". Matcher host: "
        << lower_host;

    if (lower_host_ctx == lower_host) return true;
    pos = lower_host_ctx.find_last_of(':');
    if (pos == string::npos) return false;
    lower_host_ctx = string(lower_host_ctx.data(), pos);
    return lower_host_ctx == lower_host;
}

EqualListeningIP::EqualListeningIP(const vector<string> &params)
{
    if (params.size() != 1) reportWrongNumberOfParams("EqualListeningIP", params.size(), 1, 1);

    auto maybe_ip = IPAddr::createIPAddr(params[0]);
    if (!maybe_ip.ok()) reportWrongParamType(getName(), params[0], "Not a valid IP Address");

    listening_ip = maybe_ip.unpack();
}

Maybe<bool, Context::Error>
EqualListeningIP::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<EqualListeningIP>();
    auto listening_ip_ctx = env->get<IPAddr>(HttpTransactionData::listening_ip_ctx);
    return listening_ip_ctx.ok() && listening_ip_ctx.unpack() == listening_ip;
}

EqualListeningPort::EqualListeningPort(const vector<string> &params)
{
    if (params.size() != 1) reportWrongNumberOfParams("EqualListeningPort", params.size(), 1, 1);

    try {
        listening_port = boost::lexical_cast<PortNumber>(params[0]);
    } catch (boost::bad_lexical_cast const&) {
        reportWrongParamType(getName(), params[0], "Not a valid port number");
    }
}

Maybe<bool, Context::Error>
EqualListeningPort::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<EqualListeningPort>();
    auto port_ctx = env->get<PortNumber>(HttpTransactionData::listening_port_ctx);

    return port_ctx.ok() && port_ctx.unpack() == listening_port;
}

BeginWithUri::BeginWithUri(const vector<string> &params)
{
    if (params.size() != 1) reportWrongNumberOfParams("BeginWithUri", params.size(), 1, 1);
    uri_prefix = params[0];
}

Maybe<bool, Context::Error>
BeginWithUri::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<BeginWithUri>();
    auto uri_ctx = env->get<string>(HttpTransactionData::uri_ctx);

    if (!uri_ctx.ok())
    {
        return false;
    }

    std::string lower_uri_ctx = uri_ctx.unpack();
    std::transform(lower_uri_ctx.begin(), lower_uri_ctx.end(), lower_uri_ctx.begin(), ::tolower);

    std::string lower_uri_prefix = uri_prefix;
    std::transform(lower_uri_prefix.begin(), lower_uri_prefix.end(), lower_uri_prefix.begin(), ::tolower);

    return lower_uri_ctx.find(lower_uri_prefix) == 0;
}
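EqualHost above compares hosts case-insensitively and, if the direct comparison fails, retries with a trailing ":port" suffix stripped from the request host. The sketch below is a self-contained, illustrative restatement of that comparison; hostEquals and toLower are hypothetical helpers, not part of the repository.

```cpp
// Standalone restatement of the host comparison used by EqualHost::evalVariable.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

static std::string toLower(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return std::tolower(c); });
    return s;
}

// True if host_ctx equals host, ignoring case and an optional ":port" suffix on host_ctx.
static bool hostEquals(const std::string &host_ctx, const std::string &host)
{
    std::string lhs = toLower(host_ctx);
    std::string rhs = toLower(host);
    if (lhs == rhs) return true;
    size_t pos = lhs.find_last_of(':');
    if (pos == std::string::npos) return false;
    return lhs.substr(0, pos) == rhs;
}

int main()
{
    std::cout << std::boolalpha
              << hostEquals("Example.COM:8080", "example.com") << "\n"   // true
              << hostEquals("example.org", "example.com") << "\n";       // false
    return 0;
}
```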
@@ -1,38 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/evaluators/parameter_eval.h"

#include <vector>
#include <string>

#include "generic_rulebase/rulebase_config.h"
#include "config.h"
#include "debug.h"

using namespace std;

string ParameterMatcher::ctx_key = "parameters";

ParameterMatcher::ParameterMatcher(const vector<string> &params)
{
    if (params.size() != 1) reportWrongNumberOfParams(ParameterMatcher::getName(), params.size(), 1, 1);
    parameter_id = params[0];
}

Maybe<bool, Context::Error>
ParameterMatcher::evalVariable() const
{
    auto rule = getConfiguration<BasicRuleConfig>("rulebase", "rulesConfig");
    return rule.ok() && rule.unpack().isParameterActive(parameter_id);
}
@@ -1,50 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/evaluators/practice_eval.h"

#include <vector>
#include <string>

#include "generic_rulebase/rulebase_config.h"
#include "config.h"
#include "debug.h"

USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

using namespace std;

string PracticeMatcher::ctx_key = "practices";

PracticeMatcher::PracticeMatcher(const vector<string> &params)
{
    if (params.size() != 1) reportWrongNumberOfParams(PracticeMatcher::getName(), params.size(), 1, 1);
    practice_id = params[0];
}

Maybe<bool, Context::Error>
PracticeMatcher::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<PracticeMatcher>();
    auto bc_practice_id_ctx = env->get<set<GenericConfigId>>(PracticeMatcher::ctx_key);
    dbgTrace(D_RULEBASE_CONFIG)
        << "Trying to match practice. ID: "
        << practice_id << ", Current set IDs: "
        << makeSeparatedStr(bc_practice_id_ctx.ok() ? *bc_practice_id_ctx : set<GenericConfigId>(), ", ");
    if (bc_practice_id_ctx.ok()) {
        return bc_practice_id_ctx.unpack().count(practice_id) > 0;
    }

    auto rule = getConfiguration<BasicRuleConfig>("rulebase", "rulesConfig");
    return rule.ok() && rule.unpack().isPracticeActive(practice_id);
}
@@ -1,136 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/evaluators/query_eval.h"

#include <vector>
#include <string>
#include <map>

#include "generic_rulebase/rulebase_config.h"
#include "generic_rulebase/zones_config.h"
#include "i_environment.h"
#include "singleton.h"
#include "config.h"
#include "debug.h"
#include "enum_range.h"

using namespace std;

USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

QueryMatcher::QueryMatcher(const vector<string> &params)
{
    if (params.size() < 1) reportWrongNumberOfParams(QueryMatcher::getName(), params.size(), 1);

    key = params.front();
    if (key == "any") {
        is_any = true;
    } else {
        values.reserve(params.size() - 1);
        for (uint i = 1; i < params.size() ; i++) {
            if (params[i] == "any") {
                values.clear();
                break;
            }
            values.insert(params[i]);
        }
    }
}

const string
QueryMatcher::contextKeyToString(Context::MetaDataType type)
{
    if (type == Context::MetaDataType::SubjectIpAddr || type == Context::MetaDataType::OtherIpAddr) return "ip";
    return Context::convertToString(type);
}

class QueryMatchSerializer
{
public:
    static const string req_attr_ctx_key;

    template <typename Archive>
    void
    serialize(Archive &ar)
    {
        I_Environment *env = Singleton::Consume<I_Environment>::by<QueryMatcher>();
        auto req_attr = env->get<string>(req_attr_ctx_key);
        if (!req_attr.ok()) return;

        try {
            ar(cereal::make_nvp(*req_attr, value));
            dbgDebug(D_RULEBASE_CONFIG)
                << "Found value for requested attribute. Tag: "
                << *req_attr
                << ", Value: "
                << value;
        } catch (exception &e) {
            dbgDebug(D_RULEBASE_CONFIG) << "Could not find values for requested attribute. Tag: " << *req_attr;
            ar.finishNode();
        }
    }

    template <typename Values>
    bool
    matchValues(const Values &requested_vals) const
    {
        return value != "" && (requested_vals.empty() || requested_vals.count(value) > 0);
    }

private:
    string value;
};

const string QueryMatchSerializer::req_attr_ctx_key = "requested attribute key";

Maybe<bool, Context::Error>
QueryMatcher::evalVariable() const
{
    if (is_any) return true;

    I_Environment *env = Singleton::Consume<I_Environment>::by<QueryMatcher>();
    auto local_asset_ctx = env->get<bool>("is local asset");
    bool is_remote_asset = local_asset_ctx.ok() && !(*local_asset_ctx);

    QueryRequest request;
    for (Context::MetaDataType name : makeRange<Context::MetaDataType>()) {
        auto val = env->get<string>(name);
        if (val.ok()) {
            if ((name == Context::MetaDataType::SubjectIpAddr && is_remote_asset) ||
                (name == Context::MetaDataType::OtherIpAddr && !is_remote_asset)) {
                continue;
            }

            request.addCondition(Condition::EQUALS, contextKeyToString(name), *val);
        }
    }
    if (request.empty()) return false;

    request.setRequestedAttr(key);
    ScopedContext req_attr_key;
    req_attr_key.registerValue<string>(QueryMatchSerializer::req_attr_ctx_key, key);

    I_Intelligence_IS_V2 *intelligence = Singleton::Consume<I_Intelligence_IS_V2>::by<Zone>();
    auto query_res = intelligence->queryIntelligence<QueryMatchSerializer>(request);
    if (!query_res.ok()) {
        dbgWarning(D_RULEBASE_CONFIG) << "Failed to perform intelligence query. Error: " << query_res.getErr();
        return false;
    }

    for (const AssetReply<QueryMatchSerializer> &asset : query_res.unpack()) {
        if (asset.matchValues<unordered_set<string>>(values)) return true;
    }

    return false;
}
@@ -1,57 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/evaluators/trigger_eval.h"

#include <vector>
#include <string>

#include "generic_rulebase/rulebase_config.h"
#include "config.h"
#include "debug.h"

using namespace std;

USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

string TriggerMatcher::ctx_key = "triggers";

TriggerMatcher::TriggerMatcher(const vector<string> &params)
{
    if (params.size() != 1) reportWrongNumberOfParams(TriggerMatcher::getName(), params.size(), 1, 1);
    trigger_id = params[0];
}

Maybe<bool, Context::Error>
TriggerMatcher::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<TriggerMatcher>();
    auto ac_bc_trigger_id_ctx = env->get<set<GenericConfigId>>("ac_trigger_id");
    dbgTrace(D_RULEBASE_CONFIG)
        << "Trying to match trigger for access control rule. ID: "
        << trigger_id << ", Current set IDs: "
        << makeSeparatedStr(ac_bc_trigger_id_ctx.ok() ? *ac_bc_trigger_id_ctx : set<GenericConfigId>(), ", ");
    if (ac_bc_trigger_id_ctx.ok()) {
        return ac_bc_trigger_id_ctx.unpack().count(trigger_id) > 0;
    }

    auto bc_trigger_id_ctx = env->get<set<GenericConfigId>>(TriggerMatcher::ctx_key);
    dbgTrace(D_RULEBASE_CONFIG)
        << "Trying to match trigger. ID: "
        << trigger_id << ", Current set IDs: "
        << makeSeparatedStr(bc_trigger_id_ctx.ok() ? *bc_trigger_id_ctx : set<GenericConfigId>(), ", ");
    if (bc_trigger_id_ctx.ok() && bc_trigger_id_ctx.unpack().count(trigger_id) > 0 ) return true;

    auto rule = getConfiguration<BasicRuleConfig>("rulebase", "rulesConfig");
    return rule.ok() && rule.unpack().isTriggerActive(trigger_id);
}
@@ -1,44 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/evaluators/zone_eval.h"

#include <vector>
#include <string>

#include "generic_rulebase/zone.h"
#include "generic_rulebase/rulebase_config.h"
#include "config.h"

using namespace std;

string ZoneMatcher::ctx_key = "zone_id";

ZoneMatcher::ZoneMatcher(const vector<string> &params)
{
    if (params.size() != 1) reportWrongNumberOfParams(ZoneMatcher::getName(), params.size(), 1, 1);
    zone_id = params[0];
}

Maybe<bool, Context::Error>
ZoneMatcher::evalVariable() const
{
    I_Environment *env = Singleton::Consume<I_Environment>::by<ZoneMatcher>();
    auto bc_zone_id_ctx = env->get<GenericConfigId>(ZoneMatcher::ctx_key);
    if (bc_zone_id_ctx.ok() && *bc_zone_id_ctx == zone_id) return true;

    if (!getProfileAgentSettingWithDefault<bool>(false, "rulebase.enableQueryBasedMatch")) return false;

    auto zone = getConfiguration<Zone>("rulebase", "zones");
    return zone.ok() && zone.unpack().getId() == zone_id;
}
@@ -1,126 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/generic_rulebase.h"

#include <unordered_set>

#include "generic_rulebase/evaluators/trigger_eval.h"
#include "generic_rulebase/evaluators/practice_eval.h"
#include "generic_rulebase/evaluators/parameter_eval.h"
#include "generic_rulebase/evaluators/zone_eval.h"
#include "generic_rulebase/evaluators/asset_eval.h"
#include "generic_rulebase/evaluators/query_eval.h"
#include "generic_rulebase/evaluators/connection_eval.h"
#include "generic_rulebase/evaluators/http_transaction_data_eval.h"
#include "generic_rulebase/zone.h"
#include "generic_rulebase/triggers_config.h"
#include "singleton.h"
#include "common.h"
#include "debug.h"
#include "cache.h"
#include "config.h"

using namespace std;

USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

class GenericRulebase::Impl : Singleton::Provide<I_GenericRulebase>::From<GenericRulebase>
{
public:
    void init() {}
    void fini() {}

    void preload();

    Maybe<Zone, Config::Errors> getLocalZone() const override { return getZoneConfig(true); }
    Maybe<Zone, Config::Errors> getOtherZone() const override { return getZoneConfig(false); }

    set<ParameterBehavior> getBehavior(const ParameterKeyValues &key_value_pairs) const override;

private:
    Maybe<Zone, Config::Errors>
    getZoneConfig(bool is_local_zone) const
    {
        ScopedContext asset_location_ctx;
        asset_location_ctx.registerValue<bool>("is local asset", is_local_zone);
        return getConfiguration<Zone>("rulebase", "zones");
    }
};

void
GenericRulebase::Impl::preload()
{
    addMatcher<TriggerMatcher>();
    addMatcher<PracticeMatcher>();
    addMatcher<ParameterMatcher>();
    addMatcher<ZoneMatcher>();
    addMatcher<AssetMatcher>();
    addMatcher<QueryMatcher>();
    addMatcher<IpAddressMatcher>();
    addMatcher<SourceIpMatcher>();
    addMatcher<DestinationIpMatcher>();
    addMatcher<SourcePortMatcher>();
    addMatcher<ListeningPortMatcher>();
    addMatcher<IpProtocolMatcher>();
    addMatcher<UrlMatcher>();
    addMatcher<EqualHost>();
    addMatcher<WildcardHost>();
    addMatcher<EqualListeningIP>();
    addMatcher<EqualListeningPort>();
    addMatcher<BeginWithUri>();
    BasicRuleConfig::preload();
    LogTriggerConf::preload();
    ParameterException::preload();
    registerExpectedConfiguration<Zone>("rulebase", "zones");
    registerExpectedConfigFile("zones", Config::ConfigFileType::Policy);
    registerExpectedConfigFile("triggers", Config::ConfigFileType::Policy);
    registerExpectedConfigFile("rules", Config::ConfigFileType::Policy);
    registerExpectedConfigFile("parameters", Config::ConfigFileType::Policy);
    registerExpectedConfigFile("exceptions", Config::ConfigFileType::Policy);
}

set<ParameterBehavior>
GenericRulebase::Impl::getBehavior(const ParameterKeyValues &key_value_pairs) const
{
    auto &exceptions = getConfiguration<ParameterException>("rulebase", "exception");

    if (!exceptions.ok()) {
        dbgTrace(D_RULEBASE_CONFIG) << "Could not find any exception with the current rule's context";
        return {};
    }
    return (*exceptions).getBehavior(key_value_pairs);
}

GenericRulebase::GenericRulebase() : Component("GenericRulebase"), pimpl(make_unique<Impl>()) {}

GenericRulebase::~GenericRulebase() {}

void
GenericRulebase::init()
{
    pimpl->init();
}

void
GenericRulebase::fini()
{
    pimpl->fini();
}

void
GenericRulebase::preload()
{
    pimpl->preload();
}
@@ -1,109 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/generic_rulebase_context.h"

#include <vector>

#include "context.h"
#include "config.h"
#include "generic_rulebase/evaluators/trigger_eval.h"
#include "generic_rulebase/evaluators/parameter_eval.h"
#include "generic_rulebase/evaluators/practice_eval.h"
#include "generic_rulebase/evaluators/zone_eval.h"
#include "generic_rulebase/evaluators/asset_eval.h"

USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

using namespace std;

template<typename Configs>
set<GenericConfigId>
extractIds(const vector<Configs> &configurations)
{
    set<GenericConfigId> ids;
    for (const Configs &conf : configurations) {
        ids.insert(conf.getId());
    }
    return ids;
}

void
GenericRulebaseContext::activate(const BasicRuleConfig &rule)
{
    switch(registration_state) {
        case RuleRegistrationState::UNINITIALIZED: {
            registration_state = RuleRegistrationState::REGISTERED;
            ctx.registerValue<set<GenericConfigId>>(
                TriggerMatcher::ctx_key,
                extractIds<RuleTrigger>(rule.getTriggers())
            );
            ctx.registerValue<set<GenericConfigId>>(
                PracticeMatcher::ctx_key,
                extractIds<RulePractice>(rule.getPractices())
            );
            dbgTrace(D_RULEBASE_CONFIG)
                << "Activating current practices. Current practice IDs: "
                << makeSeparatedStr(extractIds<RulePractice>(rule.getPractices()), ", ");

            ctx.registerValue<set<GenericConfigId>>(
                ParameterMatcher::ctx_key,
                extractIds<RuleParameter>(rule.getParameters())
            );
            ctx.registerValue<GenericConfigId>(
                ZoneMatcher::ctx_key,
                rule.getZoneId()
            );
            ctx.registerValue<GenericConfigId>(
                AssetMatcher::ctx_key,
                rule.getAssetId()
            );
            ctx.activate();
            break;
        }
        case RuleRegistrationState::REGISTERED: {
            dbgTrace(D_RULEBASE_CONFIG) << "Activating registered rule values";
            ctx.activate();
            break;
        }
        case RuleRegistrationState::UNREGISTERED: {
            dbgTrace(D_RULEBASE_CONFIG) << "Failed to register rule values";
        }
    }
}

void
GenericRulebaseContext::activate()
{
    switch(registration_state) {
        case RuleRegistrationState::UNINITIALIZED: {
            auto maybe_rule = getConfiguration<BasicRuleConfig>("rulebase", "rulesConfig");
            if (!maybe_rule.ok()) {
                registration_state = RuleRegistrationState::UNREGISTERED;
                return;
            }
            dbgTrace(D_RULEBASE_CONFIG) << "Registering new rule values";
            activate(maybe_rule.unpack());
            registration_state = RuleRegistrationState::REGISTERED;
            break;
        }
        case RuleRegistrationState::REGISTERED: {
            dbgTrace(D_RULEBASE_CONFIG) << "Activating registered rule values";
            ctx.activate();
            break;
        }
        case RuleRegistrationState::UNREGISTERED: {
            dbgTrace(D_RULEBASE_CONFIG) << "Failed to register rule values";
        }
    }
}
@@ -1,347 +0,0 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "generic_rulebase/match_query.h"
|
||||
|
||||
#include "cereal/types/set.hpp"
|
||||
|
||||
#include "generic_rulebase/generic_rulebase_utils.h"
|
||||
#include "config.h"
|
||||
#include "ip_utilities.h"
|
||||
#include "agent_core_utilities.h"
|
||||
|
||||
USE_DEBUG_FLAG(D_RULEBASE_CONFIG);
|
||||
|
||||
using namespace std;
|
||||
|
||||
static const unordered_map<string, MatchQuery::MatchType> string_to_match_type = {
|
||||
{ "condition", MatchQuery::MatchType::Condition },
|
||||
{ "operator", MatchQuery::MatchType::Operator }
|
||||
};
|
||||
|
||||
static const unordered_map<string, MatchQuery::Operators> string_to_operator = {
|
||||
{ "and", MatchQuery::Operators::And },
|
||||
{ "or", MatchQuery::Operators::Or }
|
||||
};
|
||||
|
||||
static const unordered_map<string, MatchQuery::Conditions> string_to_condition = {
|
||||
{ "equals", MatchQuery::Conditions::Equals },
|
||||
{ "not-equals", MatchQuery::Conditions::NotEquals },
|
||||
{ "not equals", MatchQuery::Conditions::NotEquals },
|
||||
{ "in", MatchQuery::Conditions::In },
|
||||
{ "not-in", MatchQuery::Conditions::NotIn },
|
||||
{ "not in", MatchQuery::Conditions::NotIn },
|
||||
{ "exist", MatchQuery::Conditions::Exist }
|
||||
};
|
||||
|
||||
static const string ip_addr_type_name = "IP address";
|
||||
static const string port_type_name = "port";
|
||||
static const string ip_proto_type_name = "IP protocol";
|
||||
|
||||
static const unordered_map<string, MatchQuery::StaticKeys> string_to_key = {
|
||||
{ "sourceIP", MatchQuery::StaticKeys::SrcIpAddress },
|
||||
{ "sourceIpAddr", MatchQuery::StaticKeys::SrcIpAddress },
|
||||
{ "destinationIP", MatchQuery::StaticKeys::DstIpAddress },
|
||||
{ "destinationIpAddr", MatchQuery::StaticKeys::DstIpAddress },
|
||||
{ "ipAddress", MatchQuery::StaticKeys::IpAddress },
|
||||
{ "sourcePort", MatchQuery::StaticKeys::SrcPort },
|
||||
{ "listeningPort", MatchQuery::StaticKeys::ListeningPort },
|
||||
{ "ipProtocol", MatchQuery::StaticKeys::IpProtocol },
|
||||
{ "domain", MatchQuery::StaticKeys::Domain }
|
||||
};
|
||||
|
||||
MatchQuery::MatchQuery(const string &match) : is_specific_label(false), is_ignore_keyword(false)
|
||||
{
|
||||
try {
|
||||
stringstream ss;
|
||||
ss.str(match);
|
||||
cereal::JSONInputArchive archive_in(ss);
|
||||
load(archive_in);
|
||||
} catch (const exception &e) {
|
||||
dbgWarning(D_RULEBASE_CONFIG)
|
||||
<< "Unable to load match query JSON. JSON content: "
|
||||
<< match
|
||||
<< ", Error: "
|
||||
<< e.what();
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
MatchQuery::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
string type_as_string;
|
||||
archive_in(cereal::make_nvp("type", type_as_string));
|
||||
|
||||
string op_as_string;
|
||||
archive_in(cereal::make_nvp("op", op_as_string));
|
||||
|
||||
auto maybe_type = string_to_match_type.find(type_as_string);
|
||||
if (maybe_type == string_to_match_type.end()) {
|
||||
reportConfigurationError("Illegal Zone match query type. Provided type in configuration: " + type_as_string);
|
||||
}
|
||||
|
||||
type = maybe_type->second;
|
||||
switch (type) {
|
||||
case (MatchType::Condition): {
|
||||
auto maybe_condition = string_to_condition.find(op_as_string);
|
||||
if (maybe_condition == string_to_condition.end()) {
|
||||
reportConfigurationError(
|
||||
"Illegal op provided for condition. Provided op in configuration: " +
|
||||
op_as_string
|
||||
);
|
||||
}
|
||||
condition_type = maybe_condition->second;
|
||||
operator_type = Operators::None;
|
||||
archive_in(cereal::make_nvp("key", key));
|
||||
key_type = getKeyByName(key);
|
||||
if (key_type == StaticKeys::NotStatic) {
|
||||
if (key.rfind("containerLabels.", 0) == 0) {
|
||||
is_specific_label = true;
|
||||
} else {
|
||||
is_specific_label = false;
|
||||
}
|
||||
}
|
||||
is_ignore_keyword = (key == "indicator");
|
||||
|
||||
if (condition_type != Conditions::Exist) {
|
||||
archive_in(cereal::make_nvp("value", value));
|
||||
for(const auto &val: value) {
|
||||
if (isKeyTypeIp()) {
|
||||
auto ip_range = IPUtilities::createRangeFromString<IPRange, IpAddress>(val, ip_addr_type_name);
|
||||
if (ip_range.ok()) {
|
||||
ip_addr_value.push_back(ip_range.unpack());
|
||||
} else {
|
||||
dbgWarning(D_RULEBASE_CONFIG)
|
||||
<< "Failed to parse IP address range. Error: "
|
||||
<< ip_range.getErr();
|
||||
}
|
||||
} else if (isKeyTypePort()) {
|
||||
auto port_range = IPUtilities::createRangeFromString<PortsRange, uint16_t>(
|
||||
val,
|
||||
port_type_name
|
||||
);
|
||||
if (port_range.ok()) {
|
||||
port_value.push_back(port_range.unpack());
|
||||
} else {
|
||||
dbgWarning(D_RULEBASE_CONFIG)
|
||||
<< "Failed to parse port range. Error: "
|
||||
<< port_range.getErr();
|
||||
}
|
||||
} else if (isKeyTypeProtocol()) {
|
||||
auto proto_range = IPUtilities::createRangeFromString<IpProtoRange, uint8_t>(
|
||||
val,
|
||||
ip_proto_type_name
|
||||
);
|
||||
if (proto_range.ok()) {
|
||||
ip_proto_value.push_back(proto_range.unpack());
|
||||
} else {
|
||||
dbgWarning(D_RULEBASE_CONFIG)
|
||||
<< "Failed to parse IP protocol range. Error: "
|
||||
<< proto_range.getErr();
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
regex_values.insert(boost::regex(val));
|
||||
} catch (const exception &e) {
|
||||
dbgDebug(D_RULEBASE_CONFIG) << "Failed to compile regex. Error: " << e.what();
|
||||
}
|
||||
}
|
||||
first_value = *(value.begin());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case (MatchType::Operator): {
|
||||
auto maybe_operator = string_to_operator.find(op_as_string);
|
||||
if (maybe_operator == string_to_operator.end()) {
|
||||
reportConfigurationError(
|
||||
"Illegal op provided for operator. Provided op in configuration: " +
|
||||
op_as_string
|
||||
);
|
||||
}
|
||||
operator_type = maybe_operator->second;
|
||||
condition_type = Conditions::None;
|
||||
archive_in(cereal::make_nvp("items", items));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
MatchQuery::StaticKeys
|
||||
MatchQuery::getKeyByName(const string &key_type_name)
|
||||
{
|
||||
auto key = string_to_key.find(key_type_name);
|
||||
if (key == string_to_key.end()) return StaticKeys::NotStatic;
|
||||
return key->second;
|
||||
}
|
||||
|
||||
bool
|
||||
MatchQuery::isKeyTypeIp() const
|
||||
{
|
||||
return (key_type >= StaticKeys::IpAddress && key_type <= StaticKeys::DstIpAddress);
|
||||
}
|
||||
|
||||
bool
|
||||
MatchQuery::isKeyTypePort() const
|
||||
{
|
||||
return (key_type == StaticKeys::SrcPort || key_type == StaticKeys::ListeningPort);
|
||||
}
|
||||
|
||||
bool
|
||||
MatchQuery::isKeyTypeProtocol() const
|
||||
{
|
||||
return (key_type == StaticKeys::IpProtocol);
|
||||
}
|
||||
|
||||
bool
|
||||
MatchQuery::isKeyTypeDomain() const
|
||||
{
|
||||
return (key_type == StaticKeys::Domain);
|
||||
}
|
||||
|
||||
bool
|
||||
MatchQuery::isKeyTypeSpecificLabel() const
|
||||
{
|
||||
    return is_specific_label;
}

bool
MatchQuery::isKeyTypeStatic() const
{
    return (key_type != StaticKeys::NotStatic);
}

set<string>
MatchQuery::getAllKeys() const
{
    set<string> keys;
    if (type == MatchType::Condition) {
        if (!key.empty()) keys.insert(key);
        return keys;
    }

    for (const MatchQuery &inner_match: items) {
        set<string> inner_keys = inner_match.getAllKeys();
        keys.insert(inner_keys.begin(), inner_keys.end());
    }

    return keys;
}

bool
MatchQuery::matchAttributes(
    const unordered_map<string, set<string>> &key_value_pairs,
    set<string> &matched_override_keywords) const
{

    if (type == MatchType::Condition) {
        auto key_value_pair = key_value_pairs.find(key);
        if (key_value_pair == key_value_pairs.end()) {
            dbgTrace(D_RULEBASE_CONFIG) << "Ignoring irrelevant key: " << key;
            return false;
        }
        return matchAttributes(key_value_pair->second, matched_override_keywords);
    } else if (type == MatchType::Operator && operator_type == Operators::And) {
        for (const MatchQuery &inner_match: items) {
            if (!inner_match.matchAttributes(key_value_pairs, matched_override_keywords)) {
                return false;
            }
        }
        return true;
    } else if (type == MatchType::Operator && operator_type == Operators::Or) {
        // With 'or' condition, evaluate matched override keywords first and add the ones that were fully matched
        set<string> inner_override_keywords;
        bool res = false;
        for (const MatchQuery &inner_match: items) {
            inner_override_keywords.clear();
            if (inner_match.matchAttributes(key_value_pairs, inner_override_keywords)) {
                matched_override_keywords.insert(inner_override_keywords.begin(), inner_override_keywords.end());
                res = true;
            }
        }
        return res;
    } else {
        dbgWarning(D_RULEBASE_CONFIG) << "Unsupported match query type";
    }
    return false;
}

MatchQuery::MatchResult
MatchQuery::getMatch(const unordered_map<string, set<string>> &key_value_pairs) const
{
    MatchQuery::MatchResult matches;
    matches.matched_keywords = make_shared<set<string>>();
    matches.is_match = matchAttributes(key_value_pairs, *matches.matched_keywords);
    return matches;
}

bool
MatchQuery::matchAttributes(
    const unordered_map<string, set<string>> &key_value_pairs) const
{
    return getMatch(key_value_pairs).is_match;
}

bool
MatchQuery::matchAttributes(
    const set<string> &values,
    set<string> &matched_override_keywords) const
{
    auto &type = condition_type;
    bool negate = type == MatchQuery::Conditions::NotEquals || type == MatchQuery::Conditions::NotIn;
    bool match = isRegEx() ? matchAttributesRegEx(values, matched_override_keywords) : matchAttributesString(values);
    return negate ? !match : match;
}

bool
MatchQuery::matchAttributesRegEx(
    const set<string> &values,
    set<string> &matched_override_keywords) const
{
    bool res = false;
    boost::cmatch value_matcher;
    for (const boost::regex &val_regex : regex_values) {
        for (const string &requested_match_value : values) {
            if (NGEN::Regex::regexMatch(
                __FILE__,
                __LINE__,
                requested_match_value.c_str(),
                value_matcher,
                val_regex))
            {
                res = true;
                if (is_ignore_keyword) {
                    matched_override_keywords.insert(requested_match_value);
                } else {
                    return res;
                }
            }
        }
    }
    return res;
}

bool
MatchQuery::matchAttributesString(const set<string> &values) const
{
    for (const string &requested_value : values) {
        if (value.find(requested_value) != value.end()) return true;
    }
    return false;
}

bool
MatchQuery::isRegEx() const
{
    return key != "protectionName";
}
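// --- Editorial sketch: illustration only, not part of the change above -----------------------
// A minimal example of evaluating a loaded MatchQuery against runtime attributes. The include
// path is assumed, and the attribute names and values are invented for the example.
#include <set>
#include <string>
#include <unordered_map>

#include "generic_rulebase/match_query.h"   // assumed header location

void
evaluateMatchExample(const MatchQuery &query)
{
    std::unordered_map<std::string, std::set<std::string>> attributes = {
        { "sourceIdentifier", { "10.0.0.7" } },
        { "protectionName",   { "SQL Injection" } }
    };

    MatchQuery::MatchResult result = query.getMatch(attributes);
    if (result.is_match) {
        // matched_keywords collects values captured by "ignore" keyword conditions;
        // callers later exclude these from score calculation.
        for (const std::string &keyword : *result.matched_keywords) (void)keyword;
    }
}
// ----------------------------------------------------------------------------------------------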
@@ -1,157 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/parameters_config.h"

USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

using namespace std;

bool ParameterException::is_geo_location_exception_exists(false);
bool ParameterException::is_geo_location_exception_being_loaded(false);

void
ParameterOverrides::load(cereal::JSONInputArchive &archive_in)
{
    parseJSONKey<vector<ParsedBehavior>>("parsedBehavior", parsed_behaviors, archive_in);
}

void
ParameterTrustedSources::load(cereal::JSONInputArchive &archive_in)
{
    parseJSONKey<uint>("numOfSources", num_of_sources, archive_in);
    parseJSONKey<vector<SourcesIdentifier>>("sourcesIdentifiers", sources_identidiers, archive_in);
}

void
ParameterBehavior::load(cereal::JSONInputArchive &archive_in)
{
    string key_string;
    string val_string;
    parseJSONKey<string>("id", id, archive_in);
    parseJSONKey<string>("key", key_string, archive_in);
    parseJSONKey<string>("value", val_string, archive_in);
    if (string_to_behavior_key.find(key_string) == string_to_behavior_key.end()) {
        dbgWarning(D_RULEBASE_CONFIG) << "Unsupported behavior key: " << key_string;
        return;
    }
    key = string_to_behavior_key.at(key_string);

    if (string_to_behavior_val.find(val_string) == string_to_behavior_val.end()) {
        dbgWarning(D_RULEBASE_CONFIG) << "Unsupported behavior value: " << val_string;
        return;
    }
    value = string_to_behavior_val.at(val_string);
}

void
ParameterAntiBot::load(cereal::JSONInputArchive &archive_in)
{
    parseJSONKey<vector<string>>("injected", injected, archive_in);
    parseJSONKey<vector<string>>("validated", validated, archive_in);
}

void
ParameterOAS::load(cereal::JSONInputArchive &archive_in)
{
    parseJSONKey<string>("value", value, archive_in);
}

void
ParameterException::MatchBehaviorPair::load(cereal::JSONInputArchive &archive_in)
{
    parseJSONKey<MatchQuery>("match", match, archive_in);
    parseJSONKey<ParameterBehavior>("behavior", behavior, archive_in);
}

void
ParameterException::load(cereal::JSONInputArchive &archive_in)
{
    try {
        archive_in(
            cereal::make_nvp("match", match),
            cereal::make_nvp("behavior", behavior)
        );
    } catch (...) {
        parseJSONKey<vector<MatchBehaviorPair>>("exceptions", match_queries, archive_in);
    }

    function<bool(const MatchQuery &)> isGeoLocationExists =
        [&](const MatchQuery &query)
        {
            if (query.getKey() == "countryCode" || query.getKey() == "countryName") {
                is_geo_location_exception_being_loaded = true;
                return true;
            }

            for (const MatchQuery &query_item : query.getItems()) {
                if (isGeoLocationExists(query_item)) return true;
            }

            return false;
        };

    if (isGeoLocationExists(match)) return;
    for (const MatchBehaviorPair &match_query : match_queries) {
        if (isGeoLocationExists(match_query.match)) return;
    }
}

set<ParameterBehavior>
ParameterException::getBehavior(
    const unordered_map<string, set<string>> &key_value_pairs,
    set<string> &matched_override_keywords) const
{
    set<ParameterBehavior> matched_behaviors;

    matched_override_keywords.clear();
    dbgTrace(D_RULEBASE_CONFIG) << "Matching exception";
    for (const MatchBehaviorPair &match_behavior_pair: match_queries) {
        MatchQuery::MatchResult match_res = match_behavior_pair.match.getMatch(key_value_pairs);
        if (match_res.is_match) {
            dbgTrace(D_RULEBASE_CONFIG) << "Successfully matched an exception from a list of matches.";
            // When matching indicators with action=ignore, we expect no behavior override.
            // Instead, a matched keywords list should be returned which will be later removed from score calculation
            if (match_res.matched_keywords->size() > 0 && match_behavior_pair.behavior == action_ignore) {
                matched_override_keywords.insert(match_res.matched_keywords->begin(),
                    match_res.matched_keywords->end());
            } else {
                matched_behaviors.insert(match_behavior_pair.behavior);
            }
        }
    }

    if (match_queries.empty()) {
        MatchQuery::MatchResult match_res = match.getMatch(key_value_pairs);
        if (match_res.is_match) {
            dbgTrace(D_RULEBASE_CONFIG) << "Successfully matched an exception.";
            // When matching indicators with action=ignore, we expect no behavior override.
            // Instead, a matched keywords list should be returned which will be later removed from score calculation
            if (match_res.matched_keywords->size() > 0 && behavior == action_ignore) {
                matched_override_keywords.insert(match_res.matched_keywords->begin(),
                    match_res.matched_keywords->end());
            } else {
                matched_behaviors.insert(behavior);
            }
        }
    }

    return matched_behaviors;
}

set<ParameterBehavior>
ParameterException::getBehavior(const unordered_map<string, set<string>> &key_value_pairs) const
{
    set<string> keywords;
    return getBehavior(key_value_pairs, keywords);
}
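// --- Editorial sketch: illustration only, not part of the change above -----------------------
// How a caller might consume ParameterException::getBehavior(). The attribute map below is
// invented; "sourceIdentifier" mirrors the key used by the IPS code later in this diff.
#include <set>
#include <string>
#include <unordered_map>

#include "generic_rulebase/parameters_config.h"

void
exceptionBehaviorExample(const ParameterException &exception)
{
    std::unordered_map<std::string, std::set<std::string>> attributes = {
        { "sourceIdentifier", { "192.0.2.10" } }
    };

    std::set<std::string> ignored_keywords;
    std::set<ParameterBehavior> behaviors = exception.getBehavior(attributes, ignored_keywords);

    // behaviors holds explicit overrides; ignored_keywords holds values matched by
    // action=ignore exceptions, which are dropped from score calculation instead of
    // overriding the verdict.
    (void)behaviors;
}
// ----------------------------------------------------------------------------------------------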
@@ -1,79 +0,0 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "generic_rulebase/rulebase_config.h"

#include "telemetry.h"
#include "config.h"

USE_DEBUG_FLAG(D_RULEBASE_CONFIG);

using namespace std;

set<string> BasicRuleConfig::assets_ids{};
set<string> BasicRuleConfig::assets_ids_aggregation{};

void
BasicRuleConfig::load(cereal::JSONInputArchive &ar)
{
    parseJSONKey<vector<RulePractice>>("practices", practices, ar);
    parseJSONKey<vector<RuleTrigger>>("triggers", triggers, ar);
    parseJSONKey<vector<RuleParameter>>("parameters", parameters, ar);
    parseJSONKey<uint8_t>("priority", priority, ar);
    parseJSONKey<string>("ruleId", rule_id, ar);
    parseJSONKey<string>("ruleName", rule_name, ar);
    parseJSONKey<string>("assetId", asset_id, ar);
    parseJSONKey<string>("assetName", asset_name, ar);
    parseJSONKey<string>("zoneId", zone_id, ar);
    parseJSONKey<string>("zoneName", zone_name, ar);

    assets_ids_aggregation.insert(asset_id);
}

void
BasicRuleConfig::updateCountMetric()
{
    BasicRuleConfig::assets_ids = BasicRuleConfig::assets_ids_aggregation;
    AssetCountEvent(AssetType::ALL, BasicRuleConfig::assets_ids.size()).notify();
}

bool
BasicRuleConfig::isPracticeActive(const string &practice_id) const
{
    for (auto practice: practices) {
        if (practice.getId() == practice_id) return true;
    }
    return false;
}

bool
BasicRuleConfig::isTriggerActive(const string &trigger_id) const
{
    for (auto trigger: triggers) {
        if (trigger.getId() == trigger_id) {
            return true;
        }
    }
    return false;
}

bool
BasicRuleConfig::isParameterActive(const string &parameter_id) const
{
    for (auto param: parameters) {
        if (param.getId() == parameter_id) {
            return true;
        }
    }
    return false;
}
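// --- Editorial sketch: illustration only, not part of the change above -----------------------
// BasicRuleConfig exposes per-rule lookups for the IDs parsed in load(); the IDs below reuse
// values that appear in the test policy further down and are otherwise arbitrary.
#include <string>

#include "generic_rulebase/rulebase_config.h"

bool
isRuleRelevantExample(const BasicRuleConfig &rule)
{
    return rule.isPracticeActive("2-2-2") || rule.isTriggerActive("5eaeefde6765c30010bae8b6");
}
// ----------------------------------------------------------------------------------------------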
@@ -1,243 +0,0 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#include "generic_rulebase/triggers_config.h"
|
||||
#include "generic_rulebase/generic_rulebase_utils.h"
|
||||
|
||||
USE_DEBUG_FLAG(D_RULEBASE_CONFIG);
|
||||
|
||||
using namespace std;
|
||||
|
||||
WebTriggerConf::WebTriggerConf() : response_title(""), response_body(""), response_code(0) {}
|
||||
WebTriggerConf::WebTriggerConf(const string &title, const string &body, uint code)
|
||||
:
|
||||
response_title(title),
|
||||
response_body(body),
|
||||
response_code(code)
|
||||
{}
|
||||
|
||||
WebTriggerConf WebTriggerConf::default_trigger_conf = WebTriggerConf(
|
||||
"Attack blocked by web application protection", // title
|
||||
"Check Point's <b>Application Security</b> has detected an attack and blocked it.", // body
|
||||
403
|
||||
);
|
||||
|
||||
void
|
||||
WebTriggerConf::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
try {
|
||||
parseJSONKey<string>("details level", details_level, archive_in);
|
||||
if (details_level == "Redirect") {
|
||||
parseJSONKey<string>("redirect URL", redirect_url, archive_in);
|
||||
parseJSONKey<bool>("xEventId", add_event_id_to_header, archive_in);
|
||||
parseJSONKey<bool>("eventIdInHeader", add_event_id_to_header, archive_in);
|
||||
return;
|
||||
}
|
||||
parseJSONKey<uint>("response code", response_code, archive_in);
|
||||
if (response_code < 100 || response_code > 599) {
|
||||
throw cereal::Exception(
|
||||
"illegal web trigger response code: " +
|
||||
to_string(response_code) +
|
||||
" is out of range (100-599)"
|
||||
);
|
||||
}
|
||||
|
||||
if (details_level == "Response Code") return;
|
||||
|
||||
parseJSONKey<string>("response body", response_body, archive_in);
|
||||
parseJSONKey<string>("response title", response_title, archive_in);
|
||||
} catch (const exception &e) {
|
||||
dbgWarning(D_RULEBASE_CONFIG) << "Failed to parse the web trigger configuration: '" << e.what() << "'";
|
||||
archive_in.setNextName(nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
WebTriggerConf::operator==(const WebTriggerConf &other) const
|
||||
{
|
||||
return
|
||||
response_code == other.response_code &&
|
||||
response_title == other.response_title &&
|
||||
response_body == other.response_body;
|
||||
}
|
||||
|
||||
LogTriggerConf::LogTriggerConf(string trigger_name, bool log_detect, bool log_prevent) : name(trigger_name)
|
||||
{
|
||||
if (log_detect) should_log_on_detect.setAll();
|
||||
if (log_prevent) should_log_on_prevent.setAll();
|
||||
active_streams.setFlag(ReportIS::StreamType::JSON_FOG);
|
||||
active_streams.setFlag(ReportIS::StreamType::JSON_LOG_FILE);
|
||||
}
|
||||
|
||||
ReportIS::Severity
|
||||
LogTriggerConf::getSeverity(bool is_action_drop_or_prevent) const
|
||||
{
|
||||
return is_action_drop_or_prevent ? ReportIS::Severity::MEDIUM : ReportIS::Severity::LOW;
|
||||
}
|
||||
|
||||
ReportIS::Priority
|
||||
LogTriggerConf::getPriority(bool is_action_drop_or_prevent) const
|
||||
{
|
||||
return is_action_drop_or_prevent ? ReportIS::Priority::HIGH : ReportIS::Priority::MEDIUM;
|
||||
}
|
||||
|
||||
Flags<ReportIS::StreamType>
|
||||
LogTriggerConf::getStreams(SecurityType security_type, bool is_action_drop_or_prevent) const
|
||||
{
|
||||
if (is_action_drop_or_prevent && should_log_on_prevent.isSet(security_type)) return active_streams;
|
||||
if (!is_action_drop_or_prevent && should_log_on_detect.isSet(security_type)) return active_streams;
|
||||
|
||||
return Flags<ReportIS::StreamType>();
|
||||
}
|
||||
|
||||
Flags<ReportIS::Enreachments>
|
||||
LogTriggerConf::getEnrechments(SecurityType security_type) const
|
||||
{
|
||||
Flags<ReportIS::Enreachments> enreachments;
|
||||
|
||||
if (log_geo_location.isSet(security_type)) enreachments.setFlag(ReportIS::Enreachments::GEOLOCATION);
|
||||
if (should_format_output) enreachments.setFlag(ReportIS::Enreachments::BEAUTIFY_OUTPUT);
|
||||
|
||||
return enreachments;
|
||||
}
|
||||
|
||||
template <typename EnumClass>
|
||||
static void
|
||||
setTriggersFlag(const string &key, cereal::JSONInputArchive &ar, EnumClass flag, Flags<EnumClass> &flags)
|
||||
{
|
||||
bool value = false;
|
||||
parseJSONKey<bool>(key, value, ar);
|
||||
if (value) flags.setFlag(flag);
|
||||
}
|
||||
|
||||
static void
|
||||
setLogConfiguration(
|
||||
const ReportIS::StreamType &log_type,
|
||||
const string &log_server_url = "",
|
||||
const string &protocol = ""
|
||||
)
|
||||
{
|
||||
dbgTrace(D_RULEBASE_CONFIG) << "log server url:" << log_server_url;
|
||||
if (log_server_url != "" && protocol != "") {
|
||||
Singleton::Consume<I_Logging>::by<LogTriggerConf>()->addStream(log_type, log_server_url, protocol);
|
||||
} else {
|
||||
Singleton::Consume<I_Logging>::by<LogTriggerConf>()->addStream(log_type);
|
||||
}
|
||||
}
|
||||
|
||||
static string
|
||||
parseProtocolWithDefault(
|
||||
const std::string &default_value,
|
||||
const std::string &key_name,
|
||||
cereal::JSONInputArchive &archive_in
|
||||
)
|
||||
{
|
||||
string value;
|
||||
try {
|
||||
archive_in(cereal::make_nvp(key_name, value));
|
||||
} catch (const cereal::Exception &e) {
|
||||
return default_value;
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
void
|
||||
LogTriggerConf::load(cereal::JSONInputArchive& archive_in)
|
||||
{
|
||||
try {
|
||||
parseJSONKey<string>("triggerName", name, archive_in);
|
||||
parseJSONKey<string>("verbosity", verbosity, archive_in);
|
||||
parseJSONKey<string>("urlForSyslog", url_for_syslog, archive_in);
|
||||
parseJSONKey<string>("urlForCef", url_for_cef, archive_in);
|
||||
parseJSONKey<string>("syslogProtocol", syslog_protocol, archive_in);
|
||||
syslog_protocol = parseProtocolWithDefault("UDP", "syslogProtocol", archive_in);
|
||||
cef_protocol = parseProtocolWithDefault("UDP", "cefProtocol", archive_in);
|
||||
|
||||
setTriggersFlag("webBody", archive_in, WebLogFields::webBody, log_web_fields);
|
||||
setTriggersFlag("webHeaders", archive_in, WebLogFields::webHeaders, log_web_fields);
|
||||
setTriggersFlag("webRequests", archive_in, WebLogFields::webRequests, log_web_fields);
|
||||
setTriggersFlag("webUrlPath", archive_in, WebLogFields::webUrlPath, log_web_fields);
|
||||
setTriggersFlag("webUrlQuery", archive_in, WebLogFields::webUrlQuery, log_web_fields);
|
||||
setTriggersFlag("logToAgent", archive_in, ReportIS::StreamType::JSON_LOG_FILE, active_streams);
|
||||
setTriggersFlag("logToCloud", archive_in, ReportIS::StreamType::JSON_FOG, active_streams);
|
||||
setTriggersFlag("logToK8sService", archive_in, ReportIS::StreamType::JSON_K8S_SVC, active_streams);
|
||||
setTriggersFlag("logToSyslog", archive_in, ReportIS::StreamType::SYSLOG, active_streams);
|
||||
setTriggersFlag("logToCef", archive_in, ReportIS::StreamType::CEF, active_streams);
|
||||
setTriggersFlag("acAllow", archive_in, SecurityType::AccessControl, should_log_on_detect);
|
||||
setTriggersFlag("acDrop", archive_in, SecurityType::AccessControl, should_log_on_prevent);
|
||||
setTriggersFlag("tpDetect", archive_in, SecurityType::ThreatPrevention, should_log_on_detect);
|
||||
setTriggersFlag("tpPrevent", archive_in, SecurityType::ThreatPrevention, should_log_on_prevent);
|
||||
setTriggersFlag("complianceWarnings", archive_in, SecurityType::Compliance, should_log_on_detect);
|
||||
setTriggersFlag("complianceViolations", archive_in, SecurityType::Compliance, should_log_on_prevent);
|
||||
setTriggersFlag("acLogGeoLocation", archive_in, SecurityType::AccessControl, log_geo_location);
|
||||
setTriggersFlag("tpLogGeoLocation", archive_in, SecurityType::ThreatPrevention, log_geo_location);
|
||||
setTriggersFlag("complianceLogGeoLocation", archive_in, SecurityType::Compliance, log_geo_location);
|
||||
|
||||
bool extend_logging = false;
|
||||
parseJSONKey<bool>("extendLogging", extend_logging, archive_in);
|
||||
if (extend_logging) {
|
||||
setTriggersFlag("responseCode", archive_in, WebLogFields::responseCode, log_web_fields);
|
||||
setTriggersFlag("responseBody", archive_in, WebLogFields::responseBody, log_web_fields);
|
||||
|
||||
string severity;
|
||||
static const map<string, extendLoggingSeverity> extend_logging_severity_strings = {
|
||||
{"High", extendLoggingSeverity::High},
|
||||
{"Critical", extendLoggingSeverity::Critical}
|
||||
};
|
||||
parseJSONKey<string>("extendLoggingMinSeverity", severity, archive_in);
|
||||
auto extended_severity = extend_logging_severity_strings.find(severity);
|
||||
if (extended_severity != extend_logging_severity_strings.end()) {
|
||||
extend_logging_severity = extended_severity->second;
|
||||
} else {
|
||||
dbgWarning(D_RULEBASE_CONFIG)
|
||||
<< "Failed to parse the extendLoggingMinSeverityfield: '"
|
||||
<< severity
|
||||
<< "'";
|
||||
}
|
||||
}
|
||||
|
||||
for (ReportIS::StreamType log_stream : makeRange<ReportIS::StreamType>()) {
|
||||
if (!active_streams.isSet(log_stream)) continue;
|
||||
switch (log_stream) {
|
||||
case ReportIS::StreamType::JSON_DEBUG:
|
||||
setLogConfiguration(ReportIS::StreamType::JSON_DEBUG);
|
||||
break;
|
||||
case ReportIS::StreamType::JSON_FOG:
|
||||
setLogConfiguration(ReportIS::StreamType::JSON_FOG);
|
||||
break;
|
||||
case ReportIS::StreamType::JSON_LOG_FILE:
|
||||
setLogConfiguration(ReportIS::StreamType::JSON_LOG_FILE);
|
||||
break;
|
||||
case ReportIS::StreamType::JSON_K8S_SVC:
|
||||
setLogConfiguration(ReportIS::StreamType::JSON_K8S_SVC);
|
||||
break;
|
||||
case ReportIS::StreamType::SYSLOG:
|
||||
setLogConfiguration(ReportIS::StreamType::SYSLOG, getUrlForSyslog(), syslog_protocol);
|
||||
break;
|
||||
case ReportIS::StreamType::CEF:
|
||||
setLogConfiguration(ReportIS::StreamType::CEF, getUrlForCef(), cef_protocol);
|
||||
break;
|
||||
case ReportIS::StreamType::NONE: break;
|
||||
case ReportIS::StreamType::COUNT: break;
|
||||
}
|
||||
}
|
||||
|
||||
parseJSONKey<bool>("formatLoggingOutput", should_format_output, archive_in);
|
||||
} catch (const exception &e) {
|
||||
dbgWarning(D_RULEBASE_CONFIG) << "Failed to parse the log trigger configuration: '" << e.what() << "'";
|
||||
archive_in.setNextName(nullptr);
|
||||
}
|
||||
}
|
||||
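// --- Editorial sketch: illustration only, not part of the change above -----------------------
// LogTriggerConf derives report severity and priority from whether the matched action was a
// prevent/drop. A minimal caller, using only the accessors defined above:
#include "generic_rulebase/triggers_config.h"

void
logTriggerExample(const LogTriggerConf &trigger, bool is_prevent)
{
    ReportIS::Severity severity = trigger.getSeverity(is_prevent);
    ReportIS::Priority priority = trigger.getPriority(is_prevent);

    // getStreams() would additionally return the configured destinations only when the
    // trigger is set to log this kind of event; otherwise the returned flag set is empty.
    (void)severity;
    (void)priority;
}
// ----------------------------------------------------------------------------------------------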
@@ -1,179 +0,0 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "generic_rulebase/zone.h"
|
||||
|
||||
#include <set>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
|
||||
using namespace std;
|
||||
|
||||
static const unordered_map<string, Zone::Direction> string_to_direction = {
|
||||
{ "to", Zone::Direction::To },
|
||||
{ "from", Zone::Direction::From },
|
||||
{ "bidirectional", Zone::Direction::Bidirectional }
|
||||
};
|
||||
|
||||
class AdjacentZone
|
||||
{
|
||||
public:
|
||||
void
|
||||
load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
string direction_as_string;
|
||||
archive_in(cereal::make_nvp("direction", direction_as_string));
|
||||
archive_in(cereal::make_nvp("zoneId", id));
|
||||
auto maybe_direction = string_to_direction.find(direction_as_string);
|
||||
if (maybe_direction == string_to_direction.end()) {
|
||||
reportConfigurationError(
|
||||
"Illegal direction provided for adjacency. Provided direction in configuration: " +
|
||||
direction_as_string
|
||||
);
|
||||
}
|
||||
dir = maybe_direction->second;
|
||||
}
|
||||
|
||||
pair<Zone::Direction, GenericConfigId> getValue() const { return make_pair(dir, id); }
|
||||
|
||||
private:
|
||||
Zone::Direction dir;
|
||||
GenericConfigId id;
|
||||
};
|
||||
|
||||
class TagsValues
|
||||
{
|
||||
public:
|
||||
static const string req_attrs_ctx_key;
|
||||
|
||||
TagsValues() {}
|
||||
|
||||
template <typename Archive>
|
||||
void
|
||||
serialize(Archive &ar)
|
||||
{
|
||||
I_Environment *env = Singleton::Consume<I_Environment>::by<Zone>();
|
||||
auto req_attrs = env->get<set<string>>(req_attrs_ctx_key);
|
||||
if (!req_attrs.ok()) return;
|
||||
|
||||
for (const string &req_attr : *req_attrs) {
|
||||
try {
|
||||
string data;
|
||||
ar(cereal::make_nvp(req_attr, data));
|
||||
dbgDebug(D_RULEBASE_CONFIG)
|
||||
<< "Found value for requested attribute. Tag: "
|
||||
<< req_attr
|
||||
<< ", Value: "
|
||||
<< data;
|
||||
|
||||
tags_set[req_attr].insert(data);
|
||||
} catch (const exception &e) {
|
||||
dbgDebug(D_RULEBASE_CONFIG) << "Could not find values for requested attribute. Tag: " << req_attr;
|
||||
ar.setNextName(nullptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
matchValueByKey(const string &requested_key, const unordered_set<string> &possible_values) const
|
||||
{
|
||||
auto values = tags_set.find(requested_key);
|
||||
if (values == tags_set.end()) return false;
|
||||
|
||||
for (const string &val : possible_values) {
|
||||
if (values->second.count(val)) return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
insert(const TagsValues &other)
|
||||
{
|
||||
for (auto &single_tags_value : other.getData()) {
|
||||
tags_set[single_tags_value.first].insert(single_tags_value.second.begin(), single_tags_value.second.end());
|
||||
}
|
||||
}
|
||||
|
||||
const unordered_map<string, set<string>> & getData() const { return tags_set; }
|
||||
|
||||
private:
|
||||
unordered_map<string, set<string>> tags_set;
|
||||
};
|
||||
|
||||
const string TagsValues::req_attrs_ctx_key = "requested attributes key";
|
||||
|
||||
void
|
||||
Zone::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
archive_in(cereal::make_nvp("id", zone_id));
|
||||
archive_in(cereal::make_nvp("name", zone_name));
|
||||
vector<AdjacentZone> adjacency;
|
||||
try {
|
||||
archive_in(cereal::make_nvp("adjacentZones", adjacency));
|
||||
} catch (const cereal::Exception &) {
|
||||
dbgTrace(D_RULEBASE_CONFIG)
|
||||
<< "List of adjacentZones does not exist for current object. Zone id: "
|
||||
<< zone_id
|
||||
<< ", Zone name: "
|
||||
<< zone_name;
|
||||
|
||||
archive_in.setNextName(nullptr);
|
||||
}
|
||||
|
||||
for (const AdjacentZone &zone : adjacency) {
|
||||
adjacent_zones.push_back(zone.getValue());
|
||||
}
|
||||
|
||||
archive_in(cereal::make_nvp("match", match_query));
|
||||
|
||||
is_any =
|
||||
match_query.getType() == MatchQuery::MatchType::Condition &&
|
||||
match_query.getKey() == "any" &&
|
||||
match_query.getValue().count("any") > 0;
|
||||
|
||||
set<string> keys = match_query.getAllKeys();
|
||||
}
|
||||
|
||||
const string
|
||||
contextKeyToString(Context::MetaDataType type)
|
||||
{
|
||||
if (type == Context::MetaDataType::SubjectIpAddr || type == Context::MetaDataType::OtherIpAddr) return "ip";
|
||||
return Context::convertToString(type);
|
||||
}
|
||||
|
||||
bool
|
||||
Zone::contains(const Asset &asset)
|
||||
{
|
||||
QueryRequest request;
|
||||
|
||||
for (const auto &main_attr : asset.getAttrs()) {
|
||||
request.addCondition(Condition::EQUALS, contextKeyToString(main_attr.first), main_attr.second);
|
||||
}
|
||||
|
||||
ScopedContext req_attrs_key;
|
||||
req_attrs_key.registerValue<set<string>>(TagsValues::req_attrs_ctx_key, match_query.getAllKeys());
|
||||
|
||||
I_Intelligence_IS_V2 *intelligence = Singleton::Consume<I_Intelligence_IS_V2>::by<Zone>();
|
||||
auto query_res = intelligence->queryIntelligence<TagsValues>(request);
|
||||
if (!query_res.ok()) {
|
||||
dbgWarning(D_RULEBASE_CONFIG) << "Failed to perform intelligence query. Error: " << query_res.getErr();
|
||||
return false;
|
||||
}
|
||||
|
||||
for (const AssetReply<TagsValues> &asset : query_res.unpack()) {
|
||||
TagsValues tag_values = asset.mergeReplyData();
|
||||
|
||||
if (match_query.matchAttributes(tag_values.getData())) return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
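// --- Editorial note: illustration only, not part of the change above -------------------------
// contextKeyToString() collapses both IP-address metadata types into the single "ip" attribute
// key used when building the intelligence query, so a zone match condition on "ip" covers
// either side of the connection:
void
contextKeyExample()
{
    std::string subject_key = contextKeyToString(Context::MetaDataType::SubjectIpAddr);   // "ip"
    std::string other_key = contextKeyToString(Context::MetaDataType::OtherIpAddr);       // "ip"
    (void)subject_key;
    (void)other_key;
}
// ----------------------------------------------------------------------------------------------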
@@ -1,114 +0,0 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "generic_rulebase/zones_config.h"
|
||||
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "generic_rulebase/generic_rulebase_utils.h"
|
||||
#include "config.h"
|
||||
#include "ip_utilities.h"
|
||||
#include "connkey.h"
|
||||
#include "i_generic_rulebase.h"
|
||||
|
||||
USE_DEBUG_FLAG(D_RULEBASE_CONFIG);
|
||||
|
||||
using namespace std;
|
||||
|
||||
void
|
||||
ZonesConfig::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgFlow(D_RULEBASE_CONFIG) << "Saving active zones";
|
||||
set<string> used_zones;
|
||||
cereal::load(archive_in, used_zones);
|
||||
|
||||
dbgTrace(D_RULEBASE_CONFIG) << "Loading all zones";
|
||||
auto all_zones_maybe = getSetting<Zones>("rulebase", "zones");
|
||||
if (!all_zones_maybe.ok()) {
|
||||
dbgWarning(D_RULEBASE_CONFIG) << "Failed to load zones";
|
||||
return;
|
||||
}
|
||||
|
||||
dbgTrace(D_RULEBASE_CONFIG) << "Creating cache of all zones by ID";
|
||||
map<GenericConfigId, Zone> all_zones;
|
||||
for (const auto &single_zone : all_zones_maybe.unpack().zones) {
|
||||
if (used_zones.count(single_zone.getId()) > 0 && single_zone.isAnyZone()) {
|
||||
dbgTrace(D_RULEBASE_CONFIG) << "Found used zone of type \"Any\": saving all zones as active zones";
|
||||
zones = all_zones_maybe.unpack().zones;
|
||||
return;
|
||||
}
|
||||
|
||||
dbgDebug(D_RULEBASE_CONFIG)
|
||||
<< "Adding specific zone to cache. Zone ID: "
|
||||
<< single_zone.getId()
|
||||
<< ", name: "
|
||||
<< single_zone.getName();
|
||||
all_zones.emplace(single_zone.getId(), single_zone);
|
||||
}
|
||||
|
||||
dbgTrace(D_RULEBASE_CONFIG) << "Creating list of active zones";
|
||||
map<GenericConfigId, Zone> active_zones_set;
|
||||
for (const auto &single_used_zone_id : used_zones) {
|
||||
const auto &found_zone = all_zones[single_used_zone_id];
|
||||
dbgTrace(D_RULEBASE_CONFIG)
|
||||
<< "Adding zone to list of active zones. Zone ID: "
|
||||
<< single_used_zone_id
|
||||
<< ", zone name: "
|
||||
<< found_zone.getName();
|
||||
active_zones_set.emplace(found_zone.getId(), found_zone);
|
||||
|
||||
for (const auto &adjacent_zone : found_zone.getAdjacentZones()) {
|
||||
const auto &adjacent_zone_obj = all_zones[adjacent_zone.second];
|
||||
dbgTrace(D_RULEBASE_CONFIG)
|
||||
<< "Adding adjacent zone to list of active zones. Zone ID: "
|
||||
<< adjacent_zone_obj.getId()
|
||||
<< ", zone name: "
|
||||
<< adjacent_zone_obj.getName();
|
||||
active_zones_set.emplace(adjacent_zone_obj.getId(), adjacent_zone_obj);
|
||||
}
|
||||
}
|
||||
|
||||
vector<GenericConfigId> implied_zones = {
|
||||
"impliedAzure",
|
||||
"impliedDNS",
|
||||
"impliedSSH",
|
||||
"impliedProxy",
|
||||
"impliedFog"
|
||||
};
|
||||
|
||||
GenericConfigId any_zone_id = "";
|
||||
for (const auto &single_zone : all_zones_maybe.unpack().zones) {
|
||||
if (single_zone.isAnyZone()) any_zone_id = single_zone.getId();
|
||||
}
|
||||
for (GenericConfigId &implied_id: implied_zones) {
|
||||
if (all_zones.find(implied_id) != all_zones.end()) {
|
||||
dbgDebug(D_RULEBASE_CONFIG) << "Adding implied zone to cache. Zone ID: " << implied_id;
|
||||
active_zones_set.emplace(implied_id, all_zones[implied_id]);
|
||||
if (any_zone_id != "" && active_zones_set.count(any_zone_id) == 0) {
|
||||
active_zones_set.emplace(any_zone_id, all_zones[any_zone_id]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto &single_id_zone_pair : active_zones_set) {
|
||||
zones.push_back(single_id_zone_pair.second);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
ZonesConfig::preload()
|
||||
{
|
||||
registerExpectedSetting<Zones>("rulebase", "zones");
|
||||
registerExpectedSetting<ZonesConfig>("rulebase", "usedZones");
|
||||
}
|
||||
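// --- Editorial note: illustration only, not part of the change above -------------------------
// ZonesConfig::preload() registers the settings consumed in load(); a component that needs the
// resolved active zones could fetch them through the same settings mechanism. This sketch only
// mirrors the getSetting() call pattern used above.
#include "generic_rulebase/zones_config.h"

void
activeZonesExample()
{
    auto used_zones = getSetting<ZonesConfig>("rulebase", "usedZones");
    if (!used_zones.ok()) return;
    // ... the resolved set includes used zones plus adjacent and implied zones ...
}
// ----------------------------------------------------------------------------------------------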
@@ -34,6 +34,7 @@ public:
|
||||
~DetailsResolver();
|
||||
|
||||
void preload() override;
|
||||
void init() override;
|
||||
|
||||
private:
|
||||
class Impl;
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
#include "url_parser.h"
|
||||
#include "i_agent_details.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "i_environment.h"
|
||||
#include "singleton.h"
|
||||
#include "component.h"
|
||||
|
||||
@@ -32,6 +33,7 @@ class Downloader
|
||||
Singleton::Consume<I_Encryptor>,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_OrchestrationTools>,
|
||||
Singleton::Consume<I_Environment>,
|
||||
Singleton::Consume<I_UpdateCommunication>
|
||||
{
|
||||
public:
|
||||
|
||||
@@ -29,6 +29,7 @@ public:
|
||||
virtual bool isGwNotVsx() = 0;
|
||||
virtual bool isVersionAboveR8110() = 0;
|
||||
virtual bool isReverseProxy() = 0;
|
||||
virtual bool isCloudStorageEnabled() = 0;
|
||||
virtual Maybe<std::tuple<std::string, std::string, std::string>> parseNginxMetadata() = 0;
|
||||
virtual std::map<std::string, std::string> getResolvedDetails() = 0;
|
||||
#if defined(gaia) || defined(smb)
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
class I_Downloader
|
||||
{
|
||||
public:
|
||||
virtual Maybe<std::string> downloadFileFromFog(
|
||||
virtual Maybe<std::string> downloadFile(
|
||||
const std::string &checksum,
|
||||
Package::ChecksumTypes,
|
||||
const GetResourceFile &resourse_file
|
||||
|
||||
@@ -117,7 +117,7 @@ public:
|
||||
const std::string &conf_path) const = 0;
|
||||
virtual bool copyFile(const std::string &src_path, const std::string &dst_path) const = 0;
|
||||
virtual bool doesFileExist(const std::string &file_path) const = 0;
|
||||
virtual void getClusterId() const = 0;
|
||||
virtual void setClusterId() const = 0;
|
||||
virtual void fillKeyInJson(
|
||||
const std::string &filename,
|
||||
const std::string &_key,
|
||||
|
||||
@@ -32,6 +32,7 @@ public:
|
||||
const std::string &policy_versions
|
||||
) const = 0;
|
||||
virtual Maybe<void> authenticateAgent() = 0;
|
||||
virtual void registerLocalAgentToFog() = 0;
|
||||
virtual Maybe<void> getUpdate(CheckUpdateRequest &request) = 0;
|
||||
virtual Maybe<std::string> downloadAttributeFile(
|
||||
const GetResourceFile &resourse_file,
|
||||
|
||||
@@ -31,6 +31,7 @@
|
||||
#include "i_environment.h"
|
||||
#include "i_tenant_manager.h"
|
||||
#include "i_package_handler.h"
|
||||
#include "i_proxy_configuration.h"
|
||||
#include "i_env_details.h"
|
||||
#include "component.h"
|
||||
|
||||
@@ -54,7 +55,8 @@ class OrchestrationComp
|
||||
Singleton::Consume<I_UpdateCommunication>,
|
||||
Singleton::Consume<I_Downloader>,
|
||||
Singleton::Consume<I_ManifestController>,
|
||||
Singleton::Consume<I_EnvDetails>
|
||||
Singleton::Consume<I_EnvDetails>,
|
||||
Singleton::Consume<I_ProxyConfiguration>
|
||||
{
|
||||
public:
|
||||
OrchestrationComp();
|
||||
|
||||
@@ -40,7 +40,7 @@ public:
|
||||
~OrchestrationStatus();
|
||||
|
||||
void init() override;
|
||||
|
||||
|
||||
private:
|
||||
class Impl;
|
||||
std::unique_ptr<Impl> pimpl;
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
#include "i_package_handler.h"
|
||||
#include "i_orchestration_tools.h"
|
||||
#include "i_shell_cmd.h"
|
||||
#include "i_environment.h"
|
||||
#include "component.h"
|
||||
|
||||
class PackageHandler
|
||||
@@ -24,7 +25,8 @@ class PackageHandler
|
||||
public Component,
|
||||
Singleton::Provide<I_PackageHandler>,
|
||||
Singleton::Consume<I_ShellCmd>,
|
||||
Singleton::Consume<I_OrchestrationTools>
|
||||
Singleton::Consume<I_OrchestrationTools>,
|
||||
Singleton::Consume<I_Environment>
|
||||
{
|
||||
public:
|
||||
PackageHandler();
|
||||
|
||||
@@ -35,8 +35,10 @@ public:
|
||||
bool isOverSSL() const { return over_ssl; }
|
||||
std::string getPort() const { return port; }
|
||||
std::string getQuery() const { return query; }
|
||||
std::string getHost() const;
|
||||
URLProtocol getProtocol() const { return protocol; }
|
||||
std::string toString() const;
|
||||
void setHost(const std::string &new_host);
|
||||
void setQuery(const std::string &new_query);
|
||||
|
||||
private:
|
||||
@@ -47,6 +49,7 @@ private:
|
||||
std::string base_url;
|
||||
std::string port;
|
||||
std::string query;
|
||||
std::string host;
|
||||
URLProtocol protocol;
|
||||
};
|
||||
|
||||
|
||||
@@ -50,9 +50,13 @@ public:
|
||||
|
||||
private:
|
||||
void readRules(cereal::JSONInputArchive &ar);
|
||||
void readTriggerId(cereal::JSONInputArchive &ar);
|
||||
void readExceptionId(cereal::JSONInputArchive &ar);
|
||||
void readDefaultAction(cereal::JSONInputArchive &ar);
|
||||
|
||||
std::vector<Rule> rules;
|
||||
std::string trigger_id;
|
||||
std::string exception_id;
|
||||
};
|
||||
|
||||
#endif // __IPS_BASIC_POLICY_H__
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
#include "log_generator.h"
|
||||
#include "parsed_context.h"
|
||||
#include "pm_hook.h"
|
||||
#include "i_generic_rulebase.h"
|
||||
|
||||
/// \namespace IPSSignatureSubTypes
|
||||
/// \brief Namespace containing subtypes for IPS signatures.
|
||||
@@ -348,8 +349,16 @@ public:
|
||||
/// \brief Construct a SignatureAndAction object.
|
||||
/// \param _signature The complete signature.
|
||||
/// \param _action The signature action.
|
||||
SignatureAndAction(std::shared_ptr<CompleteSignature> _signature, SignatureAction _action) :
|
||||
signature(_signature), action(_action)
|
||||
SignatureAndAction(
|
||||
std::shared_ptr<CompleteSignature> _signature,
|
||||
SignatureAction _action,
|
||||
std::string _trigger_id,
|
||||
std::string _exception_id)
|
||||
:
|
||||
signature(_signature),
|
||||
action(_action),
|
||||
trigger_id(_trigger_id),
|
||||
exception_id(_exception_id)
|
||||
{}
|
||||
|
||||
/// \brief Check if the signature is matched for prevention.
|
||||
@@ -375,6 +384,11 @@ public:
|
||||
return signature->getContext();
|
||||
}
|
||||
|
||||
LogTriggerConf getTrigger() const;
|
||||
|
||||
std::set<ParameterBehavior>
|
||||
getBehavior(const std::unordered_map<std::string, std::set<std::string>> &exceptions_dict) const;
|
||||
|
||||
private:
|
||||
/// \brief Get the action results for the IPS state.
|
||||
/// \param ips_state The IPS entry.
|
||||
@@ -382,6 +396,8 @@ private:
|
||||
|
||||
std::shared_ptr<CompleteSignature> signature;
|
||||
SignatureAction action;
|
||||
std::string trigger_id;
|
||||
std::string exception_id;
|
||||
};
|
||||
} // namespace IPSSignatureSubTypes
|
||||
|
||||
|
||||
@@ -17,6 +17,8 @@ public:
|
||||
private:
|
||||
IPSSignatureSubTypes::SignatureAction action = IPSSignatureSubTypes::SignatureAction::IGNORE;
|
||||
std::vector<std::string> file_names;
|
||||
std::string trigger_id;
|
||||
std::string exception_id;
|
||||
};
|
||||
|
||||
#endif // __SNORT_BASIC_POLICY_H__
|
||||
|
||||
@@ -17,6 +17,8 @@ void
|
||||
RuleSelector::load(cereal::JSONInputArchive &ar)
|
||||
{
|
||||
readRules(ar);
|
||||
readTriggerId(ar);
|
||||
readExceptionId(ar);
|
||||
readDefaultAction(ar);
|
||||
}
|
||||
|
||||
@@ -36,7 +38,7 @@ RuleSelector::selectSignatures() const
|
||||
if (rule.isSignaturedMatched(*signature)) {
|
||||
if (rule.getAction() != IPSSignatureSubTypes::SignatureAction::IGNORE) {
|
||||
signature->setIndicators("Check Point", signatures_version);
|
||||
res.emplace_back(signature, rule.getAction());
|
||||
res.emplace_back(signature, rule.getAction(), trigger_id, exception_id);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -52,6 +54,28 @@ RuleSelector::readRules(cereal::JSONInputArchive &ar)
|
||||
ar(cereal::make_nvp("rules", rules));
|
||||
}
|
||||
|
||||
void
|
||||
RuleSelector::readTriggerId(cereal::JSONInputArchive &ar)
|
||||
{
|
||||
try {
|
||||
ar(cereal::make_nvp("triggers", trigger_id));
|
||||
} catch (const cereal::Exception &e) {
|
||||
ar.setNextName(nullptr);
|
||||
trigger_id = "";
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
RuleSelector::readExceptionId(cereal::JSONInputArchive &ar)
|
||||
{
|
||||
try {
|
||||
ar(cereal::make_nvp("exceptions", exception_id));
|
||||
} catch (const cereal::Exception &e) {
|
||||
ar.setNextName(nullptr);
|
||||
exception_id = "";
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
RuleSelector::readDefaultAction(cereal::JSONInputArchive &ar)
|
||||
{
|
||||
|
||||
@@ -280,8 +280,7 @@ SignatureAndAction::getAction(const IPSEntry &ips_state) const
|
||||
exceptions_dict["sourceIdentifier"].insert(*env_source_identifier);
|
||||
}
|
||||
|
||||
I_GenericRulebase *i_rulebase = Singleton::Consume<I_GenericRulebase>::by<IPSComp>();
|
||||
auto behaviors = i_rulebase->getBehavior(exceptions_dict);
|
||||
auto behaviors = getBehavior(exceptions_dict);
|
||||
|
||||
set<BehaviorValue> override_actions;
|
||||
vector<string> override_ids;
|
||||
@@ -315,6 +314,23 @@ static const auto url_query = LogTriggerConf::WebLogFields::webUrlQuery;
|
||||
static const auto res_body = LogTriggerConf::WebLogFields::responseBody;
|
||||
static const auto res_code = LogTriggerConf::WebLogFields::responseCode;
|
||||
|
||||
LogTriggerConf
|
||||
SignatureAndAction::getTrigger() const
|
||||
{
|
||||
if (trigger_id.empty()) return getConfigurationWithDefault(LogTriggerConf(), "rulebase", "log");
|
||||
|
||||
return Singleton::Consume<I_GenericRulebase>::by<IPSComp>()->getLogTriggerConf(trigger_id);
|
||||
}
|
||||
|
||||
set<ParameterBehavior>
|
||||
SignatureAndAction::getBehavior(const unordered_map<string, set<string>> &exceptions_dict) const
|
||||
{
|
||||
I_GenericRulebase *i_rulebase = Singleton::Consume<I_GenericRulebase>::by<IPSComp>();
|
||||
if (exception_id.empty()) return i_rulebase->getBehavior(exceptions_dict);
|
||||
|
||||
return i_rulebase->getParameterException(exception_id).getBehavior(exceptions_dict);
|
||||
}
|
||||
|
||||
bool
|
||||
SignatureAndAction::matchSilent(const Buffer &sample) const
|
||||
{
|
||||
@@ -398,7 +414,7 @@ SignatureAndAction::isMatchedPrevent(const Buffer &context_buffer, const set<PMP
|
||||
|
||||
dbgDebug(D_IPS) << "Signature matched - sending log";
|
||||
|
||||
auto &trigger = getConfigurationWithDefault(default_triger, "rulebase", "log");
|
||||
auto trigger = getTrigger();
|
||||
bool is_prevent = get<0>(override_action) == IPSSignatureSubTypes::SignatureAction::PREVENT;
|
||||
|
||||
auto severity = signature->getSeverity() < IPSLevel::HIGH ? Severity::HIGH : Severity::CRITICAL;
|
||||
|
||||
@@ -596,6 +596,8 @@ TEST_F(ComponentTest, check_filtering_by_year)
|
||||
|
||||
TEST_F(ComponentTest, log_fields)
|
||||
{
|
||||
generic_rulebase.preload();
|
||||
generic_rulebase.init();
|
||||
string config =
|
||||
"{"
|
||||
"\"IPS\": {"
|
||||
@@ -632,6 +634,8 @@ TEST_F(ComponentTest, log_fields)
|
||||
"\"assetId\": \"1-1-1\","
|
||||
"\"practiceId\": \"2-2-2\","
|
||||
"\"practiceName\": \"practice1\","
|
||||
"\"triggers\": \"5eaeefde6765c30010bae8b6\","
|
||||
"\"exceptions\": \"\","
|
||||
"\"defaultAction\": \"Detect\","
|
||||
"\"rules\": ["
|
||||
"{"
|
||||
@@ -643,10 +647,36 @@ TEST_F(ComponentTest, log_fields)
|
||||
"]"
|
||||
"}"
|
||||
"]"
|
||||
"},"
|
||||
"\"rulebase\": {"
|
||||
"\"log\": ["
|
||||
"{"
|
||||
"\"context\": \"triggerId(5eaeefde6765c30010bae8b6)\","
|
||||
"\"triggerName\": \"Logging Trigger\","
|
||||
"\"triggerType\": \"log\","
|
||||
"\"urlForSyslog\": \"\","
|
||||
"\"urlForCef\": \"128.1.1.1:333\","
|
||||
"\"acAllow\": false,"
|
||||
"\"acDrop\": true,"
|
||||
"\"complianceViolations\": true,"
|
||||
"\"complianceWarnings\": true,"
|
||||
"\"logToAgent\": true,"
|
||||
"\"logToCloud\": true,"
|
||||
"\"logToSyslog\": false,"
|
||||
"\"logToCef\": true,"
|
||||
"\"tpDetect\": true,"
|
||||
"\"tpPrevent\": true,"
|
||||
"\"verbosity\": \"Standard\","
|
||||
"\"webBody\": true,"
|
||||
"\"webHeaders\": true,"
|
||||
"\"webRequests\": true,"
|
||||
"\"webUrlPath\": true,"
|
||||
"\"webUrlQuery\": true"
|
||||
"}"
|
||||
"]"
|
||||
"}"
|
||||
"}";
|
||||
loadPolicy(config);
|
||||
setTrigger();
|
||||
|
||||
EXPECT_CALL(table, createStateRValueRemoved(_, _));
|
||||
EXPECT_CALL(table, getState(_)).WillRepeatedly(Return(&entry));
|
||||
@@ -829,6 +859,8 @@ TEST_F(ComponentTest, prxeem_exception_bug)
|
||||
" \"practiceId\": \"2-2-2\","
|
||||
" \"practiceName\": \"practice1\","
|
||||
" \"defaultAction\": \"Prevent\","
|
||||
" \"triggers\": \"\","
|
||||
" \"exceptions\": \"6c3867be-4da5-42c2-93dc-8f509a764004\","
|
||||
" \"rules\": []"
|
||||
" }"
|
||||
" ]"
|
||||
@@ -847,6 +879,11 @@ TEST_F(ComponentTest, prxeem_exception_bug)
|
||||
" \"parameterId\": \"6c3867be-4da5-42c2-93dc-8f509a764003\","
|
||||
" \"parameterType\": \"exceptions\","
|
||||
" \"parameterName\": \"exception\""
|
||||
" },"
|
||||
" {"
|
||||
" \"parameterId\": \"6c3867be-4da5-42c2-93dc-8f509a764004\","
|
||||
" \"parameterType\": \"exceptions\","
|
||||
" \"parameterName\": \"exception\""
|
||||
" }"
|
||||
" ],"
|
||||
" \"zoneId\": \"\","
|
||||
@@ -855,7 +892,7 @@ TEST_F(ComponentTest, prxeem_exception_bug)
|
||||
" ],"
|
||||
" \"exception\": ["
|
||||
" {"
|
||||
" \"context\": \"parameterId(6c3867be-4da5-42c2-93dc-8f509a764003)\","
|
||||
" \"context\": \"parameterId(6c3867be-4da5-42c2-93dc-8f509a764004)\","
|
||||
" \"match\": {"
|
||||
" \"type\": \"operator\","
|
||||
" \"op\": \"and\","
|
||||
|
||||
@@ -16,6 +16,19 @@ using namespace std;
|
||||
void
|
||||
SnortRuleSelector::load(cereal::JSONInputArchive &ar)
|
||||
{
|
||||
try {
|
||||
ar(cereal::make_nvp("triggers", trigger_id));
|
||||
} catch (const cereal::Exception &e) {
|
||||
ar.setNextName(nullptr);
|
||||
trigger_id = "";
|
||||
}
|
||||
|
||||
try {
|
||||
ar(cereal::make_nvp("exceptions", exception_id));
|
||||
} catch (const cereal::Exception &e) {
|
||||
ar.setNextName(nullptr);
|
||||
exception_id = "";
|
||||
}
|
||||
string mode;
|
||||
ar(cereal::make_nvp("mode", mode), cereal::make_nvp("files", file_names));
|
||||
|
||||
@@ -38,7 +51,7 @@ SnortRuleSelector::selectSignatures() const
|
||||
|
||||
for (auto &file : file_names) {
|
||||
for (auto &signature : (*signatures).getSignatures(file)) {
|
||||
res.emplace_back(signature, action);
|
||||
res.emplace_back(signature, action, trigger_id, exception_id);
|
||||
}
|
||||
}
|
||||
return res;
|
||||
|
||||
@@ -228,6 +228,7 @@ AccessControlPracticeSpec::load(cereal::JSONInputArchive &archive_in)
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading AppSec practice spec";
|
||||
|
||||
parseAppsecJSONKey<string>("name", practice_name, archive_in);
|
||||
parseAppsecJSONKey<string>("practiceMode", mode, archive_in);
|
||||
parseAppsecJSONKey<string>("appsecClassName", appsec_class_name, archive_in);
|
||||
parseMandatoryAppsecJSONKey<AccessControlRateLimit>("rateLimit", rate_limit, archive_in);
|
||||
}
|
||||
@@ -255,4 +256,10 @@ AccessControlPracticeSpec::getName() const
|
||||
{
|
||||
return practice_name;
|
||||
}
|
||||
|
||||
const string &
|
||||
AccessControlPracticeSpec::getMode(const std::string &default_mode) const
|
||||
{
|
||||
return isModeInherited(mode) ? default_mode : mode;
|
||||
}
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
@@ -181,12 +181,14 @@ public:
|
||||
const AccessControlRateLimit &getRateLimit() const;
|
||||
const std::string & getAppSecClassName() const;
|
||||
const std::string & getName() const;
|
||||
const std::string & getMode(const std::string &default_mode = "inactive") const;
|
||||
void setName(const std::string &_name);
|
||||
|
||||
private:
|
||||
AccessControlRateLimit rate_limit;
|
||||
std::string appsec_class_name;
|
||||
std::string practice_name;
|
||||
std::string mode;
|
||||
};
|
||||
|
||||
#endif // __ACCESS_CONTROL_PRACTICE_H__
|
||||
|
||||
@@ -587,6 +587,7 @@ public:
|
||||
const NewFileSecurity & getFileSecurity() const;
|
||||
const std::string & getAppSecClassName() const;
|
||||
const std::string & getName() const;
|
||||
const std::string & getMode(const std::string &default_mode = "inactive") const;
|
||||
void setName(const std::string &_name);
|
||||
|
||||
private:
|
||||
@@ -598,6 +599,7 @@ private:
|
||||
NewAppSecPracticeAntiBot anti_bot;
|
||||
std::string appsec_class_name;
|
||||
std::string practice_name;
|
||||
std::string mode;
|
||||
};
|
||||
|
||||
#endif // __NEW_PRACTICE_H__
|
||||
|
||||
@@ -27,7 +27,7 @@ NewAppsecTriggerAccessControlLogging::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading AppSec Trigger - Access Control Logging";
|
||||
parseAppsecJSONKey<bool>("allowEvents", ac_allow_events, archive_in, false);
|
||||
parseAppsecJSONKey<bool>("dropEvents", ac_drop_events, archive_in, false);
|
||||
parseAppsecJSONKey<bool>("dropEvents", ac_drop_events, archive_in, true);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -36,8 +36,7 @@ NewAppsecTriggerAdditionalSuspiciousEventsLogging::load(cereal::JSONInputArchive
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading AppSec Trigger - Additional Suspicious Events Logging";
|
||||
parseAppsecJSONKey<bool>("enabled", enabled, archive_in, true);
|
||||
parseAppsecJSONKey<bool>("responseBody", response_body, archive_in, false);
|
||||
// The old code didn't parse the response code, so ask Noam what the current default value for it is
|
||||
parseAppsecJSONKey<bool>("responseCode", response_code, archive_in, false);
|
||||
parseAppsecJSONKey<bool>("responseCode", response_code, archive_in, true);
|
||||
parseAppsecJSONKey<string>("minSeverity", minimum_severity, archive_in, "high");
|
||||
if (valid_severities.count(minimum_severity) == 0) {
|
||||
dbgWarning(D_LOCAL_POLICY)
|
||||
@@ -175,8 +174,12 @@ void
|
||||
NewAppsecTriggerLogDestination::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading AppSec Trigger LogDestination";
|
||||
// TBD: support "file"
|
||||
parseAppsecJSONKey<bool>("cloud", cloud, archive_in, false);
|
||||
if (getConfigurationFlag("orchestration-mode") != "hybrid_mode") {
|
||||
// TBD: support "file"
|
||||
parseAppsecJSONKey<bool>("cloud", cloud, archive_in, false);
|
||||
} else {
|
||||
cloud = false;
|
||||
}
|
||||
auto mode = Singleton::Consume<I_AgentDetails>::by<NewAppsecTriggerLogDestination>()->getOrchestrationMode();
|
||||
auto env_type = Singleton::Consume<I_EnvDetails>::by<NewAppsecTriggerLogDestination>()->getEnvType();
|
||||
bool k8s_service_default = (mode == OrchestrationMode::HYBRID && env_type == EnvType::K8S);
|
||||
@@ -184,7 +187,7 @@ NewAppsecTriggerLogDestination::load(cereal::JSONInputArchive &archive_in)
|
||||
|
||||
NewStdoutLogging stdout_log;
|
||||
parseAppsecJSONKey<NewStdoutLogging>("stdout", stdout_log, archive_in);
|
||||
agent_local = !(stdout_log.getFormat().empty());
|
||||
parseAppsecJSONKey<bool>("logToAgent", agent_local, archive_in, true);
|
||||
beautify_logs = stdout_log.getFormat() == "json-formatted";
|
||||
parseAppsecJSONKey<NewLoggingService>("syslogService", syslog_service, archive_in);
|
||||
parseAppsecJSONKey<NewLoggingService>("cefService", cef_service, archive_in);
|
||||
|
||||
@@ -1025,6 +1025,7 @@ NewAppSecPracticeSpec::load(cereal::JSONInputArchive &archive_in)
|
||||
parseMandatoryAppsecJSONKey<NewAppSecPracticeWebAttacks>("webAttacks", web_attacks, archive_in);
|
||||
parseAppsecJSONKey<NewAppSecPracticeAntiBot>("antiBot", anti_bot, archive_in);
|
||||
parseAppsecJSONKey<string>("name", practice_name, archive_in);
|
||||
parseAppsecJSONKey<string>("practiceMode", mode, archive_in, "inherited");
|
||||
}
|
||||
|
||||
void
|
||||
@@ -1080,4 +1081,11 @@ NewAppSecPracticeSpec::getName() const
|
||||
{
|
||||
return practice_name;
|
||||
}
|
||||
|
||||
const string &
|
||||
NewAppSecPracticeSpec::getMode(const string &default_mode) const
|
||||
{
|
||||
return isModeInherited(mode) ? default_mode : mode;
|
||||
}
|
||||
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
@@ -147,7 +147,7 @@ string
|
||||
PolicyMakerUtils::dumpPolicyToFile(
|
||||
const PolicyWrapper &policy,
|
||||
const string &policy_path,
|
||||
const string &settings_path)
|
||||
const string &)
|
||||
{
|
||||
clearElementsMaps();
|
||||
|
||||
@@ -170,6 +170,7 @@ PolicyMakerUtils::dumpPolicyToFile(
|
||||
cereal::JSONOutputArchive ar(settings_ss);
|
||||
policy.getSettings().save(ar);
|
||||
}
|
||||
#if 0
|
||||
string settings_str = settings_ss.str();
|
||||
try {
|
||||
ofstream settings_file(settings_path);
|
||||
@@ -179,6 +180,7 @@ PolicyMakerUtils::dumpPolicyToFile(
|
||||
dbgDebug(D_NGINX_POLICY) << "Error while writing settings to " << settings_path << ", Error: " << e.what();
|
||||
}
|
||||
dbgDebug(D_LOCAL_POLICY) << settings_path << " content: " << settings_str;
|
||||
#endif
|
||||
|
||||
return policy_str;
|
||||
}
|
||||
@@ -1002,8 +1004,9 @@ PolicyMakerUtils::createIpsSections(
|
||||
auto apssec_practice = getAppsecPracticeSpec<V1beta2AppsecLinuxPolicy, NewAppSecPracticeSpec>(
|
||||
rule_annotations[AnnotationTypes::PRACTICE],
|
||||
policy);
|
||||
const string &override_mode =
|
||||
apssec_practice.getIntrusionPrevention().getMode(apssec_practice.getMode(default_mode));
|
||||
|
||||
const string &override_mode = apssec_practice.getIntrusionPrevention().getMode(default_mode);
|
||||
if (override_mode == "Inactive" || override_mode == "Disabled") return;
|
||||
|
||||
IpsProtectionsSection ips_section = IpsProtectionsSection(
|
||||
@@ -1076,8 +1079,9 @@ PolicyMakerUtils::createSnortSections(
|
||||
auto apssec_practice = getAppsecPracticeSpec<V1beta2AppsecLinuxPolicy, NewAppSecPracticeSpec>(
|
||||
rule_annotations[AnnotationTypes::PRACTICE],
|
||||
policy);
|
||||
const string &override_mode =
|
||||
apssec_practice.getSnortSignatures().getOverrideMode(apssec_practice.getMode(default_mode));
|
||||
|
||||
const string &override_mode = apssec_practice.getSnortSignatures().getOverrideMode(default_mode);
|
||||
if (override_mode == "Inactive" ||
|
||||
override_mode == "Disabled" ||
|
||||
apssec_practice.getSnortSignatures().getFiles().size() == 0) {
|
||||
@@ -1129,7 +1133,7 @@ PolicyMakerUtils::createFileSecuritySections(
|
||||
asset_id,
|
||||
practice_name,
|
||||
practice_id,
|
||||
default_mode
|
||||
apssec_practice.getMode(default_mode)
|
||||
);
|
||||
|
||||
file_security[asset_name] = file_security_section;
|
||||
@@ -1171,7 +1175,7 @@ PolicyMakerUtils::createRateLimitSection(
|
||||
asset_name,
|
||||
url,
|
||||
uri,
|
||||
access_control_practice.getRateLimit().getMode(default_mode),
|
||||
access_control_practice.getRateLimit().getMode(access_control_practice.getMode(default_mode)),
|
||||
practice_id,
|
||||
rule_annotations[AnnotationTypes::ACCESS_CONTROL_PRACTICE],
|
||||
rules
|
||||
@@ -1191,6 +1195,8 @@ PolicyMakerUtils::createWebAppSection(
|
||||
rule_annotations[AnnotationTypes::PRACTICE],
|
||||
policy
|
||||
);
|
||||
const string &practice_mode = apssec_practice.getMode(default_mode);
|
||||
|
||||
PracticeAdvancedConfig practice_advance_config(
|
||||
apssec_practice.getWebAttacks().getMaxHeaderSizeBytes(),
|
||||
apssec_practice.getWebAttacks().getMaxBodySizeKb(),
|
||||
@@ -1206,8 +1212,8 @@ PolicyMakerUtils::createWebAppSection(
|
||||
practice_id,
|
||||
rule_annotations[AnnotationTypes::PRACTICE],
|
||||
rule_config.getContext(),
|
||||
apssec_practice.getWebAttacks().getMinimumConfidence(default_mode),
|
||||
apssec_practice.getWebAttacks().getMode(default_mode),
|
||||
apssec_practice.getWebAttacks().getMinimumConfidence(practice_mode),
|
||||
apssec_practice.getWebAttacks().getMode(practice_mode),
|
||||
practice_advance_config,
|
||||
apssec_practice.getAntiBot(),
|
||||
log_triggers[rule_annotations[AnnotationTypes::TRIGGER]],
|
||||
|
||||
@@ -387,8 +387,12 @@ void
|
||||
AppsecTriggerLogDestination::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading AppSec Trigger LogDestination";
|
||||
// TBD: support "file"
|
||||
parseAppsecJSONKey<bool>("cloud", cloud, archive_in, false);
|
||||
if (getConfigurationFlag("orchestration-mode") != "hybrid_mode") {
|
||||
// TBD: support "file"
|
||||
parseAppsecJSONKey<bool>("cloud", cloud, archive_in, false);
|
||||
} else {
|
||||
cloud = false;
|
||||
}
|
||||
auto mode = Singleton::Consume<I_AgentDetails>::by<AppsecTriggerLogDestination>()->getOrchestrationMode();
|
||||
auto env_type = Singleton::Consume<I_EnvDetails>::by<AppsecTriggerLogDestination>()->getEnvType();
|
||||
bool k8s_service_default = (mode == OrchestrationMode::HYBRID && env_type == EnvType::K8S);
|
||||
|
||||
@@ -32,6 +32,7 @@ class DetailsResolver::Impl
|
||||
Singleton::Provide<I_DetailsResolver>::From<DetailsResolver>
|
||||
{
|
||||
public:
|
||||
void init() { handler.init(); }
|
||||
Maybe<string> getHostname() override;
|
||||
Maybe<string> getPlatform() override;
|
||||
Maybe<string> getArch() override;
|
||||
@@ -43,6 +44,7 @@ public:
|
||||
bool isGwNotVsx() override;
|
||||
bool isVersionAboveR8110() override;
|
||||
bool isReverseProxy() override;
|
||||
bool isCloudStorageEnabled() override;
|
||||
Maybe<tuple<string, string, string>> parseNginxMetadata() override;
|
||||
#if defined(gaia) || defined(smb)
|
||||
bool compareCheckpointVersion(int cp_version, std::function<bool(int, int)> compare_operator) const override;
|
||||
@@ -134,6 +136,18 @@ DetailsResolver::Impl::isReverseProxy()
|
||||
return getenv("DOCKER_RPM_ENABLED") && getenv("DOCKER_RPM_ENABLED") == string("true");
|
||||
}
|
||||
|
||||
bool
|
||||
DetailsResolver::Impl::isCloudStorageEnabled()
|
||||
{
|
||||
auto cloud_storage_mode_override = getProfileAgentSetting<bool>("agent.cloudStorage.enabled");
|
||||
if (cloud_storage_mode_override.ok()) {
|
||||
dbgInfo(D_ORCHESTRATOR) << "Received cloud-storage mode override: " << *cloud_storage_mode_override;
|
||||
return *cloud_storage_mode_override;
|
||||
}
|
||||
|
||||
return getenv("CLOUD_STORAGE_ENABLED") && getenv("CLOUD_STORAGE_ENABLED") == string("true");
|
||||
}
|
||||
|
||||
bool
|
||||
DetailsResolver::Impl::isKernelVersion3OrHigher()
|
||||
{
|
||||
@@ -290,6 +304,12 @@ DetailsResolver::DetailsResolver() : Component("DetailsResolver"), pimpl(make_un
|
||||
|
||||
DetailsResolver::~DetailsResolver() {}
|
||||
|
||||
void
|
||||
DetailsResolver::init()
|
||||
{
|
||||
pimpl->init();
|
||||
}
|
||||
|
||||
void
|
||||
DetailsResolver::preload()
|
||||
{
|
||||
|
||||
@@ -32,6 +32,17 @@ checkSAMLSupportedBlade(const string &command_output)
|
||||
return genError("Current host does not have SAML capability");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkIDABlade(const string &command_output)
|
||||
{
|
||||
string idaBlade = "identityServer";
|
||||
if (command_output.find(idaBlade) != string::npos) {
|
||||
return string("true");
|
||||
}
|
||||
|
||||
return genError("Current host does not have IDA installed");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkSAMLPortal(const string &command_output)
|
||||
{
|
||||
@@ -43,9 +54,19 @@ checkSAMLPortal(const string &command_output)
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getIDASSamlGaia(const string &command_output)
|
||||
checkPepIdaIdnStatus(const string &command_output)
|
||||
{
|
||||
return string("idaSaml_gaia");
|
||||
if (command_output.find("ida_idn_nano_service_enabled=1") != string::npos) {
|
||||
return string("true");
|
||||
}
|
||||
|
||||
return genError("Current host does not have PEP control IDA IDN enabled");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getIDAGaiaPackages(const string &command_output)
|
||||
{
|
||||
return string("idaSaml_gaia;idaIdn_gaia;idaIdnBg_gaia;");
|
||||
}
|
||||
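The new handlers above (checkIDABlade, checkPepIdaIdnStatus, getIDAGaiaPackages) share one shape: inspect raw command output and return either a value or an error with a reason. A hedged sketch of that shape, using std::optional in place of the project's Maybe/genError types.

#include <iostream>
#include <optional>
#include <string>

// Mirrors checkPepIdaIdnStatus: succeed only when the marker substring is present.
static std::optional<std::string> checkFlagEnabled(const std::string &command_output)
{
    if (command_output.find("ida_idn_nano_service_enabled=1") != std::string::npos) {
        return std::string("true");
    }
    return std::nullopt;   // the real handlers return genError("...") with a reason here
}

int main()
{
    auto result = checkFlagEnabled("ida_idn_nano_service_enabled=1");
    std::cout << (result ? *result : std::string("not enabled")) << std::endl;
    return 0;
}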
|
||||
Maybe<string>
|
||||
@@ -76,6 +97,18 @@ checkIsInstallHorizonTelemetrySucceeded(const string &command_output)
|
||||
return command_output;
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getQUID(const string &command_output)
|
||||
{
|
||||
if (command_output == "" ) return string("false");
|
||||
// validate the QUID format - it must contain exactly 4 '-'
|
||||
if (std::count(command_output.begin(), command_output.end(), '-') != 4) {
|
||||
return genError("not valid QUID");
|
||||
}
|
||||
return command_output;
|
||||
}
|
||||
|
||||
|
||||
Maybe<string>
|
||||
checkHasSDWan(const string &command_output)
|
||||
{
|
||||
@@ -92,6 +125,26 @@ checkCanUpdateSDWanData(const string &command_output)
|
||||
return string("true");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkLsmProfileName(const string &command_output)
|
||||
{
|
||||
if (!command_output.empty()) {
|
||||
return command_output;
|
||||
}
|
||||
|
||||
return genError("LSM profile name was not found");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkLsmProfileUuid(const string &command_output)
|
||||
{
|
||||
if (!command_output.empty()) {
|
||||
return command_output;
|
||||
}
|
||||
|
||||
return genError("LSM profile uuid was not found");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getMgmtObjType(const string &command_output)
|
||||
{
|
||||
|
||||
@@ -46,6 +46,9 @@ SHELL_CMD_HANDLER("prerequisitesForHorizonTelemetry",
|
||||
"[ -f /var/log/nano_agent/cp-nano-horizon-telemetry-prerequisites.log ] "
|
||||
"&& head -1 /var/log/nano_agent/cp-nano-horizon-telemetry-prerequisites.log || echo ''",
|
||||
checkIsInstallHorizonTelemetrySucceeded)
|
||||
SHELL_CMD_HANDLER("QUID", "[ -d /opt/CPquid ] "
|
||||
"&& python3 /opt/CPquid/Quid_Api.py -i /opt/CPotelcol/quid_api/get_global_id.json | jq -r .message || echo ''",
|
||||
getQUID)
|
||||
SHELL_CMD_HANDLER("hasSDWan", "[ -f $FWDIR/bin/sdwan_steering ] && echo '1' || echo '0'", checkHasSDWan)
|
||||
SHELL_CMD_HANDLER(
|
||||
"canUpdateSDWanData",
|
||||
@@ -56,6 +59,16 @@ SHELL_CMD_HANDLER(
|
||||
"isSdwanRunning",
|
||||
"[ -v $(pidof cp-nano-sdwan) ] && echo 'false' || echo 'true'",
|
||||
checkIfSdwanRunning)
|
||||
SHELL_CMD_HANDLER(
|
||||
"lsmProfileName",
|
||||
"jq -r .lsm_profile_name /tmp/cpsdwan_getdata_orch.json",
|
||||
checkLsmProfileName
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"lsmProfileUuid",
|
||||
"jq -r .lsm_profile_uuid /tmp/cpsdwan_getdata_orch.json",
|
||||
checkLsmProfileUuid
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"IP Address",
|
||||
"[ $(cpprod_util FWisDAG) -eq 1 ] && echo \"Dynamic Address\" "
|
||||
@@ -84,8 +97,10 @@ SHELL_CMD_HANDLER(
|
||||
|
||||
#if defined(gaia)
|
||||
SHELL_CMD_HANDLER("hasSAMLSupportedBlade", "enabled_blades", checkSAMLSupportedBlade)
|
||||
SHELL_CMD_HANDLER("hasSAMLPortal", "mpclient status saml-vpn", checkSAMLPortal)
|
||||
SHELL_CMD_HANDLER("requiredNanoServices", "ida_saml_gaia", getIDASSamlGaia)
|
||||
SHELL_CMD_HANDLER("hasIDABlade", "enabled_blades", checkIDABlade)
|
||||
SHELL_CMD_HANDLER("hasSAMLPortal", "mpclient status nac", checkSAMLPortal)
|
||||
SHELL_CMD_HANDLER("hasIdaIdnEnabled", "pep control IDN_nano_Srv_support status", checkPepIdaIdnStatus)
|
||||
SHELL_CMD_HANDLER("requiredNanoServices", "ida_packages", getIDAGaiaPackages)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtParentObjectName",
|
||||
"cat $FWDIR/database/myself_objects.C "
|
||||
@@ -223,7 +238,7 @@ FILE_CONTENT_HANDLER(
|
||||
FILE_CONTENT_HANDLER("os_release", "/etc/os-release", getOsRelease)
|
||||
#endif // gaia || smb
|
||||
|
||||
FILE_CONTENT_HANDLER("AppSecModelVersion", "/etc/cp/conf/waap/waap.data", getWaapModelVersion)
|
||||
FILE_CONTENT_HANDLER("AppSecModelVersion", "<FILESYSTEM-PREFIX>/conf/waap/waap.data", getWaapModelVersion)
|
||||
|
||||
#endif // FILE_CONTENT_HANDLER
|
||||
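SHELL_CMD_HANDLER and FILE_CONTENT_HANDLER above register each attribute name together with its data source (a shell command or a file path) and a post-processing function. A minimal sketch of that table idiom; runShell is a stub standing in for the real shell interface, and the sample entry only illustrates the registration shape.

#include <functional>
#include <iostream>
#include <map>
#include <optional>
#include <string>

using Parser = std::function<std::optional<std::string>(const std::string &)>;

struct ShellHandler {
    std::string command;   // shell command whose output feeds the parser
    Parser      parse;     // turns raw output into a value, or nothing on failure
};

static std::string runShell(const std::string &) { return "1"; }   // stub

static std::map<std::string, std::string> resolveAll(const std::map<std::string, ShellHandler> &handlers)
{
    std::map<std::string, std::string> resolved;
    for (const auto &entry : handlers) {
        auto value = entry.second.parse(runShell(entry.second.command));
        if (value) resolved[entry.first] = *value;   // attributes that fail to parse are skipped
    }
    return resolved;
}

int main()
{
    std::map<std::string, ShellHandler> handlers = {
        {"hasSDWan", {"[ -f $FWDIR/bin/sdwan_steering ] && echo '1' || echo '0'",
                      [](const std::string &out) -> std::optional<std::string> {
                          return out == "1" ? std::optional<std::string>("true") : std::nullopt;
                      }}}
    };
    for (const auto &kv : resolveAll(handlers)) std::cout << kv.first << "=" << kv.second << std::endl;
    return 0;
}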
|
||||
|
||||
@@ -36,9 +36,12 @@ using FileContentHandler = function<Maybe<string>(shared_ptr<istream> file_otput
|
||||
|
||||
#include "checkpoint_product_handlers.h"
|
||||
|
||||
static const string filesystem_place_holder = "<FILESYSTEM-PREFIX>";
|
||||
|
||||
class DetailsResolvingHanlder::Impl
|
||||
{
|
||||
public:
|
||||
void init();
|
||||
map<string, string> getResolvedDetails() const;
|
||||
static Maybe<string> getCommandOutput(const string &cmd);
|
||||
|
||||
@@ -70,6 +73,20 @@ private:
|
||||
};
|
||||
#undef SHELL_POST_CMD
|
||||
|
||||
void
|
||||
DetailsResolvingHanlder::Impl::init()
|
||||
{
|
||||
string actual_filesystem_prefix = getFilesystemPathConfig();
|
||||
|
||||
for (auto &file_handler : file_content_handlers) {
|
||||
string &path = file_handler.second.first;
|
||||
size_t place_holder_size = filesystem_place_holder.size();
|
||||
if (path.substr(0, place_holder_size) == filesystem_place_holder) {
|
||||
path = actual_filesystem_prefix + path.substr(place_holder_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
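The init() hunk above rewrites every registered file path so that the <FILESYSTEM-PREFIX> placeholder becomes the agent's actual install prefix, which is what lets the AppSecModelVersion entry move from a hard-coded /etc/cp path to a relocatable one. A standalone sketch of that substitution:

#include <iostream>
#include <string>

static std::string expandPrefix(std::string path, const std::string &prefix)
{
    static const std::string place_holder = "<FILESYSTEM-PREFIX>";
    if (path.compare(0, place_holder.size(), place_holder) == 0) {
        path = prefix + path.substr(place_holder.size());
    }
    return path;
}

int main()
{
    // With the default prefix this reproduces the previously hard-coded path.
    std::cout << expandPrefix("<FILESYSTEM-PREFIX>/conf/waap/waap.data", "/etc/cp") << std::endl;
    return 0;
}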
|
||||
map<string, string>
|
||||
DetailsResolvingHanlder::Impl::getResolvedDetails() const
|
||||
{
|
||||
@@ -155,6 +172,12 @@ DetailsResolvingHanlder::Impl::getCommandOutput(const string &cmd)
|
||||
DetailsResolvingHanlder::DetailsResolvingHanlder() : pimpl(make_unique<Impl>()) {}
|
||||
DetailsResolvingHanlder::~DetailsResolvingHanlder() {}
|
||||
|
||||
void
|
||||
DetailsResolvingHanlder::init()
|
||||
{
|
||||
return pimpl->init();
|
||||
}
|
||||
|
||||
map<string, string>
|
||||
DetailsResolvingHanlder::getResolvedDetails() const
|
||||
{
|
||||
|
||||
@@ -31,6 +31,7 @@ public:
|
||||
DetailsResolvingHanlder();
|
||||
~DetailsResolvingHanlder();
|
||||
|
||||
void init();
|
||||
std::map<std::string, std::string> getResolvedDetails() const;
|
||||
|
||||
static Maybe<std::string> getCommandOutput(const std::string &cmd);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
ADD_DEFINITIONS(-Wno-deprecated-declarations -Dalpine)
|
||||
|
||||
add_library(orchestration_downloader curl_client.cc downloader.cc http_client.cc https_client.cc)
|
||||
add_library(orchestration_downloader curl_client.cc downloader.cc https_client.cc https_client_helper.cc)
|
||||
|
||||
#add_subdirectory(downloader_ut)
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
|
||||
#include "i_orchestration_tools.h"
|
||||
#include "singleton.h"
|
||||
#include "http_client.h"
|
||||
#include "https_client.h"
|
||||
#include "debug.h"
|
||||
#include "config.h"
|
||||
#include "rest.h"
|
||||
@@ -63,7 +63,7 @@ class Downloader::Impl : Singleton::Provide<I_Downloader>::From<Downloader>
|
||||
public:
|
||||
void init();
|
||||
|
||||
Maybe<string> downloadFileFromFog(
|
||||
Maybe<string> downloadFile(
|
||||
const string &checksum,
|
||||
Package::ChecksumTypes checksum_type,
|
||||
const GetResourceFile &resourse_file
|
||||
@@ -87,7 +87,7 @@ public:
|
||||
string getProfileFromMap(const string &tenant_id) const override;
|
||||
|
||||
private:
|
||||
Maybe<string> downloadFileFromFogByHTTP(
|
||||
Maybe<string> downloadAttributeFile(
|
||||
const GetResourceFile &resourse_file,
|
||||
const string &file_name
|
||||
) const;
|
||||
@@ -121,16 +121,21 @@ Downloader::Impl::init()
|
||||
"Default file download path"
|
||||
);
|
||||
|
||||
auto maybe_vs_id = Singleton::Consume<I_Environment>::by<Downloader>()->get<string>("VS ID");
|
||||
if (maybe_vs_id.ok()) {
|
||||
dir_path = dir_path + "/vs" + maybe_vs_id.unpack();
|
||||
}
|
||||
|
||||
Singleton::Consume<I_OrchestrationTools>::by<Downloader>()->createDirectory(dir_path);
|
||||
}
|
||||
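The downloader init() change above appends a per-virtual-system suffix to the download directory when a "VS ID" is present in the environment. A small sketch of that path derivation; the optional parameter stands in for the environment lookup and the directory name is illustrative.

#include <iostream>
#include <optional>
#include <string>

static std::string downloadDirFor(std::string base_dir, const std::optional<std::string> &vs_id)
{
    if (vs_id) base_dir += "/vs" + *vs_id;   // e.g. <base_dir>/vs3
    return base_dir;
}

int main()
{
    std::cout << downloadDirFor("/tmp/orchestration_downloads", std::string("3")) << std::endl;
    return 0;
}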
|
||||
Maybe<string>
|
||||
Downloader::Impl::downloadFileFromFog(
|
||||
Downloader::Impl::downloadFile(
|
||||
const string &checksum,
|
||||
Package::ChecksumTypes checksum_type,
|
||||
const GetResourceFile &resourse_file) const
|
||||
{
|
||||
auto file_path = downloadFileFromFogByHTTP(resourse_file, resourse_file.getFileName() + ".download");
|
||||
auto file_path = downloadAttributeFile(resourse_file, resourse_file.getFileName() + ".download");
|
||||
|
||||
if (!file_path.ok()) {
|
||||
return file_path;
|
||||
@@ -373,11 +378,11 @@ Downloader::Impl::validateChecksum(
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
Downloader::Impl::downloadFileFromFogByHTTP(const GetResourceFile &resourse_file, const string &file_name) const
|
||||
Downloader::Impl::downloadAttributeFile(const GetResourceFile &resourse_file, const string &file_name) const
|
||||
{
|
||||
string file_path = dir_path + "/" + file_name;
|
||||
|
||||
dbgInfo(D_ORCHESTRATOR) << "Downloading file from fog. File: " << resourse_file.getFileName();
|
||||
dbgInfo(D_ORCHESTRATOR) << "Downloading file. File: " << resourse_file.getFileName();
|
||||
|
||||
I_UpdateCommunication *update_communication = Singleton::Consume<I_UpdateCommunication>::by<Downloader>();
|
||||
auto downloaded_file = update_communication->downloadAttributeFile(resourse_file, file_path);
|
||||
@@ -403,17 +408,14 @@ Downloader::Impl::getFileFromLocal(const string &local_file_path, const string &
|
||||
Maybe<string>
|
||||
Downloader::Impl::getFileFromURL(const URLParser &url, const string &file_path, bool auth_required) const
|
||||
{
|
||||
ofstream outFile(file_path, ofstream::out | ofstream::binary);
|
||||
HTTPClient http_client;
|
||||
dbgInfo(D_ORCHESTRATOR) << "Downloading file. URL: " << url;
|
||||
auto get_file_response = http_client.getFile(url, outFile, auth_required);
|
||||
auto get_file_response = HTTPSClient().getFile(url, file_path, auth_required);
|
||||
if (!get_file_response.ok()) {
|
||||
Maybe<string> error = genError("Failed to download file from " + url.getBaseURL().unpack() +
|
||||
". Error: " + get_file_response.getErr());
|
||||
dbgWarning(D_ORCHESTRATOR) << "Download failed";
|
||||
return error;
|
||||
}
|
||||
outFile.close();
|
||||
dbgInfo(D_ORCHESTRATOR) << "Download completed. URL: " << url;
|
||||
return file_path;
|
||||
}
|
||||
|
||||
@@ -38,7 +38,7 @@ TEST_F(DownloaderTest, doNothing)
|
||||
{
|
||||
}
|
||||
|
||||
TEST_F(DownloaderTest, downloadFileFromFog)
|
||||
TEST_F(DownloaderTest, downloadFile)
|
||||
{
|
||||
string fog_response = "bla bla";
|
||||
string checksum = "123";
|
||||
@@ -55,7 +55,7 @@ TEST_F(DownloaderTest, downloadFileFromFog)
|
||||
|
||||
EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile("/tmp/virtualSettings.download")).WillOnce(Return(true));
|
||||
|
||||
Maybe<string> downloaded_file = i_downloader->downloadFileFromFog(
|
||||
Maybe<string> downloaded_file = i_downloader->downloadFile(
|
||||
checksum,
|
||||
Package::ChecksumTypes::SHA256,
|
||||
resourse_file
|
||||
@@ -76,7 +76,7 @@ TEST_F(DownloaderTest, downloadFileFromFogFailure)
|
||||
downloadAttributeFile(resourse_file, "/tmp/settings.download")
|
||||
).WillOnce(Return(fog_response));
|
||||
|
||||
Maybe<string> downloaded_file = i_downloader->downloadFileFromFog(
|
||||
Maybe<string> downloaded_file = i_downloader->downloadFile(
|
||||
checksum,
|
||||
Package::ChecksumTypes::SHA256,
|
||||
resourse_file
|
||||
@@ -241,7 +241,7 @@ TEST_F(DownloaderTest, downloadEmptyFileFromFog)
|
||||
calculateChecksum(Package::ChecksumTypes::SHA256, "/tmp/manifest.download")
|
||||
).WillOnce(Return(checksum));
|
||||
|
||||
Maybe<string> downloaded_file = i_downloader->downloadFileFromFog(
|
||||
Maybe<string> downloaded_file = i_downloader->downloadFile(
|
||||
checksum,
|
||||
Package::ChecksumTypes::SHA256,
|
||||
resourse_file
|
||||
|
||||
@@ -1,270 +0,0 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "http_client.h"
|
||||
|
||||
#include "curl_client.h"
|
||||
#include "downloader.h"
|
||||
#include "debug.h"
|
||||
#include "i_encryptor.h"
|
||||
#include "url_parser.h"
|
||||
#include "config.h"
|
||||
#include "i_environment.h"
|
||||
#include "orchestration_comp.h"
|
||||
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
#include <iostream>
|
||||
#include <chrono>
|
||||
#include <boost/asio/ip/tcp.hpp>
|
||||
|
||||
using boost::asio::ip::tcp;
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_ORCHESTRATOR);
|
||||
USE_DEBUG_FLAG(D_HTTP_REQUEST);
|
||||
|
||||
// LCOV_EXCL_START Reason: Depends on real download server.
|
||||
class ClientConnection
|
||||
{
|
||||
public:
|
||||
ClientConnection(
|
||||
const URLParser &_url,
|
||||
const Maybe<string> &_proxy_url,
|
||||
const Maybe<uint16_t> &_proxy_port,
|
||||
const Maybe<string> &_proxy_auth,
|
||||
const string &_token)
|
||||
:
|
||||
url(_url),
|
||||
proxy_url(_proxy_url),
|
||||
proxy_port(_proxy_port),
|
||||
proxy_auth(_proxy_auth),
|
||||
token(_token)
|
||||
{
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
handleConnect()
|
||||
{
|
||||
if (!url.getBaseURL().ok()) {
|
||||
return genError("Failed to handle connection. Error: " + url.getBaseURL().getErr());
|
||||
}
|
||||
string server_name = url.getBaseURL().unpack();
|
||||
string port = url.getPort();
|
||||
string query = url.getQuery();
|
||||
string host = server_name;
|
||||
try {
|
||||
if (stoi(port) != 80) {
|
||||
host = host + ":" + port;
|
||||
}
|
||||
} catch (const exception &err) {
|
||||
return genError("Failed to parse port to a number. Port: " + port );
|
||||
}
|
||||
|
||||
chrono::duration<unsigned int, ratio<1>> sleep_time(60);
|
||||
io_stream.expires_from_now(sleep_time);
|
||||
|
||||
if (proxy_url.ok()) {
|
||||
if (!proxy_port.ok()) {
|
||||
return genError(
|
||||
"Failed to handle connection to server. proxy domain is defined with invalid port, Error: " +
|
||||
proxy_port.getErr()
|
||||
);
|
||||
}
|
||||
io_stream.connect(proxy_url.unpack(), to_string(proxy_port.unpack()));
|
||||
} else {
|
||||
io_stream.connect(server_name, port);
|
||||
}
|
||||
|
||||
if (!io_stream) {
|
||||
return genError("Failed to handle connection to server. Error: " + io_stream.error().message());
|
||||
}
|
||||
|
||||
string request_url = query;
|
||||
if (proxy_url.ok()) {
|
||||
request_url = host + query;
|
||||
}
|
||||
|
||||
stringstream http_request;
|
||||
http_request << "GET http://" << request_url << " HTTP/1.1\r\n";
|
||||
http_request << "Host: " << host << "\r\n";
|
||||
if (!token.empty()) {
|
||||
http_request << "Authorization: " << "Bearer " << token << "\r\n";
|
||||
}
|
||||
http_request << "User-Agent: Infinity Next (a7030abf93a4c13)\r\n";
|
||||
|
||||
auto i_trace_env = Singleton::Consume<I_Environment>::by<OrchestrationComp>();
|
||||
http_request << i_trace_env->getCurrentHeaders();
|
||||
http_request << "Accept: */*\r\n";
|
||||
|
||||
if (proxy_url.ok()) {
|
||||
http_request << "Accept-Encoding: identity";
|
||||
http_request << "Connection: close\r\n";
|
||||
http_request << "Proxy-Connection: Keep-Alive\r\n";
|
||||
|
||||
if (proxy_auth.ok()) {
|
||||
I_Encryptor *encryptor = Singleton::Consume<I_Encryptor>::by<Downloader>();
|
||||
http_request << "Proxy-Authorization: Basic " + encryptor->base64Encode(proxy_auth.unpack()) + "\r\n";
|
||||
}
|
||||
http_request << "\r\n";
|
||||
} else {
|
||||
http_request << "Connection: close\r\n\r\n";
|
||||
}
|
||||
|
||||
dbgTrace(D_HTTP_REQUEST) << "Sending the following HTTP Request: " << endl << http_request.str();
|
||||
io_stream << http_request.str();
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
handleResponse(ofstream &out_file)
|
||||
{
|
||||
string response_http_version;
|
||||
io_stream >> response_http_version;
|
||||
unsigned int status_code;
|
||||
io_stream >> status_code;
|
||||
string status_message;
|
||||
getline(io_stream, status_message);
|
||||
|
||||
if (!io_stream || response_http_version.substr(0, 5) != "HTTP/") {
|
||||
return genError("Invalid response");
|
||||
}
|
||||
|
||||
if (status_code != 200) {
|
||||
return genError("HTTP response returned with status code " + status_code);
|
||||
}
|
||||
|
||||
string header;
|
||||
vector<string> headers;
|
||||
while (getline(io_stream, header) && header != "\r") {
|
||||
headers.push_back(header);
|
||||
}
|
||||
|
||||
out_file << io_stream.rdbuf();
|
||||
|
||||
dbgTrace(D_HTTP_REQUEST)
|
||||
<< "Received HTTP Response with the following data (downloaded file will not be printed):"
|
||||
<< endl
|
||||
<< response_http_version
|
||||
<< " "
|
||||
<< status_code
|
||||
<< " "
|
||||
<< status_message
|
||||
<< endl
|
||||
<< makeSeparatedStr(headers, "\n");
|
||||
|
||||
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
private:
|
||||
const URLParser url;
|
||||
const Maybe<string> proxy_url;
|
||||
const Maybe<uint16_t> proxy_port;
|
||||
const Maybe<string> proxy_auth;
|
||||
const string &token;
|
||||
boost::asio::ip::tcp::iostream io_stream;
|
||||
};
|
||||
|
||||
Maybe<void>
|
||||
HTTPClient::getFile(const URLParser &url, ofstream &out_file, bool auth_required)
|
||||
{
|
||||
auto proxy_config = Singleton::Consume<I_ProxyConfiguration>::by<HTTPClient>();
|
||||
auto load_env_proxy = proxy_config->loadProxy();
|
||||
if (!load_env_proxy.ok()) return load_env_proxy;
|
||||
|
||||
string token = "";
|
||||
if (auth_required) {
|
||||
token = Singleton::Consume<I_AgentDetails>::by<HTTPClient>()->getAccessToken();
|
||||
}
|
||||
|
||||
if (url.isOverSSL()) {
|
||||
auto get_file_over_ssl_res = getFileSSL(url, out_file, token);
|
||||
if (!get_file_over_ssl_res.ok())
|
||||
{
|
||||
//CURL fallback
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to get file over SSL. Trying via CURL (SSL).";
|
||||
return curlGetFileOverSSL(url, out_file, token);
|
||||
}
|
||||
return get_file_over_ssl_res;
|
||||
}
|
||||
auto get_file_http_res = getFileHttp(url, out_file, token);
|
||||
if (!get_file_http_res.ok())
|
||||
{
|
||||
//CURL fallback
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to get file over HTTP. Trying via CURL (HTTP).";
|
||||
return curlGetFileOverHttp(url, out_file, token);
|
||||
}
|
||||
|
||||
return get_file_http_res;
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
HTTPClient::curlGetFileOverHttp(const URLParser &url, ofstream &out_file, const string &token)
|
||||
{
|
||||
try {
|
||||
auto proxy_config = Singleton::Consume<I_ProxyConfiguration>::by<HTTPClient>();
|
||||
|
||||
HttpCurl http_curl_client(
|
||||
url,
|
||||
out_file,
|
||||
token,
|
||||
proxy_config->getProxyDomain(ProxyProtocol::HTTPS),
|
||||
proxy_config->getProxyPort(ProxyProtocol::HTTPS),
|
||||
proxy_config->getProxyAuthentication(ProxyProtocol::HTTPS));
|
||||
|
||||
http_curl_client.setCurlOpts();
|
||||
bool connection_ok = http_curl_client.connect();
|
||||
if (!connection_ok)
|
||||
{
|
||||
stringstream url_s;
|
||||
url_s << url;
|
||||
string err_msg = string("Failed to get file over HTTP. URL: ") + url_s.str();
|
||||
return genError(err_msg);
|
||||
}
|
||||
|
||||
// As this class is a temporal solution catch all exception types is enabled.
|
||||
} catch (const exception &e) {
|
||||
string err_msg = "Failed to get file over HTTP. Exception: " + string(e.what());
|
||||
return genError(err_msg);
|
||||
}
|
||||
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
HTTPClient::getFileHttp(const URLParser &url, ofstream &out_file, const string &token)
|
||||
{
|
||||
try {
|
||||
auto proxy_config = Singleton::Consume<I_ProxyConfiguration>::by<HTTPClient>();
|
||||
ClientConnection client_connection(
|
||||
url,
|
||||
proxy_config->getProxyDomain(ProxyProtocol::HTTP),
|
||||
proxy_config->getProxyPort(ProxyProtocol::HTTP),
|
||||
proxy_config->getProxyAuthentication(ProxyProtocol::HTTP),
|
||||
token
|
||||
);
|
||||
auto handle_connect_res = client_connection.handleConnect();
|
||||
if (!handle_connect_res.ok()) return handle_connect_res;
|
||||
|
||||
return client_connection.handleResponse(out_file);
|
||||
|
||||
// As this class is a temporal solution catch all exception types is enabled.
|
||||
} catch (const exception &e) {
|
||||
string err_msg = "Failed to get file over HTTP. Exception: " + string(e.what());
|
||||
return genError(err_msg);
|
||||
}
|
||||
|
||||
return Maybe<void>();
|
||||
}
|
||||
// LCOV_EXCL_STOP
|
||||
@@ -1,43 +0,0 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef __HTTP_CLIENT_H__
|
||||
#define __HTTP_CLIENT_H__
|
||||
|
||||
#include <string>
|
||||
#include "maybe_res.h"
|
||||
#include "url_parser.h"
|
||||
#include "i_agent_details.h"
|
||||
#include "i_proxy_configuration.h"
|
||||
|
||||
// LCOV_EXCL_START Reason: Depends on real download server.
|
||||
class HTTPClient
|
||||
:
|
||||
public Singleton::Consume<I_AgentDetails>,
|
||||
public Singleton::Consume<I_ProxyConfiguration>
|
||||
{
|
||||
public:
|
||||
HTTPClient() = default;
|
||||
|
||||
Maybe<void> getFile(const URLParser &url, std::ofstream &out_file, bool auth_required);
|
||||
|
||||
private:
|
||||
std::string loadCAChainDir();
|
||||
Maybe<void> getFileSSL(const URLParser &url, std::ofstream &out_file, const std::string &_token);
|
||||
Maybe<void> getFileHttp(const URLParser &url, std::ofstream &out_file, const std::string &_token);
|
||||
Maybe<void> curlGetFileOverHttp(const URLParser &url, std::ofstream &out_file, const std::string &_token);
|
||||
Maybe<void> curlGetFileOverSSL(const URLParser &url, std::ofstream &out_file, const std::string &_token);
|
||||
};
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
#endif // __HTTP_CLIENT_H__
|
||||
@@ -11,498 +11,51 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "http_client.h"
|
||||
#include "curl_client.h"
|
||||
|
||||
#include "debug.h"
|
||||
#include "i_agent_details.h"
|
||||
#include "i_encryptor.h"
|
||||
#include "downloader.h"
|
||||
#include "config.h"
|
||||
#include "boost/uuid/uuid.hpp"
|
||||
#include "boost/uuid/uuid_generators.hpp"
|
||||
#include <boost/asio/deadline_timer.hpp>
|
||||
#include "boost/uuid/uuid_io.hpp"
|
||||
#include "https_client.h"
|
||||
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
#include <iostream>
|
||||
#include <istream>
|
||||
#include <ostream>
|
||||
#include <fstream>
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/bind/bind.hpp>
|
||||
#include <boost/asio/ssl.hpp>
|
||||
#include <exception>
|
||||
#include <chrono>
|
||||
|
||||
#include "config.h"
|
||||
#include "curl_client.h"
|
||||
|
||||
using namespace boost::placeholders;
|
||||
using boost::asio::ip::tcp;
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_COMMUNICATION);
|
||||
USE_DEBUG_FLAG(D_HTTP_REQUEST);
|
||||
USE_DEBUG_FLAG(D_ORCHESTRATOR);
|
||||
USE_DEBUG_FLAG(D_HTTP_REQUEST);
|
||||
|
||||
// LCOV_EXCL_START Reason: Depends on real download server.
|
||||
class BadResponseFromServer : public exception
|
||||
Maybe<void>
|
||||
HTTPSClient::getFile(const URLParser &url, const string &out_file, bool auth_required)
|
||||
{
|
||||
public:
|
||||
BadResponseFromServer() : message("Bad response returned from server") {}
|
||||
BadResponseFromServer(const string &msg) : message(msg) {}
|
||||
const char *
|
||||
what() const throw()
|
||||
{
|
||||
return message.c_str();
|
||||
auto proxy_config = Singleton::Consume<I_ProxyConfiguration>::by<OrchestrationComp>();
|
||||
auto load_env_proxy = proxy_config->loadProxy();
|
||||
if (!load_env_proxy.ok()) return load_env_proxy;
|
||||
|
||||
string token = "";
|
||||
if (auth_required) {
|
||||
token = Singleton::Consume<I_AgentDetails>::by<OrchestrationComp>()->getAccessToken();
|
||||
}
|
||||
|
||||
private:
|
||||
string message;
|
||||
};
|
||||
if (!url.isOverSSL()) return genError("URL is not over SSL.");
|
||||
|
||||
class Client
|
||||
{
|
||||
public:
|
||||
Client(
|
||||
ofstream &out_file,
|
||||
boost::asio::io_service &io_service,
|
||||
boost::asio::ssl::context &context,
|
||||
const URLParser &_url,
|
||||
const Maybe<string> &_proxy_url,
|
||||
const Maybe<uint16_t> &_proxy_port,
|
||||
const Maybe<string> &_proxy_auth,
|
||||
const string &_token)
|
||||
:
|
||||
out_file(out_file),
|
||||
url(_url),
|
||||
proxy_url(_proxy_url),
|
||||
proxy_port(_proxy_port),
|
||||
proxy_auth(_proxy_auth),
|
||||
resolver_(io_service),
|
||||
deadline(io_service),
|
||||
socket_(io_service),
|
||||
ssl_socket(socket_, context),
|
||||
token(_token)
|
||||
{
|
||||
}
|
||||
if (getFileSSLDirect(url, out_file, token).ok()) return Maybe<void>();
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to get file over SSL directly. Trying indirectly.";
|
||||
|
||||
Maybe<void>
|
||||
handleConnection()
|
||||
{
|
||||
ostream request_stream(&request_);
|
||||
stringstream http_request;
|
||||
http_request << "GET " << url.getQuery() << " HTTP/1.1\r\n";
|
||||
string host = url.getBaseURL().unpack();
|
||||
string port = url.getPort();
|
||||
int port_int;
|
||||
try {
|
||||
port_int = stoi(port);
|
||||
} catch (const exception &err) {
|
||||
dbgWarning(D_COMMUNICATION)
|
||||
<< "Failed to convert port number from string. Port: "
|
||||
<< port
|
||||
<< ", Error: "
|
||||
<< err.what();
|
||||
return genError("Failed to parse port to a number. Port: " + port);
|
||||
}
|
||||
if (port_int != 443) {
|
||||
host = host + ":" + port;
|
||||
}
|
||||
if (getFileSSL(url, out_file, token).ok()) return Maybe<void>();
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to get file over SSL. Trying via CURL (SSL).";
|
||||
|
||||
http_request << "Host: " << host << "\r\n";
|
||||
|
||||
if (!token.empty()) {
|
||||
http_request << "Authorization: " << "Bearer " << token << "\r\n";
|
||||
}
|
||||
http_request << "User-Agent: Infinity Next (a7030abf93a4c13)\r\n";
|
||||
boost::uuids::uuid correlation_id;
|
||||
try {
|
||||
correlation_id = uuid_random_gen();
|
||||
} catch (const boost::uuids::entropy_error &) {
|
||||
dbgWarning(D_COMMUNICATION) << "Failed to generate random correlation id - entropy exception";
|
||||
}
|
||||
http_request << "X-Trace-Id: " + boost::uuids::to_string(correlation_id) + "\r\n";
|
||||
http_request << "Accept: */*\r\n";
|
||||
http_request << "Connection: close\r\n\r\n";
|
||||
|
||||
request_stream << http_request.str();
|
||||
|
||||
deadline.expires_from_now(boost::posix_time::minutes(5));
|
||||
deadline.async_wait(boost::bind(&Client::checkDeadline, this, _1));
|
||||
|
||||
if (proxy_url.ok()) {
|
||||
if (!proxy_port.ok()) {
|
||||
dbgWarning(D_COMMUNICATION)
|
||||
<< "Failed to connect to proxy due to invalid port value, Error: "
|
||||
<< proxy_port.getErr();
|
||||
|
||||
return genError(
|
||||
"Failed to handle connection to server. proxy port is invalid, Error: " +
|
||||
proxy_port.getErr()
|
||||
);
|
||||
}
|
||||
if (port_int == 443) host = host + ":" + port;
|
||||
ostream connect_request_stream(&connect_request);
|
||||
stringstream proxy_request;
|
||||
proxy_request << "CONNECT " << host << " HTTP/1.1\r\n";
|
||||
proxy_request << "Host: " << host << "\r\n";
|
||||
if (proxy_auth.ok()) {
|
||||
I_Encryptor *encryptor = Singleton::Consume<I_Encryptor>::by<Downloader>();
|
||||
proxy_request
|
||||
<< "Proxy-Authorization: Basic "
|
||||
<< encryptor->base64Encode(proxy_auth.unpack())
|
||||
<< "\r\n";
|
||||
}
|
||||
proxy_request << "\r\n";
|
||||
|
||||
dbgTrace(D_HTTP_REQUEST) << "Connecting to proxy: " << endl << proxy_request.str();
|
||||
connect_request_stream << proxy_request.str();
|
||||
|
||||
tcp::resolver::query query(proxy_url.unpack(), to_string(proxy_port.unpack()));
|
||||
resolver_.async_resolve(
|
||||
query,
|
||||
boost::bind(
|
||||
&Client::overProxyResolver,
|
||||
this,
|
||||
boost::asio::placeholders::error,
|
||||
boost::asio::placeholders::iterator
|
||||
)
|
||||
);
|
||||
} else {
|
||||
tcp::resolver::query query(url.getBaseURL().unpack(), port);
|
||||
resolver_.async_resolve(
|
||||
query,
|
||||
boost::bind(
|
||||
&Client::handleResolve,
|
||||
this,
|
||||
boost::asio::placeholders::error,
|
||||
boost::asio::placeholders::iterator
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
dbgTrace(D_HTTP_REQUEST) << "Sending the following HTTP Request: " << endl << http_request.str();
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
private:
|
||||
void
|
||||
checkDeadline(const boost::system::error_code &err)
|
||||
{
|
||||
if (err) return;
|
||||
if (deadline.expires_at() <= boost::asio::deadline_timer::traits_type::now()) {
|
||||
boost::system::error_code ignored_ec = boost::asio::error::operation_aborted;
|
||||
socket_.close(ignored_ec);
|
||||
deadline.expires_at(boost::posix_time::pos_infin);
|
||||
return;
|
||||
}
|
||||
deadline.async_wait(boost::bind(&Client::checkDeadline, this, _1));
|
||||
}
|
||||
|
||||
void
|
||||
overProxyResolver(const boost::system::error_code &err, tcp::resolver::iterator endpoint_iterator)
|
||||
{
|
||||
if (!err) {
|
||||
boost::asio::async_connect(socket_, endpoint_iterator,
|
||||
boost::bind(&Client::overProxyHandleConnect, this,
|
||||
boost::asio::placeholders::error));
|
||||
} else {
|
||||
string err_msg = "Failed to connect to proxy. Error: " + err.message();
|
||||
throw BadResponseFromServer(err_msg);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
overProxyHandleConnect(const boost::system::error_code &err)
|
||||
{
|
||||
if (!err) {
|
||||
boost::asio::async_write(socket_, connect_request,
|
||||
boost::bind(&Client::overProxyHandleWriteRequest, this,
|
||||
boost::asio::placeholders::error));
|
||||
} else {
|
||||
string err_msg = "Failed to connect to proxy. Error: " + err.message();
|
||||
throw BadResponseFromServer(err_msg);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
overProxyHandleWriteRequest(const boost::system::error_code &err)
|
||||
{
|
||||
if (!err) {
|
||||
boost::asio::async_read_until(
|
||||
socket_,
|
||||
response_,
|
||||
"\r\n",
|
||||
boost::bind(&Client::overProxyHandleReadStatusLine, this, boost::asio::placeholders::error)
|
||||
);
|
||||
} else {
|
||||
string err_msg = "Failed to write over proxy. Error: " + err.message();
|
||||
throw BadResponseFromServer(err_msg);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
overProxyHandleReadStatusLine(const boost::system::error_code &err)
|
||||
{
|
||||
if (err) {
|
||||
string err_msg = "Failed to read status line over proxy. Error: " + err.message();
|
||||
throw BadResponseFromServer(err_msg);
|
||||
}
|
||||
// Check that response is OK.
|
||||
istream response_stream(&response_);
|
||||
string response_http_version;
|
||||
response_stream >> response_http_version;
|
||||
unsigned int status_code;
|
||||
response_stream >> status_code;
|
||||
string status_message;
|
||||
getline(response_stream, status_message);
|
||||
if (!response_stream || response_http_version.substr(0, 5) != "HTTP/") {
|
||||
throw BadResponseFromServer("Invalid response");
|
||||
return;
|
||||
}
|
||||
|
||||
if (status_code != 200) {
|
||||
string err_msg = "Response returned with status code " + status_code;
|
||||
throw BadResponseFromServer(err_msg);
|
||||
}
|
||||
|
||||
dbgTrace(D_HTTP_REQUEST)
|
||||
<< "Received HTTP Response over proxied connection with the following data:"
|
||||
<< endl
|
||||
<< response_http_version
|
||||
<< " "
|
||||
<< status_code
|
||||
<< " "
|
||||
<< status_message;
|
||||
|
||||
if (getProfileAgentSettingWithDefault<bool>(false, "agent.config.message.ignoreSslValidation") == false) {
|
||||
ssl_socket.set_verify_mode(boost::asio::ssl::verify_peer | boost::asio::ssl::verify_fail_if_no_peer_cert);
|
||||
ssl_socket.set_verify_callback(boost::bind(&Client::verifyCertificate, this, _1, _2));
|
||||
} else {
|
||||
dbgWarning(D_HTTP_REQUEST) << "Ignoring SSL validation";
|
||||
}
|
||||
|
||||
ssl_socket.async_handshake(
|
||||
boost::asio::ssl::stream_base::client,
|
||||
boost::bind(&Client::handleHandshake, this, boost::asio::placeholders::error)
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
handleResolve(const boost::system::error_code &err, tcp::resolver::iterator endpoint_iterator)
|
||||
{
|
||||
if (!err) {
|
||||
boost::asio::async_connect(ssl_socket.lowest_layer(), endpoint_iterator,
|
||||
boost::bind(&Client::handleConnect, this,
|
||||
boost::asio::placeholders::error));
|
||||
} else {
|
||||
string message = "Failed to connect. Error: " + err.message();
|
||||
throw BadResponseFromServer(message);
|
||||
}
|
||||
}
|
||||
|
||||
bool
|
||||
verifyCertificate(bool preverified, boost::asio::ssl::verify_context &ctx)
|
||||
{
|
||||
if (!token.empty()) {
|
||||
X509_STORE_CTX *cts = ctx.native_handle();
|
||||
|
||||
switch (X509_STORE_CTX_get_error(cts))
|
||||
{
|
||||
case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
|
||||
dbgWarning(D_ORCHESTRATOR) << "SSL verification error: X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT";
|
||||
break;
|
||||
case X509_V_ERR_CERT_NOT_YET_VALID:
|
||||
case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD:
|
||||
dbgWarning(D_ORCHESTRATOR) << "SSL verification error: Certificate not yet valid";
|
||||
break;
|
||||
case X509_V_ERR_CERT_HAS_EXPIRED:
|
||||
case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD:
|
||||
dbgWarning(D_ORCHESTRATOR) << "Certificate expired";
|
||||
break;
|
||||
case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT:
|
||||
case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN:
|
||||
dbgDebug(D_ORCHESTRATOR) << "Self signed certificate in chain";
|
||||
if (getConfigurationWithDefault(false, "orchestration", "Self signed certificates acceptable")) {
|
||||
preverified = true;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
if (!preverified) {
|
||||
dbgWarning(D_ORCHESTRATOR)
|
||||
<< "Certificate verification error number: "
|
||||
<< X509_STORE_CTX_get_error(cts);
|
||||
}
|
||||
break;
|
||||
}
|
||||
return preverified;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
handleConnect(const boost::system::error_code &err)
|
||||
{
|
||||
if (!err) {
|
||||
if (getProfileAgentSettingWithDefault<bool>(false, "agent.config.message.ignoreSslValidation") == false) {
|
||||
ssl_socket.set_verify_mode(
|
||||
boost::asio::ssl::verify_peer |
|
||||
boost::asio::ssl::verify_fail_if_no_peer_cert
|
||||
);
|
||||
ssl_socket.set_verify_callback(boost::bind(&Client::verifyCertificate, this, _1, _2));
|
||||
} else {
|
||||
dbgWarning(D_HTTP_REQUEST) << "Ignoring SSL validation";
|
||||
}
|
||||
|
||||
ssl_socket.async_handshake(boost::asio::ssl::stream_base::client,
|
||||
boost::bind(&Client::handleHandshake, this,
|
||||
boost::asio::placeholders::error));
|
||||
} else {
|
||||
string err_message = "Failed to connect. Error: " + err.message();
|
||||
throw BadResponseFromServer(err_message);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
handleHandshake(const boost::system::error_code &error)
|
||||
{
|
||||
if (!error) {
|
||||
boost::asio::buffer_cast<const char*>(request_.data());
|
||||
|
||||
boost::asio::async_write(ssl_socket, request_,
|
||||
boost::bind(&Client::handleWriteRequest, this,
|
||||
boost::asio::placeholders::error));
|
||||
} else {
|
||||
string err_message = "Handshake failed. Error: " + error.message();
|
||||
throw BadResponseFromServer(err_message);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
handleWriteRequest(const boost::system::error_code &err)
|
||||
{
|
||||
if (!err) {
|
||||
boost::asio::async_read_until(ssl_socket, resp, "\r\n",
|
||||
boost::bind(&Client::handleReadStatusLine, this,
|
||||
boost::asio::placeholders::error));
|
||||
} else {
|
||||
string err_message = "Failed to handle write request. Error: " + err.message();
|
||||
throw BadResponseFromServer(err_message);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
handleReadStatusLine(const boost::system::error_code &err)
|
||||
{
|
||||
if (!err) {
|
||||
istream response_stream(&resp);
|
||||
string http_version;
|
||||
response_stream >> http_version;
|
||||
unsigned int status_code;
|
||||
response_stream >> status_code;
|
||||
string status_message;
|
||||
getline(response_stream, status_message);
|
||||
dbgTrace(D_HTTP_REQUEST)
|
||||
<< "Received HTTP Response with the following data:"
|
||||
<< endl
|
||||
<< http_version
|
||||
<< " "
|
||||
<< status_code;
|
||||
|
||||
if (!response_stream || http_version.substr(0, 5) != "HTTP/") {
|
||||
string err_message = "Invalid response";
|
||||
throw BadResponseFromServer(err_message);
|
||||
}
|
||||
if (status_code != 200) {
|
||||
string err_message = "HTTPS response returned with status code " + to_string(status_code)
|
||||
+ ". URL: " + url.toString();
|
||||
throw BadResponseFromServer(err_message);
|
||||
}
|
||||
|
||||
boost::asio::async_read_until(ssl_socket, resp, "\r\n\r\n",
|
||||
boost::bind(&Client::handleReadHeaders, this,
|
||||
boost::asio::placeholders::error));
|
||||
} else {
|
||||
dbgWarning(D_COMMUNICATION) << "Failed to read response status. Error:" << err.message();
|
||||
string err_message = "Failed to read status. Error: " + err.message();
|
||||
throw BadResponseFromServer(err_message);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
handleReadHeaders(const boost::system::error_code &err)
|
||||
{
|
||||
if (!err) {
|
||||
// Process the response headers.
|
||||
istream response_stream(&resp);
|
||||
string header;
|
||||
vector<string> headers;
|
||||
while (getline(response_stream, header) && header != "\r") {
|
||||
headers.push_back(header);
|
||||
}
|
||||
|
||||
dbgTrace(D_HTTP_REQUEST) << "Received Response headers:" << endl << makeSeparatedStr(headers, "\n");
|
||||
// Write whatever content we already have to output.
|
||||
if (resp.size() > 0)
|
||||
out_file << &resp;
|
||||
|
||||
// Start reading remaining data until EOF.
|
||||
boost::asio::async_read(ssl_socket, resp,
|
||||
boost::asio::transfer_at_least(1),
|
||||
boost::bind(&Client::handleReadContent, this,
|
||||
boost::asio::placeholders::error));
|
||||
} else {
|
||||
dbgWarning(D_COMMUNICATION) << "Failed to read response headers. Error:" << err.message();
|
||||
string err_message = "Failed to read headers. Error: " + err.message();
|
||||
throw BadResponseFromServer(err_message);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
handleReadContent(const boost::system::error_code &err)
|
||||
{
|
||||
if (!err) {
|
||||
// Write all of the data that has been read so far.
|
||||
out_file << &resp;
|
||||
// Continue reading remaining data until EOF.
|
||||
boost::asio::async_read(
|
||||
ssl_socket,
|
||||
resp,
|
||||
boost::asio::transfer_at_least(1),
|
||||
boost::bind(&Client::handleReadContent, this, boost::asio::placeholders::error)
|
||||
);
|
||||
} else if (err != boost::asio::error::eof && err != boost::asio::ssl::error::stream_truncated) {
|
||||
dbgWarning(D_COMMUNICATION) << "Failed to read response body. Error:" << err.message();
|
||||
string err_message = "Failed to read content. Error: " + err.message();
|
||||
throw BadResponseFromServer(err_message);
|
||||
} else if (err == boost::asio::ssl::error::stream_truncated) {
|
||||
dbgError(D_COMMUNICATION) << "Had SSL warning during reading response body stage. Error:" << err.message();
|
||||
deadline.cancel();
|
||||
} else {
|
||||
deadline.cancel();
|
||||
}
|
||||
}
|
||||
|
||||
ofstream &out_file;
|
||||
const URLParser &url;
|
||||
const Maybe<string> proxy_url;
|
||||
const Maybe<uint16_t> proxy_port;
|
||||
const Maybe<string> proxy_auth;
|
||||
tcp::resolver resolver_;
|
||||
boost::asio::deadline_timer deadline;
|
||||
boost::asio::ip::tcp::socket socket_;
|
||||
boost::asio::ssl::stream<boost::asio::ip::tcp::socket&> ssl_socket;
|
||||
boost::asio::streambuf request_;
|
||||
boost::asio::streambuf connect_request;
|
||||
boost::asio::streambuf response_;
|
||||
boost::asio::streambuf resp;
|
||||
const string &token;
|
||||
boost::uuids::random_generator uuid_random_gen;
|
||||
};
|
||||
//CURL fallback
|
||||
return curlGetFileOverSSL(url, out_file, token);
|
||||
}
|
||||
|
||||
string
|
||||
HTTPClient::loadCAChainDir()
|
||||
HTTPSClient::loadCAChainDir()
|
||||
{
|
||||
string ca_chain_dir;
|
||||
auto agent_details = Singleton::Consume<I_AgentDetails>::by<Downloader>();
|
||||
auto agent_details = Singleton::Consume<I_AgentDetails>::by<OrchestrationComp>();
|
||||
auto load_ca_chain_dir = agent_details->getOpenSSLDir();
|
||||
if (load_ca_chain_dir.ok()) {
|
||||
ca_chain_dir = load_ca_chain_dir.unpack();
|
||||
@@ -511,69 +64,26 @@ HTTPClient::loadCAChainDir()
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
HTTPClient::getFileSSL(const URLParser &url, ofstream &out_file, const string &token)
|
||||
HTTPSClient::getFileSSL(const URLParser &url, const string &out_file, const string &)
|
||||
{
|
||||
try {
|
||||
boost::asio::ssl::context ctx(boost::asio::ssl::context::sslv23);
|
||||
if (!token.empty()) {
|
||||
string cert_file_path = getConfigurationWithDefault<string>(
|
||||
getFilesystemPathConfig() + "/certs/fog.pem",
|
||||
"message",
|
||||
"Certificate chain file path"
|
||||
);
|
||||
dbgTrace(D_ORCHESTRATOR) << "Http client, cert file path: " << cert_file_path;
|
||||
auto trusted_ca_directory = getConfiguration<string>("message", "Trusted CA directory");
|
||||
if (trusted_ca_directory.ok() && !trusted_ca_directory.unpack().empty()) {
|
||||
ctx.add_verify_path(trusted_ca_directory.unpack());
|
||||
} else {
|
||||
string cert_file_path = getConfigurationWithDefault<string>(
|
||||
getFilesystemPathConfig() + "/certs/fog.pem",
|
||||
"message",
|
||||
"Certificate chain file path"
|
||||
);
|
||||
ctx.load_verify_file(cert_file_path);
|
||||
}
|
||||
}
|
||||
boost::asio::io_service io_service;
|
||||
auto proxy_config = Singleton::Consume<I_ProxyConfiguration>::by<HTTPClient>();
|
||||
|
||||
Client client(
|
||||
out_file,
|
||||
io_service,
|
||||
ctx,
|
||||
url,
|
||||
proxy_config->getProxyDomain(ProxyProtocol::HTTPS),
|
||||
proxy_config->getProxyPort(ProxyProtocol::HTTPS),
|
||||
proxy_config->getProxyAuthentication(ProxyProtocol::HTTPS),
|
||||
token
|
||||
);
|
||||
|
||||
auto connection_result = client.handleConnection();
|
||||
if (!connection_result.ok()) {
|
||||
return connection_result;
|
||||
};
|
||||
|
||||
auto mainloop = Singleton::Consume<I_MainLoop>::by<Downloader>();
|
||||
while (!io_service.stopped()) {
|
||||
io_service.poll_one();
|
||||
mainloop->yield(true);
|
||||
}
|
||||
} catch (const exception &e) {
|
||||
dbgWarning(D_COMMUNICATION) << "Failed to get file over HTTPS. Error:" << string(e.what());
|
||||
string error_str = "Failed to get file over HTTPS, exception: " + string(e.what());
|
||||
return genError(error_str);
|
||||
auto downlaod_file = Singleton::Consume<I_Messaging>::by<OrchestrationComp>()->downloadFile(
|
||||
HTTPMethod::GET,
|
||||
url.getQuery(),
|
||||
out_file
|
||||
);
|
||||
if (!downlaod_file.ok()) {
|
||||
dbgWarning(D_ORCHESTRATOR) << "Failed to get file over SSL. Error: " << downlaod_file.getErr().toString();
|
||||
return genError(downlaod_file.getErr().toString());
|
||||
}
|
||||
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
HTTPClient::curlGetFileOverSSL(const URLParser &url, ofstream &out_file, const string &token)
|
||||
HTTPSClient::curlGetFileOverSSL(const URLParser &url, const string &out_file, const string &token)
|
||||
{
|
||||
try {
|
||||
string cert_file_path;
|
||||
if (!token.empty())
|
||||
{
|
||||
if (!token.empty()) {
|
||||
cert_file_path = getConfigurationWithDefault<string>(
|
||||
getFilesystemPathConfig() + "/certs/fog.pem",
|
||||
"message",
|
||||
@@ -581,11 +91,12 @@ HTTPClient::curlGetFileOverSSL(const URLParser &url, ofstream &out_file, const s
|
||||
);
|
||||
}
|
||||
|
||||
auto proxy_config = Singleton::Consume<I_ProxyConfiguration>::by<HTTPClient>();
|
||||
auto proxy_config = Singleton::Consume<I_ProxyConfiguration>::by<OrchestrationComp>();
|
||||
ofstream out_file_stream(out_file, ofstream::out | ofstream::binary);
|
||||
|
||||
HttpsCurl ssl_curl_client(
|
||||
url,
|
||||
out_file,
|
||||
out_file_stream,
|
||||
token,
|
||||
proxy_config->getProxyDomain(ProxyProtocol::HTTPS),
|
||||
proxy_config->getProxyPort(ProxyProtocol::HTTPS),
|
||||
@@ -594,8 +105,7 @@ HTTPClient::curlGetFileOverSSL(const URLParser &url, ofstream &out_file, const s
|
||||
|
||||
ssl_curl_client.setCurlOpts();
|
||||
bool connection_ok = ssl_curl_client.connect();
|
||||
if (!connection_ok)
|
||||
{
|
||||
if (!connection_ok) {
|
||||
stringstream url_s;
|
||||
url_s << url;
|
||||
string err_msg = string("Failed to get file over HTTPS. URL: ") + url_s.str();
|
||||
@@ -603,12 +113,11 @@ HTTPClient::curlGetFileOverSSL(const URLParser &url, ofstream &out_file, const s
|
||||
}
|
||||
|
||||
} catch (const exception &e) {
|
||||
dbgWarning(D_COMMUNICATION) << "Failed to get file over HTTPS. Error:" << string(e.what());
|
||||
dbgWarning(D_HTTP_REQUEST) << "Failed to get file over HTTPS. Error:" << string(e.what());
|
||||
string error_str = "Failed to get file over HTTPS, exception: " + string(e.what());
|
||||
return genError(error_str);
|
||||
}
|
||||
|
||||
return Maybe<void>();
|
||||
}
|
||||
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
@@ -0,0 +1,36 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef __HTTPS_CLIENT_H__
|
||||
#define __HTTPS_CLIENT_H__
|
||||
|
||||
#include <string>
|
||||
#include "maybe_res.h"
|
||||
#include "url_parser.h"
|
||||
#include "orchestration_comp.h"
|
||||
|
||||
// LCOV_EXCL_START Reason: Depends on real download server.
|
||||
class HTTPSClient
|
||||
{
|
||||
public:
|
||||
Maybe<void> getFile(const URLParser &url, const std::string &out_file, bool auth_required);
|
||||
|
||||
private:
|
||||
std::string loadCAChainDir();
|
||||
Maybe<void> getFileSSL(const URLParser &url, const std::string &out_file, const std::string &_token);
|
||||
Maybe<void> getFileSSLDirect(const URLParser &url, const std::string &out_file, const std::string &_token);
|
||||
Maybe<void> curlGetFileOverSSL(const URLParser &url, const std::string &out_file, const std::string &_token);
|
||||
};
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
#endif // __HTTPS_CLIENT_H__
|
||||
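The new HTTPSClient::getFile tries a chain of strategies in order: a direct SSL download, then the messaging-based SSL path, and finally the CURL fallback. A hedged sketch of that fallback-chain shape, detached from the project's types; the attempt names and lambdas are placeholders, not the real download functions.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Attempt {
    std::string name;
    std::function<bool(const std::string &url, const std::string &out_file)> run;
};

static bool downloadWithFallback(const std::string &url, const std::string &out_file,
                                 const std::vector<Attempt> &attempts)
{
    for (const auto &attempt : attempts) {
        if (attempt.run(url, out_file)) return true;
        std::cerr << attempt.name << " failed, trying next method" << std::endl;
    }
    return false;
}

int main()
{
    std::vector<Attempt> attempts = {
        {"direct SSL",    [](const std::string &, const std::string &) { return false; }},
        {"messaging SSL", [](const std::string &, const std::string &) { return false; }},
        {"CURL",          [](const std::string &, const std::string &) { return true;  }},
    };
    return downloadWithFallback("https://example.com/file", "/tmp/file", attempts) ? 0 : 1;
}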
@@ -0,0 +1,20 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "https_client.h"
|
||||
|
||||
Maybe<void>
|
||||
HTTPSClient::getFileSSLDirect(const URLParser &, const std::string &, const std::string &)
|
||||
{
|
||||
return genError("No direct downloading in open-source");
|
||||
}
|
||||
@@ -15,6 +15,7 @@
|
||||
|
||||
#include "config.h"
|
||||
#include "debug.h"
|
||||
#include "orchestration_tools.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
@@ -22,10 +23,15 @@ USE_DEBUG_FLAG(D_LOCAL_POLICY);
|
||||
|
||||
static const string k8s_service_account = "/var/run/secrets/kubernetes.io/serviceaccount";
|
||||
// LCOV_EXCL_START Reason: can't use on the pipeline environment
|
||||
EnvDetails::EnvDetails()
|
||||
EnvDetails::EnvDetails() : env_type(EnvType::LINUX)
|
||||
{
|
||||
auto tools = Singleton::Consume<I_OrchestrationTools>::from<OrchestrationTools>();
|
||||
if (tools->doesFileExist("/.dockerenv")) env_type = EnvType::DOCKER;
|
||||
token = retrieveToken();
|
||||
token.empty() ? env_type = EnvType::LINUX : env_type = EnvType::K8S;
|
||||
if (!token.empty()) {
|
||||
auto env_res = getenv("deployment_type");
|
||||
env_type = env_res != nullptr && env_res == string("non_crd_k8s") ? EnvType::NON_CRD_K8S : EnvType::K8S;
|
||||
}
|
||||
}
|
||||
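The EnvDetails constructor above now starts from LINUX, switches to DOCKER when /.dockerenv exists, and moves to K8S, or NON_CRD_K8S when the deployment_type environment variable says so, once a service-account token is found. A standalone sketch of that decision order; the file check and enum are simplified stand-ins for the project's tools.

#include <cstdlib>
#include <fstream>
#include <iostream>
#include <string>

enum class EnvType { LINUX, DOCKER, K8S, NON_CRD_K8S };

static bool fileExists(const std::string &path) { return std::ifstream(path).good(); }

static EnvType detectEnvType(const std::string &service_account_token)
{
    EnvType env_type = EnvType::LINUX;
    if (fileExists("/.dockerenv")) env_type = EnvType::DOCKER;
    if (!service_account_token.empty()) {
        const char *deployment = std::getenv("deployment_type");
        env_type = (deployment != nullptr && std::string(deployment) == "non_crd_k8s")
            ? EnvType::NON_CRD_K8S
            : EnvType::K8S;
    }
    return env_type;
}

int main()
{
    std::cout << static_cast<int>(detectEnvType("")) << std::endl;   // no token: LINUX or DOCKER
    return 0;
}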
|
||||
EnvType
|
||||
|
||||
@@ -47,6 +47,7 @@ HybridModeMetric::upon(const HybridModeMetricEvent &)
|
||||
string cmd_output = maybe_cmd_output.unpack();
|
||||
trim(cmd_output);
|
||||
dbgDebug(D_ORCHESTRATOR) << "Watchdog process counter: " << cmd_output;
|
||||
if (cmd_output.empty()) return;
|
||||
|
||||
try {
|
||||
wd_process_restart.report(stoi(cmd_output));
|
||||
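The metric hunk above trims the watchdog counter output and skips the report when it is empty before converting it with stoi. A small sketch of that defensive parse; trim here is a simplified local helper rather than the project's utility.

#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>

static std::string trim(const std::string &s)
{
    const auto begin = s.find_first_not_of(" \t\r\n");
    if (begin == std::string::npos) return "";
    const auto end = s.find_last_not_of(" \t\r\n");
    return s.substr(begin, end - begin + 1);
}

static std::optional<int> parseCounter(const std::string &raw_output)
{
    const std::string value = trim(raw_output);
    if (value.empty()) return std::nullopt;   // nothing to report
    try {
        return std::stoi(value);
    } catch (const std::exception &) {
        return std::nullopt;                  // non-numeric output is ignored
    }
}

int main()
{
    auto counter = parseCounter(" 4 \n");
    std::cout << (counter ? *counter : -1) << std::endl;
    return 0;
}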
|
||||
@@ -33,6 +33,7 @@
|
||||
#include "i_rest_api.h"
|
||||
#include "i_time_get.h"
|
||||
#include "i_encryptor.h"
|
||||
#include "i_shell_cmd.h"
|
||||
#include "maybe_res.h"
|
||||
|
||||
class FogAuthenticator
|
||||
@@ -43,10 +44,13 @@ class FogAuthenticator
|
||||
Singleton::Consume<I_DetailsResolver>,
|
||||
Singleton::Consume<I_OrchestrationStatus>,
|
||||
Singleton::Consume<I_OrchestrationTools>,
|
||||
Singleton::Consume<I_EnvDetails>,
|
||||
Singleton::Consume<I_Encryptor>,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_Messaging>,
|
||||
Singleton::Consume<I_TimeGet>
|
||||
Singleton::Consume<I_TimeGet>,
|
||||
Singleton::Consume<I_ShellCmd>,
|
||||
Singleton::Consume<I_Environment>
|
||||
{
|
||||
class AccessToken
|
||||
{
|
||||
@@ -88,6 +92,8 @@ public:
|
||||
void serialize(cereal::JSONOutputArchive &out_ar) const;
|
||||
void serialize(cereal::JSONInputArchive &in_ar);
|
||||
|
||||
std::string getData() const;
|
||||
|
||||
private:
|
||||
AuthenticationType type;
|
||||
std::string data;
|
||||
@@ -103,6 +109,7 @@ public:
|
||||
Maybe<void> authenticateAgent() override;
|
||||
void setAddressExtenesion(const std::string &extension) override;
|
||||
static std::string getUserEdition();
|
||||
void registerLocalAgentToFog() override;
|
||||
|
||||
protected:
|
||||
class UserCredentials
|
||||
@@ -138,6 +145,7 @@ protected:
|
||||
|
||||
bool saveCredentialsToFile(const UserCredentials &credentials) const;
|
||||
Maybe<UserCredentials> getCredentialsFromFile() const;
|
||||
Maybe<RegistrationData> getRegistrationToken();
|
||||
Maybe<RegistrationData> getRegistrationData();
|
||||
|
||||
std::string base64Encode(const std::string &in) const;
|
||||
|
||||
@@ -37,10 +37,7 @@
|
||||
#include "maybe_res.h"
|
||||
#include "declarative_policy_utils.h"
|
||||
|
||||
class HybridCommunication
|
||||
:
|
||||
public FogAuthenticator,
|
||||
Singleton::Consume<I_EnvDetails>
|
||||
class HybridCommunication : public FogAuthenticator
|
||||
{
|
||||
public:
|
||||
void init() override;
|
||||
|
||||
@@ -29,6 +29,7 @@ public:
|
||||
void init();
|
||||
|
||||
Maybe<void> authenticateAgent() override;
|
||||
void registerLocalAgentToFog() override;
|
||||
Maybe<void> getUpdate(CheckUpdateRequest &request) override;
|
||||
|
||||
Maybe<std::string> downloadAttributeFile(
|
||||
|
||||
@@ -35,6 +35,7 @@ public:
|
||||
MOCK_METHOD0(getPlatform, Maybe<std::string>());
|
||||
MOCK_METHOD0(getArch, Maybe<std::string>());
|
||||
MOCK_METHOD0(getAgentVersion, std::string());
|
||||
MOCK_METHOD0(isCloudStorageEnabled, bool());
MOCK_METHOD0(isReverseProxy, bool());
MOCK_METHOD0(isKernelVersion3OrHigher, bool());
MOCK_METHOD0(isGwNotVsx, bool());

@@ -24,7 +24,7 @@ class MockDownloader :
{
public:
MOCK_CONST_METHOD3(
downloadFileFromFog,
downloadFile,
Maybe<std::string>(const std::string &, Package::ChecksumTypes, const GetResourceFile &)
);

@@ -29,6 +29,7 @@ class MockUpdateCommunication :
public:
void init() {}
MOCK_METHOD0(authenticateAgent, Maybe<void>());
MOCK_METHOD0(registerLocalAgentToFog, void());
MOCK_METHOD1(getUpdate, Maybe<void>(CheckUpdateRequest &));
MOCK_METHOD2(
downloadAttributeFile,

@@ -26,7 +26,7 @@ class NamespaceData : public ClientRest
{
public:
bool loadJson(const std::string &json);
Maybe<std::string> getNamespaceUidByName(const std::string &name);
Maybe<std::string> getNamespaceUidByName(const std::string &name) const;

private:
std::map<std::string, std::string> ns_name_to_uid;

@@ -21,8 +21,8 @@ class OrchestrationPolicy
{
public:
const std::string & getFogAddress() const;
const unsigned long & getSleepInterval() const;
const unsigned long & getErrorSleepInterval() const;
unsigned int getSleepInterval() const;
unsigned int getErrorSleepInterval() const;

void serialize(cereal::JSONInputArchive & archive);

@@ -31,8 +31,8 @@ public:

private:
std::string fog_address;
unsigned long sleep_interval;
unsigned long error_sleep_interval;
unsigned int sleep_interval;
unsigned int error_sleep_interval;
};

#endif // __ORCHESTRATION_POLICY_H__

@@ -43,8 +43,8 @@ TEST_F(PolicyTest, serialization)
ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what();
}

EXPECT_EQ(15u, orchestration_policy.getErrorSleepInterval());
EXPECT_EQ(20u, orchestration_policy.getSleepInterval());
EXPECT_EQ(15, orchestration_policy.getErrorSleepInterval());
EXPECT_EQ(20, orchestration_policy.getSleepInterval());
EXPECT_EQ("http://10.0.0.18:81/control/", orchestration_policy.getFogAddress());
}

@@ -63,8 +63,8 @@ TEST_F(PolicyTest, noAgentType)
ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what();
}

EXPECT_EQ(15u, orchestration_policy.getErrorSleepInterval());
EXPECT_EQ(20u, orchestration_policy.getSleepInterval());
EXPECT_EQ(15, orchestration_policy.getErrorSleepInterval());
EXPECT_EQ(20, orchestration_policy.getSleepInterval());
EXPECT_EQ("http://10.0.0.18:81/control/", orchestration_policy.getFogAddress());
}

@@ -83,8 +83,8 @@ TEST_F(PolicyTest, zeroSleepIntervels)
ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what();
}

EXPECT_EQ(0u, orchestration_policy.getErrorSleepInterval());
EXPECT_EQ(0u, orchestration_policy.getSleepInterval());
EXPECT_EQ(0, orchestration_policy.getErrorSleepInterval());
EXPECT_EQ(0, orchestration_policy.getSleepInterval());
EXPECT_EQ("http://10.0.0.18:81/control/", orchestration_policy.getFogAddress());
}

@@ -152,7 +152,7 @@ TEST_F(PolicyTest, newOptionalFields)
ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what();
}

EXPECT_EQ(10u, orchestration_policy.getErrorSleepInterval());
EXPECT_EQ(30u, orchestration_policy.getSleepInterval());
EXPECT_EQ(10, orchestration_policy.getErrorSleepInterval());
EXPECT_EQ(30, orchestration_policy.getSleepInterval());
EXPECT_EQ("https://fog-api-gw-agents.cloud.ngen.checkpoint.com", orchestration_policy.getFogAddress());
}

@@ -59,6 +59,15 @@ TEST_F(URLParserTest, parseAWSWithoutSlash)
EXPECT_EQ("", link.getQuery());
}

TEST_F(URLParserTest, setHost)
{
URLParser link("http://172.23.92.180:180/something");

EXPECT_EQ(link.getHost(), "172.23.92.180");
link.setHost("my.domain");
EXPECT_EQ(link.getHost(), "my.domain");
}

TEST_F(URLParserTest, protocolIsMissing)
{
// HTTPS is set by default when protocol is not present in URL.

@@ -22,13 +22,13 @@ OrchestrationPolicy::getFogAddress() const
return fog_address;
}

const unsigned long &
unsigned int
OrchestrationPolicy::getSleepInterval() const
{
return sleep_interval;
}

const unsigned long &
unsigned int
OrchestrationPolicy::getErrorSleepInterval() const
{
return error_sleep_interval;
@@ -37,10 +37,13 @@ OrchestrationPolicy::getErrorSleepInterval() const
void
OrchestrationPolicy::serialize(JSONInputArchive &archive)
{
// Split it, so the order doesn't matter.
archive(make_nvp("fog-address", fog_address));
archive(make_nvp("pulling-interval", sleep_interval));
archive(make_nvp("error-pulling-interval", error_sleep_interval));
try {
archive(make_nvp("fog-address", fog_address));
archive(make_nvp("pulling-interval", sleep_interval));
archive(make_nvp("error-pulling-interval", error_sleep_interval));
} catch (const cereal::Exception&) {
archive(make_nvp("orchestration", *this));
}
}

bool
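The serialize change above tries the flat top-level keys first and, if cereal throws, reparses the same fields from a nested "orchestration" object. Below is a minimal standalone sketch of that fallback pattern, assuming only the cereal library; PolicySketch and its field names are illustrative and are not the repository's OrchestrationPolicy.

#include <cereal/archives/json.hpp>
#include <cereal/types/string.hpp>
#include <iostream>
#include <sstream>
#include <string>

// Illustrative struct; it mirrors only the try-flat-keys-then-nested-object idea.
struct PolicySketch
{
    std::string fog_address;
    unsigned int sleep_interval = 0;

    template <class Archive>
    void serialize(Archive &archive)
    {
        try {
            // First attempt: keys at the top level of the JSON document.
            archive(cereal::make_nvp("fog-address", fog_address));
            archive(cereal::make_nvp("pulling-interval", sleep_interval));
        } catch (const cereal::Exception &) {
            // Fallback: the same keys nested under an "orchestration" object.
            archive(cereal::make_nvp("orchestration", *this));
        }
    }
};

int main()
{
    std::stringstream flat(R"({"fog-address": "https://fog.example", "pulling-interval": 25})");
    cereal::JSONInputArchive archive(flat);
    PolicySketch policy;
    policy.serialize(archive);
    std::cout << policy.fog_address << " " << policy.sleep_interval << "\n";
    return 0;
}

The nested form works because cereal re-enters serialize for the object named "orchestration", so the same flat-key reads then succeed one level down.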
@@ -399,7 +399,6 @@ public:
if (!write_result) {
dbgWarning(D_ORCHESTRATOR) << "Failed to write Orchestration status. File: " << orchestration_status_path;
}
dbgTrace(D_ORCHESTRATOR) << "Orchestration status file has been updated. File: " << orchestration_status_path;
}

void
@@ -459,7 +458,6 @@ public:
seconds(5),
[this] ()
{
dbgTrace(D_ORCHESTRATOR) << "Write Orchestration status file <co-routine>";
writeStatusToFile();
},
"Write Orchestration status file"

@@ -129,6 +129,18 @@ URLParser::parseProtocol(const string &url) const
return URLProtocol::HTTPS;
}

string
URLParser::getHost() const
{
return host.empty() ? base_url : host;
}

void
URLParser::setHost(const string &new_host)
{
host = new_host;
}

void
URLParser::setQuery(const string &new_query)
{
File diff suppressed because it is too large
@@ -108,7 +108,7 @@ NamespaceData::loadJson(const string &json)
}

Maybe<string>
NamespaceData::getNamespaceUidByName(const string &name)
NamespaceData::getNamespaceUidByName(const string &name) const
{
if (ns_name_to_uid.find(name) == ns_name_to_uid.end()) {
return genError("Namespace doesn't exist. Name: " + name);

@@ -54,7 +54,7 @@ public:
bool removeFile(const string &path) const override;
bool copyFile(const string &src_path, const string &dst_path) const override;
bool doesFileExist(const string &file_path) const override;
void getClusterId() const override;
void setClusterId() const override;
void fillKeyInJson(const string &filename, const string &_key, const string &_val) const override;
bool createDirectory(const string &directory_path) const override;
bool doesDirectoryExist(const string &dir_path) const override;
@@ -143,83 +143,55 @@ isPlaygroundEnv()
}

Maybe<NamespaceData, string>
getNamespaceDataFromCluster(const string &path)
getNamespaceDataFromCluster()
{
NamespaceData name_space;
string token = Singleton::Consume<I_EnvDetails>::by<OrchestrationTools>()->getToken();
auto messaging = Singleton::Consume<I_Messaging>::by<OrchestrationTools>();
string auth_header = "Authorization: Bearer " + token;
string connection_header = "Connection: close";
string host = "https://kubernetes.default.svc:443/api/v1/namespaces/";
string culr_cmd = "curl -s -k -H \"" + auth_header + "\" -H \"" + connection_header + "\" " + host +
" | /etc/cp/bin/cpnano_json";

MessageMetadata get_ns_md("kubernetes.default.svc", 443);
get_ns_md.insertHeader("Authorization", "Bearer " + token);
get_ns_md.insertHeader("Connection", "close");
get_ns_md.setConnectioFlag(MessageConnectionConfig::IGNORE_SSL_VALIDATION);
auto res = messaging->sendSyncMessage(
HTTPMethod::GET,
path,
name_space,
MessageCategory::GENERIC,
get_ns_md
);
auto output_res = Singleton::Consume<I_ShellCmd>::by<OrchestrationTools>()->getExecOutput(culr_cmd);
if (!output_res.ok()) {
return genError("Failed to get namespace data from the cluster: " + output_res.getErr());
}

if (res.ok()) return name_space;

return genError(string("Was not able to get object form k8s cluser in path: " + path));
dbgTrace(D_ORCHESTRATOR) << "Got the repsonse from the cluster: " << output_res.unpack();
NamespaceData name_space;
if (name_space.loadJson(output_res.unpack())) return name_space;
return genError("Was not able to parse the object form k8s cluser");
}

bool
doesClusterIdExists()
void
OrchestrationTools::Impl::setClusterId() const
{
auto env_type = Singleton::Consume<I_EnvDetails>::by<OrchestrationTools>()->getEnvType();
if (env_type != EnvType::K8S) return;

dbgTrace(D_ORCHESTRATOR) << "Setting cluster UID";

Maybe<NamespaceData> namespaces_data = getNamespaceDataFromCluster();
if (!namespaces_data.ok()) {
dbgWarning(D_ORCHESTRATOR) << "Failed to retrieve namespace data. Error: " << namespaces_data.getErr();
return;
}

auto ns_uid = (*namespaces_data).getNamespaceUidByName("kube-system");
if (!ns_uid.ok()) {
dbgWarning(D_ORCHESTRATOR) << "Failed to retrieve namespace UID. Error: " << ns_uid.getErr();
return;
}

string playground_uid = isPlaygroundEnv() ? "playground-" : "";

dbgTrace(D_ORCHESTRATOR) << "Getting cluster UID";

auto maybe_namespaces_data = getNamespaceDataFromCluster("/api/v1/namespaces/");

if (!maybe_namespaces_data.ok()) {
dbgWarning(D_ORCHESTRATOR)
<< "Failed to retrieve K8S namespace data. Error: "
<< maybe_namespaces_data.getErr();
return false;
}

NamespaceData namespaces_data = maybe_namespaces_data.unpack();

Maybe<string> maybe_ns_uid = namespaces_data.getNamespaceUidByName("kube-system");
if (!maybe_ns_uid.ok()) {
dbgWarning(D_ORCHESTRATOR) << maybe_ns_uid.getErr();
return false;
}
string uid = playground_uid + maybe_ns_uid.unpack();
string uid = playground_uid + ns_uid.unpack();
dbgTrace(D_ORCHESTRATOR) << "Found k8s cluster UID: " << uid;
I_Environment *env = Singleton::Consume<I_Environment>::by<OrchestrationTools>();
env->getConfigurationContext().registerValue<string>(
Singleton::Consume<I_Environment>::by<OrchestrationTools>()->getConfigurationContext().registerValue<string>(
"k8sClusterId",
uid,
EnvKeyAttr::LogSection::SOURCE
);
I_AgentDetails *i_agent_details = Singleton::Consume<I_AgentDetails>::by<OrchestrationTools>();
i_agent_details->setClusterId(uid);
return true;
}

void
OrchestrationTools::Impl::getClusterId() const
{
auto env_type = Singleton::Consume<I_EnvDetails>::by<OrchestrationTools>()->getEnvType();

if (env_type == EnvType::K8S) {
Singleton::Consume<I_MainLoop>::by<OrchestrationTools>()->addOneTimeRoutine(
I_MainLoop::RoutineType::Offline,
[this] ()
{
while(!doesClusterIdExists()) {
Singleton::Consume<I_MainLoop>::by<OrchestrationTools>()->yield(chrono::seconds(1));
}
return;
},
"Get k8s cluster ID"
);
}
Singleton::Consume<I_AgentDetails>::by<OrchestrationTools>()->setClusterId(uid);
}

bool
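The cluster-ID code above threads every intermediate result through the project's Maybe type: genError on failure, ok() to branch, and unpack()/getErr() to extract the value or the message. The following is a simplified stand-in (not the project's actual Maybe implementation) showing how those calls compose around the kube-system lookup.

#include <iostream>
#include <map>
#include <string>
#include <variant>

// Simplified, illustrative stand-in for Maybe<T>: a value or an error string.
template <typename T>
class MaybeSketch
{
public:
    static MaybeSketch value(T v) { MaybeSketch m; m.data = std::move(v); return m; }
    static MaybeSketch error(std::string e) { MaybeSketch m; m.data = Err{std::move(e)}; return m; }

    bool ok() const { return std::holds_alternative<T>(data); }
    const T & unpack() const { return std::get<T>(data); }
    const std::string & getErr() const { return std::get<Err>(data).msg; }

private:
    struct Err { std::string msg; };
    MaybeSketch() = default;
    std::variant<T, Err> data;
};

// Mirrors the namespace lookup above: return the UID if it exists, an error otherwise.
static MaybeSketch<std::string>
getNamespaceUid(const std::map<std::string, std::string> &ns_name_to_uid, const std::string &name)
{
    auto it = ns_name_to_uid.find(name);
    if (it == ns_name_to_uid.end()) {
        return MaybeSketch<std::string>::error("Namespace doesn't exist. Name: " + name);
    }
    return MaybeSketch<std::string>::value(it->second);
}

int main()
{
    std::map<std::string, std::string> namespaces = { { "kube-system", "d6e1f4a0-uid" } };
    auto ns_uid = getNamespaceUid(namespaces, "kube-system");
    if (!ns_uid.ok()) {
        std::cout << ns_uid.getErr() << "\n";
        return 1;
    }
    std::cout << "cluster id: " << ns_uid.unpack() << "\n";
    return 0;
}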
@@ -77,30 +77,23 @@ TEST_F(OrchestrationToolsTest, doNothing)
{
}

TEST_F(OrchestrationToolsTest, getClusterId)
TEST_F(OrchestrationToolsTest, setClusterId)
{
EXPECT_CALL(mock_env_details, getToken()).WillOnce(Return("123"));
EXPECT_CALL(mock_env_details, getEnvType()).WillOnce(Return(EnvType::K8S));
I_MainLoop::Routine routine;
EXPECT_CALL(
mock_mainloop,
addOneTimeRoutine(I_MainLoop::RoutineType::Offline, _, "Get k8s cluster ID", _)
).WillOnce(DoAll(SaveArg<1>(&routine), Return(1)));

string namespaces = getResource("k8s_namespaces.json");
EXPECT_CALL(
mock_messaging,
sendSyncMessage(
HTTPMethod::GET,
"/api/v1/namespaces/",
_,
_,
_
mock_shell_cmd,
getExecOutput(
"curl -s -k -H \"Authorization: Bearer 123\" -H \"Connection: close\" "
"https://kubernetes.default.svc:443/api/v1/namespaces/ | /etc/cp/bin/cpnano_json",
200,
false
)
).WillOnce(Return(HTTPResponse(HTTPStatusCode::HTTP_OK, namespaces)));
).WillOnce(Return(namespaces));

i_orchestration_tools->getClusterId();
routine();
i_orchestration_tools->setClusterId();
}

TEST_F(OrchestrationToolsTest, writeReadTextToFile)
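The tests above and below repeatedly use the same Google Mock idiom: capture the routine handed to the main loop with SaveArg, return from the expectation, and invoke the captured routine when the test is ready. Here is a self-contained example of that capture-and-invoke pattern; the scheduler interface and names are illustrative, not the project's I_MainLoop.

#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

using Routine = std::function<void()>;

// Illustrative interface standing in for a main-loop scheduler.
class SchedulerSketch
{
public:
    virtual ~SchedulerSketch() = default;
    virtual int addOneTimeRoutine(const Routine &routine, const std::string &name) = 0;
};

class MockSchedulerSketch : public SchedulerSketch
{
public:
    MOCK_METHOD2(addOneTimeRoutine, int(const Routine &, const std::string &));
};

TEST(SchedulerSketchTest, capturedRoutineRunsOnDemand)
{
    MockSchedulerSketch mock_scheduler;
    Routine captured;

    // Capture the routine argument instead of running it inside the expectation.
    EXPECT_CALL(mock_scheduler, addOneTimeRoutine(testing::_, "cluster id routine"))
        .WillOnce(testing::DoAll(testing::SaveArg<0>(&captured), testing::Return(1)));

    bool ran = false;
    mock_scheduler.addOneTimeRoutine([&ran] () { ran = true; }, "cluster id routine");

    EXPECT_FALSE(ran);   // nothing has executed yet
    captured();          // the test decides when the routine runs
    EXPECT_TRUE(ran);
}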
@@ -24,6 +24,30 @@
using namespace testing;
using namespace std;

string host_address = "1.2.3.5";
string host_url = "https://" + host_address + "/";
Maybe<string> response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);
string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy";
string orchestration_policy_file_path_bk = orchestration_policy_file_path + ".bk";

class ExpectInitializer
{
public:
ExpectInitializer(StrictMock<MockOrchestrationTools> &mock_orchestration_tools)
{
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/.dockerenv")).WillRepeatedly(Return(false));
}
};

class OrchestrationMultitenancyTest : public Test
{
public:
@@ -54,6 +78,11 @@ public:
void
init()
{
EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response));
EXPECT_CALL(mock_status, setFogAddress(host_url)).WillRepeatedly(Return());
EXPECT_CALL(mock_orchestration_tools, setClusterId());

EXPECT_CALL(mock_service_controller, isServiceInstalled("Access Control")).WillRepeatedly(Return(false));

// This Holding the Main Routine of the Orchestration.
@@ -62,8 +91,6 @@ public:
addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Orchestration runner", true)
).WillOnce(DoAll(SaveArg<1>(&routine), Return(1)));

EXPECT_CALL(mock_orchestration_tools, getClusterId());

EXPECT_CALL(mock_shell_cmd, getExecOutput("openssl version -d | cut -d\" \" -f2 | cut -d\"\\\"\" -f2", _, _))
.WillOnce(Return(string("OpenSSL certificates Directory")));

@@ -182,6 +209,7 @@ public:
NiceMock<MockAgenetDetailsReporter> mock_agent_reporter;
NiceMock<MockLogging> mock_log;

ExpectInitializer initializer{mock_orchestration_tools};
OrchestrationComp orchestration_comp;

private:
@@ -209,7 +237,6 @@ TEST_F(OrchestrationMultitenancyTest, init)

TEST_F(OrchestrationMultitenancyTest, handle_virtual_resource)
{
string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy";
string manifest_file_path = "/etc/cp/conf/manifest.json";
string setting_file_path = "/etc/cp/conf/settings.json";
string policy_file_path = "/etc/cp/conf/policy.json";
@@ -237,22 +264,6 @@ TEST_F(OrchestrationMultitenancyTest, handle_virtual_resource)
init();
expectDetailsResolver();

Maybe<string> response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);

EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response));
EXPECT_CALL(mock_message, setFogConnection(host_address, 443, true, MessageCategory::GENERIC))
.WillOnce(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(""));
EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path))
@@ -268,7 +279,11 @@ TEST_F(OrchestrationMultitenancyTest, handle_virtual_resource)
.WillOnce(Return(data_checksum));

EXPECT_CALL(mock_service_controller, getPolicyVersion())
.Times(2).WillRepeatedly(ReturnRef(first_policy_version));
.Times(3).WillRepeatedly(ReturnRef(first_policy_version));

map<string, PortNumber> empty_service_to_port_map;
EXPECT_CALL(mock_service_controller, getServiceToPortMap()).WillRepeatedly(Return(empty_service_to_port_map));

set<string> active_tenants = { "1236", "1235" };
map<string, set<string>> old_tenant_profile_set;

@@ -22,10 +22,35 @@
#include "agent_details.h"
#include "customized_cereal_map.h"
#include "health_check_status/health_check_status.h"
#include "declarative_policy_utils.h"

using namespace testing;
using namespace std;

string host_address = "1.2.3.5";
string host_url = "https://" + host_address + "/";
Maybe<string> response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);
string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy";
string orchestration_policy_file_path_bk = orchestration_policy_file_path + ".bk";

class ExpectInitializer
{
public:
ExpectInitializer(StrictMock<MockOrchestrationTools> &mock_orchestration_tools)
{
EXPECT_CALL(mock_orchestration_tools, doesFileExist("/.dockerenv")).WillRepeatedly(Return(false));
}
};

class OrchestrationTest : public testing::TestWithParam<bool>
{
public:
@@ -48,14 +73,15 @@ public:
void
init()
{
// This Holding the Main Routine of the Orchestration.
EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response));
EXPECT_CALL(mock_status, setFogAddress(host_url)).WillRepeatedly(Return());
EXPECT_CALL(mock_orchestration_tools, setClusterId());
EXPECT_CALL(
mock_ml,
addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Orchestration runner", true)
).WillOnce(DoAll(SaveArg<1>(&routine), Return(1)));

EXPECT_CALL(mock_orchestration_tools, getClusterId());

EXPECT_CALL(
mock_shell_cmd,
getExecOutput("openssl version -d | cut -d\" \" -f2 | cut -d\"\\\"\" -f2", _, _)
@@ -134,6 +160,7 @@ public:
EXPECT_CALL(mock_details_resolver, getPlatform()).WillRepeatedly(Return(string("linux")));
EXPECT_CALL(mock_details_resolver, getArch()).WillRepeatedly(Return(string("x86_64")));
EXPECT_CALL(mock_details_resolver, isReverseProxy()).WillRepeatedly(Return(false));
EXPECT_CALL(mock_details_resolver, isCloudStorageEnabled()).WillRepeatedly(Return(false));
EXPECT_CALL(mock_details_resolver, isKernelVersion3OrHigher()).WillRepeatedly(Return(false));
EXPECT_CALL(mock_details_resolver, isGwNotVsx()).WillRepeatedly(Return(false));
EXPECT_CALL(mock_details_resolver, isVersionAboveR8110()).WillRepeatedly(Return(false));
@@ -228,6 +255,17 @@ public:
routine();
}

void
runRegTokenRoutine()
{
EXPECT_CALL(
mock_ml,
addRecurringRoutine(I_MainLoop::RoutineType::Offline, _, _, "Check For Environment Registration Token", _)
).WillOnce(DoAll(SaveArg<2>(&reg_token_routine), Return(0)));

routine();
}

void
runStatusRoutine()
{
@@ -270,8 +308,6 @@ public:
NiceMock<MockTimeGet> mock_time_get;
::Environment env;
string first_policy_version = "";
string host_address = "1.2.3.5";
string host_url = "https://" + host_address + "/";
ConfigComponent config_comp;
StrictMock<MockEncryptor> mock_encryptor;
NiceMock<MockLogging> mock_log;
@@ -289,9 +325,12 @@ public:
StrictMock<MockDetailsResolver> mock_details_resolver;
NiceMock<MockAgenetDetailsReporter> mock_agent_reporter;
NiceMock<MockTenantManager> tenant_manager;
ExpectInitializer initializer{mock_orchestration_tools};
OrchestrationComp orchestration_comp;
DeclarativePolicyUtils declarative_policy_utils;
AgentDetails agent_details;
I_MainLoop::Routine sending_routine;
I_MainLoop::Routine reg_token_routine;
string message_body;

private:
@@ -312,6 +351,48 @@ private:
I_MainLoop::Routine status_routine;
};

TEST_F(OrchestrationTest, hybridModeRegisterLocalAgentRoutine)
{
EXPECT_CALL(rest, mockRestCall(_, _, _)).WillRepeatedly(Return(true));
Singleton::Consume<Config::I_Config>::from(config_comp)->loadConfiguration(
vector<string>{"--orchestration-mode=hybrid_mode"}
);

preload();
env.init();
init();

EXPECT_CALL(mock_service_controller, updateServiceConfiguration(_, _, _, _, _, _))
.WillOnce(Return(Maybe<void>()));
EXPECT_CALL(mock_orchestration_tools, calculateChecksum(_, _)).WillRepeatedly(Return(string()));
EXPECT_CALL(mock_service_controller, getPolicyVersion()).WillRepeatedly(ReturnRef(first_policy_version));
EXPECT_CALL(mock_shell_cmd, getExecOutput(_, _, _)).WillRepeatedly(Return(string()));
EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
expectDetailsResolver();
EXPECT_CALL(mock_update_communication, getUpdate(_));
EXPECT_CALL(mock_status, setLastUpdateAttempt());
EXPECT_CALL(mock_status, setFieldStatus(_, _, _));
EXPECT_CALL(mock_status, setIsConfigurationUpdated(_));

EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>()))
.WillOnce(Return())
.WillOnce(Invoke([] (chrono::microseconds) { throw invalid_argument("stop while loop"); }));

EXPECT_CALL(
mock_ml,
addOneTimeRoutine(I_MainLoop::RoutineType::Offline, _, "Send registration data", false)
).WillOnce(Return(1));

try {
runRegTokenRoutine();
} catch (const invalid_argument& e) {}

EXPECT_CALL(mock_update_communication, registerLocalAgentToFog());
reg_token_routine();
}

TEST_F(OrchestrationTest, testAgentUninstallRest)
{
EXPECT_CALL(
@@ -490,27 +571,12 @@ TEST_F(OrchestrationTest, check_sending_registration_data)
env.init();
init();

EXPECT_CALL(mock_orchestration_tools, doesFileExist(_)).WillOnce(Return(false));
Maybe<string> response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);
EXPECT_CALL(mock_orchestration_tools, readFile(_)).WillOnce(Return(response));
EXPECT_CALL(mock_service_controller, updateServiceConfiguration(_, _, _, _, _, _))
.WillOnce(Return(Maybe<void>()));
EXPECT_CALL(mock_message, setFogConnection(_, _, _, _)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, calculateChecksum(_, _)).WillRepeatedly(Return(string()));
EXPECT_CALL(mock_service_controller, getPolicyVersion()).WillRepeatedly(ReturnRef(first_policy_version));
EXPECT_CALL(mock_shell_cmd, getExecOutput(_, _, _)).WillRepeatedly(Return(string()));
EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(_));
EXPECT_CALL(mock_status, setFogAddress(_));
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
expectDetailsResolver();
EXPECT_CALL(mock_update_communication, getUpdate(_));
@@ -554,7 +620,6 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdatRollback)
rest,
mockRestCall(RestAction::ADD, "proxy", _)
).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler)));
EXPECT_CALL(mock_status, setFogAddress(host_url)).Times(2);

string config_json =
"{\n"
@@ -592,17 +657,6 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdatRollback)
string second_val = "12";
string third_val = "13";

Maybe<string> policy_response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);

Maybe<string> new_policy_response(
string(
"{\n"
@@ -618,20 +672,21 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdatRollback)
EXPECT_CALL(mock_service_controller, mockMoveChangedPolicies()).WillOnce(Return(expected_changed_policies));

EXPECT_CALL(mock_status, setFogAddress(new_host_url));
EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path))
.WillOnce(Return(true))
.WillOnce(Return(true));
// Rollback related test: The readFile function is called 3 times:
// 1. Read the current policy file
// 2. Read the new policy file - The one that should fail
// 3. Read the current policy file again - The one that should be restored
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path))
.WillOnce(Return(policy_response))
.WillOnce(Return(new_policy_response))
.WillOnce(Return(policy_response));
.WillOnce(Return(response));
EXPECT_CALL(mock_orchestration_tools, copyFile(new_policy_path, policy_file_path + ".last"))
.WillOnce(Return(true));
EXPECT_CALL(mock_message, setFogConnection(host_address, 443, true, MessageCategory::GENERIC))
.Times(2).WillRepeatedly(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion("")).Times(2);
.WillOnce(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(""));
EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
expectDetailsResolver();
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
@@ -649,7 +704,8 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdatRollback)

// Rollback related test: After failing to update the policy file, the policy version should be restored
EXPECT_CALL(mock_service_controller, getPolicyVersion())
.Times(5)
.Times(6)
.WillOnce(ReturnRef(first_policy_version))
.WillOnce(ReturnRef(first_policy_version))
.WillOnce(ReturnRef(first_policy_version))
.WillOnce(ReturnRef(second_val))
@@ -682,7 +738,7 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdatRollback)
GetResourceFile policy_file(GetResourceFile::ResourceFileType::POLICY);
EXPECT_CALL(
mock_downloader,
downloadFileFromFog(new_policy_checksum, Package::ChecksumTypes::SHA256, policy_file)
downloadFile(new_policy_checksum, Package::ChecksumTypes::SHA256, policy_file)
).WillOnce(Return(Maybe<std::string>(new_policy_path)));

vector<string> expected_data_types = {};
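The rollback test above encodes the intended flow in its readFile expectations: load the current policy, attempt the new one, and on failure read the current policy again so it can be restored. Below is a generic restore-on-failure sketch of that idea; the helper functions and file names are assumptions for illustration and do not reproduce the project's actual update code.

#include <fstream>
#include <iostream>
#include <string>

// Illustrative helper: byte-for-byte file copy.
static bool copyFileSketch(const std::string &src, const std::string &dst)
{
    std::ifstream in(src, std::ios::binary);
    std::ofstream out(dst, std::ios::binary);
    if (!in || !out) return false;
    out << in.rdbuf();
    return static_cast<bool>(out);
}

// Placeholder for "load and validate the policy"; returns false here to force a rollback.
static bool loadPolicySketch(const std::string &path)
{
    std::ifstream in(path);
    return in.good() && false;
}

static bool
applyNewPolicy(const std::string &current_policy, const std::string &new_policy)
{
    const std::string backup = current_policy + ".last";
    if (!copyFileSketch(current_policy, backup)) return false;   // keep a restore point
    if (!copyFileSketch(new_policy, current_policy)) return false;

    if (loadPolicySketch(current_policy)) return true;           // new policy accepted

    // Loading failed: restore the previous policy, as the rollback comment describes.
    copyFileSketch(backup, current_policy);
    return false;
}

int main()
{
    std::ofstream("policy.json") << "{ \"version\": \"11\" }";
    std::ofstream("policy.json.download") << "{ \"version\": \"12\" }";
    std::cout << (applyNewPolicy("policy.json", "policy.json.download") ? "updated" : "rolled back") << "\n";
    return 0;
}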
@@ -772,7 +828,6 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdate)
rest,
mockRestCall(RestAction::ADD, "proxy", _)
).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler)));
EXPECT_CALL(mock_status, setFogAddress(host_url));

init();

@@ -796,17 +851,6 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdate)
string second_val = "12";
string third_val = "13";

Maybe<string> policy_response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);

Maybe<string> new_policy_response(
string(
"{\n"
@@ -824,13 +868,9 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdate)
EXPECT_CALL(mock_status, setFogAddress(new_host_url));
EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path))
.WillOnce(Return(policy_response))
.WillOnce(Return(new_policy_response));
EXPECT_CALL(mock_orchestration_tools, copyFile(new_policy_path, policy_file_path + ".last"))
.WillOnce(Return(true));
EXPECT_CALL(mock_message, setFogConnection(host_address, 443, true, MessageCategory::GENERIC))
.WillOnce(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(""));
EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
expectDetailsResolver();
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
@@ -847,7 +887,8 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdate)
.WillOnce(Return(data_checksum));

EXPECT_CALL(mock_service_controller, getPolicyVersion())
.Times(4)
.Times(5)
.WillOnce(ReturnRef(first_policy_version))
.WillOnce(ReturnRef(first_policy_version))
.WillOnce(ReturnRef(first_policy_version))
.WillOnce(ReturnRef(second_val))
@@ -876,7 +917,7 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdate)
GetResourceFile policy_file(GetResourceFile::ResourceFileType::POLICY);
EXPECT_CALL(
mock_downloader,
downloadFileFromFog(new_policy_checksum, Package::ChecksumTypes::SHA256, policy_file)
downloadFile(new_policy_checksum, Package::ChecksumTypes::SHA256, policy_file)
).WillOnce(Return(Maybe<std::string>(new_policy_path)));

vector<string> expected_data_types = {};
@@ -939,178 +980,52 @@ TEST_F(OrchestrationTest, orchestrationPolicyUpdate)
} catch (const invalid_argument& e) {}
}
TEST_F(OrchestrationTest, startOrchestrationPoliceWithFailures)
{
waitForRestCall();
preload();
Maybe<string> msg_err = genError("Failed to send message");
EXPECT_CALL(mock_status, setFogAddress(host_url));
EXPECT_CALL(
rest,
mockRestCall(RestAction::ADD, "proxy", _)
).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler)));
init();
string orchestration_policy_file_path = getPolicyConfigPath("orchestration", Config::ConfigFileType::Policy);
string orchestration_policy_file_path_bk = orchestration_policy_file_path + ".bk";
string manifest_file_path = "/etc/cp/conf/manifest.json";
string setting_file_path = "/etc/cp/conf/settings.json";
string policy_file_path = "/etc/cp/conf/policy.json";
string last_policy_file_path = "/etc/cp/conf/policy.json.last";
string data_file_path = "/etc/cp/conf/data.json";

string host_address = "1.2.3.5";
string manifest_checksum = "manifest";
string policy_checksum = "policy";
string settings_checksum = "settings";
string data_checksum = "data";

Maybe<string> response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);
EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path))
.WillOnce(Return(Maybe<string>(genError("Failed"))))
.WillOnce(Return(response));

EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path_bk)).WillOnce(
Return(Maybe<string>(genError("Failed")))
);

vector<string> expected_data_types = {};
EXPECT_CALL(
mock_service_controller,
updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "", "", _)
).Times(2).WillRepeatedly(Return(Maybe<void>()));

EXPECT_CALL(mock_message, setFogConnection(host_address, 443, true, MessageCategory::GENERIC))
.WillOnce(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(""));
EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
expectDetailsResolver();
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path))
.WillOnce(Return(manifest_checksum));

EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path))
.WillOnce(Return(settings_checksum));

EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path))
.WillOnce(Return(policy_checksum));

EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path))
.WillOnce(Return(data_checksum));

EXPECT_CALL(mock_service_controller, getPolicyVersion())
.Times(2).WillRepeatedly(ReturnRef(first_policy_version));

EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce(
Invoke(
[&](CheckUpdateRequest &req)
{
EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum));
EXPECT_THAT(req.getSettings(), IsValue(settings_checksum));
EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum));
EXPECT_THAT(req.getData(), IsValue(data_checksum));
req = CheckUpdateRequest("", "", "", "", "", "");
return Maybe<void>();
}
)
);

EXPECT_CALL(mock_status, setLastUpdateAttempt());
EXPECT_CALL(
mock_status,
setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "")
);

EXPECT_CALL(mock_status, setIsConfigurationUpdated(A<EnumArray<OrchestrationStatusConfigType, bool>>())
).WillOnce(
Invoke(
[](EnumArray<OrchestrationStatusConfigType, bool> arr)
{
EXPECT_EQ(arr[OrchestrationStatusConfigType::MANIFEST], false);
EXPECT_EQ(arr[OrchestrationStatusConfigType::POLICY], false);
EXPECT_EQ(arr[OrchestrationStatusConfigType::SETTINGS], false);
}
)
);

EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>()))
.WillOnce(
Invoke(
[] (chrono::microseconds microseconds)
{
EXPECT_EQ(1000000, microseconds.count());
}
)
)
.WillOnce(
Invoke(
[] (chrono::microseconds microseconds)
{
EXPECT_EQ(25000000, microseconds.count());
throw invalid_argument("stop while loop");
}
)
);
EXPECT_CALL(
mock_shell_cmd,
getExecOutput(_, _, _)
).WillRepeatedly(Return(string("daniel\n1\n")));
try {
runRoutine();
} catch (const invalid_argument& e) {}
}

TEST_F(OrchestrationTest, loadOrchestrationPolicyFromBackup)
{
EXPECT_CALL(
rest,
mockRestCall(RestAction::ADD, "proxy", _)
).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler)));
);
waitForRestCall();
init();
string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy";
string orchestration_policy_file_path_bk = orchestration_policy_file_path + ".bk";
string manifest_file_path = "/etc/cp/conf/manifest.json";
string setting_file_path = "/etc/cp/conf/settings.json";
string policy_file_path = "/etc/cp/conf/policy.json";
string last_policy_file_path = "/etc/cp/conf/policy.json.last";
string data_file_path = "/etc/cp/conf/data.json";

string host_address = "1.2.3.5";
string manifest_checksum = "manifest";
string policy_checksum = "policy";
string settings_checksum = "settings";
string data_checksum = "data";
EXPECT_CALL(
mock_ml,
addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Orchestration runner", true)
);

Maybe<string> response(
string(
"{\n"
" \"fog-address\": \"https://1.2.3.5/\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
EXPECT_CALL(
mock_shell_cmd,
getExecOutput("openssl version -d | cut -d\" \" -f2 | cut -d\"\\\"\" -f2", _, _)
).WillOnce(Return(string("OpenSSL certificates Directory")));

EXPECT_CALL(mock_service_controller, isServiceInstalled("Access Control")).WillRepeatedly(
InvokeWithoutArgs(
[]()
{
static int count = 0;
if (count > 0) return false;
count++;
return true;
}
)
);

EXPECT_CALL(mock_status, setFogAddress(host_url));
map<string, PortNumber> empty_service_to_port_map;
EXPECT_CALL(mock_service_controller, getServiceToPortMap()).WillRepeatedly(Return(empty_service_to_port_map));

EXPECT_CALL(rest, mockRestCall(RestAction::SHOW, "orchestration-status", _));

vector<string> expected_data_types = {};
EXPECT_CALL(
mock_service_controller,
updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "", "", _)
).WillOnce(Return(Maybe<void>()));
rest,
mockRestCall(RestAction::SET, "agent-uninstall", _)
);

doEncrypt();
EXPECT_CALL(mock_orchestration_tools, loadTenantsFromDir(_)).Times(1);

EXPECT_CALL(mock_status, setFogAddress(host_url));
EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, setClusterId());
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path))
.WillOnce(Return(Maybe<string>(genError("Failed"))));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path_bk)).WillOnce(Return(response));
@@ -1118,81 +1033,8 @@ TEST_F(OrchestrationTest, loadOrchestrationPolicyFromBackup)
mock_orchestration_tools,
copyFile(orchestration_policy_file_path_bk, orchestration_policy_file_path)
).WillOnce(Return(true));
EXPECT_CALL(mock_message, setFogConnection(host_address, 443, true, MessageCategory::GENERIC))
.WillOnce(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(""));
EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
expectDetailsResolver();
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path))
.WillOnce(Return(manifest_checksum));

EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path))
.WillOnce(Return(settings_checksum));

EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path))
.WillOnce(Return(policy_checksum));

EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path))
.WillOnce(Return(data_checksum));

EXPECT_CALL(mock_service_controller, getPolicyVersion())
.Times(2).WillRepeatedly(ReturnRef(first_policy_version));
EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce(
Invoke(
[&](CheckUpdateRequest &req)
{
EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum));
EXPECT_THAT(req.getSettings(), IsValue(settings_checksum));
EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum));
EXPECT_THAT(req.getData(), IsValue(data_checksum));
req = CheckUpdateRequest("", "", "", "", "", "");
return Maybe<void>();
}
)
);

EXPECT_CALL(mock_status, setLastUpdateAttempt());
EXPECT_CALL(
mock_status,
setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "")
);
EXPECT_CALL(mock_status, setIsConfigurationUpdated(A<EnumArray<OrchestrationStatusConfigType, bool>>())
).WillOnce(
Invoke(
[](EnumArray<OrchestrationStatusConfigType, bool> arr)
{
EXPECT_EQ(arr[OrchestrationStatusConfigType::MANIFEST], false);
EXPECT_EQ(arr[OrchestrationStatusConfigType::POLICY], false);
EXPECT_EQ(arr[OrchestrationStatusConfigType::SETTINGS], false);
}
)
);
EXPECT_CALL(mock_ml, yield(A<chrono::microseconds>()))
.WillOnce(
Invoke(
[] (chrono::microseconds microseconds)
{
EXPECT_EQ(1000000, microseconds.count());
}
)
)
.WillOnce(
Invoke(
[] (chrono::microseconds microseconds)
{
EXPECT_EQ(25000000, microseconds.count());
throw invalid_argument("stop while loop");
}
)
);
EXPECT_CALL(
mock_shell_cmd,
getExecOutput(_, _, _)
).WillRepeatedly(Return(string("daniel\n1\n")));
try {
runRoutine();
} catch (const invalid_argument& e) {}
orchestration_comp.init();
}
TEST_F(OrchestrationTest, newServicePolicyUpdate)
@@ -1213,7 +1055,6 @@ TEST_F(OrchestrationTest, manifestUpdate)
).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler)));
waitForRestCall();
init();
string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy";
string manifest_file_path = "/etc/cp/conf/manifest.json";
string setting_file_path = "/etc/cp/conf/settings.json";
string policy_file_path = "/etc/cp/conf/policy.json";
@@ -1226,30 +1067,12 @@ TEST_F(OrchestrationTest, manifestUpdate)
string settings_checksum= "settings";
string data_checksum = "data";

EXPECT_CALL(mock_status, setFogAddress(host_url));

Maybe<string> response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);

vector<string> expected_data_types = {};
EXPECT_CALL(
mock_service_controller,
updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "", "", _)
).WillOnce(Return(Maybe<void>()));

EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response));
EXPECT_CALL(mock_message, setFogConnection(host_address, 443, true, MessageCategory::GENERIC))
.WillOnce(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(""));
EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
expectDetailsResolver();
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
@@ -1266,7 +1089,7 @@ TEST_F(OrchestrationTest, manifestUpdate)
.WillOnce(Return(data_checksum));

EXPECT_CALL(mock_service_controller, getPolicyVersion())
.Times(2).WillRepeatedly(ReturnRef(first_policy_version));
.Times(3).WillRepeatedly(ReturnRef(first_policy_version));
EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce(
Invoke(
[&](CheckUpdateRequest &req)
@@ -1304,7 +1127,7 @@ TEST_F(OrchestrationTest, manifestUpdate)

GetResourceFile manifest_file(GetResourceFile::ResourceFileType::MANIFEST);
EXPECT_CALL(mock_downloader,
downloadFileFromFog(
downloadFile(
string("new check sum"),
Package::ChecksumTypes::SHA256,
manifest_file
@@ -1346,7 +1169,6 @@ TEST_F(OrchestrationTest, getBadPolicyUpdate)
).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler)));
waitForRestCall();
init();
string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy";
string manifest_file_path = "/etc/cp/conf/manifest.json";
string setting_file_path = "/etc/cp/conf/settings.json";
string policy_file_path = "/etc/cp/conf/policy.json";
@@ -1359,18 +1181,6 @@ TEST_F(OrchestrationTest, getBadPolicyUpdate)
string settings_checksum = "settings";
string data_checksum = "data";

Maybe<string> response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);
EXPECT_CALL(mock_status, setFogAddress(host_url));

vector<string> expected_data_types = {};

EXPECT_CALL(
@@ -1380,13 +1190,9 @@ TEST_F(OrchestrationTest, getBadPolicyUpdate)

set<string> expected_changed_policies = {};
EXPECT_CALL(mock_service_controller, mockMoveChangedPolicies()).WillOnce(Return(expected_changed_policies));
EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response));

EXPECT_CALL(mock_orchestration_tools, copyFile(new_policy_path, policy_file_path + ".last"))
.WillOnce(Return(true));
EXPECT_CALL(mock_message, setFogConnection(host_address, 443, true, MessageCategory::GENERIC))
.WillOnce(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(""));

EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
expectDetailsResolver();
@@ -1408,7 +1214,7 @@ TEST_F(OrchestrationTest, getBadPolicyUpdate)
GetResourceFile policy_file(GetResourceFile::ResourceFileType::POLICY);
EXPECT_CALL(
mock_downloader,
downloadFileFromFog(
downloadFile(
string("111111"),
Package::ChecksumTypes::SHA256,
policy_file
@@ -1421,7 +1227,8 @@ TEST_F(OrchestrationTest, getBadPolicyUpdate)
string second_val = "12";
string third_val = "13";
EXPECT_CALL(mock_service_controller, getPolicyVersion())
.Times(3)
.Times(4)
.WillOnce(ReturnRef(first_policy_version))
.WillOnce(ReturnRef(first_policy_version))
.WillOnce(ReturnRef(first_policy_version))
.WillOnce(ReturnRef(second_val)
@@ -1456,7 +1263,7 @@ TEST_F(OrchestrationTest, getBadPolicyUpdate)
)
);

EXPECT_CALL(mock_service_controller, getUpdatePolicyVersion()).Times(1).WillOnce(ReturnRef(third_val));
EXPECT_CALL(mock_service_controller, getUpdatePolicyVersion()).WillRepeatedly(ReturnRef(third_val));

EXPECT_CALL(
mock_service_controller,
@@ -1498,7 +1305,6 @@ TEST_F(OrchestrationTest, failedDownloadSettings)
).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler)));
waitForRestCall();
init();
string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy";
string manifest_file_path = "/etc/cp/conf/manifest.json";
string setting_file_path = "/etc/cp/conf/settings.json";
string policy_file_path = "/etc/cp/conf/policy.json";
@@ -1511,30 +1317,12 @@ TEST_F(OrchestrationTest, failedDownloadSettings)
string settings_checksum = "settings-checksum";
string data_checksum = "data";

Maybe<string> response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);
EXPECT_CALL(mock_status, setFogAddress(host_url));

vector<string> expected_data_types = {};
EXPECT_CALL(
mock_service_controller,
updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "", "", _)
).WillOnce(Return(Maybe<void>()));

EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response));
EXPECT_CALL(mock_message, setFogConnection(host_address, 443, true, MessageCategory::GENERIC))
.WillOnce(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(""));

EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
expectDetailsResolver();
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
@@ -1553,7 +1341,7 @@ TEST_F(OrchestrationTest, failedDownloadSettings)
Maybe<string> new_policy_checksum(string("111111"));

EXPECT_CALL(mock_service_controller, getPolicyVersion())
.Times(2).WillRepeatedly(ReturnRef(first_policy_version));
.Times(3).WillRepeatedly(ReturnRef(first_policy_version));
EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce(
Invoke(
[&](CheckUpdateRequest &req)
@@ -1603,14 +1391,14 @@ TEST_F(OrchestrationTest, failedDownloadSettings)
GetResourceFile manifest_file(GetResourceFile::ResourceFileType::MANIFEST);

EXPECT_CALL(mock_downloader,
downloadFileFromFog(
downloadFile(
string("manifest-checksum"),
Package::ChecksumTypes::SHA256,
manifest_file
)
).WillOnce(Return(download_error));
EXPECT_CALL(mock_downloader,
downloadFileFromFog(
downloadFile(
string("settings-checksum"),
Package::ChecksumTypes::SHA256,
settings_file
@@ -1652,7 +1440,6 @@ TEST_P(OrchestrationTest, orchestrationFirstRun)
).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler)));
waitForRestCall();
init();
string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy";
string manifest_file_path = "/etc/cp/conf/manifest.json";
string setting_file_path = "/etc/cp/conf/settings.json";
string policy_file_path = "/etc/cp/conf/policy.json";
@@ -1669,26 +1456,6 @@ TEST_P(OrchestrationTest, orchestrationFirstRun)
string policy = "";
string setting = "";

Maybe<string> response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);

EXPECT_CALL(mock_status, setFogAddress(host_url));
EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(false));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path))
.WillOnce(Return(response));
EXPECT_CALL(mock_message, setFogConnection(host_address, 443, true, MessageCategory::GENERIC)).
Times(1).
WillRepeatedly(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(""));

EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
expectDetailsResolver();
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(true));
@@ -1721,7 +1488,7 @@ TEST_P(OrchestrationTest, orchestrationFirstRun)
}
)
);
EXPECT_CALL(mock_service_controller, getPolicyVersion()).WillOnce(ReturnRef(first_policy_version));
EXPECT_CALL(mock_service_controller, getPolicyVersion()).WillRepeatedly(ReturnRef(first_policy_version));
EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce(
Invoke(
[&](CheckUpdateRequest &req)
@@ -1854,7 +1621,6 @@ TEST_F(OrchestrationTest, set_proxy)
mockRestCall(RestAction::ADD, "proxy", _)
).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler)));
waitForRestCall();

init();
stringstream is;
string proxy_url = "http://some-proxy.com:8080";
@@ -1873,7 +1639,7 @@ TEST_F(OrchestrationTest, dataUpdate)
).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler)));
waitForRestCall();
init();
string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy";

string manifest_file_path = "/etc/cp/conf/manifest.json";
string setting_file_path = "/etc/cp/conf/settings.json";
string policy_file_path = "/etc/cp/conf/policy.json";
@@ -1890,19 +1656,6 @@ TEST_F(OrchestrationTest, dataUpdate)
string data_checksum_type = "sha1sum";
string data_instance_checksum = "8d4a5709673a05b380ba7d6567e28910019118f5";

EXPECT_CALL(mock_status, setFogAddress(host_url));

Maybe<string> policy_response(
string(
"{\n"
" \"fog-address\": \"" + host_url + "\",\n"
" \"agent-type\": \"test\",\n"
" \"pulling-interval\": 25,\n"
" \"error-pulling-interval\": 15\n"
"}"
)
);

Maybe<string> data_response(
string(
"{\n"
@@ -1929,14 +1682,8 @@ TEST_F(OrchestrationTest, dataUpdate)
).After(expectation_set).WillOnce(Return(Maybe<void>()));

EXPECT_CALL(mock_orchestration_tools, doesDirectoryExist("/etc/cp/conf/data")).WillOnce(Return(true));
EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(policy_response));
EXPECT_CALL(mock_orchestration_tools, readFile(data_file_path + ".download")).WillOnce(Return(data_response));

EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true));
EXPECT_CALL(mock_message, setFogConnection(host_address, 443, true, MessageCategory::GENERIC))
.WillOnce(Return(true));
EXPECT_CALL(mock_update_communication, setAddressExtenesion(""));
EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe<void>()));
EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false));
expectDetailsResolver();
@@ -1955,8 +1702,7 @@ TEST_F(OrchestrationTest, dataUpdate)
EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, "/path/ips"))
.WillOnce(Return(data_instance_checksum));

EXPECT_CALL(mock_service_controller, getPolicyVersion())
.Times(2).WillRepeatedly(ReturnRef(first_policy_version));
EXPECT_CALL(mock_service_controller, getPolicyVersion()).WillRepeatedly(ReturnRef(first_policy_version));
EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce(
Invoke(
[&](CheckUpdateRequest &req)
@@ -2011,7 +1757,7 @@ TEST_F(OrchestrationTest, dataUpdate)
string new_data_file_path = data_file_path + ".download";
GetResourceFile data_file(GetResourceFile::ResourceFileType::DATA);
EXPECT_CALL(mock_downloader,
downloadFileFromFog(
downloadFile(
string("new data"),
Package::ChecksumTypes::SHA256,
data_file
@@ -109,6 +109,10 @@ packageHandlerActionsToString(PackageHandlerActions action)
installation_mode += " --certs-dir ";
installation_mode += trusted_ca_directory.unpack();
}

auto maybe_vs_id = Singleton::Consume<I_Environment>::by<PackageHandler>()->get<string>("VS ID");
if (maybe_vs_id.ok()) installation_mode += " --vs_id " + *maybe_vs_id;

AdditionalFlagsConfiguration additional_flags = getConfigurationWithDefault<AdditionalFlagsConfiguration>(
AdditionalFlagsConfiguration(),
"orchestration",

@@ -33,7 +33,7 @@
using namespace std;
using namespace ReportIS;

USE_DEBUG_FLAG(D_ORCHESTRATOR);
USE_DEBUG_FLAG(D_SERVICE_CONTROLLER);

class SendConfigurations : public ClientRest
{
@@ -56,7 +56,7 @@ public:
auto service_controller = Singleton::Consume<I_ServiceController>::by<ServiceReconfStatusMonitor>();
if (!finished.get()) {
service_controller->updateReconfStatus(id.get(), service_name.get(), ReconfStatus::IN_PROGRESS);
dbgTrace(D_ORCHESTRATOR)
dbgTrace(D_SERVICE_CONTROLLER)
<< "Request for service reconfiguration is still in progress. ID: "
<< id.get()
<< ", Service Name: "
@@ -65,7 +65,7 @@ public:
}
if (error.get()) {
service_controller->updateReconfStatus(id.get(), service_name.get(), ReconfStatus::FAILED);
dbgError(D_ORCHESTRATOR)
dbgError(D_SERVICE_CONTROLLER)
<< "Request for service reconfiguration failed to complete. ID: "
<< id.get()
<< ", Service Name: "
@@ -75,7 +75,7 @@ public:
return;
}
service_controller->updateReconfStatus(id.get(), service_name.get(), ReconfStatus::SUCCEEDED);
dbgInfo(D_ORCHESTRATOR)
dbgInfo(D_SERVICE_CONTROLLER)
<< "Request for service reconfiguration successfully accomplished. Reconf ID: "
<< id.get()
<< ", Service Name: "
@@ -112,7 +112,7 @@ ServiceDetails::isServiceActive() const
}
}

dbgDebug(D_ORCHESTRATOR)
dbgDebug(D_SERVICE_CONTROLLER)
<< "Executing service status check via watchdog api. Service name: "
<< service_name
<< ", Watchdog command: "
@@ -133,7 +133,7 @@ ServiceDetails::isServiceActive() const
for (int current_attempt = 0; current_attempt < max_retry_attempts; ++current_attempt) {
if (service_status.ok() || service_status.getErr().find("Reached timeout") == string::npos) break;

dbgWarning(D_ORCHESTRATOR)
dbgWarning(D_SERVICE_CONTROLLER)
<< "Retrying to execute service status check via watchdog API after getting timeout. Service name: "
<< service_name
<< ", Watchdog command: "
@@ -146,7 +146,7 @@ ServiceDetails::isServiceActive() const
}

if (!service_status.ok()) {
dbgWarning(D_ORCHESTRATOR)
dbgWarning(D_SERVICE_CONTROLLER)
<< "Changing service status to inactive after failure to its status from watchdog. Service name: "
<< service_name
<< ", Watchdog output: "
@@ -154,7 +154,7 @@ ServiceDetails::isServiceActive() const
return false;
}

dbgDebug(D_ORCHESTRATOR)
dbgDebug(D_SERVICE_CONTROLLER)
<< "Successfully retrieved service status from watchdog. Service name: "
<< service_name
<< ", Watchdog output: "
@@ -166,7 +166,7 @@ ServiceDetails::isServiceActive() const
bool is_registered = status.find("not-registered") == string::npos && status.find("registered") != string::npos;
bool is_running = status.find("not-running") == string::npos && status.find("running") != string::npos;

dbgTrace(D_ORCHESTRATOR)
dbgTrace(D_SERVICE_CONTROLLER)
<< "Successfully set service status. Service name: "
<< service_name
<< ", Status: "
@@ -189,7 +189,7 @@ ReconfStatus
ServiceDetails::sendNewConfigurations(int configuration_id, const string &policy_version)
{
if(!isServiceActive()) {
dbgDebug(D_ORCHESTRATOR) << "Service " << service_name << " is inactive";
dbgDebug(D_SERVICE_CONTROLLER) << "Service " << service_name << " is inactive";
return ReconfStatus::INACTIVE;
}

@@ -210,7 +210,7 @@ ServiceDetails::sendNewConfigurations(int configuration_id, const string &policy

if (!res.ok()) {
auto err = res.getErr();
dbgDebug(D_ORCHESTRATOR)
dbgDebug(D_SERVICE_CONTROLLER)
<< "Service: "
<< service_name
<< " didn't get new configuration. Error: "
@@ -223,7 +223,7 @@ ServiceDetails::sendNewConfigurations(int configuration_id, const string &policy
if (new_config.finished.get()) {
if (!new_config.error.get()) {
service_details->startReconfStatus(new_config.id.get(), ReconfStatus::SUCCEEDED, service_name, service_id);
dbgDebug(D_ORCHESTRATOR) << "Loading service configuration succeeded for service " << service_name;
dbgDebug(D_SERVICE_CONTROLLER) << "Loading service configuration succeeded for service " << service_name;
|
||||
return ReconfStatus::SUCCEEDED;
|
||||
} else {
|
||||
string log_name = "Agent could not update policy to version " +
|
||||
@@ -241,7 +241,7 @@ ServiceDetails::sendNewConfigurations(int configuration_id, const string &policy
|
||||
<< LogField("policyVersion", service_details->getPolicyVersion());
|
||||
|
||||
service_details->startReconfStatus(new_config.id.get(), ReconfStatus::FAILED, service_name, service_id);
|
||||
dbgDebug(D_ORCHESTRATOR)
|
||||
dbgDebug(D_SERVICE_CONTROLLER)
|
||||
<< "Loading service configuration failed for service "
|
||||
<< service_name
|
||||
<< " with error: "
|
||||
@@ -249,7 +249,7 @@ ServiceDetails::sendNewConfigurations(int configuration_id, const string &policy
|
||||
return ReconfStatus::FAILED;
|
||||
}
|
||||
}
|
||||
dbgDebug(D_ORCHESTRATOR) << "Loading service configuration is in progress for service: " << service_name;
|
||||
dbgDebug(D_SERVICE_CONTROLLER) << "Loading service configuration is in progress for service: " << service_name;
|
||||
service_details->startReconfStatus(new_config.id.get(), ReconfStatus::IN_PROGRESS, service_name, service_id);
|
||||
return ReconfStatus::IN_PROGRESS;
|
||||
}
|
||||
@@ -257,7 +257,7 @@ ServiceDetails::sendNewConfigurations(int configuration_id, const string &policy
|
||||
void
|
||||
SetNanoServiceConfig::doCall()
|
||||
{
|
||||
dbgFlow(D_ORCHESTRATOR)
|
||||
dbgFlow(D_SERVICE_CONTROLLER)
|
||||
<< "Received registration request from service. Service name: "
|
||||
<< service_name.get()
|
||||
<< ", service listening port: "
|
||||
@@ -402,12 +402,12 @@ ServiceController::Impl::getUpdatedReconfStatus()
auto maybe_service = getServiceDetails(service_id);

if (!maybe_service.ok()) {
dbgWarning(D_ORCHESTRATOR) << "Unable to get service details. Error: " << maybe_service.getErr();
dbgWarning(D_SERVICE_CONTROLLER) << "Unable to get service details. Error: " << maybe_service.getErr();
continue;
}

if (!maybe_service.unpack().isServiceActive()) {
dbgInfo(D_ORCHESTRATOR)
dbgInfo(D_SERVICE_CONTROLLER)
<< "Service is not active, removing from registered services list. Service: "
<< services_reconf_names[service_and_reconf_status.first]
<< "ID: "
@@ -490,7 +490,7 @@ ServiceController::Impl::loadRegisteredServicesFromFile()
auto maybe_registered_services_str = Singleton::Consume<I_OrchestrationTools>::by<ServiceController::Impl>()->
readFile(registered_services_file);
if (!maybe_registered_services_str.ok()) {
dbgTrace(D_ORCHESTRATOR)
dbgTrace(D_SERVICE_CONTROLLER)
<< "could not read file. File: "
<< registered_services_file
<< " Error: " << maybe_registered_services_str.getErr();
@@ -501,7 +501,7 @@ ServiceController::Impl::loadRegisteredServicesFromFile()
cereal::JSONInputArchive ar(ss);
ar(cereal::make_nvp("Registered Services", pending_services));

dbgInfo(D_ORCHESTRATOR)
dbgInfo(D_SERVICE_CONTROLLER)
<< "Orchestration pending services loaded from file."
<< " File: "
<< registered_services_file
@@ -509,7 +509,7 @@ ServiceController::Impl::loadRegisteredServicesFromFile()

for (const auto &id_service_pair : pending_services) {
const auto &service = id_service_pair.second;
dbgInfo(D_ORCHESTRATOR)
dbgInfo(D_SERVICE_CONTROLLER)
<< "Service name: "
<< service.getServiceName()
<< ", Service ID: "
@@ -522,7 +522,7 @@ ServiceController::Impl::loadRegisteredServicesFromFile()
void
ServiceController::Impl::writeRegisteredServicesToFile()
{
dbgFlow(D_ORCHESTRATOR);
dbgFlow(D_SERVICE_CONTROLLER);
auto registered_services_file = getConfigurationWithDefault<string>(
filesystem_prefix + "/conf/orchestrations_registered_services.json",
"orchestration",
@@ -533,14 +533,14 @@ ServiceController::Impl::writeRegisteredServicesToFile()
cereal::JSONOutputArchive ar(ss);
ar(cereal::make_nvp("Registered Services", registered_services));

dbgInfo(D_ORCHESTRATOR)
dbgInfo(D_SERVICE_CONTROLLER)
<< "Orchestration registered services file has been updated. File: "
<< registered_services_file
<< ". Registered Services:";

for (const auto &id_service_pair : registered_services) {
const auto &service = id_service_pair.second;
dbgInfo(D_ORCHESTRATOR)
dbgInfo(D_SERVICE_CONTROLLER)
<< "Service name: "
<< service.getServiceName()
<< ", Service ID: "
@@ -626,6 +626,7 @@ ServiceController::Impl::registerServiceConfig(

pending_services.erase(service_config.getServiceID());
pending_services.insert({service_config.getServiceID(), service_config});
refreshPendingServices();
}

bool
@@ -639,12 +640,12 @@ ServiceController::Impl::isServiceInstalled(const string &service_name)
void
ServiceController::Impl::refreshPendingServices()
{
dbgFlow(D_ORCHESTRATOR);
dbgFlow(D_SERVICE_CONTROLLER);
if (pending_services.empty()) return;
for (const auto &service : pending_services) {
registered_services.erase(service.first);
registered_services.insert({service.first, service.second});
dbgDebug(D_ORCHESTRATOR) << "Successfully registered service. Name: " << service.first;
dbgDebug(D_SERVICE_CONTROLLER) << "Successfully registered service. Name: " << service.first;
}
pending_services.clear();

@@ -659,7 +660,7 @@ ServiceController::Impl::backupConfigurationFile(const string &config_file_path)
string backup_file = config_file_path + backup_ext;

if (!orchestration_tools->doesFileExist(config_file_path)) {
dbgTrace(D_ORCHESTRATOR) << "File does not exist. File: " << config_file_path;
dbgTrace(D_SERVICE_CONTROLLER) << "File does not exist. File: " << config_file_path;
return true;
}

@@ -670,7 +671,7 @@ ServiceController::Impl::backupConfigurationFile(const string &config_file_path)
mainloop->yield(false);
}

dbgWarning(D_ORCHESTRATOR) << "Failed to back up the file. File: " << config_file_path;
dbgWarning(D_SERVICE_CONTROLLER) << "Failed to back up the file. File: " << config_file_path;
return false;
}

@@ -692,35 +693,15 @@ ServiceController::Impl::createDirectoryForChildTenant(
if (orchestration_tools->doesDirectoryExist(dir)) return true;

if (!orchestration_tools->createDirectory(dir)) {
dbgError(D_ORCHESTRATOR)
dbgError(D_SERVICE_CONTROLLER)
<< "Failed to create configuration directory for tenant "
<< child_tenant_id;
return false;
}
dbgTrace(D_ORCHESTRATOR) << "Created new configuration directory for tenant " << child_tenant_id;
dbgTrace(D_SERVICE_CONTROLLER) << "Created new configuration directory for tenant " << child_tenant_id;
return true;
}

static string
getChecksum(const string &file_path)
{
auto orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<ServiceController>();
Maybe<string> file_checksum = orchestration_tools->calculateChecksum(
Package::ChecksumTypes::MD5,
file_path
);

if (file_checksum.ok()) return file_checksum.unpack();

string checksum = "unknown version";
try {
checksum = to_string(boost::uuids::random_generator()());
} catch (const boost::uuids::entropy_error &e) {
dbgDebug(D_ORCHESTRATOR) << "Couldn't generate random checksum";
}
return checksum;
}

Maybe<void>
ServiceController::Impl::updateServiceConfiguration(
const string &new_policy_path,
@@ -734,7 +715,7 @@ ServiceController::Impl::updateServiceConfiguration(
if (!child_tenant_id.empty()) {
tenant_and_profile_ids = " Child tenant id: " + child_tenant_id + ", Child profile id: " + child_profile_id;
}
dbgFlow(D_ORCHESTRATOR)
dbgFlow(D_SERVICE_CONTROLLER)
<< "new_policy_path: "
<< new_policy_path
<< ", new_settings_path: "
@@ -758,9 +739,9 @@ ServiceController::Impl::updateServiceConfiguration(
}

for (const string &data : new_data_files) {
dbgTrace(D_ORCHESTRATOR) << "data: " << data;
dbgTrace(D_SERVICE_CONTROLLER) << "data: " << data;
if (service.second.isConfigurationRelevant(data)) {
dbgTrace(D_ORCHESTRATOR)
dbgTrace(D_SERVICE_CONTROLLER)
<< "data has relevant configuration, will update the service: "
<< service.first;
nano_services_to_update.insert(service.first);
@@ -770,7 +751,8 @@ ServiceController::Impl::updateServiceConfiguration(
}

if (new_policy_path == "") {
dbgDebug(D_ORCHESTRATOR) << "Policy file was not updated. Sending reload command regarding settings and data";
dbgDebug(D_SERVICE_CONTROLLER)
<< "Policy file was not updated. Sending reload command regarding settings and data";
auto signal_services = sendSignalForServices(nano_services_to_update, "");
if (!signal_services.ok()) return signal_services.passErr();
Singleton::Consume<I_DeclarativePolicy>::from<DeclarativePolicyUtils>()->turnOffApplyPolicyFlag();
@@ -779,7 +761,7 @@ ServiceController::Impl::updateServiceConfiguration(

Maybe<string> loaded_policy_json = orchestration_tools->readFile(new_policy_path);
if (!loaded_policy_json.ok()) {
dbgWarning(D_ORCHESTRATOR)
dbgWarning(D_SERVICE_CONTROLLER)
<< "Failed to load new file: "
<< new_policy_path
<< ". Error: "
@@ -795,7 +777,7 @@ ServiceController::Impl::updateServiceConfiguration(
);

if (!all_security_policies.ok()) {
dbgWarning(D_ORCHESTRATOR)
dbgWarning(D_SERVICE_CONTROLLER)
<< "Failed to parse json file: "
<< new_policy_path
<< ". Error: "
@@ -825,13 +807,13 @@ ServiceController::Impl::updateServiceConfiguration(
if (child_tenant_id.empty() && single_policy.first == versions_param) {
//In a multi-tenant env, only the parent should handle the versions parameter
policy_versions = single_policy.second;
dbgWarning(D_ORCHESTRATOR) << "Found versions parameter in policy file:" << policy_versions;
dbgTrace(D_SERVICE_CONTROLLER) << "Found versions parameter in policy file:" << policy_versions;
}

dbgDebug(D_ORCHESTRATOR) << "Starting to update policy file. Policy type: " << single_policy.first;
dbgDebug(D_SERVICE_CONTROLLER) << "Starting to update policy file. Policy type: " << single_policy.first;

if (!createDirectoryForChildTenant(child_tenant_id, child_profile_id)) {
dbgWarning(D_ORCHESTRATOR)
dbgWarning(D_SERVICE_CONTROLLER)
<< "Failed to create directory for child. Tenant id: " << child_tenant_id
<< ", Profile id: " << child_profile_id;
return genError("Failed to create directory for child tenant");
@@ -861,7 +843,7 @@ ServiceController::Impl::updateServiceConfiguration(
}
changed_policy_files.insert(policy_file_path);

dbgInfo(D_ORCHESTRATOR) << "Successfully updated policy file. Policy name: " << single_policy.first;
dbgInfo(D_SERVICE_CONTROLLER) << "Successfully updated policy file. Policy name: " << single_policy.first;

auto orc_status = Singleton::Consume<I_OrchestrationStatus>::by<ServiceController>();
orc_status->setServiceConfiguration(
@@ -878,7 +860,9 @@ ServiceController::Impl::updateServiceConfiguration(
for (const auto &instance_id: instances) {
auto relevant_service = registered_services.find(instance_id);
if (relevant_service == registered_services.end()) {
dbgWarning(D_ORCHESTRATOR) << "Could not find registered service. Service Id: " << instance_id;
dbgWarning(D_SERVICE_CONTROLLER)
<< "Could not find registered service. Service Id: "
<< instance_id;
continue;
}
if (relevant_service->second.isConfigurationRelevant(single_policy.first)) {
@@ -897,12 +881,12 @@ ServiceController::Impl::updateServiceConfiguration(
// In a multi-tenant env, we send the signal to the services only on the last iteration
if (!is_multi_tenant_env || last_iteration) {
auto is_send_signal_for_services =
sendSignalForServices(nano_services_to_update, version_value + ',' + getChecksum(new_policy_path));
sendSignalForServices(nano_services_to_update, version_value + ',' + policy_versions);
was_policy_updated &= is_send_signal_for_services.ok();
if (!is_send_signal_for_services.ok()) send_signal_for_services_err = is_send_signal_for_services.getErr();
}

dbgTrace(D_ORCHESTRATOR) << "was policy updated: " << (was_policy_updated ? "true" : "false");
dbgTrace(D_SERVICE_CONTROLLER) << "was policy updated: " << (was_policy_updated ? "true" : "false");

if (was_policy_updated) {
string base_path =
@@ -916,14 +900,14 @@ ServiceController::Impl::updateServiceConfiguration(
);

if (new_policy_path.compare(config_file_path) == 0) {
dbgDebug(D_ORCHESTRATOR) << "Enforcing the default policy file";
dbgDebug(D_SERVICE_CONTROLLER) << "Enforcing the default policy file";
policy_version = version_value;
Singleton::Consume<I_DeclarativePolicy>::from<DeclarativePolicyUtils>()->turnOffApplyPolicyFlag();
return Maybe<void>();
}

if (!backupConfigurationFile(config_file_path)) {
dbgWarning(D_ORCHESTRATOR) << "Failed to backup the policy file.";
dbgWarning(D_SERVICE_CONTROLLER) << "Failed to backup the policy file.";
return genError("Failed to backup the policy file.");
}

@@ -931,7 +915,7 @@ ServiceController::Impl::updateServiceConfiguration(

// Save the new configuration file.
if (!orchestration_tools->copyFile(new_policy_path, config_file_path)) {
dbgWarning(D_ORCHESTRATOR) << "Failed to save the policy file.";
dbgWarning(D_SERVICE_CONTROLLER) << "Failed to save the policy file.";
return genError("Failed to save the policy file.");
}
}
@@ -946,11 +930,11 @@ ServiceController::Impl::sendSignalForServices(
const set<string> &nano_services_to_update,
const string &policy_version_to_update)
{
dbgFlow(D_ORCHESTRATOR);
dbgFlow(D_SERVICE_CONTROLLER) << "Policy version to update: " << policy_version_to_update;
for (auto &service_id : nano_services_to_update) {
auto nano_service = registered_services.find(service_id);
if (nano_service == registered_services.end()) {
dbgWarning(D_ORCHESTRATOR) << "Could not find registered service. Service Id: " << service_id;
dbgWarning(D_SERVICE_CONTROLLER) << "Could not find registered service. Service Id: " << service_id;
continue;
}

@@ -958,13 +942,13 @@ ServiceController::Impl::sendSignalForServices(
auto reconf_status = nano_service->second.sendNewConfigurations(configuration_id, policy_version_to_update);

if (reconf_status == ReconfStatus::INACTIVE) {
dbgWarning(D_ORCHESTRATOR) << "Erasing details regarding inactive service " << service_id;
dbgWarning(D_SERVICE_CONTROLLER) << "Erasing details regarding inactive service " << service_id;
registered_services.erase(service_id);
writeRegisteredServicesToFile();
}

if (reconf_status == ReconfStatus::FAILED) {
dbgDebug(D_ORCHESTRATOR) << "The reconfiguration failed for serivce: " << service_id;
dbgDebug(D_SERVICE_CONTROLLER) << "The reconfiguration failed for serivce: " << service_id;
services_reconf_status.clear();
services_reconf_names.clear();
return genError("The reconfiguration failed for serivce: " + service_id);
@@ -985,13 +969,14 @@ ServiceController::Impl::sendSignalForServices(
while(timer->getMonotonicTime() < current_timeout) {
switch (getUpdatedReconfStatus()) {
case ReconfStatus::SUCCEEDED: {
dbgDebug(D_ORCHESTRATOR) << "The reconfiguration was successfully completed for all the services";
dbgDebug(D_SERVICE_CONTROLLER)
<< "The reconfiguration was successfully completed for all the services";
services_reconf_status.clear();
services_reconf_names.clear();
return Maybe<void>();
}
case ReconfStatus::IN_PROGRESS: {
dbgTrace(D_ORCHESTRATOR) << "Reconfiguration in progress...";
dbgTrace(D_SERVICE_CONTROLLER) << "Reconfiguration in progress...";
Singleton::Consume<I_MainLoop>::by<ServiceController>()->yield(chrono::seconds(2));
break;
}
@@ -1000,7 +985,7 @@ ServiceController::Impl::sendSignalForServices(
for(auto &status : services_reconf_status) {
if (status.second == ReconfStatus::FAILED) {
failed_services_vec.push_back(services_reconf_names[status.first]);
dbgDebug(D_ORCHESTRATOR)
dbgDebug(D_SERVICE_CONTROLLER)
<< "The reconfiguration failed for serivce "
<< services_reconf_names[status.first];
}
@@ -1013,7 +998,7 @@ ServiceController::Impl::sendSignalForServices(
return genError("The reconfiguration failed for serivces: " + failed_services);
}
case ReconfStatus::INACTIVE: {
dbgError(D_ORCHESTRATOR) << "Reached inactive state in the middle of reconfiguration!";
dbgError(D_SERVICE_CONTROLLER) << "Reached inactive state in the middle of reconfiguration!";
services_reconf_status.clear();
services_reconf_names.clear();
return genError("Reached inactive state in the middle of reconfiguration!");
@@ -1021,7 +1006,7 @@ ServiceController::Impl::sendSignalForServices(
}
}

dbgDebug(D_ORCHESTRATOR) << "The reconfiguration has reached a timeout";
dbgDebug(D_SERVICE_CONTROLLER) << "The reconfiguration has reached a timeout";
services_reconf_status.clear();
services_reconf_names.clear();
return genError("The reconfiguration has reached a timeout");
@@ -1033,17 +1018,17 @@ ServiceController::Impl::updateServiceConfigurationFile(
const string &configuration_file_path,
const string &new_configuration)
{
dbgFlow(D_ORCHESTRATOR) << "Updating configuration. Config Name: " << configuration_name;
dbgFlow(D_SERVICE_CONTROLLER) << "Updating configuration. Config Name: " << configuration_name;

if (orchestration_tools->doesFileExist(configuration_file_path)) {
Maybe<string> old_configuration = orchestration_tools->readFile(configuration_file_path);
if (old_configuration.ok()) {
bool service_changed = old_configuration.unpack().compare(new_configuration) != 0;
if (service_changed == false) {
dbgDebug(D_ORCHESTRATOR) << "There is no update for policy file: " << configuration_file_path;
dbgDebug(D_SERVICE_CONTROLLER) << "There is no update for policy file: " << configuration_file_path;
return Maybe<void>();
}
dbgDebug(D_ORCHESTRATOR)
dbgDebug(D_SERVICE_CONTROLLER)
<< "Starting to update " << configuration_file_path << " to " << new_configuration;
string old_configuration_backup_path = configuration_file_path + getConfigurationWithDefault<string>(
".bk",
@@ -1051,13 +1036,15 @@ ServiceController::Impl::updateServiceConfigurationFile(
"Backup file extension"
);
if (orchestration_tools->copyFile(configuration_file_path, old_configuration_backup_path)) {
dbgDebug(D_ORCHESTRATOR) << "Backup of policy file has been created in: " << configuration_file_path;
dbgDebug(D_SERVICE_CONTROLLER)
<< "Backup of policy file has been created in: "
<< configuration_file_path;
} else {
dbgWarning(D_ORCHESTRATOR) << "Failed to backup policy file";
dbgWarning(D_SERVICE_CONTROLLER) << "Failed to backup policy file";
return genError("Failed to backup policy file");
}
} else {
dbgWarning(D_ORCHESTRATOR)
dbgWarning(D_SERVICE_CONTROLLER)
<< "Failed to read current policy file "
<< configuration_file_path
<< ". Error: "
@@ -1073,13 +1060,13 @@ ServiceController::Impl::updateServiceConfigurationFile(
}

if (orchestration_tools->writeFile(new_configuration, configuration_file_path)) {
dbgDebug(D_ORCHESTRATOR) << "New policy file has been saved in: " << configuration_file_path;
dbgDebug(D_SERVICE_CONTROLLER) << "New policy file has been saved in: " << configuration_file_path;
} else {
dbgWarning(D_ORCHESTRATOR) << "Failed to save new policy file";
dbgWarning(D_SERVICE_CONTROLLER) << "Failed to save new policy file";
return genError("Failed to save new policy file");
}

dbgInfo(D_ORCHESTRATOR) << "Successfully updated policy file: " << configuration_file_path;
dbgInfo(D_SERVICE_CONTROLLER) << "Successfully updated policy file: " << configuration_file_path;

return Maybe<void>();
}
@@ -1120,14 +1107,14 @@ ServiceController::Impl::updateReconfStatus(int id, const string &service_name,
}

if (services_reconf_status.find(id) == services_reconf_status.end()) {
dbgError(D_ORCHESTRATOR)
dbgError(D_SERVICE_CONTROLLER)
<< "Unable to find a mapping for reconfiguration ID:"
<< id
<< ". Service name: "
<< service_name;
return;
}
dbgTrace(D_ORCHESTRATOR)
dbgTrace(D_SERVICE_CONTROLLER)
<< "Updating reconf status for reconfiguration ID "
<< id
<< ", Service name: "
@@ -1144,7 +1131,7 @@ ServiceController::Impl::startReconfStatus(
const string &service_name,
const string &service_id)
{
dbgTrace(D_ORCHESTRATOR)
dbgTrace(D_SERVICE_CONTROLLER)
<< "Starting reconf status. Configuration ID: "
<< id
<< ", service name: "

File diff suppressed because it is too large
@@ -100,11 +100,19 @@ DeclarativePolicyUtils::updateCurrentPolicy(const string &policy_checksum)
{
string clean_policy_checksum = getCleanChecksum(policy_checksum);
auto env = Singleton::Consume<I_EnvDetails>::by<DeclarativePolicyUtils>()->getEnvType();
curr_policy = Singleton::Consume<I_LocalPolicyMgmtGen>::by<DeclarativePolicyUtils>()->generateAppSecLocalPolicy(
env,
clean_policy_checksum,
local_policy_path
);
string maybe_policy =
Singleton::Consume<I_LocalPolicyMgmtGen>::by<DeclarativePolicyUtils>()->generateAppSecLocalPolicy(
env,
clean_policy_checksum,
local_policy_path
);

if (maybe_policy.empty()) {
dbgWarning(D_ORCHESTRATOR) << "Could not generate appsec local policy";
return;
}

curr_policy = maybe_policy;
}

string

@@ -68,6 +68,12 @@ FogAuthenticator::RegistrationData::RegistrationData(const string &token)
{
}

string
FogAuthenticator::RegistrationData::getData() const
{
return data;
}

FogAuthenticator::UserCredentials::UserCredentials(const string &_client_id, const string &_shared_secret)
:
client_id(_client_id),
@@ -193,6 +199,10 @@ FogAuthenticator::registerAgent(
request << make_pair("reverse_proxy", "true");
}

if (details_resolver->isCloudStorageEnabled()) {
request << make_pair("cloud_storage_service", "true");
}

if (details_resolver->isKernelVersion3OrHigher()) {
request << make_pair("isKernelVersion3OrHigher", "true");
}
@@ -212,6 +222,10 @@ FogAuthenticator::registerAgent(
if (details_resolver->compareCheckpointVersion(8200, std::greater_equal<int>())) {
request << make_pair("isCheckpointVersionGER82", "true");
}
auto maybe_vs_id = Singleton::Consume<I_Environment>::by<FogAuthenticator>()->get<string>("VS ID");
if (maybe_vs_id.ok()) {
request << make_pair("virtualSystemId", maybe_vs_id.unpack());
}
#endif // gaia || smb

dbgDebug(D_ORCHESTRATOR) << "Sending registration request to fog";
@@ -297,36 +311,88 @@ FogAuthenticator::getRegistrationData()
return reg_data;
}

const char *env_otp = getenv("NANO_AGENT_TOKEN");
if (env_otp) {
dbgInfo(D_ORCHESTRATOR) << "Loading registration token from environment";
return RegistrationData(env_otp);
}
if (reg_data.ok()) {
dbgInfo(D_ORCHESTRATOR) << "Loading registration token from cache";
return reg_data;
}

auto local_env_token = getRegistrationToken();
if (local_env_token.ok()) return local_env_token;

return genError("Failed to load registration token from the environment.");
}

Maybe<FogAuthenticator::RegistrationData>
FogAuthenticator::getRegistrationToken()
{
auto reg_data_path = getConfigurationWithDefault<string>(
filesystem_prefix + "/conf/registration-data.json",
"orchestration",
"Registration data Path"
);
dbgTrace(D_ORCHESTRATOR) << "Getting registration token from " << reg_data_path;

dbgDebug(D_ORCHESTRATOR) << "Loading registration data from " << reg_data_path;
auto orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<FogAuthenticator>();
auto raw_reg_data = orchestration_tools->readFile(reg_data_path);
if (!raw_reg_data.ok()) return genError(raw_reg_data.getErr());
if (raw_reg_data.ok()) {
auto decoded_reg_data = orchestration_tools->base64Decode(raw_reg_data.unpack());
reg_data = orchestration_tools->jsonStringToObject<RegistrationData>(decoded_reg_data);

dbgTrace(D_ORCHESTRATOR) << "Successfully loaded the registration data";
auto decoded_reg_data = orchestration_tools->base64Decode(raw_reg_data.unpack());
reg_data = orchestration_tools->jsonStringToObject<RegistrationData>(decoded_reg_data);

if (reg_data.ok()) {
dbgTrace(D_ORCHESTRATOR) << "Registration token has been converted to an object";
if (reg_data.ok()) {
dbgInfo(D_ORCHESTRATOR) << "Registration token has been loaded from: " << reg_data_path;
return reg_data;
}
}

return reg_data;
dbgTrace(D_ORCHESTRATOR) << "Getting registration token from container environment.";
const char *container_otp = getenv("AGENT_TOKEN");
if (container_otp) {
dbgInfo(D_ORCHESTRATOR) << "Registration token has been loaded from container environment";
return RegistrationData(container_otp);
}

dbgTrace(D_ORCHESTRATOR) << "Getting registration token from the environment.";
const char *env_otp = getenv("NANO_AGENT_TOKEN");
if (env_otp) {
dbgInfo(D_ORCHESTRATOR) << "Registration token has been loaded from the environment";
return RegistrationData(env_otp);
}

return genError("No registration token in the environment");
}
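
The token lookup order established above is: the cached registration file on disk first, then the container-level AGENT_TOKEN variable, and finally NANO_AGENT_TOKEN. A minimal standalone sketch of that precedence follows; readTokenFile and the plain-string return are illustrative assumptions, not the repository's Maybe-based API.

#include <cstdlib>
#include <fstream>
#include <string>

// Illustrative stand-in for the cached registration file; the real code reads it via
// I_OrchestrationTools and base64-decodes a JSON blob into a RegistrationData object.
static std::string readTokenFile(const std::string &path)
{
    std::ifstream in(path);
    std::string token;
    if (in) std::getline(in, token);
    return token;
}

// Mirrors the precedence of FogAuthenticator::getRegistrationToken():
// cached registration data, then AGENT_TOKEN (container), then NANO_AGENT_TOKEN.
std::string resolveRegistrationToken(const std::string &reg_data_path)
{
    std::string cached = readTokenFile(reg_data_path);
    if (!cached.empty()) return cached;

    if (const char *container_otp = std::getenv("AGENT_TOKEN")) return container_otp;
    if (const char *env_otp = std::getenv("NANO_AGENT_TOKEN")) return env_otp;

    return "";  // empty string stands in for "no registration token in the environment"
}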

void
FogAuthenticator::registerLocalAgentToFog()
{
auto local_reg_token = getRegistrationToken();
if (!local_reg_token.ok()) return;
dbgInfo(D_ORCHESTRATOR) << "Start local agent registration to the fog";

string exec_command = "open-appsec-ctl --set-mode --online_mode --token " + local_reg_token.unpack().getData();

auto i_agent_details = Singleton::Consume<I_AgentDetails>::by<FogAuthenticator>();
auto fog_address = i_agent_details->getFogDomain();
if (fog_address.ok()) exec_command = exec_command + " --fog https://" + fog_address.unpack();

auto shell_cmd = Singleton::Consume<I_ShellCmd>::by<FogAuthenticator>();
auto maybe_cmd_output = shell_cmd->getExecOutputAndCode(
exec_command,
300000,
true
);
if (!maybe_cmd_output.ok()){
dbgWarning(D_ORCHESTRATOR)
<< "Failed in local agent registertion to the fog. Error: "
<< maybe_cmd_output.getErr();
return;
}

if (maybe_cmd_output.unpack().second != 0) {
dbgWarning(D_ORCHESTRATOR)
<< "Failed in local agent registertion to the fog. Error: "
<< maybe_cmd_output.unpack().first;
return;
}
}
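
The command assembled by registerLocalAgentToFog() above is plain string composition: the one-time token is mandatory and the fog address is appended only when the agent already knows its fog domain. A small self-contained sketch of the same composition; buildRegistrationCommand and the example values are illustrative, not part of the repository.

#include <iostream>
#include <string>

// Hypothetical helper mirroring the open-appsec-ctl invocation built above.
std::string
buildRegistrationCommand(const std::string &token, const std::string &fog_domain)
{
    std::string cmd = "open-appsec-ctl --set-mode --online_mode --token " + token;
    if (!fog_domain.empty()) cmd += " --fog https://" + fog_domain;  // optional, only when known
    return cmd;
}

int main()
{
    std::cout << buildRegistrationCommand("example-token", "") << "\n";
    std::cout << buildRegistrationCommand("example-token", "fog.example.com") << "\n";
    return 0;
}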

bool
@@ -378,6 +444,22 @@ FogAuthenticator::getCredentialsFromFile() const
return orchestration_tools->jsonStringToObject<UserCredentials>(encrypted_cred.unpack());
}

static string
getDeplymentType()
{
auto deplyment_type = Singleton::Consume<I_EnvDetails>::by<FogAuthenticator>()->getEnvType();
switch (deplyment_type) {
case EnvType::LINUX: return "Embedded";
case EnvType::DOCKER: return "Embedded";
case EnvType::NON_CRD_K8S:
case EnvType::K8S: return "Embedded";
case EnvType::COUNT: break;
}

dbgAssert(false) << "Failed to get a legitimate deplyment type: " << static_cast<uint>(deplyment_type);
return "Embedded";
}

Maybe<FogAuthenticator::UserCredentials>
FogAuthenticator::getCredentials()
{
@@ -386,7 +468,7 @@ FogAuthenticator::getCredentials()
return maybe_credentials;
}

dbgTrace(D_ORCHESTRATOR) << "Credentials were not not receoived from the file. Getting registration data.";
dbgTrace(D_ORCHESTRATOR) << "Credentials were not not received from the file. Getting registration data.";
auto reg_data = getRegistrationData();
if (!reg_data.ok()) {
return genError("Failed to load a valid registration token, Error: " + reg_data.getErr());
@@ -396,17 +478,24 @@ FogAuthenticator::getCredentials()
Maybe<string> name = details_resolver->getHostname();
if (!name.ok()) return name.passErr();

auto maybe_vs_id = Singleton::Consume<I_Environment>::by<FogAuthenticator>()->get<string>("VS ID");
string host_name = *name;
if (maybe_vs_id.ok()) {
host_name.append(":");
host_name.append(maybe_vs_id.unpack());
}

Maybe<string> platform = details_resolver->getPlatform();
if (!platform.ok()) return platform.passErr();

Maybe<string> arch = details_resolver->getArch();
if (!arch.ok()) return arch.passErr();

string type = getConfigurationWithDefault<string>("Embedded", "orchestration", "Agent type");
maybe_credentials = registerAgent(reg_data.unpack(), *name, type, *platform, *arch);
string type = getSettingWithDefault(getDeplymentType(), "orchestration", "Agent type");
maybe_credentials = registerAgent(reg_data.unpack(), host_name, type, *platform, *arch);

auto orc_status = Singleton::Consume<I_OrchestrationStatus>::by<FogAuthenticator>();
orc_status->setRegistrationDetails(*name, type, *platform, *arch);
orc_status->setRegistrationDetails(host_name, type, *platform, *arch);

if (!maybe_credentials.ok()) return maybe_credentials;

@@ -96,8 +96,6 @@ FogCommunication::downloadAttributeFile(const GetResourceFile &resourse_file, co
{
if (!access_token.ok()) return genError("Acccess Token not available.");

auto unpacked_access_token = access_token.unpack().getToken();

string policy_mgmt_mode = getSettingWithDefault<string>("management", "profileManagedMode");
if (policy_mgmt_mode == "declarative" && resourse_file.getFileName() =="policy") {
dbgDebug(D_ORCHESTRATOR) << "Download policy on declarative mode - returning the local policy";

@@ -104,8 +104,6 @@ HybridCommunication::downloadAttributeFile(const GetResourceFile &resourse_file,
if (resourse_file.getFileName() == "manifest") {
if (!access_token.ok()) return genError("Acccess Token not available.");

auto unpacked_access_token = access_token.unpack().getToken();

auto attribute_file = Singleton::Consume<I_Messaging>::by<HybridCommunication>()->downloadFile(
resourse_file.getRequestMethod(),
agent_resource_api + '/' + resourse_file.getFileName(),

@@ -40,6 +40,12 @@ LocalCommunication::authenticateAgent()
return Maybe<void>();
}

void
LocalCommunication::registerLocalAgentToFog()
{
return;
}

string
LocalCommunication::getChecksum(const string &file_path)
{

@@ -69,6 +69,11 @@ public:
return i_update_comm_impl->authenticateAgent();
}

void registerLocalAgentToFog()
{
i_update_comm_impl->registerLocalAgentToFog();
}

Maybe<void>
getUpdate(CheckUpdateRequest &request) override
{

@@ -38,6 +38,12 @@ public:
return local_communication.authenticateAgent();
}

void
registerLocalAgentToFog()
{
local_communication.registerLocalAgentToFog();
}

Maybe<void>
sendPolicyVersion(const string &version, const string &policy_versions)
{
@@ -122,6 +128,11 @@ TEST_F(LocalCommunicationTest, authenticateAgent)
EXPECT_TRUE(authenticat_res.ok());
}

TEST_F(LocalCommunicationTest, registerLocalAgentToFog)
{
registerLocalAgentToFog();
}

TEST_F(LocalCommunicationTest, downloadManifest)
{
string new_manifest_string = "new manifest";

@@ -23,6 +23,7 @@ USE_DEBUG_FLAG(D_WAAP_PARSER_JSON);
USE_DEBUG_FLAG(D_OA_SCHEMA_UPDATER);

const std::string ParserJson::m_parserName = "jsonParser";
static const std::string OPERATION_NAME_GRAPHQL = "operationName";

int
ParserJson::cb_null()

@@ -44,6 +44,7 @@ WaapTelemetryBase::sendLog(const LogRest &metric_client_rest) const
"X-Tenant-Id",
Singleton::Consume<I_AgentDetails>::by<GenericMetric>()->getTenantId()
);
req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
Singleton::Consume<I_Messaging>::by<GenericMetric>()->sendSyncMessageWithoutResponse(
HTTPMethod::POST,
fog_metric_uri,

@@ -59,6 +59,7 @@ private:
if (agentDetails->getOrchestrationMode() != OrchestrationMode::ONLINE) {
MessageMetadata req_md(getSharedStorageHost(), 80);
req_md.insertHeader("X-Tenant-Id", agentDetails->getTenantId());
req_md.setConnectioFlag(MessageConnectionConfig::UNSECURE_CONN);
auto req_status = messaging->sendSyncMessage(
method,
uri,

@@ -136,8 +136,10 @@ private:
string service_underscore_name = service_name;
replace(service_underscore_name.begin(), service_underscore_name.end(), ' ', '_');

string logFilesPath = getLogFilesPathConfig();

trace_file_path = getConfigurationWithDefault<string>(
"/var/log/nano_agent/trace_export_files/" + service_underscore_name + "_trace_file.dbg",
logFilesPath + "/nano_agent/trace_export_files/" + service_underscore_name + "_trace_file.dbg",
"SignalHandler",
"outputFilePath"
);

@@ -17,11 +17,11 @@
#include <dirent.h>
#include <algorithm>
#include <fstream>
#include <boost/regex.hpp>
#include <iostream>
#include <cctype>

#include "agent_core_utilities.h"

#include "cereal/archives/json.hpp"

#include "debug.h"
#include "cereal/external/rapidjson/error/en.h"
#include "include/profile_settings.h"
@@ -158,6 +158,7 @@ private:
vector<string> fillMultiTenantConfigFiles(const map<string, set<string>> &tenants);
vector<string> fillMultiTenantExpectedConfigFiles(const map<string, set<string>> &tenants);
map<string, string> getProfileAgentSetting() const;
void resolveVsId() const;

string
getActiveTenant() const
@@ -339,6 +340,9 @@ void
ConfigComponent::Impl::init()
{
reloadFileSystemPaths();

resolveVsId();

tenant_manager = Singleton::Consume<I_TenantManager>::by<ConfigComponent>();

if (!Singleton::exists<I_MainLoop>()) return;
@@ -354,8 +358,7 @@ ConfigComponent::Impl::init()
}
}

static
bool
static bool
checkContext(const shared_ptr<EnvironmentEvaluator<bool>> &ctx)
{
if (ctx == nullptr) return true;
@@ -950,6 +953,26 @@ ConfigComponent::Impl::reloadConfigurationContinuesWrapper(const string &version
is_continuous_report = false;
}

void
ConfigComponent::Impl::resolveVsId() const
{
const string &path = getConfigurationFlag("filesystem_path");

size_t vs_pos = path.rfind("/vs");
if (vs_pos == string::npos) return;

string vs_id = path.substr(vs_pos + 3);

if (!vs_id.empty() && all_of(vs_id.begin(), vs_id.end(), ::isdigit) && vs_id.size() < 6) {
dbgDebug(D_CONFIG) << "Identified VSX installation, VS ID: " << vs_id;
Singleton::Consume<I_Environment>::by<ConfigComponent>()->registerValue("VS ID", vs_id);
return;
}

dbgWarning(D_CONFIG) << "Possible VSX installation but VS ID is invalid, VS ID: " << vs_id;
return;
}
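
The acceptance rule above is deliberately narrow: only a non-empty, all-digit suffix of fewer than six characters after the last "/vs" segment is treated as a VS ID. A self-contained sketch of the same parsing rule for quick experimentation; parseVsId is an illustrative helper, not the component's API.

#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

// Same acceptance rule as ConfigComponent::Impl::resolveVsId(): take the text after the last
// "/vs" and require it to be non-empty, all digits, and shorter than six characters.
std::string parseVsId(const std::string &path)
{
    size_t vs_pos = path.rfind("/vs");
    if (vs_pos == std::string::npos) return "";

    std::string vs_id = path.substr(vs_pos + 3);
    bool valid = !vs_id.empty() && vs_id.size() < 6 &&
        std::all_of(vs_id.begin(), vs_id.end(), [](unsigned char c) { return std::isdigit(c) != 0; });
    return valid ? vs_id : "";
}

int main()
{
    std::cout << "'" << parseVsId("/etc/cp/conf/vs2") << "'\n";  // prints '2'
    std::cout << "'" << parseVsId("/etc/cp/conf") << "'\n";      // prints '' (no VS suffix)
    return 0;
}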

ConfigComponent::ConfigComponent() : Component("ConfigComponent"), pimpl(make_unique<Impl>()) {}
ConfigComponent::~ConfigComponent() {}

@@ -24,6 +24,7 @@ enum AttachmentType
NGINX_ATT_ID,
PRELOAD_ATT_ID,
SQUID_ATT_ID,
ENVOY_ATT_ID,
#ifdef __cplusplus
COUNT
#endif

@@ -17,7 +17,7 @@
#include <string>
#include <stdbool.h>

enum class EnvType { LINUX, K8S, COUNT };
enum class EnvType { LINUX, DOCKER, K8S, NON_CRD_K8S, COUNT };

class I_EnvDetails
{

@@ -21,6 +21,8 @@
#include "intelligence_is_v2/intelligence_response.h"
#include "intelligence_is_v2/intelligence_types_v2.h"
#include "intelligence_is_v2/query_request_v2.h"
#include "messaging/messaging_enums.h"
#include "messaging/messaging_metadata.h"
#include "maybe_res.h"

namespace Intelligence {
@@ -43,18 +45,29 @@ public:
getResponse(
const std::vector<QueryRequest> &query_requests,
bool is_pretty,
bool is_bulk
bool is_bulk,
const MessageMetadata &req_md
) const = 0;

virtual Maybe<Intelligence::Response> getResponse(const QueryRequest &query_request, bool is_pretty) const = 0;
virtual Maybe<Intelligence::Response>
getResponse(const QueryRequest &query_request, bool is_pretty, const MessageMetadata &req_md) const = 0;

template<typename Data>
Maybe<std::vector<AssetReply<Data>>>
queryIntelligence(QueryRequest &query_request, bool ignore_in_progress = false, bool is_pretty = true);
queryIntelligence(
QueryRequest &query_request,
bool ignore_in_progress = false,
bool is_pretty = true,
MessageMetadata req_md = MessageMetadata("", 0)
);

template<typename Data>
Maybe<std::vector<Maybe<std::vector<AssetReply<Data>>>>>
queryIntelligence(std::vector<QueryRequest> &query_requests, bool is_pretty = true);
queryIntelligence(
std::vector<QueryRequest> &query_requests,
bool is_pretty = true,
MessageMetadata req_md = MessageMetadata("", 0)
);

protected:
virtual ~I_Intelligence_IS_V2() {}
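
The interface change above keeps existing call sites compiling because the request metadata is added as a trailing parameter with a default value. A minimal standalone illustration of that compatibility pattern; the Metadata and query types here are stand-ins, not the Intelligence API.

#include <iostream>
#include <string>
#include <utility>

struct Metadata  // stand-in for MessageMetadata
{
    Metadata(std::string host = "", int port = 0) : host(std::move(host)), port(port) {}
    std::string host;
    int port;
};

// The old signature was query(request). Adding a defaulted trailing parameter means callers
// written against the old interface keep compiling unchanged, while new callers can route
// the request through specific connection metadata.
int query(const std::string &request, const Metadata &md = Metadata("", 0))
{
    std::cout << "query=" << request << " via host='" << md.host << "'\n";
    return 0;
}

int main()
{
    query("assets");                              // pre-existing caller, still valid
    query("assets", Metadata("10.0.0.1", 8080));  // new caller overriding the metadata
    return 0;
}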

@@ -76,7 +76,7 @@ public:
MessageMetadata message_metadata = MessageMetadata()
) = 0;

virtual Maybe<HTTPStatusCode, HTTPResponse> downloadFile(
virtual Maybe<void, HTTPResponse> downloadFile(
const HTTPMethod method,
const std::string &uri,
const std::string &download_file_path,
@@ -84,7 +84,7 @@ public:
MessageMetadata message_metadata = MessageMetadata()
) = 0;

virtual Maybe<HTTPStatusCode, HTTPResponse> uploadFile(
virtual Maybe<void, HTTPResponse> uploadFile(
const std::string & uri,
const std::string & upload_file_path,
const MessageCategory category = MessageCategory::GENERIC,

@@ -20,9 +20,14 @@

template <typename Data>
Maybe<std::vector<AssetReply<Data>>>
I_Intelligence_IS_V2::queryIntelligence(QueryRequest &query_request, bool ignore_in_progress, bool is_pretty)
I_Intelligence_IS_V2::queryIntelligence(
QueryRequest &query_request,
bool ignore_in_progress,
bool is_pretty,
MessageMetadata req_md
)
{
auto response = getResponse(query_request, is_pretty);
auto response = getResponse(query_request, is_pretty, req_md);

if (!response.ok()) return response.passErr();
auto serializable_response = response->getSerializableResponse<Data>();
@@ -39,9 +44,13 @@ I_Intelligence_IS_V2::queryIntelligence(QueryRequest &query_request, bool ignore

template<typename Data>
Maybe<std::vector<Maybe<std::vector<AssetReply<Data>>>>>
I_Intelligence_IS_V2::queryIntelligence(std::vector<QueryRequest> &query_requests, bool is_pretty)
I_Intelligence_IS_V2::queryIntelligence(
std::vector<QueryRequest> &query_requests,
bool is_pretty,
MessageMetadata req_md
)
{
auto res = getResponse(query_requests, is_pretty, true);
auto res = getResponse(query_requests, is_pretty, true, req_md);
if (!res.ok()) return res.passErr();

return res->getBulkData<Data>();

@@ -80,4 +80,13 @@ enum class HTTPStatusCode
HTTP_SUSPEND = -2
};

enum class BioConnectionStatus
{
SUCCESS,
SHOULD_RETRY,
SHOULD_NOT_RETRY,

COUNT
};
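
The new BioConnectionStatus values separate retryable from fatal connection outcomes. A hedged sketch of how a caller might branch on them; tryConnectOnce and the retry budget are illustrative assumptions, not the messaging component's actual logic.

#include <iostream>

enum class BioConnectionStatus { SUCCESS, SHOULD_RETRY, SHOULD_NOT_RETRY, COUNT };

// Illustrative connection attempt; the real component drives a TLS/BIO handshake and maps
// its intermediate states onto these values.
BioConnectionStatus tryConnectOnce(int attempt)
{
    return attempt < 2 ? BioConnectionStatus::SHOULD_RETRY : BioConnectionStatus::SUCCESS;
}

bool connectWithRetries(int max_attempts)
{
    for (int attempt = 0; attempt < max_attempts; ++attempt) {
        switch (tryConnectOnce(attempt)) {
            case BioConnectionStatus::SUCCESS:          return true;
            case BioConnectionStatus::SHOULD_RETRY:     continue;      // transient, try again
            case BioConnectionStatus::SHOULD_NOT_RETRY: return false;  // fatal, stop immediately
            default:                                    return false;
        }
    }
    return false;
}

int main()
{
    std::cout << (connectWithRetries(5) ? "connected" : "failed") << "\n";
    return 0;
}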

#endif // __MESSAGING_ENUMS_H__

@@ -26,11 +26,19 @@ public:
MOCK_CONST_METHOD1(sendInvalidation, bool(const Invalidation &invalidation));
MOCK_METHOD2(registerInvalidation, Maybe<uint>(const Invalidation &invalidation, const InvalidationCb &callback));
MOCK_METHOD1(unregisterInvalidation, void(uint id));
MOCK_CONST_METHOD4(
getResponse,
Maybe<Response>(
const std::vector<QueryRequest> &query_requests,
bool is_pretty,
bool is_bulk,
const MessageMetadata &req_md
)
);
MOCK_CONST_METHOD3(
getResponse,
Maybe<Response>(const std::vector<QueryRequest> &query_requests, bool is_pretty, bool is_bulk)
Maybe<Response>(const QueryRequest &query_request, bool is_pretty, const MessageMetadata &req_md)
);
MOCK_CONST_METHOD2(getResponse, Maybe<Response>(const QueryRequest &query_request, bool is_pretty));
MOCK_CONST_METHOD0(getIsOfflineOnly, bool(void));
MOCK_CONST_METHOD1(getOfflineInfoString, Maybe<std::string>(const SerializableQueryFilter &query));
};

@@ -32,7 +32,7 @@ public:

MOCK_METHOD5(
downloadFile,
Maybe<HTTPStatusCode, HTTPResponse> (
Maybe<void, HTTPResponse> (
HTTPMethod,
const string &,
const string &,
@@ -43,7 +43,7 @@ public:

MOCK_METHOD4(
uploadFile,
Maybe<HTTPStatusCode, HTTPResponse> (
Maybe<void, HTTPResponse> (
const string &,
const string &,
MessageCategory,
@@ -62,10 +62,4 @@ operator<<(std::ostream &os, const HTTPResponse &)
return os;
}

static std::ostream &
operator<<(std::ostream &os, const HTTPStatusCode &)
{
return os;
}

#endif // __MOCK_MESSAGING_H__

@@ -134,6 +134,7 @@ DEFINE_FLAG(D_COMPONENT, D_ALL)
DEFINE_FLAG(D_AGENT_DETAILS, D_ORCHESTRATOR)
DEFINE_FLAG(D_LOCAL_POLICY, D_ORCHESTRATOR)
DEFINE_FLAG(D_NGINX_POLICY, D_ORCHESTRATOR)
DEFINE_FLAG(D_SERVICE_CONTROLLER, D_ORCHESTRATOR)

DEFINE_FLAG(D_GRADUAL_DEPLOYMENT, D_COMPONENT)
DEFINE_FLAG(D_SDWAN, D_COMPONENT)
@@ -148,7 +149,7 @@ DEFINE_FLAG(D_COMPONENT, D_ALL)
DEFINE_FLAG(D_UPSTREAM_KEEPALIVE, D_REVERSE_PROXY)
DEFINE_FLAG(D_FORWARD_PROXY, D_REVERSE_PROXY)

DEFINE_FLAG(D_IDA_SAML, D_COMPONENT)
DEFINE_FLAG(D_IDA, D_COMPONENT)

DEFINE_FLAG(D_IOT_NEXT, D_COMPONENT)
DEFINE_FLAG(D_IOT_AUXILIARY, D_IOT_NEXT)
Some files were not shown because too many files have changed in this diff.