diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..bb356b2 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,38 @@ +cmake_minimum_required (VERSION 2.8.4) +project (ngen) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-terminate -Dalpine") + +find_package(Boost REQUIRED) +find_package(ZLIB REQUIRED) +find_package(GTest REQUIRED) + +include(cppcheck.cmake) + +include_directories(${Boost_INCLUDE_DIRS}) +link_directories(${OPENSSL_ROOT_DIR}/lib) +include_directories(${ZLIB_INCLUDE_DIR}) +link_directories(${ZLIB_LIBRARY}) +include_directories(/usr/include/libxml2) +include_directories(/usr/src/googletest/googlemock/include) + +include(unit_test.cmake) + +include_directories(external) +include_directories(external/yajl/yajl-2.1.1/include) +include_directories(external/C-Mock/include/cmock) +include_directories(external/picojson) +include_directories(core/include/general) +include_directories(core/include/internal) +include_directories(core/include/services_sdk/interfaces) +include_directories(core/include/services_sdk/resources) +include_directories(core/include/services_sdk/utilities) +include_directories(core/include/attachments) +include_directories(components/include) + +add_subdirectory(build_system) +add_subdirectory(external) +add_subdirectory(core) +add_subdirectory(attachments) +add_subdirectory(components) +add_subdirectory(nodes) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..8722099 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,47 @@ +# Contributor Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors pledge to make participation in our community (project) a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others’ private information, such as a physical or email address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting our team at [opensource@openappsec.io](mailto:opensource@openappsec.io). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances.
+
+The project team is obligated to respect the privacy and security of the reporter of any incident.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other leaders of this community.
+
+## Attribution
+
+This Code of Conduct is partially adapted from the [Contributor Covenant](https://contributor-covenant.org) Code of Conduct with language adopted from various versions.
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..3afdef7
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,37 @@
+# open-appsec Contributing Guide
+Thank you for your interest in open-appsec. We welcome everyone who wishes to share their knowledge and expertise to enhance and expand the project.
+
+Read our [Code of Conduct](./CODE_OF_CONDUCT.md) to keep our community approachable and respectable.
+
+In this guide, we provide an overview of the guidelines for the various contribution options - from reporting or fixing a bug to suggesting an enhancement.
+
+## Reporting security vulnerabilities
+
+If you've found a vulnerability or a potential vulnerability in open-appsec, please let us know at [security-alert@openappsec.io](mailto:security-alert@openappsec.io). We'll send a confirmation email to acknowledge your report within 24 hours, and we'll send an additional email once we've confirmed or ruled out the issue.
+
+An internal process will be activated upon determining the validity of a reported security vulnerability, which will end with releasing a fix and deciding on the applicable disclosure actions. The reporter of the issue will receive updates on the progress of this process.
+
+## Reporting a bug
+
+**Important - If the bug you wish to report concerns a suspected security vulnerability, please refer to the "Reporting security vulnerabilities" section.**
+
+To report a bug, you can either open a new issue using a relevant [issue form](https://github.com/github/docs/issues/new/choose), or, alternatively, [contact us via our open-appsec open source distribution list](mailto:opensource@openappsec.io).
+
+Be sure to include a **title and clear description**, as much relevant information as possible, and a **code sample** or an **executable test case** demonstrating the expected behavior that is not occurring.
+
+## Contributing a fix to a bug
+
+Please [contact us via our open-appsec open source distribution list](mailto:opensource@openappsec.io) before writing your code. We want to make sure we understand the boundaries of the proposed fix, that the relevant coding style is clear for the proposed fix's location in the code, and that the proposed contribution is relevant and eligible.
+
+## Proposing an enhancement
+
+Please [suggest your change via our open-appsec open source distribution list](mailto:opensource@openappsec.io) before writing your code. We will contact you to make sure we understand the boundaries of the proposed enhancement, that the relevant coding style is clear for its location in the code, and that the proposed contribution is relevant and eligible. There may be additional considerations that we would like to discuss with you before implementing the enhancement.
+
+## Open Source documentation issues
+
+To report or suggest a change regarding any issue detected in the documentation files of our open source repositories, please follow the same guidelines as for bug reports and fixes.
+
+# Final Thanks
+We value all efforts to read, suggest changes to, and/or contribute to our open source files. Thank you for your time and efforts.
+
+The open-appsec Team
diff --git a/LEXFO-CHP20221014-Report-Code_audit-OPEN-APPSEC-v1.2.pdf b/LEXFO-CHP20221014-Report-Code_audit-OPEN-APPSEC-v1.2.pdf
new file mode 100644
index 0000000..a98d78e
Binary files /dev/null and b/LEXFO-CHP20221014-Report-Code_audit-OPEN-APPSEC-v1.2.pdf differ
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md index 12e5387..2b7551b 100644 --- a/README.md +++ b/README.md @@ -1 +1,140 @@ -# openappsec \ No newline at end of file +
+<!-- openappsec/openappsec header image -->
+
+# About
+[open-appsec](https://www.openappsec.io) (openappsec.io) builds on machine learning to provide pre-emptive web app & API threat protection against OWASP-Top-10 and zero-day attacks. It can be deployed as an add-on to Kubernetes Ingress, NGINX, Envoy (soon), and API Gateways.
+
+The open-appsec engine learns how users normally interact with your web application. It then uses this information to automatically detect requests that fall outside of normal operations, and sends those requests for further analysis to decide whether the request is malicious or not.
+
+Every request to the application goes through three phases:
+
+1. The payload is decoded. All HTTP requests are parsed, JSON and XML sections are extracted, and any IP-level access control is applied.
+
+2. Multiple variables are fed to the machine learning engine. These variables, which are either directly extracted from the HTTP request or decoded from different parts of the payload, include attack indicators, IP addresses, user agents, fingerprints, and many other considerations. The supervised model of the machine learning engine uses these variables to compare the request with many common attack patterns found across the globe.
+
+3. If the request is identified as a valid and legitimate request, it is allowed and forwarded to your application. If, however, the request is considered suspicious or high risk, it is evaluated by the unsupervised model, which was trained in your specific environment. This model uses information such as the URL and the users involved to create a final confidence score that determines whether the request should be allowed or blocked.
+
+
+## Machine Learning Models
+
+open-appsec uses two models:
+
+1. A supervised model that was trained offline and fed with millions of requests, both malicious and benign.
+
+    * A basic model is provided as part of this repository. It is recommended for use in Monitor-Only and Test environments.
+    * An advanced model, which is more accurate and recommended for Production use, can be downloaded from the [open-appsec portal](https://my.openappsec.io) (User Menu -> Download advanced ML model). This model is updated from time to time, and you will get an email when these updates happen.
+
+2. An unsupervised model that is built in real time in the protected environment. This model uses traffic patterns specific to that environment.
+
+## Documentation
+* [Official documentation](https://docs.openappsec.io/)
+* [Video tutorial](https://www.youtube.com/watch?v=ZmFrA2ibdog)
+
+# Repositories
+
+The open-appsec GitHub organization includes the following main repositories:
+
+* [openappsec/openappsec](https://github.com/openappsec/openappsec) - the main code and logic of open-appsec. Developed in C++.
+* [openappsec/attachment](https://github.com/openappsec/attachment) - connects processes that provide HTTP data (e.g., NGINX) to the open-appsec Agent security logic. Developed in C.
+* [openappsec/smartsync](https://github.com/openappsec/smartsync) - in charge of correlating learning data from multiple agent instances and delivering a unified learning model for each asset. Developed in Golang.
+* [openappsec/smartsync-shared-files](https://github.com/openappsec/smartsync-shared-files) - the interface to the physical storage used by the smartsync service for storing learning data.
+
+## open-appsec NGINX attachment compilation instructions
+
+### Installing external dependencies
+
+Before compiling the services, make sure the latest development versions of the following libraries are installed:
+* Boost
+* OpenSSL
+* PCRE2
+* libxml2
+* GTest
+* GMock
+* cURL
+
+An example of installing the packages on Alpine:
+
+```bash
+ $ apk update
+ $ apk add boost-dev openssl-dev pcre2-dev libxml2-dev gtest-dev curl-dev
+```
+
+### Compiling and packaging the agent code
+
+1. Clone this repository
+2. Run the CMake command
+3. Run the `make install` command
+
+```bash
+ $ git clone https://github.com/openappsec/openappsec.git
+ $ cd openappsec/
+ $ cmake -DCMAKE_INSTALL_PREFIX=build_out .
+ $ make install
+ $ make package
+```
+
+### Placing the agent code inside an Alpine docker image
+
+Once the agent code has been compiled and packaged, an Alpine image running it can be created. This requires permission to execute the `docker` command.
+
+```bash
+ $ make docker
+```
+
+This will create a local Docker image called `agent-docker`.
+
+### Deployment of the agent docker image as a container
+
+To run a Nano-Agent as a container, the following steps are required:
+
+1. If you are using a container management system or plan on deploying the container using your CI, add the agent docker image to an accessible registry.
+2. If you are planning to manage the agent using the open-appsec UI, make sure to obtain an agent token from the Management Portal and Enforce.
+3. Run the agent with the following command (the `-e https_proxy` parameter is optional; the placeholders in angle brackets are values you supply):
+
+`docker run -d --name=agent-container --ipc=host -v=<conf dir>:/etc/cp/conf -v=<data dir>:/etc/cp/data -v=<logs dir>:/var/log/nano_agent -e https_proxy=<proxy address> -it <agent image> /cp-nano-agent [--token <token> | --hybrid-mode]`
+
+Example:
+```bash
+ $ docker run -d --name=agent-container --ipc=host -v=/home/admin/agent/conf:/etc/cp/conf -v=/home/admin/agent/data:/etc/cp/data -v=/home/admin/agent/logs:/var/log/nano_agent -e https_proxy=user:password@1.2.3.4:8080 -it agent-docker /cp-nano-agent --hybrid-mode
+ $ docker ps
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+1e67f2abbfd4 agent-docker "/cp-nano-agent --hybrid-mode" 1 minute ago Up 1 minute agent-container
+```
+
+ Note that you are not required to use a token from the Management Portal if you are managing your security policy locally; however, in that case you are required to use the `--hybrid-mode` flag. In addition, the volumes in the command are mandatory only if you wish to have persistency upon restart/upgrade/crash of the agent and its re-execution.
+ Lastly, the `--ipc=host` argument is mandatory so that the agent has access to the shared memory of a protected attachment (NGINX server).
+
+4. Create or replace the NGINX container using the [Attachment Repository](https://github.com/openappsec/attachment).
+
+This will run a docker container using the agent docker image.
+
+## Contact
+Please join the open-appsec community and follow us on LinkedIn.
+
+* [Community](https://www.openappsec.io/community)
+* [LinkedIn](https://www.linkedin.com/company/open-appsec)
+
+
+## Contributing
+We welcome everyone who wishes to share their knowledge and expertise to enhance and expand the project.
+
+Please see the [Contributing Guidelines](https://github.com/openappsec/openappsec/blob/main/CONTRIBUTING.md).
+
+## Security
+
+### Security Audit
+open-appsec code was audited by an independent third party in September-October 2022.
+See the [full report](https://github.com/openappsec/openappsec/blob/main/LEXFO-CHP20221014-Report-Code_audit-OPEN-APPSEC-v1.2.pdf).
+
+### Reporting security vulnerabilities
+If you've found a vulnerability or a potential vulnerability in open-appsec, please let us know at securityalert@openappsec.io. We'll send a confirmation email to acknowledge your report within 24 hours, and we'll send an additional email once we've confirmed or ruled out the issue.
+
+
+## License
+open-appsec is open source and available under the Apache 2.0 license.
+
+The basic ML model is open source and available under the Apache 2.0 license.
+
+The advanced ML model is open source and available under the Machine Learning Model license, which is included in the tar file you download.
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..7df2126
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+If you've found a vulnerability or a potential vulnerability in open-appsec, please let us know at [security-alert@openappsec.io](mailto:security-alert@openappsec.io). We'll send a confirmation email to acknowledge your report within 24 hours, and we'll send an additional email once we've confirmed or ruled out the issue.
+
+A process will be activated upon determining the validity of a reported security vulnerability, which will end with releasing a fix and deciding on the applicable disclosure actions. The reporter of the issue will receive updates on the progress of this process.
diff --git a/attachments/CMakeLists.txt b/attachments/CMakeLists.txt
new file mode 100644
index 0000000..d1aa43b
--- /dev/null
+++ b/attachments/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(nginx)
diff --git a/attachments/kernel_modules/core/include/common_is/kdebug_flags.h b/attachments/kernel_modules/core/include/common_is/kdebug_flags.h
new file mode 100755
index 0000000..cabeffb
--- /dev/null
+++ b/attachments/kernel_modules/core/include/common_is/kdebug_flags.h
@@ -0,0 +1,34 @@
+// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
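+
+// Note (illustrative, not taken from the original sources): this header uses the
+// "X-macro" pattern. It intentionally contains only a flag list; a consumer
+// defines DEFINE_KDEBUG_FLAG(flag) to whatever expansion it needs, includes
+// this file, and then undefines the macro. A hypothetical example:
+//
+//     #define DEFINE_KDEBUG_FLAG(flag) flag,
+//     enum KernelDebugFlagExample {
+//     #include "kdebug_flags.h"
+//         EXAMPLE_FLAG_COUNT
+//     };
+//     #undef DEFINE_KDEBUG_FLAG
+//
+// Keeping the list below as the single source of truth means new debug flags
+// only have to be added in one place.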
+ +#ifdef DEFINE_KDEBUG_FLAG + +DEFINE_KDEBUG_FLAG(kernelStartup) +DEFINE_KDEBUG_FLAG(l4Firewall) +DEFINE_KDEBUG_FLAG(ftpHandler) +DEFINE_KDEBUG_FLAG(geneve) +DEFINE_KDEBUG_FLAG(l4Ips) +DEFINE_KDEBUG_FLAG(tableIs) +DEFINE_KDEBUG_FLAG(logIs) +DEFINE_KDEBUG_FLAG(debugIs) +DEFINE_KDEBUG_FLAG(ioctlInfra) +DEFINE_KDEBUG_FLAG(trapInfra) +DEFINE_KDEBUG_FLAG(netfilterAttachment) +DEFINE_KDEBUG_FLAG(accessControlPolicy) +DEFINE_KDEBUG_FLAG(connection) +DEFINE_KDEBUG_FLAG(assetResolver) +DEFINE_KDEBUG_FLAG(statefulValidation) +DEFINE_KDEBUG_FLAG(statelessValidation) +DEFINE_KDEBUG_FLAG(kernelMetric) + +#endif // DEFINE_KDEBUG_FLAG diff --git a/attachments/nginx/CMakeLists.txt b/attachments/nginx/CMakeLists.txt new file mode 100644 index 0000000..ef815f9 --- /dev/null +++ b/attachments/nginx/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(nginx_attachment_util) diff --git a/attachments/nginx/nginx_attachment_util/CMakeLists.txt b/attachments/nginx/nginx_attachment_util/CMakeLists.txt new file mode 100644 index 0000000..c414de0 --- /dev/null +++ b/attachments/nginx/nginx_attachment_util/CMakeLists.txt @@ -0,0 +1,8 @@ +add_definitions(-DUSERSPACE) + +add_library(nginx_attachment_util SHARED nginx_attachment_util.cc) + +add_subdirectory(nginx_attachment_util_ut) + +install(TARGETS nginx_attachment_util DESTINATION lib) +install(TARGETS nginx_attachment_util DESTINATION http_transaction_handler_service/lib) diff --git a/attachments/nginx/nginx_attachment_util/nginx_attachment_util.cc b/attachments/nginx/nginx_attachment_util/nginx_attachment_util.cc new file mode 100644 index 0000000..32e66cd --- /dev/null +++ b/attachments/nginx/nginx_attachment_util/nginx_attachment_util.cc @@ -0,0 +1,251 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
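+
+// Overview (summary comment, not taken from the original sources): this file
+// exposes C-style accessor functions over a single static
+// HttpAttachmentConfiguration object. The NGINX attachment calls these
+// functions to read its settings -- inspection mode, IPC and keep-alive
+// parameters, per-thread timeouts, fail-open behavior, the debug context, and
+// the source IP ranges to skip -- after initAttachmentConfig() has loaded the
+// configuration file.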
+
+#include "nginx_attachment_util.h"
+
+#include <arpa/inet.h> // inet_pton, AF_INET/AF_INET6, in_addr/in6_addr
+
+#include "http_configuration.h"
+
+using namespace std;
+
+static HttpAttachmentConfiguration conf_data;
+
+int
+initAttachmentConfig(c_str conf_file)
+{
+    return conf_data.init(conf_file);
+}
+
+ngx_http_inspection_mode_e
+getInspectionMode()
+{
+    return static_cast<ngx_http_inspection_mode_e>(conf_data.getNumericalValue("nginx_inspection_mode"));
+}
+
+unsigned int
+getNumOfNginxIpcElements()
+{
+    return conf_data.getNumericalValue("num_of_nginx_ipc_elements");
+}
+
+unsigned int
+getKeepAliveIntervalMsec()
+{
+    return conf_data.getNumericalValue("keep_alive_interval_msec");
+}
+
+unsigned int
+getDbgLevel()
+{
+    return conf_data.getNumericalValue("dbg_level");
+}
+
+int
+isDebugContext(c_str client, c_str server, unsigned int port, c_str method, c_str host, c_str uri)
+{
+    auto &ctx = conf_data.getDebugContext();
+    return
+        (ctx.client == "" || ctx.client == client) &&
+        (ctx.server == "" || ctx.server == server) &&
+        (ctx.port == 0 || ctx.port == port) &&
+        (ctx.method == "" || ctx.method == method) &&
+        (ctx.host == "" || ctx.host == host) &&
+        (ctx.uri == "" || ctx.uri == uri);
+}
+
+c_str
+getStaticResourcesPath()
+{
+    return conf_data.getStringValue("static_resources_path").c_str();
+}
+
+int
+isFailOpenMode()
+{
+    return conf_data.getNumericalValue("is_fail_open_mode_enabled");
+}
+
+unsigned int
+getFailOpenTimeout()
+{
+    return conf_data.getNumericalValue("fail_open_timeout");
+}
+
+int
+isFailOpenHoldMode()
+{
+    return conf_data.getNumericalValue("is_fail_open_mode_hold_enabled");
+}
+
+unsigned int
+getFailOpenHoldTimeout()
+{
+    return conf_data.getNumericalValue("fail_open_hold_timeout");
+}
+
+unsigned int
+getMaxSessionsPerMinute()
+{
+    return conf_data.getNumericalValue("max_sessions_per_minute");
+}
+
+int
+isFailOpenOnSessionLimit()
+{
+    return conf_data.getStringValue("sessions_per_minute_limit_verdict") == "Accept";
+}
+
+unsigned int
+getRegistrationThreadTimeout()
+{
+    return conf_data.getNumericalValue("registration_thread_timeout_msec");
+}
+
+unsigned int
+getReqProccessingTimeout()
+{
+    return conf_data.getNumericalValue("req_proccessing_timeout_msec");
+}
+
+unsigned int
+getReqHeaderThreadTimeout()
+{
+    return conf_data.getNumericalValue("req_header_thread_timeout_msec");
+}
+
+unsigned int
+getReqBodyThreadTimeout()
+{
+    return conf_data.getNumericalValue("req_body_thread_timeout_msec");
+}
+
+unsigned int
+getResProccessingTimeout()
+{
+    return conf_data.getNumericalValue("res_proccessing_timeout_msec");
+}
+
+unsigned int
+getResHeaderThreadTimeout()
+{
+    return conf_data.getNumericalValue("res_header_thread_timeout_msec");
+}
+
+unsigned int
+getResBodyThreadTimeout()
+{
+    return conf_data.getNumericalValue("res_body_thread_timeout_msec");
+}
+
+unsigned int
+getWaitingForVerdictThreadTimeout()
+{
+    return conf_data.getNumericalValue("waiting_for_verdict_thread_timeout_msec");
+}
+
+int
+isIPAddress(c_str ip_str)
+{
+    int address_family = AF_INET;
+    for (int i = 0; ip_str[i]; ++i) {
+        if (ip_str[i] == ':') address_family = AF_INET6;
+    }
+
+    char placeholder[16];
+    return inet_pton(address_family, ip_str, placeholder);
+}
+
+struct IpAddress
+{
+    union {
+        struct in_addr ipv4;
+        struct in6_addr ipv6;
+    } ip;
+    bool is_ipv4;
+
+    bool
+    operator<(const IpAddress &other) const
+    {
+        if (is_ipv4 != other.is_ipv4) return is_ipv4 < other.is_ipv4;
+        if (is_ipv4) return memcmp(&ip.ipv4, &other.ip.ipv4, sizeof(struct in_addr)) < 0;
+        return memcmp(&ip.ipv6, &other.ip.ipv6, sizeof(struct in6_addr)) < 0;
+    }
+
+    bool
+    operator<=(const IpAddress &other) const
+    {
+        return !(other < *this);
+    }
+};
+
+static IpAddress
+createIPAddress(c_str ip_str)
+{
+    IpAddress res;
+
+    for (int i = 0; ip_str[i]; ++i) {
+        if (ip_str[i] == ':') {
+            res.is_ipv4 = false;
+            inet_pton(AF_INET6, ip_str, &res.ip.ipv6);
+            return res;
+        }
+    }
+
+    res.is_ipv4 = true;
+    inet_pton(AF_INET, ip_str, &res.ip.ipv4);
+    return res;
+}
+
+static bool
+isIPInRange(const IpAddress &ip, const IpAddress &start, const IpAddress &end)
+{
+    if (ip.is_ipv4 != start.is_ipv4 || ip.is_ipv4 != end.is_ipv4) return false;
+    return start <= ip && ip <= end;
+}
+
+static bool
+isIPInRange(const IpAddress &ip, const string &range)
+{
+    auto delimiter = range.find('-');
+
+    if (delimiter == string::npos) {
+        if (!isIPAddress(range.c_str())) return false;
+        auto address = createIPAddress(range.c_str());
+        return isIPInRange(ip, address, address);
+    }
+
+    auto start_str = range.substr(0, delimiter);
+    if (!isIPAddress(start_str.c_str())) return false;
+    auto start_addr = createIPAddress(start_str.c_str());
+
+    auto end_str = range.substr(delimiter + 1);
+    if (!isIPAddress(end_str.c_str())) return false;
+    auto end_addr = createIPAddress(end_str.c_str());
+
+    return isIPInRange(ip, start_addr, end_addr);
+}
+
+int
+isSkipSource(c_str ip_str)
+{
+    if (!isIPAddress(ip_str)) return 0;
+    auto ip = createIPAddress(ip_str);
+
+    for (auto &range : conf_data.getExcludeSources()) {
+        if (isIPInRange(ip, range)) return 1;
+    }
+
+    return 0;
+}
diff --git a/attachments/nginx/nginx_attachment_util/nginx_attachment_util_ut/CMakeLists.txt b/attachments/nginx/nginx_attachment_util/nginx_attachment_util_ut/CMakeLists.txt
new file mode 100644
index 0000000..01e7009
--- /dev/null
+++ b/attachments/nginx/nginx_attachment_util/nginx_attachment_util_ut/CMakeLists.txt
@@ -0,0 +1,9 @@
+include_directories(${Boost_INCLUDE_DIRS})
+include_directories(${CMAKE_SOURCE_DIR}/components/include)
+include_directories(${CMAKE_SOURCE_DIR}/attachments/nginx/nginx_attachment_util)
+
+add_unit_test(
+    nginx_attachment_util_ut
+    "nginx_attachment_util_ut.cc"
+    "nginx_attachment_util;http_configuration"
+)
diff --git a/attachments/nginx/nginx_attachment_util/nginx_attachment_util_ut/nginx_attachment_util_ut.cc b/attachments/nginx/nginx_attachment_util/nginx_attachment_util_ut/nginx_attachment_util_ut.cc
new file mode 100644
index 0000000..f99d400
--- /dev/null
+++ b/attachments/nginx/nginx_attachment_util/nginx_attachment_util_ut/nginx_attachment_util_ut.cc
@@ -0,0 +1,125 @@
+#include <fstream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "nginx_attachment_util.h"
+#include "cptest.h"
+#include "c_common/ip_common.h"
+
+using namespace std;
+using namespace testing;
+
+class HttpAttachmentUtilTest : public Test
+{
+public:
+    string
+    createIPRangesString(const vector<string> &ip_ranges)
+    {
+        stringstream ip_ranges_string_stream;
+        ip_ranges_string_stream << "[";
+        for (auto iterator = ip_ranges.begin(); iterator < ip_ranges.end() - 1; iterator++) {
+            ip_ranges_string_stream << "\"" << *iterator << "\"" << ", ";
+        }
+        ip_ranges_string_stream << "\"" << ip_ranges.back() << "\"]";
+
+        return ip_ranges_string_stream.str();
+    }
+
+    const string attachment_configuration_file_name = "cp_nano_http_attachment_conf";
+    const vector<string> ip_ranges = { "8.8.8.8", "9.9.9.9-10.10.10.10", "0:0:0:0:0:0:0:2-0:0:0:0:0:0:0:5"};
+    const string static_resources_path = "/dev/shm/static_resources/";
+};
+
+TEST_F(HttpAttachmentUtilTest, GetValidAttachmentConfiguration) +{ + string valid_configuration = + "{\n" + "\"context_values\": {" + "\"clientIp\":
\"1.2.3.4\"," + "\"listeningIp\": \"5.6.7.8\"," + "\"uriPrefix\": \"/abc\"," + "\"hostName\": \"test\"," + "\"httpMethod\": \"GET\"," + "\"listeningPort\": 80" + "}," + "\"is_fail_open_mode_enabled\": 0,\n" + "\"fail_open_timeout\": 1234,\n" + "\"is_fail_open_mode_hold_enabled\": 1,\n" + "\"fail_open_hold_timeout\": 4321,\n" + "\"sessions_per_minute_limit_verdict\": \"Accept\",\n" + "\"max_sessions_per_minute\": 0,\n" + "\"num_of_nginx_ipc_elements\": 200,\n" + "\"keep_alive_interval_msec\": 10000,\n" + "\"dbg_level\": 2,\n" + "\"nginx_inspection_mode\": 1,\n" + "\"operation_mode\": 0,\n" + "\"req_body_thread_timeout_msec\": 155,\n" + "\"req_proccessing_timeout_msec\": 42,\n" + "\"registration_thread_timeout_msec\": 101,\n" + "\"res_proccessing_timeout_msec\": 420,\n" + "\"res_header_thread_timeout_msec\": 1,\n" + "\"res_body_thread_timeout_msec\": 0,\n" + "\"waiting_for_verdict_thread_timeout_msec\": 75,\n" + "\"req_header_thread_timeout_msec\": 10,\n" + "\"ip_ranges\": " + createIPRangesString(ip_ranges) + ",\n" + "\"static_resources_path\": \"" + static_resources_path + "\"" + "}\n"; + ofstream valid_configuration_file(attachment_configuration_file_name); + valid_configuration_file << valid_configuration; + valid_configuration_file.close(); + + EXPECT_EQ(initAttachmentConfig(attachment_configuration_file_name.c_str()), 1); + EXPECT_EQ(getDbgLevel(), 2); + EXPECT_EQ(getStaticResourcesPath(), static_resources_path); + EXPECT_EQ(isFailOpenMode(), 0); + EXPECT_EQ(getFailOpenTimeout(), 1234); + EXPECT_EQ(isFailOpenHoldMode(), 1); + EXPECT_EQ(getFailOpenHoldTimeout(), 4321); + EXPECT_EQ(isFailOpenOnSessionLimit(), 1); + EXPECT_EQ(getMaxSessionsPerMinute(), 0); + EXPECT_EQ(getNumOfNginxIpcElements(), 200); + EXPECT_EQ(getKeepAliveIntervalMsec(), 10000); + EXPECT_EQ(getResProccessingTimeout(), 420); + EXPECT_EQ(getReqProccessingTimeout(), 42); + EXPECT_EQ(getRegistrationThreadTimeout(), 101); + EXPECT_EQ(getReqHeaderThreadTimeout(), 10); + EXPECT_EQ(getReqBodyThreadTimeout(), 155); + EXPECT_EQ(getResHeaderThreadTimeout(), 1); + EXPECT_EQ(getResBodyThreadTimeout(), 0); + EXPECT_EQ(getWaitingForVerdictThreadTimeout(), 75); + EXPECT_EQ(getInspectionMode(), ngx_http_inspection_mode::BLOCKING_THREAD); + + EXPECT_EQ(isDebugContext("1.2.3.4", "5.6.7.8", 80, "GET", "test", "/abc"), 1); + EXPECT_EQ(isDebugContext("1.2.3.9", "5.6.7.8", 80, "GET", "test", "/abc"), 0); + EXPECT_EQ(isDebugContext("1.2.3.4", "5.6.7.9", 80, "GET", "test", "/abc"), 0); + EXPECT_EQ(isDebugContext("1.2.3.4", "5.6.7.8", 88, "GET", "test", "/abc"), 0); + EXPECT_EQ(isDebugContext("1.2.3.4", "5.6.7.8", 80, "POST", "test", "/abc"), 0); + EXPECT_EQ(isDebugContext("1.2.3.4", "5.6.7.8", 80, "GET", "est", "/abc"), 0); + EXPECT_EQ(isDebugContext("1.2.3.4", "5.6.7.8", 80, "GET", "test", "/ab"), 0); + + EXPECT_EQ(isSkipSource("8.8.8.8"), 1); + EXPECT_EQ(isSkipSource("8.8.8.9"), 0); + EXPECT_EQ(isSkipSource("8.8.8.10"), 0); + + EXPECT_EQ(isSkipSource("9.9.9.8"), 0); + EXPECT_EQ(isSkipSource("9.9.9.9"), 1); + EXPECT_EQ(isSkipSource("9.255.0.0"), 1); + EXPECT_EQ(isSkipSource("10.10.10.10"), 1); + EXPECT_EQ(isSkipSource("10.10.10.11"), 0); + + EXPECT_EQ(isSkipSource("0:0:0:0:0:0:0:1"), 0); + EXPECT_EQ(isSkipSource("0:0:0:0:0:0:0:2"), 1); + EXPECT_EQ(isSkipSource("0:0:0:0:0:0:0:4"), 1); + EXPECT_EQ(isSkipSource("0:0:0:0:0:0:0:5"), 1); + EXPECT_EQ(isSkipSource("0:0:0:0:0:0:0:6"), 0); +} + +TEST_F(HttpAttachmentUtilTest, CheckIPAddrValidity) +{ + EXPECT_EQ(isIPAddress("10.0.0.1"), 1); + 
EXPECT_EQ(isIPAddress("2001:0db8:85a3:0000:0000:8a2e:0370:7334"), 1); + + EXPECT_EQ(isIPAddress("333.0.0.1"), 0); + EXPECT_EQ(isIPAddress("2001:0gb8:85a3:0000:0000:8a2e:0370:7334"), 0); +} diff --git a/build_system/CMakeLists.txt b/build_system/CMakeLists.txt new file mode 100644 index 0000000..1bf2242 --- /dev/null +++ b/build_system/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(docker) +add_subdirectory(charts) diff --git a/build_system/charts/CMakeLists.txt b/build_system/charts/CMakeLists.txt new file mode 100644 index 0000000..b1c73e2 --- /dev/null +++ b/build_system/charts/CMakeLists.txt @@ -0,0 +1,6 @@ +add_custom_command( + OUTPUT ${CMAKE_INSTALL_PREFIX}/open-appsec-k8s-nginx-ingress-4.1.4.tgz + COMMAND helm package ${CMAKE_SOURCE_DIR}/build_system/charts/open-appsec-k8s-nginx-ingress && mv ${CMAKE_SOURCE_DIR}/build_system/charts/open-appsec-k8s-nginx-ingress-4.1.4.tgz ${CMAKE_INSTALL_PREFIX}/open-appsec-k8s-nginx-ingress-4.1.4.tgz +) + +add_custom_target(charts DEPENDS ${CMAKE_INSTALL_PREFIX}/open-appsec-k8s-nginx-ingress-4.1.4.tgz) diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/.helmignore b/build_system/charts/open-appsec-k8s-nginx-ingress/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/CHANGELOG.md b/build_system/charts/open-appsec-k8s-nginx-ingress/CHANGELOG.md new file mode 100644 index 0000000..f3f44c3 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/CHANGELOG.md @@ -0,0 +1,375 @@ +# Changelog + +This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org). + +### 4.0.18 +"[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build" +"[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build" +"[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge" +"[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241" +"[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts" +"[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error" +"[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation" +"[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric" +"[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code." 
+"[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image" +"[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests" +"[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint" +"[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1" +"[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0" +"[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint" +"[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial" +"[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera" +"[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment" +"[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation" +"[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell" +"[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor" +"[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations" +"[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0" +"[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account" +"[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description" +"[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests" +"[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values" +"[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1" +"[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs" +"[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits" +"[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations" +"[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT" + + +### 4.0.15 + +- [8120] https://github.com/kubernetes/ingress-nginx/pull/8120 Update go in runner and release v1.1.1 +- [8119] https://github.com/kubernetes/ingress-nginx/pull/8119 Update to go v1.17.6 +- [8118] https://github.com/kubernetes/ingress-nginx/pull/8118 Remove deprecated libraries, update other libs +- [8117] https://github.com/kubernetes/ingress-nginx/pull/8117 Fix codegen errors +- [8115] https://github.com/kubernetes/ingress-nginx/pull/8115 chart/ghaction: set the correct permission to have access to push a release +- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparision of P… +- [8088] https://github.com/kubernetes/ingress-nginx/pull/8088 Fix Edit this page link to use main branch +- [8072] 
https://github.com/kubernetes/ingress-nginx/pull/8072 Expose GeoIP2 Continent code as variable +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 docs(charts): using helm-docs for chart +- [8058] https://github.com/kubernetes/ingress-nginx/pull/8058 Bump github.com/spf13/cobra from 1.2.1 to 1.3.0 +- [8054] https://github.com/kubernetes/ingress-nginx/pull/8054 Bump google.golang.org/grpc from 1.41.0 to 1.43.0 +- [8051] https://github.com/kubernetes/ingress-nginx/pull/8051 align bug report with feature request regarding kind documentation +- [8046] https://github.com/kubernetes/ingress-nginx/pull/8046 Report expired certificates (#8045) +- [8044] https://github.com/kubernetes/ingress-nginx/pull/8044 remove G109 check till gosec resolves issues +- [8042] https://github.com/kubernetes/ingress-nginx/pull/8042 docs_multiple_instances_one_cluster_ticket_7543 +- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executible name +- [8035] https://github.com/kubernetes/ingress-nginx/pull/8035 Comment busy owners +- [8029] https://github.com/kubernetes/ingress-nginx/pull/8029 Add stream-snippet as a ConfigMap and Annotation option +- [8023] https://github.com/kubernetes/ingress-nginx/pull/8023 fix nginx compilation flags +- [8021] https://github.com/kubernetes/ingress-nginx/pull/8021 Disable default modsecurity_rules_file if modsecurity-snippet is specified +- [8019] https://github.com/kubernetes/ingress-nginx/pull/8019 Revise main documentation page +- [8018] https://github.com/kubernetes/ingress-nginx/pull/8018 Preserve order of plugin invocation +- [8015] https://github.com/kubernetes/ingress-nginx/pull/8015 Add newline indenting to admission webhook annotations +- [8014] https://github.com/kubernetes/ingress-nginx/pull/8014 Add link to example error page manifest in docs +- [8009] https://github.com/kubernetes/ingress-nginx/pull/8009 Fix spelling in documentation and top-level files +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml +- [8003] https://github.com/kubernetes/ingress-nginx/pull/8003 Minor improvements (formatting, consistency) in install guide +- [8001] https://github.com/kubernetes/ingress-nginx/pull/8001 fix: go-grpc Dockerfile +- [7999] https://github.com/kubernetes/ingress-nginx/pull/7999 images: use k8s-staging-test-infra/gcb-docker-gcloud +- [7996] https://github.com/kubernetes/ingress-nginx/pull/7996 doc: improvement +- [7983] https://github.com/kubernetes/ingress-nginx/pull/7983 Fix a couple of misspellings in the annotations documentation. 
+- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 allow set annotations for admission Jobs +- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to defaul server +- [7975] https://github.com/kubernetes/ingress-nginx/pull/7975 add legacy version update v0.50.0 to main changelog +- [7972] https://github.com/kubernetes/ingress-nginx/pull/7972 updated service upstream definition + +### 4.0.14 + +- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 Using helm-docs to populate values table in README.md + +### 4.0.13 + +- [8008] https://github.com/kubernetes/ingress-nginx/pull/8008 Add relabelings in controller-servicemonitor.yaml + +### 4.0.12 + +- [7978] https://github.com/kubernetes/ingress-nginx/pull/7979 Support custom annotations in admissions Jobs + +### 4.0.11 + +- [7873] https://github.com/kubernetes/ingress-nginx/pull/7873 Makes the [appProtocol](https://kubernetes.io/docs/concepts/services-networking/_print/#application-protocol) field optional. + +### 4.0.10 + +- [7964] https://github.com/kubernetes/ingress-nginx/pull/7964 Update controller version to v1.1.0 + +### 4.0.9 + +- [6992] https://github.com/kubernetes/ingress-nginx/pull/6992 Add ability to specify labels for all resources + +### 4.0.7 + +- [7923] https://github.com/kubernetes/ingress-nginx/pull/7923 Release v1.0.5 of ingress-nginx +- [7806] https://github.com/kubernetes/ingress-nginx/pull/7806 Choice option for internal/external loadbalancer type service + +### 4.0.6 + +- [7804] https://github.com/kubernetes/ingress-nginx/pull/7804 Release v1.0.4 of ingress-nginx +- [7651] https://github.com/kubernetes/ingress-nginx/pull/7651 Support ipFamilyPolicy and ipFamilies fields in Helm Chart +- [7798] https://github.com/kubernetes/ingress-nginx/pull/7798 Exoscale: use HTTP Healthcheck mode +- [7793] https://github.com/kubernetes/ingress-nginx/pull/7793 Update kube-webhook-certgen to v1.1.1 + +### 4.0.5 + +- [7740] https://github.com/kubernetes/ingress-nginx/pull/7740 Release v1.0.3 of ingress-nginx + +### 4.0.3 + +- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx + +### 4.0.2 + +- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx + +### 4.0.1 + +- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx + +### 3.34.0 + +- [7256] https://github.com/kubernetes/ingress-nginx/pull/7256 Add namespace field in the namespace scoped resource templates + +### 3.33.0 + +- [7164] https://github.com/kubernetes/ingress-nginx/pull/7164 Update nginx to v1.20.1 + +### 3.32.0 + +- [7117] https://github.com/kubernetes/ingress-nginx/pull/7117 Add annotations for HPA + +### 3.31.0 + +- [7137] https://github.com/kubernetes/ingress-nginx/pull/7137 Add support for custom probes + +### 3.30.0 + +- [#7092](https://github.com/kubernetes/ingress-nginx/pull/7092) Removes the possibility of using localhost in ExternalNames as endpoints + +### 3.29.0 + +- [X] [#6945](https://github.com/kubernetes/ingress-nginx/pull/7020) Add option to specify job label for ServiceMonitor + +### 3.28.0 + +- [ ] [#6900](https://github.com/kubernetes/ingress-nginx/pull/6900) Support existing PSPs + +### 3.27.0 + +- Update ingress-nginx v0.45.0 + +### 3.26.0 + +- [X] [#6979](https://github.com/kubernetes/ingress-nginx/pull/6979) Changed servicePort value for metrics + +### 3.25.0 + +- [X] [#6957](https://github.com/kubernetes/ingress-nginx/pull/6957) Add ability to specify automountServiceAccountToken + 
+### 3.24.0 + +- [X] [#6908](https://github.com/kubernetes/ingress-nginx/pull/6908) Add volumes to default-backend deployment + +### 3.23.0 + +- Update ingress-nginx v0.44.0 + +### 3.22.0 + +- [X] [#6802](https://github.com/kubernetes/ingress-nginx/pull/6802) Add value for configuring a custom Diffie-Hellman parameters file +- [X] [#6815](https://github.com/kubernetes/ingress-nginx/pull/6815) Allow use of numeric namespaces in helm chart + +### 3.21.0 + +- [X] [#6783](https://github.com/kubernetes/ingress-nginx/pull/6783) Add custom annotations to ScaledObject +- [X] [#6761](https://github.com/kubernetes/ingress-nginx/pull/6761) Adding quotes in the serviceAccount name in Helm values +- [X] [#6767](https://github.com/kubernetes/ingress-nginx/pull/6767) Remove ClusterRole when scope option is enabled +- [X] [#6785](https://github.com/kubernetes/ingress-nginx/pull/6785) Update kube-webhook-certgen image to v1.5.1 + +### 3.20.1 + +- Do not create KEDA in case of DaemonSets. +- Fix KEDA v2 definition + +### 3.20.0 + +- [X] [#6730](https://github.com/kubernetes/ingress-nginx/pull/6730) Do not create HPA for defaultBackend if not enabled. + +### 3.19.0 + +- Update ingress-nginx v0.43.0 + +### 3.18.0 + +- [X] [#6688](https://github.com/kubernetes/ingress-nginx/pull/6688) Allow volume-type emptyDir in controller podsecuritypolicy +- [X] [#6691](https://github.com/kubernetes/ingress-nginx/pull/6691) Improve parsing of helm parameters + +### 3.17.0 + +- Update ingress-nginx v0.42.0 + +### 3.16.1 + +- Fix chart-releaser action + +### 3.16.0 + +- [X] [#6646](https://github.com/kubernetes/ingress-nginx/pull/6646) Added LoadBalancerIP value for internal service + +### 3.15.1 + +- Fix chart-releaser action + +### 3.15.0 + +- [X] [#6586](https://github.com/kubernetes/ingress-nginx/pull/6586) Fix 'maxmindLicenseKey' location in values.yaml + +### 3.14.0 + +- [X] [#6469](https://github.com/kubernetes/ingress-nginx/pull/6469) Allow custom service names for controller and backend + +### 3.13.0 + +- [X] [#6544](https://github.com/kubernetes/ingress-nginx/pull/6544) Fix default backend HPA name variable + +### 3.12.0 + +- [X] [#6514](https://github.com/kubernetes/ingress-nginx/pull/6514) Remove helm2 support and update docs + +### 3.11.1 + +- [X] [#6505](https://github.com/kubernetes/ingress-nginx/pull/6505) Reorder HPA resource list to work with GitOps tooling + +### 3.11.0 + +- Support Keda Autoscaling + +### 3.10.1 + +- Fix regression introduced in 0.41.0 with external authentication + +### 3.10.0 + +- Fix routing regression introduced in 0.41.0 with PathType Exact + +### 3.9.0 + +- [X] [#6423](https://github.com/kubernetes/ingress-nginx/pull/6423) Add Default backend HPA autoscaling + +### 3.8.0 + +- [X] [#6395](https://github.com/kubernetes/ingress-nginx/pull/6395) Update jettech/kube-webhook-certgen image +- [X] [#6377](https://github.com/kubernetes/ingress-nginx/pull/6377) Added loadBalancerSourceRanges for internal lbs +- [X] [#6356](https://github.com/kubernetes/ingress-nginx/pull/6356) Add securitycontext settings on defaultbackend +- [X] [#6401](https://github.com/kubernetes/ingress-nginx/pull/6401) Fix controller service annotations +- [X] [#6403](https://github.com/kubernetes/ingress-nginx/pull/6403) Initial helm chart changelog + +### 3.7.1 + +- [X] [#6326](https://github.com/kubernetes/ingress-nginx/pull/6326) Fix liveness and readiness probe path in daemonset chart + +### 3.7.0 + +- [X] [#6316](https://github.com/kubernetes/ingress-nginx/pull/6316) Numerals in podAnnotations in quotes 
[#6315](https://github.com/kubernetes/ingress-nginx/issues/6315) + +### 3.6.0 + +- [X] [#6305](https://github.com/kubernetes/ingress-nginx/pull/6305) Add default linux nodeSelector + +### 3.5.1 + +- [X] [#6299](https://github.com/kubernetes/ingress-nginx/pull/6299) Fix helm chart release + +### 3.5.0 + +- [X] [#6260](https://github.com/kubernetes/ingress-nginx/pull/6260) Allow Helm Chart to customize admission webhook's annotations, timeoutSeconds, namespaceSelector, objectSelector and cert files locations + +### 3.4.0 + +- [X] [#6268](https://github.com/kubernetes/ingress-nginx/pull/6268) Update to 0.40.2 in helm chart #6288 + +### 3.3.1 + +- [X] [#6259](https://github.com/kubernetes/ingress-nginx/pull/6259) Release helm chart +- [X] [#6258](https://github.com/kubernetes/ingress-nginx/pull/6258) Fix chart markdown link +- [X] [#6253](https://github.com/kubernetes/ingress-nginx/pull/6253) Release v0.40.0 + +### 3.3.1 + +- [X] [#6233](https://github.com/kubernetes/ingress-nginx/pull/6233) Add admission controller e2e test + +### 3.3.0 + +- [X] [#6203](https://github.com/kubernetes/ingress-nginx/pull/6203) Refactor parsing of key values +- [X] [#6162](https://github.com/kubernetes/ingress-nginx/pull/6162) Add helm chart options to expose metrics service as NodePort +- [X] [#6180](https://github.com/kubernetes/ingress-nginx/pull/6180) Fix helm chart admissionReviewVersions regression +- [X] [#6169](https://github.com/kubernetes/ingress-nginx/pull/6169) Fix Typo in example prometheus rules + +### 3.0.0 + +- [X] [#6167](https://github.com/kubernetes/ingress-nginx/pull/6167) Update chart requirements + +### 2.16.0 + +- [X] [#6154](https://github.com/kubernetes/ingress-nginx/pull/6154) add `topologySpreadConstraint` to controller + +### 2.15.0 + +- [X] [#6087](https://github.com/kubernetes/ingress-nginx/pull/6087) Adding parameter for externalTrafficPolicy in internal controller service spec + +### 2.14.0 + +- [X] [#6104](https://github.com/kubernetes/ingress-nginx/pull/6104) Misc fixes for nginx-ingress chart for better keel and prometheus-operator integration + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 + +### 2.13.0 + +- [X] [#6093](https://github.com/kubernetes/ingress-nginx/pull/6093) Release v0.35.0 +- [X] [#6080](https://github.com/kubernetes/ingress-nginx/pull/6080) Switch images to k8s.gcr.io after Vanity Domain Flip + +### 2.12.1 + +- [X] [#6075](https://github.com/kubernetes/ingress-nginx/pull/6075) Sync helm chart affinity examples + +### 2.12.0 + +- [X] [#6039](https://github.com/kubernetes/ingress-nginx/pull/6039) Add configurable serviceMonitor metricRelabelling and targetLabels +- [X] [#6044](https://github.com/kubernetes/ingress-nginx/pull/6044) Fix YAML linting + +### 2.11.3 + +- [X] [#6038](https://github.com/kubernetes/ingress-nginx/pull/6038) Bump chart version PATCH + +### 2.11.2 + +- [X] [#5951](https://github.com/kubernetes/ingress-nginx/pull/5951) Bump chart patch version + +### 2.11.1 + +- [X] [#5900](https://github.com/kubernetes/ingress-nginx/pull/5900) Release helm chart for v0.34.1 + +### 2.11.0 + +- [X] [#5879](https://github.com/kubernetes/ingress-nginx/pull/5879) Update helm chart for v0.34.0 +- [X] [#5671](https://github.com/kubernetes/ingress-nginx/pull/5671) Make liveness probe more fault tolerant than readiness probe + +### 2.10.0 + +- [X] [#5843](https://github.com/kubernetes/ingress-nginx/pull/5843) Update jettech/kube-webhook-certgen image + +### 2.9.1 + +- [X] 
[#5823](https://github.com/kubernetes/ingress-nginx/pull/5823) Add quoting to sysctls because numeric values need to be presented as strings (#5823) + +### 2.9.0 + +- [X] [#5795](https://github.com/kubernetes/ingress-nginx/pull/5795) Use fully qualified images to avoid cri-o issues + + +### TODO + +Keep building the changelog using *git log charts* checking the tag diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/Chart.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/Chart.yaml new file mode 100644 index 0000000..59c42f7 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + artifacthub.io/changes: | + - "[8459](https://github.com/kubernetes/ingress-nginx/pull/8459) Update default allowed CORS headers" + - "[8202](https://github.com/kubernetes/ingress-nginx/pull/8202) disable modsecurity on error page" + - "[8178](https://github.com/kubernetes/ingress-nginx/pull/8178) Add header Host into mirror annotations" + - "[8213](https://github.com/kubernetes/ingress-nginx/pull/8213) feat: always set auth cookie" + - "[8548](https://github.com/kubernetes/ingress-nginx/pull/8548) Implement reporting status classes in metrics" + - "[8612](https://github.com/kubernetes/ingress-nginx/pull/8612) move so files under /etc/nginx/modules" + - "[8624](https://github.com/kubernetes/ingress-nginx/pull/8624) Add patch to remove root and alias directives" + - "[8623](https://github.com/kubernetes/ingress-nginx/pull/8623) Improve path rule" + artifacthub.io/prerelease: "false" +apiVersion: v2 +appVersion: 1.2.1 +keywords: +- ingress +- nginx +kubeVersion: '>=1.19.0-0' +name: open-appsec-k8s-nginx-ingress +sources: +- https://github.com/kubernetes/ingress-nginx +type: application +version: 4.1.4 diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/OWNERS b/build_system/charts/open-appsec-k8s-nginx-ingress/OWNERS new file mode 100644 index 0000000..6b7e049 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs: https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md + +approvers: +- ingress-nginx-helm-maintainers + +reviewers: +- ingress-nginx-helm-reviewers + +labels: +- area/helm diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/README.md b/build_system/charts/open-appsec-k8s-nginx-ingress/README.md new file mode 100644 index 0000000..0647db9 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/README.md @@ -0,0 +1,487 @@ +# ingress-nginx + +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +![Version: 4.0.19](https://img.shields.io/badge/Version-4.0.19-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.1.3](https://img.shields.io/badge/AppVersion-1.1.3-informational?style=flat-square) + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
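+
+For example, a minimal Ingress resource that selects this controller through `ingressClassName` could look like the sketch below (the host, service name, and port are illustrative placeholders, not values shipped with this chart):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: example
+  # Older setups may instead rely on the annotation:
+  # annotations:
+  #   kubernetes.io/ingress.class: nginx
+spec:
+  ingressClassName: nginx
+  rules:
+    - host: example.local
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: example-service
+                port:
+                  number: 80
+```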
+
+## Prerequisites
+
+- Chart version 3.x.x: Kubernetes v1.16+
+- Chart version 4.x.x and above: Kubernetes v1.19+
+
+## Get Repo Info
+
+```console
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+```
+
+## Install Chart
+
+**Important:** only helm3 is supported
+
+```console
+helm install [RELEASE_NAME] ingress-nginx/ingress-nginx
+```
+
+The command deploys ingress-nginx on the Kubernetes cluster in the default configuration.
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] [CHART] --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### Upgrading With Zero Downtime in Production
+
+By default, the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
+
+### Migrating from stable/nginx-ingress
+
+There are two main ways to migrate a release from `stable/nginx-ingress` to the `ingress-nginx/ingress-nginx` chart:
+
+1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one
+1. For critical services in production that require zero-downtime, you will want to:
+    1. [Install](#install-chart) a second Ingress controller
+    1. Redirect your DNS traffic from the old controller to the new controller
+    1. Log traffic from both controllers during this changeover
+    1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
+    1. For details on all of these steps, see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production)
+
+Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts.
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands:
+
+```console
+helm show values ingress-nginx/ingress-nginx
+```
+
+### PodDisruptionBudget
+
+Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one,
+else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info.
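+
+For example, a values override along these lines (a minimal sketch, assuming `controller.minAvailable` is consumed by the chart's PodDisruptionBudget template) runs two controller replicas so that the PodDisruptionBudget is actually rendered:
+
+```yaml
+controller:
+  replicaCount: 2
+  # Assumed to feed the controller PodDisruptionBudget, per the chart values
+  minAvailable: 1
+```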
+
+### Prometheus Metrics
+
+The Nginx ingress controller can export Prometheus metrics by setting `controller.metrics.enabled` to `true`.
+
+You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
+Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled` and set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. The `release=prometheus` label should match the label configured in the Prometheus ServiceMonitor (see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`).
+
+### ingress-nginx nginx\_status page/stats server
+
+Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller:
+
+- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
+- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost.
+  You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server
+
+### ExternalDNS Service Configuration
+
+Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service:
+
+```yaml
+controller:
+  service:
+    annotations:
+      external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com.
+```
+
+### AWS L7 ELB with SSL Termination
+
+Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/main/deploy/aws/l7/service-l7.yaml):
+
+```yaml
+controller:
+  service:
+    targetPorts:
+      http: http
+      https: http
+    annotations:
+      service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX
+      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http"
+      service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https"
+      service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
+```
+
+### AWS route53-mapper
+
+To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label:
+
+```yaml
+controller:
+  service:
+    labels:
+      dns: "route53"
+    annotations:
+      domainName: "kubernetes-example.com"
+```
+
+### Additional Internal Load Balancer
+
+This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
+
+By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL.
+
+You'll need to set both of the following values:
+
+`controller.service.internal.enabled`
+`controller.service.internal.annotations`
+
+If one of them is missing, the internal load balancer will not be deployed. For example, if you set `controller.service.internal.enabled=true` but provide no annotations, no action will be taken.
+
+`controller.service.internal.annotations` varies with the cloud service you're using.
+
+Example for AWS:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal ELB
+        service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+        # Any other annotation can be declared here.
+```
+
+Example for GCE:
+
+```yaml
+controller:
+  service:
+    internal:
+      enabled: true
+      annotations:
+        # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
+        # For GKE versions 1.17 and later
+        networking.gke.io/load-balancer-type: "Internal"
+        # For earlier versions
+        # cloud.google.com/load-balancer-type: "Internal"
+
+        # Any other annotation can be declared here.
+```
+
+Example for Azure:
+
+```yaml
+controller:
+  service:
+    annotations:
+      # Create internal LB
+      service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+      # Any other annotation can be declared here.
+```
+
+Example for Oracle Cloud Infrastructure:
+
+```yaml
+controller:
+  service:
+    annotations:
+      # Create internal LB
+      service.beta.kubernetes.io/oci-load-balancer-internal: "true"
+      # Any other annotation can be declared here.
+```
+
+A use case for this scenario is a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one Kubernetes Ingress object.
+
+Optionally, you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
+
+### Ingress Admission Webhooks
+
+With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
+**This feature is enabled by default since 0.31.0.**
+
+nginx-ingress-controller versions 0.25.* only work with Kubernetes 1.14+; version 0.26 fixes [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521).
+
+### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
+
+If you are upgrading this chart from a version between 0.31.0 and 1.2.2, then you may get an error like this:
+
+```console
+Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable
+```
+
+Details of how and why are in [this issue](https://github.com/helm/charts/pull/13646), but to resolve this you can set `xxxx.service.omitClusterIP` to `true`, where `xxxx` is the service referenced in the error.
+
+As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered.
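+
+For older chart versions where the workaround above is still needed, the override would look like this (a sketch assuming the controller service is the one named in the error message):
+
+```yaml
+controller:
+  service:
+    omitClusterIP: true
+```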
+ +## Requirements + +Kubernetes: `>=1.19.0-0` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| commonLabels | object | `{}` | | +| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers | +| controller.admissionWebhooks.annotations | object | `{}` | | +| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | | +| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | | +| controller.admissionWebhooks.enabled | bool | `true` | | +| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | | +| controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | | +| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks | +| controller.admissionWebhooks.namespaceSelector | object | `{}` | | +| controller.admissionWebhooks.objectSelector | object | `{}` | | +| controller.admissionWebhooks.patch.enabled | bool | `true` | | +| controller.admissionWebhooks.patch.fsGroup | int | `2000` | | +| controller.admissionWebhooks.patch.image.digest | string | `"sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660"` | | +| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | | +| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.admissionWebhooks.patch.image.registry | string | `"k8s.gcr.io"` | | +| controller.admissionWebhooks.patch.image.tag | string | `"v1.1.1"` | | +| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources | +| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | | +| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job | +| controller.admissionWebhooks.patch.runAsUser | int | `2000` | | +| controller.admissionWebhooks.patch.tolerations | list | `[]` | | +| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | | +| controller.admissionWebhooks.port | int | `8443` | | +| controller.admissionWebhooks.service.annotations | object | `{}` | | +| controller.admissionWebhooks.service.externalIPs | list | `[]` | | +| controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | | +| controller.admissionWebhooks.service.servicePort | int | `443` | | +| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | | +| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes | +| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. 
Global snippets in ConfigMap are still respected | +| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet | +| controller.autoscaling.behavior | object | `{}` | | +| controller.autoscaling.enabled | bool | `false` | | +| controller.autoscaling.maxReplicas | int | `11` | | +| controller.autoscaling.minReplicas | int | `1` | | +| controller.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| controller.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| controller.autoscalingTemplate | list | `[]` | | +| controller.config | object | `{}` | Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ | +| controller.configAnnotations | object | `{}` | Annotations to be added to the controller config configuration configmap. | +| controller.configMapNamespace | string | `""` | Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) | +| controller.containerName | string | `"controller"` | Configures the controller container name | +| controller.containerPort | object | `{"http":80,"https":443}` | Configures the ports that the nginx-controller listens on | +| controller.customTemplate.configMapKey | string | `""` | | +| controller.customTemplate.configMapName | string | `""` | | +| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. | +| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. | +| controller.electionID | string | `"ingress-controller-leader"` | Election ID to use for status update | +| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. | +| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| controller.extraArgs | object | `{}` | Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use | +| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. | +| controller.extraEnvs | list | `[]` | Additional environment variables to set | +| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. | +| controller.extraModules | list | `[]` | | +| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. | +| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. | +| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the `hostNetwork: true` mode. | +| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. | +| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. 
Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged | +| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not | +| controller.hostPort.ports.http | int | `80` | 'hostPort' http port | +| controller.hostPort.ports.https | int | `443` | 'hostPort' https port | +| controller.hostname | object | `{}` | Optionally customize the pod hostname. | +| controller.image.allowPrivilegeEscalation | bool | `true` | | +| controller.image.digest | string | `"sha256:31f47c1e202b39fadecf822a9b76370bd4baed199a005b3e7d4d1455f4fd3fe2"` | | +| controller.image.image | string | `"ingress-nginx/controller"` | | +| controller.image.pullPolicy | string | `"IfNotPresent"` | | +| controller.image.registry | string | `"k8s.gcr.io"` | | +| controller.image.runAsUser | int | `101` | | +| controller.image.tag | string | `"v1.1.3"` | | +| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation | +| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). | +| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass | +| controller.ingressClassResource.default | bool | `false` | Is this the default ingressClass for the cluster | +| controller.ingressClassResource.enabled | bool | `true` | Is this ingressClass enabled or not | +| controller.ingressClassResource.name | string | `"nginx"` | Name of the ingressClass | +| controller.ingressClassResource.parameters | object | `{}` | Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters. | +| controller.keda.apiVersion | string | `"keda.sh/v1alpha1"` | | +| controller.keda.behavior | object | `{}` | | +| controller.keda.cooldownPeriod | int | `300` | | +| controller.keda.enabled | bool | `false` | | +| controller.keda.maxReplicas | int | `11` | | +| controller.keda.minReplicas | int | `1` | | +| controller.keda.pollingInterval | int | `30` | | +| controller.keda.restoreToOriginalReplicaCount | bool | `false` | | +| controller.keda.scaledObject.annotations | object | `{}` | | +| controller.keda.triggers | list | `[]` | | +| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` | +| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels | +| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. 
| +| controller.livenessProbe.failureThreshold | int | `5` | | +| controller.livenessProbe.httpGet.path | string | `"/healthz"` | | +| controller.livenessProbe.httpGet.port | int | `10254` | | +| controller.livenessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.livenessProbe.initialDelaySeconds | int | `10` | | +| controller.livenessProbe.periodSeconds | int | `10` | | +| controller.livenessProbe.successThreshold | int | `1` | | +| controller.livenessProbe.timeoutSeconds | int | `1` | | +| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. | +| controller.metrics.enabled | bool | `false` | | +| controller.metrics.port | int | `10254` | | +| controller.metrics.prometheusRule.additionalLabels | object | `{}` | | +| controller.metrics.prometheusRule.enabled | bool | `false` | | +| controller.metrics.prometheusRule.rules | list | `[]` | | +| controller.metrics.service.annotations | object | `{}` | | +| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available | +| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | | +| controller.metrics.service.servicePort | int | `10254` | | +| controller.metrics.service.type | string | `"ClusterIP"` | | +| controller.metrics.serviceMonitor.additionalLabels | object | `{}` | | +| controller.metrics.serviceMonitor.enabled | bool | `false` | | +| controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | | +| controller.metrics.serviceMonitor.namespace | string | `""` | | +| controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | | +| controller.metrics.serviceMonitor.relabelings | list | `[]` | | +| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | | +| controller.metrics.serviceMonitor.targetLabels | list | `[]` | | +| controller.minAvailable | int | `1` | | +| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready | +| controller.name | string | `"controller"` | | +| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment | +| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods | +| controller.podLabels | object | `{}` | Labels to add to the pod container metadata | +| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods | +| controller.priorityClassName | string | `""` | | +| controller.proxySetHeaders | object | `{}` | Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers | +| controller.publishService | object | `{"enabled":true,"pathOverride":""}` | Allows customization of the source of the IP address or FQDN to report in the ingress status field. By default, it reads the information provided by the service. If disable, the status field reports the IP address of the node or nodes where an ingress controller pod is running. 
| +| controller.publishService.enabled | bool | `true` | Enable 'publishService' or not | +| controller.publishService.pathOverride | string | `""` | Allows overriding of the publish service to bind to Must be / | +| controller.readinessProbe.failureThreshold | int | `3` | | +| controller.readinessProbe.httpGet.path | string | `"/healthz"` | | +| controller.readinessProbe.httpGet.port | int | `10254` | | +| controller.readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| controller.readinessProbe.initialDelaySeconds | int | `10` | | +| controller.readinessProbe.periodSeconds | int | `10` | | +| controller.readinessProbe.successThreshold | int | `1` | | +| controller.readinessProbe.timeoutSeconds | int | `1` | | +| controller.replicaCount | int | `1` | | +| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply | +| controller.resources.requests.cpu | string | `"100m"` | | +| controller.resources.requests.memory | string | `"90Mi"` | | +| controller.scope.enabled | bool | `false` | Enable 'scope' or not | +| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) | +| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. | +| controller.service.annotations | object | `{}` | | +| controller.service.appProtocol | bool | `true` | If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 Will be ignored for Kubernetes versions older than 1.20 | +| controller.service.enableHttp | bool | `true` | | +| controller.service.enableHttps | bool | `true` | | +| controller.service.enabled | bool | `true` | | +| controller.service.external.enabled | bool | `true` | | +| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available | +| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. | +| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). | +| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. | +| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. | +| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. 
Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. | +| controller.service.labels | object | `{}` | | +| controller.service.loadBalancerSourceRanges | list | `[]` | | +| controller.service.nodePorts.http | string | `""` | | +| controller.service.nodePorts.https | string | `""` | | +| controller.service.nodePorts.tcp | object | `{}` | | +| controller.service.nodePorts.udp | object | `{}` | | +| controller.service.ports.http | int | `80` | | +| controller.service.ports.https | int | `443` | | +| controller.service.targetPorts.http | string | `"http"` | | +| controller.service.targetPorts.https | string | `"https"` | | +| controller.service.type | string | `"LoadBalancer"` | | +| controller.shareProcessNamespace | bool | `false` | This can be used for example to signal log rotation using `kill -USR1` from a sidecar. | +| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls | +| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap | +| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready | +| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints | +| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. | +| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap | +| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) | +| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet | +| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without ingressClass annotation/ingressClassName field Overrides value for --watch-ingress-without-class flag of the controller binary Defaults to false | +| defaultBackend.affinity | object | `{}` | | +| defaultBackend.autoscaling.annotations | object | `{}` | | +| defaultBackend.autoscaling.enabled | bool | `false` | | +| defaultBackend.autoscaling.maxReplicas | int | `2` | | +| defaultBackend.autoscaling.minReplicas | int | `1` | | +| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | | +| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. 
See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls | +| defaultBackend.enabled | bool | `false` | | +| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one | +| defaultBackend.extraArgs | object | `{}` | | +| defaultBackend.extraEnvs | list | `[]` | Additional environment variables to set for defaultBackend pods | +| defaultBackend.extraVolumeMounts | list | `[]` | | +| defaultBackend.extraVolumes | list | `[]` | | +| defaultBackend.image.allowPrivilegeEscalation | bool | `false` | | +| defaultBackend.image.image | string | `"defaultbackend-amd64"` | | +| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | | +| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | | +| defaultBackend.image.registry | string | `"k8s.gcr.io"` | | +| defaultBackend.image.runAsNonRoot | bool | `true` | | +| defaultBackend.image.runAsUser | int | `65534` | | +| defaultBackend.image.tag | string | `"1.5"` | | +| defaultBackend.labels | object | `{}` | Labels to be added to the default backend resources | +| defaultBackend.livenessProbe.failureThreshold | int | `3` | | +| defaultBackend.livenessProbe.initialDelaySeconds | int | `30` | | +| defaultBackend.livenessProbe.periodSeconds | int | `10` | | +| defaultBackend.livenessProbe.successThreshold | int | `1` | | +| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.minAvailable | int | `1` | | +| defaultBackend.name | string | `"defaultbackend"` | | +| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment | +| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods | +| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata | +| defaultBackend.podSecurityContext | object | `{}` | Security Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls | +| defaultBackend.port | int | `8080` | | +| defaultBackend.priorityClassName | string | `""` | | +| defaultBackend.readinessProbe.failureThreshold | int | `6` | | +| defaultBackend.readinessProbe.initialDelaySeconds | int | `0` | | +| defaultBackend.readinessProbe.periodSeconds | int | `5` | | +| defaultBackend.readinessProbe.successThreshold | int | `1` | | +| defaultBackend.readinessProbe.timeoutSeconds | int | `5` | | +| defaultBackend.replicaCount | int | `1` | | +| defaultBackend.resources | object | `{}` | | +| defaultBackend.service.annotations | object | `{}` | | +| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available | +| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | | +| defaultBackend.service.servicePort | int | `80` | | +| defaultBackend.service.type | string | `"ClusterIP"` | | +| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | | +| defaultBackend.serviceAccount.create | bool | `true` | | +| defaultBackend.serviceAccount.name | string | `""` | | +| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints | +| dhParam | string | `nil` | A base64-encoded Diffie-Hellman parameter. 
This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` | +| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials | +| podSecurityPolicy.enabled | bool | `false` | | +| rbac.create | bool | `true` | | +| rbac.scope | bool | `false` | | +| revisionHistoryLimit | int | `10` | Rollback limit | +| serviceAccount.annotations | object | `{}` | Annotations for the controller service account | +| serviceAccount.automountServiceAccountToken | bool | `true` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `""` | | +| tcp | object | `{}` | TCP service key:value pairs | +| udp | object | `{}` | UDP service key:value pairs | + diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/README.md.gotmpl b/build_system/charts/open-appsec-k8s-nginx-ingress/README.md.gotmpl new file mode 100644 index 0000000..5cd9e59 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/README.md.gotmpl @@ -0,0 +1,235 @@ +{{ template "chart.header" . }} +[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer + +{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }} + +To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources. + +This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Chart version 3.x.x: Kubernetes v1.16+ +- Chart version 4.x.x and above: Kubernetes v1.19+ + +## Get Repo Info + +```console +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +``` + +## Install Chart + +**Important:** only helm3 is supported + +```console +helm install [RELEASE_NAME] ingress-nginx/ingress-nginx +``` + +The command deploys ingress-nginx on the Kubernetes cluster in the default configuration. + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading With Zero Downtime in Production + +By default the ingress-nginx controller has service interruptions whenever it's pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8). + +### Migrating from stable/nginx-ingress + +There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart: + +1. For Nginx Ingress controllers used for non-critical services, the easiest method is to [uninstall](#uninstall-chart) the old release and [install](#install-chart) the new one +1. For critical services in production that require zero-downtime, you will want to: + 1. 
[Install](#install-chart) a second Ingress controller + 1. Redirect your DNS traffic from the old controller to the new controller + 1. Log traffic from both controllers during this changeover + 1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it + 1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production) + +Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values ingress-nginx/ingress-nginx +``` + +### PodDisruptionBudget + +Note that the PodDisruptionBudget resource will only be defined if the replicaCount is greater than one, +else it would make it impossible to evacuate a node. See [gh issue #7127](https://github.com/helm/charts/issues/7127) for more info. + +### Prometheus Metrics + +The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`. + +You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`. +Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled`. And set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the prometheus servicemonitor ( see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`) + +### ingress-nginx nginx\_status page/stats server + +Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller: + +- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed +- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost. + You can use `curl --unix-socket /tmp/nginx-status-server.sock http://localhost/nginx_status` inside the controller container to access it locally, or use the snippet from [nginx-ingress changelog](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230) to re-enable the http server + +### ExternalDNS Service Configuration + +Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service: + +```yaml +controller: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: kubernetes-example.com. 
+``` + +### AWS L7 ELB with SSL Termination + +Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/main/deploy/aws/l7/service-l7.yaml): + +```yaml +controller: + service: + targetPorts: + http: http + https: http + annotations: + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:XX-XXXX-X:XXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "http" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "https" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' +``` + +### AWS route53-mapper + +To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label: + +```yaml +controller: + service: + labels: + dns: "route53" + annotations: + domainName: "kubernetes-example.com" +``` + +### Additional Internal Load Balancer + +This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application. + +By default, the ingress object will point to the external load balancer address, but if correctly configured, you can make use of the internal one if the URL you are looking up resolves to the internal load balancer's URL. + +You'll need to set both the following values: + +`controller.service.internal.enabled` +`controller.service.internal.annotations` + +If one of them is missing the internal load balancer will not be deployed. Example you may have `controller.service.internal.enabled=true` but no annotations set, in this case no action will be taken. + +`controller.service.internal.annotations` varies with the cloud service you're using. + +Example for AWS: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal ELB + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for GCE: + +```yaml +controller: + service: + internal: + enabled: true + annotations: + # Create internal LB. More informations: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing + # For GKE versions 1.17 and later + networking.gke.io/load-balancer-type: "Internal" + # For earlier versions + # cloud.google.com/load-balancer-type: "Internal" + + # Any other annotation can be declared here. +``` + +Example for Azure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +Example for Oracle Cloud Infrastructure: + +```yaml +controller: + service: + annotations: + # Create internal LB + service.beta.kubernetes.io/oci-load-balancer-internal: "true" + # Any other annotation can be declared here. +``` + +An use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object. + +Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`. 
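+
+For example (a minimal sketch; the address below is a documentation placeholder and must be replaced with a static IP your cloud provider can actually assign):
+
+```yaml
+controller:
+  service:
+    loadBalancerIP: 203.0.113.10
+```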
+ +### Ingress Admission Webhooks + +With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster. +**This feature is enabled by default since 0.31.0.** + +With nginx-ingress-controller in 0.25.* work only with kubernetes 1.14+, 0.26 fix [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521) + +### Helm Error When Upgrading: spec.clusterIP: Invalid value: "" + +If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this: + +```console +Error: UPGRADE FAILED: Service "?????-controller" is invalid: spec.clusterIP: Invalid value: "": field is immutable +``` + +Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13646) but to resolve this you can set `xxxx.service.omitClusterIP` to `true` where `xxxx` is the service referenced in the error. + +As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered. + +{{ template "chart.requirementsSection" . }} + +{{ template "chart.valuesSection" . }} + +{{ template "helm-docs.versionFooter" . }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/controller-custom-ingressclass-flags.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/controller-custom-ingressclass-flags.yaml new file mode 100644 index 0000000..b28a232 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/controller-custom-ingressclass-flags.yaml @@ -0,0 +1,7 @@ +controller: + watchIngressWithoutClass: true + ingressClassResource: + name: custom-nginx + enabled: true + default: true + controllerValue: "k8s.io/custom-nginx" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-customconfig-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-customconfig-values.yaml new file mode 100644 index 0000000..076e324 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-customconfig-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP + + config: + use-proxy-protocol: "true" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-customnodeport-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-customnodeport-values.yaml new file mode 100644 index 0000000..cfc545f --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-customnodeport-values.yaml @@ -0,0 +1,18 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-extra-modules.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-extra-modules.yaml new file mode 100644 index 0000000..883943c --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-extra-modules.yaml @@ -0,0 +1,7 @@ +controller: + kind: DaemonSet + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git 
a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-headers-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-headers-values.yaml new file mode 100644 index 0000000..ff82cd9 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-headers-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-internal-lb-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-internal-lb-values.yaml new file mode 100644 index 0000000..d8948d6 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-internal-lb-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-nodeport-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-nodeport-values.yaml new file mode 100644 index 0000000..6d6605f --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-nodeport-values.yaml @@ -0,0 +1,6 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-podannotations-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-podannotations-values.yaml new file mode 100644 index 0000000..04ac58d --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-podannotations-values.yaml @@ -0,0 +1,13 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-udp-configMapNamespace-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..afb5487 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,16 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-udp-portNamePrefix-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..ad86690 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,14 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-udp-values.yaml 
b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-udp-values.yaml new file mode 100644 index 0000000..7b4d7cb --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-udp-values.yaml @@ -0,0 +1,12 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-values.yaml new file mode 100644 index 0000000..a359a6a --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/daemonset-tcp-values.yaml @@ -0,0 +1,10 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-default-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-default-values.yaml new file mode 100644 index 0000000..e63a7f5 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-default-values.yaml @@ -0,0 +1,6 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-metrics-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-metrics-values.yaml new file mode 100644 index 0000000..1e5190a --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-metrics-values.yaml @@ -0,0 +1,8 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-psp-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-psp-values.yaml new file mode 100644 index 0000000..017b60a --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-psp-values.yaml @@ -0,0 +1,9 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-webhook-and-psp-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-webhook-and-psp-values.yaml new file mode 100644 index 0000000..88aafc6 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-webhook-and-psp-values.yaml @@ -0,0 +1,9 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-webhook-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-webhook-values.yaml new file mode 100644 index 0000000..6e3b371 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deamonset-webhook-values.yaml @@ -0,0 +1,6 @@ +controller: + kind: DaemonSet + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-autoscaling-behavior-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-autoscaling-behavior-values.yaml new file mode 100644 index 0000000..dca3f35 --- /dev/null +++ 
b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-autoscaling-behavior-values.yaml @@ -0,0 +1,14 @@ +controller: + autoscaling: + enabled: true + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Pods + value: 1 + periodSeconds: 180 + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-autoscaling-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-autoscaling-values.yaml new file mode 100644 index 0000000..5314cec --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-autoscaling-values.yaml @@ -0,0 +1,7 @@ +controller: + autoscaling: + enabled: true + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-customconfig-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-customconfig-values.yaml new file mode 100644 index 0000000..e1f022e --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-customconfig-values.yaml @@ -0,0 +1,8 @@ +controller: + config: + use-proxy-protocol: "true" + allowSnippetAnnotations: false + admissionWebhooks: + enabled: false + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-customnodeport-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-customnodeport-values.yaml new file mode 100644 index 0000000..9eda282 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-customnodeport-values.yaml @@ -0,0 +1,16 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: NodePort + nodePorts: + tcp: + 9000: 30090 + udp: + 9001: 30091 + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-default-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-default-values.yaml new file mode 100644 index 0000000..93a393c --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-default-values.yaml @@ -0,0 +1,4 @@ +# Left blank to test default values +controller: + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-extra-modules.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-extra-modules.yaml new file mode 100644 index 0000000..9d11b79 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-extra-modules.yaml @@ -0,0 +1,6 @@ +controller: + service: + type: ClusterIP + extraModules: + - name: opentelemetry + image: busybox diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-headers-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-headers-values.yaml new file mode 100644 index 0000000..665fd48 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-headers-values.yaml @@ -0,0 +1,9 @@ +controller: + admissionWebhooks: + enabled: false + addHeaders: + X-Frame-Options: deny + proxySetHeaders: + X-Forwarded-Proto: https + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-internal-lb-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-internal-lb-values.yaml new file mode 100644 index 0000000..c7f22d6 --- /dev/null +++ 
b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-internal-lb-values.yaml @@ -0,0 +1,9 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: ClusterIP + internal: + enabled: true + annotations: + service.beta.kubernetes.io/aws-load-balancer-internal: "true" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-metrics-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-metrics-values.yaml new file mode 100644 index 0000000..887ed0f --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-metrics-values.yaml @@ -0,0 +1,7 @@ +controller: + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-nodeport-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-nodeport-values.yaml new file mode 100644 index 0000000..84f1f75 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-nodeport-values.yaml @@ -0,0 +1,5 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: NodePort diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-podannotations-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-podannotations-values.yaml new file mode 100644 index 0000000..b65a091 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-podannotations-values.yaml @@ -0,0 +1,12 @@ +controller: + admissionWebhooks: + enabled: false + metrics: + enabled: true + service: + type: ClusterIP + podAnnotations: + prometheus.io/path: /metrics + prometheus.io/port: "10254" + prometheus.io/scheme: http + prometheus.io/scrape: "true" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-psp-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-psp-values.yaml new file mode 100644 index 0000000..e339c69 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-psp-values.yaml @@ -0,0 +1,6 @@ +controller: + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-udp-configMapNamespace-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-udp-configMapNamespace-values.yaml new file mode 100644 index 0000000..141e06b --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-udp-configMapNamespace-values.yaml @@ -0,0 +1,15 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: ClusterIP + tcp: + configMapNamespace: default + udp: + configMapNamespace: default + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-udp-portNamePrefix-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-udp-portNamePrefix-values.yaml new file mode 100644 index 0000000..1f8f260 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-udp-portNamePrefix-values.yaml @@ -0,0 +1,13 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" + +portNamePrefix: "port" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-udp-values.yaml 
b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-udp-values.yaml new file mode 100644 index 0000000..bc29abe --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-udp-values.yaml @@ -0,0 +1,11 @@ +controller: + admissionWebhooks: + enabled: false + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + +udp: + 9001: "default/test:8080" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-values.yaml new file mode 100644 index 0000000..b7f54c0 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-tcp-values.yaml @@ -0,0 +1,7 @@ +controller: + service: + type: ClusterIP + +tcp: + 9000: "default/test:8080" + 9001: "default/test:8080" diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-webhook-and-psp-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-webhook-and-psp-values.yaml new file mode 100644 index 0000000..a829c36 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-webhook-and-psp-values.yaml @@ -0,0 +1,8 @@ +controller: + admissionWebhooks: + enabled: true + service: + type: ClusterIP + +podSecurityPolicy: + enabled: true diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-webhook-resources-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-webhook-resources-values.yaml new file mode 100644 index 0000000..49ebbb0 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-webhook-resources-values.yaml @@ -0,0 +1,23 @@ +controller: + service: + type: ClusterIP + admissionWebhooks: + enabled: true + createSecretJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patchWebhookJob: + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + patch: + enabled: true diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-webhook-values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-webhook-values.yaml new file mode 100644 index 0000000..4f18a70 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/ci/deployment-webhook-values.yaml @@ -0,0 +1,5 @@ +controller: + admissionWebhooks: + enabled: true + service: + type: ClusterIP diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-custom-response.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-custom-response.yaml new file mode 100644 index 0000000..33b9043 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-custom-response.yaml @@ -0,0 +1,40 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata : + name : customresponses.openappsec.io + +spec: + group: openappsec.io + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + mode: + type: string + enum: + - block-page + #- redirect + - response-code-only + message-title: + type: string + message-body: + type: string + http-response-code: + type: integer + minimum: 100 + maximum: 599 + + scope: Cluster + names: + plural: customresponses + singular: customresponse + kind: CustomResponse + shortNames: + - customresponse diff --git 
a/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-exception.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-exception.yaml new file mode 100644 index 0000000..baf4cd0 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-exception.yaml @@ -0,0 +1,75 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: exceptions.openappsec.io + +spec: + group: openappsec.io + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: array + items: + type: object + required: + - action + properties: + action: + type: string + enum: + - skip + - accept + - drop + - suppressLog + sourceIp: + type: array + items: + type: string + url: + type: array + items: + type: string + sourceIdentifier: + type: array + items: + type: string + protectionName: + type: array + items: + type: string + paramValue: + type: array + items: + type: string + paramName: + type: array + items: + type: string + hostName: + type: array + items: + type: string + countryCode: + type: array + items: + type: string + countryName: + type: array + items: + type: string + comment: + type: string + + scope: Cluster + names: + plural: exceptions + singular: exception + kind: Exception + shortNames: + - exception diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-log-trigger.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-log-trigger.yaml new file mode 100644 index 0000000..ab4e4b6 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-log-trigger.yaml @@ -0,0 +1,107 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata : + name : logtriggers.openappsec.io + +spec: + group: openappsec.io + versions: + - name: v1beta1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. 
+ storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + access-control-logging: + type: object + properties: + allow-events: + type: boolean + drop-events: + type: boolean + appsec-logging: + type: object + properties: + detect-events: + type: boolean + prevent-events: + type: boolean + all-web-requests: + type: boolean + additional-suspicious-events-logging: + type: object + properties: + enabled: + type: boolean + minimum-severity: + type: string + enum: + - high + - critical + response-body: + type: boolean + response-code: + type: boolean + extended-logging: + type: object + properties: + url-path: + type: boolean + url-query: + type: boolean + http-headers: + type: boolean + request-body: + type: boolean + log-destination: + type: object + properties: + cloud: + type: boolean + syslog-service: #change to object array + type: array + items: + type: object + properties: + address: + type: string + port: + type: integer + file: + type: string + stdout: + type: object + properties: + format: + type: string + enum: + - json + - json-formatted + cef-service: + type: array + items: + type: object + properties: + address: + type: string + port: + type: integer + proto: + type: string + enum: + - tcp + - udp + + scope: Cluster + names: + plural: logtriggers + singular: logtrigger + kind: LogTrigger + shortNames: + - logtrigger diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-policy.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-policy.yaml new file mode 100644 index 0000000..f0d3be5 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-policy.yaml @@ -0,0 +1,90 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata : + name : policies.openappsec.io + +spec: + group: openappsec.io + versions: + - name: v1beta1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. 
+ storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + default: + type: object + properties: + mode: + type: string + enum: + - prevent-learn + - detect-learn + - prevent + - detect + - inactive + practices: + type: array + items: + type: string + triggers: + type: array + items: + type: string + custom-response: + type: string + source-identifiers: + type: string + trusted-sources: + type: string + exceptions: + type: array + items: + type: string + specific-rules: + type: array + items: + type: object + properties: + host: + type: string + mode: + type: string + enum: + - prevent-learn + - detect-learn + - prevent + - detect + - inactive + practices: + type: array + items: + type: string + triggers: + type: array + items: + type: string + custom-response: + type: string + source-identifiers: + type: string + trusted-sources: + type: string + exceptions: + type: array + items: + type: string + + scope: Cluster + names: + plural: policies + singular: policy + kind: Policy + shortNames: + - policy diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-practice.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-practice.yaml new file mode 100644 index 0000000..99143f7 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-practice.yaml @@ -0,0 +1,135 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata : + name : practices.openappsec.io + +spec: + group: openappsec.io + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + web-attacks: + type: object + properties: + override-mode: + type: string + enum: + - prevent-learn + - detect-learn + - prevent + - detect + - inactive + minimum-confidence: + type: string + enum: + - medium + - high + - critical + max-url-size-bytes: + type: integer + max-object-depth: + type: integer + max-body-size-kb: + type: integer + max-header-size-bytes: + type: integer + protections: + type: object + properties: + csrf-enabled: + type: string + enum: + - prevent-learn + - detect-learn + - prevent + - detect + - inactive + error-disclosure-enabled: + type: string + enum: + - prevent-learn + - detect-learn + - prevent + - detect + - inactive + open-redirect-enabled: + type: string + enum: + - prevent-learn + - detect-learn + - prevent + - detect + - inactive + non-valid-http-methods: + type: boolean + anti-bot: + type: object + properties: + override-mode: + type: string + enum: + - prevent-learn + - detect-learn + - prevent + - detect + - inactive + injected-URIs: + type: array + items: + type: object + properties: + uri: + type: string + validated-URIs: + type: array + items: + type: object + properties: + uri: + type: string + snort-signatures: + type: object + properties: + override-mode: + type: string + enum: + - prevent-learn + - detect-learn + - prevent + - detect + - inactive + configmap: + type: array + items: + type: string + openapi-schema-validation: + type: object + properties: + override-mode: + type: string + enum: + - prevent-learn + - detect-learn + - prevent + - detect + - inactive + configmap: + type: array + items: + type: string + + scope: Cluster + names: + plural: practices + singular: practice + kind: Practice + shortNames: + - practice diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-sources-identifier.yaml 
b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-sources-identifier.yaml new file mode 100644 index 0000000..bf15df6 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-sources-identifier.yaml @@ -0,0 +1,40 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata : + name : sourcesidentifiers.openappsec.io + +spec: + group: openappsec.io + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: array + items: + type: object + properties: + sourceIdentifier: + type: string + enum: + - headerkey + - JWTKey + - cookie + - sourceip + - x-forwarded-for + value: + type: array + items: + type: string + + scope: Cluster + names: + plural: sourcesidentifiers + singular: sourcesidentifier + kind: SourcesIdentifier + shortNames: + - sourcesidentifier diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-trusted-sources.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-trusted-sources.yaml new file mode 100644 index 0000000..17bf760 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/crds/crd-openappsec-trusted-sources.yaml @@ -0,0 +1,32 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata : + name : trustedsources.openappsec.io + +spec: + group: openappsec.io + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + minNumOfSources: + type: integer + sourcesIdentifiers: + type: array + items: + type: string + + scope: Cluster + names: + plural: trustedsources + singular: trustedsource + kind: TrustedSource + shortNames: + - trustedsource diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/_helpers.tpl b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/_helpers.tpl new file mode 100644 index 0000000..e69de0c --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/_helpers.tpl @@ -0,0 +1,185 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "ingress-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ingress-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Container SecurityContext. 
+*/}} +{{- define "controller.containerSecurityContext" -}} +{{- if .Values.controller.containerSecurityContext -}} +{{- toYaml .Values.controller.containerSecurityContext -}} +{{- else -}} +capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +runAsUser: {{ .Values.controller.image.runAsUser }} +allowPrivilegeEscalation: {{ .Values.controller.image.allowPrivilegeEscalation }} +{{- end }} +{{- end -}} + +{{/* +Get specific image +*/}} +{{- define "ingress-nginx.image" -}} +{{- if .chroot -}} +{{- printf "%s-chroot" .image -}} +{{- else -}} +{{- printf "%s" .image -}} +{{- end }} +{{- end -}} + +{{/* +Get specific image digest +*/}} +{{- define "ingress-nginx.imageDigest" -}} +{{- if .chroot -}} +{{- if .digestChroot -}} +{{- printf "@%s" .digestChroot -}} +{{- end }} +{{- else -}} +{{ if .digest -}} +{{- printf "@%s" .digest -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified controller name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.controller.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Construct the path for the publish-service. + +By convention this will simply use the <namespace>/<controller-name> to match the name of the +service generated. + +Users can provide an override for an explicit service they want bound via `.Values.controller.publishService.pathOverride` + +*/}} +{{- define "ingress-nginx.controller.publishServicePath" -}} +{{- $defServiceName := printf "%s/%s" "$(POD_NAMESPACE)" (include "ingress-nginx.controller.fullname" .) -}} +{{- $servicePath := default $defServiceName .Values.controller.publishService.pathOverride }} +{{- print $servicePath | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified default backend name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "ingress-nginx.defaultBackend.fullname" -}} +{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.defaultBackend.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "ingress-nginx.labels" -}} +helm.sh/chart: {{ include "ingress-nginx.chart" . }} +{{ include "ingress-nginx.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/part-of: {{ template "ingress-nginx.name" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "ingress-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ingress-nginx.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "ingress-nginx.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "ingress-nginx.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the backend service account to use - only used when podsecuritypolicy is also enabled +*/}} +{{- define "ingress-nginx.defaultBackend.serviceAccountName" -}} +{{- if .Values.defaultBackend.serviceAccount.create -}} + {{ default (printf "%s-backend" (include "ingress-nginx.fullname" .)) .Values.defaultBackend.serviceAccount.name }} +{{- else -}} + {{ default "default-backend" .Values.defaultBackend.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Check the ingress controller version tag is at most three versions behind the last release +*/}} +{{- define "isControllerTagValid" -}} +{{- if not (semverCompare ">=0.27.0-0" .Values.controller.image.tag) -}} +{{- fail "Controller container image tag should be 0.27.0 or higher" -}} +{{- end -}} +{{- end -}} + +{{/* +IngressClass parameters. +*/}} +{{- define "ingressClass.parameters" -}} + {{- if .Values.controller.ingressClassResource.parameters -}} + parameters: +{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}} + {{ end }} +{{- end -}} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/_params.tpl b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/_params.tpl new file mode 100644 index 0000000..305ce0d --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/_params.tpl @@ -0,0 +1,62 @@ +{{- define "ingress-nginx.params" -}} +- /nginx-ingress-controller +{{- if .Values.defaultBackend.enabled }} +- --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }} +{{- end }} +{{- if and .Values.controller.publishService.enabled .Values.controller.service.enabled }} +{{- if .Values.controller.service.external.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }} +{{- else if .Values.controller.service.internal.enabled }} +- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}-internal +{{- end }} +{{- end }} +- --election-id={{ .Values.controller.electionID }} +- --controller-class={{ .Values.controller.ingressClassResource.controllerValue }} +{{- if .Values.controller.ingressClass }} +- --ingress-class={{ .Values.controller.ingressClass }} +{{- end }} +- --configmap={{ default "$(POD_NAMESPACE)" .Values.controller.configMapNamespace }}/{{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.tcp }} +- --tcp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.tcp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . }}-tcp +{{- end }} +{{- if .Values.udp }} +- --udp-services-configmap={{ default "$(POD_NAMESPACE)" .Values.controller.udp.configMapNamespace }}/{{ include "ingress-nginx.fullname" . 
}}-udp +{{- end }} +{{- if .Values.controller.scope.enabled }} +- --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }} +{{- end }} +{{- if and (not .Values.controller.scope.enabled) .Values.controller.scope.namespaceSelector }} +- --watch-namespace-selector={{ default "" .Values.controller.scope.namespaceSelector }} +{{- end }} +{{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }} +- --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} +- --validating-webhook=:{{ .Values.controller.admissionWebhooks.port }} +- --validating-webhook-certificate={{ .Values.controller.admissionWebhooks.certificate }} +- --validating-webhook-key={{ .Values.controller.admissionWebhooks.key }} +{{- end }} +{{- if .Values.controller.maxmindLicenseKey }} +- --maxmind-license-key={{ .Values.controller.maxmindLicenseKey }} +{{- end }} +{{- if .Values.controller.healthCheckHost }} +- --healthz-host={{ .Values.controller.healthCheckHost }} +{{- end }} +{{- if not (eq .Values.controller.healthCheckPath "/healthz") }} +- --health-check-path={{ .Values.controller.healthCheckPath }} +{{- end }} +{{- if .Values.controller.ingressClassByName }} +- --ingress-class-by-name=true +{{- end }} +{{- if .Values.controller.watchIngressWithoutClass }} +- --watch-ingress-without-class=true +{{- end }} +{{- range $key, $value := .Values.controller.extraArgs }} +{{- /* Accept keys without values or with false as value */}} +{{- if eq ($value | quote | len) 2 }} +- --{{ $key }} +{{- else }} +- --{{ $key }}={{ $value }} +{{- end }} +{{- end }} +{{- end -}} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml new file mode 100644 index 0000000..5659a1f --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + {{- with .Values.controller.admissionWebhooks.existingPsp }} + - {{ . }} + {{- else }} + - {{ include "ingress-nginx.fullname" . 
}}-admission + {{- end }} +{{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100644 index 0000000..abf17fb --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100644 index 0000000..f20e247 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,76 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-create + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: create + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ include "ingress-nginx.controller.fullname" . }}-admission,{{ include "ingress-nginx.controller.fullname" . }}-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.createSecretJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }} + fsGroup: {{ .Values.controller.admissionWebhooks.patch.fsGroup }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100644 index 0000000..8583685 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,78 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-patch + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- with .Values.controller.admissionWebhooks.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 +{{- end }} + template: + metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission-patch + {{- if .Values.controller.admissionWebhooks.patch.podAnnotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.patch.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 8 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.controller.admissionWebhooks.patch.priorityClassName }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + containers: + - name: patch + {{- with .Values.controller.admissionWebhooks.patch.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.admissionWebhooks.patch.image.pullPolicy }} + args: + - patch + - --webhook-name={{ include "ingress-nginx.fullname" . }}-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name={{ include "ingress-nginx.fullname" . }}-admission + - --patch-failure-policy={{ .Values.controller.admissionWebhooks.failurePolicy }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + {{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }} + resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + serviceAccountName: {{ include "ingress-nginx.fullname" . }}-admission + {{- if .Values.controller.admissionWebhooks.patch.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.admissionWebhooks.patch.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.patch.tolerations }} + tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }} + fsGroup: {{ .Values.controller.admissionWebhooks.patch.fsGroup }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml new file mode 100644 index 0000000..70edde3 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/role.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/role.yaml new file mode 100644 index 0000000..795bac6 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml new file mode 100644 index 0000000..698c5c8 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ include "ingress-nginx.fullname" . }}-admission + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100644 index 0000000..eae4751 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ingress-nginx.fullname" . 
}}-admission + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.patch.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/validating-webhook.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/validating-webhook.yaml new file mode 100644 index 0000000..8caffcb --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/admission-webhooks/validating-webhook.yaml @@ -0,0 +1,48 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + {{- if .Values.controller.admissionWebhooks.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: admission-webhook + {{- with .Values.controller.admissionWebhooks.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: {{ .Values.controller.admissionWebhooks.failurePolicy | default "Fail" }} + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: {{ .Release.Namespace | quote }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-admission + path: /networking/v1/ingresses + {{- if .Values.controller.admissionWebhooks.timeoutSeconds }} + timeoutSeconds: {{ .Values.controller.admissionWebhooks.timeoutSeconds }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.admissionWebhooks.namespaceSelector | nindent 6 }} + {{- end }} + {{- if .Values.controller.admissionWebhooks.objectSelector }} + objectSelector: {{ toYaml .Values.controller.admissionWebhooks.objectSelector | nindent 6 }} + {{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/appsec-learning-pvc.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/appsec-learning-pvc.yaml new file mode 100644 index 0000000..9df2b3d --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/appsec-learning-pvc.yaml @@ -0,0 +1,20 @@ +{{- if and (eq "stand-alone" .Values.appsec.mode) (eq .Values.appsec.persistence.enabled true) -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.appsec.name }}-storage + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/resource-policy": keep +spec: + accessModes: + - {{ .Values.appsec.persistence.learning.storageClass.accessModes }} + resources: + requests: + storage: {{ .Values.appsec.persistence.learning.size | quote }} +{{- if .Values.appsec.persistence.learning.storageClass.name }} + storageClassName: "{{ .Values.appsec.persistence.learning.storageClass.name }}" +{{- else }} + storageClassName: {{ required "A storage class for learning data is required" .Values.appsec.persistence.learning.storageClass.name }} +{{- end -}} +{{- end }} \ No newline at end of file diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/appsec-pvc.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/appsec-pvc.yaml new file mode 100644 index 0000000..3839dad --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/appsec-pvc.yaml @@ -0,0 +1,51 @@ +{{- if (eq .Values.controller.kind "Deployment") -}} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.appsec.name }}-conf + namespace: {{ .Release.Namespace }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: open-appsec + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.appsec.persistence.conf.size | quote }} +{{- if .Values.appsec.persistence.storageClass }} +{{- if (eq "-" .Values.appsec.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.appsec.persistence.storageClass }}" +{{- end -}} +{{- end }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.appsec.name }}-data + namespace: {{ .Release.Namespace }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: open-appsec + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.appsec.persistence.data.size | quote }} +{{- if .Values.appsec.persistence.storageClass }} +{{- if (eq "-" .Values.appsec.persistence.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.appsec.persistence.storageClass }}" +{{- end -}} +{{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/clusterrole.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/clusterrole.yaml new file mode 100644 index 0000000..a7f857b --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/clusterrole.yaml @@ -0,0 +1,95 @@ +{{- if .Values.rbac.create }} + +{{- if and .Values.rbac.scope (not .Values.controller.scope.enabled) -}} + {{ required "Invalid configuration: 'rbac.scope' should be equal to 'controller.scope.enabled' (true/false)." (index (dict) ".") }} +{{- end }} + +{{- if not .Values.rbac.scope -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets +{{- if not .Values.controller.scope.enabled }} + - namespaces +{{- end}} + verbs: + - list + - watch +{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }} + - apiGroups: + - "" + resources: + - namespaces + resourceNames: + - "{{ .Values.controller.scope.namespace }}" + verbs: + - get +{{- end }} + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - openappsec.io + resources: + - '*' + verbs: + - get + - list + - watch +{{- end }} + +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/clusterrolebinding.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..acbbd8b --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create (not .Values.rbac.scope) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-addheaders.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-addheaders.yaml new file mode 100644 index 0000000..dfd49a1 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-addheaders.yaml @@ -0,0 +1,14 @@ +{{- if .Values.controller.addHeaders -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers + namespace: {{ .Release.Namespace }} +data: {{ toYaml .Values.controller.addHeaders | nindent 2 }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-proxyheaders.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-proxyheaders.yaml new file mode 100644 index 0000000..f8d15fa --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-proxyheaders.yaml @@ -0,0 +1,19 @@ +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-custom-proxy-headers + namespace: {{ .Release.Namespace }} +data: +{{- if .Values.controller.proxySetHeaders }} +{{ toYaml .Values.controller.proxySetHeaders | indent 2 }} +{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }} +{{ toYaml .Values.controller.headers | indent 2 }} +{{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-tcp.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-tcp.yaml new file mode 100644 index 0000000..0f6088e --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-tcp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.tcp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.tcp.annotations }} + annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-tcp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-udp.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-udp.yaml new file mode 100644 index 0000000..3772ec5 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap-udp.yaml @@ -0,0 +1,17 @@ +{{- if .Values.udp -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- if .Values.controller.udp.annotations }} + annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.fullname" . }}-udp + namespace: {{ .Release.Namespace }} +data: {{ tpl (toYaml .Values.udp) . | nindent 2 }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap.yaml new file mode 100644 index 0000000..f28b26e --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-configmap.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if .Values.controller.configAnnotations }} + annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }} +{{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}" +{{- if .Values.controller.addHeaders }} + add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers +{{- end }} +{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }} + proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers +{{- end }} +{{- if .Values.dhParam }} + ssl-dh-param: {{ printf "%s/%s" .Release.Namespace (include "ingress-nginx.controller.fullname" .) }} +{{- end }} +{{- range $key, $value := .Values.controller.config }} + {{- $key | nindent 2 }}: {{ $value | quote }} +{{- end }} + diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-daemonset.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-daemonset.yaml new file mode 100644 index 0000000..2dca8e5 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-daemonset.yaml @@ -0,0 +1,223 @@ +{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + + + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .Name }} + image: {{ .Image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- 
end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-deployment.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-deployment.yaml new file mode 100644 index 0000000..5285a0f --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-deployment.yaml @@ -0,0 +1,261 @@ +{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}} +{{- include "isControllerTagValid" . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + {{- if not .Values.controller.autoscaling.enabled }} + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + strategy: + {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName | quote }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}{{- if .tag }}:{{ .tag }}{{- end -}}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + - name: {{ .Values.appsec.name }} + securityContext: + {{ toYaml .Values.appsec.securityContext | nindent 12 }} + {{- with .Values.appsec.image }} + image: "{{- if .registry }}{{ .registry }}/{{- end }}{{- if .repository }}{{ .repository }}/{{- end }}{{ .image }}{{- if .tag }}:{{ .tag }}{{- end }}{{- if (.digest) -}} @{{.digest}} {{- end }}" + {{- end }} + args: + - --token + - {{ .Values.appsec.agentToken }} + {{- if .Values.appsec.customFog.enabled }} + - --fog + - {{ .Values.appsec.customFog.fogAddress }} + {{- end }} + {{- if .Values.appsec.proxy }} + - --proxy + - {{ .Values.appsec.proxy }} + {{- end }} + command: + - {{ .Values.appsec.command }} + imagePullPolicy: {{ .Values.appsec.image.pullPolicy }} + resources: + {{ toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: appsec-conf + 
mountPath: /etc/cp/conf + - name: appsec-data + mountPath: /etc/cp/data + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .name }} + image: {{ .image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + volumeMounts: + - name: modules + mountPath: /modules_mount + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + volumes: + - name: appsec-conf + persistentVolumeClaim: + claimName: {{ .Values.appsec.name }}-conf + - name: appsec-data + persistentVolumeClaim: + claimName: {{ .Values.appsec.name }}-data + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-hpa.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-hpa.yaml new file mode 100644 index 0000000..e0979f1 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-hpa.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +{{- if not .Values.controller.keda.enabled }} + +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + annotations: + {{- with .Values.controller.autoscaling.annotations }} + {{- toYaml . | trimSuffix "\n" | nindent 4 }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ingress-nginx.controller.fullname" . }} + minReplicas: {{ .Values.controller.autoscaling.minReplicas }} + maxReplicas: {{ .Values.controller.autoscaling.maxReplicas }} + metrics: + {{- with .Values.controller.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ . }} + {{- end }} + {{- with .Values.controller.autoscalingTemplate }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- with .Values.controller.autoscaling.behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} + diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-ingressclass.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-ingressclass.yaml new file mode 100644 index 0000000..9492784 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-ingressclass.yaml @@ -0,0 +1,21 @@ +{{- if .Values.controller.ingressClassResource.enabled -}} +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ .Values.controller.ingressClassResource.name }} +{{- if .Values.controller.ingressClassResource.default }} + annotations: + ingressclass.kubernetes.io/is-default-class: "true" +{{- end }} +spec: + controller: {{ .Values.controller.ingressClassResource.controllerValue }} + {{ template "ingressClass.parameters" . }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-keda.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-keda.yaml new file mode 100644 index 0000000..875157e --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-keda.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.controller.keda.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}} +# https://keda.sh/docs/ + +apiVersion: {{ .Values.controller.keda.apiVersion }} +kind: ScaledObject +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + {{- if .Values.controller.keda.scaledObject.annotations }} + annotations: {{ toYaml .Values.controller.keda.scaledObject.annotations | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: +{{- if eq .Values.controller.keda.apiVersion "keda.k8s.io/v1alpha1" }} + deploymentName: {{ include "ingress-nginx.controller.fullname" . }} +{{- else if eq .Values.controller.keda.apiVersion "keda.sh/v1alpha1" }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} +{{- end }} + pollingInterval: {{ .Values.controller.keda.pollingInterval }} + cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }} + minReplicaCount: {{ .Values.controller.keda.minReplicas }} + maxReplicaCount: {{ .Values.controller.keda.maxReplicas }} + triggers: +{{- with .Values.controller.keda.triggers }} +{{ toYaml . | indent 2 }} +{{ end }} + advanced: + restoreToOriginalReplicaCount: {{ .Values.controller.keda.restoreToOriginalReplicaCount }} +{{- if .Values.controller.keda.behavior }} + horizontalPodAutoscalerConfig: + behavior: +{{ with .Values.controller.keda.behavior -}} +{{ toYaml . | indent 8 }} +{{ end }} + +{{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-poddisruptionbudget.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-poddisruptionbudget.yaml new file mode 100644 index 0000000..8dfbe98 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-poddisruptionbudget.yaml @@ -0,0 +1,19 @@ +{{- if or (and .Values.controller.autoscaling.enabled (gt (.Values.controller.autoscaling.minReplicas | int) 1)) (and (not .Values.controller.autoscaling.enabled) (gt (.Values.controller.replicaCount | int) 1)) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + minAvailable: {{ .Values.controller.minAvailable }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-prometheusrules.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-prometheusrules.yaml new file mode 100644 index 0000000..ca54275 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-prometheusrules.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.prometheusRule.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.prometheusRule.namespace }} + namespace: {{ .Values.controller.metrics.prometheusRule.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.controller.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: +{{- if .Values.controller.metrics.prometheusRule.rules }} + groups: + - name: {{ template "ingress-nginx.name" . 
}} + rules: {{- toYaml .Values.controller.metrics.prometheusRule.rules | nindent 4 }} +{{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-psp.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-psp.yaml new file mode 100644 index 0000000..fe34408 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-psp.yaml @@ -0,0 +1,92 @@ +{{- if and .Values.podSecurityPolicy.enabled (empty .Values.controller.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowedCapabilities: + - NET_BIND_SERVICE + {{- if .Values.controller.image.chroot }} + - SYS_CHROOT + {{- end }} +{{- if .Values.controller.sysctls }} + allowedUnsafeSysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - {{ $sysctl }} + {{- end }} +{{- end }} + privileged: false + allowPrivilegeEscalation: true + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + #- 'projected' + - 'secret' + #- 'downwardAPI' +{{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} +{{- end }} +{{- if or .Values.controller.hostNetwork .Values.controller.hostPort.enabled }} + hostPorts: +{{- if .Values.controller.hostNetwork }} +{{- range $key, $value := .Values.controller.containerPort }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- else if .Values.controller.hostPort.enabled }} +{{- range $key, $value := .Values.controller.hostPort.ports }} + # {{ $key }} + - min: {{ $value }} + max: {{ $value }} +{{- end }} +{{- end }} +{{- if .Values.controller.metrics.enabled }} + # metrics + - min: {{ .Values.controller.metrics.port }} + max: {{ .Values.controller.metrics.port }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.enabled }} + # admission webhooks + - min: {{ .Values.controller.admissionWebhooks.port }} + max: {{ .Values.controller.admissionWebhooks.port }} +{{- end }} +{{- range $key, $value := .Values.tcp }} + # {{ $key }}-tcp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- range $key, $value := .Values.udp }} + # {{ $key }}-udp + - min: {{ $key }} + max: {{ $key }} +{{- end }} +{{- end }} + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + seLinux: + rule: 'RunAsAny' +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-role.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-role.yaml new file mode 100644 index 0000000..ff039e0 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-role.yaml @@ -0,0 +1,101 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + - {{ .Values.controller.electionID }} + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - openappsec.io + resources: + - '*' + verbs: + - get + - list + - watch +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.controller.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}] + {{- end }} +{{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-rolebinding.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-rolebinding.yaml new file mode 100644 index 0000000..e846a11 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service-internal.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service-internal.yaml new file mode 100644 index 0000000..aae3e15 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service-internal.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.internal.enabled .Values.controller.service.internal.annotations}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.internal.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-internal + namespace: {{ .Release.Namespace }} +spec: + type: "{{ .Values.controller.service.type }}" +{{- if .Values.controller.service.internal.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.internal.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.internal.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.internal.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.internal.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.internal.externalTrafficPolicy }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + {{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service-metrics.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service-metrics.yaml new file mode 100644 index 0000000..0aaf414 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service-metrics.yaml @@ -0,0 +1,45 @@ +{{- if .Values.controller.metrics.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.metrics.service.annotations }} + annotations: {{ toYaml .Values.controller.metrics.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.service.labels }} + {{- toYaml .Values.controller.metrics.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }}-metrics + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.metrics.service.type }} +{{- if .Values.controller.metrics.service.clusterIP }} + clusterIP: {{ .Values.controller.metrics.service.clusterIP }} +{{- end }} +{{- if .Values.controller.metrics.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.metrics.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.metrics.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.metrics.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.metrics.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }} +{{- end }} + ports: + - name: metrics + port: {{ .Values.controller.metrics.service.servicePort }} + protocol: TCP + targetPort: metrics + {{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }} + {{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }} + nodePort: {{ .Values.controller.metrics.service.nodePort }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service-webhook.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service-webhook.yaml new file mode 100644 index 0000000..2aae24f --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service-webhook.yaml @@ -0,0 +1,40 @@ +{{- if .Values.controller.admissionWebhooks.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.controller.admissionWebhooks.service.annotations }} + annotations: {{ toYaml .Values.controller.admissionWebhooks.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}}-admission + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.admissionWebhooks.service.type }} +{{- if .Values.controller.admissionWebhooks.service.clusterIP }} + clusterIP: {{ .Values.controller.admissionWebhooks.service.clusterIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.admissionWebhooks.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.admissionWebhooks.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.admissionWebhooks.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: https-webhook + port: 443 + targetPort: webhook + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: https + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service.yaml new file mode 100644 index 0000000..2b28196 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-service.yaml @@ -0,0 +1,101 @@ +{{- if and .Values.controller.service.enabled .Values.controller.service.external.enabled -}} +apiVersion: v1 +kind: Service +metadata: + annotations: + {{- range $key, $value := .Values.controller.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.service.labels }} + {{- toYaml .Values.controller.service.labels | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.controller.service.type }} +{{- if .Values.controller.service.clusterIP }} + clusterIP: {{ .Values.controller.service.clusterIP }} +{{- end }} +{{- if .Values.controller.service.externalIPs }} + externalIPs: {{ toYaml .Values.controller.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.controller.service.loadBalancerIP }} +{{- end }} +{{- if .Values.controller.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} +{{- if .Values.controller.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }} +{{- end }} +{{- if .Values.controller.service.sessionAffinity }} + sessionAffinity: {{ .Values.controller.service.sessionAffinity }} +{{- end }} +{{- if .Values.controller.service.healthCheckNodePort }} + healthCheckNodePort: {{ .Values.controller.service.healthCheckNodePort }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilyPolicy }} + ipFamilyPolicy: {{ .Values.controller.service.ipFamilyPolicy }} +{{- end }} +{{- end }} +{{- if semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version -}} +{{- if .Values.controller.service.ipFamilies }} + ipFamilies: {{ toYaml .Values.controller.service.ipFamilies | nindent 4 }} +{{- end }} +{{- end }} + ports: + {{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }} + {{- if .Values.controller.service.enableHttp }} + - name: http + port: {{ .Values.controller.service.ports.http }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.http }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: http + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.http))) }} + nodePort: {{ .Values.controller.service.nodePorts.http }} + {{- end }} + {{- end }} + {{- if .Values.controller.service.enableHttps }} + - name: https + port: {{ .Values.controller.service.ports.https }} + protocol: TCP + targetPort: {{ .Values.controller.service.targetPorts.https }} + {{- if and (semverCompare ">=1.20" .Capabilities.KubeVersion.Version) (.Values.controller.service.appProtocol) }} + appProtocol: https + {{- end }} + {{- if (and $setNodePorts (not (empty .Values.controller.service.nodePorts.https))) }} + nodePort: {{ .Values.controller.service.nodePorts.https }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + port: {{ $key }} + protocol: TCP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + {{- if $.Values.controller.service.nodePorts.tcp }} + {{- if index $.Values.controller.service.nodePorts.tcp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.tcp $key }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + port: {{ $key }} + protocol: UDP + targetPort: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + {{- if $.Values.controller.service.nodePorts.udp }} + 
{{- if index $.Values.controller.service.nodePorts.udp $key }} + nodePort: {{ index $.Values.controller.service.nodePorts.udp $key }} + {{- end }} + {{- end }} + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-serviceaccount.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-serviceaccount.yaml new file mode 100644 index 0000000..824b2a1 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if or .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.serviceAccount.annotations }} + annotations: + {{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-servicemonitor.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-servicemonitor.yaml new file mode 100644 index 0000000..4dbc6da --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "ingress-nginx.controller.fullname" . }} +{{- if .Values.controller.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.controller.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.controller.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }} + {{- if .Values.controller.metrics.serviceMonitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.relabelings }} + relabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.controller.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{ toYaml .Values.controller.metrics.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} +{{- if .Values.controller.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.controller.metrics.serviceMonitor.jobLabel | quote }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.namespaceSelector }} + namespaceSelector: {{ toYaml .Values.controller.metrics.serviceMonitor.namespaceSelector | nindent 4 }} +{{- else }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +{{- if .Values.controller.metrics.serviceMonitor.targetLabels }} + targetLabels: + {{- range .Values.controller.metrics.serviceMonitor.targetLabels }} + - {{ . 
}} + {{- end }} +{{- end }} + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-statefulset.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-statefulset.yaml new file mode 100644 index 0000000..4f5dae2 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/controller-statefulset.yaml @@ -0,0 +1,304 @@ +{{- if or (eq .Values.controller.kind "StatefulSet") (eq .Values.controller.kind "Both") -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.controller.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- if .Values.controller.annotations }} + annotations: {{ toYaml .Values.controller.annotations | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: controller + {{- if not .Values.controller.autoscaling.enabled }} + serviceName: "open-appsec-stateful-set" + replicas: {{ .Values.controller.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.controller.updateStrategy }} + strategy: + {{ toYaml .Values.controller.updateStrategy | nindent 4 }} + {{- end }} + #minReadySeconds: {{ .Values.controller.minReadySeconds }} + template: + metadata: + {{- if .Values.controller.podAnnotations }} + annotations: + {{- range $key, $value := .Values.controller.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: controller + {{- with .Values.controller.labels }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.controller.podLabels }} + {{- toYaml .Values.controller.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.controller.dnsConfig }} + dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }} + {{- end }} + {{- if .Values.controller.hostname }} + hostname: {{ toYaml .Values.controller.hostname | nindent 8 }} + {{- end }} + dnsPolicy: {{ .Values.controller.dnsPolicy }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.controller.priorityClassName }} + priorityClassName: {{ .Values.controller.priorityClassName | quote }} + {{- end }} + {{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }} + securityContext: + {{- end }} + {{- if .Values.controller.podSecurityContext }} + {{- toYaml .Values.controller.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.controller.sysctls }} + sysctls: + {{- range $sysctl, $value := .Values.controller.sysctls }} + - name: {{ $sysctl | quote }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.controller.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.controller.shareProcessNamespace }} + {{- end }} + containers: + - name: {{ .Values.appsec.name }} + securityContext: + {{ toYaml .Values.appsec.securityContext | nindent 12 }} + {{- with .Values.appsec.image }} + image: "{{- if .registry }}{{ .registry }}/{{- end }}{{- if .repository }}{{ .repository }}/{{- end }}{{ .image }}{{- if .tag }}:{{ .tag }}{{- end }}{{- if (.digest) -}} @{{.digest}} {{- end }}" + {{- end }} + command: + - {{ .Values.appsec.command }} + imagePullPolicy: {{ .Values.appsec.image.pullPolicy }} + args: + {{- if (eq "stand-alone" .Values.appsec.mode) }} + - --hybrid-mode + - --token + - cp-3fb5c718-5e39-47e6-8d5e-99b4bc5660b74b4b7fc8-5312-451d-a763-aaf7872703c0 + {{- else }} + - --token + - {{ .Values.appsec.agentToken }} + {{- end -}} + {{- if .Values.appsec.customFog.enabled }} + - --fog + - {{ .Values.appsec.customFog.fogAddress }} + {{- end }} + {{- if .Values.appsec.proxy }} + - --proxy + - {{ .Values.appsec.proxy }} + {{- end }} + imagePullPolicy: {{ .Values.appsec.image.pullPolicy }} + {{- if eq .Values.appsec.playground false }} + env: + - name: SHARED_STORAGE_HOST + value: {{ .Values.appsec.storage.name }}-svc + - name: LEARNING_HOST + value: {{ .Values.appsec.learning.name }}-svc + {{- end }} + resources: + {{ toYaml .Values.resources | nindent 12 }} + {{- if .Values.appsec.persistence.enabled }} + volumeMounts: + - name: appsec-conf + mountPath: /etc/cp/conf + - name: appsec-data + mountPath: /etc/cp/data + {{- end }} + - name: {{ .Values.controller.containerName }} + {{- with .Values.controller.image }} + image: "{{- if .registry }}{{ .registry }}/{{- end }}{{- if .repository }}{{ .repository }}/{{- end }}{{ .image }}{{- if .tag }}:{{ .tag }}{{- end }}{{- if (.digest) -}} @{{.digest}} {{- end }}" + {{- end }} + imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- if .Values.controller.lifecycle }} + lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }} + {{- end }} + args: + {{- include "ingress-nginx.params" . | nindent 12 }} + securityContext: {{ include "controller.containerSecurityContext" . 
| nindent 12 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.controller.enableMimalloc }} + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + {{- end }} + {{- if .Values.controller.extraEnvs }} + {{- toYaml .Values.controller.extraEnvs | nindent 12 }} + {{- end }} + {{- if .Values.controller.startupProbe }} + startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }} + {{- end }} + livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }} + readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }} + ports: + {{- range $key, $value := .Values.controller.containerPort }} + - name: {{ $key }} + containerPort: {{ $value }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ index $.Values.controller.hostPort.ports $key | default $value }} + {{- end }} + {{- end }} + {{- if .Values.controller.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.controller.metrics.port }} + protocol: TCP + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook + containerPort: {{ .Values.controller.admissionWebhooks.port }} + protocol: TCP + {{- end }} + {{- range $key, $value := .Values.tcp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-tcp + containerPort: {{ $key }} + protocol: TCP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.udp }} + - name: {{ if $.Values.portNamePrefix }}{{ $.Values.portNamePrefix }}-{{ end }}{{ $key }}-udp + containerPort: {{ $key }} + protocol: UDP + {{- if $.Values.controller.hostPort.enabled }} + hostPort: {{ $key }} + {{- end }} + {{- end }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }} + volumeMounts: + {{- if .Values.controller.extraModules }} + - name: modules + mountPath: /modules_mount + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - mountPath: /etc/nginx/template + name: nginx-template-volume + readOnly: true + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + {{- end }} + {{- if .Values.controller.extraVolumeMounts }} + {{- toYaml .Values.controller.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.controller.resources }} + resources: {{ toYaml .Values.controller.resources | nindent 12 }} + {{- end }} + {{- if .Values.controller.extraContainers }} + {{ toYaml .Values.controller.extraContainers | nindent 8 }} + {{- end }} + {{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }} + initContainers: + {{- if .Values.controller.extraInitContainers }} + {{ toYaml .Values.controller.extraInitContainers | nindent 8 }} + {{- end }} + {{- if .Values.controller.extraModules }} + {{- range .Values.controller.extraModules }} + - name: {{ .name }} + image: {{ .image }} + command: ['sh', '-c', '/usr/local/bin/init_module.sh'] + volumeMounts: + - name: modules + mountPath: /modules_mount + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.controller.hostNetwork }} + hostNetwork: {{ .Values.controller.hostNetwork }} + {{- end }} + {{- if .Values.controller.nodeSelector }} + nodeSelector: {{ 
toYaml .Values.controller.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.controller.tolerations }} + tolerations: {{ toYaml .Values.controller.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.controller.affinity }} + affinity: {{ toYaml .Values.controller.affinity | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }} + terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }} + {{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }} + volumes: + {{- if .Values.controller.extraModules }} + - name: modules + emptyDir: {} + {{- end }} + {{- if .Values.controller.customTemplate.configMapName }} + - name: nginx-template-volume + configMap: + name: {{ .Values.controller.customTemplate.configMapName }} + items: + - key: {{ .Values.controller.customTemplate.configMapKey }} + path: nginx.tmpl + {{- end }} + {{- if .Values.controller.admissionWebhooks.enabled }} + - name: webhook-cert + secret: + secretName: {{ include "ingress-nginx.fullname" . }}-admission + {{- end }} + {{- if .Values.controller.extraVolumes }} + {{ toYaml .Values.controller.extraVolumes | nindent 8 }} + {{- end }} + {{- end }} +{{- if .Values.appsec.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: appsec-conf + spec: + accessModes: [ "ReadWriteOnce" ] + # Need to create a storage class resource. + {{- if .Values.appsec.persistence.storageClass }} + {{- if (eq "-" .Values.appsec.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.appsec.persistence.storageClass }}" + {{- end -}} + {{- end }} + resources: + requests: + storage: {{ .Values.appsec.persistence.conf.size | quote }} + - metadata: + name: appsec-data + spec: + accessModes: [ "ReadWriteOnce" ] + # Need to create a storage class resource. + {{- if .Values.appsec.persistence.storageClass }} + {{- if (eq "-" .Values.appsec.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.appsec.persistence.storageClass }}" + {{- end -}} + {{- end }} + resources: + requests: + storage: {{ .Values.appsec.persistence.data.size | quote }} +{{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-deployment.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-deployment.yaml new file mode 100644 index 0000000..fd3e96e --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-deployment.yaml @@ -0,0 +1,118 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend +{{- if not .Values.defaultBackend.autoscaling.enabled }} + replicas: {{ .Values.defaultBackend.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + {{- if .Values.defaultBackend.podAnnotations }} + annotations: {{ toYaml .Values.defaultBackend.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "ingress-nginx.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.podLabels }} + {{- toYaml .Values.defaultBackend.podLabels | nindent 8 }} + {{- end }} + spec: + {{- if .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.priorityClassName }} + priorityClassName: {{ .Values.defaultBackend.priorityClassName }} + {{- end }} + {{- if .Values.defaultBackend.podSecurityContext }} + securityContext: {{ toYaml .Values.defaultBackend.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - name: {{ template "ingress-nginx.name" . }}-default-backend + {{- with .Values.defaultBackend.image }} + image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}:{{ .tag }}{{- if (.digest) -}} @{{.digest}} {{- end -}}" + {{- end }} + imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }} + {{- if .Values.defaultBackend.extraArgs }} + args: + {{- range $key, $value := .Values.defaultBackend.extraArgs }} + {{- /* Accept keys without values or with false as value */}} + {{- if eq ($value | quote | len) 2 }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + {{- end }} + securityContext: + capabilities: + drop: + - ALL + runAsUser: {{ .Values.defaultBackend.image.runAsUser }} + runAsNonRoot: {{ .Values.defaultBackend.image.runAsNonRoot }} + allowPrivilegeEscalation: {{ .Values.defaultBackend.image.allowPrivilegeEscalation }} + readOnlyRootFilesystem: {{ .Values.defaultBackend.image.readOnlyRootFilesystem}} + {{- if .Values.defaultBackend.extraEnvs }} + env: {{ toYaml .Values.defaultBackend.extraEnvs | nindent 12 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.livenessProbe.failureThreshold }} + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.defaultBackend.port }} + scheme: HTTP + initialDelaySeconds: {{ .Values.defaultBackend.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.defaultBackend.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.defaultBackend.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.defaultBackend.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.defaultBackend.readinessProbe.failureThreshold }} + ports: + - name: http + containerPort: {{ .Values.defaultBackend.port }} + protocol: TCP + {{- if .Values.defaultBackend.extraVolumeMounts }} + volumeMounts: {{- toYaml .Values.defaultBackend.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if 
.Values.defaultBackend.resources }} + resources: {{ toYaml .Values.defaultBackend.resources | nindent 12 }} + {{- end }} + {{- if .Values.defaultBackend.nodeSelector }} + nodeSelector: {{ toYaml .Values.defaultBackend.nodeSelector | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }} + {{- if .Values.defaultBackend.tolerations }} + tolerations: {{ toYaml .Values.defaultBackend.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.defaultBackend.affinity }} + affinity: {{ toYaml .Values.defaultBackend.affinity | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.defaultBackend.extraVolumes }} + volumes: {{ toYaml .Values.defaultBackend.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-hpa.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-hpa.yaml new file mode 100644 index 0000000..594d265 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-hpa.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "ingress-nginx.defaultBackend.fullname" . }} + minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }} + metrics: +{{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ . }} +{{- end }} +{{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ . }} +{{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-poddisruptionbudget.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-poddisruptionbudget.yaml new file mode 100644 index 0000000..00891ce --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- if .Values.defaultBackend.enabled -}} +{{- if or (gt (.Values.defaultBackend.replicaCount | int) 1) (gt (.Values.defaultBackend.autoscaling.minReplicas | int) 1) }} +apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "ingress-nginx.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: default-backend + minAvailable: {{ .Values.defaultBackend.minAvailable }} +{{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-psp.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-psp.yaml new file mode 100644 index 0000000..42061c5 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-psp.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled (empty .Values.defaultBackend.existingPsp) -}} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "ingress-nginx.fullname" . }}-backend + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-role.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-role.yaml new file mode 100644 index 0000000..a2b457c --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-role.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}] + resources: ['podsecuritypolicies'] + verbs: ['use'] + {{- with .Values.defaultBackend.existingPsp }} + resourceNames: [{{ . }}] + {{- else }} + resourceNames: [{{ include "ingress-nginx.fullname" . }}-backend] + {{- end }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-rolebinding.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-rolebinding.yaml new file mode 100644 index 0000000..dbaa516 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.fullname" . }}-backend + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ingress-nginx.fullname" . }}-backend +subjects: + - kind: ServiceAccount + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-service.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-service.yaml new file mode 100644 index 0000000..5f1d09a --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.defaultBackend.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.defaultBackend.service.annotations }} + annotations: {{ toYaml .Values.defaultBackend.service.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "ingress-nginx.defaultBackend.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + type: {{ .Values.defaultBackend.service.type }} +{{- if .Values.defaultBackend.service.clusterIP }} + clusterIP: {{ .Values.defaultBackend.service.clusterIP }} +{{- end }} +{{- if .Values.defaultBackend.service.externalIPs }} + externalIPs: {{ toYaml .Values.defaultBackend.service.externalIPs | nindent 4 }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.defaultBackend.service.loadBalancerIP }} +{{- end }} +{{- if .Values.defaultBackend.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ toYaml .Values.defaultBackend.service.loadBalancerSourceRanges | nindent 4 }} +{{- end }} + ports: + - name: http + port: {{ .Values.defaultBackend.service.servicePort }} + protocol: TCP + targetPort: http + {{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }} + appProtocol: http + {{- end }} + selector: + {{- include "ingress-nginx.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: default-backend +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-serviceaccount.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-serviceaccount.yaml new file mode 100644 index 0000000..b45a95a --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-backend-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "ingress-nginx.labels" . | nindent 4 }} + app.kubernetes.io/component: default-backend + {{- with .Values.defaultBackend.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +automountServiceAccountToken: {{ .Values.defaultBackend.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-policy.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-policy.yaml new file mode 100644 index 0000000..40a7146 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/default-policy.yaml @@ -0,0 +1,49 @@ +apiVersion: openappsec.io/v1beta1 +kind: Practice +metadata: + name: appsec-best-practice +spec: + web-attacks: + override-mode: prevent-learn + minimum-confidence: high + anti-bot: + injected-URIs: [] + validated-URIs: [] + snort-signatures: + configmap: [] + openapi-schema-validation: + configmap: [] +--- +apiVersion: openappsec.io/v1beta1 +kind: LogTrigger +metadata: + name: appsec-log-trigger +spec: + appsec-logging: + detect-events: true + prevent-events: true + all-web-requests: false + additional-suspicious-events-logging: + enabled: true + minimum-severity: high # {high|critical} + response-body: false + response-code: true + extended-logging: + url-path: true + url-query: true + http-headers: false + request-body: false + log-destination: + cloud: false + stdout: + format: json-formatted +--- +apiVersion: openappsec.io/v1beta1 +kind: CustomResponse +metadata: + name: 403-forbidden +spec: + mode: response-code-only ## configurable modes: {block-page|redirect|response-code-only} + message-title: "" + message-body: "" + http-response-code: 403 diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/dh-param-secret.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/dh-param-secret.yaml new file mode 100644 index 0000000..12e7a4f --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/dh-param-secret.yaml @@ -0,0 +1,10 @@ +{{- with .Values.dhParam -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "ingress-nginx.controller.fullname" $ }} + labels: + {{- include "ingress-nginx.labels" $ | nindent 4 }} +data: + dhparam.pem: {{ . 
}} +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/learning-deployment.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/learning-deployment.yaml new file mode 100644 index 0000000..1dddbcf --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/learning-deployment.yaml @@ -0,0 +1,139 @@ +{{- if and (eq "stand-alone" .Values.appsec.mode) (eq .Values.appsec.playground false) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.appsec.learning.name }}-deployment + labels: + app: {{ .Values.appsec.learning.name }}-lbl +spec: + replicas: {{ .Values.appsec.learning.replicas }} + selector: + matchLabels: + app: {{ .Values.appsec.learning.name }}-lbl + template: + metadata: + labels: + app: {{ .Values.appsec.learning.name }}-lbl + group: fog-core + spec: + securityContext: + {{- if eq .Values.appsec.persistence.enabled true }} + fsGroup: 2000 + runAsGroup: 2000 + runAsUser: 1000 + {{- else }} + runAsUser: 0 + {{- end }} + containers: + - name: {{ .Values.appsec.learning.name }} + imagePullPolicy: Always + ports: + - containerPort: 8080 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /health/live + port: 8080 + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 10 + {{- with .Values.appsec.learning.image }} + image: {{ .registry }}/{{ .image }}:{{ .tag }} + {{- end }} + readinessProbe: + failureThreshold: 3 + httpGet: + path: /health/ready + port: 8080 + scheme: HTTP + initialDelaySeconds: 3 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 10 + env: + - name: APPSEC_MODE + value: {{ .Values.appsec.mode }} + - name: RP_BASEURL + value: http://{{ .Values.appsec.storage.name }}-svc/api + - name: K8S_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if eq .Values.appsec.persistence.enabled true }} + volumeMounts: + - mountPath: /tmp/locks/ + name: flock + volumes: + - name: flock + persistentVolumeClaim: + claimName: {{ .Values.appsec.name }}-storage + {{- end }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Values.appsec.storage.name }}-deployment + labels: + app: {{ .Values.appsec.storage.name }}-lbl +spec: + replicas: {{ .Values.appsec.storage.replicas }} + selector: + matchLabels: + app: {{ .Values.appsec.storage.name }}-lbl + template: + metadata: + labels: + app: {{ .Values.appsec.storage.name }}-lbl + group: fog-core + spec: + securityContext: + {{- if eq .Values.appsec.persistence.enabled true }} + fsGroup: 2000 + runAsGroup: 2000 + runAsUser: 1000 + {{- else }} + runAsUser: 0 + {{- end }} + containers: + - name: {{ .Values.appsec.storage.name }} + imagePullPolicy: Always + {{- with .Values.appsec.storage.image }} + image: {{ .registry }}/{{ .image }}:{{ .tag }} + {{- end }} + env: + - name: K8S_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - containerPort: 8080 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /health/live + port: 8080 + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 10 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /health/ready + port: 8080 + scheme: HTTP + initialDelaySeconds: 3 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 10 + {{- if eq .Values.appsec.persistence.enabled true }} + volumeMounts: + - mountPath: /db/ + name: files-volume + volumes: + - name: files-volume + persistentVolumeClaim: + claimName: {{ .Values.appsec.name }}-storage + {{- end }} +{{- end }} diff --git 
a/build_system/charts/open-appsec-k8s-nginx-ingress/templates/learning-services.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/learning-services.yaml new file mode 100644 index 0000000..fd2a718 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/templates/learning-services.yaml @@ -0,0 +1,33 @@ +{{- if and (eq "stand-alone" .Values.appsec.mode) (eq .Values.appsec.playground false) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.appsec.learning.name }}-svc + namespace: {{ .Release.Namespace }} +spec: + ports: + - name: "http" + port: 80 + targetPort: 8080 + - name: "https" + port: 443 + targetPort: 8080 + selector: + app: {{ .Values.appsec.learning.name }}-lbl +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.appsec.storage.name }}-svc + namespace: {{ .Release.Namespace }} +spec: + ports: + - name: "http" + port: 80 + targetPort: 8080 + - name: "https" + port: 443 + targetPort: 8080 + selector: + app: {{ .Values.appsec.storage.name }}-lbl +{{- end }} diff --git a/build_system/charts/open-appsec-k8s-nginx-ingress/values.yaml b/build_system/charts/open-appsec-k8s-nginx-ingress/values.yaml new file mode 100644 index 0000000..b2adb54 --- /dev/null +++ b/build_system/charts/open-appsec-k8s-nginx-ingress/values.yaml @@ -0,0 +1,1009 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md +## + +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +appsec: + mode: managed + name: open-appsec + image: + #registry: + repository: ghcr.io/openappsec + image: agent + tag: latest + pullPolicy: IfNotPresent + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + resources: + # limits: + # cpu: 100m + # memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + + agentToken: + + command: /cp-nano-agent + customFog: + enabled: true + fogAddress: "https://inext-agents.cloud.ngen.checkpoint.com/" + + #proxy: + + playground: false + + persistence: + ## open-appsec data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + enabled: true + data: + volumeName: appsec-data + size: 1Gi + conf: + volumeName: appsec-conf + size: 1Gi + learning: + ## required a storage class with read write many access mode + storageClass: + ## Azure recommended: + # name: azurefile + ## AWS: follow this guide https://docs.aws.amazon.com/eks/latest/userguide/efs-csi.html + # name: efs-csi + name: + accessModes: ReadWriteMany + volumeName: appsec-learning-storage + size: 1Gi + + learning: + name: open-appsec-learning + replicas: 1 + image: + registry: ghcr.io/openappsec + image: smartsync + tag: latest + + storage: + name: open-appsec-shared-storage + replicas: 1 + image: + registry: ghcr.io/openappsec + image: smartsync-shared-files + tag: latest + +controller: + name: controller + image: + registry: ghcr.io/openappsec + image: nginx-ingress-attachment + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: latest + digest: + pullPolicy: IfNotPresent + # www-data -> uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + # -- Configures the controller container name + containerName: controller + + # -- Configures the ports that the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + # -- Annotations to be added to the controller config configuration configmap. + configAnnotations: {} + + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # -- Optionally customize the pod dnsConfig. + dnsConfig: {} + + # -- Optionally customize the pod hostname. + hostname: {} + + # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. + dnsPolicy: ClusterFirst + + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + + # -- Process IngressClass per name (additionally as per spec.controller). 
+ ingressClassByName: false + + # -- This configuration defines if Ingress Controller should allow users to set + # their own *-snippet annotations, otherwise this is forbidden / dropped + # when users add those annotations. + # Global snippets in ConfigMap are still respected + allowSnippetAnnotations: true + + # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + + ## Use host ports 80 and 443 + ## Disabled by default + hostPort: + # -- Enable 'hostPort' or not + enabled: false + ports: + # -- 'hostPort' http port + http: 80 + # -- 'hostPort' https port + https: 443 + + # -- Election ID to use for status update + electionID: ingress-controller-leader + + ## This section refers to the creation of the IngressClass resource + ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19 + ingressClassResource: + # -- Name of the ingressClass + name: nginx + # -- Is this ingressClass enabled or not + enabled: true + # -- Is this the default ingressClass for the cluster + default: false + # -- Controller-value of the controller that is processing this ingressClass + controllerValue: "k8s.io/ingress-nginx" + + # -- Parameters is a link to a custom resource containing additional + # configuration for the controller. This is optional if the controller + # does not require extra parameters. + parameters: {} + + # -- For backwards compatibility with ingress.class annotation, use ingressClass. + # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation + ingressClass: nginx + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Security Context policies for controller pods + podSecurityContext: {} + + # -- See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + + # -- Allows customization of the source of the IP address or FQDN to report + # in the ingress status field. By default, it reads the information provided + # by the service. If disable, the status field reports the IP address of the + # node or nodes where an ingress controller pod is running. + publishService: + # -- Enable 'publishService' or not + enabled: true + # -- Allows overriding of the publish service to bind to + # Must be / + pathOverride: "" + + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: false + # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE) + namespace: "" + # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels + # only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. 
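+    # Illustrative value only (not a chart default): a label selector in the foo=bar form
+    # described above, e.g.
+    # namespaceSelector: "environment=production"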
+ namespaceSelector: "" + + # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + + tcp: + # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the tcp config configmap + annotations: {} + + udp: + # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) + configMapNamespace: "" + # -- Annotations to be added to the udp config configmap + annotations: {} + + # -- Maxmind license key to download GeoLite2 Databases. + ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases + maxmindLicenseKey: "" + + # -- Additional command line arguments to pass to nginx-ingress-controller + # E.g. to specify the default SSL certificate you can use + extraArgs: {} + ## extraArgs: + ## default-ssl-certificate: "/" + + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + # -- Use a `DaemonSet` or `Deployment` + kind: StatefulSet + + # -- Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + + # -- The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Affinity and anti-affinity rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: "kubernetes.io/hostname" + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: ingress-nginx-internal + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + + + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the ingress nginx controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. 
+ ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # Mutually exclusive with hpa autoscaling + keda: + apiVersion: "keda.sh/v1alpha1" + ## apiVersion changes with keda 1.x vs 2.x + ## 2.x = keda.sh/v1alpha1 + ## 1.x = keda.k8s.io/v1alpha1 + enabled: false + minReplicas: 1 + maxReplicas: 11 + pollingInterval: 30 + cooldownPeriod: 300 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + # Custom annotations for ScaledObject resource + # annotations: + # key: value + triggers: [] + # - type: prometheus + # metadata: + # serverAddress: http://:9090 + # metricName: http_requests_total + # threshold: '100' + # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) + + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + # -- Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + # -- If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were + # using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http + # It allows choosing the protocol for each backend specified in the Kubernetes service. + # See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 + # Will be ignored for Kubernetes versions older than 1.20 + ## + appProtocol: true + + annotations: {} + labels: {} + # clusterIP: "" + + # -- List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # -- Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on providers supporting it. + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + ## Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". 
+ ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + # sessionAffinity: "" + + ## Specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, + ## the service controller allocates a port from your cluster’s NodePort range. + ## Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Represents the dual-stack-ness requested or required by this Service. Possible values are + # SingleStack, PreferDualStack or RequireDualStack. + # The ipFamilies and clusterIPs fields depend on the value of this field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilyPolicy: "SingleStack" + + # -- List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically + # based on cluster configuration and the ipFamilyPolicy field. + ## Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ + ipFamilies: + - IPv4 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: LoadBalancer + + ## type: NodePort + ## nodePorts: + ## http: 32080 + ## https: 32443 + ## tcp: + ## 8080: 32808 + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + + external: + enabled: true + + internal: + # -- Enables an additional internal load balancer (besides the external one). + enabled: false + # -- Annotations are mandatory for the load balancer to come up. Varies with the cloud service. + annotations: {} + + # loadBalancerIP: "" + + # -- Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. + loadBalancerSourceRanges: [] + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + + # -- Additional containers to be added to the controller pod. + # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. 
+ extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + extraModules: [] + ## Modules, which are mounted into the core nginx image + # - name: opentelemetry + # image: registry.k8s.io/ingress-nginx/opentelemetry:v20220415-controller-v1.2.0-beta.0-2-g81c2afd97@sha256:ce61e2cf0b347dffebb2dcbf57c33891d2217c1bad9c0959c878e5be671ef941 + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + admissionWebhooks: + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. + ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. + enabled: true + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + createSecretJob: + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + patchWebhookJob: + resources: {} + + patch: + enabled: true + image: + registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: v1.1.1 + digest: sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660 + pullPolicy: IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + runAsUser: 2000 + fsGroup: 2000 + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + + serviceMonitor: + enabled: false + additionalLabels: {} + ## The label to use to retrieve the job name from. 
+ ## jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + ## Default: scrape .Release.Namespace only + ## To scrape all, use the following: + ## namespaceSelector: + ## any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + relabelings: [] + metricRelabelings: [] + + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less then a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX, this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX, this requires your attention + + # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook: + # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds + # to 300, allowing the draining of connections up to five minutes. + # If the active connections end before that, the pod will terminate gracefully at that time. + # To effectively take advantage of this feature, the Configmap feature + # worker-shutdown-timeout new value is 240s instead of 10s. 
+ ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + + priorityClassName: "" + +# -- Rollback limit +## +revisionHistoryLimit: 10 + +## Default 404 backend +## +defaultBackend: + ## + enabled: false + + name: defaultbackend + image: + registry: registry.k8s.io + image: defaultbackend-amd64 + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + + # -- Use an existing PSP instead of creating one + existingPsp: "" + + extraArgs: {} + + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + # -- Security Context policies for controller pods + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + podSecurityContext: {} + + # -- Security Context policies for controller main container. + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + # notes on enabling and using sysctls + ## + containerSecurityContext: {} + + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + # -- Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + extraVolumeMounts: [] + ## Additional volumeMounts to the default backend container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the default backend pod. 
+ # - name: copy-portal-skins + # emptyDir: {} + + autoscaling: + annotations: {} + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + service: + annotations: {} + + # clusterIP: "" + + # -- List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + # -- Labels to be added to the default backend resources + labels: {} + +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false + +## If true, create & use Pod Security Policy resources +## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} + +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# 53: "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP ports names in ingress controller service +## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" + +# -- (string) A base64-encoded Diffie-Hellman parameter. +# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: diff --git a/build_system/docker/CMakeLists.txt b/build_system/docker/CMakeLists.txt new file mode 100644 index 0000000..3b464ec --- /dev/null +++ b/build_system/docker/CMakeLists.txt @@ -0,0 +1,9 @@ +install(FILES Dockerfile entry.sh DESTINATION .) 
+ +add_custom_command( + OUTPUT ${CMAKE_INSTALL_PREFIX}/agent-docker.img + COMMAND docker build -t agent-docker ${CMAKE_INSTALL_PREFIX} + COMMAND docker image save agent-docker -o ${CMAKE_INSTALL_PREFIX}/agent-docker.img +) + +add_custom_target(docker DEPENDS ${CMAKE_INSTALL_PREFIX}/agent-docker.img) diff --git a/build_system/docker/Dockerfile b/build_system/docker/Dockerfile new file mode 100644 index 0000000..cc56970 --- /dev/null +++ b/build_system/docker/Dockerfile @@ -0,0 +1,23 @@ +FROM alpine + +RUN apk add --no-cache -u busybox +RUN apk add --no-cache -u zlib +RUN apk add --no-cache bash +RUN apk add --no-cache libstdc++ +RUN apk add --no-cache libexecinfo +RUN apk add --no-cache boost +RUN apk add --no-cache icu-libs +RUN apk add --no-cache curl +RUN apk add --no-cache libunwind +RUN apk add --no-cache gdb +RUN apk add --no-cache libxml2 +RUN apk add --no-cache pcre2 +RUN apk add --update coreutils + +COPY install*.sh /nano-service-installers/ +COPY entry.sh /entry.sh + +RUN chmod +x entry.sh /nano-service-installers/* +RUN ln -s entry.sh cp-nano-agent + +CMD [ "/cp-nano-agent" ] diff --git a/build_system/docker/entry.sh b/build_system/docker/entry.sh new file mode 100644 index 0000000..dad65af --- /dev/null +++ b/build_system/docker/entry.sh @@ -0,0 +1,81 @@ +#!/bin/bash + +HTTP_TRANSACTION_HANDLER_SERVICE="install-cp-nano-service-http-transaction-handler.sh" +ATTACHMENT_REGISTRATION_SERVICE="install-cp-nano-attachment-registration-manager.sh" +ORCHESTRATION_INSTALLATION_SCRIPT="install-cp-nano-agent.sh" + +var_fog_address= +var_proxy= +var_mode= +var_token= +init= + +if [ ! -f /nano-service-installers/$ORCHESTRATION_INSTALLATION_SCRIPT ]; then + echo "Error: agent installation package doesn't exist." + exit 1 +fi + +while true; do + if [ -z "$1" ]; then + break + elif [ "$1" == "--fog" ]; then + shift + var_fog_address="$1" + elif [ "$1" == "--proxy" ]; then + shift + var_proxy="$1" + elif [ "$1" == "--hybrid-mode" ]; then + var_mode="--hybrid_mode" + elif [ "$1" == "--token" ]; then + shift + var_token="$1" + fi + shift +done + +if [ -z $var_token ]; then + echo "Error: Token was not provided as input argument." + exit 1 +fi + +orchestration_service_installation_flags="--token $var_token --container_mode --skip_registration" +if [ ! -z $var_fog_address ]; then + orchestration_service_installation_flags="$orchestration_service_installation_flags --fog $var_fog_address" +fi +if [ ! -z $var_proxy ]; then + orchestration_service_installation_flags="$orchestration_service_installation_flags --proxy $var_proxy" +fi + +if [ ! -z $var_mode ]; then + orchestration_service_installation_flags="$orchestration_service_installation_flags $var_mode" +fi + + +/nano-service-installers/$ORCHESTRATION_INSTALLATION_SCRIPT --install $orchestration_service_installation_flags + +/nano-service-installers/$ATTACHMENT_REGISTRATION_SERVICE --install +/nano-service-installers/$HTTP_TRANSACTION_HANDLER_SERVICE --install + +touch /etc/cp/watchdog/wd.startup +while true; do + if [ -z "$init" ]; then + init=true + /etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 & + sleep 5 + active_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog") + fi + + current_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog") + if [ ! 
-f /tmp/restart_watchdog ] && [ "$current_watchdog_pid" != "$active_watchdog_pid" ]; then + echo "Error: Watchdog exited abnormally" + exit 1 + elif [ -f /tmp/restart_watchdog ]; then + rm -f /tmp/restart_watchdog + kill -9 "$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")" + /etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 & + sleep 5 + active_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog") + fi + + sleep 5 +done diff --git a/build_system/tools/packaging/makeself_wrapper.sh b/build_system/tools/packaging/makeself_wrapper.sh new file mode 100755 index 0000000..aa7e0ff --- /dev/null +++ b/build_system/tools/packaging/makeself_wrapper.sh @@ -0,0 +1,17 @@ +export param="$2" +export label="$5" +export version="$7" +export script="$8" + +echo $6 | sed 's/ARGPACKINGMAGIC/\n/g' | awk -v cmd=$1 -v dir=$3 -v artifact=$4 ' +{ + offset = index($0,"ARGSPACEMAGIC"); + space=""; + while(offset + length(space) < 40) space = space " "; + gsub(/ARGSPACEMAGIC/,space,$0); + if(length($0)) help = help "\\n" $0 +} +END { + gsub(/\\ /, " ", help); + system(cmd" -q "ENVIRON["param"]" "dir" "artifact " \"" ENVIRON["label"] "\" \"" help "\" \"" ENVIRON["version"] "\" " ENVIRON["script"]); +}' diff --git a/components/CMakeLists.txt b/components/CMakeLists.txt new file mode 100644 index 0000000..b051072 --- /dev/null +++ b/components/CMakeLists.txt @@ -0,0 +1,14 @@ +add_subdirectory(report_messaging) +add_subdirectory(http_manager) +add_subdirectory(http_transaction_data) +add_subdirectory(generic_rulebase) +add_subdirectory(signal_handler) +add_subdirectory(gradual_deployment) +add_subdirectory(packet) +add_subdirectory(pending_key) +add_subdirectory(messaging_downloader) +add_subdirectory(health_check_manager) + +add_subdirectory(utils) +add_subdirectory(attachment-intakers) +add_subdirectory(security_apps) diff --git a/components/attachment-intakers/CMakeLists.txt b/components/attachment-intakers/CMakeLists.txt new file mode 100644 index 0000000..d7fb7d2 --- /dev/null +++ b/components/attachment-intakers/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(nginx_attachment) +add_subdirectory(attachment_registrator) diff --git a/components/attachment-intakers/attachment_registrator/CMakeLists.txt b/components/attachment-intakers/attachment_registrator/CMakeLists.txt new file mode 100755 index 0000000..83ef7b1 --- /dev/null +++ b/components/attachment-intakers/attachment_registrator/CMakeLists.txt @@ -0,0 +1 @@ +add_library(attachment_registrator attachment_registrator.cc) diff --git a/components/attachment-intakers/attachment_registrator/attachment_registrator.cc b/components/attachment-intakers/attachment_registrator/attachment_registrator.cc new file mode 100755 index 0000000..6ecb5d2 --- /dev/null +++ b/components/attachment-intakers/attachment_registrator/attachment_registrator.cc @@ -0,0 +1,470 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "attachment_registrator.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "config.h" +#include "singleton.h" +#include "i_mainloop.h" +#include "buffer.h" +#include "enum_array.h" +#include "nginx_attachment_common.h" + +USE_DEBUG_FLAG(D_ATTACHMENT_REGISTRATION); + +using namespace std; + +class AttachmentRegistrator::Impl +{ +public: + void + init() + { + i_socket = Singleton::Consume::by(); + Singleton::Consume::by()->addOneTimeRoutine( + I_MainLoop::RoutineType::System, + [this] () + { + while(!initSocket()) { + Singleton::Consume::by()->yield(chrono::seconds(1)); + } + }, + "Initialize attachment registration IPC" + ); + + uint expiration_timeout = getProfileAgentSettingWithDefault( + 300, "attachmentRegistrator.expirationCheckSeconds" + ); + Singleton::Consume::by()->addRecurringRoutine( + I_MainLoop::RoutineType::Timer, + chrono::seconds(expiration_timeout), + [this] () { handleExpiration(); }, + "Attachment's expiration handler", + true + ); + } + + void + fini() + { + if (server_sock > 0) { + i_socket->closeSocket(server_sock); + server_sock = -1; + } + + if (shared_registration_path != "") unlink(shared_registration_path.c_str()); + } + +private: + bool + registerAttachmentProcess( + const uint8_t &uid, + const string &family_id, + const uint8_t num_of_members, + const AttachmentType type) + { + registered_attachments[family_id] = vector(num_of_members, true); + + const int cmd_tmout = 900; + I_ShellCmd *shell_cmd = Singleton::Consume::by(); + Maybe registration_res = shell_cmd->getExecOutput( + genRegCommand(family_id, num_of_members, type), + cmd_tmout + ); + if (!registration_res.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Failed to register attachment." + << "Attachment Type: " + << static_cast(type) + << ", Attachment id: " + << uid + <<", Family id: " + << family_id + << ", Total number of instances: " + << num_of_members; + + return false; + } + + return true; + } + + void + replyWithRelevantHandler( + I_Socket::socketFd socket, + const uint8_t &uid, + const string &family_id, + const AttachmentType type) + { + string handler_path = genHandlerPath(uid, family_id, type); + + uint8_t path_size = handler_path.size(); + vector path_size_data(reinterpret_cast(&path_size), reinterpret_cast(&path_size) + 1); + if (!i_socket->writeData(socket, path_size_data)) { + dbgWarning(D_ATTACHMENT_REGISTRATION) << "Failed to send handler path size to attachment"; + return; + } + + dbgDebug(D_ATTACHMENT_REGISTRATION) + << "Successfully sent handler path size to attachment. Size: " + << to_string(path_size); + + vector path_data(handler_path.data(), handler_path.data() + handler_path.size()); + if (!i_socket->writeData(socket, path_data)) { + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Failed to send handler path data to attachment. Path: " + << handler_path; + return; + } + + dbgDebug(D_ATTACHMENT_REGISTRATION) + << "Successfully sent handler path data to attachment. 
Path: " + << handler_path; + } + + string + genHandlerPath(const uint8_t &uid, const string &family_id, const AttachmentType type) const + { + static const string handler_path_format = "/dev/shm/check-point/cp-nano-"; + stringstream handler_path; + handler_path << handler_path_format; + switch(type) { + case (AttachmentType::NGINX_ATT_ID): { + handler_path << "http-transaction-handler-"; + break; + } + default: + dbgAssert(false) << "Unsupported Attachment " << static_cast(type); + } + + if (!family_id.empty()) handler_path << family_id << "_"; + handler_path << to_string(uid); + + return handler_path.str(); + } + + string + genRegCommand(const string &family_id, const uint num_of_members, const AttachmentType type) const + { + dbgAssert(num_of_members > 0) << "Failed to generate a registration command for an empty group of attachments"; + + static const string registration_format = "/etc/cp/watchdog/cp-nano-watchdog --register "; + stringstream registration_command; + registration_command<< registration_format; + switch(type) { + case (AttachmentType::NGINX_ATT_ID): { + registration_command << "/etc/cp/HttpTransactionHandler/cp-nano-http-transaction-handler"; + break; + } + default: + dbgAssert(false) << "Unsupported Attachment " << static_cast(type); + } + + if (!family_id.empty()) registration_command << " --family " << family_id; + registration_command << " --count " << to_string(num_of_members); + + return registration_command.str(); + } + + bool + initSocket() + { + shared_registration_path = getConfigurationWithDefault( + "/dev/shm/check-point/cp-nano-attachment-registration", + "Attachment Registration", + "Registration IPC Path" + ); + + size_t last_slash_idx = shared_registration_path.find_last_of("/"); + string directory_path = shared_registration_path.substr(0, last_slash_idx); + mkdir(directory_path.c_str(), 0777); + + if (server_sock < 0) { + server_sock = getNewSocket(shared_registration_path); + if (server_sock < 0) { + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Failed to create server socket. Path: " + << shared_registration_path; + return false; + } + + Singleton::Consume::by()->addFileRoutine( + I_MainLoop::RoutineType::RealTime, + server_sock, + [this] () { handleAttachmentRegistration(); }, + "Attachment's registration handler", + true + ); + } + + string shared_expiration_path = getConfigurationWithDefault( + SHARED_KEEP_ALIVE_PATH, + "Attachment Registration", + "Registration IPC Path" + ); + + if (keep_alive_sock < 0) { + keep_alive_sock = getNewSocket(shared_expiration_path); + if (keep_alive_sock < 0) { + dbgWarning(D_ATTACHMENT_REGISTRATION) << "Failed to create keep-alive socket"; + return false; + } + + Singleton::Consume::by()->addFileRoutine( + I_MainLoop::RoutineType::System, + keep_alive_sock, + [this] () { handleKeepAlives(); }, + "Attachment keep alive registration", + true + ); + } + return true; + } + + I_Socket::socketFd + getNewSocket(const string &path) + { + Maybe new_socket = i_socket->genSocket( + I_Socket::SocketType::UNIX, + false, + true, + path + ); + if (!new_socket.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) << "Failed to open a socket. 
Error: " << new_socket.getErr(); + return -1; + } + + dbgAssert(new_socket.unpack() > 0) << "Generated socket is OK yet negative"; + return new_socket.unpack(); + } + + void + handleKeepAlives() + { + Maybe accepted_socket = i_socket->acceptSocket(keep_alive_sock, false); + if (!accepted_socket.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Failed to accept new keep-alive request socket: " + << accepted_socket.getErr(); + return; + } + + I_Socket::socketFd client_socket = accepted_socket.unpack(); + dbgAssert(client_socket > 0) << "Generated client socket is OK yet negative"; + auto close_socket_on_exit = make_scope_exit([&]() { i_socket->closeSocket(client_socket); }); + + Maybe attachment_id = readNumericParam(client_socket); + if (!attachment_id.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) << "Failed to register new attachment: " << attachment_id.getErr(); + return; + } + + Maybe family_id = readStringParam(client_socket); + if (!family_id.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) << "Failed to register new attachment: " << family_id.getErr(); + return; + } + + if (family_id.unpack() == "") return; + + auto family_members = registered_attachments.find(family_id.unpack()); + if (family_members == registered_attachments.end()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Adding new unregistered family. Family ID: " + << family_id.unpack(); + registered_attachments[family_id.unpack()] = vector(attachment_id.unpack() + 1, true); + return; + } + + if (family_members->second.size() <= attachment_id.unpack()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Adding new non-monitored family members. Family ID: " + << family_id.unpack() + << ", Instance ID:" + << attachment_id.unpack(); + + registered_attachments[family_id.unpack()] = vector(attachment_id.unpack() + 1, true); + return; + } + family_members->second[attachment_id.unpack()] = true; + } + + void + handleExpiration() + { + I_ShellCmd *shell_cmd = Singleton::Consume::by(); + vector deleted_families; + for (pair>> &family : registered_attachments) { + const string &family_id = family.first; + if (family_id == "") continue; + + bool is_family_inactive = true; + vector &family_members = family.second; + for (const bool member : family_members) { + if (member == true) is_family_inactive = false; + } + + if (is_family_inactive) { + static const string unregister_format = "/etc/cp/watchdog/cp-nano-watchdog --un-register "; + stringstream unregister_command; + unregister_command << unregister_format; + unregister_command << "/etc/cp/HttpTransactionHandler/cp-nano-http-transaction-handler"; + unregister_command << " --family " << family_id; + + Maybe res = shell_cmd->getExecOutput(unregister_command.str()); + if (!res.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Failed to un-register attachment. Family id: " + << family_id; + } else { + deleted_families.push_back(family_id); + } + } else { + fill(family_members.begin(), family_members.end(), false); + } + } + + for (const string &family : deleted_families) { + registered_attachments.erase(family); + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Successfully un-registered attachments family. 
Family id: " + << family; + } + } + + void + handleAttachmentRegistration() + { + Maybe accepted_socket = i_socket->acceptSocket(server_sock, false); + if (!accepted_socket.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Failed to accept a new client socket: " + << accepted_socket.getErr(); + return; + } + + I_Socket::socketFd client_socket = accepted_socket.unpack(); + dbgAssert(client_socket > 0) << "Generated client socket is OK yet negative"; + auto close_socket_on_exit = make_scope_exit([&]() { i_socket->closeSocket(client_socket); }); + + Maybe attachment_type = readAttachmentType(client_socket); + if (!attachment_type.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Failed to register a new attachment: " + << attachment_type.getErr(); + return; + } + + Maybe attachment_id = readNumericParam(client_socket); + if (!attachment_id.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) << "Failed to register a new attachment: " << attachment_id.getErr(); + return; + } + + Maybe instances_count = readNumericParam(client_socket); + if (!instances_count.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) + << "Failed to register a new attachment: " + << instances_count.getErr(); + return; + } + + Maybe family_id = readStringParam(client_socket); + if (!family_id.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) << "Failed to register a new attachment: " << family_id.getErr(); + return; + } + + if (!registerAttachmentProcess(*attachment_id, *family_id, *instances_count, *attachment_type)) { + return; + } + + replyWithRelevantHandler(client_socket, *attachment_id, *family_id, *attachment_type); + } + + Maybe + readNumericParam(I_Socket::socketFd socket) + { + Maybe> param_to_read = i_socket->receiveData(socket, sizeof(uint8_t)); + if (!param_to_read.ok()) { + dbgWarning(D_ATTACHMENT_REGISTRATION) << "Failed to read param: " << param_to_read.getErr(); + return genError("Failed to read numeric parameter"); + } + + return *reinterpret_cast(param_to_read.unpack().data()); + } + + Maybe + readAttachmentType(I_Socket::socketFd socket) + { + Maybe attachment_type = readNumericParam(socket); + if (!attachment_type.ok()) return attachment_type.passErr(); + + dbgTrace(D_ATTACHMENT_REGISTRATION) + << "Successfully received attachment type. Attachment type value: " + << static_cast(*attachment_type); + + return convertToEnum(*attachment_type); + } + + Maybe + readStringParam(I_Socket::socketFd socket) + { + Maybe param_size = readNumericParam(socket); + if (!param_size.ok()) return param_size.passErr(); + + dbgTrace(D_ATTACHMENT_REGISTRATION) + << "Successfully received string size. 
Size: " + << static_cast(*param_size); + + Maybe> param_to_read = i_socket->receiveData(socket, param_size.unpack()); + + return string(param_to_read.unpack().begin(), param_to_read.unpack().end()); + } + + I_Socket::socketFd server_sock = -1; + I_Socket::socketFd keep_alive_sock = -1; + I_Socket *i_socket = nullptr; + map> registered_attachments; + string shared_registration_path; +}; + +AttachmentRegistrator::AttachmentRegistrator() : Component("AttachmentRegistrator"), pimpl(make_unique()) {} + +AttachmentRegistrator::~AttachmentRegistrator() {} + +void AttachmentRegistrator::init() { pimpl->init(); } + +void AttachmentRegistrator::fini() { pimpl->fini(); } + +void +AttachmentRegistrator::preload() +{ + registerExpectedConfiguration("Attachment Registration", "Registration IPC Path"); +} diff --git a/components/attachment-intakers/nginx_attachment/CMakeLists.txt b/components/attachment-intakers/nginx_attachment/CMakeLists.txt new file mode 100755 index 0000000..40c0bd9 --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/CMakeLists.txt @@ -0,0 +1,5 @@ +add_definitions(-DUSERSPACE) + +add_library(nginx_attachment nginx_attachment.cc nginx_attachment_config.cc nginx_attachment_opaque.cc nginx_parser.cc user_identifiers_config.cc nginx_intaker_metric.cc nginx_attachment_metric.cc cidrs_data.cc) + +target_link_libraries(nginx_attachment http_configuration http_transaction_data connkey table buffers -lshmem_ipc) diff --git a/components/attachment-intakers/nginx_attachment/cidrs_data.cc b/components/attachment-intakers/nginx_attachment/cidrs_data.cc new file mode 100755 index 0000000..484ba28 --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/cidrs_data.cc @@ -0,0 +1,128 @@ +#include "cidrs_data.h" + +#include "log_generator.h" + +using namespace std; + +USE_DEBUG_FLAG(D_NGINX_ATTACHMENT_PARSER); + + +bool +CIDRSData::matchCidr(const in_addr &address, const in_addr &network) const +{ + if (network_bits == 0) { + // C99 6.5.7 (3): u32 << 32 is undefined behaviour + return true; + } + return !((address.s_addr ^ network.s_addr) & htonl(0xFFFFFFFFu << (32 - network_bits))); +} + +bool +CIDRSData::matchCidr(const in6_addr &address, const in6_addr &network) const +{ +#ifdef __linux__ + const uint32_t *a = address.s6_addr32; + const uint32_t *n = network.s6_addr32; +#else + const uint32_t *a = address.__u6_addr.__u6_addr32; + const uint32_t *n = network.__u6_addr.__u6_addr32; +#endif + int bits_whole, bits_incomplete; + bits_whole = network_bits >> 5; // number of whole u32 + bits_incomplete = network_bits & 0x1F; // number of bits in incomplete u32 + if (bits_whole) { + if (memcmp(a, n, bits_whole << 2)) { + return false; + } + } + if (bits_incomplete) { + uint32_t mask = htonl((0xFFFFFFFFu) << (32 - bits_incomplete)); + if ((a[bits_whole] ^ n[bits_whole]) & mask) { + return false; + } + } + return true; +} + +CIDRSData::CIDRSData(const string &str_cidr) +{ + size_t processed_bits = 0; + + size_t pos = str_cidr.find_last_of('/'); + + // get ip from targetCidr + string str_prefix = pos != string::npos ? str_cidr.substr(0, pos) : str_cidr; + // get subnet mask from targetCidr or calculate it based on ipv4 / ipv6 + string str_suffix; + if (pos != string::npos) { + str_suffix = str_cidr.substr(pos + 1); + } else if (str_cidr.find(':') == string::npos) { + str_suffix = "32"; + } else { + str_suffix = "128"; + } + + + int bits = -1; + try { + bits = stoi(str_suffix, &processed_bits); + network_bits = (uint8_t)bits; + // convert int to uint8_t + } catch (...) 
{ + dbgWarning(D_NGINX_ATTACHMENT_PARSER) + << "Failed to convert CIDR number of bits from string to int" + << str_cidr; + return; + } + + // check if CIDR is valid + if (processed_bits != str_suffix.length() || bits > 128 || bits < 0) { + dbgWarning(D_NGINX_ATTACHMENT_PARSER) + << "Failed to convert CIDR number of bits from string to int (out of range)." + << str_cidr; + return; + } + + if (IPAddr::isValidIPAddr(str_prefix)) { + ip_addr = IPAddr::createIPAddr(str_prefix).unpack(); + } else { + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Failed to convert CIDR number of bits from string to int"; + return; + } + + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "successfully created cidr from the following string: " << str_cidr; + valid_cidr = true; +} + +bool +CIDRSData::contains(const string &source_ip) const +{ + if(!valid_cidr) { + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Invalid CIDR."; + return false; + } + + // check from which type the target ip and check if ip belongs to is mask ip + //convert source_ip to ip v4 or v6. + switch (ip_addr.getType()) { + case IPType::V4: { + struct in_addr source_inaddr; + if (inet_pton(AF_INET, source_ip.c_str(), &source_inaddr) == 1) { + return matchCidr(source_inaddr, ip_addr.getIPv4()); + } + break; + } + case IPType::V6: { + struct in6_addr source_inaddr6; + if (inet_pton(AF_INET6, source_ip.c_str(), &source_inaddr6) == 1) { + return matchCidr(source_inaddr6, ip_addr.getIPv6()); + } + break; + } + default: { + dbgWarning(D_NGINX_ATTACHMENT_PARSER) << "Unexpected ip type"; + } + } + + return false; +} diff --git a/components/attachment-intakers/nginx_attachment/cidrs_data.h b/components/attachment-intakers/nginx_attachment/cidrs_data.h new file mode 100755 index 0000000..8804d7b --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/cidrs_data.h @@ -0,0 +1,28 @@ +#ifndef __CIDRS_DATA_H__ +#define __CIDRS_DATA_H__ + +#include +#include +#include +#include +#include +#include +#include "maybe_res.h" +#include "connkey.h" + +class CIDRSData +{ +public: + CIDRSData(const std::string &str_cidr); + bool contains(const std::string &source_ip) const; + +private: + bool matchCidr(const in_addr &address, const in_addr &net) const; + bool matchCidr(const in6_addr &address, const in6_addr &network) const; + + IPAddr ip_addr; + uint8_t network_bits; + bool valid_cidr = false; +}; + +#endif // __CIDRS_DATA_H__ diff --git a/components/attachment-intakers/nginx_attachment/intentional_failure.cc b/components/attachment-intakers/nginx_attachment/intentional_failure.cc new file mode 100755 index 0000000..0f804ed --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/intentional_failure.cc @@ -0,0 +1,145 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
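+
+// Test-only failure injection for the attachment intaker. When the "HTTP manager"
+// configuration enables "Enable intentional failure mode" or "Enable intentional
+// delay mode", shouldFail() turns the selected operation type (socket creation,
+// accept, read/write, response parsing, etc.) into a failure, optionally after a
+// number of allowed attempts and up to an optional failure limit, and
+// delayIfNeeded() sleeps for the configured number of microseconds before the
+// selected operation. With both modes disabled the original result is returned
+// unchanged.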
+ +#include "intentional_failure.h" + +#include + +#include +#include "config.h" +#include "debug.h" + +using namespace std; + +USE_DEBUG_FLAG(D_NGINX_ATTACHMENT); + +IntentionalFailureHandler::FailureType +getFailureTypeFromString(const string &failure) +{ + if (failure == "create socket") return IntentionalFailureHandler::FailureType::CreateSocket; + if (failure == "accept socket") return IntentionalFailureHandler::FailureType::AcceptSocket; + if (failure == "initialize connection channel") + return IntentionalFailureHandler::FailureType::InitializeConnectionChannel; + if (failure == "write to socket") return IntentionalFailureHandler::FailureType::WriteDataToSocket; + if (failure == "read from socket") return IntentionalFailureHandler::FailureType::ReceiveDataFromSocket; + if (failure == "parse response") return IntentionalFailureHandler::FailureType::ParsingResponse; + if (failure == "get data from attachment") return IntentionalFailureHandler::FailureType::GetDataFromAttchment; + if (failure == "register attachment") return IntentionalFailureHandler::FailureType::RegisterAttchment; + if (failure == "get instance id") return IntentionalFailureHandler::FailureType::GetInstanceID; + + if (failure != "") { + dbgInfo(D_NGINX_ATTACHMENT) << "Ignoring unknown intentional failure type:" << failure; + } + return IntentionalFailureHandler::FailureType::None; +} + +void +IntentionalFailureHandler::RegisterIntentionalFailure() +{ + is_failure_enabled = getConfigurationWithDefault( + false, "HTTP manager", "Enable intentional failure mode" + ); + + string failure_type_str = getConfigurationWithDefault("", "HTTP manager", "Intentional failure type"); + failure_type = getFailureTypeFromString(failure_type_str); + if (failure_type == FailureType::None) is_failure_enabled = false; + + allow_count = getConfigurationWithDefault(0, "HTTP manager", "Intentional failure allow times"); + fail_count = getConfigurationWithDefault(-1, "HTTP manager", "Intentional failure limit"); + is_limited = fail_count > 0; + + is_delay_enabled = getConfigurationWithDefault( + false, "HTTP manager", "Enable intentional delay mode" + ); + + string delay_failure_type_str = getConfigurationWithDefault( + "", "HTTP manager", "Intentional delay failure type" + ); + delay_failure_type = getFailureTypeFromString(delay_failure_type_str); + + delay_amount = chrono::microseconds( + getConfigurationWithDefault(-1, "HTTP manager", "Intentional delay amount") + ); + + if (delay_failure_type == FailureType::None || delay_amount <= chrono::microseconds(0)) is_delay_enabled = false; + + if (is_failure_enabled) { + dbgInfo(D_NGINX_ATTACHMENT) << "Registered Intentional failure. Type: " << failure_type_str + << ", will allow first " << to_string(allow_count) << " actions" + << ", fail limit: " << (is_limited ? to_string(fail_count) : "unlimited"); + } + + if (is_delay_enabled) { + dbgInfo(D_NGINX_ATTACHMENT) << "Registered Intentional delay. Type: " << delay_failure_type_str + << ", amount: " << delay_amount.count() << " microseconds"; + } + +} + +void +IntentionalFailureHandler::init() +{ + RegisterIntentionalFailure(); + registerConfigLoadCb([this]() { RegisterIntentionalFailure(); }); + if (!is_failure_enabled && !is_delay_enabled) { + dbgInfo(D_NGINX_ATTACHMENT) << "Initialized Intentional failure. 
No failure/delay was specified"; + } +} + +bool +IntentionalFailureHandler::shouldFail( + bool was_originaly_successful, + IntentionalFailureHandler::FailureType failure, + bool *failed_on_purpose +) +{ + *failed_on_purpose = false; + if (is_failure_enabled && failure_type == failure) { + if (allow_count > 0) { + allow_count --; + dbgInfo(D_NGINX_ATTACHMENT) << "Intentional failure: allowed action, remaining tries to be allowed: " + << to_string(allow_count); + return !was_originaly_successful; + } + if (is_limited) { + if (fail_count <= 0) return !was_originaly_successful; + fail_count --; + } + dbgInfo(D_NGINX_ATTACHMENT) << "Intentional failure was activated, remaining failures: " + << (is_limited ? to_string(fail_count) : "unlimited"); + *failed_on_purpose = true; + return true; + } + return !was_originaly_successful; +} + +void +IntentionalFailureHandler::delayIfNeeded(IntentionalFailureHandler::FailureType failure) +{ + if (is_delay_enabled && delay_failure_type == failure) { + dbgInfo(D_NGINX_ATTACHMENT) << "Intentional delay was activated (" << delay_amount.count() << " microseconds)"; + usleep(delay_amount.count()); + } +} + +void +IntentionalFailureHandler::preload() +{ + registerExpectedConfiguration("HTTP manager", "Enable intentional failure mode"); + registerExpectedConfiguration("HTTP manager", "Intentional failure type"); + registerExpectedConfiguration("HTTP manager", "Intentional failure limit"); + registerExpectedConfiguration("HTTP manager", "Intentional failure allow times"); + registerExpectedConfiguration("HTTP manager", "Enable intentional delay mode"); + registerExpectedConfiguration("HTTP manager", "Intentional delay failure type"); + registerExpectedConfiguration("HTTP manager", "Intentional delay amount"); +} diff --git a/components/attachment-intakers/nginx_attachment/intentional_failure.h b/components/attachment-intakers/nginx_attachment/intentional_failure.h new file mode 100755 index 0000000..630325d --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/intentional_failure.h @@ -0,0 +1,56 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
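+
+// Expected usage (see nginx_attachment.cc, which wraps these calls in the
+// SHOULD_FAIL()/DELAY_IF_NEEDED() macros; the macros call into
+// IntentionalFailureHandler only when built with FAILURE_TEST, otherwise
+// SHOULD_FAIL(is_ok, ...) reduces to !(is_ok)):
+//
+//   bool did_fail_on_purpose = false;
+//   DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::WriteDataToSocket);
+//   bool res = i_socket->writeData(attachment_sock, reg_success);
+//   if (SHOULD_FAIL(res, IntentionalFailureHandler::FailureType::WriteDataToSocket,
+//           &did_fail_on_purpose)) {
+//       // handle the (possibly intentional) write failure
+//   }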
+ +#ifndef __INTENTIONAL_FAILURE__ +#define __INTENTIONAL_FAILURE__ + +#include + +class IntentionalFailureHandler +{ +public: + enum class FailureType { + None, + CreateSocket, + AcceptSocket, + InitializeConnectionChannel, + WriteDataToSocket, + ReceiveDataFromSocket, + ParsingResponse, + GetDataFromAttchment, + RegisterAttchment, + GetInstanceID, + COUNT + }; + + void init(); + bool shouldFail(bool was_originaly_successful, FailureType failure, bool *failed_on_purpose); + void delayIfNeeded(FailureType failure); + + void preload(); + +private: + void RegisterIntentionalFailure(); + + FailureType failure_type; + bool is_failure_enabled; + bool is_limited; + int fail_count; + int allow_count; + + FailureType delay_failure_type; + bool is_delay_enabled; + std::chrono::microseconds delay_amount; +}; + +#endif // __INTENTIONAL_FAILURE__ diff --git a/components/attachment-intakers/nginx_attachment/nginx_attachment.cc b/components/attachment-intakers/nginx_attachment/nginx_attachment.cc new file mode 100755 index 0000000..87a1415 --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/nginx_attachment.cc @@ -0,0 +1,1785 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "nginx_attachment.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "nginx_attachment_config.h" +#include "nginx_attachment_opaque.h" +#include "nginx_parser.h" +#include "i_instance_awareness.h" +#include "common.h" +#include "config.h" +#include "singleton.h" +#include "i_mainloop.h" +#include "buffer.h" +#include "enum_array.h" +#include "shmem_ipc.h" +#include "sasal.h" +#include "i_http_manager.h" +#include "http_transaction_common.h" +#include "nginx_attachment_common.h" +#include "hash_combine.h" +#include "cpu/failopen_mode_status.h" +#include "attachment_registrator.h" +#include "cache.h" +#include "log_generator.h" +#include "report/report_enums.h" +#include "user_identifiers_config.h" +#include "agent_core_utilities.h" + +#ifdef FAILURE_TEST +#include "intentional_failure.h" +#define SHOULD_FAIL(is_ok, type, indicator) intentional_failure_handler.shouldFail((is_ok), type, indicator) +#define DELAY_IF_NEEDED(type) intentional_failure_handler.delayIfNeeded(type); + +#else +#define SHOULD_FAIL(is_ok, type, indicator) !(is_ok) +#define DELAY_IF_NEEDED(type) + +#endif // FAILURE_TEST + +SASAL_START // HTTP Manager main + +USE_DEBUG_FLAG(D_NGINX_ATTACHMENT); +USE_DEBUG_FLAG(D_COMPRESSION); +USE_DEBUG_FLAG(D_METRICS_NGINX_ATTACHMENT); + +using namespace std; + +using ChunkType = ngx_http_chunk_type_e; + +static const uint32_t corrupted_session_id = CORRUPTED_SESSION_ID; + +class FailopenModeListener : public Listener +{ +public: + FailopenModeListener() = default; + + void + upon(const FailopenModeEvent &event) override + { + current_failopen_status = event.getFailopenMode(); + } + + bool + isFailopenMode() const + { + return 
current_failopen_status; + } + +private: + bool current_failopen_status = false; +}; + +void +IpcDebug(int is_error, const char *func, const char *file, int line_num, const char *fmt, ...) +{ + if (!Debug::evalFlags(is_error ? Debug::DebugLevel::WARNING : Debug::DebugLevel::TRACE, D_NGINX_ATTACHMENT)) { + return; + } + + va_list args; + va_start(args, fmt); + size_t len = vsnprintf(NULL, 0, fmt, args); + va_end(args); + vector message(len + 1); + va_start(args, fmt); + vsnprintf(&message[0], len + 1, fmt, args); + va_end(args); + + Debug( + file, + func, + line_num, + is_error ? Debug::DebugLevel::WARNING : Debug::DebugLevel::TRACE, + D_NGINX_ATTACHMENT + ).getStreamAggr() << message.data(); +} + +class NginxAttachment::Impl + : + Singleton::Provide::From +{ + static constexpr auto INSPECT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT; + static constexpr auto ACCEPT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT; + static constexpr auto DROP = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP; + static constexpr auto INJECT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT; + static constexpr auto IRRELEVANT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT; + static constexpr auto RECONF = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_RECONF; + static constexpr auto WAIT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT; + +public: + Impl() + : +#ifdef FAILURE_TEST + intentional_failure_handler(), +#endif + nginx_plugin_cpu_metric(true) + {} + + void + init() + { + dbgFlow(D_NGINX_ATTACHMENT) << "Initializing NGINX attachment"; + + timer = Singleton::Consume::by(); + i_socket = Singleton::Consume::by(); + mainloop = Singleton::Consume::by(); + http_manager = Singleton::Consume::by(); + i_transaction_table = Singleton::Consume>::by(); + inst_awareness = Singleton::Consume::by(); + + metric_report_interval = chrono::seconds( + getConfigurationWithDefault( + METRIC_PERIODIC_TIMEOUT, + "Nginx Attachment", + "metric reporting interval" + ) + ); + + num_of_nginx_ipc_elements = getProfileAgentSettingWithDefault( + NUM_OF_NGINX_IPC_ELEMENTS, "nginxAttachment.numOfNginxIpcElements" + ); + + nginx_attachment_metric.init( + "Nginx Attachment data", + ReportIS::AudienceTeam::AGENT_CORE, + ReportIS::IssuingEngine::AGENT_CORE, + metric_report_interval, + true + ); + nginx_attachment_metric.registerListener(); + + nginx_intaker_metric.init( + "Nginx Attachment Plugin data", + ReportIS::AudienceTeam::AGENT_CORE, + ReportIS::IssuingEngine::AGENT_CORE, + metric_report_interval, + true + ); + nginx_intaker_metric.registerListener(); + + transaction_table_metric.init( + "Nginx transaction table data", + ReportIS::AudienceTeam::AGENT_CORE, + ReportIS::IssuingEngine::AGENT_CORE, + metric_report_interval, + true + ); + transaction_table_metric.registerListener(); + + nginx_plugin_cpu_metric.init( + "Nginx Attachment Plugin CPU data", + ReportIS::AudienceTeam::AGENT_CORE, + ReportIS::IssuingEngine::AGENT_CORE, + metric_report_interval, + true + ); + + nginx_plugin_cpu_metric.registerContext("Service Name", "Nginx Attachment"); + nginx_plugin_cpu_metric.registerListener(); + +#ifdef FAILURE_TEST + intentional_failure_handler.init(); +#endif + + generateAttachmentConfig(); + registerConfigLoadCb([this]() { generateAttachmentConfig(); }); + + createStaticResourcesFolder(); + + setCompressionDebugFunctions(); + + setMetricHandlers(); + + fail_open_mode_listener.registerListener(); + + if (!initSocket()) { + mainloop->addOneTimeRoutine( + I_MainLoop::RoutineType::System, + [this] () + { + while(!initSocket()) { + 
mainloop->yield(true); + } + }, + "Nginx Attachment IPC initializer" + ); + } + + dbgInfo(D_NGINX_ATTACHMENT) << "Successfully initialized NGINX Attachment"; + } + + void + fini() + { + resetCompressionDebugFunctionsToStandardError(); + + if (server_sock > 0) { + i_socket->closeSocket(server_sock); + server_sock = -1; + } + + if (attachment_routine_id > 0 && mainloop->doesRoutineExist(attachment_routine_id)) { + mainloop->stop(attachment_routine_id); + attachment_routine_id = 0; + } + + if (attachment_sock > 0) { + i_socket->closeSocket(attachment_sock); + attachment_sock = -1; + } + + if (attachment_ipc != nullptr) { + destroyIpc(attachment_ipc, 1); + attachment_ipc = nullptr; + } + } + + bool + registerStaticResource(const string &resource_name, const string &resource_path) + { + string dest_path = static_resources_path + "/" + resource_name; + if (NGEN::Filesystem::exists(dest_path)) { + dbgDebug(D_NGINX_ATTACHMENT) << "Static resource already exist. path: " << dest_path; + return true; + } + + if (!NGEN::Filesystem::copyFile( + resource_path, + dest_path, + false, + S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH + )) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to write the static resource to the shared memory. Resource name: " + << resource_name + << ", static resource's path: " + << resource_path; + return false; + } + + dbgTrace(D_NGINX_ATTACHMENT) + << "Successfully wrote the static resource to the shared memory. Resource Name: " + << resource_name + << ", static resource's path: " + << resource_path; + + return true; + } + + void + printMetrics() + { + dbgDebug(D_METRICS_NGINX_ATTACHMENT) + << "Total number of responses received: " + << to_string(num_uncompressed_responses + num_compressed_responses) + << ", number of uncompressed responses: " + << to_string(num_uncompressed_responses) + << ", number of compressed responses: " + << to_string(num_compressed_responses); + + metrics_average_table_size = + (i_transaction_table->count() + metrics_average_table_size * metrics_sample_count) / + (metrics_sample_count + 1); + + metrics_sample_count++; + dbgDebug(D_METRICS_NGINX_ATTACHMENT) << "Maximum transactions table size: " << metrics_max_table_size; + dbgDebug(D_METRICS_NGINX_ATTACHMENT) << "Average transactions table size: " << metrics_average_table_size; + dbgDebug(D_METRICS_NGINX_ATTACHMENT) << "Current transactions table size: " << i_transaction_table->count(); + } + + void + preload() + { +#ifdef FAILURE_TEST + intentional_failure_handler.preload(); +#endif + } + +private: + bool + registerAttachmentProcess(uint32_t nginx_user_id, uint32_t nginx_group_id, I_Socket::socketFd new_socket) + { + dbgAssert(server_sock > 0) << "Registration attempt occurred while registration socket is uninitialized"; +#ifdef FAILURE_TEST + bool did_fail_on_purpose = false; +#endif + + if (attachment_routine_id > 0 && mainloop->doesRoutineExist(attachment_routine_id)) { + mainloop->stop(attachment_routine_id); + attachment_routine_id = 0; + } + + string curr_instance_unique_id = inst_awareness->getUniqueID().unpack(); + if (attachment_ipc != nullptr) { + if (nginx_worker_user_id != nginx_user_id || nginx_worker_group_id != nginx_group_id) { + destroyIpc(attachment_ipc, 1); + attachment_ipc = nullptr; + } else if (isCorruptedShmem(attachment_ipc, 1)) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Destroying shmem IPC for Attachment with corrupted shared memory. 
Attachment id: " + << curr_instance_unique_id; + + destroyIpc(attachment_ipc, 1); + attachment_ipc = nullptr; + } else { + dbgInfo(D_NGINX_ATTACHMENT) << "Re-registering attachment with id: " << curr_instance_unique_id; + uint max_registrations = getProfileAgentSettingWithDefault( + 6, + "httpManager.maximumRegistrationsAllowed" + ); + uint duration_of_registrations = getProfileAgentSettingWithDefault( + 20000, + "httpManager.allowedDurationOfRegistrations" + ); + chrono::milliseconds curr_times_diff = chrono::duration_cast( + chrono::steady_clock::now() - + registration_duration_start + ); + if (curr_times_diff < chrono::milliseconds(duration_of_registrations)) { + if (++curr_attachment_registrations_counter > max_registrations) { + destroyIpc(attachment_ipc, 1); + attachment_ipc = nullptr; + + dbgWarning(D_NGINX_ATTACHMENT) + << "Attachment with id: " + << curr_instance_unique_id + << " reached maximum number of allowed registration attempts"; + + registration_duration_start = chrono::steady_clock::now(); + curr_attachment_registrations_counter = 1; + } + } else { + registration_duration_start = chrono::steady_clock::now(); + curr_attachment_registrations_counter = 1; + } + } + } + + if (attachment_ipc == nullptr) { + attachment_ipc = initIpc( + curr_instance_unique_id.c_str(), + nginx_user_id, + nginx_group_id, + 1, + num_of_nginx_ipc_elements, + IpcDebug + ); + + if (SHOULD_FAIL( + attachment_ipc != nullptr, + IntentionalFailureHandler::FailureType::InitializeConnectionChannel, + &did_fail_on_purpose + )) { + dbgWarning(D_NGINX_ATTACHMENT) << "Failed to initialize communication channel with attachment"; + return false; + } + } + + dbgDebug(D_NGINX_ATTACHMENT) << "Successfully initialized shmem channel"; + nginx_worker_user_id = nginx_user_id; + nginx_worker_group_id = nginx_group_id; + instance_unique_id = curr_instance_unique_id; + + if (attachment_sock > 0 && attachment_sock != new_socket) { + i_socket->closeSocket(attachment_sock); + } + attachment_sock = new_socket; + + uint8_t success = 1; + vector reg_success(reinterpret_cast(&success), reinterpret_cast(&success) + 1); + DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::WriteDataToSocket); + bool res = i_socket->writeData(attachment_sock, reg_success); + if (SHOULD_FAIL( + res, IntentionalFailureHandler::FailureType::WriteDataToSocket, &did_fail_on_purpose + )) { + dbgWarning(D_NGINX_ATTACHMENT) << "Failed to ack registration success to attachment"; + i_socket->closeSocket(attachment_sock); + attachment_sock = -1; + return false; + } + + attachment_routine_id = mainloop->addFileRoutine( + I_MainLoop::RoutineType::RealTime, + attachment_sock, + [this] () mutable + { + auto on_exit = make_scope_exit( + [this]() + { + nginx_attachment_event.notify(); + nginx_attachment_event.resetAllCounters(); + nginx_intaker_event.notify(); + nginx_intaker_event.resetAllCounters(); + } + ); + + while (isSignalPending()) { + if (!handleInspection()) break; + } + }, + "Nginx Attachment inspection handler", + true + ); + + traffic_indicator = true; + dbgInfo(D_NGINX_ATTACHMENT) << "Successfully registered attachment"; + + nginx_attachment_event.addNetworkingCounter(nginxAttachmentEvent::networkVerdict::REGISTRATION_SUCCESS); + nginx_attachment_event.notify(); + nginx_attachment_event.resetAllCounters(); + return true; + } + +private: + bool + handleInspection() + { + Maybe> comm_trigger = genError("comm trigger uninitialized");; + + static map comm_status; + if (comm_status.find(attachment_sock) == comm_status.end()) { + 
comm_status[attachment_sock] = true; + } + + DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::ReceiveDataFromSocket); + + uint32_t signaled_session_id = 0; + for (int retry = 0; retry < 3; retry++) { + comm_trigger = i_socket->receiveData(attachment_sock, sizeof(signaled_session_id)); + if (comm_trigger.ok()) break; + } + + bool did_fail_on_purpose = false; + if (SHOULD_FAIL( + comm_trigger.ok(), + IntentionalFailureHandler::FailureType::ReceiveDataFromSocket, + &did_fail_on_purpose + )) { + if (comm_status[attachment_sock] == true) { + dbgDebug(D_NGINX_ATTACHMENT) + << "Failed to get signal from attachment socket " + << ", Socket: " + << attachment_sock + << ", Error: " + << (did_fail_on_purpose ? "Intentional Failure" : comm_trigger.getErr()); + comm_status[attachment_sock] = false; + } + return false; + } + + signaled_session_id = *reinterpret_cast(comm_trigger.unpack().data()); + comm_status.erase(attachment_sock); + traffic_indicator = true; + + while (isDataAvailable(attachment_ipc)) { + traffic_indicator = true; + Maybe> session_verdict = handleRequestFromQueue(attachment_ipc, signaled_session_id); + if (!session_verdict.ok()) return true; + + uint32_t handled_session_id = session_verdict.unpack().first; + bool is_signal_needed = session_verdict.unpack().second; + if (is_signal_needed || !isDataAvailable(attachment_ipc)) { + dbgTrace(D_NGINX_ATTACHMENT) << "Signaling attachment to read verdict"; + bool res = false; + vector session_id_data( + reinterpret_cast(&handled_session_id), + reinterpret_cast(&handled_session_id) + sizeof(handled_session_id) + ); + + DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::WriteDataToSocket); + + if (!SHOULD_FAIL( + true, + IntentionalFailureHandler::FailureType::WriteDataToSocket, + &did_fail_on_purpose + )) { + for (int retry = 0; retry < 3; retry++) { + if (i_socket->writeData(attachment_sock, session_id_data)) { + dbgTrace(D_NGINX_ATTACHMENT) + << "Successfully sent signal to attachment to read verdict."; + res = true; + return true; + } + + dbgDebug(D_NGINX_ATTACHMENT) + << "Failed to send ACK to attachment (try number " << retry << ")"; + mainloop->yield(true); + } + } + if (!res) { + dbgWarning(D_NGINX_ATTACHMENT) << "Failed to send ACK to attachment" + << (did_fail_on_purpose ? "[Intentional Failure]" : ""); + return false; + } + } + } + + return true; + } + + bool + isSignalPending() + { + if (attachment_sock < 0) return false; + return i_socket->isDataAvailable(attachment_sock); + } + + void + setMetricHandlers() + { + chrono::seconds metrics_print_interval_sec = chrono::seconds( + getConfigurationWithDefault( + default_metrics_print_interval_sec, + "HTTP manager", + "Metrics printing interval in sec" + ) + ); + auto metrics_print_interval_usec = chrono::duration_cast(metrics_print_interval_sec); + mainloop->addRecurringRoutine( + I_MainLoop::RoutineType::Offline, + metrics_print_interval_usec, + [&]() { printMetrics(); }, + "Nginx Attachment metric printer", + false + ); + } + + void + setCompressionDebugFunctions() + { + setCompressionDebugFunction( + CompressionUtilsDebugLevel::COMPRESSION_DBG_LEVEL_ERROR, + [](const char *debug_message) { dbgError(D_COMPRESSION) << debug_message; } + ); + } + + void + deleteStaticResourcesFolder() + { + if (!NGEN::Filesystem::deleteDirectory(static_resources_path)) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to delete the static resources' folder. 
Folder's path: " + << static_resources_path; + } else { + dbgTrace(D_NGINX_ATTACHMENT) + << "Successfully deleted the static resources' folder. Folder's path: " + << static_resources_path; + } + } + + void + createStaticResourcesFolder() + { + static_resources_path = getConfigurationWithDefault( + default_static_resources_path, + "HTTP manager", + "Static resources path" + ); + + dbgDebug(D_NGINX_ATTACHMENT) + << "Trying to create the static resources' folder at path: " + << static_resources_path; + + if (NGEN::Filesystem::exists(static_resources_path)) { + dbgDebug(D_NGINX_ATTACHMENT) << "Static resources' folder already exists"; + return; + } + + if (!NGEN::Filesystem::makeDir(static_resources_path, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to create a folder for transferring static resources to attachments. Folder's path: " + << static_resources_path; + return; + } + dbgTrace(D_NGINX_ATTACHMENT) + << "Successfully created the static resources' folder. Folder's path: " + << static_resources_path; + } + + void + generateAttachmentConfig() + { + auto on_exit = make_scope_exit( + [this]() + { + if (attachment_ipc == nullptr) return; + + handleVerdictResponse(FilterVerdict(RECONF), attachment_ipc, 0, false); + + dbgDebug(D_NGINX_ATTACHMENT) + << "Sending verdict RECONF for NGINX attachment with UID: " + << attachment_ipc; + } + ); + + auto tenant_header_key = getProfileAgentSetting("tenantIdKey"); + if (tenant_header_key.ok()) NginxParser::tenant_header_key = tenant_header_key.unpack(); + + HttpAttachmentConfig new_conf; + new_conf.init(); + + default_verdict = FilterVerdict(new_conf.getIsFailOpenModeEnabled() ? ACCEPT : DROP); + + if (attachment_config == new_conf) return; + attachment_config = new_conf; + num_of_nginx_ipc_elements = new_conf.getNumOfNginxElements(); + + string settings_path = getConfigurationWithDefault( + SHARED_ATTACHMENT_CONF_PATH, + "HTTP manager", + "Shared settings path" + ); + + for (uint retries = 0 ; retries < 3 ; retries++) { + if (remove(settings_path.c_str()) == 0) break; + usleep(1); + } + + ofstream setting_stream(settings_path, ofstream::out); + if (!setting_stream.is_open()) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Could not set new attachment settings. 
Error: shared settings file \"" + << settings_path + << "\" could not be opened"; + mainloop->addOneTimeRoutine( + I_MainLoop::RoutineType::Offline, +// LCOV_EXCL_START Reason: coverage upgrade + [this] () { generateAttachmentConfig(); }, +// LCOV_EXCL_STOP + "Nginx Attachment configuration generator", + false + ); + return; + } + + cereal::JSONOutputArchive archive_out(setting_stream); + attachment_config.save(archive_out); + } + + void + sendMetricToKibana(const ngx_http_cp_metric_data_t *received_metric_data) + { + nginx_intaker_event.addPluginMetricCounter(received_metric_data); + nginx_intaker_event.notify(); + nginx_intaker_event.resetAllCounters(); + } + + string + convertChunkTypeToString(ChunkType data_type) + { + switch (data_type) { + case ChunkType::CONTENT_LENGTH: + return "Content Length"; + case ChunkType::RESPONSE_CODE: + return "Response Code"; + case ChunkType::RESPONSE_BODY: + return "Response Body"; + case ChunkType::RESPONSE_HEADER: + return "Response Header"; + case ChunkType::RESPONSE_END: + return "Response End"; + case ChunkType::REQUEST_START: + return "Request Start"; + case ChunkType::REQUEST_HEADER: + return "Request Header"; + case ChunkType::REQUEST_BODY: + return "Request Body"; + case ChunkType::REQUEST_END: + return "Request End"; + case ChunkType::METRIC_DATA_FROM_PLUGIN: + return "Metrics"; + case ChunkType::HOLD_DATA: + return "HOLD_DATA"; + case ChunkType::COUNT: + dbgAssert(false) << "Invalid 'COUNT' ChunkType"; + return ""; + } + dbgAssert(false) << "ChunkType was not handled by the switch case"; + return ""; + } + + FilterVerdict + handleStartTransaction(const Buffer &data) + { + if (data.size() == 0) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Could not handle new transaction with an empty buffer. Returning default verdict: " + << verdictToString(default_verdict.getVerdict()); + return default_verdict; + } + + NginxAttachmentOpaque &opaque = i_transaction_table->getState(); + auto rule_by_ctx = getConfiguration("rulebase", "rulesConfig"); + if (rule_by_ctx.ok()) { + BasicRuleConfig rule = rule_by_ctx.unpack(); + opaque.setSavedData("assetId", rule.getAssetId(), EnvKeyAttr::LogSection::SOURCEANDDATA); + opaque.setSavedData("assetName", rule.getAssetName(), EnvKeyAttr::LogSection::SOURCEANDDATA); + } + return http_manager->inspect(opaque.getTransactionData()); + } + + FilterVerdict + handleResponseCode(const Buffer &data) + { + auto status_code = NginxParser::parseResponseCode(data); + if (!status_code.ok()) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to parse response status code. Returning default verdict: " + << verdictToString(default_verdict.getVerdict()) + << ", Error: " + << status_code.getErr(); + return default_verdict; + } + + return http_manager->inspect(status_code.unpack()); + } + + FilterVerdict + handleContentLength(const Buffer &data) + { + auto content_len = NginxParser::parseContentLength(data); + if (!content_len.ok()) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to parse response content length. 
Returning default verdict: " + << verdictToString(default_verdict.getVerdict()) + << ", Error: " + << content_len.getErr(); + return default_verdict; + } + + ModificationList mod_buff_list; + mod_buff_list.emplace_back(INJECT_POS_IRRELEVANT, ModificationType::REPLACE, string("Content-Length")); + + FilterVerdict verdict(INJECT); + verdict.addModifications(mod_buff_list, 0); + + return verdict; + } + + template + FilterVerdict + handleModifiableChunk(const Maybe &chunk, const string &chunk_desc, bool is_request) + { + if (!chunk.ok()) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to parse " + << chunk_desc + << ". Returning default verdict: " + << verdictToString(default_verdict.getVerdict()) + << ", Error: " + << chunk.getErr(); + return default_verdict; + } + + return http_manager->inspect(*chunk, is_request); + } + + template + FilterVerdict + handleMultiModifiableChunks(const vector &chunks, bool is_request) + { + FilterVerdict injection_verdict(INJECT); + bool injection_required = false; + for (const M &chunk : chunks) { + FilterVerdict cur_verdict = http_manager->inspect(chunk, is_request); + if (cur_verdict.getVerdict() == ACCEPT || + cur_verdict.getVerdict() == DROP || + cur_verdict.getVerdict() == WAIT) { + return cur_verdict; + } + + if (cur_verdict.getVerdict() == INJECT) { + injection_verdict.addModifications(cur_verdict); + injection_required = true; + } + } + if (!injection_required) return FilterVerdict(); + + return injection_verdict; + } + + template + FilterVerdict + handleMultiModifiableChunks(const Maybe> &chunks, const string &chunk_desc, bool is_request) + { + if (!chunks.ok()) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to parse " + << chunk_desc + << ". Returning default verdict: " + << verdictToString(default_verdict.getVerdict()) + << ", Error: " + << chunks.getErr(); + return default_verdict; + } + + return handleMultiModifiableChunks(chunks.unpack(), is_request); + } + + void + setResponseContentEncoding(const CompressionType content_encoding) + { + if (content_encoding == HttpTransactionData::default_response_content_encoding) { + dbgDebug(D_NGINX_ATTACHMENT) << "New content encoding is the default. Skipping change of currect state"; + return; + } + auto &opaque = i_transaction_table->getState(); + auto &transaction_data = opaque.getTransactionData(); + + transaction_data.setResponseContentEncoding(content_encoding); + } + + void + updateMetrics(const CompressionType response_content_encoding) + { + if (response_content_encoding == CompressionType::NO_COMPRESSION) { + num_uncompressed_responses++; + } else { + num_compressed_responses++; + } + } + + FilterVerdict + handleResponseHeaders(const Buffer &headers_data) + { + dbgFlow(D_NGINX_ATTACHMENT) << "Handling response headers"; + bool did_fail_on_purpose = false; + auto response_headers_maybe = NginxParser::parseResponseHeaders(headers_data); + if (SHOULD_FAIL( + response_headers_maybe.ok(), IntentionalFailureHandler::FailureType::ParsingResponse, &did_fail_on_purpose + )) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to parse response headers. Returning default verdict: " + << verdictToString(default_verdict.getVerdict()) + << ", Error: " + << (did_fail_on_purpose ? 
"Intentional Failure" : response_headers_maybe.getErr()); + return default_verdict; + } + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully parsed response headers"; + + auto response_headers = response_headers_maybe.unpack(); + auto parsed_content_encoding_maybe = NginxParser::parseContentEncoding(response_headers); + if (SHOULD_FAIL( + parsed_content_encoding_maybe.ok(), + IntentionalFailureHandler::FailureType::ParsingResponse, + &did_fail_on_purpose + )) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to parse content encoding from response headers. Returning default verdict: " + << verdictToString(default_verdict.getVerdict()) + << ", Error: " + << (did_fail_on_purpose ? "Intentional Failure" : parsed_content_encoding_maybe.getErr()); + return default_verdict; + } + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully parsed response's content encoding"; + + auto parsed_content_encoding = parsed_content_encoding_maybe.unpack(); + setResponseContentEncoding(parsed_content_encoding); + updateMetrics(parsed_content_encoding); + + return handleMultiModifiableChunks(response_headers, false); + } + + FilterVerdict + handleResponseBody(const Buffer &data) + { + auto &opaque = i_transaction_table->getState(); + auto &transaction_data = opaque.getTransactionData(); + + CompressionType content_encoding = transaction_data.getResponseContentEncoding(); + CompressionStream *compression_stream = content_encoding == CompressionType::NO_COMPRESSION ? + nullptr : + opaque.getResponseCompressionStream(); + auto http_response_body_maybe = NginxParser::parseResponseBody(data, compression_stream); + + return handleModifiableChunk(http_response_body_maybe, "response body", false); + } + + FilterVerdict + handleChunkedData(ChunkType chunk_type, const Buffer &data) + { + ScopedContext event_type; + event_type.registerValue("HTTP Chunk type", chunk_type); + + auto rule_by_ctx = getConfiguration("rulebase", "rulesConfig"); + if (!rule_by_ctx.ok() && chunk_type > ChunkType::REQUEST_HEADER) { + ngx_http_cp_verdict_e verdict_action = + getSettingWithDefault(false, "allowOnlyDefinedApplications") ? DROP : ACCEPT; + + dbgDebug(D_NGINX_ATTACHMENT) + << "No policy rule was found for the current context. 
Setting verdict to " + << verdictToString(verdict_action); + + return FilterVerdict(verdict_action); + } + + switch (chunk_type) { + case ChunkType::REQUEST_START: + return handleStartTransaction(data); + case ChunkType::REQUEST_HEADER: + return handleMultiModifiableChunks(NginxParser::parseRequestHeaders(data), "request header", true); + case ChunkType::REQUEST_BODY: + return handleModifiableChunk(NginxParser::parseRequestBody(data), "request body", true); + case ChunkType::REQUEST_END: { + i_transaction_table->setExpiration(chrono::hours(1)); + return FilterVerdict(http_manager->inspectEndRequest()); + } + case ChunkType::RESPONSE_CODE: { + i_transaction_table->setExpiration(chrono::minutes(1)); + return handleResponseCode(data); + } + case ChunkType::CONTENT_LENGTH: { + return handleContentLength(data); + } + case ChunkType::RESPONSE_HEADER: + return handleResponseHeaders(data); + case ChunkType::RESPONSE_BODY: + nginx_attachment_event.addResponseInspectionCounter(1); + return handleResponseBody(data); + case ChunkType::RESPONSE_END: + return FilterVerdict(http_manager->inspectEndTransaction()); + case ChunkType::METRIC_DATA_FROM_PLUGIN: + return FilterVerdict(ngx_http_cp_verdict::TRAFFIC_VERDICT_IRRELEVANT); + case ChunkType::HOLD_DATA: + return FilterVerdict(http_manager->inspectDelayedVerdict()); + case ChunkType::COUNT: + break; + } + dbgWarning(D_NGINX_ATTACHMENT) + << "Received invalid 'ChunkType' chunk_type enum. Returning default verdict: " + << verdictToString(default_verdict.getVerdict()) + << ", enum: " + << static_cast(chunk_type); + return default_verdict; + } + + void + handleModifiedResponse( + SharedMemoryIPC *ipc, + const vector &modifications_lists, + uint32_t modifications_amount, + vector &verdict_data, + vector &verdict_data_sizes, + bool is_header) + { + dbgFlow(D_NGINX_ATTACHMENT) + << "Handling Injection of HTTP session modification data. Modifications amount: " + << modifications_amount; + + vector injection_data_persistency(modifications_amount); + for (const EventModifications &modifications : modifications_lists) { + for (const ModificationBuffer &modification_buffer_list : modifications.second) { + ngx_http_cp_inject_data injection_data; + injection_data.orig_buff_index = modifications.first; + injection_data.injection_pos = std::get<0>(modification_buffer_list); + injection_data.mod_type = std::get<1>(modification_buffer_list); + injection_data.injection_size = std::get<2>(modification_buffer_list).size(); + injection_data.is_header = is_header ? 1 : 0; + injection_data_persistency.push_back(injection_data); + verdict_data.push_back(reinterpret_cast(&injection_data_persistency.back())); + verdict_data_sizes.push_back(sizeof(injection_data)); + + const Buffer &modification_data = std::get<2>(modification_buffer_list); + verdict_data.push_back(reinterpret_cast(modification_data.data())); + verdict_data_sizes.push_back(modification_data.size()); + + dbgTrace(D_NGINX_ATTACHMENT) + << "Added modification (" + << injection_data_persistency.size() + << " out of " + << modifications_amount + << ") data to current session data. 
Modification position: " + << injection_data.injection_pos + << ", Modification size: " + << injection_data.injection_size + <<",: single_inject_data.is_header: " + << to_string(injection_data.is_header) + << ", Original buffer index: " + << to_string(injection_data.orig_buff_index) + << ", Modification data: " + << dumpHex(modification_data); + } + } + + sendChunkedData(ipc, verdict_data_sizes.data(), verdict_data.data(), verdict_data.size()); + } + + void + handleCustomWebResponse( + SharedMemoryIPC *ipc, + vector &verdict_data, + vector &verdict_data_sizes) + { + ngx_http_cp_web_response_data_t web_response_data; + + WebTriggerConf web_trigger_conf = getConfigurationWithDefault( + WebTriggerConf::default_trigger_conf, + "rulebase", + "webUserResponse" + ); + + string uuid; + if (i_transaction_table->hasState()) { + NginxAttachmentOpaque &opaque = i_transaction_table->getState(); + uuid = opaque.getSessionUUID(); + } + web_response_data.uuid_size = + string("Incident Id: ").length() + uuid.size(); + + if (web_trigger_conf.getDetailsLevel() == "Redirect") { + web_response_data.response_data.redirect_data.redirect_location_size = + web_trigger_conf.getRedirectURL().size(); + web_response_data.response_data.redirect_data.add_event_id = web_trigger_conf.getAddEventId() ? 1 : 0; + web_response_data.web_repsonse_type = static_cast(ngx_web_response_type_e::REDIRECT_WEB_RESPONSE); + } else { + web_response_data.response_data.custom_response_data.title_size = + web_trigger_conf.getResponseTitle().size(); + web_response_data.response_data.custom_response_data.body_size = web_trigger_conf.getResponseBody().size(); + web_response_data.response_data.custom_response_data.response_code = web_trigger_conf.getResponseCode(); + web_response_data.web_repsonse_type = static_cast(ngx_web_response_type_e::CUSTOM_WEB_RESPONSE); + } + + verdict_data.push_back(reinterpret_cast(&web_response_data)); + verdict_data_sizes.push_back(sizeof(ngx_http_cp_web_response_data_t)); + + if (web_trigger_conf.getDetailsLevel() == "Redirect") { + verdict_data.push_back(reinterpret_cast(web_trigger_conf.getRedirectURL().data())); + verdict_data_sizes.push_back(web_trigger_conf.getRedirectURL().size()); + } else { + verdict_data.push_back(reinterpret_cast(web_trigger_conf.getResponseTitle().data())); + verdict_data_sizes.push_back(web_trigger_conf.getResponseTitle().size()); + + verdict_data.push_back(reinterpret_cast(web_trigger_conf.getResponseBody().data())); + verdict_data_sizes.push_back(web_trigger_conf.getResponseBody().size()); + } + + verdict_data.push_back(reinterpret_cast(uuid.data())); + verdict_data_sizes.push_back(uuid.size()); + + if (web_trigger_conf.getDetailsLevel() == "Redirect") { + dbgTrace(D_NGINX_ATTACHMENT) + << "Added custom redirect response to current session." + << ", Redirect Location: " + << web_trigger_conf.getRedirectURL() + << " (redirect location size: " + << static_cast(web_response_data.response_data.redirect_data.redirect_location_size) + << ")" + << ", Should add event id to header: " + << static_cast(web_response_data.response_data.redirect_data.add_event_id) + << ", UUID: " + << uuid + << " (UUID size: " + << static_cast(web_response_data.uuid_size) + << ")"; + } else { + dbgTrace(D_NGINX_ATTACHMENT) + << "Added custom response to current session." 
+ << "Response code: " + << static_cast(web_response_data.response_data.custom_response_data.response_code) + << ", Title: " + << web_trigger_conf.getResponseTitle() + << " (title size: " + << static_cast(web_response_data.response_data.custom_response_data.title_size) + << "), Body: " + << web_trigger_conf.getResponseBody() + << " (body size: " + << static_cast(web_response_data.response_data.custom_response_data.body_size) + << "), UUID: " + << uuid + << " (UUID size: " + << static_cast(web_response_data.uuid_size) + << ")"; + } + + sendChunkedData(ipc, verdict_data_sizes.data(), verdict_data.data(), verdict_data.size()); + } + + void + handleVerdictResponse(const FilterVerdict &verdict, SharedMemoryIPC *ipc, SessionID session_id, bool is_header) + { + ngx_http_cp_reply_from_service_t verdict_to_send; + verdict_to_send.verdict = static_cast(verdict.getVerdict()); + verdict_to_send.session_id = session_id; + + vector verdict_fragments = { reinterpret_cast(&verdict_to_send) }; + vector fragments_sizes = { sizeof(verdict_to_send) }; + + if (verdict.getVerdict() == INJECT) { + nginx_attachment_event.addTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::INJECT); + verdict_to_send.modification_count = verdict.getModificationsAmount(); + return handleModifiedResponse( + ipc, + verdict.getModifications(), + verdict.getModificationsAmount(), + verdict_fragments, + fragments_sizes, + is_header + ); + } + + if (verdict.getVerdict() == DROP) { + nginx_attachment_event.addTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::DROP); + verdict_to_send.modification_count = 1; + return handleCustomWebResponse(ipc, verdict_fragments, fragments_sizes); + } + + if (verdict.getVerdict() == ACCEPT) { + nginx_attachment_event.addTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::ACCEPT); + } else if (verdict.getVerdict() == INSPECT) { + nginx_attachment_event.addTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::INSPECT); + } else if (verdict.getVerdict() == IRRELEVANT) { + nginx_attachment_event.addTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::IRRELEVANT); + } else if (verdict.getVerdict() == RECONF) { + nginx_attachment_event.addTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::RECONF); + } else if (verdict.getVerdict() == WAIT) { + nginx_attachment_event.addTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::WAIT); + } + + sendChunkedData(ipc, fragments_sizes.data(), verdict_fragments.data(), verdict_fragments.size()); + } + +// LCOV_EXCL_START Reason: cannot test dump of memory raw data (written in c) during UT + const string + dumpIpcWrapper(SharedMemoryIPC *attachment_ipc) + { + dumpIpcMemory(attachment_ipc); + return ""; + } +// LCOV_EXCL_STOP + + bool + isFailOpenTriggered() const + { + return attachment_config.getIsFailOpenModeEnabled() && fail_open_mode_listener.isFailopenMode(); + } + + void + handleFailureMode(SharedMemoryIPC *attachment_ipc, uint32_t cur_session_id) + { + popData(attachment_ipc); + while (isDataAvailable(attachment_ipc)) { + Maybe> read_data = readData(attachment_ipc); + if (!read_data.ok()) break; + + uint16_t incoming_data_size = read_data.unpack().first; + const char *incoming_data = read_data.unpack().second; + if (incoming_data_size == 0 || incoming_data == nullptr) { + dbgWarning(D_NGINX_ATTACHMENT) << "No data received from NGINX attachment"; + break; + } + + auto transaction_data = reinterpret_cast(incoming_data); + if (transaction_data->session_id != cur_session_id) break; + + popData(attachment_ipc); + } + 
+ handleVerdictResponse( + FilterVerdict(ACCEPT), + attachment_ipc, + cur_session_id, + false + ); + } + + Maybe> + readData(SharedMemoryIPC *attachment_ipc) + { + const char *incoming_data = nullptr; + uint16_t incoming_data_size; + + DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::GetDataFromAttchment); + int res = receiveData(attachment_ipc, &incoming_data_size, &incoming_data); + if (res == corrupted_shmem_error) { + dbgError(D_NGINX_ATTACHMENT) + << "Failed to receive data from corrupted IPC Resetting the IPC" + << dumpIpcWrapper(attachment_ipc); + + resetIpc(attachment_ipc, num_of_nginx_ipc_elements); + nginx_attachment_event.addNetworkingCounter(nginxAttachmentEvent::networkVerdict::CONNECTION_FAIL); + return genError("Failed to receive data from corrupted IPC"); + } + + bool did_fail_on_purpose = false; + if (SHOULD_FAIL( + res == 0, IntentionalFailureHandler::FailureType::GetDataFromAttchment, &did_fail_on_purpose + )) { + dbgWarning(D_NGINX_ATTACHMENT) << "Failed to receive data from NGINX attachment"; + nginx_attachment_event.addNetworkingCounter(nginxAttachmentEvent::networkVerdict::CONNECTION_FAIL); + return pair(0, nullptr); + } + + if (SHOULD_FAIL( + incoming_data_size >= sizeof(ngx_http_cp_request_data_t), + IntentionalFailureHandler::FailureType::GetDataFromAttchment, + &did_fail_on_purpose + )) { + dbgError(D_NGINX_ATTACHMENT) + << "Corrupted transaction raw data received from NGINX attachment, size received: " + << incoming_data_size + << " is lower than ngx_http_cp_request_data_t size=" + << sizeof(ngx_http_cp_request_data_t) + << ". Resetting IPC" + << dumpIpcWrapper(attachment_ipc) + << (did_fail_on_purpose ? "[Intentional Failure]" : ""); + + popData(attachment_ipc); + resetIpc(attachment_ipc, num_of_nginx_ipc_elements); + nginx_attachment_event.addNetworkingCounter(nginxAttachmentEvent::networkVerdict::CONNECTION_FAIL); + return genError("Data received is smaller than expected"); + } + + return make_pair(incoming_data_size, incoming_data); + } + + Maybe> + handleRequestFromQueue(SharedMemoryIPC *attachment_ipc, uint32_t signaled_session_id) + { + Maybe> read_data = readData(attachment_ipc); + if (!read_data.ok()) { + dbgWarning(D_NGINX_ATTACHMENT) << "Failed to read data. Error: " << read_data.getErr(); + return make_pair(corrupted_session_id, true); + } + + uint16_t incoming_data_size = read_data.unpack().first; + const char *incoming_data = read_data.unpack().second; + if (incoming_data_size == 0 || incoming_data == nullptr) { + dbgWarning(D_NGINX_ATTACHMENT) << "No data received from NGINX attachment"; + return make_pair(corrupted_session_id, false); + } + + const ngx_http_cp_request_data_t *transaction_data = + reinterpret_cast(incoming_data); + + Maybe chunked_data_type = convertToEnum(transaction_data->data_type); + if (!chunked_data_type.ok()) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Could not convert " + << static_cast(transaction_data->data_type) + << " to ChunkType enum. 
Resetting IPC" + << dumpIpcWrapper(attachment_ipc); + popData(attachment_ipc); + resetIpc(attachment_ipc, num_of_nginx_ipc_elements); + nginx_attachment_event.addNetworkingCounter(nginxAttachmentEvent::networkVerdict::CONNECTION_FAIL); + return make_pair(corrupted_session_id, true); + } + + if (chunked_data_type.unpack() == ChunkType::METRIC_DATA_FROM_PLUGIN) { + const ngx_http_cp_metric_data_t *recieved_metric_data = + reinterpret_cast(incoming_data); + sendMetricToKibana(recieved_metric_data); + popData(attachment_ipc); + return pair(0, false); + } + + dbgTrace(D_NGINX_ATTACHMENT) + << "Reading " + << incoming_data_size + <<" bytes " + << convertChunkTypeToString(*chunked_data_type) + << "(type = " + << static_cast(*chunked_data_type) + << ") of data from NGINX attachment for session ID: " + << transaction_data->session_id; + + const uint32_t cur_session_id = transaction_data->session_id; + if (signaled_session_id != cur_session_id) { + dbgDebug(D_NGINX_ATTACHMENT) + << "Ignoring inspection of irrelevant transaction. Signaled session ID: " + << signaled_session_id + << ", Inspected Session ID: " + << cur_session_id; + + popData(attachment_ipc); + return make_pair(cur_session_id, false); + } + + if (isFailOpenTriggered()) { + dbgTrace(D_NGINX_ATTACHMENT) + << "Agent is set to Fail Open Mode. Passing inspection and returning Accept." + << " Session ID: " + << cur_session_id + << ", Chunked data type: " + << static_cast(*chunked_data_type); + + if (i_transaction_table->hasEntry(cur_session_id)) { + i_transaction_table->deleteEntry(cur_session_id); + } + + handleFailureMode(attachment_ipc, cur_session_id); + return make_pair(cur_session_id, *chunked_data_type == ChunkType::REQUEST_START); + } + + if (!setActiveTransactionEntry(transaction_data->session_id, chunked_data_type.unpack())) { + popData(attachment_ipc); + return make_pair(cur_session_id, false); + } + + const Buffer inspection_data( + transaction_data->data, + incoming_data_size - sizeof(ngx_http_cp_request_data_t), + Buffer::MemoryType::VOLATILE + ); + + if (*chunked_data_type == ChunkType::REQUEST_START && !createTransactionState(inspection_data)) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to handle new request. 
Returning default verdict: " + << verdictToString(default_verdict.getVerdict()); + + handleVerdictResponse( + default_verdict, + attachment_ipc, + transaction_data->session_id, + false + ); + popData(attachment_ipc); + removeTransactionEntry(transaction_data->session_id); + return make_pair(cur_session_id, true); + } + + if (i_transaction_table != nullptr) { + transaction_table_event.setTransactionTableSize(i_transaction_table->count()); + transaction_table_event.notify(); + } + + NginxAttachmentOpaque &opaque = i_transaction_table->getState(); + opaque.activateContext(); + + FilterVerdict verdict = handleChunkedData(*chunked_data_type, inspection_data); + + bool is_header = + *chunked_data_type == ChunkType::REQUEST_HEADER || + *chunked_data_type == ChunkType::RESPONSE_HEADER || + *chunked_data_type == ChunkType::CONTENT_LENGTH; + handleVerdictResponse(verdict, attachment_ipc, transaction_data->session_id, is_header); + + bool is_final_verdict = verdict.getVerdict() == ACCEPT || + verdict.getVerdict() == DROP || + verdict.getVerdict() == IRRELEVANT; + + dbgTrace(D_NGINX_ATTACHMENT) + << "Request handled successfully - for" + << " NGINX attachment session ID: " + << transaction_data->session_id + << " verdict: " + << verdictToString(verdict.getVerdict()) + << " verdict_data_code=" + << static_cast(verdict.getVerdict()); + + popData(attachment_ipc); + + opaque.deactivateContext(); + if (is_final_verdict) { + removeTransactionEntry(transaction_data->session_id); + } else { + i_transaction_table->unsetActiveKey(); + } + + return make_pair(cur_session_id, is_final_verdict); + } + + bool + createTransactionState(const Buffer &data) + { + auto transaction_data = NginxParser::parseStartTrasaction(data); + if (!transaction_data.ok()) { + dbgWarning(D_NGINX_ATTACHMENT) << "Failed to parse new transaction data: " << transaction_data.getErr(); + return false; + } + if (i_transaction_table->hasState()) { + dbgInfo(D_NGINX_ATTACHMENT) << "Trying to recreate a state of type NginxAttachmentOpaque"; + i_transaction_table->deleteState(); + } + + if (!i_transaction_table->createState(transaction_data.unpack())) { + dbgWarning(D_NGINX_ATTACHMENT) << "Failed to create attachment opaque"; + return false; + } + + return true; + } + + bool + setActiveTransactionEntry(const SessionID session_id, ChunkType data_type) + { + if (data_type == ChunkType::REQUEST_START && i_transaction_table->hasEntry(session_id)) { + dbgInfo(D_NGINX_ATTACHMENT) << "Recreating transaction entry. Key: " << session_id; + i_transaction_table->deleteEntry(session_id); + } + + if (!i_transaction_table->hasEntry(session_id)) { + if (data_type != ChunkType::REQUEST_START) { + dbgDebug(D_NGINX_ATTACHMENT) + << "Transaction entry does not exist for session ID: " + << session_id + << " ignoring inspection for data type != request start"; + return false; + } + + if (!i_transaction_table->createEntry(session_id, chrono::minutes(1))) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to create table entry for transaction with session ID: " << session_id; + return false; + } + + dbgDebug(D_NGINX_ATTACHMENT) << "New transaction entry created. Key: " << session_id; + if (i_transaction_table->count() > metrics_max_table_size) { + metrics_max_table_size = i_transaction_table->count(); + } + } + if (!i_transaction_table->setActiveKey(session_id)) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to set active table entry for transaction. 
Session ID: " << session_id; + return false; + } + dbgTrace(D_NGINX_ATTACHMENT) << "Entry exists - setting it active"; + + return true; + } + + void + removeTransactionEntry(const SessionID session_id) + { + i_transaction_table->unsetActiveKey(); + bool entry_deleted = i_transaction_table->deleteEntry(session_id); + + if (!entry_deleted) { + dbgWarning(D_NGINX_ATTACHMENT) << "No Entry to delete, Session ID: " << session_id << "."; + } else { + dbgTrace(D_NGINX_ATTACHMENT) << "Removed the transaction entry"; + } + } + + string + verdictToString(const EventVerdict &verdict) + { + switch (verdict.getVerdict()) { + case DROP: + return "DROP"; + case ACCEPT: + return "ACCEPT"; + case INJECT: + return "INJECT"; + case INSPECT: + return "INSPECT"; + case IRRELEVANT: + return "IRRELEVANT"; + case RECONF: + return "RECONF"; + case WAIT: + return "WAIT"; + } + dbgAssert(false) << "Invalid EventVerdict enum: " << static_cast(verdict.getVerdict()); + return string(); + } + + bool + initSocket() + { + bool did_fail_on_purpose = false; + string shared_verdict_signal_path = getConfigurationWithDefault( + SHARED_VERDICT_SIGNAL_PATH, + "HTTP manager", + "Shared verdict signal path" + ); + + size_t last_slash_idx = shared_verdict_signal_path.find_last_of("/"); + string directory_path = shared_verdict_signal_path.substr(0, last_slash_idx); + mkdir(directory_path.c_str(), 0777); + + auto id = inst_awareness->getUniqueID(); + static bool already_failed_on_id = false; + if (SHOULD_FAIL(id.ok(), IntentionalFailureHandler::FailureType::GetInstanceID, &did_fail_on_purpose)) { + if (!already_failed_on_id) { + dbgError(D_NGINX_ATTACHMENT) + << "Failed to get instance ID. Error: " + << (did_fail_on_purpose ? "Intentional Failure" : id.getErr()); + already_failed_on_id = true; + } else { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to get instance ID. Error: " + << (did_fail_on_purpose ? "Intentional Failure" : id.getErr()); + } + return false; + } + already_failed_on_id = false; + shared_verdict_signal_path += ("-" + id.unpack()); + + Maybe sock = i_socket->genSocket( + I_Socket::SocketType::UNIX, + true, + true, + shared_verdict_signal_path + ); + if (SHOULD_FAIL( + sock.ok(), IntentionalFailureHandler::FailureType::CreateSocket, &did_fail_on_purpose + )) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to open a server socket. Error: " + << (did_fail_on_purpose ? "Intentional Failure" : sock.getErr()); + return false; + } + + dbgAssert(sock.unpack() > 0) << "The generated server socket is OK, yet negative"; + server_sock = sock.unpack(); + + I_MainLoop::Routine accept_attachment_routine = + [this] () + { + dbgAssert(inst_awareness->getUniqueID().ok()) + << "NGINX attachment Initialized without Instance Awareness"; + + bool did_fail_on_purpose = false; + DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::AcceptSocket); + Maybe new_sock = i_socket->acceptSocket(server_sock, true); + if (SHOULD_FAIL( + new_sock.ok(), IntentionalFailureHandler::FailureType::AcceptSocket, &did_fail_on_purpose + )) { + dbgWarning(D_NGINX_ATTACHMENT) << "Failed to accept a new socket. Error: " + << (did_fail_on_purpose ? 
"Intentional Failure" : new_sock.getErr()); + return; + } + dbgAssert(new_sock.unpack() > 0) << "The generated client socket is OK, yet negative"; + I_Socket::socketFd new_attachment_socket = new_sock.unpack(); + + Maybe uid = getUidFromSocket(new_attachment_socket); + Maybe nginx_user_id = readIdFromSocket(new_attachment_socket); + Maybe nginx_group_id = readIdFromSocket(new_attachment_socket); + DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::RegisterAttchment); + if (SHOULD_FAIL( + nginx_user_id.ok() && nginx_group_id.ok() && uid.ok(), + IntentionalFailureHandler::FailureType::RegisterAttchment, + &did_fail_on_purpose + )) { + string err = "Undefined"; + if (!nginx_user_id.ok()) { + err = nginx_user_id.getErr(); + } else if (!uid.ok()) { + err = uid.getErr(); + } else if (!nginx_group_id.ok()) { + err = nginx_group_id.getErr(); + } + dbgWarning(D_NGINX_ATTACHMENT) << "Failed to register new attachment. Error: " + << (did_fail_on_purpose ? "Intentional Failure" : err); + i_socket->closeSocket(new_attachment_socket); + new_attachment_socket = -1; + + nginx_attachment_event.addNetworkingCounter( + nginxAttachmentEvent::networkVerdict::REGISTRATION_FAIL + ); + nginx_attachment_event.notify(); + nginx_attachment_event.resetAllCounters(); + return; + } + + if (!registerAttachmentProcess(*nginx_user_id, *nginx_group_id, new_attachment_socket)) { + i_socket->closeSocket(new_attachment_socket); + new_attachment_socket = -1; + + nginx_attachment_event.addNetworkingCounter( + nginxAttachmentEvent::networkVerdict::REGISTRATION_FAIL + ); + nginx_attachment_event.notify(); + nginx_attachment_event.resetAllCounters(); + dbgWarning(D_NGINX_ATTACHMENT) << "Failed to register attachment"; + } + }; + mainloop->addFileRoutine( + I_MainLoop::RoutineType::RealTime, + server_sock, + accept_attachment_routine, + "Nginx Attachment registration listener", + true + ); + + return true; + } + + Maybe + getUidFromSocket(I_Socket::socketFd new_attachment_socket) + { + dbgAssert(server_sock > 0) << "Registration attempt occurred while registration socket is uninitialized"; + + bool did_fail_on_purpose = false; + DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::ReceiveDataFromSocket); + Maybe> uid_len = i_socket->receiveData(new_attachment_socket, sizeof(uint8_t)); + if (SHOULD_FAIL( + uid_len.ok(), + IntentionalFailureHandler::FailureType::ReceiveDataFromSocket, + &did_fail_on_purpose + )) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to read the length of the attachment's UID. Error: " + << (did_fail_on_purpose ? "Intentional Failure" : uid_len.getErr()); + return genError("Failed to read attachment's UID length"); + } + + uint8_t attachment_uid_len = *reinterpret_cast(uid_len.unpack().data()); + dbgTrace(D_NGINX_ATTACHMENT) << "Attachment's UID length = " << static_cast(attachment_uid_len); + DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::ReceiveDataFromSocket); + Maybe> attachment_uid = i_socket->receiveData(new_attachment_socket, attachment_uid_len); + if (SHOULD_FAIL( + attachment_uid.ok(), + IntentionalFailureHandler::FailureType::ReceiveDataFromSocket, + &did_fail_on_purpose + )) { + dbgWarning(D_NGINX_ATTACHMENT) + << "Failed to read the attachment's UID. Error: " + << (did_fail_on_purpose ? 
"Intentional Failure" : attachment_uid.getErr()); + return genError("Failed to read the attachment's UID"); + } + + string uid(attachment_uid.unpack().begin(), attachment_uid.unpack().end()); + if (uid != inst_awareness->getUniqueID().unpack()) { + dbgWarning(D_NGINX_ATTACHMENT) << "NGINX UID is invalid, UID: " << uid; + return genError("Ivalid UID was sent"); + } + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully read attachment's UID: " << uid; + return uid; + } + + Maybe + readIdFromSocket(I_Socket::socketFd new_attachment_socket) + { + bool did_fail_on_purpose = false; + DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::ReceiveDataFromSocket); + Maybe> id = i_socket->receiveData(new_attachment_socket, sizeof(uint32_t)); + if (SHOULD_FAIL( + id.ok(), + IntentionalFailureHandler::FailureType::ReceiveDataFromSocket, + &did_fail_on_purpose + )) { + return genError( + "Failed to read the attachment's User ID or Group ID" + + did_fail_on_purpose ? "[Intentional Failure]" : "" + ); + } + + uint32_t attachment_id = *reinterpret_cast(id.unpack().data()); + dbgTrace(D_NGINX_ATTACHMENT) << "Attachment ID: " << static_cast(attachment_id); + return attachment_id; + } + + string static_resources_path; + FilterVerdict default_verdict; + FailopenModeListener fail_open_mode_listener; +#ifdef FAILURE_TEST + IntentionalFailureHandler intentional_failure_handler; +#endif + CPUMetric nginx_plugin_cpu_metric; + + // Attachment Details + I_Socket::socketFd server_sock = -1; + I_Socket::socketFd attachment_sock = -1; + + uint num_of_nginx_ipc_elements = NUM_OF_NGINX_IPC_ELEMENTS; + uint32_t nginx_worker_user_id = 0; + uint32_t nginx_worker_group_id = 0; + string instance_unique_id; + SharedMemoryIPC *attachment_ipc = nullptr; + HttpAttachmentConfig attachment_config; + I_MainLoop::RoutineID attachment_routine_id = 0; + bool traffic_indicator = false; + + // Interfaces + I_Socket *i_socket = nullptr; + I_TimeGet *timer = nullptr; + I_MainLoop *mainloop = nullptr; + I_HttpManager *http_manager = nullptr; + I_InstanceAwareness *inst_awareness = nullptr; + I_TableSpecific *i_transaction_table = nullptr; + + // Metrics + const string default_static_resources_path = DEFAULT_STATIC_RESOURCES_PATH; + const uint default_metrics_print_interval_sec = 5; + float metrics_average_table_size = 0; + uint64_t metrics_sample_count = 0; + uint64_t metrics_max_table_size = 0; + uint64_t num_compressed_responses = 0; + uint64_t num_uncompressed_responses = 0; + uint curr_attachment_registrations_counter = 1; + chrono::time_point registration_duration_start = chrono::steady_clock::now(); + + chrono::seconds metric_report_interval; + nginxAttachmentEvent nginx_attachment_event; + nginxAttachmentMetric nginx_attachment_metric; + nginxIntakerEvent nginx_intaker_event; + nginxIntakerMetric nginx_intaker_metric; + TransactionTableEvent transaction_table_event; + TransactionTableMetric transaction_table_metric; +}; + +NginxAttachment::NginxAttachment() : Component("NginxAttachment"), pimpl(make_unique()) {} + +NginxAttachment::~NginxAttachment() {} + +void NginxAttachment::init() { pimpl->init(); } + +void NginxAttachment::fini() { pimpl->fini(); } + +void +NginxAttachment::preload() +{ + pimpl->preload(); + registerExpectedConfiguration("HTTP manager", "Container mode"); + registerExpectedConfiguration("HTTP manager", "Shared memory segment size in KB"); + registerExpectedConfiguration("HTTP manager", "Nginx permission"); + registerExpectedConfiguration("HTTP manager", "Attachment debug level"); + registerExpectedConfiguration("HTTP 
manager", "Shared verdict signal path"); + registerExpectedConfiguration("HTTP manager", "Shared settings path"); + registerExpectedConfiguration("HTTP manager", "Max wait time for verdict in sec"); + registerExpectedConfiguration("HTTP manager", "Static resources path"); + registerExpectedConfiguration("HTTP manager", "Fail Open Mode state"); + registerExpectedConfiguration("HTTP manager", "Metrics printing interval in sec"); + registerExpectedConfiguration("HTTP manager", "Keep Alive interval in sec"); + registerExpectedConfiguration("HTTP manager", "Fail Open timeout msec"); + registerExpectedSetting("HTTP manager", "debug context"); + registerExpectedConfiguration("HTTP manager", "NGINX response processing timeout msec"); + registerExpectedConfiguration("HTTP manager", "NGINX request processing timeout msec"); + registerExpectedConfiguration("HTTP manager", "NGINX registration thread timeout msec"); + registerExpectedConfiguration("HTTP manager", "NGINX request header thread timeout msec"); + registerExpectedConfiguration("HTTP manager", "NGINX request body thread timeout msec"); + registerExpectedConfiguration("HTTP manager", "NGINX response header thread timeout msec"); + registerExpectedConfiguration("HTTP manager", "NGINX response body thread timeout msec"); + registerExpectedConfiguration("HTTP manager", "NGINX inspection mode"); + registerExpectedConfiguration("Nginx Attachment", "metric reporting interval"); + registerExpectedSetting("allowOnlyDefinedApplications"); + registerExpectedConfigFile("activeContextConfig", Config::ConfigFileType::Policy); + registerExpectedConfiguration("rulebase", "usersIdentifiers"); + BasicRuleConfig::preload(); + WebTriggerConf::preload(); +} + +SASAL_END diff --git a/components/attachment-intakers/nginx_attachment/nginx_attachment_config.cc b/components/attachment-intakers/nginx_attachment/nginx_attachment_config.cc new file mode 100755 index 0000000..d26a83d --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/nginx_attachment_config.cc @@ -0,0 +1,331 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "nginx_attachment_config.h" + +#include + +#include "nginx_attachment.h" +#include "config.h" +#include "singleton.h" +#include "i_gradual_deployment.h" +#include "debug.h" + +USE_DEBUG_FLAG(D_NGINX_ATTACHMENT); + +using namespace std; + +using DebugLevel = ngx_http_cp_debug_level_e; + +void +HttpAttachmentConfig::init() +{ + setDebugLevel(); + setGradualDeploymentIPs(); + setWebTriggerConf(); + setStaticResourcesPath(); + setFailOpenMode(); + setFailOpenTimeout(); + setFailOpenWaitMode(); + setSessionsPerMinuteLimitVerdict(); + setMaxSessionsPerMinute(); + setNumOfNginxIpcElements(); + setDebugByContextValues(); + setKeepAliveIntervalMsec(); +} + +bool +HttpAttachmentConfig::operator==(const HttpAttachmentConfig &other) const +{ + return + web_trigger_conf == other.web_trigger_conf && + conf_data == other.conf_data; +} + +void +HttpAttachmentConfig::save(cereal::JSONOutputArchive &out_ar) const +{ + conf_data.save(out_ar); +} + +template +static Conf +getAttachmentConf(const Conf &default_val, const string &profile_conf, const Strings & ...conf) +{ + const Conf &profile_settings = getProfileAgentSettingWithDefault(default_val, profile_conf); + return getConfigurationWithDefault(profile_settings, conf...); +} + +void +HttpAttachmentConfig::setGradualDeploymentIPs() +{ + auto i_gradual_deployment = Singleton::Consume::by(); + conf_data.setExcludeSources(i_gradual_deployment->getPolicy(I_GradualDeployment::AttachmentType::NGINX)); +} + +void +HttpAttachmentConfig::setWebTriggerConf() +{ + web_trigger_conf = getConfigurationWithDefault( + WebTriggerConf::default_trigger_conf, + "HTTP manager", + "Web trigger conf" + ); +} + +void +HttpAttachmentConfig::setDebugLevel() { + string debug_level = getAttachmentConf( + "info", + "agent.debug.flag.nginxModule", + "HTTP manager", + "Attachment debug level" + ); + + debug_level[0] = toupper(debug_level[0]); + + if (debug_level == "Trace") { + conf_data.setNumericalValue("dbg_level", (unsigned int)DebugLevel::DBG_LEVEL_TRACE); + } else if (debug_level == "Debug") { + conf_data.setNumericalValue("dbg_level", (unsigned int)DebugLevel::DBG_LEVEL_DEBUG); + } else if (debug_level == "Info") { + conf_data.setNumericalValue("dbg_level", (unsigned int)DebugLevel::DBG_LEVEL_INFO); + } else if (debug_level == "Warning") { + conf_data.setNumericalValue("dbg_level", (unsigned int)DebugLevel::DBG_LEVEL_WARNING); + } else if (debug_level == "Error") { + conf_data.setNumericalValue("dbg_level", (unsigned int)DebugLevel::DBG_LEVEL_ERROR); + } else { + dbgWarning(D_NGINX_ATTACHMENT) + << "Debug level \"" + << debug_level + << "\" is not valid. using default level \"warning\""; + conf_data.setNumericalValue("dbg_level", (unsigned int)DebugLevel::DBG_LEVEL_INFO); + } +} + +void +HttpAttachmentConfig::setFailOpenMode() +{ + bool is_fail_open_mode_enabled = getAttachmentConf( + true, + "agent.failOpenState.nginxModule", + "HTTP manager", + "Fail Open Mode state" + ); + + dbgTrace(D_NGINX_ATTACHMENT) + << "Attachment failure mode is: " + << (is_fail_open_mode_enabled ? 
"Enabled" : "Disabled"); + conf_data.setNumericalValue("is_fail_open_mode_enabled", is_fail_open_mode_enabled); +} + +void +HttpAttachmentConfig::setFailOpenTimeout() +{ + conf_data.setNumericalValue("fail_open_timeout", getAttachmentConf( + 50, + "agent.failOpenTimeout.nginxModule", + "HTTP manager", + "Fail Open timeout msec" + )); + + conf_data.setNumericalValue("fail_open_hold_timeout", getAttachmentConf( + 150, + "agent.failOpenWaitTimeout.nginxModule", + "HTTP manager", + "Fail Open wait timeout msec" + )); + + conf_data.setNumericalValue("res_proccessing_timeout_msec", getAttachmentConf( + 3000, + "agent.resProccessingTimeout.nginxModule", + "HTTP manager", + "NGINX response processing timeout msec" + )); + + conf_data.setNumericalValue("req_proccessing_timeout_msec", getAttachmentConf( + 3000, + "agent.reqProccessingTimeout.nginxModule", + "HTTP manager", + "NGINX request processing timeout msec" + )); + + conf_data.setNumericalValue("registration_thread_timeout_msec", getAttachmentConf( + 100, + "agent.registrationThreadTimeout.nginxModule", + "HTTP manager", + "NGINX registration thread timeout msec" + )); + + conf_data.setNumericalValue("req_header_thread_timeout_msec", getAttachmentConf( + 100, + "agent.reqHeaderThreadTimeout.nginxModule", + "HTTP manager", + "NGINX request header thread timeout msec" + )); + + conf_data.setNumericalValue("req_body_thread_timeout_msec", getAttachmentConf( + 150, + "agent.reqBodyThreadTimeout.nginxModule", + "HTTP manager", + "NGINX request body thread timeout msec" + )); + + conf_data.setNumericalValue("res_header_thread_timeout_msec", getAttachmentConf( + 100, + "agent.resHeaderThreadTimeout.nginxModule", + "HTTP manager", + "NGINX response header thread timeout msec" + )); + + conf_data.setNumericalValue("res_body_thread_timeout_msec", getAttachmentConf( + 150, + "agent.resBodyThreadTimeout.nginxModule", + "HTTP manager", + "NGINX response body thread timeout msec" + )); + + conf_data.setNumericalValue("waiting_for_verdict_thread_timeout_msec", getAttachmentConf( + 150, + "agent.waitThreadTimeout.nginxModule", + "HTTP manager", + "NGINX wait thread timeout msec" + )); + + uint inspection_mode = getAttachmentConf( + static_cast(ngx_http_inspection_mode_e::NON_BLOCKING_THREAD), + "agent.inspectionMode.nginxModule", + "HTTP manager", + "NGINX inspection mode" + ); + + if (inspection_mode >= ngx_http_inspection_mode_e::INSPECTION_MODE_COUNT) { + inspection_mode = ngx_http_inspection_mode_e::NON_BLOCKING_THREAD; + } + conf_data.setNumericalValue("nginx_inspection_mode", inspection_mode); +} + +void +HttpAttachmentConfig::setFailOpenWaitMode() +{ + bool is_fail_open_mode_hold_enabled = getAttachmentConf( + true, + "agent.failOpenWaitState.nginxModule", + "HTTP manager", + "Fail Open Mode state" + ); + + dbgTrace(D_NGINX_ATTACHMENT) + << "Attachment waiting failure mode is: " + << (is_fail_open_mode_hold_enabled ? 
"Enabled" : "Disabled"); + conf_data.setNumericalValue("is_fail_open_mode_hold_enabled", is_fail_open_mode_hold_enabled); +} + +void +HttpAttachmentConfig::setSessionsPerMinuteLimitVerdict() +{ + string sessions_per_minute_limit_verdict = getAttachmentConf( + "Accept", + "agent.sessionsPerMinuteLimitVerdict.nginxModule", + "HTTP manager", + "Sessions Per Minute Limit Verdict" + ); + + dbgTrace(D_NGINX_ATTACHMENT) + << "Attachment sessions per minute limit verdict is: " + << sessions_per_minute_limit_verdict; + + conf_data.setStringValue("sessions_per_minute_limit_verdict", sessions_per_minute_limit_verdict); +} + +void +HttpAttachmentConfig::setMaxSessionsPerMinute() +{ + uint max_sessions_per_minute = getAttachmentConf( + 0, + "agent.maxSessionsPerMinute.nginxModule", + "HTTP manager", + "Max Sessions Per Minute" + ); + + dbgTrace(D_NGINX_ATTACHMENT) + << "Attachment max sessions per minute is: " + << max_sessions_per_minute; + + conf_data.setNumericalValue("max_sessions_per_minute", max_sessions_per_minute); +} + +void +HttpAttachmentConfig::setNumOfNginxIpcElements() +{ + uint num_of_nginx_ipc_elements = getProfileAgentSettingWithDefault( + NUM_OF_NGINX_IPC_ELEMENTS, "nginxAttachment.numOfNginxIpcElements" + ); + dbgTrace(D_NGINX_ATTACHMENT) + << "Number of NGINX IPC elements: " + << num_of_nginx_ipc_elements; + conf_data.setNumericalValue("num_of_nginx_ipc_elements", num_of_nginx_ipc_elements); +} + +void +HttpAttachmentConfig::setKeepAliveIntervalMsec() +{ + uint keep_alive_interval_msec = getProfileAgentSettingWithDefault( + 300, "attachmentRegistrator.expirationCheckSeconds" + ); + keep_alive_interval_msec = (keep_alive_interval_msec * 1000) / 2; + dbgDebug(D_NGINX_ATTACHMENT) + << "Interval keeps alives size: " + << keep_alive_interval_msec << " msec"; + conf_data.setNumericalValue("keep_alive_interval_msec", keep_alive_interval_msec); +} + +void +HttpAttachmentConfig::setStaticResourcesPath() +{ + string static_resources_path = getConfigurationWithDefault( + DEFAULT_STATIC_RESOURCES_PATH, + "HTTP manager", + "Static resources path" + ); + dbgDebug(D_NGINX_ATTACHMENT) << "Static resources path is : " << static_resources_path; + conf_data.setStringValue("static_resources_path", static_resources_path); +} + +void +HttpAttachmentConfig::setDebugByContextValues() +{ + DebugConfig new_ctx_cfg; + auto maybe_ctx_config = getSetting("HTTP manager", "debug context"); + if(!maybe_ctx_config.ok()) { + dbgDebug(D_NGINX_ATTACHMENT) << "Failed to set context values. Setting default values"; + conf_data.setDebugContext(new_ctx_cfg); + return; + } + new_ctx_cfg = maybe_ctx_config.unpack(); + conf_data.setDebugContext(new_ctx_cfg); + dbgDebug(D_NGINX_ATTACHMENT) + << "Setting context values : " + << "client_ip: " + << new_ctx_cfg.client + << ", listening_ip: " + << new_ctx_cfg.server + << ", uri_prefix: " + << new_ctx_cfg.uri + << ", hostname: " + << new_ctx_cfg.host + << ", http_method: " + << new_ctx_cfg.method + << ", listening_port: " + << new_ctx_cfg.port; +} diff --git a/components/attachment-intakers/nginx_attachment/nginx_attachment_config.h b/components/attachment-intakers/nginx_attachment/nginx_attachment_config.h new file mode 100755 index 0000000..e7f23ca --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/nginx_attachment_config.h @@ -0,0 +1,77 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __NGINX_ATTACHMENT_CONFIG_H__ +#define __NGINX_ATTACHMENT_CONFIG_H__ + +#include +#include + +#include "nginx_attachment_util.h" +#include "cereal/archives/json.hpp" + +#include "generic_rulebase/triggers_config.h" +#include "http_configuration.h" + +class HttpAttachmentConfig +{ +public: + void init(); + + bool operator==(const HttpAttachmentConfig &other) const; + + void save(cereal::JSONOutputArchive &out_ar) const; + + unsigned int getDebugLevel() const { return conf_data.getNumericalValue("dbg_level"); } + bool getIsFailOpenModeEnabled() const { return conf_data.getNumericalValue("is_fail_open_mode_enabled"); } + + bool + getSessionsPerMinuteLimitVerdict() const + { + return conf_data.getNumericalValue("sessions_per_minute_limit_verdict"); + } + + unsigned int getMaxSessionsPerMinute() const { return conf_data.getNumericalValue("max_sessions_per_minute"); } + unsigned int getNumOfNginxElements() const { return conf_data.getNumericalValue("num_of_nginx_ipc_elements"); } + unsigned int getKeepAliveIntervalMsec() const { return conf_data.getNumericalValue("keep_alive_interval_msec"); } + +private: + void setGradualDeploymentIPs(); + + void setWebTriggerConf(); + + void setDebugLevel(); + + void setFailOpenMode(); + + void setFailOpenTimeout(); + + void setFailOpenWaitMode(); + + void setSessionsPerMinuteLimitVerdict(); + + void setMaxSessionsPerMinute(); + + void setNumOfNginxIpcElements(); + + void setKeepAliveIntervalMsec(); + + void setStaticResourcesPath(); + + void setDebugByContextValues(); + + WebTriggerConf web_trigger_conf; + HttpAttachmentConfiguration conf_data; +}; + +#endif // __NGINX_ATTACHMENT_CONFIG_H__ diff --git a/components/attachment-intakers/nginx_attachment/nginx_attachment_metric.cc b/components/attachment-intakers/nginx_attachment/nginx_attachment_metric.cc new file mode 100755 index 0000000..3f2f854 --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/nginx_attachment_metric.cc @@ -0,0 +1,163 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "nginx_attachment_metric.h" + +USE_DEBUG_FLAG(D_METRICS_NGINX_ATTACHMENT); + +void +nginxAttachmentEvent::resetAllCounters() +{ + successfull_registrations_counter = 0; + failed_registrations_counter = 0; + failed_connections_counter = 0; + accept_verdict_counter = 0; + inspect_verdict_counter = 0; + drop_verdict_counter = 0; + inject_verdict_counter = 0; + irrelevant_verdict_counter = 0; + reconf_verdict_counter = 0; + wait_verdict_counter = 0; +} + +void +nginxAttachmentEvent::addNetworkingCounter(networkVerdict _verdict) +{ + switch (_verdict) { + case networkVerdict::REGISTRATION_SUCCESS: { + successfull_registrations_counter += 1; + break; + } + case networkVerdict::REGISTRATION_FAIL: { + failed_registrations_counter += 1; + break; + } + case networkVerdict::CONNECTION_FAIL: { + failed_connections_counter += 1; + break; + } + default: + dbgWarning(D_METRICS_NGINX_ATTACHMENT) << "Unsupported metric type. Type: " << static_cast(_verdict); + return; + } +} + +void +nginxAttachmentEvent::addTrafficVerdictCounter(trafficVerdict _verdict) +{ + switch (_verdict) { + case trafficVerdict::INSPECT: { + inspect_verdict_counter += 1; + break; + } + case trafficVerdict::ACCEPT: { + accept_verdict_counter += 1; + break; + } + case trafficVerdict::DROP: { + drop_verdict_counter += 1; + break; + } + case trafficVerdict::INJECT: { + inject_verdict_counter += 1; + break; + } + case trafficVerdict::IRRELEVANT: { + irrelevant_verdict_counter += 1; + break; + } + case trafficVerdict::RECONF: { + reconf_verdict_counter += 1; + break; + } + case trafficVerdict::WAIT: { + wait_verdict_counter += 1; + break; + } + + default: + dbgWarning(D_METRICS_NGINX_ATTACHMENT) << "Unsupported metric type. Type: " << static_cast(_verdict); + return; + } +} + +void +nginxAttachmentEvent::addResponseInspectionCounter(uint64_t _counter) +{ + response_inspection_counter += _counter; +} + +uint64_t +nginxAttachmentEvent::getNetworkingCounter(networkVerdict _verdict) const +{ + switch (_verdict) { + case networkVerdict::REGISTRATION_SUCCESS: + return successfull_registrations_counter; + case networkVerdict::REGISTRATION_FAIL: + return failed_registrations_counter; + case networkVerdict::CONNECTION_FAIL: + return failed_connections_counter; + default: + dbgWarning(D_METRICS_NGINX_ATTACHMENT) << "Unsupported metric type. Type: " << static_cast(_verdict); + return 0; + } +} + +uint64_t +nginxAttachmentEvent::getTrafficVerdictCounter(trafficVerdict _verdict) const +{ + switch (_verdict) { + case trafficVerdict::INSPECT: + return inspect_verdict_counter; + case trafficVerdict::ACCEPT: + return accept_verdict_counter; + case trafficVerdict::DROP: + return drop_verdict_counter; + case trafficVerdict::INJECT: + return inject_verdict_counter; + case trafficVerdict::IRRELEVANT: + return irrelevant_verdict_counter; + case trafficVerdict::RECONF: + return reconf_verdict_counter; + case trafficVerdict::WAIT: + return wait_verdict_counter; + default: + dbgWarning(D_METRICS_NGINX_ATTACHMENT) << "Unsupported metric type. 
Type: " << static_cast(_verdict); + return 0; + } +} + +uint64_t +nginxAttachmentEvent::getResponseInspectionCounter() const +{ + return response_inspection_counter; +} + +void +nginxAttachmentMetric::upon(const nginxAttachmentEvent &event) +{ + successfull_registrations.report( + event.getNetworkingCounter(nginxAttachmentEvent::networkVerdict::REGISTRATION_SUCCESS) + ); + failed_registrations.report( + event.getNetworkingCounter(nginxAttachmentEvent::networkVerdict::REGISTRATION_FAIL) + ); + failed_connections.report(event.getNetworkingCounter(nginxAttachmentEvent::networkVerdict::CONNECTION_FAIL)); + inspect_verdict.report(event.getTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::INSPECT)); + accept_verdict.report(event.getTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::ACCEPT)); + drop_verdict.report(event.getTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::DROP)); + inject_verdict.report(event.getTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::INJECT)); + irrelevant_verdict.report(event.getTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::IRRELEVANT)); + reconf_verdict.report(event.getTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::RECONF)); + response_inspection.report(event.getResponseInspectionCounter()); +} diff --git a/components/attachment-intakers/nginx_attachment/nginx_attachment_opaque.cc b/components/attachment-intakers/nginx_attachment/nginx_attachment_opaque.cc new file mode 100755 index 0000000..a9029be --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/nginx_attachment_opaque.cc @@ -0,0 +1,121 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "nginx_attachment_opaque.h" + +#include "boost/uuid/uuid.hpp" +#include "boost/uuid/uuid_generators.hpp" +#include "boost/uuid/uuid_io.hpp" + +#include "config.h" +#include "sasal.h" +#include "virtual_modifiers.h" + +SASAL_START // HTTP Manager - Transaction data + +using namespace std; +using namespace boost::uuids; + +USE_DEBUG_FLAG(D_HTTP_MANAGER); + +NginxAttachmentOpaque::NginxAttachmentOpaque(HttpTransactionData _transaction_data) + : + TableOpaqueSerialize(this), + transaction_data(move(_transaction_data)), + ctx(), + session_tenant(), + uuid() +{ + try { + uuid = to_string(boost::uuids::random_generator()()); + } catch (const boost::uuids::entropy_error &e) { + dbgWarning(D_HTTP_MANAGER) << "Failed to generate UUID. 
Error: " << e.what(); + } + + dbgTrace(D_HTTP_MANAGER) << "Creating nginx opaque environment from: " << transaction_data; + + response_compression_stream = initCompressionStream(); + + auto client_ip = transaction_data.getSourceIP(); + std::stringstream client_ip_str; + client_ip_str << client_ip; + setSourceIdentifier("sourceip", client_ip_str.str()); + + ctx.registerValue("eventReferenceId", uuid, EnvKeyAttr::LogSection::DATA); + ctx.registerValue(HttpTransactionData::http_proto_ctx, transaction_data.getHttpProtocol()); + ctx.registerValue(HttpTransactionData::method_ctx, transaction_data.getHttpMethod()); + ctx.registerValue(HttpTransactionData::host_name_ctx, transaction_data.getDestinationHost()); + ctx.registerValue(HttpTransactionData::listening_port_ctx, transaction_data.getListeningPort()); + ctx.registerValue(HttpTransactionData::listening_ip_ctx, transaction_data.getListeningIP()); + ctx.registerValue(HttpTransactionData::client_ip_ctx, transaction_data.getSourceIP()); + ctx.registerValue(HttpTransactionData::client_port_ctx, transaction_data.getSourcePort()); + ctx.registerFunc(HttpTransactionData::source_identifier, [this](){ return source_identifier; }); + + ctx.registerValue(HttpTransactionData::uri_ctx, transaction_data.getURI()); + auto decoder = makeVirtualContainer>(transaction_data.getURI()); + string decoded_url(decoder.begin(), decoder.end()); + auto question_mark_location = decoded_url.find('?'); + if (question_mark_location != string::npos) { + ctx.registerValue(HttpTransactionData::uri_query_decoded, decoded_url.substr(question_mark_location + 1)); + } + ctx.registerValue(HttpTransactionData::uri_path_decoded, decoded_url.substr(0, question_mark_location)); +} + +NginxAttachmentOpaque::~NginxAttachmentOpaque() +{ + finiCompressionStream(response_compression_stream); +} + +// LCOV_EXCL_START - sync functions, can only be tested once the sync module exists +std::unique_ptr +NginxAttachmentOpaque::prototype() +{ + return make_unique(HttpTransactionData()); +} +// LCOV_EXCL_STOP + +void +NginxAttachmentOpaque::setSessionTenant(const string &tenant) +{ + session_tenant = tenant; + Singleton::Consume::by()->setActiveTenant(session_tenant); +} + +void +NginxAttachmentOpaque::setSourceIdentifier(const string &header_key, const string &new_source_identifier) +{ + identifier_type = header_key; + source_identifier = new_source_identifier; +} + +const string & +NginxAttachmentOpaque::getSourceIdentifiersType() const +{ + return identifier_type; +} + +void +NginxAttachmentOpaque::addToSavedData(const string &name, const string &data) +{ + saved_data[name] += data; + ctx.registerValue(name, saved_data[name]); +} + +void +NginxAttachmentOpaque::setSavedData(const string &name, const string &data, EnvKeyAttr::LogSection log_ctx) +{ + saved_data[name] = data; + ctx.registerValue(name, data, log_ctx); +} + +SASAL_END diff --git a/components/attachment-intakers/nginx_attachment/nginx_attachment_opaque.h b/components/attachment-intakers/nginx_attachment/nginx_attachment_opaque.h new file mode 100755 index 0000000..70052d0 --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/nginx_attachment_opaque.h @@ -0,0 +1,94 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __NGINX_ATTACHMENT_OPAQUE_H__ +#define __NGINX_ATTACHMENT_OPAQUE_H__ + +#include +#include +#include + +#include "compression_utils.h" +#include "generic_rulebase/generic_rulebase_context.h" +#include "http_transaction_data.h" +#include "table_opaque.h" +#include "context.h" +#include "i_environment.h" +#include "buffer.h" + +class NginxAttachmentOpaque : public TableOpaqueSerialize, Singleton::Consume +{ +public: + NginxAttachmentOpaque(HttpTransactionData transaction_data); + ~NginxAttachmentOpaque(); + + void + activateContext() + { + ctx.activate(); + gen_ctx.activate(); + if (session_tenant != "") { + Singleton::Consume::by()->setActiveTenant(session_tenant); + } + + } + + void + deactivateContext() + { + if (session_tenant != "") { + Singleton::Consume::by()->unsetActiveTenant(); + } + gen_ctx.deactivate(); + ctx.deactivate(); + } + + CompressionStream * getResponseCompressionStream() { return response_compression_stream; } + HttpTransactionData & getTransactionData() { return transaction_data; } + +// LCOV_EXCL_START - sync functions, can only be tested once the sync module exists + template void serialize(T &, uint) {} + static std::unique_ptr prototype(); +// LCOV_EXCL_STOP + + static const std::string name() { return "NginxAttachmentOpaque"; } + static uint currVer() { return 0; } + static uint minVer() { return 0; } + + const std::string & getSessionTenant() const { return session_tenant; } + void setSessionTenant(const std::string &tenant); + void setSourceIdentifier(const std::string &header_key, const std::string &source_identifier); + const std::string & getSourceIdentifiersType() const; + + const std::string & getSessionUUID() const { return uuid; } + + void addToSavedData(const std::string &name, const std::string &data); + void setSavedData( + const std::string &name, + const std::string &data, + EnvKeyAttr::LogSection log_ctx = EnvKeyAttr::LogSection::NONE + ); + +private: + CompressionStream *response_compression_stream; + HttpTransactionData transaction_data; + GenericRulebaseContext gen_ctx; + Context ctx; + std::string session_tenant; + std::string uuid; + std::string source_identifier; + std::string identifier_type; + std::map saved_data; +}; + +#endif // __NGINX_ATTACHMENT_OPAQUE_H__ diff --git a/components/attachment-intakers/nginx_attachment/nginx_intaker_metric.cc b/components/attachment-intakers/nginx_attachment/nginx_intaker_metric.cc new file mode 100755 index 0000000..18a7cc6 --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/nginx_intaker_metric.cc @@ -0,0 +1,502 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "nginx_intaker_metric.h" + +USE_DEBUG_FLAG(D_METRICS_NGINX_ATTACHMENT); + +void +nginxIntakerEvent::resetAllCounters() +{ + successfull_inspection_counter = 0; + open_failure_inspection_counter = 0; + close_failure_inspection_counter = 0; + transparent_mode_counter = 0; + total_transparent_time = 0; + accept_verdict_counter = 0; + inspect_verdict_counter = 0; + drop_verdict_counter = 0; + inject_verdict_counter = 0; + irrelevant_verdict_counter = 0; + reconf_verdict_counter = 0; + wait_verdict_counter = 0; + req_failed_compression_counter = 0; + res_failed_compression_counter = 0; + req_failed_decompression_counter = 0; + res_failed_decompression_counter = 0; + req_successful_compression_counter = 0; + res_successful_compression_counter = 0; + req_successful_decompression_counter = 0; + res_successful_decompression_counter = 0; + corrupted_zip_skipped_session_counter = 0; + thread_timeout = 0; + reg_thread_timeout = 0; + req_header_thread_timeout = 0; + req_body_thread_timeout = 0; + res_header_thread_timeout = 0; + res_body_thread_timeout = 0; + thread_failure = 0; + req_proccessing_timeout = 0; + res_proccessing_timeout = 0; + req_failed_to_reach_upstream = 0; + cpu_event.setCPU(0); +} + +ngx_http_plugin_metric_type_e +nginxIntakerEvent::EnumOfIndex(int i) +{ + return static_cast(i); +} + +void +nginxIntakerEvent::addPluginMetricCounter(const ngx_http_cp_metric_data_t *recieved_metric_data) +{ + for (int i = 0; i < static_cast(ngx_http_plugin_metric_type_e::METRIC_TYPES_COUNT); i++) { + ngx_http_plugin_metric_type_e metric_type = EnumOfIndex(i); + uint64_t amount = recieved_metric_data->data[i]; + switch (metric_type) { + case ngx_http_plugin_metric_type_e::INSPECTION_SUCCESSES_COUNT: { + successfull_inspection_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::INSPECTION_OPEN_FAILURES_COUNT: { + open_failure_inspection_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::INSPECTION_CLOSE_FAILURES_COUNT: { + close_failure_inspection_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::TRANSPARENTS_COUNT: { + transparent_mode_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::TOTAL_TRANSPARENTS_TIME: { + total_transparent_time += amount; + break; + } + case ngx_http_plugin_metric_type_e::INSPECT_VERDICTS_COUNT: { + inspect_verdict_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::ACCEPT_VERDICTS_COUNT: { + accept_verdict_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::DROP_VERDICTS_COUNT: { + drop_verdict_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::INJECT_VERDICTS_COUNT: { + inject_verdict_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::IRRELEVANT_VERDICTS_COUNT: { + irrelevant_verdict_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::RECONF_VERDICTS_COUNT: { + reconf_verdict_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::AVERAGE_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT: { + if (amount > 0) average_overall_processing_time_until_verdict = amount; + break; + } + case ngx_http_plugin_metric_type_e::MAX_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT: { + if (amount > 0) max_overall_processing_time_until_verdict = amount; + break; + } + case ngx_http_plugin_metric_type_e::MIN_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT: { + if (amount > 0) min_overall_processing_time_until_verdict = amount; 
+ break; + } + case ngx_http_plugin_metric_type_e::AVERAGE_REQ_PPROCESSING_TIME_UNTIL_VERDICT: { + if (amount > 0) average_req_processing_time_until_verdict = amount; + break; + } + case ngx_http_plugin_metric_type_e::MAX_REQ_PPROCESSING_TIME_UNTIL_VERDICT: { + if (amount > 0) max_req_processing_time_until_verdict = amount; + break; + } + case ngx_http_plugin_metric_type_e::MIN_REQ_PPROCESSING_TIME_UNTIL_VERDICT: { + if (amount > 0) min_req_processing_time_until_verdict = amount; + break; + } + case ngx_http_plugin_metric_type_e::AVERAGE_RES_PPROCESSING_TIME_UNTIL_VERDICT: { + if (amount > 0) average_res_processing_time_until_verdict = amount; + break; + } + case ngx_http_plugin_metric_type_e::MAX_RES_PPROCESSING_TIME_UNTIL_VERDICT: { + if (amount > 0) max_res_processing_time_until_verdict = amount; + break; + } + case ngx_http_plugin_metric_type_e::MIN_RES_PPROCESSING_TIME_UNTIL_VERDICT: { + if (amount > 0) min_res_processing_time_until_verdict = amount; + break; + } + case ngx_http_plugin_metric_type_e::REQ_FAILED_COMPRESSION_COUNT: { + req_failed_compression_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::RES_FAILED_COMPRESSION_COUNT: { + res_failed_compression_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::REQ_FAILED_DECOMPRESSION_COUNT: { + req_failed_decompression_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::RES_FAILED_DECOMPRESSION_COUNT: { + res_failed_decompression_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::REQ_SUCCESSFUL_COMPRESSION_COUNT: { + req_successful_compression_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::RES_SUCCESSFUL_COMPRESSION_COUNT: { + res_successful_compression_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::REQ_SUCCESSFUL_DECOMPRESSION_COUNT: { + req_successful_decompression_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::RES_SUCCESSFUL_DECOMPRESSION_COUNT: { + res_successful_decompression_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::CORRUPTED_ZIP_SKIPPED_SESSION_COUNT: { + corrupted_zip_skipped_session_counter += amount; + break; + } + case ngx_http_plugin_metric_type_e::THREAD_TIMEOUT: { + thread_timeout += amount; + break; + } + case ngx_http_plugin_metric_type_e::REG_THREAD_TIMEOUT: { + reg_thread_timeout += amount; + break; + } + case ngx_http_plugin_metric_type_e::REQ_HEADER_THREAD_TIMEOUT: { + req_header_thread_timeout += amount; + break; + } + case ngx_http_plugin_metric_type_e::REQ_BODY_THREAD_TIMEOUT: { + req_body_thread_timeout += amount; + break; + } + case ngx_http_plugin_metric_type_e::AVERAGE_REQ_BODY_SIZE_UPON_TIMEOUT: { + if (amount > 0) average_req_body_size_upon_timeout = amount; + break; + } + case ngx_http_plugin_metric_type_e::MAX_REQ_BODY_SIZE_UPON_TIMEOUT: { + if (amount > 0) max_req_body_size_upon_timeout = amount; + break; + } + case ngx_http_plugin_metric_type_e::MIN_REQ_BODY_SIZE_UPON_TIMEOUT: { + if (amount > 0) min_req_body_size_upon_timeout = amount; + break; + } + case ngx_http_plugin_metric_type_e::RES_HEADER_THREAD_TIMEOUT: { + res_header_thread_timeout += amount; + break; + } + case ngx_http_plugin_metric_type_e::RES_BODY_THREAD_TIMEOUT: { + res_body_thread_timeout += amount; + break; + } + case ngx_http_plugin_metric_type_e::AVERAGE_RES_BODY_SIZE_UPON_TIMEOUT: { + if (amount > 0) average_res_body_size_upon_timeout = amount; + break; + } + case ngx_http_plugin_metric_type_e::MAX_RES_BODY_SIZE_UPON_TIMEOUT: { + if (amount > 0) 
max_res_body_size_upon_timeout = amount; + break; + } + case ngx_http_plugin_metric_type_e::MIN_RES_BODY_SIZE_UPON_TIMEOUT: { + if (amount > 0) min_res_body_size_upon_timeout = amount; + break; + } + case ngx_http_plugin_metric_type_e::THREAD_FAILURE: { + thread_failure += amount; + break; + } + case ngx_http_plugin_metric_type_e::REQ_PROCCESSING_TIMEOUT: { + req_proccessing_timeout += amount; + break; + } + case ngx_http_plugin_metric_type_e::RES_PROCCESSING_TIMEOUT: { + res_proccessing_timeout += amount; + break; + } + case ngx_http_plugin_metric_type_e::REQ_FAILED_TO_REACH_UPSTREAM: { + req_failed_to_reach_upstream += amount; + break; + } + case ngx_http_plugin_metric_type_e::CPU_USAGE: { + cpu_event.setCPU(amount); + break; + } + default: + dbgWarning(D_METRICS_NGINX_ATTACHMENT) + << "Unsupported metric type. Type: " << static_cast(metric_type); + return; + } + } +} + +uint64_t +nginxIntakerEvent::getPluginMetricCounter(ngx_http_plugin_metric_type_e metric_type) const +{ + switch (metric_type) { + case ngx_http_plugin_metric_type_e::INSPECTION_SUCCESSES_COUNT: + return successfull_inspection_counter; + case ngx_http_plugin_metric_type_e::INSPECTION_OPEN_FAILURES_COUNT: + return open_failure_inspection_counter; + case ngx_http_plugin_metric_type_e::INSPECTION_CLOSE_FAILURES_COUNT: + return close_failure_inspection_counter; + case ngx_http_plugin_metric_type_e::TRANSPARENTS_COUNT: + return transparent_mode_counter; + case ngx_http_plugin_metric_type_e::TOTAL_TRANSPARENTS_TIME: + return total_transparent_time; + case ngx_http_plugin_metric_type_e::INSPECT_VERDICTS_COUNT: + return inspect_verdict_counter; + case ngx_http_plugin_metric_type_e::ACCEPT_VERDICTS_COUNT: + return accept_verdict_counter; + case ngx_http_plugin_metric_type_e::DROP_VERDICTS_COUNT: + return drop_verdict_counter; + case ngx_http_plugin_metric_type_e::INJECT_VERDICTS_COUNT: + return inject_verdict_counter; + case ngx_http_plugin_metric_type_e::IRRELEVANT_VERDICTS_COUNT: + return irrelevant_verdict_counter; + case ngx_http_plugin_metric_type_e::RECONF_VERDICTS_COUNT: + return reconf_verdict_counter; + case ngx_http_plugin_metric_type_e::AVERAGE_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT: + return average_overall_processing_time_until_verdict; + case ngx_http_plugin_metric_type_e::MAX_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT: + return max_overall_processing_time_until_verdict; + case ngx_http_plugin_metric_type_e::MIN_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT: + return min_overall_processing_time_until_verdict; + case ngx_http_plugin_metric_type_e::AVERAGE_REQ_PPROCESSING_TIME_UNTIL_VERDICT: + return average_req_processing_time_until_verdict; + case ngx_http_plugin_metric_type_e::MAX_REQ_PPROCESSING_TIME_UNTIL_VERDICT: + return max_req_processing_time_until_verdict; + case ngx_http_plugin_metric_type_e::MIN_REQ_PPROCESSING_TIME_UNTIL_VERDICT: + return min_req_processing_time_until_verdict; + case ngx_http_plugin_metric_type_e::AVERAGE_RES_PPROCESSING_TIME_UNTIL_VERDICT: + return average_res_processing_time_until_verdict; + case ngx_http_plugin_metric_type_e::MAX_RES_PPROCESSING_TIME_UNTIL_VERDICT: + return max_res_processing_time_until_verdict; + case ngx_http_plugin_metric_type_e::MIN_RES_PPROCESSING_TIME_UNTIL_VERDICT: + return min_res_processing_time_until_verdict; + case ngx_http_plugin_metric_type_e::REQ_FAILED_COMPRESSION_COUNT: + return req_failed_compression_counter; + case ngx_http_plugin_metric_type_e::RES_FAILED_COMPRESSION_COUNT: + return res_failed_compression_counter; + case 
ngx_http_plugin_metric_type_e::REQ_FAILED_DECOMPRESSION_COUNT: + return req_failed_decompression_counter; + case ngx_http_plugin_metric_type_e::RES_FAILED_DECOMPRESSION_COUNT: + return res_failed_decompression_counter; + case ngx_http_plugin_metric_type_e::REQ_SUCCESSFUL_COMPRESSION_COUNT: + return req_successful_compression_counter; + case ngx_http_plugin_metric_type_e::RES_SUCCESSFUL_COMPRESSION_COUNT: + return res_successful_compression_counter; + case ngx_http_plugin_metric_type_e::REQ_SUCCESSFUL_DECOMPRESSION_COUNT: + return req_successful_decompression_counter; + case ngx_http_plugin_metric_type_e::RES_SUCCESSFUL_DECOMPRESSION_COUNT: + return res_successful_decompression_counter; + case ngx_http_plugin_metric_type_e::CORRUPTED_ZIP_SKIPPED_SESSION_COUNT: + return corrupted_zip_skipped_session_counter; + case ngx_http_plugin_metric_type_e::THREAD_TIMEOUT: + return thread_timeout; + case ngx_http_plugin_metric_type_e::REG_THREAD_TIMEOUT: + return reg_thread_timeout; + case ngx_http_plugin_metric_type_e::REQ_HEADER_THREAD_TIMEOUT: + return req_header_thread_timeout; + case ngx_http_plugin_metric_type_e::REQ_BODY_THREAD_TIMEOUT: + return req_body_thread_timeout; + case ngx_http_plugin_metric_type_e::AVERAGE_REQ_BODY_SIZE_UPON_TIMEOUT: + return average_req_body_size_upon_timeout; + case ngx_http_plugin_metric_type_e::MAX_REQ_BODY_SIZE_UPON_TIMEOUT: + return max_req_body_size_upon_timeout; + case ngx_http_plugin_metric_type_e::MIN_REQ_BODY_SIZE_UPON_TIMEOUT: + return min_req_body_size_upon_timeout; + case ngx_http_plugin_metric_type_e::RES_HEADER_THREAD_TIMEOUT: + return res_header_thread_timeout; + case ngx_http_plugin_metric_type_e::RES_BODY_THREAD_TIMEOUT: + return res_body_thread_timeout; + case ngx_http_plugin_metric_type_e::AVERAGE_RES_BODY_SIZE_UPON_TIMEOUT: + return average_res_body_size_upon_timeout; + case ngx_http_plugin_metric_type_e::MAX_RES_BODY_SIZE_UPON_TIMEOUT: + return max_res_body_size_upon_timeout; + case ngx_http_plugin_metric_type_e::MIN_RES_BODY_SIZE_UPON_TIMEOUT: + return min_res_body_size_upon_timeout; + case ngx_http_plugin_metric_type_e::THREAD_FAILURE: + return thread_failure; + case ngx_http_plugin_metric_type_e::REQ_PROCCESSING_TIMEOUT: + return req_proccessing_timeout; + case ngx_http_plugin_metric_type_e::RES_PROCCESSING_TIMEOUT: + return res_proccessing_timeout; + case ngx_http_plugin_metric_type_e::REQ_FAILED_TO_REACH_UPSTREAM: + return req_failed_to_reach_upstream; + case ngx_http_plugin_metric_type_e::CPU_USAGE: + return static_cast(cpu_event.getCPU()); + default: + dbgWarning(D_METRICS_NGINX_ATTACHMENT) + << "Unsupported metric type. 
Type: " << static_cast(metric_type); + return 0; + } +} + +void +nginxIntakerMetric::upon(const nginxIntakerEvent &event) +{ + successfull_inspection_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::INSPECTION_SUCCESSES_COUNT) + ); + transparent_mode_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::TRANSPARENTS_COUNT) + ); + total_transparent_time.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::TOTAL_TRANSPARENTS_TIME) + ); + open_failure_inspection_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::INSPECTION_OPEN_FAILURES_COUNT) + ); + close_failure_inspection_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::INSPECTION_CLOSE_FAILURES_COUNT) + ); + inject_verdict_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::INJECT_VERDICTS_COUNT) + ); + inspect_verdict_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::INSPECT_VERDICTS_COUNT) + ); + accept_verdict_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::ACCEPT_VERDICTS_COUNT) + ); + drop_verdict_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::DROP_VERDICTS_COUNT) + ); + irrelevant_verdict_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::IRRELEVANT_VERDICTS_COUNT) + ); + reconf_verdict_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::RECONF_VERDICTS_COUNT) + ); + average_overall_processing_time_until_verdict.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::AVERAGE_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT) + ); + max_overall_processing_time_until_verdict.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::MAX_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT) + ); + min_overall_processing_time_until_verdict.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::MIN_OVERALL_PPROCESSING_TIME_UNTIL_VERDICT) + ); + average_req_processing_time_until_verdict.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::AVERAGE_REQ_PPROCESSING_TIME_UNTIL_VERDICT) + ); + max_req_processing_time_until_verdict.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::MAX_REQ_PPROCESSING_TIME_UNTIL_VERDICT) + ); + min_req_processing_time_until_verdict.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::MIN_REQ_PPROCESSING_TIME_UNTIL_VERDICT) + ); + average_res_processing_time_until_verdict.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::AVERAGE_RES_PPROCESSING_TIME_UNTIL_VERDICT) + ); + max_res_processing_time_until_verdict.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::MAX_RES_PPROCESSING_TIME_UNTIL_VERDICT) + ); + min_res_processing_time_until_verdict.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::MIN_RES_PPROCESSING_TIME_UNTIL_VERDICT) + ); + req_failed_compression_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REQ_FAILED_COMPRESSION_COUNT) + ); + res_failed_compression_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::RES_FAILED_COMPRESSION_COUNT) + ); + req_failed_decompression_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REQ_FAILED_DECOMPRESSION_COUNT) + ); + res_failed_decompression_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::RES_FAILED_DECOMPRESSION_COUNT) + ); + 
req_successful_compression_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REQ_SUCCESSFUL_COMPRESSION_COUNT) + ); + res_successful_compression_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::RES_SUCCESSFUL_COMPRESSION_COUNT) + ); + req_successful_decompression_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REQ_SUCCESSFUL_DECOMPRESSION_COUNT) + ); + res_successful_decompression_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::RES_SUCCESSFUL_DECOMPRESSION_COUNT) + ); + corrupted_zip_skipped_session_counter.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::CORRUPTED_ZIP_SKIPPED_SESSION_COUNT) + ); + thread_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::THREAD_TIMEOUT) + ); + reg_thread_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REG_THREAD_TIMEOUT) + ); + req_header_thread_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REQ_HEADER_THREAD_TIMEOUT) + ); + req_body_thread_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REQ_BODY_THREAD_TIMEOUT) + ); + average_req_body_size_upon_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::AVERAGE_REQ_BODY_SIZE_UPON_TIMEOUT) + ); + max_req_body_size_upon_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::MAX_REQ_BODY_SIZE_UPON_TIMEOUT) + ); + min_req_body_size_upon_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::MIN_REQ_BODY_SIZE_UPON_TIMEOUT) + ); + res_header_thread_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::RES_HEADER_THREAD_TIMEOUT) + ); + res_body_thread_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::RES_BODY_THREAD_TIMEOUT) + ); + average_res_body_size_upon_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::AVERAGE_RES_BODY_SIZE_UPON_TIMEOUT) + ); + max_res_body_size_upon_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::MAX_RES_BODY_SIZE_UPON_TIMEOUT) + ); + min_res_body_size_upon_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::MIN_RES_BODY_SIZE_UPON_TIMEOUT) + ); + thread_failure.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::THREAD_FAILURE) + ); + req_proccessing_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REQ_PROCCESSING_TIMEOUT) + ); + res_proccessing_timeout.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::RES_PROCCESSING_TIMEOUT) + ); + req_failed_to_reach_upstream.report( + event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REQ_FAILED_TO_REACH_UPSTREAM) + ); + event.notifyCPU(); +} diff --git a/components/attachment-intakers/nginx_attachment/nginx_parser.cc b/components/attachment-intakers/nginx_attachment/nginx_parser.cc new file mode 100755 index 0000000..0e29449 --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/nginx_parser.cc @@ -0,0 +1,369 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "nginx_parser.h"
+
+#include "config.h"
+#include
+#include "connkey.h"
+#include "compression_utils.h"
+#include "nginx_attachment.h"
+#include "nginx_attachment_opaque.h"
+#include "user_identifiers_config.h"
+#include "debug.h"
+
+using namespace std;
+
+USE_DEBUG_FLAG(D_NGINX_ATTACHMENT_PARSER);
+
+Buffer NginxParser::tenant_header_key = Buffer();
+static const Buffer proxy_ip_header_key("X-Forwarded-For", 15, Buffer::MemoryType::STATIC);
+static const Buffer source_ip("sourceip", 8, Buffer::MemoryType::STATIC);
+
+map<Buffer, CompressionType> NginxParser::content_encodings = {
+    {Buffer("identity"), CompressionType::NO_COMPRESSION},
+    {Buffer("gzip"), CompressionType::GZIP},
+    {Buffer("deflate"), CompressionType::ZLIB}
+};
+
+Maybe<HttpTransactionData>
+NginxParser::parseStartTrasaction(const Buffer &data)
+{
+    return HttpTransactionData::createTransactionData(data);
+}
+
+Maybe<uint16_t>
+NginxParser::parseResponseCode(const Buffer &data)
+{
+    if (data.size() < sizeof(uint16_t)) {
+        dbgWarning(D_NGINX_ATTACHMENT_PARSER) << "Failed to get response code";
+        return genError("Response code size is lower than uint16_t");
+    }
+
+    return reinterpret_cast<const uint16_t *>(data.data())[0];
+}
+
+Maybe<uint64_t>
+NginxParser::parseContentLength(const Buffer &data)
+{
+    if (data.size() < sizeof(uint64_t)) {
+        dbgWarning(D_NGINX_ATTACHMENT_PARSER)
+            << "Failed to get content length";
+        return genError("Content length size is lower than uint64");
+    }
+    return **data.getTypePtr<uint64_t>(0);
+}
+
+Maybe<Buffer>
+genHeaderPart(const Buffer &raw_data, uint16_t &cur_pos)
+{
+    if (cur_pos >= raw_data.size()) return genError("Current header data possession is after header part end");
+
+    auto value = raw_data.getTypePtr<uint16_t>(cur_pos);
+
+    if (!value.ok()) {
+        return genError("Failed to get header part size: " + value.getErr());
+    }
+
+    uint16_t part_len = *(value.unpack());
+    cur_pos += sizeof(uint16_t);
+    if (cur_pos + part_len > raw_data.size()) return genError("Header data extends beyond current buffer");
+
+    const u_char *part_data = raw_data.data();
+    Buffer header_part(part_data + cur_pos, part_len, Buffer::MemoryType::VOLATILE);
+
+    cur_pos += part_len;
+
+    return header_part;
+}
+
+Maybe<vector<HttpHeader>>
+genHeaders(const Buffer &raw_data)
+{
+    dbgFlow(D_NGINX_ATTACHMENT_PARSER) << "Generating headers";
+
+    uint16_t cur_pos = 0;
+    auto is_last_header_data = raw_data.getTypePtr<uint8_t>(cur_pos);
+    if (!is_last_header_data.ok()) {
+        return genError("Failed to get 'is last header' value: " + is_last_header_data.getErr());
+    }
+
+    bool is_last_header = *(is_last_header_data.unpack()) == 1;
+    dbgTrace(D_NGINX_ATTACHMENT_PARSER)
+        << "Current header bulk "
+        << (is_last_header ?
"contains " : "does not contain ") + << "last header"; + + cur_pos += sizeof(uint8_t); + auto part_count = raw_data.getTypePtr(cur_pos); + if (!part_count.ok()) { + return genError("Failed to get part count value: " + part_count.getErr()); + } + dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Current header bulk index: " << to_string(*(part_count.unpack())); + + static const string key_val_desc[] = {"key", "value"}; + Maybe header[2] = {Buffer(), Buffer()}; + vector headers; + + cur_pos += sizeof(uint8_t); + uint8_t cur_part = *(part_count.unpack()); + while (cur_pos < raw_data.size()) { + for (int i = 0 ; i < 2 ; i ++) { + dbgTrace(D_NGINX_ATTACHMENT_PARSER) + << "Generating" + << (is_last_header ? " last " : " ") + << "header's " + << key_val_desc[i]; + + header[i] = genHeaderPart(raw_data, cur_pos); + if (!header[i].ok()) { + return genError("Failed to generate header's " + key_val_desc[i] + ":" + header[i].getErr()); + } + + dbgTrace(D_NGINX_ATTACHMENT_PARSER) + << "Successfully generated header part. Header part type:" + << key_val_desc[i] + << ", data: '" + << dumpHex(header[i].unpack()) + << "', size: " + << header[i].unpack().size(); + } + + // is_last_header in bulk relates only to the last header in the bulk. + headers.emplace_back( + header[0].unpack(), + header[1].unpack(), + cur_part, + cur_pos >= raw_data.size() && is_last_header + ); + + dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "end pos: " << cur_pos; + cur_part++; + } + return headers; +} + +Maybe> +NginxParser::parseRequestHeaders(const Buffer &data) +{ + auto parsed_headers = genHeaders(data); + if (!parsed_headers.ok()) return parsed_headers.passErr(); + + auto i_transaction_table = Singleton::Consume>::by(); + + for (const HttpHeader &header : *parsed_headers) { + auto source_identifiers = getConfigurationWithDefault( + UsersAllIdentifiersConfig(), + "rulebase", + "usersIdentifiers" + ); + source_identifiers.parseRequestHeaders(header); + + NginxAttachmentOpaque &opaque = i_transaction_table->getState(); + opaque.addToSavedData( + HttpTransactionData::req_headers, + static_cast(header.getKey()) + ": " + static_cast(header.getValue()) + "\r\n" + ); + + if (NginxParser::tenant_header_key == header.getKey()) { + dbgDebug(D_NGINX_ATTACHMENT_PARSER) + << "Identified active tenant header. 
Key: " + << dumpHex(header.getKey()) + << ", Value: " + << dumpHex(header.getValue()); + + string active_tenant(static_cast(header.getValue())); + opaque.setSessionTenant(active_tenant); + } else if (proxy_ip_header_key == header.getKey()) { + source_identifiers.setXFFValuesToOpaqueCtx(header, UsersAllIdentifiersConfig::ExtractType::PROXYIP); + } + } + + return parsed_headers; +} + +Maybe> +NginxParser::parseResponseHeaders(const Buffer &data) +{ + return genHeaders(data); +} + +Maybe +decompressBuffer(CompressionStream *compression_stream, const Buffer &compressed_buffer) +{ + if (compressed_buffer.size() == 0) return Buffer(); + + auto compression_result = decompressData(compression_stream, compressed_buffer.size(), compressed_buffer.data()); + if (!compression_result.ok) return genError("Failed to decompress data"); + + if (compression_result.output == nullptr) return Buffer();; + + Buffer decompressed_buffer( + compression_result.output, + compression_result.num_output_bytes, + Buffer::MemoryType::OWNED + ); + free(compression_result.output); + + return decompressed_buffer; +} + +Maybe +parseCompressedHttpBodyData(CompressionStream *compression_stream, const Buffer &body_raw_data) +{ + if (compression_stream == nullptr) return genError("Cannot decompress body without compression stream"); + + Maybe decompressed_buffer_maybe = decompressBuffer(compression_stream, body_raw_data); + if (!decompressed_buffer_maybe.ok()) { + return genError("Failed to decompress buffer. Error: " + decompressed_buffer_maybe.getErr()); + } + + return decompressed_buffer_maybe.unpack(); +} + +Maybe +genBody(const Buffer &raw_response_body, CompressionStream *compression_stream = nullptr) +{ + uint offset = 0; + auto is_last_part_maybe = raw_response_body.getTypePtr(offset); + if (!is_last_part_maybe.ok()) { + return genError("Failed to get 'is last part' value: " + is_last_part_maybe.getErr()); + } + bool is_last_part = *is_last_part_maybe.unpack(); + + offset += sizeof(uint8_t); + auto part_count_maybe = raw_response_body.getTypePtr(offset); + if (!part_count_maybe.ok()) { + return genError("Failed to get part count value: " + part_count_maybe.getErr()); + } + uint8_t body_chunk_index = *part_count_maybe.unpack(); + + offset += sizeof(uint8_t); + Buffer body_raw_data( + raw_response_body.data() + offset, + raw_response_body.size() - offset, + Buffer::MemoryType::VOLATILE + ); + + if (compression_stream == nullptr) { + dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Successfully generated body chunk from non compressed buffer"; + return HttpBody(body_raw_data, is_last_part, body_chunk_index); + } + + Maybe body_data_maybe = parseCompressedHttpBodyData(compression_stream, body_raw_data); + if (!body_data_maybe.ok()) { + dbgWarning(D_NGINX_ATTACHMENT_PARSER) + << "Failed to decompress body chunk. Chunk index: " + << to_string(body_chunk_index) + << ", raw input size: " + << body_raw_data.size(); + return genError("Failed to parse HTTP body data: " + body_data_maybe.getErr()); + } + + dbgTrace(D_NGINX_ATTACHMENT_PARSER) + << "Successfully generated decompressed body chunk. Compressed original size: " + << body_raw_data.size(); + + return HttpBody(body_data_maybe.unpack(), is_last_part, body_chunk_index); +} + +Maybe +NginxParser::parseRequestBody(const Buffer &data) +{ + Maybe body = genBody(data); + if (!body.ok()) return genError("Failed to generate body from buffer: " + body.getErr()); + + dbgTrace(D_NGINX_ATTACHMENT_PARSER) + << "Successfully generated request body chunk. 
Chunk index: " + << to_string(body.unpack().getBodyChunkIndex()) + << ", is last chunk: " + << (body.unpack().isLastChunk() ? "true" : "false") + << ", size: " + << body.unpack().getData().size() + << ", value: " + << dumpHex(body.unpack().getData()); + + auto i_transaction_table = Singleton::Consume>::by(); + auto &state = i_transaction_table->getState(); + state.setSavedData(HttpTransactionData::req_body, (*body).getData()); + + return body; +} + +Maybe +NginxParser::parseResponseBody(const Buffer &raw_response_body, CompressionStream *compression_stream) +{ + Maybe body = genBody(raw_response_body, compression_stream); + if (!body.ok()) return genError("Failed to generate body from buffer: " + body.getErr()); + + dbgTrace(D_NGINX_ATTACHMENT_PARSER) + << "Successfully generated response body chunk. Chunk index: " + << to_string(body.unpack().getBodyChunkIndex()) + << ", is last chunk: " + << (body.unpack().isLastChunk() ? "true" : "false") + << ", size: " + << body.unpack().getData().size() + << ", value: " + << dumpHex(body.unpack().getData());; + + return body; +} + +Maybe +NginxParser::parseContentEncoding(const vector &headers) +{ + static const Buffer content_encoding_header_key("Content-Encoding"); + + auto it = find_if( + headers.begin(), + headers.end(), + [&] (const HttpHeader &http_header) { return http_header.getKey() == content_encoding_header_key; } + ); + if (it == headers.end()) { + dbgTrace(D_NGINX_ATTACHMENT_PARSER) + << "Headers do not contain \"Content-Encoding\" header: " + << "body is expected to be plain-text"; + + return CompressionType::NO_COMPRESSION; + } + + dbgTrace(D_NGINX_ATTACHMENT_PARSER) + << "Found header with key \"Content-Encoding\". Value: " + << dumpHex((*it).getValue()); + auto content_encoding_maybe = convertToContentEncoding((*it).getValue()); + if (!content_encoding_maybe.ok()) { + return genError( + "Failed to parse value of \"Content-Encoding\" header: " + + content_encoding_maybe.getErr() + ); + } + dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Successfully parsed value of \"Content-Encoding\" header"; + + return content_encoding_maybe.unpack(); +} + +Maybe +NginxParser::convertToContentEncoding(const Buffer &content_encoding_header_value) +{ + if (content_encoding_header_value.contains(',')) { + return genError("Multiple content encodings for a specific HTTP request/response body are not supported"); + } + + if (content_encodings.find(content_encoding_header_value) == content_encodings.end()) { + return genError( + "Unsupported or undefined \"Content-Encoding\" value: " + + static_cast(content_encoding_header_value) + ); + } + return content_encodings[content_encoding_header_value]; +} diff --git a/components/attachment-intakers/nginx_attachment/nginx_parser.h b/components/attachment-intakers/nginx_attachment/nginx_parser.h new file mode 100755 index 0000000..37c96f0 --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/nginx_parser.h @@ -0,0 +1,45 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __NGINX_PARSER_H__ +#define __NGINX_PARSER_H__ + +#include + +#include "compression_utils.h" +#include "nginx_attachment_common.h" +#include "http_transaction_common.h" +#include "http_inspection_events.h" +#include "i_encryptor.h" + +class NginxParser : Singleton::Consume +{ +public: + static Maybe parseStartTrasaction(const Buffer &data); + static Maybe parseResponseCode(const Buffer &data); + static Maybe parseContentLength(const Buffer &data); + static Maybe> parseRequestHeaders(const Buffer &data); + static Maybe> parseResponseHeaders(const Buffer &data); + static Maybe parseRequestBody(const Buffer &data); + static Maybe parseResponseBody(const Buffer &raw_response_body, CompressionStream *compression_stream); + static Maybe parseContentEncoding(const std::vector &headers); + + static Buffer tenant_header_key; + +private: + static Maybe convertToContentEncoding(const Buffer &content_encoding_header_value); + + static std::map content_encodings; +}; + +#endif // __NGINX_PARSER_H__ diff --git a/components/attachment-intakers/nginx_attachment/user_identifiers_config.cc b/components/attachment-intakers/nginx_attachment/user_identifiers_config.cc new file mode 100755 index 0000000..267184e --- /dev/null +++ b/components/attachment-intakers/nginx_attachment/user_identifiers_config.cc @@ -0,0 +1,447 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "user_identifiers_config.h" + +#include "buffer.h" +#include "nginx_attachment.h" +#include "nginx_attachment_opaque.h" +#include "nginx_parser.h" +#include "cidrs_data.h" + +using namespace std; + +USE_DEBUG_FLAG(D_NGINX_ATTACHMENT_PARSER); + +static const Buffer header_key("headerkey", 9, Buffer::MemoryType::STATIC); +static const Buffer jwt("authorization", 13, Buffer::MemoryType::STATIC); +static const Buffer xff("x-forwarded-for", 15, Buffer::MemoryType::STATIC); +static const Buffer cookie("cookie", 6, Buffer::MemoryType::STATIC); +static const Buffer source_ip("sourceip", 8, Buffer::MemoryType::STATIC); +static const Buffer oauth("_oauth2_proxy", 13, Buffer::MemoryType::STATIC); +static const Buffer empty_buffer("", 0, Buffer::MemoryType::STATIC); + +const static string jwt_prefix = "Bearer "; + +UsersAllIdentifiersConfig::UsersIdentifiersConfig::UsersIdentifiersConfig() : source_identifier(source_ip){}; + +UsersAllIdentifiersConfig::UsersIdentifiersConfig::UsersIdentifiersConfig(const std::string &identifier) + : + source_identifier(identifier) +{} + +bool +UsersAllIdentifiersConfig::UsersIdentifiersConfig::operator==(const UsersIdentifiersConfig &other) const +{ + return source_identifier == other.source_identifier; +} + +void +UsersAllIdentifiersConfig::UsersIdentifiersConfig::load(cereal::JSONInputArchive &ar) +{ + parseJSONKey("sourceIdentifier", source_identifier, ar); + parseJSONKey>("identifierValues", identifier_values, ar); +} + +bool +UsersAllIdentifiersConfig::UsersIdentifiersConfig::isEqualSourceIdentifier(const string &other) const +{ + if (source_identifier.size() != other.size()) return false; + return equal( + source_identifier.begin(), + source_identifier.end(), + other.begin(), + [] (char c1, char c2) { return tolower(c1) == tolower(c2); } + ); +} + +UsersAllIdentifiersConfig::UsersAllIdentifiersConfig() +{ +} + +vector +UsersAllIdentifiersConfig::getHeaderValuesFromConfig(const string &header_key) const +{ + for (auto user_identifier : user_identifiers) { + if (user_identifier.isEqualSourceIdentifier(header_key)) { + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Match source identifier is found"; + return user_identifier.getIdentifierValues(); + } + } + return vector(); +} + +void +UsersAllIdentifiersConfig::load(cereal::JSONInputArchive &ar) +{ + vector tmp_user_identifiers; + parseJSONKey>("sourceIdentifiers", tmp_user_identifiers, ar); + + user_identifiers.clear(); + user_identifiers.reserve(tmp_user_identifiers.size() + 1); + for (auto &identifier : tmp_user_identifiers) { + if (identifier.getSourceIdentifier() == header_key) { + for (const auto &header : identifier.getIdentifierValues()) { + user_identifiers.emplace_back(header); + } + } else { + user_identifiers.push_back(identifier); + } + } + + vector default_order = { + UsersIdentifiersConfig(cookie), + UsersIdentifiersConfig(jwt), + UsersIdentifiersConfig(xff) + }; + + auto last_user_defined_header = find_first_of( + default_order.rbegin(), + default_order.rend(), + user_identifiers.begin(), + user_identifiers.end() + ); + if (last_user_defined_header == default_order.rend()) { + user_identifiers.insert(user_identifiers.end(), default_order.begin(), default_order.end()); + } else { + auto last_defined_forwards = find(default_order.begin(), default_order.end(), *last_user_defined_header); + user_identifiers.insert(user_identifiers.end(), last_defined_forwards + 1, default_order.end()); + } +} + +static bool +compareBufferWithoutCase(const Buffer &b1, const Buffer &b2) +{ + if (b1.size() != 
b2.size()) return false; + return equal(b1.begin(), b1.end(), b2.begin(), [] (u_char c1, u_char c2) { return tolower(c1) == tolower(c2); }); +} + +void +UsersAllIdentifiersConfig::setIdentifierTopaqueCtx(const HttpHeader &header) const +{ + if (compareBufferWithoutCase(jwt, header.getKey())) { + setJWTValuesToOpaqueCtx(header); + } else if (compareBufferWithoutCase(xff, header.getKey())) { + setXFFValuesToOpaqueCtx(header, ExtractType::SOURCEIDENTIFIER); + } else if (compareBufferWithoutCase(cookie, header.getKey())) { + setCookieValuesToOpaqueCtx(header); + } else { + setCustomHeaderToOpaqueCtx(header); + } +} + +bool +UsersAllIdentifiersConfig::isHigherPriority(const string ¤t_identifier, const string &header_key) const +{ + for (auto user_identifier : user_identifiers) { + if (user_identifier.isEqualSourceIdentifier(current_identifier)) return false; + if (user_identifier.isEqualSourceIdentifier(header_key)) return true; + } + return false; +} + +void +UsersAllIdentifiersConfig::setJWTValuesToOpaqueCtx(const HttpHeader &header) const +{ + const vector jwt_values = getHeaderValuesFromConfig(header.getKey()); + if (jwt_values.size() == 0) { + dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "No JWT keys exists in configuration"; + return; + } + if (bcmp(header.getValue().data(), jwt_prefix.c_str(), jwt_prefix.size()) != 0) { + dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Invalid JWT header, 'Bearer' prefix missing"; + return; + } + int start_dot = -1; + int end_dot = -1; + for (uint i = 0 ; i < header.getValue().size() ; i++) { + if (header.getValue()[i] == '.') { + if (start_dot < 0) { + start_dot = i; + } else if (end_dot < 0) { + end_dot = i; + } + } + } + if (start_dot < 0 || end_dot < 0) { + dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "The header does not contain dots"; + return; + } + + string jwt_str( + reinterpret_cast(header.getValue().data()), + start_dot + 1, + end_dot - start_dot - 1 + ); + I_Encryptor *encryptor = Singleton::Consume::by(); + auto decoded_jwt = encryptor->base64Decode(jwt_str); + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Base64 decoded JWT: " << decoded_jwt; + + auto i_transaction_table = Singleton::Consume>::by(); + if (!i_transaction_table || !i_transaction_table->hasState()) { + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table"; + return; + } + NginxAttachmentOpaque &opaque = i_transaction_table->getState(); + stringstream ss; + ss.str(decoded_jwt); + cereal::JSONInputArchive in_ar(ss); + for (const string &field_name : jwt_values) { + try { + string tmp_val; + in_ar(cereal::make_nvp(field_name, tmp_val)); + opaque.setSourceIdentifier(header.getKey(), tmp_val); + dbgDebug(D_NGINX_ATTACHMENT_PARSER) + << "Added source identifir to context. Key: " + << field_name + << ". Value: " + << tmp_val; + return; + } catch (const cereal::Exception &e) { + dbgTrace(D_NGINX_ATTACHMENT_PARSER) + << "Unable to find value for the key: " + << field_name + << ". 
Error: " + << e.what(); + } + } +} + +static string +stripOptionalPort(const string::const_iterator &first, const string::const_iterator &last) +{ + // Microsoft XFF+IPv6+Port yikes - see also here https://github.com/eclipse/jetty.project/issues/3630 + if (*first == '[') { + // Possible bracketed IPv6 address such as "[2001:db8::1]" + optional numeric ":" + auto close_bracket = find(first + 1, last, ']'); + if (close_bracket == last) return string(first, last); + return string(first+1, close_bracket); + } + + auto first_colon = find(first, last, ':'); + if (first_colon == last) return string(first, last); + + // If there is more than one colon it means its probably IPv6 address without brackets + auto second_colon = find(first_colon + 1, last, ':'); + if (second_colon != last) return string(first, last); + + // If there's only one colon it can't be IPv6 and can only be IPv4 with port + return string(first, first_colon); +} + +static vector +split(const string &str) +{ + vector elems; + elems.reserve(str.size() / 8 + 1); + auto sub_start = str.cbegin(), sub_end = str.cbegin(); + for (auto iter = str.cbegin(); iter != str.cend(); ++iter) { + if (isspace(*iter)) { + if (sub_start == iter) { + ++sub_start; + ++sub_end; + } + } else if (*iter == ',') { + if (sub_start != sub_end) { + elems.push_back(stripOptionalPort(sub_start, sub_end)); + } + sub_end = iter + 1; + sub_start = iter + 1; + } else { + sub_end = iter + 1; + } + } + + if (sub_start != sub_end) { + elems.push_back(stripOptionalPort(sub_start, sub_end)); + } + + return elems; +} + +static bool +isIpTrusted(const string &value, const vector &cidr_values) +{ + if (cidr_values.empty()) return true; + + for(const auto &cidr_data : cidr_values) { + if (cidr_data.contains(value)) return true; + } + + return false; +} + +Maybe +UsersAllIdentifiersConfig::parseXForwardedFor(const string &str) const +{ + vector header_values = split(str); + + if (header_values.empty()) return genError("No IP found in the xff header list"); + + vector xff_values = getHeaderValuesFromConfig("x-forwarded-for"); + vector cidr_values(xff_values.begin(), xff_values.end()); + + for (const string &value : header_values) { + if (!IPAddr::createIPAddr(value).ok()) { + dbgWarning(D_NGINX_ATTACHMENT_PARSER) << "Invalid IP address found in the xff header IPs list: " << value; + return genError("Invalid IP address"); + } + if (!isIpTrusted(value, cidr_values)) return genError("Untrusted Ip found"); + } + + return header_values[0]; +} + + +void +UsersAllIdentifiersConfig::setXFFValuesToOpaqueCtx(const HttpHeader &header, ExtractType type) const +{ + auto value = parseXForwardedFor(header.getValue()); + if (!value.ok()) { + dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Could not extract source identifier from X-Forwarded-For header"; + return; + }; + auto i_transaction_table = Singleton::Consume>::by(); + if (!i_transaction_table || !i_transaction_table->hasState()) { + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table"; + return; + } + NginxAttachmentOpaque &opaque = i_transaction_table->getState(); + if (type == ExtractType::SOURCEIDENTIFIER) { + opaque.setSourceIdentifier(header.getKey(), value.unpack()); + dbgDebug(D_NGINX_ATTACHMENT_PARSER) + << "Added source identifir to XFF " + << value.unpack(); + } else { + opaque.setSavedData(HttpTransactionData::proxy_ip_ctx, value.unpack()); + } +} + +void +UsersAllIdentifiersConfig::setCustomHeaderToOpaqueCtx(const HttpHeader &header) const +{ + auto i_transaction_table = Singleton::Consume>::by(); + if 
(!i_transaction_table || !i_transaction_table->hasState()) { + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table"; + return; + } + i_transaction_table->getState().setSourceIdentifier(header.getKey(), header.getValue()); + dbgDebug(D_NGINX_ATTACHMENT_PARSER) + << "Added source identifir to custom header: " + << static_cast(header.getValue()); + return; +} + +Maybe +UsersAllIdentifiersConfig::parseCookieElement( + const string::const_iterator &start, + const string::const_iterator &end, + const string &key) const +{ + auto curr_pos = start; + + // Skip whitespace + for (; curr_pos != end && isspace(*curr_pos); ++curr_pos); + + // Check key + for (auto key_pos = key.begin(); key_pos != key.end(); ++key_pos) { + if (curr_pos == end || tolower(*curr_pos) != tolower(*key_pos)) return genError("Key value not found"); + ++curr_pos; + } + + // Skip whitespace + for (; curr_pos != end && isspace(*curr_pos); ++curr_pos); + + // Check for '=' + if (curr_pos == end || *curr_pos != '=') return genError("Equal sign not found"); + ++curr_pos; + + // Skip whitespace + for (; curr_pos != end && isspace(*curr_pos); ++curr_pos); + + auto value_start = curr_pos; + + // Read value + for (; curr_pos != end && !isspace(*curr_pos); ++curr_pos); + + auto value_end = curr_pos; + + // Verify value read currectly - should be only whitespaces to the end; + for (; curr_pos != end && isspace(*curr_pos); ++curr_pos); + if (curr_pos != end) return genError("Unexpected characters when reading a value"); + + return string(value_start, value_end); +} + +Buffer +UsersAllIdentifiersConfig::extractKeyValueFromCookie(const string &cookie_value, const string &key) const +{ + auto curr_start = cookie_value.begin(); + auto end = cookie_value.end(); + + while (curr_start != end) { + auto curr_end = find(curr_start, end, ';'); + auto res = parseCookieElement(curr_start, curr_end, key); + if (res.ok()) { + if (key != oauth) return *res; + I_Encryptor *encryptor = Singleton::Consume::by(); + auto decoded_value = encryptor->base64Decode(*res); + auto decoded_end = find(decoded_value.begin(), decoded_value.end(), '|'); + return Buffer(string(decoded_value.begin(), decoded_end)); + } + + if (curr_end != end) ++curr_end; + curr_start = curr_end; + } + + return empty_buffer; +} + +void +UsersAllIdentifiersConfig::setCookieValuesToOpaqueCtx(const HttpHeader &header) const +{ + vector cookie_keys = getHeaderValuesFromConfig(header.getKey()); + cookie_keys.push_back(oauth); + cookie_keys.push_back("jsessionid"); + for (const string &key : cookie_keys) { + string value = extractKeyValueFromCookie(header.getValue(), key); + if (!value.empty()) { + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Set source identifier from cookie: Oauth 2"; + auto i_transaction_table = Singleton::Consume>::by(); + if (!i_transaction_table || !i_transaction_table->hasState()) { + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table"; + return; + } + NginxAttachmentOpaque &opaque = i_transaction_table->getState(); + opaque.setSourceIdentifier(header.getKey(), value); + return; + } + } +} + +void +UsersAllIdentifiersConfig::parseRequestHeaders(const HttpHeader &header) const +{ + auto i_transaction_table = Singleton::Consume>::by(); + if (!i_transaction_table || !i_transaction_table->hasState()) { + dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table"; + return; + } + + NginxAttachmentOpaque &opaque = i_transaction_table->getState(); + const string ¤t_identifier = opaque.getSourceIdentifiersType(); + + if 
(!isHigherPriority(current_identifier, header.getKey())) return; + + setIdentifierTopaqueCtx(header); +} diff --git a/components/generic_rulebase/CMakeLists.txt b/components/generic_rulebase/CMakeLists.txt new file mode 100755 index 0000000..d5a08b6 --- /dev/null +++ b/components/generic_rulebase/CMakeLists.txt @@ -0,0 +1,4 @@ +add_definitions(-DUSERSPACE) + +add_subdirectory(evaluators) +add_library(generic_rulebase generic_rulebase.cc rulebase_config.cc triggers_config.cc parameters_config.cc generic_rulebase_context.cc zones_config.cc zone.cc assets_config.cc match_query.cc) diff --git a/components/generic_rulebase/assets_config.cc b/components/generic_rulebase/assets_config.cc new file mode 100755 index 0000000..64e0984 --- /dev/null +++ b/components/generic_rulebase/assets_config.cc @@ -0,0 +1,137 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/assets_config.h" + +#include +#include +#include + +#include "generic_rulebase/generic_rulebase_utils.h" +#include "config.h" +#include "debug.h" +#include "ip_utilities.h" + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +using namespace std; + +void +RuleAsset::load(cereal::JSONInputArchive &archive_in) +{ + archive_in(cereal::make_nvp("assetId", asset_id)); + archive_in(cereal::make_nvp("assetName", asset_name)); + archive_in(cereal::make_nvp("assetUrls", asset_urls)); + + dbgWarning(D_RULEBASE_CONFIG) << "Adding asset with UID: " << asset_id; +} + +void +RuleAsset::AssetUrl::load(cereal::JSONInputArchive &archive_in) +{ + archive_in(cereal::make_nvp("protocol", protocol)); + transform(protocol.begin(), protocol.end(), protocol.begin(), [](unsigned char c) { return tolower(c); }); + + archive_in(cereal::make_nvp("ip", ip)); + archive_in(cereal::make_nvp("port", port)); + + int value; + if (protocol == "*") { + is_any_proto = true; + } else { + is_any_proto = false; + try { + value = 0; + if(protocol == "udp") value = IPPROTO_UDP; + if(protocol == "tcp") value = IPPROTO_TCP; + if(protocol == "dccp") value = IPPROTO_DCCP; + if(protocol == "sctp") value = IPPROTO_SCTP; + if(protocol == "icmp") value = IPPROTO_ICMP; + if(protocol == "icmpv6") value = IPPROTO_ICMP; + + if (value > static_cast(UINT8_MAX) || value < 0) { + dbgWarning(D_RULEBASE_CONFIG) + << "provided value is not a legal IP protocol number. Value: " + << protocol; + } else { + parsed_proto = value; + } + } catch (...) { + dbgWarning(D_RULEBASE_CONFIG) << "provided value is not a legal IP protocol. Value: " << protocol; + } + } + + if (port == "*") { + is_any_port = true; + } else { + is_any_port = false; + try { + value = stoi(port); + if (value > static_cast(UINT16_MAX) || value < 0) { + dbgWarning(D_RULEBASE_CONFIG) << "provided value is not a legal port number. Value: " << port; + } else { + parsed_port = value; + } + } catch (...) { + dbgWarning(D_RULEBASE_CONFIG) << "provided value is not a legal port. 
Value: " << port; + } + } + + if (ip == "*") { + is_any_ip = true; + } else { + is_any_ip = false; + auto ip_addr = IPAddr::createIPAddr(ip); + if (!ip_addr.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Could not create IP address. Error: " << ip_addr.getErr(); + } else { + parsed_ip = ConvertToIpAddress(ip_addr.unpackMove()); + } + } +} + +IpAddress +RuleAsset::AssetUrl::ConvertToIpAddress(const IPAddr &addr) +{ + IpAddress address; + switch (addr.getType()) { + case IPType::UNINITIALIZED: { + address.addr4_t = {0}; + address.ip_type = IP_VERSION_ANY; + break; + } + case IPType::V4: { + address.addr4_t = addr.getIPv4(); + address.ip_type = IP_VERSION_4; + break; + } + case IPType::V6: { + address.addr6_t = addr.getIPv6(); + address.ip_type = IP_VERSION_6; + break; + } + default: + address.addr4_t = {0}; + address.ip_type = IP_VERSION_ANY; + dbgWarning(D_RULEBASE_CONFIG) << "Unsupported IP type: " << static_cast(addr.getType()); + } + return address; +} + +const Assets Assets::empty_assets_config = Assets(); + +void +Assets::preload() +{ + registerExpectedSetting("rulebase", "usedAssets"); +} diff --git a/components/generic_rulebase/evaluators/CMakeLists.txt b/components/generic_rulebase/evaluators/CMakeLists.txt new file mode 100755 index 0000000..1de0cc6 --- /dev/null +++ b/components/generic_rulebase/evaluators/CMakeLists.txt @@ -0,0 +1 @@ +add_library(generic_rulebase_evaluators asset_eval.cc parameter_eval.cc practice_eval.cc query_eval.cc trigger_eval.cc zone_eval.cc connection_eval.cc http_transaction_data_eval.cc) diff --git a/components/generic_rulebase/evaluators/asset_eval.cc b/components/generic_rulebase/evaluators/asset_eval.cc new file mode 100755 index 0000000..3207ed0 --- /dev/null +++ b/components/generic_rulebase/evaluators/asset_eval.cc @@ -0,0 +1,40 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/evaluators/asset_eval.h" + +#include +#include + +#include "generic_rulebase/assets_config.h" +#include "config.h" +#include "debug.h" + +using namespace std; + +string AssetMatcher::ctx_key = "asset_id"; + +AssetMatcher::AssetMatcher(const vector ¶ms) +{ + if (params.size() != 1) reportWrongNumberOfParams(AssetMatcher::getName(), params.size(), 1, 1); + asset_id = params[0]; +} + +Maybe +AssetMatcher::evalVariable() const +{ + I_Environment *env = Singleton::Consume::by(); + auto bc_asset_id_ctx = env->get(AssetMatcher::ctx_key); + + return bc_asset_id_ctx.ok() && *bc_asset_id_ctx == asset_id; +} diff --git a/components/generic_rulebase/evaluators/connection_eval.cc b/components/generic_rulebase/evaluators/connection_eval.cc new file mode 100755 index 0000000..589a96d --- /dev/null +++ b/components/generic_rulebase/evaluators/connection_eval.cc @@ -0,0 +1,299 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/evaluators/connection_eval.h" + +#include +#include + +#include "generic_rulebase/rulebase_config.h" +#include "config.h" +#include "debug.h" +#include "ip_utilities.h" + +using namespace std; +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +string IpAddressMatcher::ctx_key = "ipAddress"; +string SourceIpMatcher::ctx_key = "sourceIP"; +string DestinationIpMatcher::ctx_key = "destinationIP"; +string SourcePortMatcher::ctx_key = "sourcePort"; +string ListeningPortMatcher::ctx_key = "listeningPort"; +string IpProtocolMatcher::ctx_key = "ipProtocol"; +string UrlMatcher::ctx_key = "url"; + +Maybe +getIpAddrFromEnviroment(I_Environment *env, Context::MetaDataType enum_data_type, const string &str_data_type) +{ + auto ip_str = env->get(enum_data_type); + if (!ip_str.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to get " << str_data_type << " from the enviroment."; + return genError("Failed to get " + str_data_type + " from the enviroment."); + } + return IPAddr::createIPAddr(ip_str.unpack()); +} + +bool +checkIfIpInRangesVec(const vector> &values, const IPAddr &ip_to_check) +{ + if (values.size() == 0) { + dbgTrace(D_RULEBASE_CONFIG) << "Ip addersses vector empty. Match is true."; + return true; + } + for (const CustomRange &range : values) { + if (range.contains(ip_to_check)) { + dbgTrace(D_RULEBASE_CONFIG) << "Ip adderss matched: " << ip_to_check; + return true; + } + } + dbgTrace(D_RULEBASE_CONFIG) << "Ip adderss not match: " << ip_to_check; + return false; +} + + +IpAddressMatcher::IpAddressMatcher(const vector ¶ms) +{ + for (const string ¶m : params) { + Maybe> ip_range = CustomRange::createRange(param); + if (!ip_range.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to create ip. Error: " + ip_range.getErr(); + continue; + } + values.push_back(ip_range.unpack()); + } +} + +Maybe +IpAddressMatcher::evalVariable() const +{ + I_Environment *env = Singleton::Consume::by(); + Maybe subject_ip = getIpAddrFromEnviroment( + env, + Context::MetaDataType::SubjectIpAddr, + "subject ip address" + ); + if (subject_ip.ok() && checkIfIpInRangesVec(values, subject_ip.unpack())) return true; + + Maybe other_ip = getIpAddrFromEnviroment( + env, + Context::MetaDataType::OtherIpAddr, + "other ip address" + ); + if (other_ip.ok() && checkIfIpInRangesVec(values, other_ip.unpack())) return true; + if (!subject_ip.ok() && !other_ip.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Error in getting subject ip and other ip from the enviroment"; + return false; + } + dbgTrace(D_RULEBASE_CONFIG) << "Ip adderss didn't match"; + return false; +} + +SourceIpMatcher::SourceIpMatcher(const vector ¶ms) +{ + for (const string ¶m : params) { + Maybe> ip_range = CustomRange::createRange(param); + if (!ip_range.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to create source ip. 
Error: " + ip_range.getErr(); + continue; + } + values.push_back(ip_range.unpack()); + } +} + +Maybe +SourceIpMatcher::evalVariable() const +{ + I_Environment *env = Singleton::Consume::by(); + auto direction_maybe = env->get(Context::MetaDataType::Direction); + if (!direction_maybe.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to get direction from the enviroment."; + return false; + } + string direction = direction_maybe.unpack(); + if (direction == "incoming") { + Maybe other_ip = getIpAddrFromEnviroment( + env, + Context::MetaDataType::OtherIpAddr, + "other ip address" + ); + return other_ip.ok() && checkIfIpInRangesVec(values, other_ip.unpack()); + } else if (direction == "outgoing") { + Maybe subject_ip = getIpAddrFromEnviroment( + env, + Context::MetaDataType::SubjectIpAddr, + "subject ip address" + ); + return subject_ip.ok() && checkIfIpInRangesVec(values, subject_ip.unpack()); + } + dbgTrace(D_RULEBASE_CONFIG) << "Source ip adderss didn't match"; + return false; +} + +DestinationIpMatcher::DestinationIpMatcher(const vector ¶ms) +{ + for (const string ¶m : params) { + Maybe> ip_range = CustomRange::createRange(param); + if (!ip_range.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to create destination ip. Error: " + ip_range.getErr(); + continue; + } + values.push_back(ip_range.unpack()); + } +} + +Maybe +DestinationIpMatcher::evalVariable() const +{ + I_Environment *env = Singleton::Consume::by(); + auto direction_maybe = env->get(Context::MetaDataType::Direction); + if (!direction_maybe.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to get direction."; + return false; + } + string direction = direction_maybe.unpack(); + if (direction == "outgoing") { + Maybe other_ip = getIpAddrFromEnviroment( + env, + Context::MetaDataType::OtherIpAddr, + "other ip address" + ); + return other_ip.ok() && checkIfIpInRangesVec(values, other_ip.unpack()); + } else if (direction == "incoming") { + Maybe subject_ip = getIpAddrFromEnviroment( + env, + Context::MetaDataType::SubjectIpAddr, + "subject ip address" + ); + return subject_ip.ok() && checkIfIpInRangesVec(values, subject_ip.unpack()); + } + dbgTrace(D_RULEBASE_CONFIG) << "Destination ip adderss didn't match"; + return false; +} + +SourcePortMatcher::SourcePortMatcher(const vector ¶ms) +{ + for (const string ¶m : params) { + Maybe> port_range = CustomRange::createRange(param); + if (!port_range.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to create source port."; + continue; + } + values.push_back(port_range.unpack()); + } +} + +Maybe +SourcePortMatcher::evalVariable() const +{ + dbgTrace(D_RULEBASE_CONFIG) << "Source is not a match"; + return false; +} + + +ListeningPortMatcher::ListeningPortMatcher(const vector ¶ms) +{ + for (const string ¶m : params) { + Maybe> port_range = CustomRange::createRange(param); + if (!port_range.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to create listening port range."; + continue; + } + values.push_back(port_range.unpack()); + } +} + +Maybe +ListeningPortMatcher::evalVariable() const +{ + I_Environment *env = Singleton::Consume::by(); + auto port_str = env->get(Context::MetaDataType::Port); + if (!port_str.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to get port from the enviroment."; + return false; + } + PortNumber port; + if (ConnKeyUtil::fromString(port_str.unpack(), port)) { + if (values.size() == 0) return true; + for (const CustomRange &port_range : values) { + if (port_range.contains(port)) { + dbgTrace(D_RULEBASE_CONFIG) << "Listening port is a match. 
Value: " << port_str.unpack(); + return true; + } + } + } + dbgTrace(D_RULEBASE_CONFIG) << "Listening port is not a match. Value: " << port_str.unpack(); + return false; +} + +IpProtocolMatcher::IpProtocolMatcher(const vector ¶ms) +{ + for (const string ¶m : params) { + Maybe> proto_range = CustomRange::createRange(param); + if (!proto_range.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to create ip protocol."; + continue; + } + values.push_back(proto_range.unpack()); + } +} + +Maybe +IpProtocolMatcher::evalVariable() const +{ + I_Environment *env = Singleton::Consume::by(); + auto proto_str = env->get(Context::MetaDataType::Protocol); + if (!proto_str.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to get ip protocol from the enviroment."; + return false; + } + IPProto protocol; + if (ConnKeyUtil::fromString(proto_str.unpack(), protocol)) { + if (values.size() == 0) return true; + for (const CustomRange &proto_range : values) { + if (proto_range.contains(protocol)) { + dbgTrace(D_RULEBASE_CONFIG) << "Ip protocol is a match. Value: " << proto_str.unpack(); + return true; + } + } + } + dbgTrace(D_RULEBASE_CONFIG) << "Source port is not a match. Value: " << proto_str.unpack(); + return false; +} + +UrlMatcher::UrlMatcher(const vector ¶ms) : values(params) {} + +Maybe +UrlMatcher::evalVariable() const +{ + I_Environment *env = Singleton::Consume::by(); + auto curr_url_ctx = env->get(Context::MetaDataType::Url); + if (!curr_url_ctx.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to get URL from the enviroment."; + return false; + } + + if (values.size() == 0) { + dbgTrace(D_RULEBASE_CONFIG) << "Matched URL on \"any\". Url: " << *curr_url_ctx; + return true; + } + + for (const string &url : values) { + if (*curr_url_ctx == url) { + dbgTrace(D_RULEBASE_CONFIG) << "Matched URL. Value: " << *curr_url_ctx; + return true; + } + } + + dbgTrace(D_RULEBASE_CONFIG) << "URL is not a match. Value: " << *curr_url_ctx; + return false; +} diff --git a/components/generic_rulebase/evaluators/http_transaction_data_eval.cc b/components/generic_rulebase/evaluators/http_transaction_data_eval.cc new file mode 100755 index 0000000..3706d68 --- /dev/null +++ b/components/generic_rulebase/evaluators/http_transaction_data_eval.cc @@ -0,0 +1,125 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#include "generic_rulebase/evaluators/http_transaction_data_eval.h"
+
+#include <algorithm>
+#include <boost/lexical_cast.hpp>
+
+#include "http_transaction_data.h"
+#include "environment/evaluator_templates.h"
+#include "i_environment.h"
+#include "singleton.h"
+#include "sasal.h"
+
+SASAL_START // HTTP Manager - Transaction data
+
+using namespace std;
+using namespace EnvironmentHelper;
+
+EqualHost::EqualHost(const vector<string> &params)
+{
+    if (params.size() != 1) reportWrongNumberOfParams("EqualHost", params.size(), 1, 1);
+    host = params[0];
+}
+
+Maybe<bool>
+EqualHost::evalVariable() const
+{
+    I_Environment *env = Singleton::Consume<I_Environment>::by();
+    auto host_ctx = env->get<string>(HttpTransactionData::host_name_ctx);
+
+    if (!host_ctx.ok())
+    {
+        return false;
+    }
+
+    std::string lower_host_ctx = host_ctx.unpack();
+    std::transform(lower_host_ctx.begin(), lower_host_ctx.end(), lower_host_ctx.begin(), ::tolower);
+
+    std::string lower_host = host;
+    std::transform(lower_host.begin(), lower_host.end(), lower_host.begin(), ::tolower);
+
+    if (lower_host_ctx == lower_host) return true;
+    size_t pos = lower_host_ctx.find_last_of(':');
+    if (pos == string::npos) return false;
+    lower_host_ctx = string(lower_host_ctx.data(), pos);
+    return lower_host_ctx == lower_host;
+}
+
+EqualListeningIP::EqualListeningIP(const vector<string> &params)
+{
+    if (params.size() != 1) reportWrongNumberOfParams("EqualListeningIP", params.size(), 1, 1);
+
+    auto maybe_ip = IPAddr::createIPAddr(params[0]);
+    if (!maybe_ip.ok()) reportWrongParamType(getName(), params[0], "Not a valid IP Address");
+
+    listening_ip = maybe_ip.unpack();
+}
+
+Maybe<bool>
+EqualListeningIP::evalVariable() const
+{
+    I_Environment *env = Singleton::Consume<I_Environment>::by();
+    auto listening_ip_ctx = env->get<IPAddr>(HttpTransactionData::listening_ip_ctx);
+    return listening_ip_ctx.ok() && listening_ip_ctx.unpack() == listening_ip;
+}
+
+EqualListeningPort::EqualListeningPort(const vector<string> &params)
+{
+    if (params.size() != 1) reportWrongNumberOfParams("EqualListeningPort", params.size(), 1, 1);
+
+    try {
+        listening_port = boost::lexical_cast<PortNumber>(params[0]);
+    } catch (boost::bad_lexical_cast const&) {
+        reportWrongParamType(getName(), params[0], "Not a valid port number");
+    }
+}
+
+Maybe<bool>
+EqualListeningPort::evalVariable() const
+{
+    I_Environment *env = Singleton::Consume<I_Environment>::by();
+    auto port_ctx = env->get<PortNumber>(HttpTransactionData::listening_port_ctx);
+
+    return port_ctx.ok() && port_ctx.unpack() == listening_port;
+}
+
+BeginWithUri::BeginWithUri(const vector<string> &params)
+{
+    if (params.size() != 1) reportWrongNumberOfParams("BeginWithUri", params.size(), 1, 1);
+    uri_prefix = params[0];
+}
+
+Maybe<bool>
+BeginWithUri::evalVariable() const
+{
+    I_Environment *env = Singleton::Consume<I_Environment>::by();
+    auto uri_ctx = env->get<string>(HttpTransactionData::uri_ctx);
+
+    if (!uri_ctx.ok())
+    {
+        return false;
+    }
+
+    std::string lower_uri_ctx = uri_ctx.unpack();
+    std::transform(lower_uri_ctx.begin(), lower_uri_ctx.end(), lower_uri_ctx.begin(), ::tolower);
+
+    std::string lower_uri_prefix = uri_prefix;
+    std::transform(lower_uri_prefix.begin(), lower_uri_prefix.end(), lower_uri_prefix.begin(), ::tolower);
+
+    return lower_uri_ctx.find(lower_uri_prefix) == 0;
+}
+
+SASAL_END
diff --git a/components/generic_rulebase/evaluators/parameter_eval.cc b/components/generic_rulebase/evaluators/parameter_eval.cc
new file mode 100755
index 0000000..2430ba6
--- /dev/null
+++ b/components/generic_rulebase/evaluators/parameter_eval.cc
@@ -0,0 +1,38 @@
+// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/evaluators/parameter_eval.h" + +#include +#include + +#include "generic_rulebase/rulebase_config.h" +#include "config.h" +#include "debug.h" + +using namespace std; + +string ParameterMatcher::ctx_key = "parameters"; + +ParameterMatcher::ParameterMatcher(const vector ¶ms) +{ + if (params.size() != 1) reportWrongNumberOfParams(ParameterMatcher::getName(), params.size(), 1, 1); + parameter_id = params[0]; +} + +Maybe +ParameterMatcher::evalVariable() const +{ + auto rule = getConfiguration("rulebase", "rulesConfig"); + return rule.ok() && rule.unpack().isParameterActive(parameter_id); +} diff --git a/components/generic_rulebase/evaluators/practice_eval.cc b/components/generic_rulebase/evaluators/practice_eval.cc new file mode 100755 index 0000000..fc570e1 --- /dev/null +++ b/components/generic_rulebase/evaluators/practice_eval.cc @@ -0,0 +1,50 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/evaluators/practice_eval.h" + +#include +#include + +#include "generic_rulebase/rulebase_config.h" +#include "config.h" +#include "debug.h" + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +using namespace std; + +string PracticeMatcher::ctx_key = "practices"; + +PracticeMatcher::PracticeMatcher(const vector ¶ms) +{ + if (params.size() != 1) reportWrongNumberOfParams(PracticeMatcher::getName(), params.size(), 1, 1); + practice_id = params[0]; +} + +Maybe +PracticeMatcher::evalVariable() const +{ + I_Environment *env = Singleton::Consume::by(); + auto bc_practice_id_ctx = env->get>(PracticeMatcher::ctx_key); + dbgTrace(D_RULEBASE_CONFIG) + << "Trying to match practice. ID: " + << practice_id << ", Current set IDs: " + << makeSeparatedStr(bc_practice_id_ctx.ok() ? *bc_practice_id_ctx : set(), ", "); + if (bc_practice_id_ctx.ok()) { + return bc_practice_id_ctx.unpack().count(practice_id) > 0; + } + + auto rule = getConfiguration("rulebase", "rulesConfig"); + return rule.ok() && rule.unpack().isPracticeActive(practice_id); +} diff --git a/components/generic_rulebase/evaluators/query_eval.cc b/components/generic_rulebase/evaluators/query_eval.cc new file mode 100755 index 0000000..23f722f --- /dev/null +++ b/components/generic_rulebase/evaluators/query_eval.cc @@ -0,0 +1,136 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/evaluators/query_eval.h" + +#include +#include +#include + +#include "generic_rulebase/rulebase_config.h" +#include "generic_rulebase/zones_config.h" +#include "i_environment.h" +#include "singleton.h" +#include "config.h" +#include "debug.h" +#include "enum_range.h" + +using namespace std; + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +QueryMatcher::QueryMatcher(const vector ¶ms) +{ + if (params.size() < 1) reportWrongNumberOfParams(QueryMatcher::getName(), params.size(), 1); + + key = params.front(); + if (key == "any") { + is_any = true; + } else { + values.reserve(params.size() - 1); + for (uint i = 1; i < params.size() ; i++) { + if (params[i] == "any") { + values.clear(); + break; + } + values.insert(params[i]); + } + } +} + +const string +QueryMatcher::contextKeyToString(Context::MetaDataType type) +{ + if (type == Context::MetaDataType::SubjectIpAddr || type == Context::MetaDataType::OtherIpAddr) return "ip"; + return Context::convertToString(type); +} + +class QueryMatchSerializer +{ +public: + static const string req_attr_ctx_key; + + template + void + serialize(Archive &ar) + { + I_Environment *env = Singleton::Consume::by(); + auto req_attr = env->get(req_attr_ctx_key); + if (!req_attr.ok()) return; + + try { + ar(cereal::make_nvp(*req_attr, value)); + dbgDebug(D_RULEBASE_CONFIG) + << "Found value for requested attribute. Tag: " + << *req_attr + << ", Value: " + << value; + } catch (exception &e) { + dbgDebug(D_RULEBASE_CONFIG) << "Could not find values for requested attribute. Tag: " << *req_attr; + ar.finishNode(); + } + } + + template + bool + matchValues(const Values &requested_vals) const + { + return value != "" && (requested_vals.empty() || requested_vals.count(value) > 0); + } + +private: + string value; +}; + +const string QueryMatchSerializer::req_attr_ctx_key = "requested attribute key"; + +Maybe +QueryMatcher::evalVariable() const +{ + if (is_any) return true; + + I_Environment *env = Singleton::Consume::by(); + auto local_asset_ctx = env->get("is local asset"); + bool is_remote_asset = local_asset_ctx.ok() && !(*local_asset_ctx); + + QueryRequest request; + for (Context::MetaDataType name : makeRange()) { + auto val = env->get(name); + if (val.ok()) { + if ((name == Context::MetaDataType::SubjectIpAddr && is_remote_asset) || + (name == Context::MetaDataType::OtherIpAddr && !is_remote_asset)) { + continue; + } + + request.addCondition(Condition::EQUALS, contextKeyToString(name), *val); + } + } + if (request.empty()) return false; + + request.setRequestedAttr(key); + ScopedContext req_attr_key; + req_attr_key.registerValue(QueryMatchSerializer::req_attr_ctx_key, key); + + I_Intelligence_IS_V2 *intelligence = Singleton::Consume::by(); + auto query_res = intelligence->queryIntelligence(request); + if (!query_res.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to perform intelligence query. 
Error: " << query_res.getErr(); + return false; + } + + for (const AssetReply &asset : query_res.unpack()) { + if (asset.matchValues>(values)) return true; + } + + return false; +} diff --git a/components/generic_rulebase/evaluators/trigger_eval.cc b/components/generic_rulebase/evaluators/trigger_eval.cc new file mode 100755 index 0000000..cd26ac4 --- /dev/null +++ b/components/generic_rulebase/evaluators/trigger_eval.cc @@ -0,0 +1,57 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/evaluators/trigger_eval.h" + +#include +#include + +#include "generic_rulebase/rulebase_config.h" +#include "config.h" +#include "debug.h" + +using namespace std; + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +string TriggerMatcher::ctx_key = "triggers"; + +TriggerMatcher::TriggerMatcher(const vector ¶ms) +{ + if (params.size() != 1) reportWrongNumberOfParams(TriggerMatcher::getName(), params.size(), 1, 1); + trigger_id = params[0]; +} + +Maybe +TriggerMatcher::evalVariable() const +{ + I_Environment *env = Singleton::Consume::by(); + auto ac_bc_trigger_id_ctx = env->get>("ac_trigger_id"); + dbgTrace(D_RULEBASE_CONFIG) + << "Trying to match trigger for access control rule. ID: " + << trigger_id << ", Current set IDs: " + << makeSeparatedStr(ac_bc_trigger_id_ctx.ok() ? *ac_bc_trigger_id_ctx : set(), ", "); + if (ac_bc_trigger_id_ctx.ok()) { + return ac_bc_trigger_id_ctx.unpack().count(trigger_id) > 0; + } + + auto bc_trigger_id_ctx = env->get>(TriggerMatcher::ctx_key); + dbgTrace(D_RULEBASE_CONFIG) + << "Trying to match trigger. ID: " + << trigger_id << ", Current set IDs: " + << makeSeparatedStr(bc_trigger_id_ctx.ok() ? *bc_trigger_id_ctx : set(), ", "); + if (bc_trigger_id_ctx.ok() && bc_trigger_id_ctx.unpack().count(trigger_id) > 0 ) return true; + + auto rule = getConfiguration("rulebase", "rulesConfig"); + return rule.ok() && rule.unpack().isTriggerActive(trigger_id); +} diff --git a/components/generic_rulebase/evaluators/zone_eval.cc b/components/generic_rulebase/evaluators/zone_eval.cc new file mode 100755 index 0000000..17eb328 --- /dev/null +++ b/components/generic_rulebase/evaluators/zone_eval.cc @@ -0,0 +1,44 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "generic_rulebase/evaluators/zone_eval.h" + +#include +#include + +#include "generic_rulebase/zone.h" +#include "generic_rulebase/rulebase_config.h" +#include "config.h" + +using namespace std; + +string ZoneMatcher::ctx_key = "zone_id"; + +ZoneMatcher::ZoneMatcher(const vector ¶ms) +{ + if (params.size() != 1) reportWrongNumberOfParams(ZoneMatcher::getName(), params.size(), 1, 1); + zone_id = params[0]; +} + +Maybe +ZoneMatcher::evalVariable() const +{ + I_Environment *env = Singleton::Consume::by(); + auto bc_zone_id_ctx = env->get(ZoneMatcher::ctx_key); + if (bc_zone_id_ctx.ok() && *bc_zone_id_ctx == zone_id) return true; + + if (!getProfileAgentSettingWithDefault(false, "rulebase.enableQueryBasedMatch")) return false; + + auto zone = getConfiguration("rulebase", "zones"); + return zone.ok() && zone.unpack().getId() == zone_id; +} diff --git a/components/generic_rulebase/generic_rulebase.cc b/components/generic_rulebase/generic_rulebase.cc new file mode 100755 index 0000000..4f115c3 --- /dev/null +++ b/components/generic_rulebase/generic_rulebase.cc @@ -0,0 +1,125 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/generic_rulebase.h" + +#include + +#include "generic_rulebase/evaluators/trigger_eval.h" +#include "generic_rulebase/evaluators/practice_eval.h" +#include "generic_rulebase/evaluators/parameter_eval.h" +#include "generic_rulebase/evaluators/zone_eval.h" +#include "generic_rulebase/evaluators/asset_eval.h" +#include "generic_rulebase/evaluators/query_eval.h" +#include "generic_rulebase/evaluators/connection_eval.h" +#include "generic_rulebase/evaluators/http_transaction_data_eval.h" +#include "generic_rulebase/zone.h" +#include "generic_rulebase/triggers_config.h" +#include "singleton.h" +#include "common.h" +#include "debug.h" +#include "cache.h" +#include "config.h" + +using namespace std; + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +class GenericRulebase::Impl : Singleton::Provide::From +{ +public: + void init() {} + void fini() {} + + void preload(); + + Maybe getLocalZone() const override { return getZoneConfig(true); } + Maybe getOtherZone() const override { return getZoneConfig(false); } + + set getBehavior(const ParameterKeyValues &key_value_pairs) const override; + +private: + Maybe + getZoneConfig(bool is_local_zone) const + { + ScopedContext asset_location_ctx; + asset_location_ctx.registerValue("is local asset", is_local_zone); + return getConfiguration("rulebase", "zones"); + } +}; + +void +GenericRulebase::Impl::preload() +{ + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + addMatcher(); + BasicRuleConfig::preload(); + LogTriggerConf::preload(); + ParameterException::preload(); + registerExpectedConfiguration("rulebase", "zones"); + registerExpectedConfigFile("zones", Config::ConfigFileType::Policy); + 
registerExpectedConfigFile("triggers", Config::ConfigFileType::Policy); + registerExpectedConfigFile("rules", Config::ConfigFileType::Policy); + registerExpectedConfigFile("parameters", Config::ConfigFileType::Policy); + registerExpectedConfigFile("exceptions", Config::ConfigFileType::Policy); + +} + +set +GenericRulebase::Impl::getBehavior(const ParameterKeyValues &key_value_pairs) const +{ + auto &exceptions = getConfiguration("rulebase", "exception"); + + if (!exceptions.ok()) { + dbgTrace(D_RULEBASE_CONFIG) << "Could not find any exception with the current rule's context"; + return {}; + } + return (*exceptions).getBehavior(key_value_pairs); +} + +GenericRulebase::GenericRulebase() : Component("GenericRulebase"), pimpl(make_unique()) {} + +GenericRulebase::~GenericRulebase() {} + +void +GenericRulebase::init() +{ + pimpl->init(); +} + +void +GenericRulebase::fini() +{ + pimpl->fini(); +} + +void +GenericRulebase::preload() +{ + pimpl->preload(); +} diff --git a/components/generic_rulebase/generic_rulebase_context.cc b/components/generic_rulebase/generic_rulebase_context.cc new file mode 100755 index 0000000..7dd98cb --- /dev/null +++ b/components/generic_rulebase/generic_rulebase_context.cc @@ -0,0 +1,109 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/generic_rulebase_context.h" + +#include + +#include "context.h" +#include "config.h" +#include "generic_rulebase/evaluators/trigger_eval.h" +#include "generic_rulebase/evaluators/parameter_eval.h" +#include "generic_rulebase/evaluators/practice_eval.h" +#include "generic_rulebase/evaluators/zone_eval.h" +#include "generic_rulebase/evaluators/asset_eval.h" + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +using namespace std; + +template +set +extractIds(const vector &configurations) +{ + set ids; + for (const Configs &conf : configurations) { + ids.insert(conf.getId()); + } + return ids; +} + +void +GenericRulebaseContext::activate(const BasicRuleConfig &rule) +{ + switch(registration_state) { + case RuleRegistrationState::UNINITIALIZED: { + registration_state = RuleRegistrationState::REGISTERED; + ctx.registerValue>( + TriggerMatcher::ctx_key, + extractIds(rule.getTriggers()) + ); + ctx.registerValue>( + PracticeMatcher::ctx_key, + extractIds(rule.getPractices()) + ); + dbgTrace(D_RULEBASE_CONFIG) + << "Activating current practices. 
Current practice IDs: " + << makeSeparatedStr(extractIds(rule.getPractices()), ", "); + + ctx.registerValue>( + ParameterMatcher::ctx_key, + extractIds(rule.getParameters()) + ); + ctx.registerValue( + ZoneMatcher::ctx_key, + rule.getZoneId() + ); + ctx.registerValue( + AssetMatcher::ctx_key, + rule.getAssetId() + ); + ctx.activate(); + break; + } + case RuleRegistrationState::REGISTERED: { + dbgTrace(D_RULEBASE_CONFIG) << "Activating registered rule values"; + ctx.activate(); + break; + } + case RuleRegistrationState::UNREGISTERED: { + dbgTrace(D_RULEBASE_CONFIG) << "Failed to register rule values"; + } + } +} + +void +GenericRulebaseContext::activate() +{ + switch(registration_state) { + case RuleRegistrationState::UNINITIALIZED: { + auto maybe_rule = getConfiguration("rulebase", "rulesConfig"); + if (!maybe_rule.ok()) { + registration_state = RuleRegistrationState::UNREGISTERED; + return; + } + dbgTrace(D_RULEBASE_CONFIG) << "Registering new rule values"; + activate(maybe_rule.unpack()); + registration_state = RuleRegistrationState::REGISTERED; + break; + } + case RuleRegistrationState::REGISTERED: { + dbgTrace(D_RULEBASE_CONFIG) << "Activating registered rule values"; + ctx.activate(); + break; + } + case RuleRegistrationState::UNREGISTERED: { + dbgTrace(D_RULEBASE_CONFIG) << "Failed to register rule values"; + } + } +} diff --git a/components/generic_rulebase/match_query.cc b/components/generic_rulebase/match_query.cc new file mode 100755 index 0000000..6e2852d --- /dev/null +++ b/components/generic_rulebase/match_query.cc @@ -0,0 +1,291 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "generic_rulebase/match_query.h" + +#include "cereal/types/set.hpp" + +#include "generic_rulebase/generic_rulebase_utils.h" +#include "config.h" +#include "ip_utilities.h" +#include "agent_core_utilities.h" + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +using namespace std; + +static const unordered_map string_to_match_type = { + { "condition", MatchQuery::MatchType::Condition }, + { "operator", MatchQuery::MatchType::Operator } +}; + +static const unordered_map string_to_operator = { + { "and", MatchQuery::Operators::And }, + { "or", MatchQuery::Operators::Or } +}; + +static const unordered_map string_to_condition = { + { "equals", MatchQuery::Conditions::Equals }, + { "not-equals", MatchQuery::Conditions::NotEquals }, + { "not equals", MatchQuery::Conditions::NotEquals }, + { "in", MatchQuery::Conditions::In }, + { "not-in", MatchQuery::Conditions::NotIn }, + { "not in", MatchQuery::Conditions::NotIn }, + { "exist", MatchQuery::Conditions::Exist } +}; + +static const string ip_addr_type_name = "IP address"; +static const string port_type_name = "port"; +static const string ip_proto_type_name = "IP protocol"; + +static const unordered_map string_to_key = { + { "sourceIP", MatchQuery::StaticKeys::SrcIpAddress }, + { "sourceIpAddr", MatchQuery::StaticKeys::SrcIpAddress }, + { "destinationIP", MatchQuery::StaticKeys::DstIpAddress }, + { "destinationIpAddr", MatchQuery::StaticKeys::DstIpAddress }, + { "ipAddress", MatchQuery::StaticKeys::IpAddress }, + { "sourcePort", MatchQuery::StaticKeys::SrcPort }, + { "listeningPort", MatchQuery::StaticKeys::ListeningPort }, + { "ipProtocol", MatchQuery::StaticKeys::IpProtocol }, + { "domain", MatchQuery::StaticKeys::Domain } +}; + +void +MatchQuery::load(cereal::JSONInputArchive &archive_in) +{ + string type_as_string; + archive_in(cereal::make_nvp("type", type_as_string)); + + string op_as_string; + archive_in(cereal::make_nvp("op", op_as_string)); + + auto maybe_type = string_to_match_type.find(type_as_string); + if (maybe_type == string_to_match_type.end()) { + reportConfigurationError("Illegal Zone match query type. Provided type in configuration: " + type_as_string); + } + + type = maybe_type->second; + switch (type) { + case (MatchType::Condition): { + auto maybe_condition = string_to_condition.find(op_as_string); + if (maybe_condition == string_to_condition.end()) { + reportConfigurationError( + "Illegal op provided for condition. Provided op in configuration: " + + op_as_string + ); + } + condition_type = maybe_condition->second; + operator_type = Operators::None; + archive_in(cereal::make_nvp("key", key)); + key_type = getKeyByName(key); + if (key_type == StaticKeys::NotStatic) { + if (key.rfind("containerLabels.", 0) == 0) { + is_specific_label = true; + } else { + is_specific_label = false; + } + } + if (condition_type != Conditions::Exist) { + archive_in(cereal::make_nvp("value", value)); + for(const auto &val: value) { + if (isKeyTypeIp()) { + auto ip_range = IPUtilities::createRangeFromString(val, ip_addr_type_name); + if (ip_range.ok()) { + ip_addr_value.push_back(ip_range.unpack()); + } else { + dbgWarning(D_RULEBASE_CONFIG) + << "Failed to parse IP address range. Error: " + << ip_range.getErr(); + } + } else if (isKeyTypePort()) { + auto port_range = IPUtilities::createRangeFromString( + val, + port_type_name + ); + if (port_range.ok()) { + port_value.push_back(port_range.unpack()); + } else { + dbgWarning(D_RULEBASE_CONFIG) + << "Failed to parse port range. 
Error: " + << port_range.getErr(); + } + } else if (isKeyTypeProtocol()) { + auto proto_range = IPUtilities::createRangeFromString( + val, + ip_proto_type_name + ); + if (proto_range.ok()) { + ip_proto_value.push_back(proto_range.unpack()); + } else { + dbgWarning(D_RULEBASE_CONFIG) + << "Failed to parse IP protocol range. Error: " + << proto_range.getErr(); + } + } + + try { + regex_values.insert(boost::regex(val)); + } catch (const exception &e) { + dbgDebug(D_RULEBASE_CONFIG) << "Failed to compile regex. Error: " << e.what(); + } + } + first_value = *(value.begin()); + } + break; + } + case (MatchType::Operator): { + auto maybe_operator = string_to_operator.find(op_as_string); + if (maybe_operator == string_to_operator.end()) { + reportConfigurationError( + "Illegal op provided for operator. Provided op in configuration: " + + op_as_string + ); + } + operator_type = maybe_operator->second; + condition_type = Conditions::None; + archive_in(cereal::make_nvp("items", items)); + break; + } + } +} + +MatchQuery::StaticKeys +MatchQuery::getKeyByName(const string &key_type_name) +{ + auto key = string_to_key.find(key_type_name); + if (key == string_to_key.end()) return StaticKeys::NotStatic; + return key->second; +} + +bool +MatchQuery::isKeyTypeIp() const +{ + return (key_type >= StaticKeys::IpAddress && key_type <= StaticKeys::DstIpAddress); +} + +bool +MatchQuery::isKeyTypePort() const +{ + return (key_type == StaticKeys::SrcPort || key_type == StaticKeys::ListeningPort); +} + +bool +MatchQuery::isKeyTypeProtocol() const +{ + return (key_type == StaticKeys::IpProtocol); +} + +bool +MatchQuery::isKeyTypeDomain() const +{ + return (key_type == StaticKeys::Domain); +} + +bool +MatchQuery::isKeyTypeSpecificLabel() const +{ + return is_specific_label; +} + +bool +MatchQuery::isKeyTypeStatic() const +{ + return (key_type != StaticKeys::NotStatic); +} + +set +MatchQuery::getAllKeys() const +{ + set keys; + if (type == MatchType::Condition) { + if (!key.empty()) keys.insert(key); + return keys; + } + + for (const MatchQuery &inner_match: items) { + set iner_keys = inner_match.getAllKeys(); + keys.insert(iner_keys.begin(), iner_keys.end()); + } + + return keys; +} + +bool +MatchQuery::matchAttributes(const unordered_map> &key_value_pairs) const +{ + if (type == MatchType::Condition) { + auto key_value_pair = key_value_pairs.find(key); + if (key_value_pair == key_value_pairs.end()) { + dbgTrace(D_RULEBASE_CONFIG) << "Ignoring irrelevant key: " << key; + return false; + } + return matchAttributes(key_value_pair->second); + } else if (type == MatchType::Operator && operator_type == Operators::And) { + for (const MatchQuery &inner_match: items) { + if (!inner_match.matchAttributes(key_value_pairs)) return false; + } + return true; + } else if (type == MatchType::Operator && operator_type == Operators::Or) { + for (const MatchQuery &inner_match: items) { + if (inner_match.matchAttributes(key_value_pairs)) return true; + } + return false; + } else { + dbgWarning(D_RULEBASE_CONFIG) << "Unsupported match query type"; + } + return false; +} + +bool +MatchQuery::matchAttributes(const set &values) const +{ + auto &type = condition_type; + bool negate = type == MatchQuery::Conditions::NotEquals || type == MatchQuery::Conditions::NotIn; + bool match = isRegEx() ? matchAttributesRegEx(values) : matchAttributesString(values); + return negate ? 
!match : match; +} + +bool +MatchQuery::matchAttributesRegEx(const set &values) const +{ + boost::cmatch value_matcher; + for (const boost::regex &val_regex : regex_values) { + for (const string &requested_match_value : values) { + if (NGEN::Regex::regexMatch( + __FILE__, + __LINE__, + requested_match_value.c_str(), + value_matcher, + val_regex)) + { + return true; + } + } + } + return false; +} + +bool +MatchQuery::matchAttributesString(const set &values) const +{ + for (const string &requested_value : values) { + if (value.find(requested_value) != value.end()) return true; + } + return false; +} + +bool +MatchQuery::isRegEx() const +{ + return key != "protectionName"; +} diff --git a/components/generic_rulebase/parameters_config.cc b/components/generic_rulebase/parameters_config.cc new file mode 100755 index 0000000..897ba05 --- /dev/null +++ b/components/generic_rulebase/parameters_config.cc @@ -0,0 +1,126 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/parameters_config.h" + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +using namespace std; + +bool ParameterException::is_geo_location_exception_exists(false); +bool ParameterException::is_geo_location_exception_being_loaded(false); + +void +ParameterOverrides::load(cereal::JSONInputArchive &archive_in) +{ + parseJSONKey>("parsedBehavior", parsed_behaviors, archive_in); +} + +void +ParameterTrustedSources::load(cereal::JSONInputArchive &archive_in) +{ + parseJSONKey("numOfSources", num_of_sources, archive_in); + parseJSONKey>("sourcesIdentifiers", sources_identidiers, archive_in); +} + +void +ParameterBehavior::load(cereal::JSONInputArchive &archive_in) +{ + string key_string; + string val_string; + parseJSONKey("id", id, archive_in); + parseJSONKey("key", key_string, archive_in); + parseJSONKey("value", val_string, archive_in); + if (string_to_behavior_key.find(key_string) == string_to_behavior_key.end()) { + dbgWarning(D_RULEBASE_CONFIG) << "Unsupported behavior key: " << key_string; + return; + } + key = string_to_behavior_key.at(key_string); + + if (string_to_behavior_val.find(val_string) == string_to_behavior_val.end()) { + dbgWarning(D_RULEBASE_CONFIG) << "Unsupported behavior value: " << val_string; + return; + } + value = string_to_behavior_val.at(val_string); +} + +void +ParameterAntiBot::load(cereal::JSONInputArchive &archive_in) +{ + parseJSONKey>("injected", injected, archive_in); + parseJSONKey>("validated", validated, archive_in); +} + +void +ParameterOAS::load(cereal::JSONInputArchive &archive_in) +{ + parseJSONKey("value", value, archive_in); +} + +void +ParameterException::MatchBehaviorPair::load(cereal::JSONInputArchive &archive_in) +{ + parseJSONKey("match", match, archive_in); + parseJSONKey("behavior", behavior, archive_in); +} + +void +ParameterException::load(cereal::JSONInputArchive &archive_in) +{ + try { + archive_in( + cereal::make_nvp("match", match), + cereal::make_nvp("behavior", behavior) + ); + } catch (...) 
{ + parseJSONKey>("exceptions", match_queries, archive_in); + } + + function isGeoLocationExists = + [&](const MatchQuery &query) + { + if (query.getKey() == "countryCode" || query.getKey() == "countryName") { + is_geo_location_exception_being_loaded = true; + return true; + } + + for (const MatchQuery &query_item : query.getItems()) { + if (isGeoLocationExists(query_item)) return true; + } + + return false; + }; + + if (isGeoLocationExists(match)) return; + for (const MatchBehaviorPair &match_query : match_queries) { + if (isGeoLocationExists(match_query.match)) return; + } +} + +set +ParameterException::getBehavior(const unordered_map> &key_value_pairs) const +{ + set matched_behaviors; + dbgTrace(D_RULEBASE_CONFIG) << "Matching exception"; + for (const MatchBehaviorPair &match_behavior_pair: match_queries) { + if (match_behavior_pair.match.matchAttributes(key_value_pairs)) { + dbgTrace(D_RULEBASE_CONFIG) << "Successfully matched an exception from a list of matches."; + matched_behaviors.insert(match_behavior_pair.behavior); + } + } + if (match_queries.empty() && match.matchAttributes(key_value_pairs)) { + dbgTrace(D_RULEBASE_CONFIG) << "Successfully matched an exception."; + matched_behaviors.insert(behavior); + } + return matched_behaviors; +} diff --git a/components/generic_rulebase/rulebase_config.cc b/components/generic_rulebase/rulebase_config.cc new file mode 100755 index 0000000..2dd018f --- /dev/null +++ b/components/generic_rulebase/rulebase_config.cc @@ -0,0 +1,79 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "generic_rulebase/rulebase_config.h" + +#include "telemetry.h" +#include "config.h" + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +using namespace std; + +set BasicRuleConfig::assets_ids{}; +set BasicRuleConfig::assets_ids_aggregation{}; + +void +BasicRuleConfig::load(cereal::JSONInputArchive &ar) +{ + parseJSONKey>("practices", practices, ar); + parseJSONKey>("triggers", triggers, ar); + parseJSONKey>("parameters", parameters, ar); + parseJSONKey("priority", priority, ar); + parseJSONKey("ruleId", rule_id, ar); + parseJSONKey("ruleName", rule_name, ar); + parseJSONKey("assetId", asset_id, ar); + parseJSONKey("assetName", asset_name, ar); + parseJSONKey("zoneId", zone_id, ar); + parseJSONKey("zoneName", zone_name, ar); + + assets_ids_aggregation.insert(asset_id); +} + +void +BasicRuleConfig::updateCountMetric() +{ + BasicRuleConfig::assets_ids = BasicRuleConfig::assets_ids_aggregation; + AssetCountEvent(AssetType::ALL, BasicRuleConfig::assets_ids.size()).notify(); +} + +bool +BasicRuleConfig::isPracticeActive(const string &practice_id) const +{ + for (auto practice: practices) { + if (practice.getId() == practice_id) return true; + } + return false; +} + +bool +BasicRuleConfig::isTriggerActive(const string &trigger_id) const +{ + for (auto trigger: triggers) { + if (trigger.getId() == trigger_id) { + return true; + } + } + return false; +} + +bool +BasicRuleConfig::isParameterActive(const string ¶meter_id) const +{ + for (auto param: parameters) { + if (param.getId() == parameter_id) { + return true; + } + } + return false; +} diff --git a/components/generic_rulebase/triggers_config.cc b/components/generic_rulebase/triggers_config.cc new file mode 100755 index 0000000..abfbbaf --- /dev/null +++ b/components/generic_rulebase/triggers_config.cc @@ -0,0 +1,216 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include + +#include "generic_rulebase/triggers_config.h" +#include "generic_rulebase/generic_rulebase_utils.h" + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +using namespace std; + +WebTriggerConf::WebTriggerConf() : response_title(""), response_body(""), response_code(0) {} +WebTriggerConf::WebTriggerConf(const string &title, const string &body, uint code) + : + response_title(title), + response_body(body), + response_code(code) +{} + +WebTriggerConf WebTriggerConf::default_trigger_conf = WebTriggerConf( + "Attack blocked by web application protection", // title + "Check Point's Application Security has detected an attack and blocked it.", // body + 403 +); + +void +WebTriggerConf::load(cereal::JSONInputArchive &archive_in) +{ + try { + parseJSONKey("details level", details_level, archive_in); + if (details_level == "Redirect") { + parseJSONKey("redirect URL", redirect_url, archive_in); + parseJSONKey("xEventId", add_event_id_to_header, archive_in); + parseJSONKey("eventIdInHeader", add_event_id_to_header, archive_in); + return; + } + parseJSONKey("response code", response_code, archive_in); + if (response_code < 100 || response_code > 599) { + throw cereal::Exception( + "illegal web trigger response code: " + + to_string(response_code) + + " is out of range (100-599)" + ); + } + + if (details_level == "Response Code") return; + + parseJSONKey("response body", response_body, archive_in); + parseJSONKey("response title", response_title, archive_in); + } catch (const exception &e) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to parse the web trigger configuration: '" << e.what() << "'"; + archive_in.setNextName(nullptr); + } +} + +bool +WebTriggerConf::operator==(const WebTriggerConf &other) const +{ + return + response_code == other.response_code && + response_title == other.response_title && + response_body == other.response_body; +} + +LogTriggerConf::LogTriggerConf(string trigger_name, bool log_detect, bool log_prevent) : name(trigger_name) +{ + if (log_detect) should_log_on_detect.setAll(); + if (log_prevent) should_log_on_prevent.setAll(); + active_streams.setFlag(ReportIS::StreamType::JSON_FOG); + active_streams.setFlag(ReportIS::StreamType::JSON_LOG_FILE); +} + +ReportIS::Severity +LogTriggerConf::getSeverity(bool is_action_drop_or_prevent) const +{ + return is_action_drop_or_prevent ? ReportIS::Severity::MEDIUM : ReportIS::Severity::LOW; +} + +ReportIS::Priority +LogTriggerConf::getPriority(bool is_action_drop_or_prevent) const +{ + return is_action_drop_or_prevent ? 
ReportIS::Priority::HIGH : ReportIS::Priority::MEDIUM; +} + +Flags +LogTriggerConf::getStreams(SecurityType security_type, bool is_action_drop_or_prevent) const +{ + if (is_action_drop_or_prevent && should_log_on_prevent.isSet(security_type)) return active_streams; + if (!is_action_drop_or_prevent && should_log_on_detect.isSet(security_type)) return active_streams; + + return Flags(); +} + +Flags +LogTriggerConf::getEnrechments(SecurityType security_type) const +{ + Flags enreachments; + + if (log_geo_location.isSet(security_type)) enreachments.setFlag(ReportIS::Enreachments::GEOLOCATION); + if (should_format_output) enreachments.setFlag(ReportIS::Enreachments::BEAUTIFY_OUTPUT); + + return enreachments; +} + +template +static void +setTriggersFlag(const string &key, cereal::JSONInputArchive &ar, EnumClass flag, Flags &flags) +{ + bool value = false; + parseJSONKey(key, value, ar); + if (value) flags.setFlag(flag); +} + +static void +setLogConfiguration(const ReportIS::StreamType &log_type, const string &log_server_url = "") +{ + dbgTrace(D_RULEBASE_CONFIG) << "log server url:" << log_server_url; + if (log_server_url != "") { + Singleton::Consume::by()->addStream(log_type, log_server_url); + } else { + Singleton::Consume::by()->addStream(log_type); + } +} + +void +LogTriggerConf::load(cereal::JSONInputArchive& archive_in) +{ + try { + parseJSONKey("triggerName", name, archive_in); + parseJSONKey("verbosity", verbosity, archive_in); + parseJSONKey("urlForSyslog", url_for_syslog, archive_in); + parseJSONKey("urlForCef", url_for_cef, archive_in); + + setTriggersFlag("webBody", archive_in, WebLogFields::webBody, log_web_fields); + setTriggersFlag("webHeaders", archive_in, WebLogFields::webHeaders, log_web_fields); + setTriggersFlag("webRequests", archive_in, WebLogFields::webRequests, log_web_fields); + setTriggersFlag("webUrlPath", archive_in, WebLogFields::webUrlPath, log_web_fields); + setTriggersFlag("webUrlQuery", archive_in, WebLogFields::webUrlQuery, log_web_fields); + setTriggersFlag("logToAgent", archive_in, ReportIS::StreamType::JSON_LOG_FILE, active_streams); + setTriggersFlag("logToCloud", archive_in, ReportIS::StreamType::JSON_FOG, active_streams); + setTriggersFlag("logToSyslog", archive_in, ReportIS::StreamType::SYSLOG, active_streams); + setTriggersFlag("logToCef", archive_in, ReportIS::StreamType::CEF, active_streams); + setTriggersFlag("acAllow", archive_in, SecurityType::AccessControl, should_log_on_detect); + setTriggersFlag("acDrop", archive_in, SecurityType::AccessControl, should_log_on_prevent); + setTriggersFlag("tpDetect", archive_in, SecurityType::ThreatPrevention, should_log_on_detect); + setTriggersFlag("tpPrevent", archive_in, SecurityType::ThreatPrevention, should_log_on_prevent); + setTriggersFlag("complianceWarnings", archive_in, SecurityType::Compliance, should_log_on_detect); + setTriggersFlag("complianceViolations", archive_in, SecurityType::Compliance, should_log_on_prevent); + setTriggersFlag("acLogGeoLocation", archive_in, SecurityType::AccessControl, log_geo_location); + setTriggersFlag("tpLogGeoLocation", archive_in, SecurityType::ThreatPrevention, log_geo_location); + setTriggersFlag("complianceLogGeoLocation", archive_in, SecurityType::Compliance, log_geo_location); + + bool extend_logging = false; + parseJSONKey("extendLogging", extend_logging, archive_in); + if (extend_logging) { + setTriggersFlag("responseCode", archive_in, WebLogFields::responseCode, log_web_fields); + setTriggersFlag("responseBody", archive_in, WebLogFields::responseBody, 
log_web_fields); + + string severity; + static const map extend_logging_severity_strings = { + {"High", extendLoggingSeverity::High}, + {"Critical", extendLoggingSeverity::Critical} + }; + parseJSONKey("extendLoggingMinSeverity", severity, archive_in); + auto extended_severity = extend_logging_severity_strings.find(severity); + if (extended_severity != extend_logging_severity_strings.end()) { + extend_logging_severity = extended_severity->second; + } else { + dbgWarning(D_RULEBASE_CONFIG) + << "Failed to parse the extendLoggingMinSeverityfield: '" + << severity + << "'"; + } + } + + for (ReportIS::StreamType log_stream : makeRange()) { + if (!active_streams.isSet(log_stream)) continue; + switch (log_stream) { + case ReportIS::StreamType::JSON_DEBUG: + setLogConfiguration(ReportIS::StreamType::JSON_DEBUG); + break; + case ReportIS::StreamType::JSON_FOG: + setLogConfiguration(ReportIS::StreamType::JSON_FOG); + break; + case ReportIS::StreamType::JSON_LOG_FILE: + setLogConfiguration(ReportIS::StreamType::JSON_LOG_FILE); + break; + case ReportIS::StreamType::SYSLOG: + setLogConfiguration(ReportIS::StreamType::SYSLOG, getUrlForSyslog()); + break; + case ReportIS::StreamType::CEF: + setLogConfiguration(ReportIS::StreamType::CEF, getUrlForCef()); + break; + case ReportIS::StreamType::NONE: break; + case ReportIS::StreamType::COUNT: break; + } + } + + parseJSONKey("formatLoggingOutput", should_format_output, archive_in); + } catch (const exception &e) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to parse the log trigger configuration: '" << e.what() << "'"; + archive_in.setNextName(nullptr); + } +} diff --git a/components/generic_rulebase/zone.cc b/components/generic_rulebase/zone.cc new file mode 100755 index 0000000..b8266b7 --- /dev/null +++ b/components/generic_rulebase/zone.cc @@ -0,0 +1,179 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/zone.h" + +#include +#include +#include + +using namespace std; + +static const unordered_map string_to_direction = { + { "to", Zone::Direction::To }, + { "from", Zone::Direction::From }, + { "bidirectional", Zone::Direction::Bidirectional } +}; + +class AdjacentZone +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + string direction_as_string; + archive_in(cereal::make_nvp("direction", direction_as_string)); + archive_in(cereal::make_nvp("zoneId", id)); + auto maybe_direction = string_to_direction.find(direction_as_string); + if (maybe_direction == string_to_direction.end()) { + reportConfigurationError( + "Illegal direction provided for adjacency. 
Provided direction in configuration: " + + direction_as_string + ); + } + dir = maybe_direction->second; + } + + pair getValue() const { return make_pair(dir, id); } + +private: + Zone::Direction dir; + GenericConfigId id; +}; + +class TagsValues +{ +public: + static const string req_attrs_ctx_key; + + TagsValues() {} + + template + void + serialize(Archive &ar) + { + I_Environment *env = Singleton::Consume::by(); + auto req_attrs = env->get>(req_attrs_ctx_key); + if (!req_attrs.ok()) return; + + for (const string &req_attr : *req_attrs) { + try { + string data; + ar(cereal::make_nvp(req_attr, data)); + dbgDebug(D_RULEBASE_CONFIG) + << "Found value for requested attribute. Tag: " + << req_attr + << ", Value: " + << data; + + tags_set[req_attr].insert(data); + } catch (const exception &e) { + dbgDebug(D_RULEBASE_CONFIG) << "Could not find values for requested attribute. Tag: " << req_attr; + ar.setNextName(nullptr); + } + } + } + + bool + matchValueByKey(const string &requested_key, const unordered_set &possible_values) const + { + auto values = tags_set.find(requested_key); + if (values == tags_set.end()) return false; + + for (const string &val : possible_values) { + if (values->second.count(val)) return true; + } + return false; + } + + void + insert(const TagsValues &other) + { + for (auto &single_tags_value : other.getData()) { + tags_set[single_tags_value.first].insert(single_tags_value.second.begin(), single_tags_value.second.end()); + } + } + + const unordered_map> & getData() const { return tags_set; } + +private: + unordered_map> tags_set; +}; + +const string TagsValues::req_attrs_ctx_key = "requested attributes key"; + +void +Zone::load(cereal::JSONInputArchive &archive_in) +{ + archive_in(cereal::make_nvp("id", zone_id)); + archive_in(cereal::make_nvp("name", zone_name)); + vector adjacency; + try { + archive_in(cereal::make_nvp("adjacentZones", adjacency)); + } catch (const cereal::Exception &) { + dbgTrace(D_RULEBASE_CONFIG) + << "List of adjacentZones does not exist for current object. Zone id: " + << zone_id + << ", Zone name: " + << zone_name; + + archive_in.setNextName(nullptr); + } + + for (const AdjacentZone &zone : adjacency) { + adjacent_zones.push_back(zone.getValue()); + } + + archive_in(cereal::make_nvp("match", match_query)); + + is_any = + match_query.getType() == MatchQuery::MatchType::Condition && + match_query.getKey() == "any" && + match_query.getValue().count("any") > 0; + + set keys = match_query.getAllKeys(); +} + +const string +contextKeyToString(Context::MetaDataType type) +{ + if (type == Context::MetaDataType::SubjectIpAddr || type == Context::MetaDataType::OtherIpAddr) return "ip"; + return Context::convertToString(type); +} + +bool +Zone::contains(const Asset &asset) +{ + QueryRequest request; + + for (const pair &main_attr : asset.getAttrs()) { + request.addCondition(Condition::EQUALS, contextKeyToString(main_attr.first), main_attr.second); + } + + ScopedContext req_attrs_key; + req_attrs_key.registerValue>(TagsValues::req_attrs_ctx_key, match_query.getAllKeys()); + + I_Intelligence_IS_V2 *intelligence = Singleton::Consume::by(); + auto query_res = intelligence->queryIntelligence(request); + if (!query_res.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to perform intelligence query. 
Error: " << query_res.getErr(); + return false; + } + + for (const AssetReply &asset : query_res.unpack()) { + TagsValues tag_values = asset.mergeReplyData(); + + if (match_query.matchAttributes(tag_values.getData())) return true; + } + return false; +} diff --git a/components/generic_rulebase/zones_config.cc b/components/generic_rulebase/zones_config.cc new file mode 100755 index 0000000..7a4df79 --- /dev/null +++ b/components/generic_rulebase/zones_config.cc @@ -0,0 +1,114 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "generic_rulebase/zones_config.h" + +#include +#include + +#include "generic_rulebase/generic_rulebase_utils.h" +#include "config.h" +#include "ip_utilities.h" +#include "connkey.h" +#include "i_generic_rulebase.h" + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +using namespace std; + +void +ZonesConfig::load(cereal::JSONInputArchive &archive_in) +{ + dbgFlow(D_RULEBASE_CONFIG) << "Saving active zones"; + set used_zones; + cereal::load(archive_in, used_zones); + + dbgTrace(D_RULEBASE_CONFIG) << "Loading all zones"; + auto all_zones_maybe = getSetting("rulebase", "zones"); + if (!all_zones_maybe.ok()) { + dbgWarning(D_RULEBASE_CONFIG) << "Failed to load zones"; + return; + } + + dbgTrace(D_RULEBASE_CONFIG) << "Creating cache of all zones by ID"; + map all_zones; + for (const auto &single_zone : all_zones_maybe.unpack().zones) { + if (used_zones.count(single_zone.getId()) > 0 && single_zone.isAnyZone()) { + dbgTrace(D_RULEBASE_CONFIG) << "Found used zone of type \"Any\": saving all zones as active zones"; + zones = all_zones_maybe.unpack().zones; + return; + } + + dbgWarning(D_RULEBASE_CONFIG) + << "Adding specific zone to cache. Zone ID: " + << single_zone.getId() + << ", name: " + << single_zone.getName(); + all_zones.emplace(single_zone.getId(), single_zone); + } + + dbgTrace(D_RULEBASE_CONFIG) << "Creating list of active zones"; + map active_zones_set; + for (const auto &single_used_zone_id : used_zones) { + const auto &found_zone = all_zones[single_used_zone_id]; + dbgTrace(D_RULEBASE_CONFIG) + << "Adding zone to list of active zones. Zone ID: " + << single_used_zone_id + << ", zone name: " + << found_zone.getName(); + active_zones_set.emplace(found_zone.getId(), found_zone); + + for (const auto &adjacent_zone : found_zone.getAdjacentZones()) { + const auto &adjacent_zone_obj = all_zones[adjacent_zone.second]; + dbgTrace(D_RULEBASE_CONFIG) + << "Adding adjacent zone to list of active zones. 
Zone ID: " + << adjacent_zone_obj.getId() + << ", zone name: " + << adjacent_zone_obj.getName(); + active_zones_set.emplace(adjacent_zone_obj.getId(), adjacent_zone_obj); + } + } + + vector implied_zones = { + "impliedAzure", + "impliedDNS", + "impliedSSH", + "impliedProxy", + "impliedFog" + }; + + GenericConfigId any_zone_id = ""; + for (const auto &single_zone : all_zones_maybe.unpack().zones) { + if (single_zone.isAnyZone()) any_zone_id = single_zone.getId(); + } + for (GenericConfigId &implied_id: implied_zones) { + if (all_zones.find(implied_id) != all_zones.end()) { + dbgWarning(D_RULEBASE_CONFIG) << "Adding implied zone to cache. Zone ID: " << implied_id; + active_zones_set.emplace(implied_id, all_zones[implied_id]); + if (any_zone_id != "" && active_zones_set.count(any_zone_id) == 0) { + active_zones_set.emplace(any_zone_id, all_zones[any_zone_id]); + } + } + } + + for (const auto &single_id_zone_pair : active_zones_set) { + zones.push_back(single_id_zone_pair.second); + } +} + +void +ZonesConfig::preload() +{ + registerExpectedSetting("rulebase", "zones"); + registerExpectedSetting("rulebase", "usedZones"); +} diff --git a/components/gradual_deployment/CMakeLists.txt b/components/gradual_deployment/CMakeLists.txt new file mode 100644 index 0000000..271620b --- /dev/null +++ b/components/gradual_deployment/CMakeLists.txt @@ -0,0 +1,7 @@ +add_definitions(-DUSERSPACE) + +include_directories(include) + +add_library(gradual_deployment gradual_deployment.cc ) + +add_subdirectory(gradual_deployment_ut) diff --git a/components/gradual_deployment/gradual_deployment.cc b/components/gradual_deployment/gradual_deployment.cc new file mode 100644 index 0000000..4b25b3b --- /dev/null +++ b/components/gradual_deployment/gradual_deployment.cc @@ -0,0 +1,227 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "gradual_deployment.h" + +#include +#include +#include + +#include "enum_range.h" +#include "connkey.h" + +USE_DEBUG_FLAG(D_GRADUAL_DEPLOYMENT); + +using namespace std; + +class SetGradualDeploymentRanges : public ServerRest +{ +public: + void doCall() override + { + dbgFlow(D_GRADUAL_DEPLOYMENT) << "Set gradual policy API"; + + auto maybe_type = convertServiceStrToAttachmentType(attachment_type.get()); + if (!maybe_type.ok()) { + string error = "Failed to determine attachment type. 
Type: " + + attachment_type.get() + + ", error: " + + maybe_type.getErr(); + dbgWarning(D_GRADUAL_DEPLOYMENT) << error; + throw JsonError(error); + } + dbgTrace(D_GRADUAL_DEPLOYMENT) + << "Setting gradual policy for attachment of type: " + << attachment_type.get(); + + auto i_gradual_deployment = Singleton::Consume::from(); + auto set_policy_res = i_gradual_deployment->setPolicy(maybe_type.unpackMove(), ip_ranges.get()); + if (!set_policy_res.ok()) throw JsonError(set_policy_res.getErr()); + + return; + } + +private: + C2S_PARAM(vector, ip_ranges) + C2S_PARAM(string, attachment_type) + + Maybe + convertServiceStrToAttachmentType(string &type) { + transform(type.begin(), type.end(), type.begin(), ::tolower); + if (type == "http-manager") return I_GradualDeployment::AttachmentType::NGINX; + if (type == "access-control") return I_GradualDeployment::AttachmentType::KERNEL; + + return genError("unknown attachment type"); + } +}; + +class GradualDeployment::Impl + : + Singleton::Provide::From +{ +public: + void + init() + { + dbgFlow(D_GRADUAL_DEPLOYMENT) << "Initializing Gradual Deployment Manager"; + + auto rest = Singleton::Consume::by(); + rest->addRestCall(RestAction::SET, "gradual-deployment-policy"); + + dbgTrace(D_GRADUAL_DEPLOYMENT) << "Gradual Deployment Manager initialization is done successfully"; + } + + Maybe + setPolicy(I_GradualDeployment::AttachmentType type, const vector &str_ip_ranges) override + { + auto maybe_policy = parseIpRanges(str_ip_ranges); + if (!maybe_policy.ok()) { + auto error = "Failed to set gradual deployment policy. Error: " + maybe_policy.getErr(); + dbgWarning(D_GRADUAL_DEPLOYMENT) << error; + return genError(error); + } + + ip_ranges_map[static_cast(type)] = maybe_policy.unpackMove(); + return Maybe(); + } + + vector + getPolicy(I_GradualDeployment::AttachmentType type) override + { + vector res; + for (const IPRange &range : ip_ranges_map[static_cast(type)]) { + // Range is validated on insertion + res.push_back(convertIpRangeToStr(range).unpack()); + } + return res; + } + + vector & + getParsedPolicy(I_GradualDeployment::AttachmentType type) override + { + return ip_ranges_map[static_cast(type)]; + } + +private: + IpAddress + ConvertToIpAddress(const IPAddr &addr) { + IpAddress address; + switch (addr.getType()) { + case IPType::V4: { + address.addr4_t = addr.getIPv4(); + address.ip_type = IP_VERSION_4; + break; + } + case IPType::V6: { + address.addr6_t = addr.getIPv6(); + address.ip_type = IP_VERSION_6; + break; + } + default: + dbgAssert(false) << "Unsupported IP type"; + } + return address; + } + + Maybe + createRangeFromStr(const string &range) + { + vector temp_params_list; + boost::split(temp_params_list, range, boost::is_any_of("-")); + + if (temp_params_list.size() == 1) { + Maybe maybe_ip = IPAddr::createIPAddr(temp_params_list[0]); + if (!maybe_ip.ok()) return genError("Could not create IP address, " + maybe_ip.getErr()); + IpAddress addr = move(ConvertToIpAddress(maybe_ip.unpackMove())); + + return move(IPRange{.start = addr, .end = addr}); + } + + if (temp_params_list.size() == 2) { + Maybe maybe_ip_min = IPAddr::createIPAddr(temp_params_list[0]); + Maybe maybe_ip_max = IPAddr::createIPAddr(temp_params_list[1]); + if (!maybe_ip_min.ok()) return genError("Could not create IP address, " + maybe_ip_min.getErr()); + if (!maybe_ip_max.ok()) return genError("Could not create IP address, " + maybe_ip_max.getErr()); + + IPAddr min_addr = maybe_ip_min.unpackMove(); + IPAddr max_addr = maybe_ip_max.unpackMove(); + if (min_addr > max_addr) return 
genError("Could not create ip range - start greater then end"); + + IpAddress addr_min = move(ConvertToIpAddress(move(min_addr))); + IpAddress addr_max = move(ConvertToIpAddress(move(max_addr))); + if (addr_max.ip_type != addr_min.ip_type) return genError("Range IP's type does not match"); + + return move(IPRange{.start = move(addr_min), .end = move(addr_max)}); + } + + return genError("Illegal range received: " + range); + } + + Maybe> + parseIpRanges(const vector &str_ip_ranges) + { + vector ip_ranges; + for (const string &range : str_ip_ranges) { + Maybe ip_range = createRangeFromStr(range); + if (!ip_range.ok()) { + return genError("Failed to parse gradual deployment IP range: " + ip_range.getErr()); + } + + ip_ranges.push_back(ip_range.unpackMove()); + } + return move(ip_ranges); + } + + Maybe + convertIpRangeToStr(const IPRange &range) + { + if (range.start.ip_type != IP_VERSION_4 && range.start.ip_type != IP_VERSION_6) { + return genError("Unknown IP type received: " + range.start.ip_type); + } + + size_t len; + int type; + const void *in_addr_min; + const void *in_addr_max; + + if (range.start.ip_type == IP_VERSION_4) { + len = INET_ADDRSTRLEN; + type = AF_INET; + in_addr_min = &range.start.ip.ipv4; + in_addr_max = &range.end.ip.ipv4; + } else { + len = INET6_ADDRSTRLEN; + type = AF_INET6; + in_addr_min = &range.start.ip.ipv6; + in_addr_max = &range.end.ip.ipv6; + } + + char str_min[len]; + inet_ntop(type, in_addr_min, str_min, len); + char str_max[len]; + inet_ntop(type, in_addr_max, str_max, len); + + string start(str_min, strnlen(str_min, len)); + string end(str_max, strnlen(str_max, len)); + + return start + "-" + end; + } + + unordered_map> ip_ranges_map; +}; + +GradualDeployment::GradualDeployment() : Component("GradualDeployment"), pimpl(make_unique()) {} + +GradualDeployment::~GradualDeployment() {} + +void GradualDeployment::init() { pimpl->init(); } diff --git a/components/gradual_deployment/gradual_deployment_ut/CMakeLists.txt b/components/gradual_deployment/gradual_deployment_ut/CMakeLists.txt new file mode 100644 index 0000000..8a9d84a --- /dev/null +++ b/components/gradual_deployment/gradual_deployment_ut/CMakeLists.txt @@ -0,0 +1,8 @@ +include_directories(${Boost_INCLUDE_DIRS}) +include_directories(${CMAKE_SOURCE_DIR}/components/include) + +add_unit_test( + gradual_deployment_ut + "gradual_deployment_ut.cc" + "singleton;rest;connkey;${RT_LIBRARY};gradual_deployment;" +) diff --git a/components/gradual_deployment/gradual_deployment_ut/gradual_deployment_ut.cc b/components/gradual_deployment/gradual_deployment_ut/gradual_deployment_ut.cc new file mode 100644 index 0000000..6204257 --- /dev/null +++ b/components/gradual_deployment/gradual_deployment_ut/gradual_deployment_ut.cc @@ -0,0 +1,127 @@ +#include "http_manager.h" + +#include +#include +#include + +#include "cptest.h" +#include "config.h" +#include "singleton.h" +#include "environment.h" +#include "rest_server.h" +#include "table.h" +#include "time_proxy.h" +#include "mainloop.h" +#include "mock/mock_rest_api.h" +#include "i_http_manager.h" +#include "gradual_deployment.h" + +using namespace std; +using namespace testing; + +class GradualDeploymentTest : public Test +{ +public: + GradualDeploymentTest() + { + EXPECT_CALL(rest, mockRestCall(RestAction::SET, "gradual-deployment-policy", _)).WillOnce( + WithArg<2>(Invoke(this, &GradualDeploymentTest::setGDPolicy)) + ); + + gradual_deployment.init(); + i_gradual_deployment = Singleton::Consume::from(gradual_deployment); + } + + bool + setGDPolicy(const unique_ptr &p) 
+ { + gradual_rest_listener = p->getRest(); + return true; + } + + unique_ptr gradual_rest_listener; + I_GradualDeployment *i_gradual_deployment; + +private: + StrictMock rest; + GradualDeployment gradual_deployment; +}; + +TEST_F(GradualDeploymentTest, getPolicyTest) +{ + stringstream is; + is << "{" + << "\"attachment_type\":\"HTTP-Manager\"," + << "\"ip_ranges\":[\"8.8.8.8\",\"9.9.9.9-10.10.10.10\"," + << "\"0:0:0:0:0:0:0:1-0:0:0:0:0:0:0:4\"" + << "]}"; + Maybe rest_call_result = gradual_rest_listener->performRestCall(is); + EXPECT_TRUE(rest_call_result.ok()); + + vector expected = {"8.8.8.8-8.8.8.8", "9.9.9.9-10.10.10.10", "::1-::4"}; + vector curr_policy = i_gradual_deployment->getPolicy(I_GradualDeployment::AttachmentType::NGINX); + EXPECT_EQ(curr_policy, expected); +} + +TEST_F(GradualDeploymentTest, MissingAttachmentType) +{ + stringstream is("{\"ip_ranges\":[\"8.8\"]}"); + Maybe rest_call_result = gradual_rest_listener->performRestCall(is); + EXPECT_FALSE(rest_call_result.ok()); + EXPECT_THAT( + rest_call_result.getErr(), + HasSubstr("Couldn't get variable attachment_type") + ); + + vector expected = {}; + vector curr_policy = i_gradual_deployment->getPolicy(I_GradualDeployment::AttachmentType::NGINX); + EXPECT_EQ(curr_policy, expected); +} + +TEST_F(GradualDeploymentTest, InvalidAttachmentType) +{ + stringstream is; + is << "{" + << "\"attachment_type\":\"unsupported-attachment-type\"," + << "\"ip_ranges\":[\"8.8.8.8\",\"9.9.9.9-10.10.10.10\"," + << "\"0:0:0:0:0:0:0:1-0:0:0:0:0:0:0:4\"" + << "]}"; + Maybe rest_call_result = gradual_rest_listener->performRestCall(is); + EXPECT_FALSE(rest_call_result.ok()); + EXPECT_THAT( + rest_call_result.getErr(), + HasSubstr( + "Failed to determine attachment type. " + "Type: unsupported-attachment-type, error: unknown attachment type" + ) + ); + + vector expected = {}; + vector curr_policy = i_gradual_deployment->getPolicy(I_GradualDeployment::AttachmentType::NGINX); + EXPECT_EQ(curr_policy, expected); +} + +TEST_F(GradualDeploymentTest, InvalidIPRanges) +{ + stringstream is; + is << "{" + << "\"attachment_type\":\"HTTP-Manager\"," + << "\"ip_ranges\":[\"8.8\"]" + << "}"; + + Maybe rest_call_result = gradual_rest_listener->performRestCall(is); + EXPECT_FALSE(rest_call_result.ok()); + EXPECT_THAT( + rest_call_result.getErr(), + HasSubstr( + "Failed to set gradual deployment policy. " + "Error: Failed to parse gradual deployment IP range: " + "Could not create IP address, String '8.8' is not a valid IPv4/IPv6 address" + ) + ); + + vector expected = {}; + vector curr_policy = i_gradual_deployment->getPolicy(I_GradualDeployment::AttachmentType::NGINX); + + EXPECT_EQ(curr_policy, expected); +} diff --git a/components/health_check_manager/CMakeLists.txt b/components/health_check_manager/CMakeLists.txt new file mode 100755 index 0000000..fd4127e --- /dev/null +++ b/components/health_check_manager/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(health_check_manager health_check_manager.cc) + +add_subdirectory(health_check_manager_ut) diff --git a/components/health_check_manager/health_check_manager.cc b/components/health_check_manager/health_check_manager.cc new file mode 100755 index 0000000..519ea63 --- /dev/null +++ b/components/health_check_manager/health_check_manager.cc @@ -0,0 +1,250 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "health_check_manager.h" + +#include +#include + +#include "health_check_status/health_check_status.h" +#include "i_rest_api.h" +#include "config.h" +#include "cereal/archives/json.hpp" +#include "customized_cereal_map.h" + +using namespace std; + +USE_DEBUG_FLAG(D_HEALTH_CHECK_MANAGER); + +class HealthCheckOnDemand : public ServerRest, Singleton::Consume +{ +public: + void + doCall() override + { + string output_path = getProfileAgentSettingWithDefault( + "/tmp/cpnano_health_check_output.txt", + "agent.healthCheck.outputTmpFilePath" + ); + ofstream health_check_output_file; + health_check_output_file.open(output_path, ofstream::out | ofstream::trunc); + + auto manager = Singleton::Consume::by(); + manager->printRepliesHealthStatus(health_check_output_file); + + health_check_output_file.close(); + } +}; + +class HealthCheckError +{ +public: + HealthCheckError(const string &comp_name, const string &error) + : + code_name(comp_name), + is_internal(true) + { + message.push_back(error); + } + + template + void + serialize(Archive &ar) + { + ar( + cereal::make_nvp("code", code_name), + cereal::make_nvp("message", message), + cereal::make_nvp("internal", is_internal) + ); + } + +private: + string code_name; + bool is_internal; + vector message; +}; + +class HealthCheckValue +{ +public: + HealthCheckValue() = default; + + HealthCheckValue(HealthCheckStatus raw_status, const map &descriptions) + : + status(raw_status) + { + for (const pair &single_stat : descriptions) { + if (single_stat.second.getStatus() == HealthCheckStatus::HEALTHY) { + dbgTrace(D_HEALTH_CHECK_MANAGER) << "Ignoring healthy status reply. 
Comp name: " << single_stat.first; + continue; + } + + for (const pair &status : single_stat.second.getExtendedStatus()) { + errors.push_back(HealthCheckError(single_stat.first + " " + status.first, status.second)); + } + } + } + + template + void + serialize(Archive &ar) + { + ar( + cereal::make_nvp("status", HealthCheckStatusReply::convertHealthCheckStatusToStr(status)), + cereal::make_nvp("errors", errors) + ); + } + +private: + HealthCheckStatus status = HealthCheckStatus::IGNORED; + vector errors; +}; + +class HealthCheckPatch : public ClientRest +{ +public: + HealthCheckPatch(HealthCheckStatus raw_status, const map &descriptions) + { + health_check = HealthCheckValue(raw_status, descriptions); + } + + C2S_LABEL_PARAM(HealthCheckValue, health_check, "healthCheck"); +}; + +class HealthCheckManager::Impl + : + Singleton::Provide::From +{ +public: + void + init() + { + auto rest = Singleton::Consume::by(); + rest->addRestCall(RestAction::SHOW, "health-check-on-demand"); + + int interval_in_seconds = + getProfileAgentSettingWithDefault(30, "agent.healthCheck.intervalInSeconds"); + + auto i_mainloop = Singleton::Consume::by(); + i_mainloop->addRecurringRoutine( + I_MainLoop::RoutineType::System, + chrono::seconds(interval_in_seconds), + [this]() { executeHealthCheck(); }, + "Health check manager periodic check" + ); + + auto is_orch = Singleton::Consume::by()->get("Is Orchestrator"); + should_patch_report = is_orch.ok() && *is_orch; + } + + HealthCheckStatus + getAggregatedStatus() + { + executeHealthCheck(); + return general_health_aggregated_status; + } + + void + printRepliesHealthStatus(ofstream &oputput_file) + { + getRegisteredComponentsHealthStatus(); + cereal::JSONOutputArchive ar(oputput_file); + ar(cereal::make_nvp("allComponentsHealthCheckReplies", all_comps_health_status)); + } + +private: + bool + sendHealthCheckPatch() + { + dbgFlow(D_HEALTH_CHECK_MANAGER); + + HealthCheckPatch patch_to_send(general_health_aggregated_status, all_comps_health_status); + auto messaging = Singleton::Consume::by(); + return messaging->sendNoReplyObject(patch_to_send, I_Messaging::Method::PATCH, "/agents"); + } + + void + getRegisteredComponentsHealthStatus() + { + vector health_check_event_reply = HealthCheckStatusEvent().query(); + all_comps_health_status.clear(); + for (const auto &reply : health_check_event_reply) { + if (reply.getStatus() != HealthCheckStatus::IGNORED) { + all_comps_health_status.emplace(reply.getCompName(), reply); + } + } + } + + void + calcGeneralHealthAggregatedStatus() + { + general_health_aggregated_status = HealthCheckStatus::HEALTHY; + + for (const pair &reply : all_comps_health_status) { + HealthCheckStatus status = reply.second.getStatus(); + + dbgTrace(D_HEALTH_CHECK_MANAGER) + << "Current aggregated status is: " + << HealthCheckStatusReply::convertHealthCheckStatusToStr( + general_health_aggregated_status + ) + << ". 
Got health status: " + << HealthCheckStatusReply::convertHealthCheckStatusToStr(status) + << "for component: " + << reply.first; + + switch (status) { + case HealthCheckStatus::UNHEALTHY : { + general_health_aggregated_status = HealthCheckStatus::UNHEALTHY; + return; + } + case HealthCheckStatus::DEGRADED : { + general_health_aggregated_status = HealthCheckStatus::DEGRADED; + break; + } + case HealthCheckStatus::IGNORED : break; + case HealthCheckStatus::HEALTHY : break; + } + } + } + + void + executeHealthCheck() + { + dbgFlow(D_HEALTH_CHECK_MANAGER) << "Collecting health status from all registered components."; + + getRegisteredComponentsHealthStatus(); + calcGeneralHealthAggregatedStatus(); + + dbgTrace(D_HEALTH_CHECK_MANAGER) + << "Aggregated status: " + << HealthCheckStatusReply::convertHealthCheckStatusToStr(general_health_aggregated_status); + + if (!should_patch_report) return; + + if (!sendHealthCheckPatch()) { + dbgWarning(D_HEALTH_CHECK_MANAGER) << "Failed to send periodic health check patch to the fog"; + } else { + dbgDebug(D_HEALTH_CHECK_MANAGER) << "Successfully sent periodic health check patch to the fog"; + }; + } + + HealthCheckStatus general_health_aggregated_status; + map all_comps_health_status; + bool should_patch_report; +}; + +HealthCheckManager::HealthCheckManager() : Component("HealthCheckManager"), pimpl(make_unique()) {} +HealthCheckManager::~HealthCheckManager() {} + +void HealthCheckManager::init() { pimpl->init(); } diff --git a/components/health_check_manager/health_check_manager_ut/CMakeLists.txt b/components/health_check_manager/health_check_manager_ut/CMakeLists.txt new file mode 100755 index 0000000..98cf82a --- /dev/null +++ b/components/health_check_manager/health_check_manager_ut/CMakeLists.txt @@ -0,0 +1,8 @@ +include_directories(${CMAKE_SOURCE_DIR}/components/include) +link_directories(${BOOST_ROOT}/lib) + +add_unit_test( + health_check_manager_ut + "health_check_manager_ut.cc" + "singleton;mainloop;health_check_manager;event_is;metric;-lboost_regex" +) diff --git a/components/health_check_manager/health_check_manager_ut/health_check_manager_ut.cc b/components/health_check_manager/health_check_manager_ut/health_check_manager_ut.cc new file mode 100755 index 0000000..a1fb42e --- /dev/null +++ b/components/health_check_manager/health_check_manager_ut/health_check_manager_ut.cc @@ -0,0 +1,219 @@ +#include "health_check_manager.h" + +#include +#include +#include +#include + +#include "health_check_status/health_check_status.h" +#include "environment.h" +#include "config.h" +#include "config_component.h" +#include "cptest.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_messaging.h" +#include "mock/mock_rest_api.h" + +using namespace std; +using namespace testing; + +USE_DEBUG_FLAG(D_HEALTH_CHECK); + +class TestHealthCheckStatusListener : public Listener +{ +public: + void upon(const HealthCheckStatusEvent &) override {} + + HealthCheckStatusReply + respond(const HealthCheckStatusEvent &) override + { + map extended_status; + extended_status["team"] = team; + extended_status["city"] = city; + HealthCheckStatusReply reply(comp_name, status, extended_status); + return reply; + } + + void setStatus(HealthCheckStatus new_status) { status = new_status; } + + string getListenerName() const { return "TestHealthCheckStatusListener"; } + +private: + static const string comp_name; + HealthCheckStatus status = HealthCheckStatus::HEALTHY; + static const string team; + static const string city; +}; + +const string TestHealthCheckStatusListener::comp_name = 
"Test"; +const string TestHealthCheckStatusListener::team = "Hapoel"; +const string TestHealthCheckStatusListener::city = "Tel-Aviv"; + +class TestEnd {}; + +class HealthCheckManagerTest : public Test +{ +public: + HealthCheckManagerTest() + { + Debug::setNewDefaultStdout(&debug_output); + Debug::setUnitTestFlag(D_HEALTH_CHECK, Debug::DebugLevel::INFO); + + EXPECT_CALL(mock_ml, addRecurringRoutine(_, _, _, _, _)).WillRepeatedly( + DoAll(SaveArg<2>(&health_check_periodic_routine), Return(1)) + ); + + EXPECT_CALL(mock_rest, mockRestCall(RestAction::ADD, "declare-boolean-variable", _)).WillOnce(Return(true)); + + EXPECT_CALL(mock_rest, mockRestCall(RestAction::SHOW, "health-check-on-demand", _)).WillOnce( + WithArg<2>(Invoke(this, &HealthCheckManagerTest::setHealthCheckOnDemand)) + ); + + env.preload(); + event_listener.registerListener(); + + env.init(); + + ScopedContext ctx; + ctx.registerValue("Is Orchestrator", true); + + health_check_manager.init(); + i_health_check_manager = Singleton::Consume::from(health_check_manager); + } + + ~HealthCheckManagerTest() + { + env.fini(); + Debug::setNewDefaultStdout(&cout); + } + + bool + setHealthCheckOnDemand(const unique_ptr &rest_ptr) + { + health_check_server = rest_ptr->getRest(); + return true; + } + + I_MainLoop::Routine health_check_periodic_routine; + StrictMock mock_ml; + StrictMock mock_rest; + StrictMock mock_message; + stringstream debug_output; + ConfigComponent config; + Config::I_Config *i_config = nullptr; + ::Environment env; + HealthCheckManager health_check_manager; + I_Health_Check_Manager *i_health_check_manager; + unique_ptr health_check_server; + TestHealthCheckStatusListener event_listener; +}; + +TEST_F(HealthCheckManagerTest, runPeriodicHealthCheckTest) +{ + string actual_body; + EXPECT_CALL( + mock_message, + sendMessage( + false, + _, + I_Messaging::Method::PATCH, + "/agents", + "", + _, + _, + MessageTypeTag::GENERIC + ) + ).Times(4).WillRepeatedly(DoAll(SaveArg<1>(&actual_body), Return(string()))); + + try { + health_check_periodic_routine(); + } catch (const TestEnd &t) {} + + HealthCheckStatus aggregated_status = i_health_check_manager->getAggregatedStatus(); + string aggregated_status_str = HealthCheckStatusReply::convertHealthCheckStatusToStr(aggregated_status); + + string expected_healthy_body( + "{\n" + " \"healthCheck\": {\n" + " \"status\": \"Healthy\",\n" + " \"errors\": []\n" + " }\n" + "}" + ); + EXPECT_EQ(actual_body, expected_healthy_body); + EXPECT_EQ("Healthy", aggregated_status_str); + + event_listener.setStatus(HealthCheckStatus::DEGRADED); + try { + health_check_periodic_routine(); + } catch (const TestEnd &t) {} + + aggregated_status = i_health_check_manager->getAggregatedStatus(); + aggregated_status_str = HealthCheckStatusReply::convertHealthCheckStatusToStr(aggregated_status); + + string expected_degraded_body( + "{\n" + " \"healthCheck\": {\n" + " \"status\": \"Degraded\",\n" + " \"errors\": [\n" + " {\n" + " \"code\": \"Test city\",\n" + " \"message\": [\n" + " \"Tel-Aviv\"\n" + " ],\n" + " \"internal\": true\n" + " },\n" + " {\n" + " \"code\": \"Test team\",\n" + " \"message\": [\n" + " \"Hapoel\"\n" + " ],\n" + " \"internal\": true\n" + " }\n" + " ]\n" + " }\n" + "}" + ); + EXPECT_EQ(actual_body, expected_degraded_body); + EXPECT_EQ("Degraded", aggregated_status_str); +} + +TEST_F(HealthCheckManagerTest, runOnDemandHealthCheckTest) +{ + const vector health_check{""}; + CPTestTempfile health_check_tmp_file(health_check); + + string config_json = + "{" + " \"agentSettings\": [\n" + " {\n" + " 
\"id\": \"yallaHapoel\",\n" + " \"key\": \"agent.healthCheck.outputTmpFilePath\",\n" + " \"value\": \"" + health_check_tmp_file.fname + "\"\n" + " }]\n" + "}"; + + istringstream ss(config_json); + config.preload(); + Singleton::Consume::from(config)->loadConfiguration(ss); + + stringstream is; + is << "{}"; + health_check_server->performRestCall(is); + + string expected_status = + "{\n" + " \"allComponentsHealthCheckReplies\": {\n" + " \"Test\": {\n" + " \"status\": \"Healthy\",\n" + " \"extendedStatus\": {\n" + " \"city\": \"Tel-Aviv\",\n" + " \"team\": \"Hapoel\"\n" + " }\n" + " }\n" + " }\n" + "}"; + + string health_check_res = health_check_tmp_file.readFile(); + EXPECT_EQ(health_check_res, expected_status); +} diff --git a/components/http_manager/CMakeLists.txt b/components/http_manager/CMakeLists.txt new file mode 100644 index 0000000..095fdfa --- /dev/null +++ b/components/http_manager/CMakeLists.txt @@ -0,0 +1 @@ +add_library(http_manager_comp http_manager.cc http_manager_opaque.cc ) diff --git a/components/http_manager/http_manager.cc b/components/http_manager/http_manager.cc new file mode 100755 index 0000000..5c380a2 --- /dev/null +++ b/components/http_manager/http_manager.cc @@ -0,0 +1,379 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "http_manager.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "config.h" +#include "table_opaque.h" +#include "http_manager_opaque.h" +#include "log_generator.h" +#include "sasal.h" +#include "http_inspection_events.h" + +SASAL_START // HTTP Manager + +USE_DEBUG_FLAG(D_HTTP_MANAGER); + +using namespace std; + +static ostream & +operator<<(ostream &os, const EventVerdict &event) +{ + switch (event.getVerdict()) { + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT: return os << "Inspect"; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT: return os << "Accept"; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP: return os << "Drop"; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT: return os << "Inject"; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT: return os << "Irrelevant"; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_RECONF: return os << "Reconf"; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT: return os << "Wait"; + } + + dbgAssert(false) << "Illegal Event Verdict value: " << static_cast(event.getVerdict()); + return os; +} + +class HttpManager::Impl + : + Singleton::Provide::From +{ +public: + void + init() + { + dbgFlow(D_HTTP_MANAGER); + + i_transaction_table = Singleton::Consume::by(); + + Singleton::Consume::by()->addGeneralModifier(compressAppSecLogs); + } + + FilterVerdict + inspect(const HttpTransactionData &event) override + { + if (!i_transaction_table->createState()) { + dbgWarning(D_HTTP_MANAGER) << "Failed to create new transaction table state - Returning default verdict."; + return FilterVerdict(default_verdict); + } + + ScopedContext ctx; + ctx.registerValue(app_sec_marker_key, i_transaction_table->keyToString(), EnvKeyAttr::LogSection::MARKER); + + return handleEvent(NewHttpTransactionEvent(event).performNamedQuery()); + } + + FilterVerdict + inspect(const HttpHeader &event, bool is_request) override + { + if (!i_transaction_table->hasState()) { + dbgWarning(D_HTTP_MANAGER) << "Transaction state was not found - Returning default verdict."; + return FilterVerdict(default_verdict); + } + + ScopedContext ctx; + ctx.registerValue(app_sec_marker_key, i_transaction_table->keyToString(), EnvKeyAttr::LogSection::MARKER); + + auto event_responds = + is_request ? 
+ HttpRequestHeaderEvent(event).performNamedQuery() : + HttpResponseHeaderEvent(event).performNamedQuery(); + FilterVerdict verdict = handleEvent(event_responds); + if (verdict.getVerdict() == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT) { + applyInjectionModifications(verdict, event_responds, event.getHeaderIndex()); + } + return verdict; + } + + FilterVerdict + inspect(const HttpBody &event, bool is_request) override + { + if (!i_transaction_table->hasState()) { + dbgWarning(D_HTTP_MANAGER) << "Transaction state was not found - Returning default verdict."; + return FilterVerdict(default_verdict); + } + + ngx_http_cp_verdict_e body_size_limit_verdict = handleBodySizeLimit(is_request, event); + if (body_size_limit_verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT) { + return FilterVerdict(body_size_limit_verdict); + } + + HttpManagerOpaque &state = i_transaction_table->getState(); + + ScopedContext ctx; + ctx.registerValue(app_sec_marker_key, i_transaction_table->keyToString(), EnvKeyAttr::LogSection::MARKER); + + FilterVerdict verdict(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT); + if (!is_request && event.getData().size() == 0 && !event.isLastChunk()) { + dbgDebug(D_HTTP_MANAGER) << "Skipping inspection of first empty chunk for respond body"; + return verdict; + } + + auto event_responds = + is_request ? + HttpRequestBodyEvent(event, state.getPreviousDataCache()).performNamedQuery() : + HttpResponseBodyEvent(event, state.getPreviousDataCache()).performNamedQuery(); + verdict = handleEvent(event_responds); + state.saveCurrentDataToCache(event.getData()); + if (verdict.getVerdict() == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT) { + applyInjectionModifications(verdict, event_responds, event.getBodyChunkIndex()); + } + return verdict; + } + + FilterVerdict + inspect(const ResponseCode &event) override + { + if (!i_transaction_table->hasState()) { + dbgWarning(D_HTTP_MANAGER) << "Transaction state was not found - Returning default verdict."; + return FilterVerdict(default_verdict); + } + + ScopedContext ctx; + ctx.registerValue(app_sec_marker_key, i_transaction_table->keyToString(), EnvKeyAttr::LogSection::MARKER); + + return handleEvent(ResponseCodeEvent(event).performNamedQuery()); + } + + FilterVerdict + inspectEndRequest() override + { + if (!i_transaction_table->hasState()) { + dbgWarning(D_HTTP_MANAGER) << "Transaction state was not found - Returning default verdict."; + return FilterVerdict(default_verdict); + } + + HttpManagerOpaque &state = i_transaction_table->getState(); + state.resetPayloadSize(); + + ScopedContext ctx; + ctx.registerValue(app_sec_marker_key, i_transaction_table->keyToString(), EnvKeyAttr::LogSection::MARKER); + + return handleEvent(EndRequestEvent().performNamedQuery()); + } + + FilterVerdict + inspectEndTransaction() override + { + if (!i_transaction_table->hasState()) { + dbgWarning(D_HTTP_MANAGER) << "Transaction state was not found - Returning default verdict."; + return FilterVerdict(default_verdict); + } + + HttpManagerOpaque &state = i_transaction_table->getState(); + state.resetPayloadSize(); + + ScopedContext ctx; + ctx.registerValue(app_sec_marker_key, i_transaction_table->keyToString(), EnvKeyAttr::LogSection::MARKER); + + return handleEvent(EndTransactionEvent().performNamedQuery()); + } + + FilterVerdict + inspectDelayedVerdict() override + { + if (!i_transaction_table->hasState()) { + dbgWarning(D_HTTP_MANAGER) << "Transaction state was not found - Returning default verdict."; + return FilterVerdict(default_verdict); + } + + 
ScopedContext ctx; + ctx.registerValue(app_sec_marker_key, i_transaction_table->keyToString(), EnvKeyAttr::LogSection::MARKER); + + return handleEvent(WaitTransactionEvent().performNamedQuery()); + } + + void + sendPolicyLog() + { + LogGen( + "Web AppSec Policy Loaded Successfully", + ReportIS::Audience::SECURITY, + ReportIS::Severity::LOW, + ReportIS::Priority::LOW, + ReportIS::Tags::THREAT_PREVENTION + ); + } + +private: + ngx_http_cp_verdict_e + handleBodySizeLimit(bool is_request_body_type, const HttpBody &event) + { + HttpManagerOpaque &state = i_transaction_table->getState(); + state.updatePayloadSize(event.getData().size()); + + auto size_limit = getConfiguration( + "HTTP manager", + is_request_body_type ? "Max Request Body Size" : "Max Response Body Size" + ); + + string size_limit_verdict = getConfigurationWithDefault( + "Accept", + "HTTP manager", + is_request_body_type ? "Request Size Limit Verdict" : "Response Size Limit Verdict" + ); + + if (!size_limit.ok() || state.getAggeregatedPayloadSize() < size_limit.unpack()) { + return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT; + } + + ngx_http_cp_verdict_e verdict = size_limit_verdict == "Drop" ? + ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP : + ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT; + + dbgDebug(D_HTTP_MANAGER) + << "Transaction body size is over the limit. Max body size: " + << size_limit.unpack() + << ", Returned verdict: " + << size_limit_verdict + << "."; + + state.setManagerVerdict(verdict); + return verdict; + } + + static void + applyInjectionModifications( + FilterVerdict &verdict, + const vector> &event_responds, + ModifiedChunkIndex event_idx) + { + for (const pair &respond : event_responds) { + if (respond.second.getVerdict() == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT) { + dbgTrace(D_HTTP_MANAGER) + << "Applying inject verdict modifications for security App: " + << respond.first; + verdict.addModifications(respond.second.getModifications(), event_idx); + } + } + } + + FilterVerdict + handleEvent(const vector> &event_responds) + { + HttpManagerOpaque &state = i_transaction_table->getState(); + + for (const pair &respond : event_responds) { + if (state.getApplicationsVerdict(respond.first) == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT) { + dbgTrace(D_HTTP_MANAGER) + << "Skipping event verdict for app that already accepted traffic. 
App: " + << respond.first; + continue; + } + + dbgTrace(D_HTTP_MANAGER) + << "Security app " + << respond.first + << " returned verdict " + << respond.second.getVerdict(); + + state.setApplicationVerdict(respond.first, respond.second.getVerdict()); + } + + return state.getCurrVerdict(); + } + + static void + compressAppSecLogs(LogBulkRest &bulk) + { + dbgTrace(D_HTTP_MANAGER) << "Starting to reduce logs"; + + map app_sec_logs_by_key; + + for (const auto &log : bulk) { + auto &markers = log.getMarkers(); + auto appsec_marker = markers.find(app_sec_marker_key); + if (appsec_marker != markers.end()) app_sec_logs_by_key[appsec_marker->second]++; + } + + for (const auto &specific_set_of_logs : app_sec_logs_by_key) { + if (specific_set_of_logs.second > 1) reduceLogs(bulk, specific_set_of_logs.first); + } + + dbgTrace(D_HTTP_MANAGER) << "Finished logs reduction"; + } + + static void + reduceLogs(LogBulkRest &bulk, const string ¤t_id) + { + dbgTrace(D_HTTP_MANAGER) << "Reducing logs for marker " << current_id; + + vector::iterator> relevent_logs; + vector::iterator keep_log = bulk.end(); + for (auto curr_log = bulk.begin(); curr_log != bulk.end(); ++curr_log) { + if (isRelevantLog(curr_log, current_id)) { + relevent_logs.push_back(curr_log); + if (keep_log == bulk.end() || (isPreventLog(curr_log) && !isPreventLog(keep_log))) keep_log = curr_log; + } + } + + dbgTrace(D_HTTP_MANAGER) << "Found " << relevent_logs.size() << " logs that match marker " << current_id; + + // Reverse iteration to avoid iterator invalidation + for (auto iter = relevent_logs.rbegin(); iter != relevent_logs.rend(); ++iter) { + if (*iter != keep_log) bulk.erase(*iter); + } + + dbgTrace(D_HTTP_MANAGER) << "Finished going over maker " << current_id; + } + + static bool + isRelevantLog(const vector::iterator &log, const string ¤t_id) + { + const auto &markers = log->getMarkers(); + auto app_sec_marker = markers.find(app_sec_marker_key); + if (app_sec_marker == markers.end()) return false; + return app_sec_marker->second == current_id; + } + + static bool + isPreventLog(const vector::iterator &log) + { + auto res = log->getStringData("securityAction"); + return res.ok() && *res == "Prevent"; + } + + I_Table *i_transaction_table; + static const ngx_http_cp_verdict_e default_verdict; + static const string app_sec_marker_key; +}; + +const ngx_http_cp_verdict_e HttpManager::Impl::default_verdict(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP); +const string HttpManager::Impl::app_sec_marker_key = "app_sec_marker"; + +HttpManager::HttpManager() : Component("HttpManager"), pimpl(make_unique()) {} +HttpManager::~HttpManager() {} + +void HttpManager::init() { pimpl->init(); } + +void +HttpManager::preload() +{ + registerExpectedConfiguration("HTTP manager", "Previous Buffer Cache size"); + registerExpectedConfiguration("HTTP manager", "Max Request Body Size"); + registerExpectedConfiguration("HTTP manager", "Max Response Body Size"); + registerExpectedConfiguration("HTTP manager", "Request Size Limit Verdict"); + registerExpectedConfiguration("HTTP manager", "Response Size Limit Verdict"); + registerConfigLoadCb([this] () { pimpl->sendPolicyLog(); }); +} + +SASAL_END diff --git a/components/http_manager/http_manager_opaque.cc b/components/http_manager/http_manager_opaque.cc new file mode 100644 index 0000000..9549b18 --- /dev/null +++ b/components/http_manager/http_manager_opaque.cc @@ -0,0 +1,103 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "http_manager_opaque.h" + +#include "config.h" +#include "sasal.h" + +SASAL_START // HTTP Manager - Transaction data + +using namespace std; + +USE_DEBUG_FLAG(D_HTTP_MANAGER); + +HttpManagerOpaque::HttpManagerOpaque() + : + TableOpaqueSerialize(this), + prev_data_cache() +{ +} + +void +HttpManagerOpaque::setApplicationVerdict(const string &app_name, ngx_http_cp_verdict_e verdict) +{ + applications_verdicts[app_name] = verdict; +} + +ngx_http_cp_verdict_e +HttpManagerOpaque::getApplicationsVerdict(const string &app_name) const +{ + auto verdict = applications_verdicts.find(app_name); + return verdict == applications_verdicts.end() ? ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT : verdict->second; +} + +ngx_http_cp_verdict_e +HttpManagerOpaque::getCurrVerdict() const +{ + if (manager_verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP) { + return manager_verdict; + } + + uint accepted_apps = 0; + ngx_http_cp_verdict_e verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT; + for (const pair &app_verdic_pair : applications_verdicts) { + switch (app_verdic_pair.second) { + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP: + return app_verdic_pair.second; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT: + verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT; + break; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT: + accepted_apps++; + break; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT: + break; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT: + dbgTrace(D_HTTP_MANAGER) << "Verdict 'Irrelevant' is not yet supported. Returning Accept"; + accepted_apps++; + break; + case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT: + verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT; + break; + default: + dbgAssert(false) + << "Received unknown verdict " + << static_cast(app_verdic_pair.second); + } + } + + return accepted_apps == applications_verdicts.size() ? ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT : verdict; +} + +void +HttpManagerOpaque::saveCurrentDataToCache(const Buffer &full_data) +{ + uint data_cache_size = getConfigurationWithDefault(0, "HTTP manager", "Previous Buffer Cache size"); + if (data_cache_size == 0) { + prev_data_cache.clear(); + return; + } + prev_data_cache = full_data.getSubBuffer( + full_data.size() <= data_cache_size ? 0 : full_data.size() - data_cache_size, + full_data.size() + ); +} + +void +HttpManagerOpaque::updatePayloadSize(const uint curr_payload_size) +{ + aggregated_payload_size += curr_payload_size; +} + +SASAL_END diff --git a/components/http_manager/http_manager_opaque.h b/components/http_manager/http_manager_opaque.h new file mode 100644 index 0000000..5a11f62 --- /dev/null +++ b/components/http_manager/http_manager_opaque.h @@ -0,0 +1,55 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __HTTP_MANAGER_OPAQUE_H__ +#define __HTTP_MANAGER_OPAQUE_H__ + +#include + +#include "buffer.h" +#include "table_opaque.h" +#include "nginx_attachment_common.h" + +class HttpManagerOpaque : public TableOpaqueSerialize +{ +public: + HttpManagerOpaque(); + + void setApplicationVerdict(const std::string &app_name, ngx_http_cp_verdict_e verdict); + ngx_http_cp_verdict_e getApplicationsVerdict(const std::string &app_name) const; + void setManagerVerdict(ngx_http_cp_verdict_e verdict) { manager_verdict = verdict; } + ngx_http_cp_verdict_e getManagerVerdict() const { return manager_verdict; } + ngx_http_cp_verdict_e getCurrVerdict() const; + void saveCurrentDataToCache(const Buffer &full_data); + const Buffer & getPreviousDataCache() const { return prev_data_cache; } + uint getAggeregatedPayloadSize() const { return aggregated_payload_size; } + void updatePayloadSize(const uint curr_payload); + void resetPayloadSize() { aggregated_payload_size = 0; } + +// LCOV_EXCL_START - sync functions, can only be tested once the sync module exists + template void serialize(T &ar, uint) { ar(applications_verdicts, prev_data_cache); } + static std::unique_ptr prototype() { return std::make_unique(); } +// LCOV_EXCL_STOP + + static const std::string name() { return "HttpTransactionData"; } + static uint currVer() { return 0; } + static uint minVer() { return 0; } + +private: + std::unordered_map applications_verdicts; + ngx_http_cp_verdict_e manager_verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT; + Buffer prev_data_cache; + uint aggregated_payload_size = 0; +}; + +#endif // __HTTP_MANAGER_OPAQUE_H__ diff --git a/components/http_transaction_data/CMakeLists.txt b/components/http_transaction_data/CMakeLists.txt new file mode 100755 index 0000000..5c12efc --- /dev/null +++ b/components/http_transaction_data/CMakeLists.txt @@ -0,0 +1,5 @@ +add_definitions(-DUSERSPACE) + +add_library(http_transaction_data http_transaction_data.cc) + +add_subdirectory(http_transaction_data_ut) diff --git a/components/http_transaction_data/http_transaction_data.cc b/components/http_transaction_data/http_transaction_data.cc new file mode 100644 index 0000000..6c73ddc --- /dev/null +++ b/components/http_transaction_data/http_transaction_data.cc @@ -0,0 +1,265 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "http_transaction_data.h" + +#include +#include +#include + +#include "enum_array.h" +#include "sasal.h" +#include "buffer.h" +#include "nginx_attachment_common.h" + +SASAL_START // HTTP Manager - Transaction data + +using namespace std; + +USE_DEBUG_FLAG(D_NGINX_ATTACHMENT); + +enum class ETransactionData { + HTTP_PROTO, + METHOD, + HOST_NAME, + LISTENING_IP, + LISTENING_PORT, + URI, + CLIENT_IP, + CLIENT_PORT, + + COUNT +}; + +const string HttpTransactionData::http_proto_ctx = "transaction_http_proto"; +const string HttpTransactionData::method_ctx = "transaction_method"; +const string HttpTransactionData::host_name_ctx = "transaction_host_name"; +const string HttpTransactionData::listening_ip_ctx = "transaction_listening_ip"; +const string HttpTransactionData::listening_port_ctx = "transaction_listening_port"; +const string HttpTransactionData::uri_ctx = "transaction_uri"; +const string HttpTransactionData::uri_path_decoded = "transaction_uri_path_decoded"; +const string HttpTransactionData::uri_query_decoded = "transaction_uri_query_decoded"; +const string HttpTransactionData::client_ip_ctx = "transaction_client_ip"; +const string HttpTransactionData::client_port_ctx = "transaction_client_port"; +const string HttpTransactionData::req_headers = "transaction_request_headers"; +const string HttpTransactionData::req_body = "transaction_request_body"; +const string HttpTransactionData::source_identifier = "sourceIdentifiers"; +const string HttpTransactionData::proxy_ip_ctx = "proxy_ip"; + +const CompressionType HttpTransactionData::default_response_content_encoding = CompressionType::NO_COMPRESSION; + +Maybe +deserializeUintParam(const Buffer &data, uint &cur_pos) +{ + // Int value is encoded in binary form + auto value = data.getTypePtr(cur_pos); + + if (!value.ok()) { + return genError("Failed to get Uint param " + value.getErr()); + } + + cur_pos += sizeof(uint16_t); + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully parsed the number parameter. Value: " << *(value.unpack()); + + return *(value.unpack()); +} + +Maybe +deserializeStrParam(const Buffer &data, uint &cur_pos) +{ + //String is encoded by 16-bit uint representing length followed by const c-type string data bytes + Maybe str_size = deserializeUintParam(data, cur_pos); + if (!str_size.ok()) return genError("Could Not parse string size value: " + str_size.getErr()); + + dbgTrace(D_NGINX_ATTACHMENT) + << "Deserializing string parameter. Current position: " + << cur_pos + << ", String size: " + << *str_size; + + string res = ""; + if (*str_size > 0) { + auto value = data.getPtr(cur_pos, *str_size); + if (!value.ok()) { + return genError("Failed to get String param " + value.getErr()); + } + + const u_char *ptr = value.unpack(); + res = string(reinterpret_cast(ptr), *str_size); + } + dbgTrace(D_NGINX_ATTACHMENT) + << "Successfully parsed string parameter. 
Result: " + << res + << ", Length: " + << to_string(*str_size); + + cur_pos += *str_size; + + return move(res); +} + +Maybe +deserializeIpAddrParam(const Buffer &data, uint &cur_pos) +{ + Maybe str_value = deserializeStrParam(data, cur_pos); + if (!str_value.ok()) return str_value.passErr(); + + Maybe ip = IPAddr::createIPAddr(str_value.unpackMove()); + if (!ip.ok()) return genError("Could not parse IP Address: " + ip.getErr()); + + return move(ip.unpackMove()); +} + +Maybe +HttpTransactionData::createTransactionData(const Buffer &transaction_raw_data) +{ + // Deserialize TransactionData from binary blob sent from attachment + uint cur_pos = 0; + + dbgTrace(D_NGINX_ATTACHMENT) + << "Parsing buffer " + << dumpHex(transaction_raw_data) + << " of size " + << transaction_raw_data.size(); + + Maybe http_protocol = deserializeStrParam(transaction_raw_data, cur_pos); + if (!http_protocol.ok()) { + return genError("Could not deserialize HTTP protocol: " + http_protocol.getErr()); + } else { + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully deserialized HTTP protocol: " << http_protocol.unpack(); + } + + Maybe http_method = deserializeStrParam(transaction_raw_data, cur_pos); + if (!http_method.ok()) { + return genError("Could not deserialize HTTP method: " + http_method.getErr()); + } else { + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully deserialized HTTP method: " << http_method.unpack(); + } + + Maybe host_name = deserializeStrParam(transaction_raw_data, cur_pos); + if (!host_name.ok()) { + return genError("Could not deserialize host name: " + host_name.getErr()); + } else { + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully deserialized host name: " << host_name.unpack(); + } + + Maybe listening_addr = deserializeIpAddrParam(transaction_raw_data, cur_pos); + if (!listening_addr.ok()) { + return genError("Could not deserialize listening address: " + listening_addr.getErr()); + } else { + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully deserialized listening address: " << listening_addr.unpack(); + } + + Maybe listening_port = deserializeUintParam(transaction_raw_data, cur_pos); + if (!listening_port.ok()) { + return genError("Could not deserialize listening port: " + listening_port.getErr()); + } else { + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully deserialized listening port: " << listening_port.unpack(); + } + + Maybe uri = deserializeStrParam(transaction_raw_data, cur_pos); + if (!uri.ok()) { + return genError("Could not deserialize URI: " + uri.getErr()); + } else { + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully deserialized URI: " << uri.unpack(); + } + + Maybe client_addr = deserializeIpAddrParam(transaction_raw_data, cur_pos); + if (!client_addr.ok()) { + return genError("Could not deserialize client address: " + client_addr.getErr()); + } else { + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully deserialized client address: " << client_addr.unpack(); + } + + Maybe client_port = deserializeUintParam(transaction_raw_data, cur_pos); + if (!client_port.ok()) { + return genError("Could not deserialize client port: " + client_port.getErr()); + } else { + dbgTrace(D_NGINX_ATTACHMENT) << "Successfully deserialized client port: " << client_port.unpack(); + } + + // Fail if after parsing exact number of items, we didn't exactly consume whole buffer + if (cur_pos != transaction_raw_data.size()) { + dbgWarning(D_NGINX_ATTACHMENT) << "Nothing to deserialize, but raw data still remain"; + return genError("Finished deserialization and raw data still exist - Probably corrupted buffer."); + } + + 
HttpTransactionData transaction( + http_protocol.unpackMove(), + http_method.unpackMove(), + host_name.unpackMove(), + listening_addr.unpackMove(), + listening_port.unpackMove(), + uri.unpackMove(), + client_addr.unpackMove(), + client_port.unpackMove() + ); + + return move(transaction); +} + +HttpTransactionData::HttpTransactionData ( + string _http_proto, + string _method, + string _host_name, + IPAddr _listening_ip, + uint16_t _listening_port, + string _uri, + IPAddr _client_ip, + uint16_t _client_port +) + : + http_proto(move(_http_proto)), + method(move(_method)), + host_name(move(_host_name)), + listening_ip(move(_listening_ip)), + listening_port(move(_listening_port)), + uri(move(_uri)), + client_ip(move(_client_ip)), + client_port(move(_client_port)), + is_request(true), + response_content_encoding(default_response_content_encoding) +{ +} + +HttpTransactionData::HttpTransactionData() + : + HttpTransactionData::HttpTransactionData( + "", + "GET", + "", + IPAddr(), + -1, + "", + IPAddr(), + -1 + ) +{} + +void +HttpTransactionData::print(ostream &out_stream) const +{ + out_stream << http_proto << " " << method << endl; + out_stream << "From: " << client_ip << ":" << client_port << endl; + out_stream << "To: " + << host_name + << uri + << " (listening on " + << listening_ip + << ":" + << listening_port + << ")" + << endl; +} + +SASAL_END diff --git a/components/http_transaction_data/http_transaction_data_ut/CMakeLists.txt b/components/http_transaction_data/http_transaction_data_ut/CMakeLists.txt new file mode 100755 index 0000000..a3b8f9e --- /dev/null +++ b/components/http_transaction_data/http_transaction_data_ut/CMakeLists.txt @@ -0,0 +1,7 @@ +include_directories(${CMAKE_SOURCE_DIR}/components/include) + +add_unit_test( + http_transaction_data_ut + "http_transaction_data_ut.cc" + "http_transaction_data;http_transaction_data;connkey;${RT_LIBRARY}" +) diff --git a/components/http_transaction_data/http_transaction_data_ut/http_transaction_data_ut.cc b/components/http_transaction_data/http_transaction_data_ut/http_transaction_data_ut.cc new file mode 100644 index 0000000..f090b73 --- /dev/null +++ b/components/http_transaction_data/http_transaction_data_ut/http_transaction_data_ut.cc @@ -0,0 +1,127 @@ +#include "http_transaction_data.h" + +#include +#include + +#include "cptest.h" + +using namespace std; +using namespace testing; + +Buffer +encodeInt16(uint16_t val) +{ + vector raw_data(reinterpret_cast(&val), reinterpret_cast(&val) + sizeof(uint16_t)); + return move(Buffer(raw_data)); +} + +class HttpTransactionTest : public Test +{ +public: + Buffer + createValidBuf() + { + Buffer protocol_length = Buffer(encodeInt16(strlen("HTTP/1.1"))); + + return + protocol_length + + Buffer("HTTP/1.1") + + encodeInt16(3) + + Buffer("GET") + + encodeInt16(9) + + Buffer("localhost") + + encodeInt16(7) + + Buffer("0.0.0.0") + + encodeInt16(443) + + encodeInt16(10) + + Buffer("/user-app/") + + encodeInt16(9) + + Buffer("127.0.0.1") + + encodeInt16(47423); + } + + Buffer + createBadVerBuf() + { + Buffer protocol_length = Buffer(encodeInt16(strlen("HTTP/1.1"))); + + return + protocol_length + + Buffer("HTTP/1"); + } + + Buffer + createBadAddressBuf() + { + Buffer protocol_length = Buffer(encodeInt16(strlen("HTTP/1.1"))); + + return + protocol_length + + Buffer("HTTP/1.1") + + encodeInt16(3) + + Buffer("GET") + + encodeInt16(9) + + Buffer("localhost") + + encodeInt16(14) + + Buffer("this.is.not.IP") + + encodeInt16(443) + + encodeInt16(10) + + Buffer("/user-app/") + + encodeInt16(9) + + 
Buffer("127.0.0.1") + + encodeInt16(47423); + } +}; + +TEST_F(HttpTransactionTest, TestEmptyTransactionData) +{ + HttpTransactionData data; + stringstream data_stream; + data.print(data_stream); + string data_string( + " GET\nFrom: Uninitialized IP address:65535\nTo: (listening on Uninitialized IP address:65535)\n" + ); + EXPECT_EQ(data_stream.str(), data_string); +} + +TEST_F(HttpTransactionTest, TestTransactionDataFromBuf) +{ + HttpTransactionData data = HttpTransactionData::createTransactionData(createValidBuf()).unpack(); + stringstream data_stream; + data.print(data_stream); + string data_string( + "HTTP/1.1 GET\nFrom: 127.0.0.1:47423\nTo: localhost/user-app/ (listening on 0.0.0.0:443)\n" + ); + EXPECT_EQ(data_stream.str(), data_string); + + EXPECT_EQ(data.getSourceIP(), IPAddr::createIPAddr("127.0.0.1").unpack()); + EXPECT_EQ(data.getSourcePort(), 47423); + EXPECT_EQ(data.getListeningIP(), IPAddr::createIPAddr("0.0.0.0").unpack()); + EXPECT_EQ(data.getListeningPort(), 443); + EXPECT_EQ(data.getDestinationHost(), "localhost"); + EXPECT_EQ(data.getHttpProtocol(), "HTTP/1.1"); + EXPECT_EQ(data.getURI(), "/user-app/"); + EXPECT_EQ(data.getHttpMethod(), "GET"); +} + +TEST_F(HttpTransactionTest, TestTransactionDataBadVer) +{ + auto data = HttpTransactionData::createTransactionData(createBadVerBuf()); + ASSERT_FALSE(data.ok()); + EXPECT_EQ( + data.getErr(), + "Could not deserialize HTTP protocol: " + "Failed to get String param Cannot get internal pointer beyond the buffer limits" + ); +} + +TEST_F(HttpTransactionTest, TestTransactionDataBadAddress) +{ + auto data = HttpTransactionData::createTransactionData(createBadAddressBuf()); + ASSERT_FALSE(data.ok()); + EXPECT_EQ( + data.getErr(), + "Could not deserialize listening address: " + "Could not parse IP Address: String 'this.is.not.IP' is not a valid IPv4/IPv6 address" + ); +} diff --git a/components/include/WaapEnums.h b/components/include/WaapEnums.h new file mode 100755 index 0000000..7319024 --- /dev/null +++ b/components/include/WaapEnums.h @@ -0,0 +1,71 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __WAAP_ENUMS_H__ +#define __WAAP_ENUMS_H__ + +#include +#include + +#define NO_THREAT_FINAL_SCORE 0.0 +#define INFO_THREAT_THRESHOLD 1.0 +#define LOW_THREAT_THRESHOLD 3.0 +#define MED_THREAT_THRESHOLD 6.0 +#define MAX_FINAL_SCORE 10.0 +#define ATTACK_IN_PARAM "attack_in_param" + +enum ThreatLevel { + NO_THREAT = 0, + THREAT_INFO, + LOW_THREAT, + MEDIUM_THREAT, + HIGH_THREAT +}; + +enum BlockType { + NOT_BLOCKING, + FORCE_EXCEPTION, + FORCE_BLOCK, + API_BLOCK, + BOT_BLOCK, + WAF_BLOCK, + CSRF_BLOCK, + LIMIT_BLOCK +}; + +enum ParamType { + UNKNOWN_PARAM_TYPE, + HTML_PARAM_TYPE, + URL_PARAM_TYPE, + FREE_TEXT_PARAM_TYPE, + PIPE_PARAM_TYPE, + LONG_RANDOM_TEXT_PARAM_TYPE, + BASE64_PARAM_TYPE, + ADMINISTRATOR_CONFIG_PARAM_TYPE, + FILE_PATH_PARAM_TYPE, + SEMICOLON_DELIMITED_PARAM_TYPE, + ASTERISK_DELIMITED_PARAM_TYPE, + COMMA_DELIMITED_PARAM_TYPE, + AMPERSAND_DELIMITED_PARAM_TYPE, + BINARY_PARAM_TYPE, + PARAM_TYPE_COUNT +}; + +namespace std { + template<> + struct hash + { + std::size_t operator()(const ParamType& type) const noexcept { return (size_t)type; } + }; +} +#endif diff --git a/components/include/attachment_registrator.h b/components/include/attachment_registrator.h new file mode 100755 index 0000000..67e97d1 --- /dev/null +++ b/components/include/attachment_registrator.h @@ -0,0 +1,47 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __ATTACHMENT_REGISTRATOR_H__ +#define __ATTACHMENT_REGISTRATOR_H__ + +#include "singleton.h" +#include "i_mainloop.h" +#include "i_shell_cmd.h" +#include "i_socket_is.h" +#include "attachment_types.h" +#include "component.h" + +#define default_keep_alive_path "/etc/cp/attachmentRegistrator/expiration-socket" + +class AttachmentRegistrator + : + public Component, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + AttachmentRegistrator(); + ~AttachmentRegistrator(); + + void preload(); + + void init(); + void fini(); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __ATTACHMENT_REGISTRATOR_H__ diff --git a/components/include/byteorder.h b/components/include/byteorder.h new file mode 100755 index 0000000..203ba1c --- /dev/null +++ b/components/include/byteorder.h @@ -0,0 +1,48 @@ +#ifndef __BYTEORDER_H__ +#define __BYTEORDER_H__ + +// Byte Order (Net-to-Host, Host-to-Net) operations +// +// C provides htons, ntohs, htonl, ntohl, but they're not "constexpr" so are unusable in case labels. +// C++ proposal N3620 adds some function, but it's not accepted (yet?). It uses templates which are, +// IMO, a bit complicated so I chose not to adapt it. 
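+//
+// Illustrative usage (a sketch only, not part of the original header): because these
+// conversions are constexpr, they can appear where a constant expression is required,
+// e.g. in case labels when matching a value that is still in network byte order:
+//
+//     switch (ether_type) {                      // ether_type assumed to hold a network-order value
+//         case constHTONS(0x0800): /* IPv4 */ break;
+//         case constHTONS(0x86DD): /* IPv6 */ break;
+//     }
+//
+// The ether_type variable above is hypothetical and used only to illustrate the intent
+// described in the comment; the standard htons() could not be used in these case labels.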
+ +static inline constexpr uint16_t +constHTONS(uint16_t h_ord) +{ +#if __BYTE_ORDER == __LITTLE_ENDIAN + return ((h_ord>>8) & 0xff) | + ((h_ord&0xff) << 8); +#elif __BYTE_ORDER == __BIG_ENDIAN + return h_ord; +#else +#error unknown byte order +#endif // __BYTE_ORDER +} + +static inline constexpr uint16_t +constNTOHS(uint16_t n_ord) +{ + return constHTONS(n_ord); // Same thing +} + +static inline constexpr uint32_t +constHTONL(uint32_t h_ord) +{ +#if __BYTE_ORDER == __LITTLE_ENDIAN + return ((constHTONS(h_ord>>16)) & 0xffff) | + ((constHTONS(h_ord&0xffff)) << 16); +#elif __BYTE_ORDER == __BIG_ENDIAN + return h_ord; +#else +#error unknown byte order +#endif // __BYTE_ORDER +} + +static inline constexpr uint32_t +constNTOHL(uint32_t n_ord) +{ + return constHTONL(n_ord); // Same thing +} + +#endif // __BYTEORDER_H__ diff --git a/components/include/details_resolver.h b/components/include/details_resolver.h new file mode 100644 index 0000000..55e3b1a --- /dev/null +++ b/components/include/details_resolver.h @@ -0,0 +1,43 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __DETAILS_RESOLVER_H__ +#define __DETAILS_RESOLVER_H__ + +#include +#include + +#include "i_orchestration_tools.h" +#include "i_details_resolver.h" +#include "i_shell_cmd.h" +#include "singleton.h" +#include "component.h" + +class DetailsResolver + : + public Component, + Singleton::Provide, + Singleton::Consume +{ +public: + DetailsResolver(); + ~DetailsResolver(); + + void preload() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __DETAILS_RESOLVER_H__ diff --git a/components/include/downloader.h b/components/include/downloader.h new file mode 100755 index 0000000..2217772 --- /dev/null +++ b/components/include/downloader.h @@ -0,0 +1,50 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __DOWNLOADER_H__ +#define __DOWNLOADER_H__ + +#include "i_downloader.h" +#include "i_orchestration_tools.h" +#include "i_update_communication.h" +#include "i_encryptor.h" +#include "url_parser.h" +#include "i_agent_details.h" +#include "i_mainloop.h" +#include "singleton.h" +#include "component.h" + +class Downloader + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + Downloader(); + ~Downloader(); + + void preload() override; + + void init() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __DOWNLOADER_H__ diff --git a/components/include/external_sdk_server.h b/components/include/external_sdk_server.h new file mode 100755 index 0000000..cd0c593 --- /dev/null +++ b/components/include/external_sdk_server.h @@ -0,0 +1,43 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __EXTERNAL_SDK_SERVER_H__ +#define __EXTERNAL_SDK_SERVER_H__ + +#include + +#include "i_external_sdk_server.h" +#include "i_rest_api.h" +#include "component.h" + +class ExternalSdkServer + : + public Component, + Singleton::Provide, + Singleton::Consume +{ +public: + ExternalSdkServer(); + ~ExternalSdkServer(); + + void init(); + void fini(); + + void preload(); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __EXTERNAL_SDK_SERVER_H__ diff --git a/components/include/generic_rulebase/asset.h b/components/include/generic_rulebase/asset.h new file mode 100755 index 0000000..3f88743 --- /dev/null +++ b/components/include/generic_rulebase/asset.h @@ -0,0 +1,31 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __ASSET_H__ +#define __ASSET_H__ + +#include + +#include "i_environment.h" + +class Asset +{ +public: + const std::map & getAttrs() const { return attr; } + void setAttr(Context::MetaDataType type, const std::string &attr_val) { attr[type] = attr_val; } + +private: + std::map attr; +}; + +#endif // __ASSET_H__ diff --git a/components/include/generic_rulebase/assets_config.h b/components/include/generic_rulebase/assets_config.h new file mode 100755 index 0000000..0da1925 --- /dev/null +++ b/components/include/generic_rulebase/assets_config.h @@ -0,0 +1,91 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __ASSETS_CONFIG_H__ +#define __ASSETS_CONFIG_H__ + +#include +#include +#include + +#include "generic_rulebase_context.h" +#include "c_common/ip_common.h" +#include "connkey.h" + +class RuleAsset +{ +public: + class AssetUrl + { + public: + void + load(cereal::JSONInputArchive &archive_in); + const std::string & getProtocol() const { return protocol; } + const std::string & getIp() const { return ip; } + const std::string & getPort() const { return port; } + + uint8_t getParsedProtocol() const { return parsed_proto; } + const IpAddress & getParsedIp() const { return parsed_ip; } + uint16_t getParsedPort() const { return parsed_port; } + bool isAnyIp() const { return is_any_ip; } + bool isAnyPort() const { return is_any_port; } + bool isAnyProto() const { return is_any_proto; } + + private: + static IpAddress ConvertToIpAddress(const IPAddr &addr); + + std::string protocol; + std::string ip; + std::string port; + IpAddress parsed_ip; + uint16_t parsed_port; + uint8_t parsed_proto; + bool is_any_ip; + bool is_any_port; + bool is_any_proto; + }; + + void load(cereal::JSONInputArchive &archive_in); + + const GenericConfigId & getId() const { return asset_id; } + const std::string & getName() const { return asset_name; } + const std::vector & getUrls() const { return asset_urls; } + +private: + GenericConfigId asset_id; + std::string asset_name; + std::vector asset_urls; +}; + +class Assets +{ +public: + static void preload(); + + void load(cereal::JSONInputArchive &archive_in) + { + try { + cereal::load(archive_in, assets); + }catch (const cereal::Exception &) { + } + } + + static const Assets empty_assets_config; + + const std::vector & getAssets() const { return assets; } + +private: + std::vector assets; +}; + +#endif //__ASSETS_CONFIG_H__ diff --git a/components/include/generic_rulebase/evaluators/asset_eval.h b/components/include/generic_rulebase/evaluators/asset_eval.h new file mode 100755 index 0000000..4d12ba9 --- /dev/null +++ b/components/include/generic_rulebase/evaluators/asset_eval.h @@ -0,0 +1,36 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
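The RuleAsset and Assets classes above are populated from JSON through cereal's JSONInputArchive. A self-contained example of that loading style for a url-like record; the JSON shape and field names are illustrative only:

// cereal_load_sketch.cpp - illustrative only
#include <cereal/archives/json.hpp>
#include <cereal/types/string.hpp>
#include <cereal/types/vector.hpp>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Minimal stand-in for the protocol/ip/port shape of AssetUrl above.
struct UrlLike
{
    std::string protocol, ip, port;

    template <class Archive>
    void serialize(Archive &ar)
    {
        ar(cereal::make_nvp("protocol", protocol),
           cereal::make_nvp("ip", ip),
           cereal::make_nvp("port", port));
    }
};

int main()
{
    std::stringstream json(R"({"urls": [{"protocol": "tcp", "ip": "10.0.0.1", "port": "8080"}]})");
    std::vector<UrlLike> urls;
    try {
        cereal::JSONInputArchive ar(json);
        ar(cereal::make_nvp("urls", urls));
    } catch (const cereal::Exception &e) {
        std::cerr << "bad config: " << e.what() << "\n";
        return 1;
    }
    std::cout << urls[0].protocol << "://" << urls[0].ip << ":" << urls[0].port << "\n";
    return 0;
}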
+ +#ifndef __ASSET_EVAL_H__ +#define __ASSET_EVAL_H__ + +#include "environment/evaluator_templates.h" +#include "i_environment.h" +#include "singleton.h" + +class AssetMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + AssetMatcher(const std::vector ¶ms); + + static std::string getName() { return "assetId"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::string asset_id; +}; + +#endif // __ASSET_EVAL_H__ diff --git a/components/include/generic_rulebase/evaluators/connection_eval.h b/components/include/generic_rulebase/evaluators/connection_eval.h new file mode 100755 index 0000000..9176a88 --- /dev/null +++ b/components/include/generic_rulebase/evaluators/connection_eval.h @@ -0,0 +1,127 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __CONNECTION_EVAL_H__ +#define __CONNECTION_EVAL_H__ + +#include "environment/evaluator_templates.h" +#include "i_environment.h" +#include "singleton.h" +#include "connkey.h" + +class IpAddressMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + IpAddressMatcher(const std::vector ¶ms); + + static std::string getName() { return "ipAddress"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::vector> values; +}; + +class SourceIpMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + SourceIpMatcher(const std::vector ¶ms); + + static std::string getName() { return "sourceIP"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::vector> values; +}; + +class DestinationIpMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + DestinationIpMatcher(const std::vector ¶ms); + + static std::string getName() { return "destinationIP"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::vector> values; +}; + +class SourcePortMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + SourcePortMatcher(const std::vector ¶ms); + + static std::string getName() { return "sourcePort"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::vector> values; +}; + +class ListeningPortMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + ListeningPortMatcher(const std::vector ¶ms); + + static std::string getName() { return "listeningPort"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::vector> values; +}; + +class IpProtocolMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + IpProtocolMatcher(const std::vector ¶ms); + + static std::string getName() { return "ipProtocol"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::vector> values; +}; + +class UrlMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + UrlMatcher(const std::vector ¶ms); + + static std::string getName() { return "url"; } + + Maybe evalVariable() 
const override; + + static std::string ctx_key; + +private: + std::vector values; +}; + +#endif // __CONNECTION_EVAL_H__ diff --git a/components/include/generic_rulebase/evaluators/http_transaction_data_eval.h b/components/include/generic_rulebase/evaluators/http_transaction_data_eval.h new file mode 100755 index 0000000..8603bc5 --- /dev/null +++ b/components/include/generic_rulebase/evaluators/http_transaction_data_eval.h @@ -0,0 +1,74 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __HTTP_TRANSACTION_DATA_EVAL_H__ +#define __HTTP_TRANSACTION_DATA_EVAL_H__ + +#include "environment/evaluator_templates.h" +#include "i_environment.h" +#include "singleton.h" +#include "connkey.h" + +class EqualHost : public EnvironmentEvaluator, Singleton::Consume +{ +public: + EqualHost(const std::vector ¶ms); + + static std::string getName() { return "EqualHost"; } + + Maybe evalVariable() const override; + +private: + std::string host; +}; + +class EqualListeningIP : public EnvironmentEvaluator, Singleton::Consume +{ +public: + EqualListeningIP(const std::vector ¶ms); + + static std::string getName() { return "EqualListeningIP"; } + + Maybe evalVariable() const override; + +private: + IPAddr listening_ip; +}; + +class EqualListeningPort : public EnvironmentEvaluator, Singleton::Consume +{ +public: + EqualListeningPort(const std::vector ¶ms); + + static std::string getName() { return "EqualListeningPort"; } + + Maybe evalVariable() const override; + +private: + PortNumber listening_port; +}; + +class BeginWithUri : public EnvironmentEvaluator, Singleton::Consume +{ +public: + BeginWithUri(const std::vector ¶ms); + + static std::string getName() { return "BeginWithUri"; } + + Maybe evalVariable() const override; + +private: + std::string uri_prefix; +}; + +#endif // __HTTP_TRANSACTION_DATA_EVAL_H__ diff --git a/components/include/generic_rulebase/evaluators/parameter_eval.h b/components/include/generic_rulebase/evaluators/parameter_eval.h new file mode 100755 index 0000000..7ce3563 --- /dev/null +++ b/components/include/generic_rulebase/evaluators/parameter_eval.h @@ -0,0 +1,36 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
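The evaluators above (IpAddressMatcher, EqualHost, BeginWithUri, and the rest) share one contract: they are constructed from the string parameters of a rule, and evalVariable() later answers against the active context, with no answer at all when the relevant context key is absent. A standalone sketch of that contract, using std::optional in place of the project's Maybe type and a plain map in place of the real context (all names illustrative):

// evaluator_contract_sketch.cpp - illustrative only (C++17)
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <vector>

// Hypothetical stand-in for the request context the real evaluators consult.
using FakeContext = std::map<std::string, std::string>;

class HostEqualsLike
{
public:
    explicit HostEqualsLike(const std::vector<std::string> &params)
    {
        if (!params.empty()) host = params[0];
    }

    // Returns nothing when the context has no "host" entry (the "unknown" case),
    // otherwise returns whether the host matches the configured value.
    std::optional<bool> evalVariable(const FakeContext &ctx) const
    {
        auto it = ctx.find("host");
        if (it == ctx.end()) return std::nullopt;
        return it->second == host;
    }

private:
    std::string host;
};

int main()
{
    HostEqualsLike matcher({"www.example.com"});
    FakeContext ctx{{"host", "www.example.com"}};
    if (auto res = matcher.evalVariable(ctx)) {
        std::cout << (*res ? "match" : "no match") << "\n";
    }
    return 0;
}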
+ +#ifndef __PARAMETER_EVAL_H__ +#define __PARAMETER_EVAL_H__ + +#include "environment/evaluator_templates.h" +#include "i_environment.h" +#include "singleton.h" + +class ParameterMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + ParameterMatcher(const std::vector ¶ms); + + static std::string getName() { return "parameterId"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::string parameter_id; +}; + +#endif // __PARAMETER_EVAL_H__ diff --git a/components/include/generic_rulebase/evaluators/practice_eval.h b/components/include/generic_rulebase/evaluators/practice_eval.h new file mode 100755 index 0000000..b04d339 --- /dev/null +++ b/components/include/generic_rulebase/evaluators/practice_eval.h @@ -0,0 +1,36 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __PRACTICE_EVAL_H__ +#define __PRACTICE_EVAL_H__ + +#include "environment/evaluator_templates.h" +#include "i_environment.h" +#include "singleton.h" + +class PracticeMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + PracticeMatcher(const std::vector ¶ms); + + static std::string getName() { return "practiceId"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::string practice_id; +}; + +#endif // __PRACTICE_EVAL_H__ diff --git a/components/include/generic_rulebase/evaluators/query_eval.h b/components/include/generic_rulebase/evaluators/query_eval.h new file mode 100755 index 0000000..84ae068 --- /dev/null +++ b/components/include/generic_rulebase/evaluators/query_eval.h @@ -0,0 +1,43 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
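ParameterMatcher and PracticeMatcher above each expose a static getName() ("parameterId", "practiceId") identifying the evaluator by name. A hedged sketch of how such name-keyed evaluators could be built from a registry of factories; the registry itself is illustrative and not the project's actual registration mechanism:

// evaluator_registry_sketch.cpp - illustrative only
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct EvaluatorLike
{
    virtual ~EvaluatorLike() = default;
    virtual bool eval() const = 0;
};

struct PracticeIdLike : EvaluatorLike
{
    explicit PracticeIdLike(const std::vector<std::string> &params)
        : id(params.empty() ? "" : params[0]) {}
    bool eval() const override { return !id.empty(); }   // trivial stand-in logic
    std::string id;
};

using Factory = std::function<std::unique_ptr<EvaluatorLike>(const std::vector<std::string> &)>;

int main()
{
    std::map<std::string, Factory> registry;
    registry["practiceId"] = [](const std::vector<std::string> &params) {
        return std::unique_ptr<EvaluatorLike>(new PracticeIdLike(params));
    };

    // Look up the evaluator by the same string a policy file would carry.
    auto evaluator = registry.at("practiceId")({"practice-1234"});
    std::cout << std::boolalpha << evaluator->eval() << "\n";
    return 0;
}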
+ +#ifndef __QUERY_EVAL_H__ +#define __QUERY_EVAL_H__ + +#include "environment/evaluator_templates.h" +#include "i_environment.h" +#include "i_generic_rulebase.h" +#include "singleton.h" + +class QueryMatcher + : + public EnvironmentEvaluator, + Singleton::Consume, + Singleton::Consume +{ +public: + QueryMatcher(const std::vector &query_params); + + static std::string getName() { return "matchQuery"; } + + Maybe evalVariable() const override; + +private: + static const std::string contextKeyToString(Context::MetaDataType type); + + std::string key; + std::unordered_set values; + bool is_any = false; +}; + +#endif // __QUERY_EVAL_H__ diff --git a/components/include/generic_rulebase/evaluators/trigger_eval.h b/components/include/generic_rulebase/evaluators/trigger_eval.h new file mode 100755 index 0000000..8c985fe --- /dev/null +++ b/components/include/generic_rulebase/evaluators/trigger_eval.h @@ -0,0 +1,36 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __TRIGGER_EVAL_H__ +#define __TRIGGER_EVAL_H__ + +#include "environment/evaluator_templates.h" +#include "i_environment.h" +#include "singleton.h" + +class TriggerMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + TriggerMatcher(const std::vector ¶ms); + + static std::string getName() { return "triggerId"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::string trigger_id; +}; + +#endif // __TRIGGER_EVAL_H__ diff --git a/components/include/generic_rulebase/evaluators/zone_eval.h b/components/include/generic_rulebase/evaluators/zone_eval.h new file mode 100755 index 0000000..21022d4 --- /dev/null +++ b/components/include/generic_rulebase/evaluators/zone_eval.h @@ -0,0 +1,36 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __ZONE_EVAL_H__ +#define __ZONE_EVAL_H__ + +#include "environment/evaluator_templates.h" +#include "i_environment.h" +#include "singleton.h" + +class ZoneMatcher : public EnvironmentEvaluator, Singleton::Consume +{ +public: + ZoneMatcher(const std::vector &zones); + + static std::string getName() { return "zoneId"; } + + Maybe evalVariable() const override; + + static std::string ctx_key; + +private: + std::string zone_id; +}; + +#endif // __ZONE_EVAL_H__ diff --git a/components/include/generic_rulebase/generic_rulebase.h b/components/include/generic_rulebase/generic_rulebase.h new file mode 100755 index 0000000..1109eab --- /dev/null +++ b/components/include/generic_rulebase/generic_rulebase.h @@ -0,0 +1,44 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __GENERIC_RULEBASE_H__ +#define __GENERIC_RULEBASE_H__ + +#include + +#include "i_generic_rulebase.h" +#include "i_intelligence_is_v2.h" +#include "singleton.h" +#include "component.h" + +class GenericRulebase + : + public Component, + Singleton::Provide, + Singleton::Consume +{ +public: + GenericRulebase(); + ~GenericRulebase(); + + void preload(); + + void init(); + void fini(); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __GENERIC_RULEBASE_H__ diff --git a/components/include/generic_rulebase/generic_rulebase_context.h b/components/include/generic_rulebase/generic_rulebase_context.h new file mode 100755 index 0000000..340f7d8 --- /dev/null +++ b/components/include/generic_rulebase/generic_rulebase_context.h @@ -0,0 +1,39 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
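The GenericRulebaseContext declared just below tracks a registration state so that deactivate() is a no-op unless a matching rule actually registered the context. A standalone sketch of that guarded activate/deactivate idea; the state names and the destructor behavior here are illustrative additions, not the project's code:

// guarded_context_sketch.cpp - illustrative only
#include <iostream>

enum class RegState { Uninitialized, Registered, Unregistered };

class ScopedRuleContext
{
public:
    void
    activate(bool has_matching_rule)
    {
        if (!has_matching_rule) {
            state = RegState::Unregistered;
            return;
        }
        std::cout << "context registered\n";
        state = RegState::Registered;
    }

    void
    deactivate()
    {
        if (state != RegState::Registered) return;   // nothing to undo
        std::cout << "context deactivated\n";
        state = RegState::Unregistered;
    }

    ~ScopedRuleContext() { deactivate(); }           // safe even if never activated

private:
    RegState state = RegState::Uninitialized;
};

int main()
{
    ScopedRuleContext ctx;
    ctx.activate(true);
    ctx.deactivate();

    ScopedRuleContext never_used;   // destructor performs no deactivation
    return 0;
}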
+ +#ifndef __GENERIC_RULEBASE_CONTEXT_H__ +#define __GENERIC_RULEBASE_CONTEXT_H__ + +#include "rulebase_config.h" +#include "context.h" +#include "config.h" + +enum class RuleRegistrationState {REGISTERED, UNREGISTERED, UNINITIALIZED}; + +class GenericRulebaseContext +{ +public: + GenericRulebaseContext() : ctx(), registration_state(RuleRegistrationState::UNINITIALIZED) {} + + void activate(const BasicRuleConfig &rule); + + void activate(); + + void deactivate() { if (registration_state == RuleRegistrationState::REGISTERED) ctx.deactivate(); } + +private: + Context ctx; + RuleRegistrationState registration_state; +}; + +#endif //__GENERIC_RULEBASE_CONTEXT_H__ diff --git a/components/include/generic_rulebase/generic_rulebase_utils.h b/components/include/generic_rulebase/generic_rulebase_utils.h new file mode 100755 index 0000000..3aa83c1 --- /dev/null +++ b/components/include/generic_rulebase/generic_rulebase_utils.h @@ -0,0 +1,39 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __GENERIC_RULEBASE_UTILS_H__ +#define __GENERIC_RULEBASE_UTILS_H__ + +#include + +#include "debug.h" +#include "cereal/archives/json.hpp" + +USE_DEBUG_FLAG(D_RULEBASE_CONFIG); + +template +void +parseJSONKey(const std::string &key_name, T &value, cereal::JSONInputArchive &archive_in) +{ + try { + archive_in(cereal::make_nvp(key_name, value)); + } catch (const cereal::Exception &e) { + dbgDebug(D_RULEBASE_CONFIG) + << "Could not parse the required key. Key: " + << key_name + << ", Error: " + << e.what(); + } +} + +#endif //__GENERIC_RULEBASE_UTILS_H__ diff --git a/components/include/generic_rulebase/match_query.h b/components/include/generic_rulebase/match_query.h new file mode 100755 index 0000000..71a6d6e --- /dev/null +++ b/components/include/generic_rulebase/match_query.h @@ -0,0 +1,93 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
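parseJSONKey above deliberately swallows cereal::Exception so that optional keys fall back to the caller's default instead of aborting the whole load. A small self-contained program showing that tolerant-key pattern with cereal; the JSON document and key names are made up for the example:

// optional_key_sketch.cpp - illustrative only
#include <cereal/archives/json.hpp>
#include <iostream>
#include <sstream>
#include <string>

// Tolerant single-key parse: on a missing key, keep the caller's default
// instead of failing the whole load (mirrors parseJSONKey above).
template <typename T>
void
parseOptionalKey(const std::string &key, T &value, cereal::JSONInputArchive &ar)
{
    try {
        ar(cereal::make_nvp(key, value));
    } catch (const cereal::Exception &) {
        // key absent (or malformed): leave 'value' untouched
    }
}

int main()
{
    std::stringstream json(R"({"triggerId": "abc123"})");
    cereal::JSONInputArchive ar(json);

    std::string id, name = "<unnamed>";
    parseOptionalKey("triggerId", id, ar);
    parseOptionalKey("triggerName", name, ar);   // missing key, default kept

    std::cout << id << " / " << name << "\n";    // abc123 / <unnamed>
    return 0;
}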
+ +#ifndef __MATCH_QUERY_H__ +#define __MATCH_QUERY_H__ + +#include +#include +#include +#include +#include + +#include "cereal/types/string.hpp" +#include "cereal/types/vector.hpp" +#include "cereal/archives/json.hpp" + +#include + +#include "c_common/ip_common.h" + +class MatchQuery +{ +public: + enum class MatchType { Condition, Operator }; + enum class Operators { And, Or, None }; + enum class Conditions { Equals, NotEquals, In, NotIn, Exist, None }; + enum class StaticKeys + { + IpAddress, + SrcIpAddress, + DstIpAddress, + SrcPort, + ListeningPort, + IpProtocol, + Domain, + NotStatic + }; + + void load(cereal::JSONInputArchive &archive_in); + + MatchType getType() const { return type; } + Operators getOperatorType() const { return operator_type; } + Conditions getConditionType() const { return condition_type; } + const std::string & getKey() const { return key; } + const std::set & getValue() const { return value; } + const std::vector & getIpAddrValue() const { return ip_addr_value; } + const std::vector & getPortValue() const { return port_value; } + const std::vector & getProtoValue() const { return ip_proto_value; } + const std::vector & getItems() const { return items; } + std::string getFirstValue() const { return first_value; } + bool matchAttributes(const std::unordered_map> &key_value_pairs) const; + bool matchException(const std::string &behaviorKey, const std::string &behaviorValue) const; + bool isKeyTypeIp() const; + bool isKeyTypePort() const; + bool isKeyTypeProtocol() const; + bool isKeyTypeDomain() const; + bool isKeyTypeSpecificLabel() const; + bool isKeyTypeStatic() const; + std::set getAllKeys() const; + +private: + StaticKeys getKeyByName(const std::string &key_type_name); + bool matchAttributes(const std::set &values) const; + bool matchAttributesRegEx(const std::set &values) const; + bool matchAttributesString(const std::set &values) const; + bool isRegEx() const; + + MatchType type; + Operators operator_type; + Conditions condition_type; + std::string key; + StaticKeys key_type; + bool is_specific_label; + std::string first_value; + std::set value; + std::set regex_values; + std::vector ip_addr_value; + std::vector port_value; + std::vector ip_proto_value; + std::vector items; +}; + +#endif // __MATCH_QUERY_H__ diff --git a/components/include/generic_rulebase/parameters_config.h b/components/include/generic_rulebase/parameters_config.h new file mode 100755 index 0000000..8317ae3 --- /dev/null +++ b/components/include/generic_rulebase/parameters_config.h @@ -0,0 +1,221 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
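MatchQuery above models a recursive condition tree: a node is either a concrete condition on a key or an And/Or operator over child queries (the items vector). A standalone sketch of evaluating such a tree against a flat attribute map, with simplified types and only an equals/in style of leaf; the node layout and names are illustrative:

// match_tree_sketch.cpp - illustrative only (C++14)
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

struct NodeLike
{
    enum class Kind { Leaf, And, Or } kind = Kind::Leaf;
    std::string key;
    std::set<std::string> values;
    std::vector<NodeLike> children;
};

static bool
matches(const NodeLike &node, const std::map<std::string, std::string> &attrs)
{
    switch (node.kind) {
        case NodeLike::Kind::Leaf: {
            auto it = attrs.find(node.key);
            return it != attrs.end() && node.values.count(it->second) > 0;
        }
        case NodeLike::Kind::And:
            for (const auto &child : node.children) if (!matches(child, attrs)) return false;
            return true;
        case NodeLike::Kind::Or:
            for (const auto &child : node.children) if (matches(child, attrs)) return true;
            return false;
    }
    return false;
}

int main()
{
    NodeLike leaf1{NodeLike::Kind::Leaf, "ipProtocol", {"6"}, {}};
    NodeLike leaf2{NodeLike::Kind::Leaf, "listeningPort", {"80", "443"}, {}};
    NodeLike root{NodeLike::Kind::And, "", {}, {leaf1, leaf2}};

    std::map<std::string, std::string> attrs{{"ipProtocol", "6"}, {"listeningPort", "443"}};
    std::cout << (matches(root, attrs) ? "rule matches" : "no match") << "\n";
    return 0;
}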
+ +#ifndef __PARAMETERS_CONFIG_H__ +#define __PARAMETERS_CONFIG_H__ + +#include +#include +#include +#include + +#include "cereal/types/string.hpp" +#include "cereal/types/vector.hpp" +#include "cereal/archives/json.hpp" +#include "generic_rulebase/generic_rulebase_utils.h" +#include "match_query.h" +#include "maybe_res.h" +#include "config.h" + +enum class BehaviorKey +{ + ACTION, + LOG, + SOURCE_IDENTIFIER, + HTTP_SOURCE_ID, + HTTPS_SOURCE_ID +}; + +enum class BehaviorValue +{ + REJECT, + ACCEPT, + IGNORE, + DROP, + X_FORWARDED_FOR, + COOKIE_AOUTH2_PROXY, + COOKIE_JSESSIONID +}; + +static const std::unordered_map string_to_behavior_key = { + { "action", BehaviorKey::ACTION }, + { "log", BehaviorKey::LOG }, + { "sourceIdentifier", BehaviorKey::SOURCE_IDENTIFIER }, + { "httpSourceId", BehaviorKey::HTTP_SOURCE_ID }, + { "httpsSourceId", BehaviorKey::HTTPS_SOURCE_ID } +}; + +static const std::unordered_map string_to_behavior_val = { + { "Cookie:_oauth2_proxy", BehaviorValue::COOKIE_AOUTH2_PROXY }, + { "Cookie:JSESSIONID", BehaviorValue::COOKIE_JSESSIONID }, + { "X-Forwarded-For", BehaviorValue::X_FORWARDED_FOR }, + { "reject", BehaviorValue::REJECT }, + { "accept", BehaviorValue::ACCEPT }, + { "ignore", BehaviorValue::IGNORE }, + { "drop", BehaviorValue::DROP } +}; + +class ParameterOverrides +{ +public: + class ParsedBehavior + { + public: + void + serialize(cereal::JSONInputArchive &archive_in) + { + parseJSONKey("log", log, archive_in); + } + + const std::string & getParsedBehaviorLog() const { return log; } + + private: + std::string log; + }; + + void load(cereal::JSONInputArchive &archive_in); + + const std::vector & getParsedBehaviors() const { return parsed_behaviors; } + +private: + std::vector parsed_behaviors; +}; + +class ParameterTrustedSources +{ +public: + class SourcesIdentifier + { + public: + + SourcesIdentifier() = default; + + void + serialize(cereal::JSONInputArchive &archive_in) + { + parseJSONKey("sourceIdentifier", source_identifier, archive_in); + parseJSONKey("value", value, archive_in); + } + + const std::string & getSourceIdentifier() const {return source_identifier; } + + const std::string & getValue() const {return value; } + + private: + std::string source_identifier; + std::string value; + }; + + void load(cereal::JSONInputArchive &archive_in); + + uint getNumOfSources() const { return num_of_sources; } + + const std::vector & getSourcesIdentifiers() const { return sources_identidiers; } + +private: + uint num_of_sources; + std::vector sources_identidiers; +}; + +class ParameterBehavior +{ +public: + ParameterBehavior() = default; + ParameterBehavior(BehaviorKey &_key, BehaviorValue &_value) : key(_key), value(_value) {} + ParameterBehavior(BehaviorKey &&_key, BehaviorValue &&_value) + : + key(std::move(_key)), + value(std::move(_value)) + {} + + void load(cereal::JSONInputArchive &archive_in); + + const BehaviorValue & getValue() const { return value; } + + const BehaviorKey & getKey() const { return key; } + + const std::string & getId() const { return id; } + + bool + operator<(const ParameterBehavior &other) const { + return (key < other.key) || (key == other.key && value < other.value); + } + + bool operator==(const ParameterBehavior &other) const { return key == other.key && value == other.value; } + +private: + std::string id; + BehaviorKey key; + BehaviorValue value; +}; + +class ParameterAntiBot +{ +public: + void load(cereal::JSONInputArchive &archive_in); + + std::vector & getInjected() { return injected; } + + std::vector & getValidated() { return 
validated; } + +private: + std::vector injected; + std::vector validated; +}; + +class ParameterOAS +{ +public: + void load(cereal::JSONInputArchive &archive_in); + + const std::string & getValue() const { return value; } + +private: + std::string value; +}; + +class ParameterException +{ +public: + static void + preload() + { + registerExpectedConfiguration("rulebase", "exception"); + registerConfigLoadCb([](){ is_geo_location_exception_exists = is_geo_location_exception_being_loaded; }); + registerConfigPrepareCb([](){ is_geo_location_exception_being_loaded = false; }); + } + + void load(cereal::JSONInputArchive &archive_in); + + std::set + getBehavior(const std::unordered_map> &key_value_pairs) const; + + static bool isGeoLocationExceptionExists() { return is_geo_location_exception_exists; } + +private: + class MatchBehaviorPair + { + public: + void load(cereal::JSONInputArchive &archive_in); + MatchQuery match; + ParameterBehavior behavior; + }; + + std::vector match_queries; + MatchQuery match; + ParameterBehavior behavior; + static bool is_geo_location_exception_exists; + static bool is_geo_location_exception_being_loaded; +}; + +#endif //__PARAMETERS_CONFIG_H__ diff --git a/components/include/generic_rulebase/rulebase_config.h b/components/include/generic_rulebase/rulebase_config.h new file mode 100755 index 0000000..a2f6a0d --- /dev/null +++ b/components/include/generic_rulebase/rulebase_config.h @@ -0,0 +1,167 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
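The string_to_behavior_key and string_to_behavior_val tables above map policy strings onto enums. A small sketch of the same lookup style with an explicit miss case; the enum and table contents are abbreviated and illustrative:

// behavior_lookup_sketch.cpp - illustrative only (C++17)
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

enum class ActionLike { Accept, Reject, Drop };

static const std::unordered_map<std::string, ActionLike> kActions = {
    {"accept", ActionLike::Accept},
    {"reject", ActionLike::Reject},
    {"drop",   ActionLike::Drop},
};

static std::optional<ActionLike>
toAction(const std::string &name)
{
    auto it = kActions.find(name);
    if (it == kActions.end()) return std::nullopt;   // unknown value in policy
    return it->second;
}

int main()
{
    for (const std::string name : {"reject", "bogus"}) {
        if (auto action = toAction(name)) {
            std::cout << name << " -> enum value " << static_cast<int>(*action) << "\n";
        } else {
            std::cout << name << " -> unrecognized, keeping default behavior\n";
        }
    }
    return 0;
}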
+ +#ifndef __RULEBASE_CONFIG_H__ +#define __RULEBASE_CONFIG_H__ + +#include +#include +#include +#include + +#include "generic_rulebase/generic_rulebase_utils.h" +#include "environment/evaluator_templates.h" +#include "cereal/types/string.hpp" +#include "cereal/types/vector.hpp" +#include "cereal/archives/json.hpp" +#include "i_environment.h" +#include "singleton.h" +#include "maybe_res.h" +#include "config.h" + +using GenericConfigId = std::string; + +class RulePractice +{ +public: + RulePractice() = default; + + RulePractice(GenericConfigId &_id, std::string &_name) : practice_id(_id), practice_name(_name) {}; + + void + serialize(cereal::JSONInputArchive &ar) + { + parseJSONKey("practiceId", practice_id, ar); + parseJSONKey("practiceName", practice_name, ar); + } + + const GenericConfigId getId() const { return practice_id; } + + const std::string getName() const { return practice_name; } + + bool + operator==(const RulePractice &other) const + { + return practice_id == other.getId() && practice_name == other.getName(); + } + +private: + GenericConfigId practice_id; + std::string practice_name; +}; + +class RuleTrigger +{ +public: + void + serialize(cereal::JSONInputArchive &ar) + { + parseJSONKey("triggerId", trigger_id, ar); + parseJSONKey("triggerType", trigger_type, ar); + parseJSONKey("triggerName", trigger_name, ar); + } + + const GenericConfigId getId() const { return trigger_id; } + + const std::string getType() const { return trigger_type; } + + const std::string getName() const { return trigger_name; } + +private: + GenericConfigId trigger_id; + std::string trigger_type; + std::string trigger_name; +}; + +class RuleParameter +{ +public: + void + serialize(cereal::JSONInputArchive &ar) + { + parseJSONKey("parameterId", parameter_id, ar); + parseJSONKey("parameterType", parameter_type, ar); + parseJSONKey("parameterName", parameter_name, ar); + } + + const GenericConfigId getId() const { return parameter_id; } + + const std::string getType() const { return parameter_type; } + + const std::string getName() const { return parameter_name; } + +private: + GenericConfigId parameter_id; + std::string parameter_type; + std::string parameter_name; +}; + +class BasicRuleConfig +{ +public: + static void + preload() + { + registerExpectedConfiguration("rulebase", "rulesConfig"); + registerExpectedSetting>("rulebase", "rulesConfig"); + registerConfigLoadCb(BasicRuleConfig::updateCountMetric); + registerConfigPrepareCb([](){ BasicRuleConfig::assets_ids_aggregation.clear(); }); + } + + void load(cereal::JSONInputArchive &ar); + + static void updateCountMetric(); + + bool isPracticeActive(const GenericConfigId &practice_id) const; + + bool isTriggerActive(const GenericConfigId &trigger_id) const; + + bool isParameterActive(const GenericConfigId ¶meter_id) const; + + uint8_t getPriority() const { return priority; } + + const GenericConfigId & getRuleId() const { return rule_id; } + + const std::string & getRuleName() const { return rule_name; } + + const GenericConfigId & getAssetId() const { return asset_id; } + + const std::string & getAssetName() const { return asset_name; } + + const GenericConfigId & getZoneId() const { return zone_id; } + + const std::string & getZoneName() const { return zone_name; } + + const std::vector & getPractices() const { return practices; } + + const std::vector & getTriggers() const { return triggers; } + + const std::vector & getParameters() const { return parameters; } + +private: + uint8_t priority = 0; + GenericConfigId rule_id = ""; + std::string 
rule_name; + GenericConfigId asset_id; + std::string asset_name; + GenericConfigId zone_id; + std::string zone_name; + std::vector practices; + std::vector triggers; + std::vector parameters; + + static std::set assets_ids; + static std::set assets_ids_aggregation; +}; + +#endif // __RULEBASE_CONFIG_H__ diff --git a/components/include/generic_rulebase/triggers_config.h b/components/include/generic_rulebase/triggers_config.h new file mode 100755 index 0000000..27f2e09 --- /dev/null +++ b/components/include/generic_rulebase/triggers_config.h @@ -0,0 +1,174 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __TRIGGERS_CONFIG_H__ +#define __TRIGGERS_CONFIG_H__ + +#include +#include + +#include "environment/evaluator_templates.h" +#include "cereal/types/string.hpp" +#include "cereal/types/vector.hpp" +#include "cereal/archives/json.hpp" +#include "i_environment.h" +#include "i_logging.h" +#include "singleton.h" +#include "maybe_res.h" +#include "config.h" +#include "log_generator.h" +#include "generic_rulebase_utils.h" + +class WebTriggerConf +{ +public: + WebTriggerConf(); + WebTriggerConf(const std::string &title, const std::string &body, uint code); + + static void + preload() + { + registerExpectedConfiguration("rulebase", "webUserResponse"); + } + + void load(cereal::JSONInputArchive &archive_in); + + bool operator==(const WebTriggerConf &other) const; + + uint getResponseCode() const { return response_code; } + + const std::string & getResponseTitle() const { return response_title; } + + const std::string & getResponseBody() const { return response_body; } + + const std::string & getDetailsLevel() const { return details_level; } + + const std::string & getRedirectURL() const { return redirect_url; } + + bool getAddEventId() const { return add_event_id_to_header; } + + static WebTriggerConf default_trigger_conf; + +private: + std::string response_title; + std::string details_level; + std::string response_body; + std::string redirect_url; + uint response_code; + bool add_event_id_to_header = false; +}; + +class LogTriggerConf : Singleton::Consume +{ +public: + enum class SecurityType { AccessControl, ThreatPrevention, Compliance, COUNT }; + enum class extendLoggingSeverity { None, High, Critical }; + + enum class WebLogFields { + webBody, + webHeaders, + webRequests, + webUrlPath, + webUrlQuery, + responseBody, + responseCode, + COUNT + }; + + LogTriggerConf() {} + + LogTriggerConf(std::string trigger_name, bool log_detect, bool log_prevent); + + static void + preload() + { + registerExpectedConfiguration("rulebase", "log"); + } + + template + LogGen + operator()( + const std::string &title, + SecurityType security, + ReportIS::Severity severity, + ReportIS::Priority priority, + bool is_action_drop_or_prevent, + Tags ...tags) const + { + return LogGen( + title, + ReportIS::Level::LOG, + ReportIS::Audience::SECURITY, + severity, + priority, + std::forward(tags)..., + getStreams(security, is_action_drop_or_prevent), + getEnrechments(security) + ); + } + + 
template + LogGen + operator()(const std::string &title, SecurityType security, bool is_action_drop_or_prevent, Tags ...tags) const + { + return (*this)( + title, + security, + getSeverity(is_action_drop_or_prevent), + getPriority(is_action_drop_or_prevent), + is_action_drop_or_prevent, + std::forward(tags)... + ); + } + + void load(cereal::JSONInputArchive &archive_in); + + bool isWebLogFieldActive(WebLogFields log_field) const { return log_web_fields.isSet(log_field); } + + bool isLogStreamActive(ReportIS::StreamType stream_type) const { return active_streams.isSet(stream_type); } + + bool isPreventLogActive(SecurityType security_type) const { return should_log_on_prevent.isSet(security_type); } + + bool isDetectLogActive(SecurityType security_type) const { return should_log_on_detect.isSet(security_type); } + + bool isLogGeoLocationActive(SecurityType security_type) const { return log_geo_location.isSet(security_type); } + + extendLoggingSeverity getExtendLoggingSeverity() const { return extend_logging_severity; } + + const std::string & getVerbosity() const { return verbosity; } + const std::string & getName() const { return name; } + + const std::string & getUrlForSyslog() const { return url_for_syslog; } + const std::string & getUrlForCef() const { return url_for_cef; } + +private: + ReportIS::Severity getSeverity(bool is_action_drop_or_prevent) const; + ReportIS::Priority getPriority(bool is_action_drop_or_prevent) const; + + Flags getStreams(SecurityType security_type, bool is_action_drop_or_prevent) const; + Flags getEnrechments(SecurityType security_type) const; + + std::string name; + std::string verbosity; + std::string url_for_syslog = ""; + std::string url_for_cef = ""; + Flags active_streams; + Flags should_log_on_detect; + Flags should_log_on_prevent; + Flags log_geo_location; + Flags log_web_fields; + extendLoggingSeverity extend_logging_severity = extendLoggingSeverity::None; + bool should_format_output = false; +}; + +#endif //__TRIGGERS_CONFIG_H__ diff --git a/components/include/generic_rulebase/zone.h b/components/include/generic_rulebase/zone.h new file mode 100755 index 0000000..5709937 --- /dev/null +++ b/components/include/generic_rulebase/zone.h @@ -0,0 +1,55 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
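LogTriggerConf above answers isWebLogFieldActive / isLogStreamActive style questions from Flags members indexed by enum values up to a COUNT sentinel. A minimal standalone sketch of such an enum-keyed flag set built on std::bitset; the names are illustrative and this is not the project's Flags type:

// enum_flags_sketch.cpp - illustrative only
#include <bitset>
#include <iostream>

enum class StreamKind { Syslog, CEF, Local, COUNT };

template <typename Enum>
class EnumFlags
{
public:
    void set(Enum e) { bits.set(static_cast<size_t>(e)); }
    bool isSet(Enum e) const { return bits.test(static_cast<size_t>(e)); }

private:
    std::bitset<static_cast<size_t>(Enum::COUNT)> bits;
};

int main()
{
    EnumFlags<StreamKind> active_streams;
    active_streams.set(StreamKind::Syslog);
    std::cout << std::boolalpha
              << active_streams.isSet(StreamKind::Syslog) << " "
              << active_streams.isSet(StreamKind::CEF) << "\n";   // true false
    return 0;
}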
+ +#ifndef __ZONE_H__ +#define __ZONE_H__ + +#include +#include +#include +#include + +#include "generic_rulebase_context.h" +#include "match_query.h" +#include "i_environment.h" +#include "i_intelligence_is_v2.h" +#include "asset.h" + +class Zone : Singleton::Consume, Singleton::Consume +{ + using AttrData = std::unordered_map>; + +public: + enum class Direction { To, From, Bidirectional }; + + void load(cereal::JSONInputArchive &archive_in); + + bool contains(const Asset &asset); + + GenericConfigId getId() const { return zone_id; } + const std::string & getName() const { return zone_name; } + const std::vector> & getAdjacentZones() const { return adjacent_zones; } + const MatchQuery & getMatchQuery() const { return match_query; } + bool isAnyZone() const { return is_any; } + +private: + bool matchAttributes(const AttrData &data); + + GenericConfigId zone_id; + std::string zone_name; + std::vector> adjacent_zones; + MatchQuery match_query; + bool is_any; +}; + +#endif // __ZONE_H__ diff --git a/components/include/generic_rulebase/zones_config.h b/components/include/generic_rulebase/zones_config.h new file mode 100755 index 0000000..c803f9d --- /dev/null +++ b/components/include/generic_rulebase/zones_config.h @@ -0,0 +1,53 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __ZONES_CONFIG_H__ +#define __ZONES_CONFIG_H__ + +#include +#include +#include +#include + +#include "generic_rulebase_context.h" +#include "match_query.h" +#include "i_generic_rulebase.h" +#include "zone.h" + +class Zones +{ +public: + void load(cereal::JSONInputArchive &archive_in) + { + cereal::load(archive_in, zones); + } + + const std::vector & getZones() const { return zones; } + + std::vector zones; +}; + +class ZonesConfig : Singleton::Consume +{ +public: + static void preload(); + + void load(cereal::JSONInputArchive &archive_in); + + const std::vector & getZones() const { return zones; } + +private: + std::vector zones; +}; + +#endif //__ZONES_CONFIG_H__ diff --git a/components/include/gradual_deployment.h b/components/include/gradual_deployment.h new file mode 100644 index 0000000..c8a4f7f --- /dev/null +++ b/components/include/gradual_deployment.h @@ -0,0 +1,43 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
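Zone above exposes getAdjacentZones() as a list of Direction/zone-id pairs. As a purely illustrative sketch (the real matching logic is not shown in this header, so the lookup below is an assumption), here is one way a directional adjacency list could be queried:

// zone_adjacency_sketch.cpp - illustrative only
#include <iostream>
#include <string>
#include <utility>
#include <vector>

enum class Direction { To, From, Bidirectional };

using Adjacency = std::vector<std::pair<Direction, std::string>>;

static bool
allowsTrafficTo(const Adjacency &adjacent, const std::string &peer_zone)
{
    for (const auto &entry : adjacent) {
        if (entry.second != peer_zone) continue;
        if (entry.first == Direction::To || entry.first == Direction::Bidirectional) return true;
    }
    return false;
}

int main()
{
    Adjacency dmz_adjacency = {
        {Direction::To, "backend-zone"},
        {Direction::From, "internet-zone"},
    };

    std::cout << std::boolalpha
              << allowsTrafficTo(dmz_adjacency, "backend-zone") << " "     // true
              << allowsTrafficTo(dmz_adjacency, "internet-zone") << "\n";  // false
    return 0;
}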
+ +#ifndef __GRADUAL_DEPLOYMENT_H__ +#define __GRADUAL_DEPLOYMENT_H__ + +#include "i_gradual_deployment.h" + +#include "singleton.h" +#include "i_rest_api.h" +#include "i_table.h" +#include "i_mainloop.h" +#include "component.h" + +class GradualDeployment + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume +{ +public: + GradualDeployment(); + ~GradualDeployment(); + + void init(); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __GRADUAL_DEPLOYMENT_H__ diff --git a/components/include/health_check_manager.h b/components/include/health_check_manager.h new file mode 100755 index 0000000..0c9924c --- /dev/null +++ b/components/include/health_check_manager.h @@ -0,0 +1,45 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __HEALTH_CHECK_MANAGER_H__ +#define __HEALTH_CHECK_MANAGER_H__ + +#include "singleton.h" +#include "i_health_check_manager.h" +#include "i_mainloop.h" +#include "i_rest_api.h" +#include "component.h" +#include "i_messaging.h" +#include "i_environment.h" + +class HealthCheckManager + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + HealthCheckManager(); + ~HealthCheckManager(); + + void init() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __HEALTH_CHECK_MANAGER_H__ diff --git a/components/include/health_checker.h b/components/include/health_checker.h new file mode 100755 index 0000000..e62f841 --- /dev/null +++ b/components/include/health_checker.h @@ -0,0 +1,44 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __HEALTH_CHECKER_H__ +#define __HEALTH_CHECKER_H__ + +#include "singleton.h" +#include "i_mainloop.h" +#include "i_socket_is.h" +#include "i_health_check_manager.h" +#include "component.h" + +class HealthChecker + : + public Component, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + HealthChecker(); + ~HealthChecker(); + + void init() override; + void fini() override; + + void preload() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __HEALTH_CHECKER_H__ diff --git a/components/include/http_event_impl/filter_verdict.h b/components/include/http_event_impl/filter_verdict.h new file mode 100755 index 0000000..174ffd0 --- /dev/null +++ b/components/include/http_event_impl/filter_verdict.h @@ -0,0 +1,69 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. 
All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __FILTER_VERDICT_H__ +#define __FILTER_VERDICT_H__ + +#include + +#include "maybe_res.h" +#include "i_http_event_impl.h" + +class FilterVerdict +{ +public: + FilterVerdict(ngx_http_cp_verdict_e _verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT) + : + verdict(_verdict) + {} + + FilterVerdict(const EventVerdict &_verdict, ModifiedChunkIndex _event_idx = -1) + : + verdict(_verdict.getVerdict()) + { + if (verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT) { + addModifications(_verdict.getModifications(), _event_idx); + } + } + + void + addModifications(const FilterVerdict &other) + { + if (other.verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT) return; + + modifications.insert(modifications.end(), other.modifications.begin(), other.modifications.end()); + total_modifications += other.total_modifications; + } + + void + addModifications( + const ModificationList &mods, + ModifiedChunkIndex _event_idx, + ngx_http_cp_verdict_e alt_verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT) + { + total_modifications += mods.size(); + modifications.push_back(EventModifications(_event_idx, mods)); + if (alt_verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT) verdict = alt_verdict; + } + + uint getModificationsAmount() const { return total_modifications; } + ngx_http_cp_verdict_e getVerdict() const { return verdict; } + const std::vector & getModifications() const { return modifications; } + +private: + ngx_http_cp_verdict_e verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT; + std::vector modifications; + uint total_modifications = 0; +}; + +#endif // __FILTER_VERDICT_H__ diff --git a/components/include/http_event_impl/i_http_event_impl.h b/components/include/http_event_impl/i_http_event_impl.h new file mode 100755 index 0000000..94b63c0 --- /dev/null +++ b/components/include/http_event_impl/i_http_event_impl.h @@ -0,0 +1,387 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_HTTP_EVENT_IMPL_H__ +#define __I_HTTP_EVENT_IMPL_H__ + +#ifndef __HTTP_INSPECTION_EVENTS_H__ +#error i_http_event_impl.h should not be included directly! 
+#endif //__HTTP_INSPECTION_EVENTS_H__ + +#include +#include +#include + +#include "debug.h" +#include "buffer.h" +#include "http_transaction_data.h" +#include "nginx_attachment_common.h" + +USE_DEBUG_FLAG(D_HTTP_MANAGER); + +using ModificationType = ngx_http_modification_type_e; +using ModificationPosition = ngx_http_cp_inject_pos_t; + +static const ModificationPosition injection_pos_irrelevant = INJECT_POS_IRRELEVANT; + +template +class Modification +{ +public: + Modification(const TMod &mod, ModificationType mod_type) + : + Modification(mod, mod_type, injection_pos_irrelevant) + {} + + Modification(const TMod &mod, ModificationType mod_type, ModificationPosition mod_position) + : + modification(mod), + type(mod_type), + position(mod_position) + { + dbgAssert(mod_type != ModificationType::APPEND || position == injection_pos_irrelevant) + << "Injection position is not applicable to a modification of type \"Append\""; + + dbgAssert(mod_type != ModificationType::INJECT || position >= 0) + << "Invalid injection position: must be non-negative. Position: " + << position; + } + + ModificationPosition getModificationPosition() const { return position; } + ModificationType getModificationType() const { return type; } + const TMod & getModification() const { return modification; } + +private: + TMod modification; + ModificationType type; + ModificationPosition position; +}; + +using ModifiedChunkIndex = int; +using ModificationBuffer = std::tuple; +using ModificationList = std::vector; +using EventModifications = std::pair; + +template +class I_ModifiableContent +{ +public: + virtual Maybe modify(const Modification &mod) = 0; + + virtual ModificationList getModificationList() const = 0; + +protected: + virtual ~I_ModifiableContent() {} +}; + +using HeaderKey = std::string; +using HeaderModification = std::pair, Buffer>; + +class HttpHeaderModification : I_ModifiableContent +{ +public: + Maybe + appendHeader(const HeaderKey &key, const Buffer &value) + { + return modify( + Modification( + HeaderModification({ { injection_pos_irrelevant, key }, value }), + ModificationType::APPEND + ) + ); + } + + Maybe + injectValue(ModificationPosition position, const Buffer &data) + { + return modify( + Modification( + HeaderModification({ { position, HeaderKey() }, data }), + ModificationType::INJECT, + position + ) + ); + } + + ModificationList + getModificationList() const override + { + ModificationList modification_list; + + for (const auto &modification : headers_to_append) { + modification_list.emplace_back(injection_pos_irrelevant, ModificationType::APPEND, modification.first); + modification_list.emplace_back(injection_pos_irrelevant, ModificationType::APPEND, modification.second); + } + for (const auto &modification : header_injections) { + modification_list.emplace_back(modification.first, ModificationType::INJECT, modification.second); + } + + return modification_list; + } + +private: + Maybe + modify(const Modification &mod) override + { + auto modification_type = mod.getModificationType(); + switch (modification_type) { + case ModificationType::APPEND: { + const HeaderKey &appended_header_key = mod.getModification().first.second; + auto iterator = headers_to_append.find(appended_header_key); + if (iterator != headers_to_append.end()) { + return + genError( + "Append modification with provided header key already exists. 
Header key: \"" + + appended_header_key + + "\"" + ); + } + + headers_to_append.emplace(appended_header_key, mod.getModification().second); + break; + } + case ModificationType::INJECT: { + auto iterator = header_injections.find(mod.getModificationPosition()); + if (iterator != header_injections.end()) { + return genError("Inject modification with provided position already exists"); + } + + header_injections.emplace(mod.getModificationPosition(), mod.getModification().second); + break; + } + case ModificationType::REPLACE: { + // future support to pass new Content-Length + dbgWarning(D_HTTP_MANAGER) << "Replace modification is not yet supported"; + break; + } + default: + dbgAssert(false) + << "Unknown type of ModificationType: " + << static_cast(modification_type); + } + + return Maybe(); + } + +private: + std::map headers_to_append; + std::map header_injections; +}; + +class HttpHeader +{ +public: + HttpHeader() = default; + HttpHeader(const Buffer &_key, const Buffer &_value, uint8_t _header_index, bool _is_last_header = false) + : + key(_key), + value(_value), + is_last_header(_is_last_header), + header_index(_header_index) + { + } + +// LCOV_EXCL_START - sync functions, can only be tested once the sync module exists + template + void + save(Archive &ar) const + { + ar( + key, + value, + is_last_header, + header_index + ); + } + + template + void + load(Archive &ar) + { + ar( + key, + value, + is_last_header, + header_index + ); + } +// LCOV_EXCL_STOP + + void + print(std::ostream &out_stream) const + { + out_stream + << "'" + << std::dumpHex(key) + << "': '" + << std::dumpHex(value) + << "' (Index: " + << std::to_string(header_index) + << ", Is last header: " + << (is_last_header ? "True" : "False") + << ")"; + } + + const Buffer & getKey() const { return key; } + const Buffer & getValue() const { return value; } + + bool isLastHeader() const { return is_last_header; } + uint8_t getHeaderIndex() const { return header_index; } + +private: + Buffer key; + Buffer value; + bool is_last_header = false; + uint8_t header_index = 0; +}; + +using BodyModification = Buffer; +class HttpBodyModification : I_ModifiableContent +{ +public: + Maybe + inject(ModificationPosition position, const Buffer &data) + { + return modify( + Modification( + std::move(data), + ModificationType::INJECT, + position + ) + ); + } + + ModificationList + getModificationList() const override + { + ModificationList injected_data; + for (const auto &injection : modifications) { + auto injection_buffer = injection.second; + injected_data.emplace_back(injection.first, ModificationType::INJECT, injection_buffer); + } + return injected_data; + } + +private: + Maybe + modify(const Modification &mod) override + { + if (modifications.find(mod.getModificationPosition()) != modifications.end()) { + return genError("Modification at the provided index already exists"); + } + modifications[mod.getModificationPosition()] = mod.getModification(); + return Maybe(); + } + + std::map modifications; +}; + +class HttpBody +{ +public: + HttpBody() + : + data(), + previous_chunked_data(), + is_last_chunk(false), + body_chunk_index(0) + {} + + HttpBody(const Buffer &body_data, bool _is_last_chunk, uint8_t _body_chunk_index) + : + data(body_data), + previous_chunked_data(), + is_last_chunk(_is_last_chunk), + body_chunk_index(_body_chunk_index) + {} + +// LCOV_EXCL_START - sync functions, can only be tested once the sync module exists + template + void + save(Archive &ar) const + { + ar( + data, + previous_chunked_data, + is_last_chunk, + 
body_chunk_index + ); + } + + template + void + load(Archive &ar) + { + ar( + data, + previous_chunked_data, + is_last_chunk, + body_chunk_index + ); + } +// LCOV_EXCL_STOP + + void + print(std::ostream &out_stream) const + { + out_stream + << "'" + << std::dumpHex(data) + << "' (Index: " + << std::to_string(body_chunk_index) + << ", Is last chunk: " + << (is_last_chunk ? "True" : "False") + << ")"; + } + + const Buffer & getData() const { return data; } + const Buffer & getPreviousChunkedData() const { return previous_chunked_data; } + void setPreviousChunkedData(const Buffer &prev_body_data) { previous_chunked_data = prev_body_data; } + + bool isLastChunk() const { return is_last_chunk; } + uint8_t getBodyChunkIndex() const { return body_chunk_index; } + +private: + Buffer data; + Buffer previous_chunked_data; + bool is_last_chunk; + uint8_t body_chunk_index; +}; + +class EventVerdict +{ +public: + EventVerdict() = default; + + EventVerdict(ngx_http_cp_verdict_e event_verdict) : modifications(), verdict(event_verdict) {} + + EventVerdict(const ModificationList &mods) : modifications(mods) {} + + EventVerdict(const ModificationList &mods, ngx_http_cp_verdict_e event_verdict) : + modifications(mods), + verdict(event_verdict) + {} + +// LCOV_EXCL_START - sync functions, can only be tested once the sync module exists + template void serialize(T &ar, uint) { ar(verdict); } +// LCOV_EXCL_STOP + + const ModificationList & getModifications() const { return modifications; } + ngx_http_cp_verdict_e getVerdict() const { return verdict; } + +private: + ModificationList modifications; + ngx_http_cp_verdict_e verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT; +}; + +#endif // __I_HTTP_EVENT_IMPL_H__ diff --git a/components/include/http_inspection_events.h b/components/include/http_inspection_events.h new file mode 100755 index 0000000..fb06278 --- /dev/null +++ b/components/include/http_inspection_events.h @@ -0,0 +1,186 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
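For orientation, a minimal usage sketch of the modification containers defined above; it assumes Buffer can be constructed from a std::string literal, which this excerpt does not show, and all header names and payloads are placeholders.

// Sketch: collect header and body modifications for one transaction and
// attach the flattened list to a verdict.
HttpHeaderModification header_mods;
auto append_res = header_mods.appendHeader("X-Example-Header", Buffer("example-value"));
if (!append_res.ok()) {
    dbgWarning(D_HTTP_MANAGER) << "Failed to append header: " << append_res.getErr();
}
header_mods.injectValue(0, Buffer("X-Injected: value\r\n"));

HttpBodyModification body_mods;
body_mods.inject(128, Buffer("<!-- injected snippet -->"));

EventVerdict verdict(
    header_mods.getModificationList(),
    ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT
);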
+ +#ifndef __HTTP_INSPECTION_EVENTS_H__ +#define __HTTP_INSPECTION_EVENTS_H__ + +#include "debug.h" +#include "event.h" + +#include "http_event_impl/filter_verdict.h" +#include "http_event_impl/i_http_event_impl.h" + +using ResponseCode = uint16_t; + +class HttpRequestHeaderEvent : public Event +{ +public: + HttpRequestHeaderEvent(const HttpHeader &header) : req_header(header) {} + + const Buffer & getKey() const { return req_header.getKey(); } + const Buffer & getValue() const { return req_header.getValue(); } + bool isLastHeader() const { return req_header.isLastHeader(); } + uint8_t getHeaderIndex() const { return req_header.getHeaderIndex(); } + + template + void + save(Archive &ar) const + { + req_header.save(ar); + } + + void print(std::ostream &out_stream) const { req_header.print(out_stream); } + +private: + const HttpHeader &req_header; +}; + +class HttpResponseHeaderEvent: public Event +{ +public: + HttpResponseHeaderEvent(const HttpHeader &header) : res_header(header) {} + + const Buffer & getKey() const { return res_header.getKey(); } + const Buffer & getValue() const { return res_header.getValue(); } + bool isLastHeader() const { return res_header.isLastHeader(); } + uint8_t getHeaderIndex() const { return res_header.getHeaderIndex(); } + + template + void + save(Archive &ar) const + { + res_header.save(ar); + } + + void print(std::ostream &out_stream) const { res_header.print(out_stream); } + +private: + const HttpHeader &res_header; +}; + +class HttpRequestBodyEvent: public Event +{ +public: + HttpRequestBodyEvent(const HttpBody &body, const Buffer &previous_chunked_data) + : + req_body(body), + prev_chunked_data(previous_chunked_data) + {} + + const Buffer & getData() const { return req_body.getData(); } + const Buffer & getPreviousChunkedData() const { return prev_chunked_data; } + bool isLastChunk() const { return req_body.isLastChunk(); } + + template + void + save(Archive &ar) const + { + req_body.save(ar); + } + + void print(std::ostream &out_stream) const { req_body.print(out_stream); } + +private: + const HttpBody &req_body; + const Buffer &prev_chunked_data; +}; + +class HttpResponseBodyEvent: public Event +{ +public: + HttpResponseBodyEvent(const HttpBody &body, const Buffer &previous_chunked_data) + : + res_body(body), + prev_chunked_data(previous_chunked_data) + {} + + const Buffer & getData() const { return res_body.getData(); } + const Buffer & getPreviousChunkedData() const { return prev_chunked_data; } + bool isLastChunk() const { return res_body.isLastChunk(); } + uint8_t getBodyChunkIndex() const { return res_body.getBodyChunkIndex(); } + + template + void + save(Archive &ar) const + { + res_body.save(ar); + } + + void print(std::ostream &out_stream) const { res_body.print(out_stream); } + +private: + const HttpBody &res_body; + const Buffer &prev_chunked_data; +}; + + +class NewHttpTransactionEvent : public Event +{ +public: + NewHttpTransactionEvent(const HttpTransactionData &event_data) : http_transaction_event_data(event_data) {} + + const IPAddr & getSourceIP() const { return http_transaction_event_data.getSourceIP(); } + uint16_t getSourcePort() const { return http_transaction_event_data.getSourcePort(); } + const IPAddr & getListeningIP() const { return http_transaction_event_data.getListeningIP(); } + uint16_t getListeningPort() const { return http_transaction_event_data.getListeningPort(); } + const std::string & getDestinationHost() const { return http_transaction_event_data.getDestinationHost(); } + const std::string & getHttpProtocol() const { 
return http_transaction_event_data.getHttpProtocol(); } + const std::string & getURI() const { return http_transaction_event_data.getURI(); } + const std::string & getHttpMethod() const { return http_transaction_event_data.getHttpMethod(); } + + void print(std::ostream &out_stream) const { http_transaction_event_data.print(out_stream); } + + template + void + save(Archive &ar) const + { + http_transaction_event_data.save(ar); + } + +private: + const HttpTransactionData &http_transaction_event_data; +}; + +class ResponseCodeEvent : public Event +{ +public: + ResponseCodeEvent(const ResponseCode &res_code) : http_response_code(res_code) {} + + const ResponseCode & getResponseCode() const { return http_response_code; } + + template + void + save(Archive &ar) const + { + ar(http_response_code); + } + + void print(std::ostream &out_stream) const { out_stream << http_response_code; } + +private: + ResponseCode http_response_code; +}; + +class EndRequestEvent : public Event +{ +}; + +class EndTransactionEvent : public Event +{ +}; + +class WaitTransactionEvent : public Event +{ +}; + +#endif // __HTTP_INSPECTION_EVENTS_H__ diff --git a/components/include/http_manager.h b/components/include/http_manager.h new file mode 100755 index 0000000..983cdcf --- /dev/null +++ b/components/include/http_manager.h @@ -0,0 +1,47 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __HTTP_MANAGER_H__ +#define __HTTP_MANAGER_H__ + +#include "singleton.h" +#include "i_mainloop.h" +#include "i_http_manager.h" +#include "i_rest_api.h" +#include "i_table.h" +#include "i_logging.h" +#include "component.h" + +class HttpManager + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + HttpManager(); + ~HttpManager(); + + void preload(); + + void init(); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __HTTP_MANAGER_H__ diff --git a/components/include/http_transaction_common.h b/components/include/http_transaction_common.h new file mode 100644 index 0000000..69c1c72 --- /dev/null +++ b/components/include/http_transaction_common.h @@ -0,0 +1,37 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
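As a rough illustration of how the event wrappers defined above pair with the HttpHeader and HttpBody classes from i_http_event_impl.h (again assuming Buffer is constructible from a std::string and default-constructible):

// Sketch: wrap a parsed header and a body chunk in their inspection events.
HttpHeader host_header(Buffer("Host"), Buffer("www.example.com"), 0, /*is_last_header=*/true);
HttpRequestHeaderEvent header_event(host_header);
bool is_last = header_event.isLastHeader(); // mirrors the wrapped HttpHeader

HttpBody body_chunk(Buffer("{\"key\": \"value\"}"), /*is_last_chunk=*/true, /*body_chunk_index=*/0);
HttpRequestBodyEvent body_event(body_chunk, Buffer()); // no previous chunked data yet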
+ +#ifndef __HTTP_TRANSACTION_ENUM_H__ +#define __HTTP_TRANSACTION_ENUM_H__ + +namespace HttpTransaction { + +enum class Method { GET, HEAD, POST, DELETE, CONNECT, OPTIONS, TRACE, PATCH, PUT }; +enum class Dir { REQUEST, RESPONSE }; +enum class Verdict { ACCEPT, DROP, INJECT, REDIRECT, NONE, DEFAULT }; + +enum class StatusCode { + OK = 200, + CREATED = 201, + NO_CONTENT = 204, + NOT_MODIFIED = 304, + BAD_REQUEST = 400, + UNAUTHORIZED = 401, + FORBIDDEN = 403, + NOT_FOUND = 404, + CONFLICT = 409, + INTERNAL_SERVER_ERROR = 500 +}; + +} +#endif // __HTTP_TRANSACTION_ENUM_H__ diff --git a/components/include/http_transaction_data.h b/components/include/http_transaction_data.h new file mode 100755 index 0000000..5fff08b --- /dev/null +++ b/components/include/http_transaction_data.h @@ -0,0 +1,136 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __HTTP_TRANSACTION_DATA_H__ +#define __HTTP_TRANSACTION_DATA_H__ + +#include +#include +#include +#include +#include + +#include "connkey.h" +#include "buffer.h" +#include "enum_range.h" +#include "maybe_res.h" +#include "http_transaction_common.h" +#include "compression_utils.h" + +class HttpTransactionData +{ +public: + HttpTransactionData(); + + HttpTransactionData ( + std::string http_proto, + std::string method, + std::string host_name, + IPAddr listening_ip, + uint16_t listening_port, + std::string uri, + IPAddr client_ip, + uint16_t client_port + ); + +// LCOV_EXCL_START - sync functions, can only be tested once the sync module exists + template + void + save(Archive &ar) const + { + ar( + http_proto, + method, + host_name, + listening_ip, + listening_port, + uri, + client_ip, + client_port, + response_content_encoding + ); + } + + template + void + load(Archive &ar) + { + ar( + http_proto, + method, + host_name, + listening_ip, + listening_port, + uri, + client_ip, + client_port, + response_content_encoding + ); + } +// LCOV_EXCL_STOP + + static Maybe createTransactionData(const Buffer &transaction_raw_data); + + const IPAddr & getSourceIP() const { return client_ip; } + uint16_t getSourcePort() const { return client_port; } + const IPAddr & getListeningIP() const { return listening_ip; } + uint16_t getListeningPort() const { return listening_port; } + const std::string & getDestinationHost() const { return host_name; } + const std::string & getHttpProtocol() const { return http_proto; } + const std::string & getURI() const { return uri; } + const std::string & getHttpMethod() const { return method; } + + void print(std::ostream &out_stream) const; + + CompressionType getResponseContentEncoding() const { return response_content_encoding; } + bool isRequest() const { return is_request; } + + void setDirection(bool _is_request) { is_request = _is_request; } + + void + setResponseContentEncoding(const CompressionType _response_content_encoding) + { + response_content_encoding = _response_content_encoding; + } + + static const std::string http_proto_ctx; + static const std::string method_ctx; + static const std::string 
host_name_ctx; + static const std::string listening_port_ctx; + static const std::string listening_ip_ctx; + static const std::string uri_ctx; + static const std::string uri_path_decoded; + static const std::string uri_query_decoded; + static const std::string client_ip_ctx; + static const std::string client_port_ctx; + static const std::string req_headers; + static const std::string req_body; + static const std::string source_identifier; + static const std::string proxy_ip_ctx; + + static const CompressionType default_response_content_encoding; + +private: + std::string http_proto; + std::string method = "GET"; + std::string host_name; + IPAddr listening_ip; + uint16_t listening_port; + std::string uri; + IPAddr client_ip; + uint16_t client_port; + bool is_request; + CompressionType response_content_encoding; +}; + +#endif // __HTTP_TRANSACTION_DATA_H__ diff --git a/components/include/hybrid_mode_telemetry.h b/components/include/hybrid_mode_telemetry.h new file mode 100755 index 0000000..d98bfe5 --- /dev/null +++ b/components/include/hybrid_mode_telemetry.h @@ -0,0 +1,34 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __HYBRID_MODE_TELEMETRY_H__ +#define __HYBRID_MODE_TELEMETRY_H__ + +#include "generic_metric.h" + +class HybridModeMetricEvent : public Event +{ +public: + HybridModeMetricEvent() {} +}; + +class HybridModeMetric : public GenericMetric, public Listener +{ +public: + void upon(const HybridModeMetricEvent &event) override; + +private: + MetricCalculations::LastReportedValue wd_process_restart{this, "watchdogProcessStartupEventsSum"}; +}; + +#endif // __HYBRID_MODE_TELEMETRY_H__ diff --git a/components/include/i_details_resolver.h b/components/include/i_details_resolver.h new file mode 100644 index 0000000..7bf45db --- /dev/null +++ b/components/include/i_details_resolver.h @@ -0,0 +1,42 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
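The hybrid-mode metric above follows the same event/listener pattern as the other metrics in this patch; raising the event is a one-liner, assuming the Event base class exposes notify() (as the cpu_event.notify() call in nginx_intaker_metric.h later in this patch suggests):

// Sketch: report a watchdog-driven process restart; HybridModeMetric::upon()
// is the registered listener that accumulates it.
HybridModeMetricEvent().notify();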
+ +#ifndef __I_DETAILS_RESOLVER_H__ +#define __I_DETAILS_RESOLVER_H__ + +#include "maybe_res.h" + +#include + +class I_DetailsResolver +{ +public: + virtual Maybe getHostname() = 0; + virtual Maybe getPlatform() = 0; + virtual Maybe getArch() = 0; + virtual std::string getAgentVersion() = 0; + virtual bool isKernelVersion3OrHigher() = 0; + virtual bool isGwNotVsx() = 0; + virtual bool isVersionEqualOrAboveR8110() = 0; + virtual bool isReverseProxy() = 0; + virtual Maybe> parseNginxMetadata() = 0; + virtual std::map getResolvedDetails() = 0; +#if defined(gaia) || defined(smb) + virtual bool compareCheckpointVersion(int cp_version, std::function compare_operator) const = 0; +#endif // gaia || smb + +protected: + virtual ~I_DetailsResolver() {} +}; + +#endif // __I_DETAILS_RESOLVER_H__ diff --git a/components/include/i_downloader.h b/components/include/i_downloader.h new file mode 100755 index 0000000..37f55e2 --- /dev/null +++ b/components/include/i_downloader.h @@ -0,0 +1,44 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_DOWNLOADER_H__ +#define __I_DOWNLOADER_H__ + +#include "i_orchestration_tools.h" +#include "i_update_communication.h" + +#include + +class I_Downloader +{ +public: + virtual Maybe downloadFileFromFog( + const std::string &checksum, + Package::ChecksumTypes, + const GetResourceFile &resourse_file + ) const = 0; + + virtual Maybe>downloadVirtualFileFromFog( + const GetResourceFile &resourse_file, + Package::ChecksumTypes checksum_type + ) const = 0; + + virtual Maybe downloadFileFromURL( + const std::string &url, + const std::string &checksum, + Package::ChecksumTypes checksum_type, + const std::string &service_name + ) const = 0; +}; + +#endif // __I_DOWNLOADER_H__ diff --git a/components/include/i_external_sdk_server.h b/components/include/i_external_sdk_server.h new file mode 100755 index 0000000..33cefc6 --- /dev/null +++ b/components/include/i_external_sdk_server.h @@ -0,0 +1,60 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
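A small sketch of consuming I_DetailsResolver; the Maybe results are assumed to wrap std::string, since the template arguments are not visible in this listing:

// Sketch: compose a one-line machine description from the resolver.
std::string
describeMachine(I_DetailsResolver &resolver)
{
    std::string description = "agent " + resolver.getAgentVersion();
    auto hostname = resolver.getHostname();
    if (hostname.ok()) description += " on host " + hostname.unpack();
    auto platform = resolver.getPlatform();
    if (platform.ok()) description += " (" + platform.unpack() + ")";
    if (resolver.isReverseProxy()) description += " [reverse proxy]";
    return description;
}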
+ +#ifndef __I_EXTERNAL_SDK_SERVER_H__ +#define __I_EXTERNAL_SDK_SERVER_H__ + +#include +#include + +#include "report/report.h" +#include "debug.h" + +class I_ExternalSdkServer +{ +public: + virtual void + sendLog( + const std::string &event_name, + ReportIS::Audience audience, + ReportIS::Severity severity, + ReportIS::Priority priority, + const std::string &tag, + const std::map &additional_fields) = 0; + + virtual void + sendDebug( + const std::string &file_name, + const std::string &function_name, + unsigned int line_number, + Debug::DebugLevel debug_level, + const std::string &trace_id, + const std::string &span_id, + const std::string &message, + const std::map &additional_fields) = 0; + + virtual void + sendMetric( + const std::string &event_title, + const std::string &service_name, + ReportIS::AudienceTeam team, + ReportIS::IssuingEngine issuing_engine, + const std::map &additional_fields) = 0; + + virtual Maybe getConfigValue(const std::string &config_path) = 0; + +protected: + virtual ~I_ExternalSdkServer() {} +}; + +#endif // __I_EXTERNAL_SDK_SERVER_H__ diff --git a/components/include/i_generic_rulebase.h b/components/include/i_generic_rulebase.h new file mode 100755 index 0000000..b99ad02 --- /dev/null +++ b/components/include/i_generic_rulebase.h @@ -0,0 +1,36 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_GENERIC_RULEBASE_H__ +#define __I_GENERIC_RULEBASE_H__ + +#include + +#include "generic_rulebase/parameters_config.h" +#include "generic_rulebase/zone.h" +#include "config.h" + +class I_GenericRulebase +{ +public: + virtual Maybe getLocalZone() const = 0; + virtual Maybe getOtherZone() const = 0; + + using ParameterKeyValues = std::unordered_map>; + virtual std::set getBehavior(const ParameterKeyValues &key_value_pairs) const = 0; + +protected: + ~I_GenericRulebase() {} +}; + +#endif // __I_GENERIC_RULEBASE_H__ diff --git a/components/include/i_gradual_deployment.h b/components/include/i_gradual_deployment.h new file mode 100644 index 0000000..2028b84 --- /dev/null +++ b/components/include/i_gradual_deployment.h @@ -0,0 +1,37 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
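For illustration, reading a configuration value through the SDK-server interface; the configuration path is a made-up example and the Maybe is assumed to carry a std::string:

// Sketch: fetch a config value with a fallback default.
std::string
getLogLevelOrDefault(I_ExternalSdkServer *sdk_server)
{
    auto value = sdk_server->getConfigValue("agent.debug.level"); // hypothetical path
    return value.ok() ? value.unpack() : std::string("info");
}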
+ +#ifndef __I_GRADUAL_DEPLOYMENT_H__ +#define __I_GRADUAL_DEPLOYMENT_H__ + +#include +#include +#include + +#include "maybe_res.h" +#include "c_common/ip_common.h" + +class I_GradualDeployment +{ +public: + enum class AttachmentType { NGINX, KERNEL, COUNT }; + + virtual Maybe setPolicy(AttachmentType type, const std::vector &str_ip_ranges) = 0; + virtual std::vector getPolicy(AttachmentType type) = 0; + virtual std::vector & getParsedPolicy(AttachmentType type) = 0; + +protected: + virtual ~I_GradualDeployment() {} +}; + +#endif // __I_GRADUAL_DEPLOYMENT_H__ diff --git a/components/include/i_http_manager.h b/components/include/i_http_manager.h new file mode 100755 index 0000000..5a517de --- /dev/null +++ b/components/include/i_http_manager.h @@ -0,0 +1,34 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_HTTP_MANAGER_H__ +#define __I_HTTP_MANAGER_H__ + +#include "http_inspection_events.h" + +class I_HttpManager +{ +public: + virtual FilterVerdict inspect(const HttpTransactionData &event) = 0; + virtual FilterVerdict inspect(const HttpHeader &event, bool is_request) = 0; + virtual FilterVerdict inspect(const HttpBody &event, bool is_request) = 0; + virtual FilterVerdict inspect(const ResponseCode &event) = 0; + virtual FilterVerdict inspectEndRequest() = 0; + virtual FilterVerdict inspectEndTransaction() = 0; + virtual FilterVerdict inspectDelayedVerdict() = 0; + +protected: + virtual ~I_HttpManager() {} +}; + +#endif // __I_HTTP_MANAGER_H__ diff --git a/components/include/i_k8s_policy_gen.h b/components/include/i_k8s_policy_gen.h new file mode 100755 index 0000000..ad4ddd2 --- /dev/null +++ b/components/include/i_k8s_policy_gen.h @@ -0,0 +1,27 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_K8S_POLICY_GEN_H__ +#define __I_K8S_POLICY_GEN_H__ + +class I_K8S_Policy_Gen +{ +public: + virtual std::string parsePolicy(const std::string &policy_version) = 0; + virtual const std::string & getPolicyPath(void) const = 0; + +protected: + ~I_K8S_Policy_Gen() {} +}; + +#endif //__I_K8S_POLICY_GEN_H__ diff --git a/components/include/i_manifest_controller.h b/components/include/i_manifest_controller.h new file mode 100755 index 0000000..ccea430 --- /dev/null +++ b/components/include/i_manifest_controller.h @@ -0,0 +1,26 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
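A rough sketch of the inspection sequence a caller on the attachment side would drive through I_HttpManager; the FilterVerdict results are only passed along here, since filter_verdict.h is not part of this excerpt, and a real caller would act on each intermediate verdict before continuing.

// Sketch: feed one request through the manager in the order its events arrive.
FilterVerdict
inspectRequest(
    I_HttpManager &manager,
    const HttpTransactionData &transaction,
    const std::vector<HttpHeader> &headers,
    const HttpBody &body)
{
    FilterVerdict verdict = manager.inspect(transaction);
    for (const HttpHeader &header : headers) {
        verdict = manager.inspect(header, true); // true = request direction
    }
    verdict = manager.inspect(body, true);
    return manager.inspectEndRequest();
}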
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_MANIFEST_CONTROLLER_H__ +#define __I_MANIFEST_CONTROLLER_H__ + +#include + +class I_ManifestController +{ +public: + virtual bool updateManifest(const std::string &new_manifest_file) = 0; + virtual bool loadAfterSelfUpdate() = 0; +}; + +#endif // __I_MANIFEST_CONTROLLER_H__ diff --git a/components/include/i_orchestration_status.h b/components/include/i_orchestration_status.h new file mode 100755 index 0000000..4305c55 --- /dev/null +++ b/components/include/i_orchestration_status.h @@ -0,0 +1,87 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_ORCHESTRATION_STATUS_H__ +#define __I_ORCHESTRATION_STATUS_H__ + +#include +#include +#include + +#include "enum_array.h" + +enum class OrchestrationStatusResult { SUCCESS, FAILED }; +enum class OrchestrationStatusFieldType { REGISTRATION, MANIFEST, LAST_UPDATE, COUNT }; +enum class OrchestrationStatusConfigType { MANIFEST, POLICY, SETTINGS, DATA, COUNT }; + +class I_OrchestrationStatus +{ +public: + virtual void writeStatusToFile() = 0; + + virtual const std::string & getLastUpdateAttempt() const = 0; + virtual const std::string & getUpdateStatus() const = 0; + virtual const std::string & getUpdateTime() const = 0; + virtual const std::string & getLastManifestUpdate() const = 0; + virtual const std::string & getPolicyVersion() const = 0; + virtual const std::string & getLastPolicyUpdate() const = 0; + virtual const std::string & getLastSettingsUpdate() const = 0; + virtual const std::string & getUpgradeMode() const = 0; + virtual const std::string & getFogAddress() const = 0; + virtual const std::string & getRegistrationStatus() const = 0; + virtual const std::string & getAgentId() const = 0; + virtual const std::string & getProfileId() const = 0; + virtual const std::string & getTenantId() const = 0; + virtual const std::string & getManifestStatus() const = 0; + virtual const std::string & getManifestError() const = 0; + virtual const std::map & getServicePolicies() const = 0; + virtual const std::map & getServiceSettings() const = 0; + virtual const std::string getRegistrationDetails() const = 0; + virtual void recoverFields() = 0; + virtual void setIsConfigurationUpdated(EnumArray config_types) = 0; + virtual void setFogAddress(const std::string &_fog_address) = 0; + virtual void setLastUpdateAttempt() = 0; + virtual void setPolicyVersion(const std::string &_policy_version) = 0; + virtual void setRegistrationStatus(const std::string &_reg_status) = 0; + virtual void setUpgradeMode(const std::string 
&_upgrade_mode) = 0; + virtual void setAgentType(const std::string &_agent_type) = 0; + virtual void setAgentDetails( + const std::string &_agent_id, + const std::string &_profile_id, + const std::string &_tenant_id + ) = 0; + + virtual void + setFieldStatus( + const OrchestrationStatusFieldType &field_type_status, + const OrchestrationStatusResult &status, + const std::string &failure_reason = "" + ) = 0; + + virtual void + setRegistrationDetails( + const std::string &name, + const std::string &type, + const std::string &platform, + const std::string &arch + ) = 0; + + virtual void + setServiceConfiguration( + const std::string &service_name, + const std::string &path, + const OrchestrationStatusConfigType &configuration_file_type + ) = 0; +}; + +#endif // __I_ORCHESTRATION_STATUS_H__ diff --git a/components/include/i_orchestration_tools.h b/components/include/i_orchestration_tools.h new file mode 100755 index 0000000..6e43b0c --- /dev/null +++ b/components/include/i_orchestration_tools.h @@ -0,0 +1,125 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_ORCHESTRATION_TOOLS_H__ +#define __I_ORCHESTRATION_TOOLS_H__ + +#include "package.h" +#include "debug.h" +#include "maybe_res.h" + +#include + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +class I_OrchestrationTools +{ +public: + // Used for the calculation of the manifest and the policy files + static const Package::ChecksumTypes SELECTED_CHECKSUM_TYPE = Package::ChecksumTypes::SHA256; + static constexpr const char * SELECTED_CHECKSUM_TYPE_STR = "sha256sum"; + using packageName = std::string; + using packageDetails = std::string; + + template + Maybe + jsonFileToObject(const std::string &file_path) const + { + Maybe file_data = readFile(file_path); + if (file_data.ok()) { + return jsonStringToObject(file_data.unpack()); + } + return genError(file_data.getErr()); + } + + template + Maybe + jsonStringToObject(const std::string &input) const + { + std::stringstream string_stream; + string_stream << input; + return jsonStringToObject(string_stream); + } + + template + Maybe + jsonStringToObject(std::stringstream &string_stream) const + { + try { + cereal::JSONInputArchive archive_in(string_stream); + T object; + object.serialize(archive_in); + return object; + } catch (cereal::Exception &e) { + return genError(e.what()); + } + } + + template + bool + objectToJsonFile(T &obj, const std::string &file_path) const + { + try { + std::ofstream ostream(file_path); + cereal::JSONOutputArchive archive_out(ostream); + obj.serialize(archive_out); + } catch (cereal::Exception &e) { + dbgWarning(D_ORCHESTRATOR) << "Failed to write object to JSON file. 
Object: " << typeid(T).name() + << ", file : "<< file_path << ", error: " << e.what(); + return false; + } + return true; + } + + template + Maybe + objectToJson(const T &obj) const + { + std::stringstream sstream; + try { + cereal::JSONOutputArchive archive_out(sstream); + obj.serialize(archive_out); + } catch (cereal::Exception &e) { + std::string error_msg = "Failed to write object to JSON. Object: " + std::string(typeid(T).name()) + + ", error: " + e.what(); + return genError(error_msg); + } + return sstream.str(); + } + + virtual bool packagesToJsonFile(const std::map &packages, const std::string &path) const = 0; + virtual Maybe> loadPackagesFromJson(const std::string &path) const = 0; + + virtual Maybe> jsonObjectSplitter( + const std::string &json, + const std::string &tenant_id = "") const = 0; + + virtual bool isNonEmptyFile(const std::string &path) const = 0; + virtual Maybe readFile(const std::string &path) const = 0; + virtual bool writeFile(const std::string &text, const std::string &path) const = 0; + virtual bool removeFile(const std::string &path) const = 0; + virtual bool copyFile(const std::string &src_path, const std::string &dst_path) const = 0; + virtual bool doesFileExist(const std::string &file_path) const = 0; + virtual bool createDirectory(const std::string &directory_path) const = 0; + virtual bool doesDirectoryExist(const std::string &dir_path) const = 0; + virtual bool executeCmd(const std::string &cmd) const = 0; + + virtual std::string base64Encode(const std::string &input) const = 0; + virtual std::string base64Decode(const std::string &input) const = 0; + + virtual Maybe calculateChecksum( + Package::ChecksumTypes checksum_type, + const std::string &path) const = 0; +}; + +#endif // __I_ORCHESTRATION_TOOLS_H__ diff --git a/components/include/i_package_handler.h b/components/include/i_package_handler.h new file mode 100755 index 0000000..8d1fa90 --- /dev/null +++ b/components/include/i_package_handler.h @@ -0,0 +1,47 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __I_PACKAGE_HANDLER_H__ +#define __I_PACKAGE_HANDLER_H__ + +#include + +class I_PackageHandler +{ +public: + virtual bool shouldInstallPackage(const std::string &package_name, const std::string &install_file_path) const = 0; + + virtual bool installPackage( + const std::string &package_name, + const std::string &install_file_path, + bool restore_mode + ) const = 0; + virtual bool uninstallPackage( + const std::string &package_name, + const std::string &package_path, + const std::string &install_file_path + ) const = 0; + virtual bool preInstallPackage( + const std::string &package_name, + const std::string &install_file_path + ) const = 0; + virtual bool postInstallPackage( + const std::string &package_name, + const std::string &install_file_path + ) const = 0; + virtual bool updateSavedPackage( + const std::string &package_name, + const std::string &install_file_path + ) const = 0; +}; +#endif // __I_PACKAGE_HANDLER_H__ diff --git a/components/include/i_pm_scan.h b/components/include/i_pm_scan.h new file mode 100755 index 0000000..fdf5ea6 --- /dev/null +++ b/components/include/i_pm_scan.h @@ -0,0 +1,67 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_PM_SCAN_H__ +#define __I_PM_SCAN_H__ + +#include +#include +#include +#include + +#include "buffer.h" +#include "maybe_res.h" + +class PMPattern +{ +public: + PMPattern() {} + PMPattern(const std::string &pat, bool start, bool end, uint index = 0) + : + pattern(pat), + match_start(start), + match_end(end), + index(index) + {} + + bool operator<(const PMPattern &other) const; + bool operator==(const PMPattern &other) const; + + bool isStartMatch() const { return match_start; } + bool isEndMatch() const { return match_end; } + const unsigned char * data() const { return reinterpret_cast(pattern.data()); } + size_t size() const { return pattern.size(); } + bool empty() const { return pattern.empty(); } + uint getIndex() const { return index; } + +private: + std::string pattern; + bool match_start = false; + bool match_end = false; + uint index; +}; + +class I_PMScan +{ +public: + using CBFunction = std::function; + + virtual std::set scanBuf(const Buffer &buf) const = 0; + virtual std::set> scanBufWithOffset(const Buffer &buf) const = 0; + virtual void scanBufWithOffsetLambda(const Buffer &buf, CBFunction cb) const = 0; + +protected: + ~I_PMScan() {} +}; + +#endif // __I_PM_SCAN_H__ diff --git a/components/include/i_service_controller.h b/components/include/i_service_controller.h new file mode 100755 index 0000000..af2a5f6 --- /dev/null +++ b/components/include/i_service_controller.h @@ -0,0 +1,62 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
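A compact sketch of the install sequence these hooks imply; the ordering (pre-install, install, post-install, then persisting the package) is inferred from the method names rather than documented behavior:

// Sketch: install a single downloaded package through I_PackageHandler.
bool
installDownloadedPackage(
    const I_PackageHandler &handler,
    const std::string &package_name,
    const std::string &install_file_path)
{
    if (!handler.shouldInstallPackage(package_name, install_file_path)) return true; // already current
    if (!handler.preInstallPackage(package_name, install_file_path)) return false;
    if (!handler.installPackage(package_name, install_file_path, /*restore_mode=*/false)) return false;
    if (!handler.postInstallPackage(package_name, install_file_path)) return false;
    return handler.updateSavedPackage(package_name, install_file_path);
}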
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_SERVICE_CONTROLLER_H__ +#define __I_SERVICE_CONTROLLER_H__ + +#include +#include +#include + +#include "connkey.h" +#include "rest.h" + +enum class ReconfStatus { SUCCEEDED, IN_PROGRESS, FAILED, INACTIVE }; + +class I_ServiceController +{ +public: + virtual const std::string & getPolicyVersion() const = 0; + virtual const std::string & getUpdatePolicyVersion() const = 0; + virtual void updateReconfStatus(int id, ReconfStatus status) = 0; + virtual void startReconfStatus( + int id, + ReconfStatus status, + const std::string &service_name, + const std::string &service_id + ) = 0; + + virtual bool + updateServiceConfiguration( + const std::string &new_policy_path, + const std::string &new_settings_path, + const std::vector &new_data_files = {}, + const std::string &tenant_id = "" + ) = 0; + + virtual bool isServiceInstalled(const std::string &service_name) = 0; + + virtual void registerServiceConfig( + const std::string &service_name, + PortNumber listening_port, + const std::vector &expected_configurations, + const std::string &service_id + ) = 0; + + virtual std::map getServiceToPortMap() = 0; + +protected: + virtual ~I_ServiceController() {} +}; + +#endif // __I_SERVICE_CONTROLLER_H__ diff --git a/components/include/i_static_resources_handler.h b/components/include/i_static_resources_handler.h new file mode 100755 index 0000000..0cf6e4e --- /dev/null +++ b/components/include/i_static_resources_handler.h @@ -0,0 +1,30 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_STATIC_RESOURCES_HANDLER_H__ +#define __I_STATIC_RESOURCES_HANDLER_H__ + +#include + +#include "maybe_res.h" + +class I_StaticResourcesHandler +{ +public: + virtual bool registerStaticResource(const std::string &resource_name, const std::string &resource_full_path) = 0; + +protected: + virtual ~I_StaticResourcesHandler() {} +}; + +#endif // __I_STATIC_RESOURCES_HANDLER_H__ diff --git a/components/include/i_update_communication.h b/components/include/i_update_communication.h new file mode 100755 index 0000000..65bf4c4 --- /dev/null +++ b/components/include/i_update_communication.h @@ -0,0 +1,36 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
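As a brief illustration of pushing a new policy to registered services through I_ServiceController; the file paths are placeholders, and the defaulted data-files and tenant arguments are omitted:

// Sketch: apply a freshly downloaded policy/settings pair.
bool
applyNewConfiguration(I_ServiceController &controller)
{
    return controller.updateServiceConfiguration(
        "/etc/cp/conf/policy.json",     // hypothetical policy path
        "/etc/cp/conf/settings.json"    // hypothetical settings path
    );
}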
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_UPDATE_COMMUNICATION_H__ +#define __I_UPDATE_COMMUNICATION_H__ + +#include "maybe_res.h" +#include "orchestrator/rest_api/get_resource_file.h" +#include "orchestrator/rest_api/orchestration_check_update.h" + +using OrchManifest = Maybe; +using OrchPolicy = Maybe; +using OrchSettings = Maybe; +using OrchData = Maybe; + +class I_UpdateCommunication +{ +public: + virtual Maybe authenticateAgent() = 0; + virtual Maybe getUpdate(CheckUpdateRequest &request) = 0; + virtual Maybe sendPolicyVersion(const std::string &policy_version) const = 0; + virtual Maybe downloadAttributeFile(const GetResourceFile &resourse_file) = 0; + virtual void setAddressExtenesion(const std::string &extension) = 0; +}; + +#endif // __I_UPDATE_COMMUNICATION_H__ diff --git a/components/include/i_waap_telemetry.h b/components/include/i_waap_telemetry.h new file mode 100755 index 0000000..fe3ff06 --- /dev/null +++ b/components/include/i_waap_telemetry.h @@ -0,0 +1,46 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "WaapEnums.h" + +struct DecisionTelemetryData +{ + BlockType blockType; + ThreatLevel threat; + std::string assetName; + std::string practiceId; + std::string practiceName; + std::string source; + std::set attackTypes; + + DecisionTelemetryData() : + blockType(NOT_BLOCKING), + threat(NO_THREAT), + assetName(), + practiceId(), + practiceName(), + source(), + attackTypes() + { + } +}; + +class I_Telemetry +{ +public: + virtual void logDecision(std::string assetId, DecisionTelemetryData& data) = 0; +protected: + virtual ~I_Telemetry() {} +}; diff --git a/components/include/ip_utilities.h b/components/include/ip_utilities.h new file mode 100755 index 0000000..9968778 --- /dev/null +++ b/components/include/ip_utilities.h @@ -0,0 +1,108 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
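A short sketch of reporting a WAAP decision through I_Telemetry; the asset and practice identifiers are placeholders, and only the enum defaults visible in this header (NOT_BLOCKING, NO_THREAT) are relied on:

// Sketch: fill the telemetry record for a benign transaction and log it.
void
reportBenignDecision(I_Telemetry &telemetry, const std::string &asset_id)
{
    DecisionTelemetryData data;          // defaults to NOT_BLOCKING / NO_THREAT
    data.assetName = "my-web-asset";     // placeholder
    data.practiceId = "practice-1";      // placeholder
    data.practiceName = "Web Application Best Practice";
    data.source = "192.0.2.10";
    telemetry.logDecision(asset_id, data);
}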
+ +#ifndef __IP_UTILITIES_H__ +#define __IP_UTILITIES_H__ + +#include +#include +#include +#include +#include +#include + +#include "c_common/ip_common.h" +#include "common.h" +#include "maybe_res.h" +#include "debug.h" + +// LCOV_EXCL_START Reason: temporary until we add relevant UT until 07/10 +bool operator<(const IpAddress &this_ip_addr, const IpAddress &other_ip_addr); + +bool operator==(const IpAddress &this_ip_addr, const IpAddress &other_ip_addr); +// LCOV_EXCL_STOP + +Maybe> extractAddressAndMaskSize(const std::string &cidr); + +template +std::pair applyMaskOnAddress(const std::vector &oct, Integer mask); + +Maybe> createRangeFromCidrV4(const std::pair &cidr_values); + +Maybe> createRangeFromCidrV6(const std::pair &cidr_values); + +namespace IPUtilities { +Maybe> getInterfaceIPs(); + +Maybe> createRangeFromCidr(const std::string &cidr); + +bool isIpAddrInRange(const IPRange &rule_ip_range, const IpAddress &ip_addr); + +std::string IpAddrToString(const IpAddress &address); + +IpAddress createIpFromString(const std::string &ip_string); + +template +Maybe createRangeFromString(const std::string &range, const std::string &type_name); + +using IpProto = uint8_t; +using Port = uint16_t; + +class IpAttrFromString +{ +public: + IpAttrFromString(const std::string &in_data) : data(in_data) {} + + operator Maybe(); + operator Maybe(); + operator Maybe(); + +private: + std::string data; +}; + +template +Maybe +createRangeFromString(const std::string &range, const std::string &type_name) +{ + std::string range_start; + std::string range_end; + size_t delimiter_pos = range.find("/"); + if (delimiter_pos != std::string::npos) { + auto cidr = IPUtilities::createRangeFromCidr(range); + if (!cidr.ok()) return genError("Couldn't create ip range from CIDR, error: " + cidr.getErr()); + range_start = cidr.unpack().first; + range_end = cidr.unpack().second; + } else { + delimiter_pos = range.find("-"); + range_start = range.substr(0, delimiter_pos); + range_end = delimiter_pos == std::string::npos ? range_start : range.substr(delimiter_pos + 1); + } + + Maybe range_start_value = IpAttrFromString(range_start); + if (!range_start_value.ok()) { + return genError("provided value is not a legal " + type_name + ". Provided value: " + range_start); + } + + Maybe range_end_value = IpAttrFromString(range_end); + if (!range_end_value.ok()) { + return genError("provided value is not a legal " + type_name + ". Provided value: " + range_end); + } + + if (*range_end_value < *range_start_value) { + return genError("Could not create " + type_name + "range. Error: start value is greater than end value"); + } + return Range{.start = *range_start_value, .end = *range_end_value}; +} +} +#endif // __IP_UTILITIES_H__ diff --git a/components/include/k8s_policy_gen.h b/components/include/k8s_policy_gen.h new file mode 100644 index 0000000..2588cab --- /dev/null +++ b/components/include/k8s_policy_gen.h @@ -0,0 +1,44 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
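A minimal sketch of the CIDR helpers above; the returned pair is treated as a pair of strings here, matching the way createRangeFromString consumes it, and iostream is assumed for the printout:

// Sketch: expand a CIDR into its first/last addresses and print them.
void
printCidrBounds(const std::string &cidr)
{
    auto range = IPUtilities::createRangeFromCidr(cidr);
    if (!range.ok()) {
        std::cerr << "Bad CIDR " << cidr << ": " << range.getErr() << std::endl;
        return;
    }
    std::cout << "range: " << range.unpack().first << " - " << range.unpack().second << std::endl;
}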
+ +#ifndef __K8S_POLICY_GEN_H__ +#define __K8S_POLICY_GEN_H__ + +#include "config.h" +#include "component.h" +#include "i_mainloop.h" +#include "i_environment.h" +#include "i_k8s_policy_gen.h" + +class K8sPolicyGenerator + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + K8sPolicyGenerator(); + ~K8sPolicyGenerator(); + + void preload() override; + + void init() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __K8S_POLICY_GEN_H__ diff --git a/components/include/manifest_controller.h b/components/include/manifest_controller.h new file mode 100755 index 0000000..7ff8be6 --- /dev/null +++ b/components/include/manifest_controller.h @@ -0,0 +1,53 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __MANIFEST_CONTROLLER_H__ +#define __MANIFEST_CONTROLLER_H__ + +#include "i_manifest_controller.h" + +#include + +#include "i_orchestration_tools.h" +#include "i_package_handler.h" +#include "i_downloader.h" +#include "manifest_diff_calculator.h" +#include "manifest_handler.h" +#include "i_orchestration_status.h" +#include "i_environment.h" +#include "i_shell_cmd.h" +#include "component.h" + +class ManifestController + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + ManifestController(); + ~ManifestController(); + + void init() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __MANIFEST_CONTROLLER_H__ diff --git a/components/include/manifest_diff_calculator.h b/components/include/manifest_diff_calculator.h new file mode 100755 index 0000000..9494fa0 --- /dev/null +++ b/components/include/manifest_diff_calculator.h @@ -0,0 +1,50 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __MANIFEST_DIFF_CALCULATOR_H__ +#define __MANIFEST_DIFF_CALCULATOR_H__ + +#include "package.h" +#include "i_orchestration_tools.h" + +class ManifestDiffCalculator : Singleton::Consume +{ +public: + ManifestDiffCalculator() = default; + + void init(); + + std::map + filterUntrackedPackages( + const std::map ¤t_packages, + std::map &new_packages + ); + + bool + filterCorruptedPackages( + std::map &new_packages, + std::map &corrupted_packages + ); + + bool + buildInstallationQueue( + const Package &updated_package, + std::vector &installation_queue, + const std::map ¤t_packages, + const std::map &new_packages + ); + +private: + std::string corrupted_file_path; +}; +#endif // __MANIFEST_DIFF_CALCULATOR_H__ diff --git a/components/include/manifest_handler.h b/components/include/manifest_handler.h new file mode 100755 index 0000000..0a2f382 --- /dev/null +++ b/components/include/manifest_handler.h @@ -0,0 +1,76 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __MANIFEST_HANDLER_H__ +#define __MANIFEST_HANDLER_H__ + +#include "package.h" +#include "i_package_handler.h" +#include "i_downloader.h" +#include "i_orchestration_tools.h" +#include "i_orchestration_status.h" +#include "i_environment.h" +#include "i_agent_details.h" +#include "i_details_resolver.h" +#include "i_time_get.h" + +class ManifestHandler + : + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + using packageFilePath = std::string; + + ManifestHandler() = default; + void init(); + + bool + downloadPackages( + const std::vector &updated_packages, + std::vector> &downloaded_packages + ); + + bool + installPackages( + const std::vector> &downloaded_packages_files, + std::map ¤t_packages, + std::map &corrupted_packages + ); + + bool uninstallPackage(Package &removed_package); + + bool + selfUpdate( + const Package &updated_package, + std::map ¤t_packages, + const packageFilePath &installation_file + ); + +private: + Maybe downloadPackage(const Package &package, bool is_clean_installation); + + std::string manifest_file_path; + std::string temp_ext; + std::string backup_ext; + std::string packages_dir; + std::string orch_service_name; + std::string default_dir; +}; +#endif // __MANIFEST_HANDLER_H__ diff --git a/components/include/messaging_downloader_client.h b/components/include/messaging_downloader_client.h new file mode 100755 index 0000000..41b67e0 --- /dev/null +++ b/components/include/messaging_downloader_client.h @@ -0,0 +1,49 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __MESSAGING_DOWNLOADER_H__ +#define __MESSAGING_DOWNLOADER_H__ + +#include "i_messaging_downloader.h" +#include "i_messaging.h" +#include "i_rest_api.h" +#include "i_mainloop.h" +#include "i_environment.h" +#include "component.h" + +USE_DEBUG_FLAG(D_COMMUNICATION); + +class MessagingDownloaderClient + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + MessagingDownloaderClient(); + ~MessagingDownloaderClient(); + + void preload() override; + + void init() override; + void fini() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __MESSAGING_DOWNLOADER_H__ diff --git a/components/include/messaging_downloader_server.h b/components/include/messaging_downloader_server.h new file mode 100755 index 0000000..8549deb --- /dev/null +++ b/components/include/messaging_downloader_server.h @@ -0,0 +1,51 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __MESSAGING_DOWNLOADER_SERVER_H__ +#define __MESSAGING_DOWNLOADER_SERVER_H__ + +#include "i_messaging_downloader.h" +#include "i_messaging.h" +#include "i_rest_api.h" +#include "i_mainloop.h" +#include "i_environment.h" +#include "i_agent_details.h" +#include "component.h" + +USE_DEBUG_FLAG(D_COMMUNICATION); + +class MessagingDownloaderServer + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + MessagingDownloaderServer(); + ~MessagingDownloaderServer(); + + void init(); + void fini(); + + void preload(); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __MESSAGING_DOWNLOADER_SERVER_H__ diff --git a/components/include/mock/mock_nginx_attachment.h b/components/include/mock/mock_nginx_attachment.h new file mode 100755 index 0000000..8d51642 --- /dev/null +++ b/components/include/mock/mock_nginx_attachment.h @@ -0,0 +1,16 @@ +#ifndef __MOCK_NGINX_ATTACHMENT_H__ +#define __MOCK_NGINX_ATTACHMENT_H__ + +#include "nginx_attachment.h" + +class MockNginxAttachment: + public Singleton::Provide::From> +{ +public: + MOCK_METHOD2( + registerStaticResource, + bool(const std::string &static_resource_name, const std::string &static_resource_path) + ); +}; + +#endif // __MOCK_NGINX_ATTACHMENT_H__ diff --git a/components/include/nginx_attachment.h b/components/include/nginx_attachment.h new file mode 100755 index 0000000..b0d0716 --- /dev/null +++ b/components/include/nginx_attachment.h @@ -0,0 +1,57 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __NGINX_ATTACHMENT_H__ +#define __NGINX_ATTACHMENT_H__ + +#include "singleton.h" +#include "i_mainloop.h" +#include "i_table.h" +#include "i_gradual_deployment.h" +#include "i_http_manager.h" +#include "i_static_resources_handler.h" +#include "i_socket_is.h" +#include "transaction_table_metric.h" +#include "nginx_attachment_metric.h" +#include "nginx_intaker_metric.h" +#include "component.h" + +using SessionID = uint32_t; + +class NginxAttachment + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume>, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + NginxAttachment(); + ~NginxAttachment(); + + void preload(); + + void init(); + void fini(); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __NGINX_ATTACHMENT_H__ diff --git a/components/include/nginx_attachment_metric.h b/components/include/nginx_attachment_metric.h new file mode 100755 index 0000000..7263445 --- /dev/null +++ b/components/include/nginx_attachment_metric.h @@ -0,0 +1,87 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
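Since the mock above is gmock-based, a typical unit-test expectation would look roughly like this; the test name, resource name, and the component under test are hypothetical:

// Sketch: assert that a component registers its static resource during init.
TEST(NginxAttachmentTest, RegistersStaticResource)
{
    MockNginxAttachment mock_attachment;
    EXPECT_CALL(mock_attachment, registerStaticResource("block-page.html", testing::_))
        .WillOnce(testing::Return(true));

    // ... init() of the component under test would run here and trigger the call.
}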
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __NGINX_ATTACHMENT_METRIC_H__ +#define __NGINX_ATTACHMENT_METRIC_H__ + +#include "generic_metric.h" + +class nginxAttachmentEvent : public Event +{ +public: + enum class networkVerdict { + REGISTRATION_SUCCESS, + REGISTRATION_FAIL, + CONNECTION_FAIL + }; + + enum class trafficVerdict { + INSPECT, + ACCEPT, + DROP, + INJECT, + IRRELEVANT, + RECONF, + WAIT + }; + + void resetAllCounters(); + + void addNetworkingCounter(networkVerdict _verdict); + + void addTrafficVerdictCounter(trafficVerdict _verdict); + + void addResponseInspectionCounter(uint64_t _counter); + + uint64_t getNetworkingCounter(networkVerdict _verdict) const; + + uint64_t getTrafficVerdictCounter(trafficVerdict _verdict) const; + + uint64_t getResponseInspectionCounter() const; + +private: + uint64_t successfull_registrations_counter = 0; + uint64_t failed_registrations_counter = 0; + uint64_t failed_connections_counter = 0; + uint64_t accept_verdict_counter = 0; + uint64_t inspect_verdict_counter = 0; + uint64_t drop_verdict_counter = 0; + uint64_t inject_verdict_counter = 0; + uint64_t irrelevant_verdict_counter = 0; + uint64_t reconf_verdict_counter = 0; + uint64_t response_inspection_counter = 0; + uint64_t wait_verdict_counter = 0; +}; + +class nginxAttachmentMetric + : + public GenericMetric, + public Listener +{ +public: + void upon(const nginxAttachmentEvent &event) override; + +private: + MetricCalculations::Counter successfull_registrations{this, "successfullRegistrationsSum"}; + MetricCalculations::Counter failed_registrations{this, "failedRegistrationsSum"}; + MetricCalculations::Counter failed_connections{this, "failedConnectionsSum"}; + MetricCalculations::Counter inspect_verdict{this, "inspectVerdictSum"}; + MetricCalculations::Counter accept_verdict{this, "acceptVeridctSum"}; + MetricCalculations::Counter drop_verdict{this, "dropVerdictSum"}; + MetricCalculations::Counter inject_verdict{this, "injectVerdictSum"}; + MetricCalculations::Counter irrelevant_verdict{this, "irrelevantVerdictSum"}; + MetricCalculations::Counter reconf_verdict{this, "reconfVerdictSum"}; + MetricCalculations::Counter response_inspection{this, "responseInspection"}; +}; + +#endif // __NGINX_ATTACHMENT_METRIC_H__ diff --git a/components/include/nginx_intaker_metric.h b/components/include/nginx_intaker_metric.h new file mode 100755 index 0000000..b25e03b --- /dev/null +++ b/components/include/nginx_intaker_metric.h @@ -0,0 +1,146 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
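The metric class above follows the Event/Listener pattern used throughout these components: the attachment code fills an nginxAttachmentEvent and notifies it, and nginxAttachmentMetric, registered as a Listener, aggregates the values in upon(). A short illustration of the raising side with arbitrary values; Event::notify() dispatching to registered listeners is inferred from the notifyCPU() helper in the intaker header below.

    #include "nginx_attachment_metric.h"

    static void
    reportAttachmentActivity()   // illustrative helper, not part of the patch
    {
        nginxAttachmentEvent attachment_event;
        attachment_event.addNetworkingCounter(nginxAttachmentEvent::networkVerdict::REGISTRATION_SUCCESS);
        attachment_event.addTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::ACCEPT);
        attachment_event.addResponseInspectionCounter(1);
        attachment_event.notify();           // listeners such as nginxAttachmentMetric receive it via upon()
        attachment_event.resetAllCounters(); // clear local counters after reporting
    }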
+ +#ifndef __NGINX_INTAKER_METRIC_H__ +#define __NGINX_INTAKER_METRIC_H__ + +#include "nginx_attachment_common.h" +#include "generic_metric.h" +#include "cpu/cpu_metric.h" + + +class nginxIntakerEvent : public Event +{ +public: + nginxIntakerEvent() : cpu_event(0, true) {} + + void resetAllCounters(); + + ngx_http_plugin_metric_type_e EnumOfIndex(int i); + + void addPluginMetricCounter(const ngx_http_cp_metric_data_t *recieved_metric_data); + + uint64_t getPluginMetricCounter(ngx_http_plugin_metric_type_e _verdict) const; + + void notifyCPU() const { cpu_event.notify(); } + +private: + uint64_t successfull_inspection_counter = 0; + uint64_t open_failure_inspection_counter = 0; + uint64_t close_failure_inspection_counter = 0; + uint64_t transparent_mode_counter = 0; + uint64_t total_transparent_time = 0; + uint64_t accept_verdict_counter = 0; + uint64_t inspect_verdict_counter = 0; + uint64_t drop_verdict_counter = 0; + uint64_t inject_verdict_counter = 0; + uint64_t irrelevant_verdict_counter = 0; + uint64_t reconf_verdict_counter = 0; + uint64_t wait_verdict_counter = 0; + uint64_t average_overall_processing_time_until_verdict = 0; + uint64_t max_overall_processing_time_until_verdict = 0; + uint64_t min_overall_processing_time_until_verdict = 0; + uint64_t average_req_processing_time_until_verdict = 0; + uint64_t max_req_processing_time_until_verdict = 0; + uint64_t min_req_processing_time_until_verdict = 0; + uint64_t average_res_processing_time_until_verdict = 0; + uint64_t max_res_processing_time_until_verdict = 0; + uint64_t min_res_processing_time_until_verdict = 0; + uint64_t req_failed_compression_counter = 0; + uint64_t res_failed_compression_counter = 0; + uint64_t req_failed_decompression_counter = 0; + uint64_t res_failed_decompression_counter = 0; + uint64_t req_successful_compression_counter = 0; + uint64_t res_successful_compression_counter = 0; + uint64_t req_successful_decompression_counter = 0; + uint64_t res_successful_decompression_counter = 0; + uint64_t corrupted_zip_skipped_session_counter = 0; + uint64_t thread_timeout = 0; + uint64_t reg_thread_timeout = 0; + uint64_t req_header_thread_timeout = 0; + uint64_t req_body_thread_timeout = 0; + uint64_t average_req_body_size_upon_timeout = 0; + uint64_t max_req_body_size_upon_timeout = 0; + uint64_t min_req_body_size_upon_timeout = 0; + uint64_t res_header_thread_timeout = 0; + uint64_t res_body_thread_timeout = 0; + uint64_t average_res_body_size_upon_timeout = 0; + uint64_t max_res_body_size_upon_timeout = 0; + uint64_t min_res_body_size_upon_timeout = 0; + uint64_t thread_failure = 0; + uint64_t req_proccessing_timeout = 0; + uint64_t res_proccessing_timeout = 0; + uint64_t req_failed_to_reach_upstream = 0; + CPUEvent cpu_event; +}; + +class nginxIntakerMetric + : + public GenericMetric, + public Listener +{ +public: + void upon(const nginxIntakerEvent &event) override; + +private: + using Counter = MetricCalculations::Counter; + using LastValue = MetricCalculations::LastReportedValue; + + Counter successfull_inspection_counter{this, "successfullInspectionTransactionsSum"}; + Counter open_failure_inspection_counter{this, "failopenTransactionsSum"}; + Counter close_failure_inspection_counter{this, "failcloseTransactionsSum"}; + Counter transparent_mode_counter{this, "transparentModeTransactionsSum"}; + Counter total_transparent_time{this, "totalTimeInTransparentModeSum"}; + Counter inspect_verdict_counter{this, "reachInspectVerdictSum"}; + Counter accept_verdict_counter{this, "reachAcceptVerdictSum"}; + Counter 
drop_verdict_counter{this, "reachDropVerdictSum"}; + Counter inject_verdict_counter{this, "reachInjectVerdictSum"}; + Counter irrelevant_verdict_counter{this, "reachIrrelevantVerdictSum"}; + Counter reconf_verdict_counter{this, "reachReconfVerdictSum"}; + LastValue average_overall_processing_time_until_verdict{this, "overallSessionProcessTimeToVerdictAvgSample"}; + LastValue max_overall_processing_time_until_verdict{this, "overallSessionProcessTimeToVerdictMaxSample"}; + LastValue min_overall_processing_time_until_verdict{this, "overallSessionProcessTimeToVerdictMinSample"}; + LastValue average_req_processing_time_until_verdict{this, "requestProcessTimeToVerdictAvgSample"}; + LastValue max_req_processing_time_until_verdict{this, "requestProcessTimeToVerdictMaxSample"}; + LastValue min_req_processing_time_until_verdict{this, "requestProcessTimeToVerdictMinSample"}; + LastValue average_res_processing_time_until_verdict{this, "responseProcessTimeToVerdictAvgSample"}; + LastValue max_res_processing_time_until_verdict{this, "responseProcessTimeToVerdictMaxSample"}; + LastValue min_res_processing_time_until_verdict{this, "responseProcessTimeToVerdictMinSample"}; + Counter req_failed_compression_counter{this, "requestCompressionFailureSum"}; + Counter res_failed_compression_counter{this, "responseCompressionFailureSum"}; + Counter req_failed_decompression_counter{this, "requestDecompressionFailureSum"}; + Counter res_failed_decompression_counter{this, "responseDecompressionFailureSum"}; + Counter req_successful_compression_counter{this, "requestCompressionSuccessSum"}; + Counter res_successful_compression_counter{this, "responseCompressionSuccessSum"}; + Counter req_successful_decompression_counter{this, "requestDecompressionSuccessSum"}; + Counter res_successful_decompression_counter{this, "responseDecompressionSuccessSum"}; + Counter corrupted_zip_skipped_session_counter{this, "skippedSessionsUponCorruptedZipSum"}; + Counter thread_timeout{this, "attachmentThreadReachedTimeoutSum"}; + Counter reg_thread_timeout{this, "registrationThreadReachedTimeoutSum"}; + Counter req_header_thread_timeout{this, "requestHeaderThreadReachedTimeoutSum"}; + Counter req_body_thread_timeout{this, "requestBodyThreadReachedTimeoutSum"}; + LastValue average_req_body_size_upon_timeout{this, "requestBodySizeUponTimeoutAvgSample"}; + LastValue max_req_body_size_upon_timeout{this, "requestBodySizeUponTimeoutMaxSample"}; + LastValue min_req_body_size_upon_timeout{this, "requestBodySizeUponTimeoutMinSample"}; + Counter res_header_thread_timeout{this, "respondHeaderThreadReachedTimeoutSum"}; + Counter res_body_thread_timeout{this, "respondBodyThreadReachedTimeoutSum"}; + LastValue average_res_body_size_upon_timeout{this, "responseBodySizeUponTimeoutAvgSample"}; + LastValue max_res_body_size_upon_timeout{this, "responseBodySizeUponTimeoutMaxSample"}; + LastValue min_res_body_size_upon_timeout{this, "responseBodySizeUponTimeoutMinSample"}; + Counter thread_failure{this, "attachmentThreadFailureSum"}; + Counter req_proccessing_timeout{this, "httpRequestProcessingReachedTimeoutSum"}; + Counter res_proccessing_timeout{this, "httpResponseProcessingReachedTimeoutSum"}; + Counter req_failed_to_reach_upstream{this, "httpRequestFailedToReachWebServerUpstreamSum"}; +}; + +#endif // __NGINX_INTAKER_METRIC_H__ diff --git a/components/include/orchestration_comp.h b/components/include/orchestration_comp.h new file mode 100755 index 0000000..e5c8c16 --- /dev/null +++ b/components/include/orchestration_comp.h @@ -0,0 +1,73 @@ +// Copyright 
(C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __ORCHESTRATION_COMP_H__ +#define __ORCHESTRATION_COMP_H__ + +#include "i_messaging_downloader.h" +#include "i_messaging.h" +#include "i_mainloop.h" +#include "i_shell_cmd.h" +#include "i_encryptor.h" +#include "i_orchestration_status.h" +#include "i_rest_api.h" +#include "i_orchestration_tools.h" +#include "i_downloader.h" +#include "i_service_controller.h" +#include "i_manifest_controller.h" +#include "i_update_communication.h" +#include "i_details_resolver.h" +#include "i_shell_cmd.h" +#include "i_agent_details.h" +#include "i_environment.h" +#include "i_tenant_manager.h" +#include "i_package_handler.h" +#include "component.h" + +class OrchestrationComp + : + public Component, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + OrchestrationComp(); + ~OrchestrationComp(); + + void preload() override; + + void init() override; + void fini() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __ORCHESTRATION_COMP_H__ diff --git a/components/include/orchestration_status.h b/components/include/orchestration_status.h new file mode 100755 index 0000000..84879a4 --- /dev/null +++ b/components/include/orchestration_status.h @@ -0,0 +1,49 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
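OrchestrationComp, like the other components in this patch, exposes only the Component lifecycle (preload, init, fini) and hides its logic behind a pimpl. A hedged sketch of how a node typically drives such a component; the surrounding infrastructure steps are placeholders, not taken from this patch.

    #include "orchestration_comp.h"

    int
    main()
    {
        OrchestrationComp orchestration;

        orchestration.preload();   // register configuration/rest hooks before configuration is loaded
        // ... node infrastructure loads configuration and the environment here ...
        orchestration.init();      // resolve consumed interfaces and start the component's routines

        // ... main loop runs here ...

        orchestration.fini();      // tear down on shutdown
        return 0;
    }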
+ +#ifndef __ORCHESTRATION_STATUS_H__ +#define __ORCHESTRATION_STATUS_H__ + +#include "i_orchestration_status.h" + +#include + +#include "singleton.h" +#include "component.h" +#include "i_orchestration_tools.h" +#include "i_time_get.h" +#include "i_mainloop.h" +#include "i_agent_details.h" +#include "customized_cereal_map.h" + +class OrchestrationStatus + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + OrchestrationStatus(); + ~OrchestrationStatus(); + + void init() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __ORCHESTRATION_STATUS_H__ diff --git a/components/include/orchestration_tools.h b/components/include/orchestration_tools.h new file mode 100755 index 0000000..0e26d3e --- /dev/null +++ b/components/include/orchestration_tools.h @@ -0,0 +1,33 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __ORCHESTRATION_TOOLS_H__ +#define __ORCHESTRATION_TOOLS_H__ + +#include + +#include "i_orchestration_tools.h" +#include "component.h" + +class OrchestrationTools : public Component, Singleton::Provide +{ +public: + OrchestrationTools(); + ~OrchestrationTools(); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __ORCHESTRATION_TOOLS_H__ diff --git a/components/include/orchestrator/data.h b/components/include/orchestrator/data.h new file mode 100755 index 0000000..7497544 --- /dev/null +++ b/components/include/orchestrator/data.h @@ -0,0 +1,46 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
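The Provide/Consume declarations in these headers are the wiring mechanism between components: a provider registers an interface implementation, and a consumer fetches it with Singleton::Consume<Interface>::by<Consumer>(). Below is a sketch of that access pattern as it might appear inside the OrchestrationStatus implementation; the writeFile() call on I_OrchestrationTools is an assumption used only for illustration.

    #include <string>

    #include "i_orchestration_tools.h"
    #include "orchestration_status.h"

    static void
    persistStatus(const std::string &path, const std::string &content)   // hypothetical helper
    {
        // OrchestrationStatus consumes I_OrchestrationTools, so its Impl can fetch the provided
        // instance and call the interface (the method name here is assumed for illustration):
        auto tools = Singleton::Consume<I_OrchestrationTools>::by<OrchestrationStatus>();
        tools->writeFile(content, path);
    }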
+ +#ifndef __DATA_H__ +#define __DATA_H__ + +#include +#include + +#include "cereal/archives/json.hpp" +#include "cereal/types/string.hpp" +#include "cereal/types/vector.hpp" + +#include "debug.h" +#include "maybe_res.h" + +class Data +{ +public: + enum class ChecksumTypes { SHA1, SHA256, SHA512, MD5 }; + + const std::string & getDownloadPath() const { return download_path; } + const std::string & getVersion() const { return version; } + const std::string & getChecksum() const { return checksum_value; } + const ChecksumTypes & getChecksumType() const { return checksum_type; } + + void serialize(cereal::JSONInputArchive & in_archive); + +private: + ChecksumTypes checksum_type = ChecksumTypes::SHA256; + std::string version; + std::string download_path; + std::string checksum_value; +}; + +#endif // __DATA_H__ diff --git a/components/include/orchestrator/rest_api/get_resource_file.h b/components/include/orchestrator/rest_api/get_resource_file.h new file mode 100644 index 0000000..4d0632d --- /dev/null +++ b/components/include/orchestrator/rest_api/get_resource_file.h @@ -0,0 +1,123 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __GET_RESOURCE_FILE_H__ +#define __GET_RESOURCE_FILE_H__ + +#include + +#include "rest.h" +#include "i_messaging.h" + +class GetResourceFile : public ClientRest +{ + class TenantResource : public ClientRest + { + public: + TenantResource(const std::string &_tenant_id, const std::string &_version, const std::string &_checksum) + : + tenant_id(_tenant_id), + version(_version), + checksum(_checksum) + { + } + + bool + operator==(const TenantResource &other) const + { + return + tenant_id.get() == other.tenant_id.get() && + version.get() == other.version.get() && + checksum.get() == other.checksum.get(); + } + + C2S_LABEL_PARAM(std::string, tenant_id, "tenantId"); + C2S_LABEL_PARAM(std::string, version, "version"); + C2S_LABEL_PARAM(std::string, checksum, "checksum"); + }; + +public: + enum class ResourceFileType { + MANIFEST, + POLICY, + SETTINGS, + DATA, + VIRTUAL_SETTINGS, + VIRTUAL_POLICY, + + COUNT + }; + + GetResourceFile() = default; + + GetResourceFile(const ResourceFileType _file_type) + : + file_type(_file_type) + { + } + + bool + operator==(const GetResourceFile &other) const + { + if (file_type != other.file_type) return false; + if (tenants.isActive() && other.tenants.isActive()) return tenants.get() == other.tenants.get(); + + return (!tenants.isActive() && !other.tenants.isActive()); + } + + void + addTenant(const std::string &tenant_id, const std::string &version, const std::string &checksum) + { + if (!isVirtual()) return; + + if (!tenants.isActive()) tenants = std::vector(); + tenants.get().emplace_back(tenant_id, version, checksum); + } + + std::string + getFileName() const + { + switch (file_type) + { + case ResourceFileType::MANIFEST: return "manifest"; + case ResourceFileType::POLICY: return "policy"; + case ResourceFileType::SETTINGS: return "settings"; + case ResourceFileType::DATA: return "data"; + case 
ResourceFileType::VIRTUAL_SETTINGS: return "virtualSettings"; + case ResourceFileType::VIRTUAL_POLICY: return "virtualPolicy"; + default: + dbgAssert(false) << "Unknown file type"; + } + return std::string(); + } + + I_Messaging::Method + getRequestMethod() const + { + return isVirtual() ? I_Messaging::Method::POST : I_Messaging::Method::GET; + } + +private: + bool + isVirtual() const + { + return + file_type == ResourceFileType::VIRTUAL_SETTINGS || + file_type == ResourceFileType::VIRTUAL_POLICY; + } + + C2S_LABEL_OPTIONAL_PARAM(std::vector, tenants, "tenants"); + ResourceFileType file_type; +}; + +#endif // __GET_RESOURCE_FILE_H__ diff --git a/components/include/orchestrator/rest_api/orchestration_check_update.h b/components/include/orchestrator/rest_api/orchestration_check_update.h new file mode 100644 index 0000000..e06aaa0 --- /dev/null +++ b/components/include/orchestrator/rest_api/orchestration_check_update.h @@ -0,0 +1,182 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __ORCHESTRATION_CHECK_UPDATE_H__ +#define __ORCHESTRATION_CHECK_UPDATE_H__ + +#include + +#include "rest.h" +#include "maybe_res.h" + +class CheckUpdateRequest : public ClientRest +{ +public: + class Tenants : public ClientRest + { + public: + Tenants() = default; + + Tenants(const Tenants &other) + { + tenant_id = other.tenant_id; + checksum = other.checksum; + version = other.version; + } + + Tenants(const std::string &_tenant_id, const std::string &_checksum, const std::string &_version) + : + tenant_id(_tenant_id), + checksum(_checksum), + version(_version) + { + } + + bool + operator==(const Tenants &other) const + { + return + tenant_id.get() == other.tenant_id.get() && + checksum.get() == other.checksum.get() && + version.get() == other.version.get(); + } + + const std::string & getTenantID() const { return tenant_id.get(); } + const std::string & getChecksum() const { return checksum.get(); } + const std::string & getVersion() const { return version.get(); } + + private: + BOTH_LABEL_PARAM(std::string, tenant_id, "tenantId"); + BOTH_LABEL_PARAM(std::string, checksum, "checksum"); + BOTH_LABEL_PARAM(std::string, version, "version"); + }; + + CheckUpdateRequest( + const std::string &_manifest, + const std::string &_policy, + const std::string &_settings, + const std::string &_data, + const std::string &_checksum_type, + const std::string &_policy_version) + : + manifest(_manifest), + policy(_policy), + settings(_settings), + data(_data), + checksum_type(_checksum_type), + policy_version(_policy_version) + { + out_virtual_policy.setActive(true); + out_virtual_settings.setActive(true); + } + + Maybe + getManifest() const + { + if (manifest.get().empty()) return genError("No manifest"); + return manifest.get(); + } + + Maybe + getPolicy() const + { + if (policy.get().empty()) return genError("No policy"); + return policy.get(); + } + + Maybe + getSettings() const + { + if (settings.get().empty()) return genError("No settings"); + return settings.get(); + } + + Maybe + getData() 
const + { + if (data.get().empty()) return genError("No data"); + return data.get(); + } + + Maybe> + getVirtualPolicy() const + { + if (!in_virtual_policy.isActive()) return genError("no virtual policy is found"); + return in_virtual_policy.get().getTenants(); + } + + Maybe> + getVirtualSettings() const + { + if (!in_virtual_settings.isActive()) return genError("no virtual settings are found"); + return in_virtual_settings.get().getTenants(); + } + + template + void + addTenantPolicy(Args ...args) + { + if (!out_virtual_policy.isActive()) out_virtual_policy.setActive(true); + out_virtual_policy.get().addTenant(std::forward(args)...); + } + + template + void + addTenantSettings(Args ...args) + { + if (!out_virtual_settings.isActive()) out_virtual_settings.setActive(true); + out_virtual_settings.get().addTenant(std::forward(args)...); + } + + void setGreedyMode() { check_all_tenants = true; } + +private: + class VirtualConfig : public ClientRest + { + public: + VirtualConfig() + { + tenants.setActive(true); + } + + template + void + addTenant(Args ...args) + { + if (!tenants.isActive()) tenants.setActive(true); + tenants.get().emplace_back(std::forward(args)...); + } + + const std::vector & getTenants() const { return tenants.get(); } + + private: + BOTH_LABEL_PARAM(std::vector, tenants, "tenants"); + }; + + BOTH_LABEL_PARAM(std::string, manifest, "manifest"); + BOTH_LABEL_PARAM(std::string, policy, "policy"); + BOTH_LABEL_PARAM(std::string, settings, "settings"); + BOTH_LABEL_OPTIONAL_PARAM(std::string, data, "data"); + + C2S_LABEL_OPTIONAL_PARAM(VirtualConfig, out_virtual_settings, "virtualSettings"); + C2S_LABEL_OPTIONAL_PARAM(VirtualConfig, out_virtual_policy, "virtualPolicy"); + BOTH_LABEL_OPTIONAL_PARAM(bool, check_all_tenants, "checkForAllTenants"); + + C2S_LABEL_PARAM(std::string, checksum_type, "checksum-type"); + C2S_LABEL_PARAM(std::string, policy_version, "policyVersion"); + + S2C_LABEL_OPTIONAL_PARAM(VirtualConfig, in_virtual_policy, "virtualPolicy"); + S2C_LABEL_OPTIONAL_PARAM(VirtualConfig, in_virtual_settings, "virtualSettings"); +}; + +#endif // __ORCHESTRATION_CHECK_UPDATE_H__ diff --git a/components/include/package.h b/components/include/package.h new file mode 100755 index 0000000..568879c --- /dev/null +++ b/components/include/package.h @@ -0,0 +1,75 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
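A hedged example of how CheckUpdateRequest is meant to be populated on the agent side before being serialized and sent via I_Messaging; all values are placeholders and the include path is assumed.

    #include <string>

    #include "orchestrator/rest_api/orchestration_check_update.h"

    static void
    buildCheckUpdateRequest()   // illustrative helper, not part of the patch
    {
        CheckUpdateRequest request(
            "<manifest checksum>",    // manifest
            "<policy checksum>",      // policy
            "<settings checksum>",    // settings
            "<data checksum>",        // data
            "sha1sum",                // checksum type (placeholder)
            "1.0.0"                   // current policy version (placeholder)
        );

        // Multi-tenant agents can attach per-tenant version/checksum entries:
        request.addTenantPolicy("tenant-id", "checksum", "version");
        request.addTenantSettings("tenant-id", "checksum", "version");

        // Ask for updates of all tenants, not only the listed ones:
        request.setGreedyMode();

        // The populated object would then be sent with I_Messaging, and the response fields
        // (e.g. getPolicy(), getVirtualPolicy()) inspected by the caller (not shown).
    }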
+ +#ifndef __PACKAGE_H__ +#define __PACKAGE_H__ + +#include +#include + +#include "cereal/archives/json.hpp" +#include "cereal/types/string.hpp" +#include "cereal/types/vector.hpp" + +#include "debug.h" +#include "maybe_res.h" + +class Package +{ +public: + enum class ChecksumTypes { SHA1, SHA256, SHA512, MD5 }; + enum class PackageType { Service, SharedObject }; + + const std::string & getDownloadPath() const { return download_path; } + const std::string & getRelativeDownloadPath() const { return relative_path; } + const std::string & getName() const { return name; } + const std::string & getVersion() const { return version; } + const std::string & getChecksum() const { return checksum_value; } + const PackageType & getType() const { return package_type; } + const std::vector & getRequire() const { return require_packages; } + const ChecksumTypes & getChecksumType() const { return checksum_type; } + const Maybe & isInstallable() const { return installable; } + + bool operator==(const Package &other) const; + bool operator!=(const Package &other) const; + + void serialize(cereal::JSONOutputArchive & out_archive) const; + void serialize(cereal::JSONInputArchive & in_archive); + +private: + template + std::string + mapTypeToString(const T &type, const std::map &type_mapper) const + { + for (auto &mapped_type : type_mapper) { + if (mapped_type.second == type) return mapped_type.first; + } + + dbgAssert(false) << "Unsupported type " << static_cast(type); + // Just satisfying the compiler, this return never reached + return std::string(); + } + + Maybe installable = Maybe(); + std::string mirror; + std::string name; + std::string version; + std::string download_path; + std::string relative_path; + ChecksumTypes checksum_type; + std::string checksum_value; + PackageType package_type; + std::vector require_packages; +}; + +#endif // __PACKAGE_H__ diff --git a/components/include/package_handler.h b/components/include/package_handler.h new file mode 100755 index 0000000..6f852ca --- /dev/null +++ b/components/include/package_handler.h @@ -0,0 +1,42 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
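Package above relies on cereal's member serialize() convention, with one overload per archive direction. Below is a self-contained sketch of that convention using a hypothetical two-field type, to illustrate the round trip the Package implementation performs in its .cc; the field names are invented and do not reflect the real Package JSON schema.

    #include <iostream>
    #include <sstream>
    #include <string>

    #include "cereal/archives/json.hpp"
    #include "cereal/types/string.hpp"

    struct DemoPackage          // hypothetical type, not part of the patch
    {
        std::string name;
        std::string version;

        void
        serialize(cereal::JSONOutputArchive &out_archive) const
        {
            out_archive(cereal::make_nvp("name", name), cereal::make_nvp("version", version));
        }

        void
        serialize(cereal::JSONInputArchive &in_archive)
        {
            in_archive(cereal::make_nvp("name", name), cereal::make_nvp("version", version));
        }
    };

    int
    main()
    {
        std::stringstream ss;
        {
            cereal::JSONOutputArchive out(ss);   // scoped so the JSON is flushed before reading
            DemoPackage pkg{"orchestration", "1.0.0"};
            pkg.serialize(out);
        }

        DemoPackage loaded;
        {
            cereal::JSONInputArchive in(ss);
            loaded.serialize(in);
        }
        std::cout << loaded.name << " " << loaded.version << std::endl;
        return 0;
    }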
+ +#ifndef __PACKAGE_HANDLER_H__ +#define __PACKAGE_HANDLER_H__ + +#include "i_package_handler.h" +#include "i_orchestration_tools.h" +#include "i_shell_cmd.h" +#include "component.h" + +class PackageHandler + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume +{ +public: + PackageHandler(); + ~PackageHandler(); + + void preload() override; + + void init() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __PACKAGE_HANDLER_H__ diff --git a/components/include/packet.h b/components/include/packet.h new file mode 100755 index 0000000..28539b9 --- /dev/null +++ b/components/include/packet.h @@ -0,0 +1,189 @@ +#ifndef __PACKET_H__ +#define __PACKET_H__ + +#include +#include +#include + +#include "connkey.h" +#include "buffer.h" +#include "maybe_res.h" +#include "type_defs.h" + +enum class CDir : u_char +{ + C2S, + S2C +}; + +static inline CDir +otherCDir(CDir cdir) +{ + return static_cast(static_cast(CDir::S2C) - static_cast(cdir)); +} + +static inline std::ostream & +operator<<(std::ostream &os, CDir cdir) +{ + switch (cdir) { + case CDir::C2S: { + return os << "c2s"; + } + case CDir::S2C: { + return os << "s2c"; + } + } + return os << "Could not match direction of a connection - neither C2S, nor S2C (" << static_cast(cdir) << ")"; +} + +enum class PktErr +{ + UNINITIALIZED, + NON_ETHERNET_FRAME, + MAC_LEN_TOO_BIG, + NON_IP_PACKET, + UNKNOWN_L3_PROTOCOL, + + IP_SIZE_MISMATCH, + IP_VERSION_MISMATCH, + IP_HEADER_TOO_SMALL, + + PKT_TOO_SHORT_FOR_IP_HEADER, + PKT_TOO_SHORT_FOR_IP_EXTENSION_HEADER, + PKT_TOO_SHORT_FOR_IP_EXTENSION_HEADER_BODY, + UNKNOWN_IPV6_EXTENSION_HEADER, + + PKT_TOO_SHORT_FOR_L4_HEADER, + PKT_TOO_SHORT_FOR_TCP_OPTIONS, + TCP_HEADER_TOO_SMALL, + + PKT_TOO_SHORT_FOR_ICMP_ERROR_DATA, + ICMP_VERSION_MISMATCH, +}; + +enum class PktType +{ + PKT_L2 = 1, + PKT_L3 = 2, +}; + +std::ostream & operator<<(std::ostream &os, const PktErr &err); + +USE_DEBUG_FLAG(D_PACKET); + +class Packet +{ +public: + explicit Packet() {} + + template + static Maybe, PktErr> + genPacket(PktType type, IPType proto, Args&&... args) + { + // Same as make_unique, but I can't use make unique and keep the ctor private... + auto pkt = std::unique_ptr(new Packet()); + + pkt->setPacketType(type); + pkt->pkt_data = Buffer(std::forward(args)...); + + auto key = pkt->parsePacket(type, proto); + + if (!key.ok()) { + return genError(key.getErr()); + } + + dbgTrace(D_PACKET) << "Extracted key: " << *key; + pkt->key = *key; + + return std::move(pkt); + } + + PktType getPacketType() const { return pkt_type; } + IPType getPacketProto() const { return key.getType(); } + + bool isFragment() const { return is_fragment; } + + const Buffer & getL4Data() const { return l4_payload; } + const Buffer & getL4Header() const { return l4_header; } + const Buffer & getL3() const { return l3; } + const Buffer & getL3Data() const { return l3_payload; } + const Buffer & getL3Header() const { return l3_header; } + const Buffer & getL2Data() const { return l2_payload; } + const Buffer & getL2Header() const { return l2_header; } + const Buffer & getPacket() const { return pkt_data; } + + const ConnKey & getKey() const { return key; } + + void setInterface(NetworkIfNum value); + Maybe getInterface() const; + void setKey(const ConnKey &_key) { key = _key; } + CDir getCDir() const { return cdir; } + void setCDir(CDir _cdir) { cdir = _cdir; } + + void setZecoOpaque(u_int64_t _zeco_opaque); + Maybe getZecoOpaque() const; + + // Get the data (L2 and up) as a vector. Copies everything. 
+ std::vector getL2DataVec() const; + + template + void + serialize(Archive &ar, uint32_t) + { + ar( + key, + cdir, + pkt_type, + has_zeco_opaque, + zeco_opaque, + is_interface, + is_fragment, + interface, + pkt_data, + l2_header, + l2_payload, + l3, + l3_header, + l3_payload, + l4_header, + l4_payload + ); + } + +private: + ConnKey key; + CDir cdir; + PktType pkt_type; + + bool is_interface = false; + bool is_fragment = false; + NetworkIfNum interface; + Buffer pkt_data; + Buffer l2_header; + Buffer l2_payload; + Buffer l3; + Buffer l3_header; + Buffer l3_payload; + Buffer l4_header; + Buffer l4_payload; + bool has_zeco_opaque = false; + u_int64_t zeco_opaque; + + Maybe parsePacket(PktType type, IPType proto); + Maybe parseFromL2(); + Maybe parseFromL3v4(); + Maybe parseFromL3v6(); + Maybe getIPv6Proto(IPProto proto); + Maybe getIPv6ExtLen(uint offset_to_ext_hdr, IPProto ext_hdr_type); + Maybe getIPv6GenericExtLen(uint offset_to_ext_hdr, uint length_multiplier); + Maybe parseFromL4(const IPAddr &src, const IPAddr &dst, IPProto proto); + + std::tuple getICMPPortsV6(); + std::tuple getICMPPortsV4(); + std::tuple getICMPPorts(IPProto proto); + + void setPacketType(const PktType _pkt_type) { pkt_type = _pkt_type; } + Maybe getIcmpHdrLen(IPProto proto, IPType ip_type); +}; + +#endif // __PACKET_H__ diff --git a/components/include/pending_key.h b/components/include/pending_key.h new file mode 100755 index 0000000..fc5c89e --- /dev/null +++ b/components/include/pending_key.h @@ -0,0 +1,85 @@ +#ifndef __PENDING_KEY_H__ +#define __PENDING_KEY_H__ + +#include +#include +#include +#include "debug.h" +#include "maybe_res.h" +#include "connkey.h" + +class PendingKey +{ +public: + explicit PendingKey() {} + explicit PendingKey( + const IPAddr &_src, + const IPAddr &_dst, + PortNumber dport, + IPProto proto) + : + src(_src), + dst(_dst) + { + dst.port = dport; + src.proto = proto; + dst.proto = proto; + } + + PendingKey(const ConnKey &key) : PendingKey(key.getSrc(), key.getDst(), key.getDPort(), key.getProto()) {} + + bool + operator==(const PendingKey &other) const + { + auto my_tuple = std::tie(src, dst, dst.port, src.proto); + auto other_tuple = std::tie(other.src, other.dst, other.dst.port, other.src.proto); + return my_tuple == other_tuple; + } + + bool + operator!=(const PendingKey &other) const + { + return !(*this == other); + } + + const IPAddr & getSrc() const { return src; } + const IPAddr & getDst() const { return dst; } + PortNumber getDPort() const { return dst.port; } + IPProto getProto() const { return src.proto; } + + Maybe + getType() const + { + if(src.type != dst.type) return genError("Mismatch in connection types (Src and Dst types are not identical)"); + return src.type; + } + + std::ostream & print(std::ostream &os) const; + size_t hash() const; + + template + void + serialize(Archive &ar, uint32_t) + { + ar(src, dst); + } + +private: + IPAddr src, dst; +}; + +// Specialization of std::hash<> for ConnKey +namespace std +{ + +template <> +struct hash +{ + size_t operator()(const PendingKey &k) const { return k.hash(); } +}; + +} // namespace std + +static inline std::ostream & operator<<(std::ostream &os, const PendingKey &k) { return k.print(os); } + +#endif // __PENDING_KEY_H__ diff --git a/components/include/pm_hook.h b/components/include/pm_hook.h new file mode 100644 index 0000000..282d96d --- /dev/null +++ b/components/include/pm_hook.h @@ -0,0 +1,45 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __PM_HOOK_H__ +#define __PM_HOOK_H__ + +#include +#include +#include +#include +#include "i_pm_scan.h" + +class KissThinNFA; + +class PMHook final : public I_PMScan +{ +public: + explicit PMHook(); + ~PMHook(); + + Maybe prepare(const std::set &patterns); + std::set scanBuf(const Buffer &buf) const override; + std::set> scanBufWithOffset(const Buffer &buf) const override; + void scanBufWithOffsetLambda(const Buffer &buf, I_PMScan::CBFunction cb) const override; + + // Line may begin with ^ or $ sign to mark LSS is at begin/end of buffer. + static Maybe lineToPattern(const std::string &line); + bool ok() const { return static_cast(handle); } + +private: + std::shared_ptr handle; + std::map patterns; +}; + +#endif // __PM_HOOK_H__ diff --git a/components/include/report_messaging.h b/components/include/report_messaging.h new file mode 100755 index 0000000..fb863d1 --- /dev/null +++ b/components/include/report_messaging.h @@ -0,0 +1,150 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __REPORT_MESSAGING_H__ +#define __REPORT_MESSAGING_H__ + +#include "singleton.h" +#include "i_time_get.h" +#include "i_messaging.h" +#include "report/report.h" + +class ReportMessaging + : + Singleton::Consume, + Singleton::Consume +{ +public: + template + ReportMessaging( + const std::string &title, + const ReportIS::AudienceTeam &audience_team, + const T &obj, + Args ...args) + : + ReportMessaging( + title, + audience_team, + obj, + false, + MessageTypeTag::GENERIC, + std::forward(args)... + ) + { + } + + template + ReportMessaging( + const std::string &title, + const ReportIS::AudienceTeam &audience_team, + const T &obj, + bool is_async_message, + Args ...args) + : + ReportMessaging( + title, + audience_team, + obj, + is_async_message, + MessageTypeTag::GENERIC, + std::forward(args)... + ) + { + } + + template + ReportMessaging( + const std::string &title, + const ReportIS::AudienceTeam &audience_team, + const T &obj, + bool is_async_message, + const MessageTypeTag &message_type, + Args ...args) + : + ReportMessaging( + title, + audience_team, + ReportIS::Severity::INFO, + ReportIS::Priority::LOW, + obj, + is_async_message, + message_type, + std::forward(args)... 
+ ) + { + } + + template + ReportMessaging( + const std::string &title, + const ReportIS::AudienceTeam &audience_team, + const ReportIS::Severity &severity, + const ReportIS::Priority &priority, + const T &obj, + Args ...args) + : + ReportMessaging( + title, + audience_team, + severity, + priority, + obj, + false, + MessageTypeTag::GENERIC, + std::forward(args)... + ) + { + } + + + template + ReportMessaging( + const std::string &title, + const ReportIS::AudienceTeam &audience_team, + const ReportIS::Severity &severity, + const ReportIS::Priority &priority, + const T &obj, + bool _is_async_message, + const MessageTypeTag &message_type, + Args ...args) + : + report( + title, + Singleton::Consume::by()->getWalltime(), + ReportIS::Type::EVENT, + ReportIS::Level::LOG, + ReportIS::LogLevel::INFO, + ReportIS::Audience::INTERNAL, + audience_team, + severity, + priority, + std::chrono::seconds(0), + std::forward(args)... + ), + is_async_message(_is_async_message), + message_type_tag(message_type) + { + report << LogField("eventObject", obj); + } + + ~ReportMessaging(); + + ReportMessaging & operator<<(const LogField &field); + +private: + Report report; + bool is_async_message; + MessageTypeTag message_type_tag; +}; + +#endif // __REPORT_MESSAGING_H__ diff --git a/components/include/service_controller.h b/components/include/service_controller.h new file mode 100755 index 0000000..a7648f0 --- /dev/null +++ b/components/include/service_controller.h @@ -0,0 +1,55 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __SERVICE_CONTROLLER_H__ +#define __SERVICE_CONTROLLER_H__ + +#include "i_service_controller.h" + +#include +#include + +#include "i_orchestration_tools.h" +#include "i_orchestration_status.h" +#include "i_shell_cmd.h" +#include "i_rest_api.h" +#include "i_tenant_manager.h" +#include "service_details.h" +#include "i_mainloop.h" +#include "component.h" + +class ServiceController + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + ServiceController(); + ~ServiceController(); + + void init() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __SERVICE_CONTROLLER_H__ diff --git a/components/include/service_details.h b/components/include/service_details.h new file mode 100755 index 0000000..ed8c088 --- /dev/null +++ b/components/include/service_details.h @@ -0,0 +1,79 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __SERVICE_DETAILS_H__ +#define __SERVICE_DETAILS_H__ + +#include +#include + +#include "connkey.h" +#include "rest.h" +#include "config.h" +#include "i_service_controller.h" + +class ServiceDetails : Singleton::Consume +{ +public: + ServiceDetails() = default; + ServiceDetails( + const std::string &name, + const PortNumber &port, + const std::vector relevant_configurations, + const std::string &id = "") + : + service_name(name), + service_id(id), + service_port(port) + { + relevant_configs.insert(relevant_configurations.begin(), relevant_configurations.end()); + } + + template + void serialize(Archive &ar); + + ReconfStatus sendNewConfigurations(int conf_id, const std::string &policy_version); + + bool isConfigurationRelevant(const std::string &config) const { return relevant_configs.count(config) > 0; } + + bool sendRequest(const std::string &uri, ClientRest &request_json) const; + + bool isServiceActive() const; + + const PortNumber & getPort() const { return service_port; } + + const std::string & getServiceID() const {return service_id; } + + const std::string & getServiceName() const {return service_name; } + +private: + + std::string service_name; + std::string service_id; + PortNumber service_port; + std::unordered_set relevant_configs; +}; + +class SetNanoServiceConfig : public ServerRest +{ +public: + void doCall() override; + + C2S_PARAM(std::string, service_name); + C2S_OPTIONAL_PARAM(std::string, service_id); + C2S_PARAM(int, service_listening_port); + C2S_PARAM(std::vector, expected_configurations); + S2C_PARAM(bool, status); +}; + +#endif // __SERVICE_DETAILS_H__ diff --git a/components/include/signal_handler.h b/components/include/signal_handler.h new file mode 100755 index 0000000..21463c5 --- /dev/null +++ b/components/include/signal_handler.h @@ -0,0 +1,50 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __SIGNAL_HANDLER_H__ +#define __SIGNAL_HANDLER_H__ + +#include "config.h" +#include "i_mainloop.h" +#include "i_time_get.h" +#include "i_agent_details.h" +#include "i_environment.h" +#include "i_messaging.h" +#include "i_signal_handler.h" +#include "config/i_config.h" +#include "component.h" + +class SignalHandler + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + SignalHandler(); + ~SignalHandler(); + + void init(); + void preload(); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __SIGNAL_HANDLER_H__ diff --git a/components/include/telemetry.h b/components/include/telemetry.h new file mode 100755 index 0000000..96700c0 --- /dev/null +++ b/components/include/telemetry.h @@ -0,0 +1,122 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#ifndef __TELEMETRY_H__ +#define __TELEMETRY_H__ + +#include +#include +#include +#include "i_mainloop.h" +#include "i_waap_telemetry.h" +#include "i_agent_details.h" +#include "i_logging.h" +#include "logging_comp.h" +#include +#include +#include +#include "waap.h" +#include "generic_metric.h" + +enum class AssetType { API, WEB, ALL, COUNT }; + +class WaapTelemetryEvent : public Event +{ +public: + WaapTelemetryEvent(const std::string &_asset_id, const DecisionTelemetryData &_data) + : + asset_id(_asset_id), + data(_data) + {} + + const DecisionTelemetryData & getData() const { return data; } + const std::string & getAssetId() const { return asset_id; } + +private: + std::string asset_id; + DecisionTelemetryData data; +}; + +class WaapTelemetrics : public GenericMetric +{ +public: + void updateMetrics(const std::string &asset_id, const DecisionTelemetryData &data); + void initMetrics(); + +private: + MetricCalculations::Counter requests{this, "reservedNgenA"}; + MetricCalculations::Counter sources{this, "reservedNgenB"}; + MetricCalculations::Counter force_and_block_exceptions{this, "reservedNgenC"}; + MetricCalculations::Counter waf_blocked{this, "reservedNgenD"}; + MetricCalculations::Counter api_blocked{this, "reservedNgenE"}; + MetricCalculations::Counter bot_blocked{this, "reservedNgenF"}; + MetricCalculations::Counter threat_info{this, "reservedNgenG"}; + MetricCalculations::Counter threat_low{this, "reservedNgenH"}; + MetricCalculations::Counter threat_medium{this, "reservedNgenI"}; + MetricCalculations::Counter threat_high{this, "reservedNgenJ"}; + std::unordered_set sources_seen; +}; + +class WaapAttackTypesMetrics : public GenericMetric +{ +public: + void updateMetrics(const std::string &asset_id, const DecisionTelemetryData &data); + void initMetrics(); + +private: + MetricCalculations::Counter sql_inj{this, "reservedNgenA"}; + MetricCalculations::Counter vulnerability_scan{this, "reservedNgenB"}; + MetricCalculations::Counter path_traversal{this, "reservedNgenC"}; + MetricCalculations::Counter ldap_inj{this, "reservedNgenD"}; + MetricCalculations::Counter evasion_techs{this, "reservedNgenE"}; + MetricCalculations::Counter remote_code_exec{this, "reservedNgenF"}; + MetricCalculations::Counter xml_extern_entity{this, "reservedNgenG"}; + MetricCalculations::Counter cross_site_scripting{this, "reservedNgenH"}; + MetricCalculations::Counter general{this, "reservedNgenI"}; +}; + +class WaapMetricWrapper : public Listener, Singleton::Consume +{ +public: + void upon(const WaapTelemetryEvent &event) override; + +private: + std::map> metrics; + std::map> telemetries; + std::map> attack_types; + std::map> attack_types_telemetries; +}; + +class AssetCountEvent : public Event +{ +public: + AssetCountEvent(AssetType type, const int &asset_count) : asset_type(type), assets_count(asset_count) {}; + const AssetType & getAssetType() const { return asset_type; } + const int & getAssetCount() const { return assets_count; } +private: + AssetType asset_type; + int assets_count; +}; + +class 
AssetsMetric : public GenericMetric, Listener +{ +public: + void upon(const AssetCountEvent &event) override; +private: + MetricCalculations::LastReportedValue api_assets{this, "numberOfProtectedApiAssetsSample"}; + MetricCalculations::LastReportedValue web_assets{this, "numberOfProtectedWebAppAssetsSample"}; + MetricCalculations::LastReportedValue all_assets{this, "numberOfProtectedAssetsSample"}; +}; + +#endif // __TELEMETRY_H__ diff --git a/components/include/transaction_table_metric.h b/components/include/transaction_table_metric.h new file mode 100755 index 0000000..3aed420 --- /dev/null +++ b/components/include/transaction_table_metric.h @@ -0,0 +1,61 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __TRANSACTION_TABLE_METRIC_H__ +#define __TRANSACTION_TABLE_METRIC_H__ + +#include "generic_metric.h" + +class TransactionTableEvent : public Event +{ +public: + void + setTransactionTableSize(uint64_t _value) + { + transaction_table_size = _value; + } + + uint64_t + getTransactionTableSize() const + { + return transaction_table_size; + } + +private: + uint64_t transaction_table_size = 0; +}; + +class TransactionTableMetric + : + public GenericMetric, + public Listener +{ +public: + void + upon(const TransactionTableEvent &event) override + { + max_transaction_table_size.report(event.getTransactionTableSize()); + avg_transaction_table_size.report(event.getTransactionTableSize()); + last_report_transaction_handler_size.report(event.getTransactionTableSize()); + } + +private: + MetricCalculations::Max max_transaction_table_size{this, "maxTransactionTableSizeSample", 0}; + MetricCalculations::Average avg_transaction_table_size{this, "averageTransactionTableSizeSample"}; + MetricCalculations::LastReportedValue last_report_transaction_handler_size{ + this, + "lastReportTransactionTableSizeSample" + }; +}; + +#endif // __TRANSACTION_TABLE_METRIC_H__ diff --git a/components/include/type_defs.h b/components/include/type_defs.h new file mode 100644 index 0000000..0ee670e --- /dev/null +++ b/components/include/type_defs.h @@ -0,0 +1,7 @@ +#ifndef __TYPE_DEFS_H__ +#define __TYPE_DEFS_H__ + +// Definition of FD.io interface information +using NetworkIfNum = uint16_t; + +#endif // __TYPE_DEFS_H__ diff --git a/components/include/update_communication.h b/components/include/update_communication.h new file mode 100755 index 0000000..a2a7ca3 --- /dev/null +++ b/components/include/update_communication.h @@ -0,0 +1,46 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __UPDATE_COMMUNICATION_H__ +#define __UPDATE_COMMUNICATION_H__ + +#include "i_update_communication.h" +#include "i_environment.h" +#include "i_rest_api.h" +#include "i_mainloop.h" +#include "i_orchestration_tools.h" +#include "component.h" + +class UpdateCommunication + : + public Component, + Singleton::Provide, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + UpdateCommunication(); + ~UpdateCommunication(); + + void preload() override; + + void init() override; + void fini() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __UPDATE_COMMUNICATION_H__ diff --git a/components/include/url_parser.h b/components/include/url_parser.h new file mode 100755 index 0000000..554df67 --- /dev/null +++ b/components/include/url_parser.h @@ -0,0 +1,56 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __URL_PARSER_H__ +#define __URL_PARSER_H__ + +#include "orchestration_tools.h" + +#include +#include + +enum class URLProtocol +{ + HTTP, + HTTPS, + LOCAL_FILE +}; + +class URLParser +{ +public: + URLParser(const std::string &url); + + Maybe getBaseURL() const; + bool isOverSSL() const { return over_ssl; } + std::string getPort() const { return port; } + std::string getQuery() const { return query; } + URLProtocol getProtocol() const { return protocol; } + std::string toString() const; + void setQuery(const std::string &new_query); + +private: + void parseURL(const std::string &url); + URLProtocol parseProtocol(const std::string &url) const; + + bool over_ssl; + std::string base_url; + std::string port; + std::string query; + URLProtocol protocol; +}; + +std::ostream & operator<<(std::ostream &os, const URLParser &url); +std::ostream & operator<<(std::ostream &os, const URLProtocol &protocol); + +#endif // __URL_PARSER_H__ diff --git a/components/include/user_identifiers_config.h b/components/include/user_identifiers_config.h new file mode 100755 index 0000000..61274f1 --- /dev/null +++ b/components/include/user_identifiers_config.h @@ -0,0 +1,66 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
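A short usage sketch for URLParser; the URL is a placeholder, and getBaseURL() is assumed to return Maybe<std::string> (the actual parsing rules, default ports, and query handling live in the .cc, which is not part of this excerpt).

    #include <iostream>

    #include "url_parser.h"

    static void
    inspectUpdateUrl()   // illustrative helper, not part of the patch
    {
        URLParser url("https://updates.example.com:8443/api/v1/check-update");   // placeholder URL

        if (url.getProtocol() == URLProtocol::HTTPS && url.isOverSSL()) {
            std::cout << "TLS endpoint on port " << url.getPort() << std::endl;
        }

        auto base = url.getBaseURL();   // Maybe<> result, empty on parse failure (assumption)
        if (base.ok()) {
            std::cout << "base: " << base.unpack() << ", query: " << url.getQuery() << std::endl;
        }
    }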
+ +#ifndef __USER_IDENTIFIERS_CONFIG_H__ +#define __USER_IDENTIFIERS_CONFIG_H__ + +#include +#include + +#include "http_inspection_events.h" +#include "cereal/archives/json.hpp" + +class UsersAllIdentifiersConfig +{ +public: + enum class ExtractType { SOURCEIDENTIFIER, PROXYIP}; + + UsersAllIdentifiersConfig(); + void load(cereal::JSONInputArchive &ar); + void parseRequestHeaders(const HttpHeader &header) const; + std::vector getHeaderValuesFromConfig(const std::string &header_key) const; + void setXFFValuesToOpaqueCtx(const HttpHeader &header, ExtractType type) const; + +private: + class UsersIdentifiersConfig + { + public: + UsersIdentifiersConfig(); + UsersIdentifiersConfig(const std::string &identifier); + bool operator==(const UsersIdentifiersConfig &other) const; + void load(cereal::JSONInputArchive &ar); + bool isEqualSourceIdentifier(const std::string &other) const; + const std::string & getSourceIdentifier() const { return source_identifier; } + const std::vector & getIdentifierValues() const { return identifier_values; } + + private: + std::string source_identifier; + std::vector identifier_values; + }; + + bool isHigherPriority(const std::string ¤t_identifier, const std::string &header_key) const; + void setIdentifierTopaqueCtx(const HttpHeader &header) const; + void setCookieValuesToOpaqueCtx(const HttpHeader &header) const; + void setJWTValuesToOpaqueCtx(const HttpHeader &header) const; + void setCustomHeaderToOpaqueCtx(const HttpHeader &header) const; + Maybe parseCookieElement( + const std::string::const_iterator &start, + const std::string::const_iterator &end, + const std::string &key) const; + Buffer extractKeyValueFromCookie(const std::string &cookie_value, const std::string &key) const; + Maybe parseXForwardedFor(const std::string &str) const; + + std::vector user_identifiers; +}; + +#endif // __USER_IDENTIFIERS_CONFIG_H__ diff --git a/components/include/waap.h b/components/include/waap.h new file mode 100755 index 0000000..7ea2b38 --- /dev/null +++ b/components/include/waap.h @@ -0,0 +1,66 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __WAAP_H__ +#define __WAAP_H__ + +#include + +#include "singleton.h" +#include "i_mainloop.h" +#include "i_table.h" +#include "i_static_resources_handler.h" +#include "http_inspection_events.h" +#include "i_instance_awareness.h" +#include "table_opaque.h" +#include "component.h" + +// forward decleration +class I_Telemetry; +class I_DeepAnalyzer; +class I_WaapAssetStatesManager; +class I_Messaging; +class I_AgentDetails; +class I_Encryptor; + +class WaapComponent + : + public Component, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + WaapComponent(); + ~WaapComponent(); + + void preload(); + + void init(); + void fini(); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __WAAP_H__ diff --git a/components/messaging_downloader/CMakeLists.txt b/components/messaging_downloader/CMakeLists.txt new file mode 100755 index 0000000..11acfc3 --- /dev/null +++ b/components/messaging_downloader/CMakeLists.txt @@ -0,0 +1,4 @@ +add_library(messaging_downloader_server messaging_downloader_server.cc) +add_library(messaging_downloader_client messaging_downloader_client.cc) + +add_subdirectory(messaging_downloader_ut) diff --git a/components/messaging_downloader/messaging_downloader_client.cc b/components/messaging_downloader/messaging_downloader_client.cc new file mode 100755 index 0000000..1f2887b --- /dev/null +++ b/components/messaging_downloader/messaging_downloader_client.cc @@ -0,0 +1,230 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "messaging_downloader_client.h" +#include "i_messaging.h" +#include "config.h" +#include "rest.h" + +USE_DEBUG_FLAG(D_COMMUNICATION); + +using namespace std; + +class MessagingDownloaderClientRequest : public ClientRest +{ +public: + MessagingDownloaderClientRequest() + { + file_name = ""; + url = ""; + port = 80; + response_port = 0; + status = false; + } + + MessagingDownloaderClientRequest( + const string &_file_name, + const string &_url, + const unsigned int _port, + const unsigned int _response_port + ) : + file_name(_file_name), + url(_url), + port(_port), + response_port(_response_port), + status(false) + {} + + bool getStatus() const { return status.get(); } + const string & getUuid() const { return uuid.get(); } + + C2S_PARAM(string, file_name); + C2S_PARAM(string, url); + C2S_PARAM(unsigned int, port); + C2S_PARAM(unsigned int, response_port); + + S2C_PARAM(string, uuid); + S2C_PARAM(bool, status); +}; + +class DownloaderCbHandler +{ +public: + void + addCallback(const string &uuid, I_MessagingDownloader::OnCompleteCB &cb) + { + DownloaderCbHandler::uuid_to_cb[uuid] = cb; + } + + static void + handleDownloadCB(const string &uuid, Maybe &downloaded_file) + { + dbgTrace(D_COMMUNICATION) << "Handling downloading complete callback. 
UUID: " << uuid; + if(DownloaderCbHandler::uuid_to_cb.find(uuid) == DownloaderCbHandler::uuid_to_cb.end()) { + dbgWarning(D_COMMUNICATION) << "Failed to execute download completion callback."; + return; + } + if (DownloaderCbHandler::uuid_to_cb.at(uuid) != nullptr) { + DownloaderCbHandler::uuid_to_cb.at(uuid)(downloaded_file); + DownloaderCbHandler::uuid_to_cb.erase(uuid); + } else { + string curr_status; + if (downloaded_file.ok()) { + curr_status = ". File path: " + downloaded_file.unpack(); + } else { + curr_status = ". Error: " + downloaded_file.getErr(); + } + dbgWarning(D_COMMUNICATION) + << "Illegal download completion callback for downloading process with UUID: " + << uuid + << curr_status; + } + dbgTrace(D_COMMUNICATION) << "Successfully handled the downloading complete callback. UUID: " << uuid; + } + + static unordered_map uuid_to_cb; +}; + +unordered_map DownloaderCbHandler::uuid_to_cb; + +class MessagingDownloaderClientRes : public ServerRest +{ +public: + void + doCall() override + { + dbgTrace(D_COMMUNICATION) << "Received response from the downloading server."; + if (status.get() && filepath.isActive()) { + Maybe response(filepath.get()); + DownloaderCbHandler::handleDownloadCB(uuid.get(), response); + } else { + if (!error.isActive()) error = "unknown error"; + dbgWarning(D_COMMUNICATION) << "Failed to download. Error: " << error.get(); + Maybe response = genError(error.get()); + DownloaderCbHandler::handleDownloadCB(uuid.get(), response); + } + } + + C2S_PARAM(string, uuid); + C2S_PARAM(bool, status); + C2S_OPTIONAL_PARAM(string, filepath); + C2S_OPTIONAL_PARAM(string, error); +}; + +class MessagingDownloaderClient::Impl : Singleton::Provide::From +{ +public: + void + init() + { + i_msg = Singleton::Consume::by(); + Singleton::Consume::by()->addRestCall( + RestAction::SHOW, + "download-status" + ); + } + + void + fini() + { + i_msg = nullptr; + } + + bool + downloadFile( + const string &file_name, + const string &url, + I_MessagingDownloader::OnCompleteCB cb = nullptr, + const unsigned int port = 0 + ) override + { + dbgTrace(D_COMMUNICATION) + << "Processing new download request." + << "File name: " + << file_name + << "URL: " + << url; + + auto response_port = Singleton::Consume::by()->get( + "Listening Port" + ); + + if (!response_port.ok()) { + dbgWarning(D_COMMUNICATION) << "Failed to get the service listening port."; + return false; + } + + vector download_ports = { + getConfigurationWithDefault(8164, "Downloader", "Downloader Primary Port"), + getConfigurationWithDefault(8167, "Downloader", "Downloader Secondary Port") + }; + + MessagingDownloaderClientRequest download_obj( + file_name, + url, + port, + response_port.unpack() + ); + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::EXPECT_REPLY); + if (i_msg != nullptr) { + dbgTrace(D_COMMUNICATION) << "Sending request to the downloading service."; + bool res = false; + for (int port: download_ports) { + dbgTrace(D_COMMUNICATION) << "Trying to request downloading with downloading service port " << port; + res = i_msg->sendObject( + download_obj, + I_Messaging::Method::POST, + "127.0.0.1", + port, + conn_flags, + "/add-download-file" + ); + if (res) break; + } + + if (!res) { + dbgInfo(D_COMMUNICATION) << "Failed to request for file downloading"; + return false; + } + dbgTrace(D_COMMUNICATION) << "Successfully requested for downloading."; + cb_handler.addCallback(download_obj.getUuid(), cb); + } else { + dbgDebug(D_COMMUNICATION) << "Failed to request downloading. 
Illegal messaging infrastructure."; + } + return download_obj.getStatus(); + } + +private: + I_Messaging *i_msg; + DownloaderCbHandler cb_handler; +}; + +MessagingDownloaderClient::MessagingDownloaderClient() + : + Component("MessagingDownloaderClient"), + pimpl(make_unique()) +{} +MessagingDownloaderClient::~MessagingDownloaderClient() {} + +void MessagingDownloaderClient::init() { pimpl->init(); } +void MessagingDownloaderClient::fini() { pimpl->fini(); } + +void +MessagingDownloaderClient::preload() +{ + registerExpectedConfiguration("Downloader", "Downloader Primary Port"); + registerExpectedConfiguration("Downloader", "Downloader Secondary Port"); +}; diff --git a/components/messaging_downloader/messaging_downloader_server.cc b/components/messaging_downloader/messaging_downloader_server.cc new file mode 100755 index 0000000..ffb50b4 --- /dev/null +++ b/components/messaging_downloader/messaging_downloader_server.cc @@ -0,0 +1,375 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "messaging_downloader_server.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "i_messaging.h" +#include "rest.h" +#include "config.h" +#include "url_parser.h" +#include "agent_core_utilities.h" + +USE_DEBUG_FLAG(D_COMMUNICATION); + +using namespace std; + +class MessagingDownloaderResponser : public ClientRest +{ +public: + MessagingDownloaderResponser()=delete; + + MessagingDownloaderResponser(string &_uuid, const Maybe &_filepath) + : + uuid(_uuid), + status(_filepath.ok()) + { + if (_filepath.ok()) { + filepath = _filepath.unpack(); + } else { + error = _filepath.getErr(); + } + } + C2S_PARAM(string, uuid); + C2S_PARAM(bool, status); + C2S_OPTIONAL_PARAM(string, filepath) + C2S_OPTIONAL_PARAM(string, error) +}; + +class MessagingDownloaderReceiver : public ServerRest +{ +public: + void + doCall() override + { + dbgTrace(D_COMMUNICATION) << "Received new downloading request."; + + stringstream uuid_ss; + uuid_ss << boost::uuids::random_generator()(); + uuid = uuid_ss.str(); + + if (!port.isActive()) { + dbgTrace(D_COMMUNICATION) << "Request does not contain explicit port."; + port = 0; + } + + dbgInfo(D_COMMUNICATION) + << "Downloading a file and using the next parameters: " + << "file_name: " + << file_name.get() + << ", url: " + << url.get() + << ", uuid: " + << uuid.get() + << ", port: " + << port.get() + << ", notification port: " + << response_port.get(); + + unsigned int response_port_cap = response_port.get(); + string uuid_capture = uuid.get(); + status = Singleton::Consume::from()->downloadFile( + file_name.get(), + url.get(), + [uuid_capture, response_port_cap](const Maybe &downloaded_file) mutable + { + Flags conn_flags; + MessagingDownloaderResponser res(uuid_capture, downloaded_file); + dbgTrace(D_COMMUNICATION) << "Sending the download status to the client."; + bool res_status = Singleton::Consume::by()->sendNoReplyObject( + res, + I_Messaging::Method::POST, + "127.0.0.1", + response_port_cap, + conn_flags, + 
"/show-download-status" + ); + if (!res_status) { + dbgInfo(D_COMMUNICATION) << "Failed to send the download status."; + } else { + dbgDebug(D_COMMUNICATION) + << "Successfully sent the download status. Notification port: " + << response_port_cap + << ", Status: " + << downloaded_file.ok(); + } + }, + port.get() + ); + } + + C2S_PARAM(string, file_name); + C2S_PARAM(string, url); + C2S_PARAM(int, response_port); + C2S_PARAM(int, port); + S2C_PARAM(string, uuid); + S2C_PARAM(bool, status); +}; + +class DownloadingInstance +{ +public: + DownloadingInstance()=default; + + DownloadingInstance( + const string &_file_name, + const string &_url, + const unsigned int _port + ) : + file_name(_file_name), + url(_url), + port(_port), + url_parser(_url) + { + parseURL(); + } + + Maybe + genJson() const + { + return string(""); + } + + bool + loadJson(const string &_body) + { + body = vector(_body.begin(), _body.end()); + return true; + } + + const vector & + getResponse() const + { + return body; + } + + bool + operator==(const DownloadingInstance &other) const + { + return file_name == other.file_name && + host == other.host && + url == other.url && + port == other.port && + is_secure == other.is_secure && + origin_is_fog == other.origin_is_fog; + } + + bool + operator<(const DownloadingInstance &other) const + { + return file_name < other.file_name || + host < other.host || + url < other.url || + port < other.port || + is_secure < other.is_secure || + origin_is_fog < other.origin_is_fog; + } + + const string & getFileName() const { return file_name; } + const string & getHost() const { return host; } + const string & getUrl() const { return url; } + unsigned int getPort() const { return port; } + bool getIsSecure() const { return is_secure; } + bool getIsFogOrigin() const { return origin_is_fog; } + +private: + void + parseURL() + { + dbgTrace(D_COMMUNICATION) << "Parsing the URL to extract the relevant info. URL: " << url; + origin_is_fog = false; + auto maybe_host = url_parser.getBaseURL(); + if (!maybe_host.ok()) { + dbgWarning(D_COMMUNICATION) << "Failed to parse the URL"; + return; + } + host = maybe_host.unpack(); + is_secure = url_parser.isOverSSL(); + if (port == 0 && url_parser.getPort() != "") { + try { + port = stoi(url_parser.getPort()); + } catch (exception &e) { + port = 443; + dbgInfo(D_COMMUNICATION) + << "Failed to parse the port for the downloading request. Error " + << e.what() + << ". Using the default port " + << port; + } + } else { + dbgTrace(D_COMMUNICATION) << "Using explicitly defined port. 
Port: " << port; + } + + I_AgentDetails *agent_details = Singleton::Consume::by(); + if (agent_details->getFogDomain().ok()) { + string fog_domain = agent_details->getFogDomain().unpack(); + if (host.find(fog_domain) != string::npos) { + origin_is_fog = true; + } + } else { + dbgTrace(D_COMMUNICATION) << "Failed to receive fog domain."; + } + } + + string file_name = ""; + string url = ""; + unsigned int port = 0; + URLParser url_parser; + vector body = {}; + string host = ""; + bool is_secure = true; + bool origin_is_fog = true; +}; + +class MessagingDownloaderServer::Impl : Singleton::Provide::From +{ +public: + void + init() + { + i_msg = Singleton::Consume::by(); + i_mainloop = Singleton::Consume::by(); + auto rest = Singleton::Consume::by(); + rest->addRestCall(RestAction::ADD, "download-file"); + string default_downloading_dir = "/tmp/cp_nano_downloader/"; + download_dir = getConfigurationWithDefault( + default_downloading_dir, + "Downloader", + "Downloading Directory" + ); + NGEN::Filesystem::makeDirRecursive(download_dir); + } + + void + fini() + { + i_msg = nullptr; + i_mainloop = nullptr; + } + + bool + downloadFile( + const string &file_name, + const string &url, + OnCompleteCB on_complete_func = nullptr, + const unsigned int port = 443 + ) override + { + dbgTrace(D_COMMUNICATION) << "Handling new download request. URL: " << url << ". File name: " << file_name; + DownloadingInstance req(file_name, url, port); + if (downloading_queue.find(req) != downloading_queue.end()) { + dbgInfo(D_COMMUNICATION) << "Failed to download the file. Similar download request already exists."; + return false; + } + if (!isValidPath(file_name)) { + dbgInfo(D_COMMUNICATION) << "Failed to validate the download path. Path: " << download_dir + file_name; + return false; + } + downloading_queue.insert(req); + + i_mainloop->addOneTimeRoutine( + I_MainLoop::RoutineType::RealTime, + [this, req, on_complete_func]() mutable + { + Flags conn_flags; + if (req.getIsSecure()) conn_flags.setFlag(MessageConnConfig::SECURE_CONN); + if (!req.getIsFogOrigin()) conn_flags.setFlag(MessageConnConfig::EXTERNAL); + auto on_exit = make_scope_exit([this, &req]() { downloading_queue.erase(req); } ); + bool response = i_msg->sendObject( + req, + I_Messaging::Method::GET, + req.getHost(), + req.getPort(), + conn_flags, + req.getUrl() + ); + if (response) { + dbgTrace(D_COMMUNICATION) << "Successfully received a response from the downloading file host."; + std::ofstream downloaded_file; + downloaded_file.open(download_dir + req.getFileName()); + if (!downloaded_file.is_open()) { + dbgInfo(D_COMMUNICATION) + << "Failed to download file. Error: Failed to open the file " + << req.getFileName(); + Maybe err = genError("Failed to open the file"); + on_complete_func(err); + if (i_mainloop != nullptr) i_mainloop->yield(true); + } + auto &res_body = req.getResponse(); + downloaded_file.write(res_body.data(), res_body.size()); + downloaded_file.close(); + dbgInfo(D_COMMUNICATION) << "Successfully downloaded the file. File name: " << req.getFileName(); + Maybe filepath = download_dir + req.getFileName(); + on_complete_func(filepath); + } else { + dbgInfo(D_COMMUNICATION) << "Failed to download file. 
File name: " << req.getFileName(); + Maybe err = genError("Failed during the downloading process."); + on_complete_func(err); + } + }, + "Download file routine for '" + file_name + "'", + false + ); + return true; + } + +private: + bool + isValidPath(const string &file_name) + { + struct stat info; + string file_to_download = download_dir + file_name; + dbgTrace(D_COMMUNICATION) << "Validating the downloading file path. Path: " << file_to_download; + if (stat(download_dir.c_str(), &info) != 0) { + dbgDebug(D_COMMUNICATION) << "Failed to access the downloading directory"; + return false; + } + if (stat(file_to_download.c_str(), &info) == 0) { + dbgDebug(D_COMMUNICATION) + << "The file with the name '" + << file_name + << "' is already exist in the downloading directory"; + return false; + } + return true; + } + + I_Messaging *i_msg; + I_MainLoop *i_mainloop; + string download_dir; + set downloading_queue; +}; + +MessagingDownloaderServer::MessagingDownloaderServer() + : + Component("MessagingDownloaderServer"), + pimpl(make_unique()) +{} + +MessagingDownloaderServer::~MessagingDownloaderServer() {} + +void MessagingDownloaderServer::init() { pimpl->init(); } +void MessagingDownloaderServer::fini() { pimpl->fini(); } + +void +MessagingDownloaderServer::preload() +{ + registerExpectedConfiguration("Downloader", "Downloading Directory"); +}; diff --git a/components/messaging_downloader/messaging_downloader_ut/CMakeLists.txt b/components/messaging_downloader/messaging_downloader_ut/CMakeLists.txt new file mode 100755 index 0000000..21833c3 --- /dev/null +++ b/components/messaging_downloader/messaging_downloader_ut/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(downloader_server_ut) +add_subdirectory(downloader_client_ut) \ No newline at end of file diff --git a/components/messaging_downloader/messaging_downloader_ut/downloader_client_ut/CMakeLists.txt b/components/messaging_downloader/messaging_downloader_ut/downloader_client_ut/CMakeLists.txt new file mode 100755 index 0000000..d2b7b97 --- /dev/null +++ b/components/messaging_downloader/messaging_downloader_ut/downloader_client_ut/CMakeLists.txt @@ -0,0 +1,9 @@ +link_directories(${BOOST_ROOT}/lib) +include_directories(${CMAKE_SOURCE_DIR}/core/include) +link_directories(${CMAKE_BINARY_DIR}/core/include) + +add_unit_test( + downloader_client_ut + "downloader_client_ut.cc" + "singleton;messaging_downloader_client;time_proxy;mainloop;rest;metric;event_is;message;-lboost_context;agent_core_utilities;orchestration_modules;connkey;-lboost_regex;-lboost_filesystem" +) diff --git a/components/messaging_downloader/messaging_downloader_ut/downloader_client_ut/downloader_client_ut.cc b/components/messaging_downloader/messaging_downloader_ut/downloader_client_ut/downloader_client_ut.cc new file mode 100755 index 0000000..1ceaa44 --- /dev/null +++ b/components/messaging_downloader/messaging_downloader_ut/downloader_client_ut/downloader_client_ut.cc @@ -0,0 +1,113 @@ +#include "messaging_downloader_client.h" + +#include + +#include "environment.h" +#include "singleton.h" +#include "config.h" +#include "config_component.h" +#include "mainloop.h" +#include "cptest.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_messaging.h" +#include "mock/mock_rest_api.h" +#include "mock/mock_agent_details.h" +#include "mock/mock_time_get.h" + +using namespace std; +using namespace testing; + +class MessagingDownloaderClientTest : public Test +{ +public: + MessagingDownloaderClientTest() + { + EXPECT_CALL( + rest, + mockRestCall(RestAction::SHOW, "download-status", 
_) + ).WillOnce(WithArg<2>(Invoke(this, &MessagingDownloaderClientTest::restHandler))); + + EXPECT_CALL(rest, mockRestCall(RestAction::ADD, "declare-boolean-variable", _)).WillOnce(Return(true)); + + Debug::setUnitTestFlag(D_COMMUNICATION, Debug::DebugLevel::TRACE); + Debug::setNewDefaultStdout(&capture_debug); + + messaging_downloader.preload(); + env.preload(); + env.init(); + messaging_downloader.init(); + } + + ~MessagingDownloaderClientTest() + { + boost::filesystem::remove_all("/tmp/test_download_dir/"); + messaging_downloader.fini(); + } + + bool + restHandler(const unique_ptr &rest_ptr) + { + rest_handler = rest_ptr->getRest(); + return true; + } + + unique_ptr rest_handler; + ostringstream capture_debug; + I_MainLoop::Routine downloading_routine; + MessagingDownloaderClient messaging_downloader; + NiceMock mock_time; + NiceMock mock_agent_details; + StrictMock mock_msg; + StrictMock rest; + StrictMock mock_ml; + ::Environment env; + ConfigComponent conf; +}; + +TEST_F(MessagingDownloaderClientTest, do_nothing) +{ +} + +TEST_F(MessagingDownloaderClientTest, request_download) +{ + string file_name = "test_file"; + string url = "https://download_test.com/test_download"; + Singleton::Consume::by()->registerValue("Listening Port", 6464); + + stringstream ss; + ss << "{\n \"file_name\": \"" << file_name << "\"," + << "\n \"url\": \"" << url << "\"," + << "\n \"port\": 0,\n \"response_port\": 6464\n}"; + + EXPECT_CALL(mock_msg, sendMessage( + true, + ss.str(), + I_Messaging::Method::POST, + "127.0.0.1", + 8164, + _, + "/add-download-file", + _, + _, + _ + )).WillOnce(Return(Maybe(string("{\"uuid\": \"111\", \"status\": true}")))); + + bool is_cb_run = false; + bool res = Singleton::Consume::from()->downloadFile( + file_name, + url, + [&is_cb_run](const Maybe& filepath) + { + is_cb_run = true; + EXPECT_TRUE(filepath.ok()); + EXPECT_EQ(filepath.unpack(), "/tmp/test_download_dir/test_file"); + } + ); + EXPECT_TRUE(res); + + stringstream is; + is << "{\"uuid\": \"111\", \"status\": true, \"filepath\": \"/tmp/test_download_dir/test_file\"}"; + EXPECT_FALSE(is_cb_run); + rest_handler->performRestCall(is); + EXPECT_TRUE(is_cb_run); +} diff --git a/components/messaging_downloader/messaging_downloader_ut/downloader_server_ut/CMakeLists.txt b/components/messaging_downloader/messaging_downloader_ut/downloader_server_ut/CMakeLists.txt new file mode 100755 index 0000000..d6fba93 --- /dev/null +++ b/components/messaging_downloader/messaging_downloader_ut/downloader_server_ut/CMakeLists.txt @@ -0,0 +1,9 @@ +link_directories(${BOOST_ROOT}/lib) +include_directories(${CMAKE_SOURCE_DIR}/core/include) +link_directories(${CMAKE_BINARY_DIR}/core/include) + +add_unit_test( + downloader_server_ut + "downloader_server_ut.cc" + "singleton;messaging_downloader_server;time_proxy;mainloop;rest;metric;event_is;message;-lboost_context;agent_core_utilities;orchestration_modules;agent_details;connkey;-lboost_regex;-lboost_filesystem" +) diff --git a/components/messaging_downloader/messaging_downloader_ut/downloader_server_ut/downloader_server_ut.cc b/components/messaging_downloader/messaging_downloader_ut/downloader_server_ut/downloader_server_ut.cc new file mode 100755 index 0000000..9b1d262 --- /dev/null +++ b/components/messaging_downloader/messaging_downloader_ut/downloader_server_ut/downloader_server_ut.cc @@ -0,0 +1,304 @@ +#include "messaging_downloader_server.h" + +#include + +#include "environment.h" +#include "singleton.h" +#include "config.h" +#include "config_component.h" +#include "mainloop.h" +#include 
"cptest.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_messaging.h" +#include "mock/mock_rest_api.h" +#include "mock/mock_agent_details.h" +#include "mock/mock_time_get.h" + +using namespace std; +using namespace testing; + +class MessagingDownloaderServerTest : public Test +{ +public: + MessagingDownloaderServerTest() + { + setConfiguration(string("/tmp/test_download_dir/"), "Downloader", "Downloading Directory"); + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "download-file", _) + ).WillOnce(WithArg<2>(Invoke(this, &MessagingDownloaderServerTest::restHandler))); + + Maybe fog_addr(string("test.fog.com")); + EXPECT_CALL( + mock_agent_details, + getFogDomain() + ).WillRepeatedly(Return(fog_addr)); + + Debug::setUnitTestFlag(D_COMMUNICATION, Debug::DebugLevel::TRACE); + Debug::setNewDefaultStdout(&capture_debug); + + messaging_downloader.preload(); + messaging_downloader.init(); + } + + ~MessagingDownloaderServerTest() + { + boost::filesystem::remove_all("/tmp/test_download_dir/"); + messaging_downloader.fini(); + } + + bool + restHandler(const unique_ptr &rest_ptr) + { + rest_handler = rest_ptr->getRest(); + return true; + } + + void + expectRequestSuccess( + string &test_file_name, + string &host, + string &url, + string &uuid, + unsigned int port, + unsigned int response_port, + string &success_msg + ) + { + EXPECT_CALL( + mock_ml, + addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, _, false) + ).WillOnce(DoAll(SaveArg<1>(&downloading_routine), Return(0))); + + EXPECT_CALL( + mock_msg, + sendMessage(true, "", I_Messaging::Method::GET, host, port, _, url, _, _, _) + ).WillOnce(Return(Maybe(string("test_body")))); + + stringstream expected_response; + expected_response + << "\n \"status\": true," + << "\n \"filepath\": \"/tmp/test_download_dir/" << test_file_name << "\"\n}"; + + string saved_response; + + EXPECT_CALL(mock_msg, sendMessage( + false, + _, + I_Messaging::Method::POST, + "127.0.0.1", + response_port, + _, + "/show-download-status", + _, + _, + _ + )).WillOnce(DoAll(SaveArg<1>(&saved_response), Return(Maybe(string())))); + + + stringstream is; + is << "{\"file_name\": \"" << test_file_name << "\"," + << "\"response_port\": " << response_port << "," + << "\"url\": \"" << url << "\"," + << "\"port\": " << port << "," + << "\"uuid\": \"" << uuid << "\"}"; + + rest_handler->performRestCall(is); + downloading_routine(); + EXPECT_THAT(saved_response, HasSubstr(expected_response.str())); + EXPECT_THAT(capture_debug.str(), HasSubstr(success_msg)); + } + + unique_ptr rest_handler; + ostringstream capture_debug; + I_MainLoop::Routine downloading_routine; + MessagingDownloaderServer messaging_downloader; + NiceMock mock_time; + StrictMock mock_agent_details; + StrictMock mock_msg; + StrictMock rest; + StrictMock mock_ml; + ::Environment env; + ConfigComponent conf; +}; + +TEST_F(MessagingDownloaderServerTest, do_nothing) +{ +} + +TEST_F(MessagingDownloaderServerTest, add_one_secured_request) +{ + string test_file_name = "test_file_name"; + string host = "test_host"; + string url = "https://test_host/test_url"; + string uuid = "111"; + string success_msg = "Successfully downloaded the file. 
File name: " + test_file_name; + unsigned int port = 443; + unsigned int response_port = 123; + expectRequestSuccess(test_file_name, host, url, uuid, port, response_port, success_msg); +} + +TEST_F(MessagingDownloaderServerTest, add_one_non_secured_request) +{ + string test_file_name = "test_file_name"; + string host = "test_host"; + string url = "http://test_host/test_url"; + string uuid = "111"; + string success_msg = "Successfully downloaded the file. File name: " + test_file_name; + unsigned int port = 80; + unsigned int response_port = 123; + expectRequestSuccess(test_file_name, host, url, uuid, port, response_port, success_msg); +} + +TEST_F(MessagingDownloaderServerTest, add_multiple_requests) +{ + string test_file_name1 = "test_file_name1"; + string test_file_name2 = "test_file_name2"; + string host = "test_host"; + string url = "https://test_host/test_url"; + string uuid = "111"; + string success_msg1 = "Successfully downloaded the file. File name: " + test_file_name1; + string success_msg2 = "Successfully downloaded the file. File name: " + test_file_name2; + unsigned int port = 443; + unsigned int response_port = 123; + expectRequestSuccess(test_file_name1, host, url, uuid, port, response_port, success_msg1); + expectRequestSuccess(test_file_name2, host, url, uuid, port, response_port, success_msg2); +} + +TEST_F(MessagingDownloaderServerTest, add_same_request_twice) +{ + string test_file_name = "test_file_name"; + string host = "test_host"; + string url = "https://test_host/test_url"; + string uuid = "111"; + unsigned int response_port = 123; + + EXPECT_CALL( + mock_ml, + addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, _, false) + ).WillOnce(DoAll(SaveArg<1>(&downloading_routine), Return(0))); + + EXPECT_CALL( + mock_msg, + sendMessage(true, "", I_Messaging::Method::GET, host, 442, _, url, _, _, _) + ).WillOnce(Return(Maybe(string("test_body")))); + + stringstream expected_response; + expected_response + << "\n \"status\": true," + << "\n \"filepath\": \"/tmp/test_download_dir/" << test_file_name << "\"\n}"; + + string saved_response; + + EXPECT_CALL(mock_msg, sendMessage( + false, + _, + I_Messaging::Method::POST, + "127.0.0.1", + response_port, + _, + "/show-download-status", + _, + _, + _ + )).WillOnce(DoAll(SaveArg<1>(&saved_response), Return(Maybe(string())))); + + stringstream is; + is + << "{\"file_name\": \"" << test_file_name << "\"," + << "\"response_port\": " << response_port << "," + << "\"uuid\": \"" << uuid << "\"," + << "\"port\": 442," + << "\"url\": \"" << url << "\"}"; + + rest_handler->performRestCall(is); + rest_handler->doCall(); + downloading_routine(); + + EXPECT_THAT(saved_response, HasSubstr(expected_response.str())); + EXPECT_THAT( + capture_debug.str(), + HasSubstr("Failed to download the file. 
Similar download request already exists.") + ); +} + +TEST_F(MessagingDownloaderServerTest, add_request_that_fails) +{ + string test_file_name = "test_file_name"; + string host = "test_host"; + string url = "https://test_host/test_url"; + string uuid = "111"; + unsigned int response_port = 123; + unsigned int additional_port_test = 123; + + Maybe err = genError("no"); + + EXPECT_CALL( + mock_ml, + addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, _, false) + ).WillOnce(DoAll(SaveArg<1>(&downloading_routine), Return(0))); + + EXPECT_CALL( + mock_msg, + sendMessage(true, "", I_Messaging::Method::GET, host, additional_port_test, _, url, _, _, _) + ).WillOnce(Return(err)); + + stringstream expected_response; + expected_response + << "\n \"status\": false," + << "\n \"error\": \"Failed during the downloading process.\"\n}"; + + string saved_response; + + EXPECT_CALL(mock_msg, sendMessage( + false, + _, + I_Messaging::Method::POST, + "127.0.0.1", + response_port, + _, + "/show-download-status", + _, + _, + _ + )).WillOnce(DoAll(SaveArg<1>(&saved_response), Return(Maybe(string())))); + + stringstream is; + is + << "{\"file_name\": \"" << test_file_name << "\"," + << "\"response_port\": " << response_port << "," + << "\"url\": \"" << url << "\"," + << "\"port\": " << additional_port_test << "," + << "\"uuid\": \"" << uuid << "\"}"; + + rest_handler->performRestCall(is); + downloading_routine(); + EXPECT_THAT(saved_response, HasSubstr(expected_response.str())); + EXPECT_THAT(capture_debug.str(), HasSubstr("Failed to download file. File name: test_file_name")); +} + +TEST_F(MessagingDownloaderServerTest, download_with_same_filename) +{ + string test_file_name = "test_file_name"; + string host = "test_host"; + string url1 = "https://test_host/test_url1"; + string url2 = "https://test_host/test_url2"; + string uuid = "111"; + unsigned int port = 443; + string success_msg = "Successfully downloaded the file. 
File name: " + test_file_name; + unsigned int response_port = 123; + expectRequestSuccess(test_file_name, host, url1, uuid, port, response_port, success_msg); + + stringstream is; + is + << "{\"file_name\": \"" << test_file_name << "\"," + << "\"response_port\": " << response_port << "," + << "\"port\": " << port << "," + << "\"url\": \"" << url2 << "\"}"; + + rest_handler->performRestCall(is); + EXPECT_THAT( + capture_debug.str(), + HasSubstr("The file with the name 'test_file_name' is already exist in the downloading directory") + ); +} diff --git a/components/packet/CMakeLists.txt b/components/packet/CMakeLists.txt new file mode 100644 index 0000000..d6cc0a2 --- /dev/null +++ b/components/packet/CMakeLists.txt @@ -0,0 +1,2 @@ +add_library(packet packet.cc) +add_subdirectory(packet_ut) diff --git a/components/packet/packet.cc b/components/packet/packet.cc new file mode 100755 index 0000000..5a23cc3 --- /dev/null +++ b/components/packet/packet.cc @@ -0,0 +1,607 @@ +#include "packet.h" +#include "debug.h" +#include "byteorder.h" +#include "c_common/network_defs.h" +#include "config.h" + +#include +#include +#include +#include +#include +#include + +using namespace std; + +USE_DEBUG_FLAG(D_PACKET); + +ostream & +operator<<(ostream &os, const PktErr &err) +{ + switch (err) { + case PktErr::UNINITIALIZED: { + return os << "Uninitialized packet"; + } + case PktErr::NON_ETHERNET_FRAME: { + return os << "Layer 2 frame length does not match the Ethernet frame length"; + } + case PktErr::MAC_LEN_TOO_BIG: { + return os << "Layer 2 frame length is greater than the packet length"; + } + case PktErr::NON_IP_PACKET: { + return os << "Ethernet frame contains a non-IP packet"; + } + case PktErr::UNKNOWN_L3_PROTOCOL: { + return os << "Unknown Layer 3 protocol type"; + } + case PktErr::IP_SIZE_MISMATCH: { + return os << "Wrong IP header size"; + } + case PktErr::IP_VERSION_MISMATCH: { + return os << "IP header version differs from the IP version defined by the Ethernet frame"; + } + case PktErr::IP_HEADER_TOO_SMALL: { + return os << "Reported IP header length is shorter than the allowed minimum"; + } + case PktErr::PKT_TOO_SHORT_FOR_IP_HEADER: { + return os << "Packet is too short for the IP header"; + } + case PktErr::PKT_TOO_SHORT_FOR_IP_EXTENSION_HEADER: { + return os << "Packet is too short for the IP extension header"; + } + case PktErr::PKT_TOO_SHORT_FOR_IP_EXTENSION_HEADER_BODY: { + return os << "Packet is too short for the IP extension body"; + } + case PktErr::UNKNOWN_IPV6_EXTENSION_HEADER: { + return os << "Unknown IPv6 extension"; + } + case PktErr::PKT_TOO_SHORT_FOR_L4_HEADER: { + return os << "IP content is too short to hold a Layer 4 header"; + } + case PktErr::PKT_TOO_SHORT_FOR_TCP_OPTIONS: { + return os << "IP content is too short to hold all the TCP Options"; + } + case PktErr::TCP_HEADER_TOO_SMALL: { + return os << "Reported TCP header length is shorter than the allowed minimum"; + } + case PktErr::PKT_TOO_SHORT_FOR_ICMP_ERROR_DATA: { + return os << "ICMP data is too short to hold all ICMP error information"; + } + case PktErr::ICMP_VERSION_MISMATCH: { + return os << "ICMP version does not match the IP version"; + } + }; + + return os << "Unknown error: " << static_cast(err); +} + +// This is common for (almost) all extension headers. 
+// All headers that ipv6_is_proto_extension returns true for them must have this layout +struct IPv6ExtBasic +{ + u_char next_type; +}; + +// This is common for some extension headers +struct IPv6ExtGeneric +{ + u_char next_type; + u_char ext_hdr_len; // Not in bytes! Sometimes *4, sometimes *8... +}; + +static const uint basic_ext_len = 8; +static const uint format_multiplier_four = 4; +static const uint format_multiplier_eight = 8; +static const char ipv4_chr = 0x40; +static const char ipv6_chr = 0x60; +static const char ipversion_mask = 0x60; + +static bool +isIPv6ProtoExtension(u_char proto) +{ + // ESP and None are not considered as ext headers, as their first 4 bytes are not of type IPv6ExtBasic + switch (proto) { + case IPPROTO_HOPOPTS: // 0 IPv6 hop-by-hop options - RFC2460 + case IPPROTO_ROUTING: // 43 IPv6 routing header - RFC2460 + case IPPROTO_FRAGMENT: // 44 IPv6 fragmentation header - RFC2460 +// case IPPROTO_ESP: // 50 IPv6 encapsulation security payload header - RFC4303 + case IPPROTO_AH: // 51 IPv6 authentication header - RFC4302 +// case IPPROTO_NONE: // 59 IPv6 no next header - RFC2460 + case IPPROTO_DSTOPTS: // 60 IPv6 destination options - RFC2460 + case IPPROTO_MH: { // 135 IPv6 mobility header - RFC3775 + return true; + } + } + return false; +} + +Maybe +Packet::parseFromL4(const IPAddr &src, const IPAddr &dst, IPProto proto) +{ + // Here so we got the l3 headers on both IPv4 and IPv6. + if (is_fragment) return ConnKey(src, 0, dst, 0, proto); + // Add ports + PortNumber sport, dport; + switch (proto) { + case IPPROTO_TCP: { + auto maybe_tcp = l3_payload.getTypePtr(0); + if (!maybe_tcp.ok()) { + dbgTrace(D_PACKET) + << "TCP packet is too short (" + << l3_payload.size() + << ") to contain a basic TCP header"; + return genError(PktErr::PKT_TOO_SHORT_FOR_L4_HEADER); + } + auto tcp = maybe_tcp.unpack(); + auto l4_hdr_len = tcp->doff * sizeof(int32_t); + + if (l4_hdr_len < sizeof(struct TcpHdr)) { + dbgTrace(D_PACKET) << + "TCP header length is smaller than the minimum: " << l4_hdr_len << " < " << sizeof(struct tcphdr); + return genError(PktErr::TCP_HEADER_TOO_SMALL); + } + if (l4_hdr_len > l3_payload.size()) { + dbgTrace(D_PACKET) << + "TCP packet is too short (" << l3_payload.size() << ") for a TCP header (" << l4_hdr_len << ")"; + return genError(PktErr::PKT_TOO_SHORT_FOR_TCP_OPTIONS); + } + + l4_header = l3_payload.getSubBuffer(0, l4_hdr_len); + l4_payload = l3_payload.getSubBuffer(l4_hdr_len, l3_payload.size()); + sport = ntohs(tcp->source); + dport = ntohs(tcp->dest); + break; + } + case IPPROTO_UDP: { + auto maybe_udp = l3_payload.getTypePtr(0); + if (!maybe_udp.ok()) { + dbgTrace(D_PACKET) + << "UDP packet is too short (" + << l3_payload.size() + << ") to contain a basic UDP header"; + return genError(PktErr::PKT_TOO_SHORT_FOR_L4_HEADER); + } + auto udp = maybe_udp.unpack(); + auto l4_hdr_len = sizeof(struct UdpHdr); + + l4_header = l3_payload.getSubBuffer(0, l4_hdr_len); + l4_payload = l3_payload.getSubBuffer(l4_hdr_len, l3_payload.size()); + sport = ntohs(udp->source); + dport = ntohs(udp->dest); + break; + } + case IPPROTO_ICMP: + case IPPROTO_ICMPV6: { + auto icmp_hdr_len = getIcmpHdrLen(proto, src.getType()); + if (!icmp_hdr_len.ok()) return icmp_hdr_len.passErr(); + auto l4_hdr_len = icmp_hdr_len.unpack(); + if (l4_hdr_len > l3_payload.size()) { + dbgTrace(D_PACKET) + << "ICMPv6 packet is too short (" + << l3_payload.size() + << ") to contain an ICMP header (" + << l4_hdr_len + << ")"; + return genError(PktErr::PKT_TOO_SHORT_FOR_L4_HEADER); + } + l4_header = 
l3_payload.getSubBuffer(0, l4_hdr_len); + l4_payload = l3_payload.getSubBuffer(l4_hdr_len, l3_payload.size()); + tie(sport, dport) = getICMPPorts(proto); + break; + } + case IPPROTO_GRE: { + auto maybe_gre = l3_payload.getTypePtr(0); + if (!maybe_gre.ok()) { + dbgTrace(D_PACKET) + << "GRE packet is too short (" + << l3_payload.size() + << ") to contain a basic GRE header"; + return genError(PktErr::PKT_TOO_SHORT_FOR_L4_HEADER); + } + auto l4_hdr_len = sizeof(struct GreHdr); + + l4_header = l3_payload.getSubBuffer(0, l4_hdr_len); + l4_payload = l3_payload.getSubBuffer(l4_hdr_len, l3_payload.size()); + sport = 0; + dport = 0; + break; + } + case IPPROTO_SCTP: { + auto maybe_sctp = l3_payload.getTypePtr(0); + if (!maybe_sctp.ok()) { + dbgTrace(D_PACKET) + << "SCTP packet is too short (" + << l3_payload.size() + << ") to contain a basic SCTP header"; + return genError(PktErr::PKT_TOO_SHORT_FOR_L4_HEADER); + } + auto sctp = maybe_sctp.unpack(); + auto l4_hdr_len = sizeof(struct SctpHdr); + + l4_header = l3_payload.getSubBuffer(0, l4_hdr_len); + l4_payload = l3_payload.getSubBuffer(l4_hdr_len, l3_payload.size()); + sport = ntohs(sctp->sport); + dport = ntohs(sctp->dport); + break; + } + case IPPROTO_DCCP: { + auto maybe_dccp = l3_payload.getTypePtr(0); + if (!maybe_dccp.ok()) { + dbgTrace(D_PACKET) + << "DCCP packet is too short (" + << l3_payload.size() + << ") to contain a basic DCCP header"; + return genError(PktErr::PKT_TOO_SHORT_FOR_L4_HEADER); + } + auto dccp = maybe_dccp.unpack(); + auto l4_hdr_len = sizeof(struct DccpHdr); + + l4_header = l3_payload.getSubBuffer(0, l4_hdr_len); + l4_payload = l3_payload.getSubBuffer(l4_hdr_len, l3_payload.size()); + sport = ntohs(dccp->dccph_sport); + dport = ntohs(dccp->dccph_dport); + break; + } + // other protocols + default: { + l4_payload = l3_payload; + sport = 0; + dport = 0; + break; + } + } + + return ConnKey(src, sport, dst, dport, proto); +} + +tuple +Packet::getICMPPortsV6() +{ + auto icmp_hdr = l4_header.getTypePtr(0).unpack(); + PortNumber sport = 0; + PortNumber dport = 0; + switch(icmp_hdr->icmp6_type) { + case ICMP6_ECHO_REQUEST: + sport = ntohs(icmp_hdr->icmp6_id); + if (!getConfigurationWithDefault(false, "Allow simultaneous ping")) { + dport = ntohs(icmp_hdr->icmp6_seq); + } + break; + case ICMP6_ECHO_REPLY: + if (!getConfigurationWithDefault(false, "Allow simultaneous ping")) { + sport = ntohs(icmp_hdr->icmp6_seq); + } + dport = ntohs(icmp_hdr->icmp6_id); + break; + case ICMP6_DST_UNREACH: + case ICMP6_PACKET_TOO_BIG: + case ICMP6_TIME_EXCEEDED: + case ICMP6_PARAM_PROB: + case ND_REDIRECT: + sport = icmp_hdr->icmp6_code; + dport = icmp_hdr->icmp6_type; + break; + } + return make_tuple(sport, dport); +} + +tuple +Packet::getICMPPortsV4() +{ + auto icmp_hdr = l4_header.getTypePtr(0).unpack(); + PortNumber sport = 0; + PortNumber dport = 0; + switch(icmp_hdr->type) { + case ICMP_ECHO: + case ICMP_TSTAMP: + case ICMP_IREQ: + case ICMP_MASKREQ: + sport = ntohs(icmp_hdr->un.echo.id); + if (!getConfigurationWithDefault(false, "Allow simultaneous ping")) { + dport = ntohs(icmp_hdr->un.echo.sequence); + } + break; + case ICMP_ECHOREPLY: + case ICMP_TSTAMPREPLY: + case ICMP_IREQREPLY: + case ICMP_MASKREPLY: + if (!getConfigurationWithDefault(false, "Allow simultaneous ping")) { + sport = ntohs(icmp_hdr->un.echo.sequence); + } + dport = ntohs(icmp_hdr->un.echo.id); + break; + case ICMP_UNREACH: + case ICMP_SOURCEQUENCH: + case ICMP_TIMXCEED: + case ICMP_PARAMPROB: + case ICMP_REDIRECT: + sport = icmp_hdr->code; + dport = icmp_hdr->type; + 
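        // ICMP error messages carry no echo id/sequence, so the code/type pair is
        // folded into the port slots (presumably to keep distinct error kinds on
        // distinct ConnKeys).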
break; + } + return make_tuple(sport, dport); +} + +tuple +Packet::getICMPPorts(IPProto proto) +{ + return proto == IPPROTO_ICMP ? getICMPPortsV4() : getICMPPortsV6(); +} + +Maybe +Packet::getIcmpHdrLen(IPProto proto, IPType ip_type) +{ + if (proto == IPPROTO_ICMP && ip_type == IPType::V4) { + return sizeof(struct icmphdr); + } else if (proto == IPPROTO_ICMPV6 && ip_type == IPType::V6) { + return sizeof(struct icmp6_hdr); + } + return genError(PktErr::ICMP_VERSION_MISMATCH); +} + +Maybe +Packet::getIPv6GenericExtLen(uint offset_to_ext_hdr, uint length_multiplier) +{ + auto maybe_header = l3.getTypePtr(offset_to_ext_hdr); + if (!maybe_header.ok()) { + dbgTrace(D_PACKET) << + "Not enough room for an IPv6 Extension header basic data (" << offset_to_ext_hdr << " + " << + sizeof(IPv6ExtGeneric) << " > " << l3.size() << ")"; + return genError(PktErr::PKT_TOO_SHORT_FOR_IP_EXTENSION_HEADER); + } + auto header = maybe_header.unpack(); + + return basic_ext_len + (header->ext_hdr_len * length_multiplier); +} + +Maybe +Packet::getIPv6ExtLen(uint offset_to_ext_hdr, IPProto ext_hdr_type) +{ + switch (ext_hdr_type) { + case IPPROTO_FRAGMENT: { + // The length of Fragmentation and ESP headers is always 8 bytes. They don't have a length field. + return basic_ext_len; + } + case IPPROTO_AH: { + // In AH header the length field specifies the header's length in units of 4 bytes + return getIPv6GenericExtLen(offset_to_ext_hdr, format_multiplier_four); + } + case IPPROTO_HOPOPTS: + case IPPROTO_ROUTING: + case IPPROTO_DSTOPTS: + case IPPROTO_MH: { + // For these headers, the length field specifies the header's length in units of 8 bytes + return getIPv6GenericExtLen(offset_to_ext_hdr, format_multiplier_eight); + } + } + dbgWarning(D_PACKET) << "Unknown IPv6 Extension header type" << static_cast(ext_hdr_type); + return genError(PktErr::UNKNOWN_IPV6_EXTENSION_HEADER); +} + +Maybe +Packet::getIPv6Proto(IPProto proto) +{ + uint offset_to_ext_hdr = sizeof(struct ip6_hdr); + + while (isIPv6ProtoExtension(proto)) { + auto res = getIPv6ExtLen(offset_to_ext_hdr, proto); + if (!res.ok()) return res.passErr(); + auto ext_len = *res; + + if (offset_to_ext_hdr + ext_len > l3.size()) { + dbgTrace(D_PACKET) << + "IPv6 Extension header " << static_cast(proto) << " body is too long" << + " - Body length=" << ext_len << ", offset=" << offset_to_ext_hdr << ", L3 data length=" << l3.size(); + return genError(PktErr::PKT_TOO_SHORT_FOR_IP_EXTENSION_HEADER_BODY); + } + + if (proto == IPPROTO_FRAGMENT) { + dbgTrace(D_PACKET) << "Fragmented IPv6 packet"; + is_fragment = true; + } + + auto header = l3.getTypePtr(offset_to_ext_hdr).unpack(); + proto = header->next_type; + offset_to_ext_hdr += ext_len; + } + + l3_header = l3.getSubBuffer(0, offset_to_ext_hdr); + l3_payload = l3.getSubBuffer(offset_to_ext_hdr, l3.size()); + return proto; +} + +Maybe +Packet::parseFromL3v6() +{ + auto maybe_ip6 = l2_payload.getTypePtr(0); + if (!maybe_ip6.ok()) { + dbgTrace(D_PACKET) + << "IPv6 packet is too short for an IPv6 header: " + << l2_payload.size() + << " < " + << sizeof(struct ip); + return genError(PktErr::PKT_TOO_SHORT_FOR_IP_HEADER); + } + auto ip6 = maybe_ip6.unpack(); + + uint ip_version = (ip6->ip6_vfc) >> 4; + if (ip_version != 6) { + dbgTrace(D_PACKET) << "Bad IPv6 version " << ip_version; + return genError(PktErr::IP_VERSION_MISMATCH); + } + + auto l3_len_reported_by_header = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen); + if (l3_len_reported_by_header > l2_payload.size()) { + dbgTrace(D_PACKET) << + "IP header reports a total of " << 
l3_len_reported_by_header << + " bytes, but the packet length is only " << l2_payload.size() << " bytes"; + return genError(PktErr::IP_SIZE_MISMATCH); + } + + l3 = l2_payload.getSubBuffer(0, l3_len_reported_by_header); // Remove padding + + auto proto = getIPv6Proto(ip6->ip6_nxt); + if (!proto.ok()) return genError(proto.getErr()); + + return parseFromL4(IPAddr(ip6->ip6_src), IPAddr(ip6->ip6_dst), proto.unpack()); +} + +Maybe +Packet::parseFromL3v4() +{ + auto maybe_ip4 = l2_payload.getTypePtr(0); + if (!maybe_ip4.ok()) { + dbgTrace(D_PACKET) + << "IPv4 packet is too short for an IPv4 header: " + << l2_payload.size() + << "<" + << sizeof(struct ip); + return genError(PktErr::PKT_TOO_SHORT_FOR_IP_HEADER); + } + auto ip4 = maybe_ip4.unpack(); + + uint ip_version = ip4->ip_v; + if (ip_version != 4) { + dbgTrace(D_PACKET) << "Bad IPv4 version " << ip_version << " length: " << ntohs(ip4->ip_len); + return genError(PktErr::IP_VERSION_MISMATCH); + } + + auto l3_len_reported_by_header = ntohs(ip4->ip_len); + if (l3_len_reported_by_header < sizeof(struct ip)) { + dbgTrace(D_PACKET) << + "IPv4 payload length is smaller than the IPv4 header: " << + l3_len_reported_by_header << " < " << sizeof(struct ip); + return genError(PktErr::IP_SIZE_MISMATCH); + } + if (l3_len_reported_by_header > l2_payload.size()) { + dbgTrace(D_PACKET) << + "IP header reports a total of " << l3_len_reported_by_header << + " bytes, but the packet length is only " << l2_payload.size() << " bytes"; + return genError(PktErr::IP_SIZE_MISMATCH); + } + + auto l3_hdr_len = ip4->ip_hl * sizeof(int); + if (l3_hdr_len < sizeof(struct ip)) { + dbgTrace(D_PACKET) + << "The reported IPv4 header length is smaller than the allowed minimum: " + << l3_hdr_len + << " < " + << sizeof(struct ip); + return genError(PktErr::IP_HEADER_TOO_SMALL); + } + if (l3_hdr_len > l2_payload.size()) { + dbgTrace(D_PACKET) + << "IPv4 header is too big for the IPv4 packet: " + << l3_hdr_len + << " > " + << l2_payload.size(); + return genError(PktErr::PKT_TOO_SHORT_FOR_IP_HEADER); + } + + auto frag_offset = ntohs(ip4->ip_off); + if ((frag_offset & IP_OFFMASK) || (frag_offset & IP_MF)) { + dbgTrace(D_PACKET) << "Fragmented IPv4 packet"; + is_fragment = true; + } + + l3 = l2_payload.getSubBuffer(0, l3_len_reported_by_header); // Remove padding + l3_header = l3.getSubBuffer(0, l3_hdr_len); + l3_payload = l3.getSubBuffer(l3_hdr_len, l3.size()); + + return parseFromL4(IPAddr(ip4->ip_src), IPAddr(ip4->ip_dst), ip4->ip_p); +} + +Maybe +Packet::parseFromL2() +{ + // In case of VLAN we want to remove the additional information and pass the packet as normal. + uint _maclen = sizeof(struct ether_header) - 4; // -4 for the first do loop. + uint16_t ether_type; + do + { + _maclen += 4; // 4 is the size of vlan tag. + auto maybe_ether_type = pkt_data.getTypePtr(_maclen - 2); // Last 2 Bytes contain the ether type. 
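        // Worked example: for an untagged frame _maclen is 14, so the EtherType is
        // read at offset 12-13; each 802.1Q tag found adds 4, so a single-tagged
        // frame re-reads the inner EtherType at offset 16-17 (_maclen == 18), and
        // so on for stacked (QinQ) tags.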
+ if (!maybe_ether_type.ok()) { + dbgTrace(D_PACKET) + << "VLAN tag length is greater than the packet length: " + << _maclen + << " > " + << pkt_data.size(); + return genError(PktErr::MAC_LEN_TOO_BIG); + } + ether_type = *(maybe_ether_type.unpack()); + } while (ether_type == constHTONS(ETHERTYPE_VLAN)); + + l2_header = pkt_data.getSubBuffer(0, _maclen); + l2_payload = pkt_data.getSubBuffer(_maclen, pkt_data.size()); + + switch (ether_type) { + case constHTONS(ETHERTYPE_IP): { + return parseFromL3v4(); + } + case constHTONS(ETHERTYPE_IPV6): { + return parseFromL3v6(); + } + default: { + dbgTrace(D_PACKET) << "Unsupported Ethernet type: " << ether_type; + return genError(PktErr::NON_IP_PACKET); + } + } +} + +Maybe +Packet::parsePacket(PktType type, IPType proto) +{ + if (type == PktType::PKT_L2) return parseFromL2(); + + l2_payload = pkt_data; + switch (proto) { + case IPType::V4: { + return parseFromL3v4(); + } + case IPType::V6: { + return parseFromL3v6(); + } + default: { + dbgAssert(false) << "Unknown (neither IPv4, nor IPv6), or uninitialized packet type: " << proto; + } + } + + return genError(PktErr::UNKNOWN_L3_PROTOCOL); +} + +std::vector +Packet::getL2DataVec() const +{ + auto p = pkt_data.data(); + std::vector buf(p, p+pkt_data.size()); + return buf; +} + +void +Packet::setInterface(NetworkIfNum value) +{ + interface = value; + is_interface = true; +} + +void +Packet::setZecoOpaque(u_int64_t value) +{ + zeco_opaque = value; + has_zeco_opaque = true; +} + +Maybe +Packet::getInterface() const +{ + if (!is_interface) return genError("Could not set an interface to send the packet"); + return interface; +} + +Maybe +Packet::getZecoOpaque() const +{ + if (!has_zeco_opaque) return genError("Could not get the zeco opaque"); + return zeco_opaque; +} diff --git a/components/packet/packet_ut/CMakeLists.txt b/components/packet/packet_ut/CMakeLists.txt new file mode 100644 index 0000000..b3e8001 --- /dev/null +++ b/components/packet/packet_ut/CMakeLists.txt @@ -0,0 +1,7 @@ +link_directories(${BOOST_ROOT}/lib) + +add_unit_test( + packet_ut + "packet_ut.cc" + "packet;connkey;buffers;environment;metric;event_is;-lboost_regex" +) diff --git a/components/packet/packet_ut/packet_ut.cc b/components/packet/packet_ut/packet_ut.cc new file mode 100755 index 0000000..a0a423e --- /dev/null +++ b/components/packet/packet_ut/packet_ut.cc @@ -0,0 +1,844 @@ +#include +#include + +#include "cptest.h" +#include "packet.h" +#include "c_common/network_defs.h" +#include "config.h" +#include "config_component.h" + +using namespace std; +using namespace testing; + +// !!!!!!!!!! NOTE !!!!!!!!!! +// If you are wondering how the hell to read the hex dumps, or how to make new tests - +// Wireshark has an option (under the File menu) to import hex dumps. + +static const uint mac_len = 14; +static const uint ipv4_basic_hdr_size = 20; +static const uint ipv6_basic_hdr_size = 40; +static const uint tcp_basic_hdr_size = 20; +static const uint udp_hdr_size = 8; + +// Using IsError(Maybe) requires T to be printable. 
So we need to print unique_ptr: +static ostream & +operator<<(ostream &os, const unique_ptr &p) +{ + return os << "unique_ptr(" << p.get() << ")"; +} + +class PacketTest : public Test +{ +public: + ConnKey v4_key, v6_key; + + PacketTest() + : + v4_key( + IPAddr::createIPAddr("172.23.34.11").unpack(), + 0xae59, + IPAddr::createIPAddr("172.23.53.31").unpack(), + 80, + 6 + ), + v6_key( + IPAddr::createIPAddr("2001:6f8:102d:0:2d0:9ff:fee3:e8de").unpack(), + 59201, + IPAddr::createIPAddr("2001:6f8:900:7c0::2").unpack(), + 80, + 6 + ) + { + } + + Maybe, PktErr> + getV4PacketL2() + { + // IPv4 TCP with 12 bytes of TCP options, 0 data + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 08 00 45 00 " + "0010: 00 34 93 24 40 00 40 06 f8 46 ac 17 22 0b ac 17 " + "0020: 35 1f ae 59 00 50 1a bb 79 14 5f 45 dc 97 80 10 " + "0030: 00 6c 1a 8c 00 00 01 01 08 0a ff fe eb 97 68 00 " + "0040: da 7e " + ); + return Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + } + + Maybe, PktErr> + getV4PacketL3() + { + // IPv4 TCP with 12 bytes of TCP options, 0 data + auto v = cptestParseHex( + "0000: 45 00 00 34 93 24 40 00 40 06 f8 46 ac 17 22 0b " + "0010: ac 17 35 1f ae 59 00 50 1a bb 79 14 5f 45 dc 97 " + "0020: 80 10 00 6c 1a 8c 00 00 01 01 08 0a ff fe eb 97 " + "0030: 68 00 da 7e " + ); + return Packet::genPacket(PktType::PKT_L3, IPType::V4, v); + } + + Maybe, PktErr> + getV6PacketL2() + { + // IPv6 TCP with 20 bytes of TCP options, 0 data + auto v = cptestParseHex( + "0000: 00 11 25 82 95 b5 00 d0 09 e3 e8 de 86 dd 60 00 " + "0010: 00 00 00 28 06 40 20 01 06 f8 10 2d 00 00 02 d0 " + "0020: 09 ff fe e3 e8 de 20 01 06 f8 09 00 07 c0 00 00 " + "0030: 00 00 00 00 00 02 e7 41 00 50 ab dc d6 60 00 00 " + "0040: 00 00 a0 02 16 80 41 a2 00 00 02 04 05 a0 04 02 " + "0050: 08 0a 00 0a 22 a8 00 00 00 00 01 03 03 05 " + ); + return Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + } +}; + +TEST_F(PacketTest, check_zeco_opaque) +{ + auto v4_pkt = getV4PacketL2().unpackMove(); + auto zeco_opaque = v4_pkt->getZecoOpaque(); + EXPECT_FALSE(zeco_opaque.ok()); + + v4_pkt->setZecoOpaque(11); + + zeco_opaque = v4_pkt->getZecoOpaque(); + EXPECT_TRUE(zeco_opaque.ok()); + EXPECT_EQ(zeco_opaque.unpack(), 11u); +} + +TEST_F(PacketTest, check_fixture_ctor) +{ + EXPECT_TRUE(getV4PacketL2().ok()); + EXPECT_TRUE(getV6PacketL2().ok()); +} + +TEST_F(PacketTest, l2_v4_good) +{ + auto v4_pkt = getV4PacketL2().unpackMove(); + EXPECT_EQ(v4_pkt->getPacket().size(), mac_len + 52); + EXPECT_EQ(v4_pkt->getL3().size(), 52u); + EXPECT_EQ(v4_pkt->getL3Header().size(), ipv4_basic_hdr_size); + EXPECT_EQ(v4_pkt->getL4Header().size(), tcp_basic_hdr_size + 12); + Buffer l2_buf = v4_pkt->getPacket(); + l2_buf.truncateHead(mac_len); + EXPECT_EQ(v4_pkt->getL3(), l2_buf); + EXPECT_EQ(v4_pkt->getKey(), v4_key); +} + +TEST_F(PacketTest, l3_v4_good) +{ + auto v4_pkt = getV4PacketL3().unpackMove(); + EXPECT_EQ(v4_pkt->getPacket().size(), 52u); + EXPECT_EQ(v4_pkt->getL3().size(), 52u); + EXPECT_EQ(v4_pkt->getL3Header().size(), ipv4_basic_hdr_size); + EXPECT_EQ(v4_pkt->getL4Header().size(), tcp_basic_hdr_size + 12); + Buffer l2_buf = v4_pkt->getPacket(); + EXPECT_EQ(v4_pkt->getL3(), l2_buf); + EXPECT_EQ(v4_pkt->getKey(), v4_key); +} + +TEST_F(PacketTest, v6_good) +{ + auto v6_pkt = getV6PacketL2().unpackMove(); + EXPECT_EQ(v6_pkt->getPacket().size(), mac_len + 80); + EXPECT_EQ(v6_pkt->getL3().size(), 80u); + EXPECT_EQ(v6_pkt->getL3Header().size(), ipv6_basic_hdr_size); + EXPECT_EQ(v6_pkt->getL4Header().size(), tcp_basic_hdr_size + 20); + 
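    // Size arithmetic for this capture: 80 bytes of L3 = 40-byte fixed IPv6 header
    // + 40-byte TCP header (20 basic + 20 bytes of options), i.e. no TCP payload.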
Buffer l2_buf = v6_pkt->getPacket(); + l2_buf.truncateHead(mac_len); + EXPECT_EQ(v6_pkt->getL3(), l2_buf); + EXPECT_EQ(v6_pkt->getKey(), v6_key); +} + +TEST_F(PacketTest, l2_v4_get_l4) +{ + auto v4_pkt = getV4PacketL2().unpackMove(); + Buffer buf = v4_pkt->getL4Data(); + EXPECT_EQ(buf.size(), 0u); +} + +TEST_F(PacketTest, l3_v4_get_l4) +{ + auto v4_pkt = getV4PacketL3().unpackMove(); + Buffer buf = v4_pkt->getL4Data(); + EXPECT_EQ(buf.size(), 0u); +} + + +TEST_F(PacketTest, v6_get_l4) +{ + auto v6_pkt = getV6PacketL2().unpackMove(); + Buffer buf = v6_pkt->getL4Data(); + EXPECT_EQ(buf.size(), 0u); +} + +TEST(Packet, packet_with_padding) +{ + ConnKey ck( + IPAddr::createIPAddr("192.168.170.8").unpack(), + 32795, + IPAddr::createIPAddr("192.168.170.20").unpack(), + 53, + 17 + ); + auto v = cptestParseHex( + "0000: 00 c0 9f 32 41 8c 00 e0 18 b1 0c ad 08 00 45 00 " + "0010: 00 3d 00 00 40 00 40 11 65 42 c0 a8 aa 08 c0 a8 " + "0020: aa 14 80 1b 00 35 00 29 88 61 bc 1f 01 00 00 01 " + "0030: 00 00 00 00 00 00 03 77 77 77 07 65 78 61 6d 70 " + "0040: 6c 65 03 63 6f 6d 00 00 1c 00 01 00 00 00 00 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + ASSERT_TRUE(ptr.ok()); + auto p = ptr.unpackMove(); + + EXPECT_EQ(p->getL3().size(), 61u); // True size as reported by IP header. + EXPECT_EQ(p->getL3Header().size(), ipv4_basic_hdr_size); + EXPECT_EQ(p->getL4Header().size(), udp_hdr_size); + EXPECT_EQ(p->getKey(), ck); + EXPECT_EQ(p->getL4Data().size(), 33u); +} + + +TEST(Packet, v4_ip_options) +{ + ConnKey ck( + IPAddr::createIPAddr("172.23.34.11").unpack(), + 44633, + IPAddr::createIPAddr("172.23.53.31").unpack(), + 80, + 6 + ); + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 08 00 47 00 " // Modified: 4500 => 4700 for 2 option ints. 
+ "0010: 00 3c 93 24 40 00 40 06 f8 46 ac 17 22 0b ac 17 " + "0020: 35 1f 01 04 12 34 56 78 00 00 ae 59 00 50 1a bb " // Inserted IP options: NOP; EOL; + "0030: 79 14 5f 45 dc 97 80 10 00 6c 1a 8c 00 00 01 01 " + "0040: 08 0a ff fe eb 97 68 00 da 7e " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + ASSERT_TRUE(ptr.ok()); + auto p = ptr.unpackMove(); + + EXPECT_EQ(p->getKey(), ck); + EXPECT_EQ(p->getL3().size(), 60u); + EXPECT_EQ(p->getL3Header().size(), ipv4_basic_hdr_size + 8); // 8 bytes IP options + EXPECT_EQ(p->getL4Header().size(), tcp_basic_hdr_size + 12); // 12 bytes TCP options + + // Get L4 fields + EXPECT_EQ(p->getL4Data().size(), 0u); +} + +TEST(Packet, l2_v4_udp) +{ + ConnKey ck( + IPAddr::createIPAddr("192.168.170.8").unpack(), + 32795, + IPAddr::createIPAddr("192.168.170.20").unpack(), + 53, + 17 + ); + auto v = cptestParseHex( + "0000: 00 c0 9f 32 41 8c 00 e0 18 b1 0c ad 08 00 45 00 " + "0010: 00 3d 00 00 40 00 40 11 65 42 c0 a8 aa 08 c0 a8 " + "0020: aa 14 80 1b 00 35 00 29 88 61 bc 1f 01 00 00 01 " + "0030: 00 00 00 00 00 00 03 77 77 77 07 65 78 61 6d 70 " + "0040: 6c 65 03 63 6f 6d 00 00 1c 00 01 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + ASSERT_TRUE(ptr.ok()); + auto p = ptr.unpackMove(); + + EXPECT_EQ(p->getL3().size(), 61u); + EXPECT_EQ(p->getL3Header().size(), ipv4_basic_hdr_size); + EXPECT_EQ(p->getL4Header().size(), udp_hdr_size); + EXPECT_EQ(p->getKey(), ck); + + EXPECT_EQ(p->getL4Data().size(), 33u); +} + +TEST(Packet, l3_v4_udp) +{ + ConnKey ck( + IPAddr::createIPAddr("192.168.170.8").unpack(), + 32795, + IPAddr::createIPAddr("192.168.170.20").unpack(), + 53, + 17 + ); + auto v = cptestParseHex( + "0000: 45 00 00 3d 00 00 40 00 40 11 65 42 c0 a8 aa 08 " + "0010: c0 a8 aa 14 80 1b 00 35 00 29 88 61 bc 1f 01 00 " + "0020: 00 01 00 00 00 00 00 00 03 77 77 77 07 65 78 61 " + "0030: 6d 70 6c 65 03 63 6f 6d 00 00 1c 00 01 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L3, IPType::V4, v); + ASSERT_TRUE(ptr.ok()); + auto p = ptr.unpackMove(); + + EXPECT_EQ(p->getL3().size(), 61u); + EXPECT_EQ(p->getL3Header().size(), ipv4_basic_hdr_size); + EXPECT_EQ(p->getL4Header().size(), udp_hdr_size); + EXPECT_EQ(p->getKey(), ck); + + EXPECT_EQ(p->getL4Data().size(), 33u); +} + +TEST(Packet, v6_ping) +{ + ::Environment env; + ConfigComponent config_comp; + ConnKey ck( + IPAddr::createIPAddr("3ffe:507:0:1:200:86ff:fe05:80da").unpack(), + 31520, + IPAddr::createIPAddr("3ffe:507:0:1:260:97ff:fe07:69ea").unpack(), + 1024, + 58 + ); + auto v = cptestParseHex( + "0000: 00 60 97 07 69 ea 00 00 86 05 80 da 86 dd 60 00 " + "0010: 00 00 00 10 3a 40 3f fe 05 07 00 00 00 01 02 00 " + "0020: 86 ff fe 05 80 da 3f fe 05 07 00 00 00 01 02 60 " + "0030: 97 ff fe 07 69 ea 80 00 ae 76 7b 20 04 00 1d c9 " + "0040: e7 36 ad df 0b 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + ASSERT_TRUE(ptr.ok()); + auto p = ptr.unpackMove(); + + EXPECT_EQ(p->getL3().size(), 56u); + EXPECT_EQ(p->getL3Header().size(), ipv6_basic_hdr_size); + EXPECT_EQ(p->getL4Header().size(), sizeof(struct icmp6_hdr)); + EXPECT_EQ(p->getKey(), ck); +} + +TEST(Packet, v6_IPPROTO_ROUTING_extension_hdr) +{ + // IPv6 TCP packet with IPPROTO_ROUTING extension header (56 bytes), 20 bytes of data + ConnKey ck( + IPAddr::createIPAddr("3001::200:1080:8110:11fe").unpack(), + 32768, + IPAddr::createIPAddr("3000::215:1780:8116:b881").unpack(), + 80, + 6 + ); + auto v = cptestParseHex( + "0000: 00 60 97 07 69 ea 00 00 86 05 80 da 86 dd 60 00 " + "0010: 00 00 00 60 
2b 80 30 01 00 00 00 00 00 00 02 00 " + "0020: 10 80 81 10 11 fe 30 00 00 00 00 00 00 00 02 15 " + "0030: 17 80 81 16 b8 81 06 06 00 01 00 00 00 00 30 02 " + "0040: 00 00 00 00 00 00 02 00 10 80 81 10 12 62 30 03 " + "0050: 00 00 00 00 00 00 02 00 10 80 81 10 10 60 ff 00 " + "0060: 1d 00 00 00 00 00 00 00 00 00 00 00 00 00 80 00 " + "0070: 00 50 11 11 11 11 22 22 22 22 50 18 67 68 2b d2 " + "0080: 00 00 6d 6e 6f 70 71 72 73 74 75 76 77 61 62 63 " + "0090: 64 65 66 67 68 69 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + ASSERT_TRUE(ptr.ok()); + auto p = ptr.unpackMove(); + + static const uint routing_ext_hdr_size = 56; + static const uint total_packet_len = 150; + + EXPECT_EQ(p->getPacket().size(), total_packet_len); + EXPECT_EQ(p->getL3().size(), total_packet_len - mac_len); + EXPECT_EQ(p->getL3Header().size(), ipv6_basic_hdr_size + routing_ext_hdr_size); + EXPECT_EQ(p->getL4Header().size(), tcp_basic_hdr_size); + EXPECT_EQ(p->getKey(), ck); + EXPECT_EQ(p->getL4Data().size(), 20u); +} + + +TEST(Packet, v6_IPPROTO_HOPOPT_and_IPPROTO_ROUTING_ext_hdrs) +{ + ConnKey ck( + IPAddr::createIPAddr("3001::200:1080:8110:11fe").unpack(), + 32768, + IPAddr::createIPAddr("3000::215:1780:8116:b881").unpack(), + 58205, + 17 + ); + auto v = cptestParseHex( + "0000: 00 60 97 07 69 ea 00 00 86 05 80 da 86 dd 60 00 " + "0010: 00 00 00 70 00 80 30 01 00 00 00 00 00 00 02 00 " + "0020: 10 80 81 10 11 fe 30 00 00 00 00 00 00 00 02 15 " + "0030: 17 80 81 16 b8 81 2b 01 00 00 00 00 00 00 00 00 " + "0040: 00 00 00 00 00 00 11 06 00 01 00 00 00 00 30 02 " + "0050: 00 00 00 00 00 00 02 00 10 80 81 10 12 62 30 03 " + "0060: 00 00 00 00 00 00 02 00 10 80 81 10 10 60 ff 00 " + "0070: 1d 00 00 00 00 00 00 00 00 00 00 00 00 00 80 00 " + "0080: e3 5d 00 28 00 0c 61 62 63 64 65 66 67 68 69 6a " + "0090: 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 61 62 63 " + "00a0: 64 65 66 67 68 69 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + ASSERT_TRUE(ptr.ok()); + auto p = ptr.unpackMove(); + + static const uint routing_ext_hdr_size = 56; + static const uint hop_ext_hdr_size = 16; + static const uint total_packet_len = 166; + static const uint total_extensions_size = routing_ext_hdr_size + hop_ext_hdr_size; + + EXPECT_EQ(p->getPacket().size(), total_packet_len); + EXPECT_EQ(p->getL3().size(), total_packet_len - mac_len); + EXPECT_EQ(p->getL3Header().size(), ipv6_basic_hdr_size + total_extensions_size); + EXPECT_EQ(p->getL4Header().size(), udp_hdr_size); + EXPECT_EQ(p->getKey(), ck); + + EXPECT_EQ(p->getL4Data().size(), 32u); +} + +TEST(Packet, DISABLED_non_ethernet_mac_len) +{ + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 08 00 45 00 " + "0010: 00 34 93 24 40 00 40 06 f8 46 ac 17 22 0b ac 17 " + "0020: 35 1f ae 59 00 50 1a bb 79 14 5f 45 dc 97 80 10 " + "0030: 00 6c 1a 8c 00 00 01 01 08 0a ff fe eb 97 68 00 " + "0040: da 7e " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::NON_ETHERNET_FRAME)); +} + +TEST(Packet, DISABLED_too_big_mac_len) +{ + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 08 00 45 00 " + "0010: 00 34 93 24 40 00 40 06 f8 46 ac 17 22 0b ac 17 " + "0020: 35 1f ae 59 00 50 1a bb 79 14 5f 45 dc 97 80 10 " + "0030: 00 6c 1a 8c 00 00 01 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::MAC_LEN_TOO_BIG)); +} + +TEST(Packet, non_ip_packet) +{ + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 08 88 
45 00 " + "0010: 00 34 93 24 40 00 40 06 f8 46 ac 17 22 0b ac 17 " + "0020: 35 1f ae 59 00 50 1a bb 79 14 5f 45 dc 97 80 10 " + "0030: 00 6c 1a 8c 00 00 01 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::NON_IP_PACKET)); +} + +TEST(Packet, version_mismatch_v4) +{ + // Valid IPv4 packet, but Ethernet header says it is IPv6 + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 86 dd 45 00 " + "0010: 00 34 93 24 40 00 40 06 f8 46 ac 17 22 0b ac 17 " + "0020: 35 1f ae 59 00 50 1a bb 79 14 5f 45 dc 97 80 10 " + "0030: 00 6c 1a 8c 00 00 01 01 08 0a ff fe eb 97 68 00 " + "0040: da 7e " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::IP_VERSION_MISMATCH)); +} + +TEST(Packet, version_mismatch_v6) +{ + // Valid IPv6 packet, but Ethernet header says it is IPv4 + auto v = cptestParseHex( + "0000: 00 11 25 82 95 b5 00 d0 09 e3 e8 de 08 00 60 00 " + "0010: 00 00 00 28 06 40 20 01 06 f8 10 2d 00 00 02 d0 " + "0020: 09 ff fe e3 e8 de 20 01 06 f8 09 00 07 c0 00 00 " + "0030: 00 00 00 00 00 02 e7 41 00 50 ab dc d6 60 00 00 " + "0040: 00 00 a0 02 16 80 41 a2 00 00 02 04 05 a0 04 02 " + "0050: 08 0a 00 0a 22 a8 00 00 00 00 01 03 03 05 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + EXPECT_THAT(ptr, IsError(PktErr::IP_VERSION_MISMATCH)); +} + +TEST(Packet, empty_frame_v4) +{ + auto v = cptestParseHex( + "0000: 00 c0 9f 32 41 8c 00 e0 18 b1 0c ad 08 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::PKT_TOO_SHORT_FOR_IP_HEADER)); +} + +TEST(Packet, empty_frame_v6) +{ + auto v = cptestParseHex( + "0000: 00 c0 9f 32 41 8c 00 e0 18 b1 0c ad 86 dd " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::PKT_TOO_SHORT_FOR_IP_HEADER)); +} + +TEST(Packet, ipv4_pkt_no_room_for_header) +{ + auto v = cptestParseHex( + "0000: 00 c0 9f 32 41 8c 00 e0 18 b1 0c ad 08 00 45 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::PKT_TOO_SHORT_FOR_IP_HEADER)); +} + +TEST(Packet, ipv4_pkt_no_room_for_header_with_options) +{ + auto v = cptestParseHex( + "0000: 00 c0 9f 32 41 8c 00 e0 18 b1 0c ad 08 00 48 00 " + "0010: 00 1c 00 00 40 00 40 11 65 42 c0 a8 aa 08 c0 a8 " + "0020: aa 14 80 1b 00 35 00 29 88 61 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::PKT_TOO_SHORT_FOR_IP_HEADER)); +} + +TEST(Packet, ipv6_pkt_no_room_for_header) +{ + auto v = cptestParseHex( + "0000: 00 c0 9f 32 41 8c 00 e0 18 b1 0c ad 86 dd 60 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + EXPECT_THAT(ptr, IsError(PktErr::PKT_TOO_SHORT_FOR_IP_HEADER)); +} + +TEST(Packet, ipv4_payload_length_smaller_than_ipv4_header) +{ + auto v = cptestParseHex( + "0000: 00 c0 9f 32 41 8c 00 e0 18 b1 0c ad 08 00 45 00 " + "0010: 00 10 00 00 40 00 40 11 65 42 c0 a8 aa 08 c0 a8 " + "0020: aa 14 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::IP_SIZE_MISMATCH)); +} + +TEST(Packet, v6_ext_hdr_not_complete) +{ + // IPv6 packet with IPPROTO_HOPOPTS cut at the middle of the header + auto v = cptestParseHex( + "0000: 00 60 97 07 69 ea 00 00 86 05 80 da 86 dd 60 00 " + "0010: 00 00 00 01 00 80 30 01 00 00 00 00 00 00 02 00 " + "0020: 10 80 81 10 11 fe 30 00 00 00 00 00 00 00 02 15 " + "0030: 17 80 81 16 b8 81 3a " + ); + auto ptr = 
Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + EXPECT_THAT(ptr, IsError(PktErr::PKT_TOO_SHORT_FOR_IP_EXTENSION_HEADER)); +} + + +TEST(Packet, v6_no_room_for_ext_hdr_body) +{ + // IPv6 packet with IPPROTO_HOPOPTS ext header specified as 16 bytes, but packet too short + auto v = cptestParseHex( + "0000: 00 60 97 07 69 ea 00 00 86 05 80 da 86 dd 60 00 " + "0010: 00 00 00 02 00 80 30 01 00 00 00 00 00 00 02 00 " + "0020: 10 80 81 10 11 fe 30 00 00 00 00 00 00 00 02 15 " + "0030: 17 80 81 16 b8 81 3a 01 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + EXPECT_THAT(ptr, IsError(PktErr::PKT_TOO_SHORT_FOR_IP_EXTENSION_HEADER_BODY)); +} + + +TEST(Packet, ipv4_size_mismatch) +{ + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 08 00 45 00 " + "0010: 00 35 93 24 40 00 40 06 f8 46 ac 17 22 0b ac 17 " + "0020: 35 1f ae 59 00 50 1a bb 79 14 5f 45 dc 97 80 10 " + "0030: 00 6c 1a 8c 00 00 01 01 08 0a ff fe eb 97 68 00 " + "0040: da 7e " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::IP_SIZE_MISMATCH)); +} + +TEST(Packet, ipv6_size_mismatch) +{ + auto v = cptestParseHex( + "0000: 00 11 25 82 95 b5 00 d0 09 e3 e8 de 86 dd 60 00 " + "0010: 00 00 00 29 06 40 20 01 06 f8 10 2d 00 00 02 d0 " + "0020: 09 ff fe e3 e8 de 20 01 06 f8 09 00 07 c0 00 00 " + "0030: 00 00 00 00 00 02 e7 41 00 50 ab dc d6 60 00 00 " + "0040: 00 00 a0 02 16 80 41 a2 00 00 02 04 05 a0 04 02 " + "0050: 08 0a 00 0a 22 a8 00 00 00 00 01 03 03 05 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + EXPECT_THAT(ptr, IsError(PktErr::IP_SIZE_MISMATCH)); +} + + +TEST(Packet, no_room_for_udp_header) +{ + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 08 00 45 00 " + "0010: 00 18 93 24 40 00 40 11 f8 57 ac 17 22 0b ac 17 " + "0020: 35 1f ae 59 00 50 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::PKT_TOO_SHORT_FOR_L4_HEADER)); +} + +TEST(Packet, no_room_for_tcp_header) +{ + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 08 00 45 00 " + "0010: 00 22 93 24 40 00 40 06 f8 58 ac 17 22 0b ac 17 " + "0020: 35 1f ae 59 00 50 1a bb 79 14 5f 45 dc 97 80 10 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::PKT_TOO_SHORT_FOR_L4_HEADER)); +} + +TEST(Packet, tcp_header_len_too_short) +{ + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 08 00 45 00 " + "0010: 00 28 93 24 40 00 40 06 f8 52 ac 17 22 0b ac 17 " + "0020: 35 1f ae 59 00 50 1a bb 79 14 5f 45 dc 97 20 10 " + "0030: 00 6c 1a 8c 00 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::TCP_HEADER_TOO_SMALL)); +} + + +TEST(Packet, tcp_header_len_too_big) +{ + auto v = cptestParseHex( + "0000: cc d8 c1 b1 cc 77 00 50 56 b9 4f 5c 08 00 45 00 " + "0010: 00 29 93 24 40 00 40 06 f8 51 ac 17 22 0b ac 17 " + "0020: 35 1f ae 59 00 50 1a bb 79 14 5f 45 dc 97 80 10 " + "0030: 00 6c 1a 8c 00 00 01 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::PKT_TOO_SHORT_FOR_TCP_OPTIONS)); +} + +TEST(Packet, get_l2_data_vec) +{ + auto v = cptestParseHex( // Same as v4_udp + "0000: 00 c0 9f 32 41 8c 00 e0 18 b1 0c ad 08 00 45 00 " + "0010: 00 3d 00 00 40 00 40 11 65 42 c0 a8 aa 08 c0 a8 " + "0020: aa 14 80 1b 00 35 00 29 88 61 bc 1f 01 00 00 01 " + "0030: 00 00 00 00 00 00 03 77 77 77 07 65 78 61 6d 70 " + "0040: 6c 
65 03 63 6f 6d 00 00 1c 00 01 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + + EXPECT_EQ(v, (*ptr)->getL2DataVec()); +} + +TEST(Packet, interface_set_and_get) +{ + auto v = cptestParseHex( // Same as v4_udp + "0000: 00 c0 9f 32 41 8c 00 e0 18 b1 0c ad 08 00 45 00 " + "0010: 00 3d 00 00 40 00 40 11 65 42 c0 a8 aa 08 c0 a8 " + "0020: aa 14 80 1b 00 35 00 29 88 61 bc 1f 01 00 00 01 " + "0030: 00 00 00 00 00 00 03 77 77 77 07 65 78 61 6d 70 " + "0040: 6c 65 03 63 6f 6d 00 00 1c 00 01 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + auto p = ptr.unpackMove(); + + EXPECT_FALSE(p->getInterface().ok()); + + p->setInterface(5); + EXPECT_TRUE(p->getInterface().ok()); + EXPECT_EQ(5, p->getInterface().unpack()); + + p->setInterface(42); + EXPECT_TRUE(p->getInterface().ok()); + EXPECT_EQ(42, p->getInterface().unpack()); +} + +TEST(Packet, no_room_for_icmp_header) +{ + // only 7 bytes of ICMPV4 (min is 8) + auto v = cptestParseHex( + "0000: 00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 " + "0010: 00 1b 12 34 40 00 ff 01 6b ab 7f 00 00 01 7f 00 " + "0020: 00 01 00 00 ff fd 00 01 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + ASSERT_FALSE(ptr.ok()); + EXPECT_EQ(ptr.getErr(), PktErr::PKT_TOO_SHORT_FOR_L4_HEADER); +} + +TEST(Packet, icmp) +{ + ::Environment env; + ConfigComponent config_comp; + // correct ICMPV4 packet + auto v = cptestParseHex( + "0000: 00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 " + "0010: 00 1c 12 34 40 00 ff 01 6b aa 7f 00 00 01 7f 00 " + "0020: 00 01 00 00 ff fd 00 01 00 01 00 00 00 00 00 00 " + "0030: 00 00 00 00 00 00 00 00 00 00 00 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + ASSERT_TRUE(ptr.ok()); + EXPECT_EQ(ptr->get()->getKey().getProto(), IPPROTO_ICMP); + EXPECT_EQ(ptr->get()->getPacketProto(), IPType::V4); + auto p = ptr.unpackMove(); + auto icmp = p->getL4Header().getTypePtr(0).unpack(); + auto checksum = ntohs(icmp->checksum); + EXPECT_EQ(icmp->type, ICMP_ECHOREPLY); + EXPECT_EQ(icmp->code, 0); + EXPECT_EQ(checksum, 0xfffd); +} + +TEST(Packet, no_room_for_icmpv6_header) +{ + // only 7 bytes of ICMPV6 (min is 8) + auto v = cptestParseHex( + "0000: 00 00 00 00 00 00 00 00 00 00 00 00 86 dd 60 00 " + "0010: 00 00 00 07 3a ff 00 00 00 00 00 00 00 00 00 00 " + "0020: 00 00 00 00 00 01 00 00 00 00 00 00 00 00 00 00 " + "0030: 00 00 00 00 00 01 80 00 7f bc 00 00 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + ASSERT_FALSE(ptr.ok()); + EXPECT_EQ(ptr.getErr(), PktErr::PKT_TOO_SHORT_FOR_L4_HEADER); +} + +TEST(Packet, icmpv6) +{ + ::Environment env; + ConfigComponent config_comp; + // correct ICMPV6 packet + auto v = cptestParseHex( + "0000: 00 00 00 00 00 00 00 00 00 00 00 00 86 dd 60 00 " + "0010: 00 00 00 0c 3a ff 00 00 00 00 00 00 00 00 00 00 " + "0020: 00 00 00 00 00 01 00 00 00 00 00 00 00 00 00 00 " + "0030: 00 00 00 00 00 01 80 00 3b 51 00 00 00 00 11 22 " + "0040: 33 44 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + ASSERT_TRUE(ptr.ok()); + EXPECT_EQ(ptr->get()->getKey().getProto(), IPPROTO_ICMPV6); + EXPECT_EQ(ptr->get()->getPacketProto(), IPType::V6); + auto p = ptr.unpackMove(); + auto icmp = p->getL4Header().getTypePtr(0).unpack(); + auto checksum = ntohs(icmp->icmp6_cksum); + EXPECT_EQ(icmp->icmp6_type, ICMP6_ECHO_REQUEST); + EXPECT_EQ(icmp->icmp6_code, 0); + EXPECT_EQ(checksum, 0x3b51); +} + +TEST(Packet, icmp_over_ipv6) +{ + // correct ICMPV4 packet over IPV6 + auto v = cptestParseHex( + "0000: 00 00 00 
00 00 00 00 00 00 00 00 00 86 dd 60 00 " + "0010: 00 00 00 0c 01 ff 00 00 00 00 00 00 00 00 00 00 " + "0020: 00 00 00 00 00 01 00 00 00 00 00 00 00 00 00 00 " + "0030: 00 00 00 00 00 01 00 00 ff fd 00 01 00 01 00 00 " + "0040: 00 00 00 00 00 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + EXPECT_THAT(ptr, IsError(PktErr::ICMP_VERSION_MISMATCH)); +} + +TEST(Packet, icmpv6_over_ipv4) +{ + // correct ICMPV6 packet over IPV4 + auto v = cptestParseHex( + "0000: 00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 " + "0010: 00 1c 12 34 40 00 ff 3a 6b aa 7f 00 00 01 7f 00 " + "0020: 00 01 80 00 3b 51 00 00 00 00 11 22 33 44 00 00 " + "0030: 00 00 00 00 00 00 00 00 00 00 00 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + EXPECT_THAT(ptr, IsError(PktErr::ICMP_VERSION_MISMATCH)); +} + +TEST(Packet, tcp_fragment_noheader) +{ + // IPv4 TCP fragmented packet with no TCP header + auto v = cptestParseHex( + "0000: 00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 " + "0010: 00 28 12 34 00 5d ff 06 00 00 7f 00 00 01 7f 00 " + "0020: 00 01 00 00 00 50 00 00 00 64 00 00 00 64 50 00 " + "0030: 0f a0 a1 2a 00 00 00 00 00 00 00 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + ConnKey key( + IPAddr::createIPAddr("127.0.0.1").unpack(), 0, + IPAddr::createIPAddr("127.0.0.1").unpack(), 0, + 6 + ); + EXPECT_EQ(key, ptr.unpack()->getKey()); +} + +TEST(Packet, tcp_notfragment) +{ + // IPv4 fragmented packet with TCP header + auto v = cptestParseHex( + "0000: 00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 " + "0010: 00 28 12 34 20 00 ff 06 00 00 7f 00 00 01 7f 00 " + "0020: 00 01 00 00 00 50 00 00 00 64 00 00 00 64 50 00 " + "0030: 0f a0 a1 2a 00 00 00 00 00 00 00 00 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + ConnKey key( + IPAddr::createIPAddr("127.0.0.1").unpack(), 0, + IPAddr::createIPAddr("127.0.0.1").unpack(), 0, + 6 + ); + EXPECT_EQ(key, ptr.unpack()->getKey()); +} + +TEST(Packet, ipv6_fragment_noheader) +{ + // IPv6 fragmented packet with no L4 header + auto v = cptestParseHex( + "0000: 00 1d 09 94 65 38 68 5b 35 c0 61 b6 86 dd 60 02 " + "0010: 12 89 00 1a 2c 40 26 07 f0 10 03 f9 00 00 00 00 " + "0020: 00 00 00 00 10 01 26 07 f0 10 03 f9 00 00 00 00 " + "0030: 00 00 00 11 00 00 11 00 05 a9 f8 8e b4 66 68 68 " + "0040: 68 68 68 68 68 68 68 68 68 68 68 68 68 68 68 68 " + "0050: 68 68 68 68 68 68 68 68 68 68 68 68 68 68 68 68 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V4, v); + ConnKey key( + IPAddr::createIPAddr("2607:f010:3f9::1001").unpack(), 0, + IPAddr::createIPAddr("2607:f010:3f9::11:0").unpack(), 0, + 17 + ); + EXPECT_EQ(key, ptr.unpack()->getKey()); +} + +TEST(Packet, ipv6_fragment_with_header) +{ + // IPv6 fragmented packet with L4 header + auto v = cptestParseHex( + "0000: 00 1d 09 94 65 38 68 5b 35 c0 61 b6 86 dd 60 02 " + "0010: 12 89 00 1a 2c 40 26 07 f0 10 03 f9 00 00 00 00 " + "0020: 00 00 00 00 10 01 26 07 f0 10 03 f9 00 00 00 00 " + "0030: 00 00 00 11 00 00 11 00 00 01 f8 8e b4 66 18 db " + "0040: 18 db 15 0b 79 16 06 fd 14 ff 07 29 08 07 65 78 " + "0050: 61 6d 70 6c 65 08 07 74 65 73 74 41 70 70 08 01 " + ); + auto ptr = Packet::genPacket(PktType::PKT_L2, IPType::V6, v); + ConnKey key( + IPAddr::createIPAddr("2607:f010:3f9::1001").unpack(), 0, + IPAddr::createIPAddr("2607:f010:3f9::11:0").unpack(), 0, + 17 + ); + EXPECT_EQ(key, ptr.unpack()->getKey()); +} + +TEST(CDir, printout_operator) +{ + stringstream buf_c2s; + buf_c2s << CDir::C2S; + EXPECT_EQ(buf_c2s.str(), 
"c2s"); + + stringstream buf_s2c; + buf_s2c << CDir::S2C; + EXPECT_EQ(buf_s2c.str(), "s2c"); +} diff --git a/components/pending_key/CMakeLists.txt b/components/pending_key/CMakeLists.txt new file mode 100755 index 0000000..6d08f10 --- /dev/null +++ b/components/pending_key/CMakeLists.txt @@ -0,0 +1,2 @@ +add_library(pending_key pending_key.cc) +add_subdirectory(pending_key_ut) diff --git a/components/pending_key/pending_key.cc b/components/pending_key/pending_key.cc new file mode 100755 index 0000000..6f61a9b --- /dev/null +++ b/components/pending_key/pending_key.cc @@ -0,0 +1,54 @@ +#include "pending_key.h" + +#include +#include +#include + +#include "debug.h" +#include "hash_combine.h" +#include "enum_range.h" +#include "cereal/types/memory.hpp" + +using namespace std; + +CEREAL_CLASS_VERSION(PendingKey, 0); + +static bool +protoHasPorts(IPProto proto) +{ + return (proto==IPPROTO_TCP) || (proto==IPPROTO_UDP); +} + +// Format a port numbers. Use a pair, becuase it depends on the protocl (only TCP/UDP have ports). +static ostream & +operator<<(ostream &os, pair pp) +{ + if (protoHasPorts(get<0>(pp))) { + os << "|" << get<1>(pp); + } + return os; +} + +ostream & +PendingKey::print(ostream &os) const +{ + if (getType() == IPType::UNINITIALIZED) return os << ""; + + return os << "<" << + getSrc() << " -> " << + getDst() << make_pair(getProto(), getDPort()) << + " " << static_cast(getProto()) << ">"; // Cast needed to print as a number. +} + +size_t +PendingKey::hash() const +{ + dbgAssert(src.type != IPType::UNINITIALIZED) << "PendingKey::hash was called on an uninitialized object"; + size_t seed = 0; + hashCombine(seed, static_cast(src.type)); + hashCombine(seed, src.proto); + hashCombine(seed, src); + hashCombine(seed, dst); + hashCombine(seed, dst.port); + return seed; +} diff --git a/components/pending_key/pending_key_ut/CMakeLists.txt b/components/pending_key/pending_key_ut/CMakeLists.txt new file mode 100755 index 0000000..e357d9f --- /dev/null +++ b/components/pending_key/pending_key_ut/CMakeLists.txt @@ -0,0 +1,5 @@ +add_unit_test( + pending_key_ut + "pending_key_ut.cc" + "connkey;pending_key" +) diff --git a/components/pending_key/pending_key_ut/pending_key_ut.cc b/components/pending_key/pending_key_ut/pending_key_ut.cc new file mode 100755 index 0000000..5dbcbbd --- /dev/null +++ b/components/pending_key/pending_key_ut/pending_key_ut.cc @@ -0,0 +1,143 @@ +#include +#include +#include + +#include "pending_key.h" +#include "cptest.h" + +using namespace std; +using namespace testing; + +class PendingKeyTest : public Test +{ +protected: + + PendingKey + get_pkey(string src_ip, string dst_ip, PortNumber d_port, IPProto ip_p) + { + return PendingKey(IPAddr::createIPAddr(src_ip).unpack(), IPAddr::createIPAddr(dst_ip).unpack(), d_port, ip_p); + } + + virtual void + SetUp() + { + cptestPrepareToDie(); + ck_v4 = get_pkey("1.1.1.1", "2.2.2.2", 80, 6); + ck_v6 = get_pkey("2000::1", "3000::2", 53, 17); + } + + virtual void + TearDown() + { + } + +public: + PendingKey ck_v4; + PendingKey ck_v6; +}; + + +TEST_F(PendingKeyTest, equality_v4) +{ + EXPECT_EQ(ck_v4, ck_v4); +} + +TEST_F(PendingKeyTest, equality_v6) +{ + EXPECT_EQ(ck_v6, ck_v6); +} + +TEST_F(PendingKeyTest, equality_mixed_versions) +{ + EXPECT_NE(ck_v4, ck_v6); +} + +TEST_F(PendingKeyTest, equality_mixed_versions_same_fields) +{ + PendingKey zero4 = get_pkey("0.0.0.0", "0.0.0.0", 0, 17); + PendingKey zero6 = get_pkey("0::0", "0::0", 0, 17); + EXPECT_NE(zero4, zero6); +} + +TEST_F(PendingKeyTest, equality_diff_only_in_ip) +{ + PendingKey 
k1 = get_pkey("1.1.1.1", "2.2.2.2", 0, 17); + PendingKey k2 = get_pkey("1.1.1.1", "3.3.3.3", 0, 17); + PendingKey k3 = get_pkey("4.4.4.4", "2.2.2.2", 0, 17); + EXPECT_NE(k1, k2); + EXPECT_NE(k1, k3); + EXPECT_NE(k2, k3); +} + +TEST_F(PendingKeyTest, equality_diff_only_in_port) +{ + PendingKey k1 = get_pkey("1.1.1.1", "2.2.2.2", 1, 17); + PendingKey k2 = get_pkey("1:1::1", "2:2::2", 1, 17); + PendingKey k3 = get_pkey("1.1.1.1", "2.2.2.2", 2, 17); + PendingKey k4 = get_pkey("1:1::1", "2:2::2", 2, 17); + EXPECT_NE(k1, k3); + EXPECT_NE(k2, k4); +} + +TEST_F(PendingKeyTest, equality_diff_only_in_proto) +{ + PendingKey k1 = get_pkey("1.1.1.1", "2.2.2.2", 2, 6); + PendingKey k2 = get_pkey("1.1.1.1", "2.2.2.2", 2, 17); + EXPECT_NE(k1, k2); +} + +TEST_F(PendingKeyTest, copy_operator) +{ + PendingKey ck4_copy = ck_v4; + PendingKey ck6_copy = ck_v6; + EXPECT_EQ(ck4_copy, ck_v4); + EXPECT_EQ(ck6_copy, ck_v6); +} + +TEST_F(PendingKeyTest, hash) +{ + PendingKey copy_v4 = ck_v4; + PendingKey copy_v6 = ck_v6; + EXPECT_EQ(copy_v4.hash(), ck_v4.hash()); + EXPECT_EQ(copy_v6.hash(), ck_v6.hash()); +} + +TEST_F(PendingKeyTest, formatting_v4) +{ + EXPECT_EQ(ToString(ck_v4), "<1.1.1.1 -> 2.2.2.2|80 6>"); +} + +TEST_F(PendingKeyTest, formatting_v6) +{ + string expected_str = "<2000::1 -> 3000::2|53 17>"; + EXPECT_EQ(ToString(ck_v6), expected_str); + + PendingKey src_extra_zeros = get_pkey("2000:0::0:1", "3000::2", 53, 17); + EXPECT_EQ(ToString(src_extra_zeros), expected_str); +} + +TEST_F(PendingKeyTest, formatting_no_ports) +{ + // Port number not printed for non-TCP/UDP (whether its zero or not) + PendingKey proto123 = get_pkey("2000:0::0:1", "3000::2", 0, 123); + PendingKey proto123_ports = get_pkey("2000:0::0:1", "3000::2", 333, 123); + EXPECT_EQ(ToString(proto123), "<2000::1 -> 3000::2 123>"); + EXPECT_NE(ToString(proto123_ports), "<2000::1 -> 3000::2|333 123>"); + + // Port number printed for TCP/UDP, even if its zero + PendingKey port0 = get_pkey("1.1.1.1", "2.2.2.2", 0, 6); + EXPECT_EQ(ToString(port0), "<1.1.1.1 -> 2.2.2.2|0 6>"); +} + +TEST_F(PendingKeyTest, death_hash_on_uninit) +{ + cptestPrepareToDie(); + PendingKey uninit; + EXPECT_DEATH(uninit.hash(), "PendingKey::hash was called on an uninitialized object"); +} + +TEST_F(PendingKeyTest, death_eqaulity_on_uninit) +{ + PendingKey uninit; + EXPECT_DEATH((void)(uninit == uninit), "Called on an uninitialized IPType object"); +} diff --git a/components/report_messaging/CMakeLists.txt b/components/report_messaging/CMakeLists.txt new file mode 100644 index 0000000..7c9597d --- /dev/null +++ b/components/report_messaging/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(report_messaging report_messaging.cc) + +add_subdirectory(report_messaging_ut) diff --git a/components/report_messaging/report_messaging.cc b/components/report_messaging/report_messaging.cc new file mode 100755 index 0000000..7c45e61 --- /dev/null +++ b/components/report_messaging/report_messaging.cc @@ -0,0 +1,47 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "report_messaging.h" + +#include + +#include "report/log_rest.h" + +using namespace std; + +static const string url = "/api/v1/agents/events"; + +ReportMessaging::~ReportMessaging() +{ + if (!Singleton::exists()) return; + + LogRest log_rest(report); + + auto messaging = Singleton::Consume::by(); + messaging->sendObjectWithPersistence( + log_rest, + I_Messaging::Method::POST, + url, + "", + true, + message_type_tag, + is_async_message + ); +} + +ReportMessaging & +ReportMessaging::operator<<(const LogField &field) +{ + report << field; + return *this; +} diff --git a/components/report_messaging/report_messaging_ut/CMakeLists.txt b/components/report_messaging/report_messaging_ut/CMakeLists.txt new file mode 100644 index 0000000..028a5dd --- /dev/null +++ b/components/report_messaging/report_messaging_ut/CMakeLists.txt @@ -0,0 +1,3 @@ +link_directories(${BOOST_ROOT}/lib) + +add_unit_test(report_messaging_ut "report_messaging_ut.cc" "report_messaging;report;singleton;-lboost_regex") \ No newline at end of file diff --git a/components/report_messaging/report_messaging_ut/report_messaging_ut.cc b/components/report_messaging/report_messaging_ut/report_messaging_ut.cc new file mode 100644 index 0000000..b37089d --- /dev/null +++ b/components/report_messaging/report_messaging_ut/report_messaging_ut.cc @@ -0,0 +1,334 @@ +#include "report_messaging.h" + +#include +#include +#include +#include + +#include "config.h" +#include "config_component.h" +#include "cptest.h" +#include "mock/mock_messaging.h" +#include "mock/mock_time_get.h" +#include "mock/mock_environment.h" +#include "cereal/archives/json.hpp" +#include "cereal/types/string.hpp" +#include "cereal/types/common.hpp" + +using namespace std; +using namespace testing; + +class ReportObject +{ +public: + ReportObject(int _integer_val, string _string_val, vector _vec_val) + : + integer_val(_integer_val), + string_val(_string_val), + vec_val(_vec_val) + { + } + + void + serialize(cereal::JSONOutputArchive &ar) const + { + ar(cereal::make_nvp("integerVal", integer_val)); + ar(cereal::make_nvp("stringVal", string_val)); + ar(cereal::make_nvp("vecVal", vec_val)); + } + + friend ostream & + operator<<(ostream &os, const ReportObject &) + { + return os; + } + +private: + int integer_val; + string string_val; + vector vec_val; +}; + +class ReportMessagingTest : public Test +{ +public: + ReportMessagingTest() + { + EXPECT_CALL(mock_time_get, getWalltime()).WillRepeatedly(Return(chrono::microseconds(0))); + EXPECT_CALL(mock_time_get, getWalltimeStr(_)).WillRepeatedly(Return("Best Time ever")); + } + + StrictMock mock_messaging; + StrictMock mock_time_get; + +private: + ConfigComponent config; +}; + +TEST_F(ReportMessagingTest, title_only) +{ + EXPECT_CALL( + mock_messaging, + mockSendPersistentMessage( + _, + "{\n" + " \"log\": {\n" + " \"eventTime\": \"Best Time ever\",\n" + " \"eventName\": \"test\",\n" + " \"eventSeverity\": \"Info\",\n" + " \"eventPriority\": \"Low\",\n" + " \"eventType\": \"Event Driven\",\n" + " \"eventLevel\": \"Log\",\n" + " \"eventLogLevel\": \"info\",\n" + " \"eventAudience\": \"Internal\",\n" + " \"eventAudienceTeam\": \"Agent Core\",\n" + " \"eventFrequency\": 0,\n" + " \"eventTags\": [\n" + " \"Access Control\"\n" + " ],\n" + " \"eventSource\": {\n" + " \"eventTraceId\": \"\",\n" + " \"eventSpanId\": \"\",\n" + " \"issuingEngineVersion\": \"\",\n" + " \"serviceName\": \"Unnamed Nano Service\"\n" + " },\n" + " \"eventData\": {\n" + " \"eventObject\": 1\n" + " }\n" + " }\n" + "}", + _, + _, + _, + _, + _ + ) + 
).WillOnce(Return(string())); + ReportMessaging("test", ReportIS::AudienceTeam::AGENT_CORE, 1, ReportIS::Tags::ACCESS_CONTROL); +} + +TEST_F(ReportMessagingTest, with_dynamic_fields) +{ + EXPECT_CALL( + mock_messaging, + mockSendPersistentMessage( + _, + "{\n" + " \"log\": {\n" + " \"eventTime\": \"Best Time ever\",\n" + " \"eventName\": \"test\",\n" + " \"eventSeverity\": \"Info\",\n" + " \"eventPriority\": \"Low\",\n" + " \"eventType\": \"Event Driven\",\n" + " \"eventLevel\": \"Log\",\n" + " \"eventLogLevel\": \"info\",\n" + " \"eventAudience\": \"Internal\",\n" + " \"eventAudienceTeam\": \"Agent Core\",\n" + " \"eventFrequency\": 0,\n" + " \"eventTags\": [\n" + " \"Access Control\"\n" + " ],\n" + " \"eventSource\": {\n" + " \"eventTraceId\": \"\",\n" + " \"eventSpanId\": \"\",\n" + " \"issuingEngineVersion\": \"\",\n" + " \"serviceName\": \"Unnamed Nano Service\"\n" + " },\n" + " \"eventData\": {\n" + " \"eventObject\": 1,\n" + " \"ASD\": \"QWE\"\n" + " }\n" + " }\n" + "}", + _, + _, + _, + _, + _ + ) + ).WillOnce(Return(string())); + ReportMessaging("test", ReportIS::AudienceTeam::AGENT_CORE, 1, ReportIS::Tags::ACCESS_CONTROL) + << LogField("ASD", "QWE"); +} + +TEST_F(ReportMessagingTest, custom_event_object) +{ + EXPECT_CALL( + mock_messaging, + mockSendPersistentMessage( + _, + "{\n" + " \"log\": {\n" + " \"eventTime\": \"Best Time ever\",\n" + " \"eventName\": \"test\",\n" + " \"eventSeverity\": \"Info\",\n" + " \"eventPriority\": \"Low\",\n" + " \"eventType\": \"Event Driven\",\n" + " \"eventLevel\": \"Log\",\n" + " \"eventLogLevel\": \"info\",\n" + " \"eventAudience\": \"Internal\",\n" + " \"eventAudienceTeam\": \"Agent Core\",\n" + " \"eventFrequency\": 0,\n" + " \"eventTags\": [\n" + " \"Access Control\"\n" + " ],\n" + " \"eventSource\": {\n" + " \"eventTraceId\": \"\",\n" + " \"eventSpanId\": \"\",\n" + " \"issuingEngineVersion\": \"\",\n" + " \"serviceName\": \"Unnamed Nano Service\"\n" + " },\n" + " \"eventData\": {\n" + " \"eventObject\": {\n" + " \"integerVal\": 1,\n" + " \"stringVal\": \"2\",\n" + " \"vecVal\": [\n" + " 1,\n" + " 2,\n" + " 3\n" + " ]\n" + " }\n" + " }\n" + " }\n" + "}", + _, + _, + _, + _, + _ + ) + ).WillOnce(Return(string())); + + ReportMessaging( + "test", + ReportIS::AudienceTeam::AGENT_CORE, + ReportObject(1, "2", { 1, 2, 3}), + ReportIS::Tags::ACCESS_CONTROL + ); +} + +TEST_F(ReportMessagingTest, custom_priority) +{ + EXPECT_CALL( + mock_messaging, + mockSendPersistentMessage( + _, + "{\n" + " \"log\": {\n" + " \"eventTime\": \"Best Time ever\",\n" + " \"eventName\": \"test\",\n" + " \"eventSeverity\": \"High\",\n" + " \"eventPriority\": \"Medium\",\n" + " \"eventType\": \"Event Driven\",\n" + " \"eventLevel\": \"Log\",\n" + " \"eventLogLevel\": \"info\",\n" + " \"eventAudience\": \"Internal\",\n" + " \"eventAudienceTeam\": \"Agent Core\",\n" + " \"eventFrequency\": 0,\n" + " \"eventTags\": [\n" + " \"Access Control\"\n" + " ],\n" + " \"eventSource\": {\n" + " \"eventTraceId\": \"\",\n" + " \"eventSpanId\": \"\",\n" + " \"issuingEngineVersion\": \"\",\n" + " \"serviceName\": \"Unnamed Nano Service\"\n" + " },\n" + " \"eventData\": {\n" + " \"eventObject\": {\n" + " \"integerVal\": 1,\n" + " \"stringVal\": \"2\",\n" + " \"vecVal\": [\n" + " 1,\n" + " 2,\n" + " 3\n" + " ]\n" + " }\n" + " }\n" + " }\n" + "}", + _, + _, + _, + _, + _ + ) + ).WillOnce(Return(string())); + + ReportMessaging( + "test", + ReportIS::AudienceTeam::AGENT_CORE, + ReportIS::Severity::HIGH, + ReportIS::Priority::MEDIUM, + ReportObject(1, "2", {1, 2, 3}), + 
ReportIS::Tags::ACCESS_CONTROL + ); +} + +TEST_F(ReportMessagingTest, with_env_details) +{ + StrictMock mock_env; + + Context context; + context.registerValue("Service Name", "Access Control App"); + context.registerValue("Service Version", "1.2.3.0.0"); + I_Environment::ActiveContexts active_context({&context}, true); + EXPECT_CALL(mock_env, getActiveContexts()).WillRepeatedly(ReturnRef(active_context)); + EXPECT_CALL(mock_env, getCurrentTrace()).WillOnce(Return(string("best trace"))); + EXPECT_CALL(mock_env, getCurrentSpan()).WillOnce(Return(string("best span"))); + + EXPECT_CALL( + mock_messaging, + mockSendPersistentMessage( + _, + "{\n" + " \"log\": {\n" + " \"eventTime\": \"Best Time ever\",\n" + " \"eventName\": \"test\",\n" + " \"eventSeverity\": \"High\",\n" + " \"eventPriority\": \"Medium\",\n" + " \"eventType\": \"Event Driven\",\n" + " \"eventLevel\": \"Log\",\n" + " \"eventLogLevel\": \"info\",\n" + " \"eventAudience\": \"Internal\",\n" + " \"eventAudienceTeam\": \"Agent Core\",\n" + " \"eventFrequency\": 0,\n" + " \"eventTags\": [\n" + " \"Access Control\"\n" + " ],\n" + " \"eventSource\": {\n" + " \"eventTraceId\": \"best trace\",\n" + " \"eventSpanId\": \"best span\",\n" + " \"issuingEngineVersion\": \"1.2.3.0.0\",\n" + " \"serviceName\": \"Access Control App\"\n" + " },\n" + " \"eventData\": {\n" + " \"eventObject\": {\n" + " \"integerVal\": 1,\n" + " \"stringVal\": \"2\",\n" + " \"vecVal\": [\n" + " 1,\n" + " 2,\n" + " 3\n" + " ]\n" + " }\n" + " }\n" + " }\n" + "}", + _, + _, + _, + _, + _ + ) + ).WillOnce(Return(string())); + + ReportMessaging( + "test", + ReportIS::AudienceTeam::AGENT_CORE, + ReportIS::Severity::HIGH, + ReportIS::Priority::MEDIUM, + ReportObject(1, "2", {1, 2, 3}), + ReportIS::Tags::ACCESS_CONTROL + ); +} diff --git a/components/security_apps/CMakeLists.txt b/components/security_apps/CMakeLists.txt new file mode 100644 index 0000000..8717429 --- /dev/null +++ b/components/security_apps/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(orchestration) +add_subdirectory(waap) diff --git a/components/security_apps/orchestration/CMakeLists.txt b/components/security_apps/orchestration/CMakeLists.txt new file mode 100755 index 0000000..5f46818 --- /dev/null +++ b/components/security_apps/orchestration/CMakeLists.txt @@ -0,0 +1,17 @@ +add_definitions(-DUSERSPACE) + +include_directories(include) + +add_library(orchestration orchestration_comp.cc hybrid_mode_telemetry.cc) +add_subdirectory(orchestration_tools) +add_subdirectory(modules) +add_subdirectory(downloader) +add_subdirectory(service_controller) +add_subdirectory(package_handler) +add_subdirectory(manifest_controller) +add_subdirectory(update_communication) +add_subdirectory(details_resolver) +add_subdirectory(health_check) +add_subdirectory(k8s_policy_gen) + +add_subdirectory(orchestration_ut) diff --git a/components/security_apps/orchestration/details_resolver/CMakeLists.txt b/components/security_apps/orchestration/details_resolver/CMakeLists.txt new file mode 100644 index 0000000..3389d65 --- /dev/null +++ b/components/security_apps/orchestration/details_resolver/CMakeLists.txt @@ -0,0 +1,3 @@ +include_directories(details_resolver_handlers) + +add_library(details_resolver details_resolver.cc details_resolving_handler.cc) diff --git a/components/security_apps/orchestration/details_resolver/details_resolver.cc b/components/security_apps/orchestration/details_resolver/details_resolver.cc new file mode 100644 index 0000000..5c4e358 --- /dev/null +++ 
b/components/security_apps/orchestration/details_resolver/details_resolver.cc @@ -0,0 +1,288 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "details_resolver.h" + +#include +#include +#include + +#include "details_resolving_handler.h" +#include "i_orchestration_tools.h" +#include "maybe_res.h" +#include "version.h" +#include "config.h" + +using namespace std; + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +class DetailsResolver::Impl + : + Singleton::Provide::From +{ +public: + Maybe getHostname() override; + Maybe getPlatform() override; + Maybe getArch() override; + + map getResolvedDetails() override; + + string getAgentVersion() override; + bool isKernelVersion3OrHigher() override; + bool isGwNotVsx() override; + bool isVersionEqualOrAboveR8110() override; + bool isReverseProxy() override; + Maybe> parseNginxMetadata() override; +#if defined(gaia) || defined(smb) + bool compareCheckpointVersion(int cp_version, std::function compare_operator) const override; +#endif // gaia || smb + +private: +#if defined(gaia) || defined(smb) + int getCheckpointVersion() const; +#endif // gaia || smb + + DetailsResolvingHanlder handler; +}; + +map +DetailsResolver::Impl::getResolvedDetails() +{ + return handler.getResolvedDetails(); +} + +Maybe +DetailsResolver::Impl::getHostname() +{ +#if defined(arm32_musl) || defined(openwrt) + auto host_name = DetailsResolvingHanlder::getCommandOutput("uname -a | awk '{print $(2)}'"); +#else // not arm32_musl || openwrt + auto host_name = DetailsResolvingHanlder::getCommandOutput("hostname"); +#endif // defined(arm32_musl) || defined(openwrt) + if (!host_name.ok()) return genError("Failed to load host name, Error: " + host_name.getErr()); + return host_name; +} + +Maybe +DetailsResolver::Impl::getPlatform() +{ +#if defined(gaia) + return string("gaia"); +#elif defined(arm32_rpi) + return string("glibc"); +#elif defined(arm32_musl) + return string("musl"); +#elif defined(smb_mrv_v1) + return string("smb_mrv_v1"); +#elif defined(smb_sve_v2) + return string("smb_sve_v2"); +#elif defined(smb_thx_v3) + return string("smb_thx_v3"); +#elif defined(openwrt) + return string("uclibc"); +#elif defined(arm64_linaro) + return string("arm64_linaro"); +#elif defined(alpine) + return string("alpine"); +#elif defined(arm64_trustbox) + return string("arm64_trustbox"); +#elif defined(linux) + return string("linux"); +#else + return genError("Failed to load platform details"); +#endif +} + +Maybe +DetailsResolver::Impl::getArch() +{ +#if defined(arm32_rpi) || defined(arm32_musl) || defined(openwrt) + auto architecture = DetailsResolvingHanlder::getCommandOutput("uname -a | awk '{print $(NF -1) }'"); +#else // not arm32_rpi || arm32_musl || openwrt + auto architecture = DetailsResolvingHanlder::getCommandOutput("arch"); +#endif // defined(arm32_rpi) || defined(arm32_musl) || defined(openwrt) + if (!architecture.ok()) return genError("Failed to load platform architecture, Error: " + architecture.getErr()); + return architecture; +} + +string 
+DetailsResolver::Impl::getAgentVersion() +{ + return Version::getFullVersion(); +} + +bool +DetailsResolver::Impl::isReverseProxy() +{ +#if defined(gaia) || defined(smb) + auto is_reverse_proxy = DetailsResolvingHanlder::getCommandOutput("cpprod_util CPPROD_IsConfigured CPwaap"); + if (is_reverse_proxy.ok()) { + return is_reverse_proxy.unpack().front() == '1'; + } +#endif + return false; +} + +bool +DetailsResolver::Impl::isKernelVersion3OrHigher() +{ + static const string cmd = + "clish -c 'show version os kernel' | awk '{print $4}' " + "| cut -d '.' -f 1 | awk -F: '{ if ( $1 >= 3 ) {print 1} else {print 0}}'"; + + auto is_gogo = DetailsResolvingHanlder::getCommandOutput(cmd); + if (is_gogo.ok()) { + return is_gogo.unpack().front() == '1'; + } + return false; +} + +bool +DetailsResolver::Impl::isGwNotVsx() +{ + static const string is_gw_cmd = "cpprod_util FwIsFirewallModule"; + static const string is_vsx_cmd = "cpprod_util FWisVSX"; + auto is_gw = DetailsResolvingHanlder::getCommandOutput(is_gw_cmd); + auto is_vsx = DetailsResolvingHanlder::getCommandOutput(is_vsx_cmd); + if (is_gw.ok() && is_vsx.ok()) { + return is_gw.unpack().front() == '1' && is_vsx.unpack().front() == '0'; + } + return false; +} + +#if defined(gaia) || defined(smb) +bool +DetailsResolver::Impl::compareCheckpointVersion(int cp_version, std::function compare_operator) const +{ + int curr_version = getCheckpointVersion(); + return compare_operator(curr_version, cp_version); +} + +int +DetailsResolver::Impl::getCheckpointVersion() const +{ +#ifdef gaia + static const string cmd = + "echo $CPDIR | awk -F'-' '{print $NF}' | cut -c 2- |" + " awk -F'.' '{ if( NF == 1 ) {print $1\"00\"} else {print $1$2} }'"; +#else // smb + static const string cmd = "sqlcmd 'select major,minor from cpver' |" + "awk '{if ($1 == \"major\") v += (substr($3,2) * 100);" + " if ($1 == \"minor\") v += $3; } END { print v}'"; + +#endif // gaia + auto version_out = DetailsResolvingHanlder::getCommandOutput(cmd); + int cp_version = 0; + if (version_out.ok()) { + stringstream version_stream(version_out.unpack()); + version_stream >> cp_version; + } + return cp_version; +} +#endif // gaia || smb + +bool +DetailsResolver::Impl::isVersionEqualOrAboveR8110() +{ +#if defined(gaia) || defined(smb) + return compareCheckpointVersion(8110, std::greater_equal()); +#endif + return false; +} + +Maybe> +DetailsResolver::Impl::parseNginxMetadata() +{ + auto output_path = getConfigurationWithDefault( + "/tmp/nginx_meta_data.txt", + "orchestration", + "Nginx metadata temp file" + ); + const string srcipt_exe_cmd = + getFilesystemPathConfig() + + "/scripts/cp-nano-makefile-generator.sh -f -o " + + output_path; + + dbgTrace(D_ORCHESTRATOR) << "Details resolver, srcipt exe cmd: " << srcipt_exe_cmd; + auto is_nginx_exist = DetailsResolvingHanlder::getCommandOutput("which nginx"); + if (!is_nginx_exist.ok() || is_nginx_exist.unpack().size() == 0) { + return genError("Nginx isn't installed"); + } + + auto script_output = DetailsResolvingHanlder::getCommandOutput(srcipt_exe_cmd); + if (!script_output.ok()) { + return genError("Failed to generate nginx metadata, Error: " + script_output.getErr()); + } + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->doesFileExist(output_path)) { + return genError("Failed to access nginx metadata file."); + } + + vector lines; + try { + ifstream input_stream(output_path); + if (!input_stream) { + return genError("Cannot open the file with nginx metadata, File: " + output_path); + } + + 
string line; + while (getline(input_stream, line)) { + lines.push_back(line); + } + input_stream.close(); + + orchestration_tools->removeFile(output_path); + } catch (const ifstream::failure &exception) { + dbgWarning(D_ORCHESTRATOR) + << "Cannot read the file with required nginx metadata." + << " File: " << output_path + << " Error: " << exception.what(); + } + + if (lines.size() == 0) return genError("Failed to read nginx metadata file"); + string nginx_version; + string config_opt; + string cc_opt; + + for(string &line : lines) { + if (line.size() == 0) continue; + if (line.find("RELEASE_VERSION") != string::npos) continue; + if (line.find("--with-cc=") != string::npos) continue; + if (line.find("NGINX_VERSION") != string::npos) { + auto eq_index = line.find("="); + nginx_version = "nginx-" + line.substr(eq_index + 1); + continue; + } + if (line.find("EXTRA_CC_OPT") != string::npos) { + auto eq_index = line.find("="); + cc_opt = line.substr(eq_index + 1); + continue; + } + if (line.find("CONFIGURE_OPT") != string::npos) continue; + if (line.back() == '\\') line.pop_back(); + config_opt += line; + } + return make_tuple(config_opt, cc_opt, nginx_version); +} + +DetailsResolver::DetailsResolver() : Component("DetailsResolver"), pimpl(make_unique()) {} + +DetailsResolver::~DetailsResolver() {} + +void +DetailsResolver::preload() +{ + registerExpectedConfiguration("orchestration", "Details resolver time out"); +} diff --git a/components/security_apps/orchestration/details_resolver/details_resolver_handlers/checkpoint_product_handlers.h b/components/security_apps/orchestration/details_resolver/details_resolver_handlers/checkpoint_product_handlers.h new file mode 100755 index 0000000..d66055c --- /dev/null +++ b/components/security_apps/orchestration/details_resolver/details_resolver_handlers/checkpoint_product_handlers.h @@ -0,0 +1,183 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
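+
+// The functions in this header parse raw shell-command output or configuration-file
+// contents into attribute values, returning a genError (via Maybe) when the expected
+// data is missing. They are bound to attribute keys by the SHELL_CMD_HANDLER and
+// FILE_CONTENT_HANDLER tables in details_resolver_impl.h and are invoked from
+// details_resolving_handler.cc when the agent details are resolved.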
+ +#ifndef __CHECKPOINT_PRODUCT_HANDLERS_H__ +#define __CHECKPOINT_PRODUCT_HANDLERS_H__ + +#include +#include + +#if defined(gaia) || defined(smb) +Maybe +checkHasSDWan(const string &command_output) +{ + if (command_output.front() == '1') return string("true"); + + return genError("Current host does not have SDWAN capability"); +} + +Maybe +getMgmtObjType(const string &command_output) +{ + if (!command_output.empty()) { + if (command_output[0] == '1') return string("management"); + if (command_output[0] == '0') return string("gateway"); + } + + return genError("Object type was not found"); +} + +Maybe +chopHeadAndTail(const string &str, const string &prefix, const string &suffix) +{ + if (str.size() < prefix.size() + suffix.size()) return genError("String too short"); + if (str.compare(0, prefix.size(), prefix)) return genError("Prefix mismatch"); + if (str.compare(str.size() - suffix.size(), suffix.size(), suffix)) return genError("Suffix mismatch"); + + return str.substr(prefix.size(), str.size() - prefix.size() - suffix.size()); +} + +Maybe +getMgmtObjAttr(shared_ptr file_stream, const string &attr) +{ + string line; + while (getline(*file_stream, line)) { + size_t attr_pos = line.find(attr); + if (attr_pos == string::npos) continue; + line = line.substr(attr_pos + attr.size()); + return chopHeadAndTail(line, "(", ")"); + } + return genError("Object attribute was not found. Attr: " + attr); +} + +Maybe +getMgmtObjUid(shared_ptr file_stream) +{ + return getMgmtObjAttr(file_stream, "uuid "); +} + +Maybe +getMgmtObjName(shared_ptr file_stream) +{ + return getMgmtObjAttr(file_stream, "name "); +} + +Maybe +getMgmtParentObjAttr(shared_ptr file_stream, const string &parent_obj, const string &attr) +{ + string line; + bool found_parent_obj = false; + while (getline(*file_stream, line)) { + size_t parent_obj_pos = line.find(parent_obj); + if (parent_obj_pos != string::npos) found_parent_obj = true; + if (!found_parent_obj) continue; + + size_t attr_pos = line.find(attr); + if (attr_pos == string::npos) continue; + line = line.substr(attr_pos + attr.size()); + return line; + } + return genError("Parent object attribute was not found. 
Attr: " + attr); +} + +Maybe +getMgmtParentObjUid(shared_ptr file_stream) +{ + auto maybe_unparsed_uid = getMgmtParentObjAttr(file_stream, "cluster_object", "Uid "); + if (!maybe_unparsed_uid.ok()) { + return maybe_unparsed_uid; + } + const string &unparsed_uid = maybe_unparsed_uid.unpack(); + auto maybe_uid = chopHeadAndTail(unparsed_uid, "(\"{", "}\")"); + if (!maybe_uid.ok()) { + return maybe_uid; + } + string uid = maybe_uid.unpack(); + transform(uid.begin(), uid.end(), uid.begin(), ::tolower); + return uid; +} + +Maybe +getMgmtParentObjName(shared_ptr file_stream) +{ + auto maybe_unparsed_name = getMgmtParentObjAttr(file_stream, "cluster_object", "Name "); + if (!maybe_unparsed_name.ok()) { + return maybe_unparsed_name; + } + const string &unparsed_name = maybe_unparsed_name.unpack(); + return chopHeadAndTail(unparsed_name, "(", ")"); +} +#endif // gaia || smb + +Maybe +getOsRelease(shared_ptr file_stream) +{ + string line; + while (getline(*file_stream, line)) { + if (line.find("Check Point") != string::npos) return line; + + static const string prety_name_attr = "PRETTY_NAME="; + size_t pretty_name_idx = line.find(prety_name_attr); + if (pretty_name_idx == string::npos) continue; + line = line.substr(pretty_name_idx + prety_name_attr.size()); + if (line.front() == '"') line.erase(0, 1); + if (line.back() == '"') line.pop_back(); + return line; + } + + return genError("Os release was not found"); +} + +#if defined(alpine) +string & +ltrim(string &s) +{ + auto it = find_if( + s.begin(), + s.end(), + [](char c) { return !isspace(c, locale::classic()); } + ); + s.erase(s.begin(), it); + return s; +} + +string & +rtrim(string &s) +{ + auto it = find_if( + s.rbegin(), + s.rend(), + [](char c) { return !isspace(c, locale::classic()); } + ); + s.erase(it.base(), s.end()); + return s; +} + +string & +trim(string &s) +{ + return ltrim(rtrim(s)); +} + +Maybe +getCPAlpineTag(shared_ptr file_stream) +{ + string line; + while (getline(*file_stream, line)) { + if (trim(line) != "") return line; + } + return genError("Alpine tag was not found"); +} +#endif // alpine + +#endif // __CHECKPOINT_PRODUCT_HANDLERS_H__ diff --git a/components/security_apps/orchestration/details_resolver/details_resolver_handlers/details_resolver_impl.h b/components/security_apps/orchestration/details_resolver/details_resolver_handlers/details_resolver_impl.h new file mode 100755 index 0000000..97df217 --- /dev/null +++ b/components/security_apps/orchestration/details_resolver/details_resolver_handlers/details_resolver_impl.h @@ -0,0 +1,72 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __DETAILS_RESOLVER_HANDLER_CC__ +#error details_resolver_handlers/details_resolver_impl.h should not be included directly. 
+#endif // __DETAILS_RESOLVER_HANDLER_CC__ + +// use SHELL_CMD_HANDLER(key as string, shell command as string, ptr to Maybe handler(const string&)) +// to return a string value for an attribute key based on a logic executed in a handler that receives +// shell command execution output as its input +#ifdef SHELL_CMD_HANDLER + +#if defined(gaia) || defined(smb) +SHELL_CMD_HANDLER("cpProductIntegrationMgmtObjectType", "cpprod_util CPPROD_IsMgmtMachine", getMgmtObjType) +SHELL_CMD_HANDLER("hasSDWan", "[ -f $FWDIR/bin/sdwan_steering ] && echo '1' || echo '0'", checkHasSDWan) +#endif //gaia + +#endif // SHELL_CMD_HANDLER + + +// use SHELL_CMD_OUTPUT(key as string, shell command as string) to return a shell command output as the value +// for a given key +#ifdef SHELL_CMD_OUTPUT +SHELL_CMD_OUTPUT("kernel_version", "uname -r") +SHELL_CMD_OUTPUT("helloWorld", "cat /tmp/agentHelloWorld 2>/dev/null") +#endif // SHELL_CMD_OUTPUT + + +// use FILE_CONTENT_HANDLER(key as string, path to file as string, ptr to Maybe handler(ifstream&)) +// to return a string value for an attribute key based on a logic executed in a handler that receives file as input +#ifdef FILE_CONTENT_HANDLER + +#if defined(alpine) +FILE_CONTENT_HANDLER("alpine_tag", "/usr/share/build/cp-alpine-tag", getCPAlpineTag) +#endif // alpine +#if defined(gaia) || defined(smb) +FILE_CONTENT_HANDLER("os_release", "/etc/cp-release", getOsRelease) +FILE_CONTENT_HANDLER( + "cpProductIntegrationMgmtObjectUid", + (getenv("FWDIR") ? string(getenv("FWDIR")) : "") + "/database/myown.C", + getMgmtObjUid +) +FILE_CONTENT_HANDLER( + "cpProductIntegrationMgmtObjectName", + (getenv("FWDIR") ? string(getenv("FWDIR")) : "") + "/database/myown.C", + getMgmtObjName +) +FILE_CONTENT_HANDLER( + "cpProductIntegrationMgmtParentObjectUid", + (getenv("FWDIR") ? string(getenv("FWDIR")) : "") + "/database/myself_objects.C", + getMgmtParentObjUid +) +FILE_CONTENT_HANDLER( + "cpProductIntegrationMgmtParentObjectName", + (getenv("FWDIR") ? string(getenv("FWDIR")) : "") + "/database/myself_objects.C", + getMgmtParentObjName +) +#else // !(gaia || smb) +FILE_CONTENT_HANDLER("os_release", "/etc/os-release", getOsRelease) +#endif // gaia || smb + +#endif // FILE_CONTENT_HANDLER diff --git a/components/security_apps/orchestration/details_resolver/details_resolving_handler.cc b/components/security_apps/orchestration/details_resolver/details_resolving_handler.cc new file mode 100755 index 0000000..f525f15 --- /dev/null +++ b/components/security_apps/orchestration/details_resolver/details_resolving_handler.cc @@ -0,0 +1,128 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
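+
+// This file builds its attribute tables with an X-macro pattern: details_resolver_impl.h
+// is included twice below, once with SHELL_CMD_HANDLER / SHELL_CMD_OUTPUT defined to
+// populate shell_command_handlers, and once with FILE_CONTENT_HANDLER defined to populate
+// file_content_handlers. For example, the entry SHELL_CMD_OUTPUT("kernel_version", "uname -r")
+// from details_resolver_impl.h expands into the map entry
+//     {"kernel_version", {"uname -r", ShellCommandHandler([](const string &s) { return s; })}},
+// i.e. a handler that returns the command output unchanged.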
+ +#include "details_resolving_handler.h" + +#include +#include +#include +#include +#include + +#include "maybe_res.h" +#include "enum_array.h" +#include "i_shell_cmd.h" +#include "config.h" + +using namespace std; + +USE_DEBUG_FLAG(D_AGENT_DETAILS); + +using ShellCommandHandler = function(const string &raw_otput)>; +using FileContentHandler = function(shared_ptr file_otput)>; + +#define __DETAILS_RESOLVER_HANDLER_CC__ + +#include "checkpoint_product_handlers.h" + +class DetailsResolvingHanlder::Impl +{ +public: + map getResolvedDetails() const; + static Maybe getCommandOutput(const string &cmd); + +private: +#define SHELL_CMD_OUTPUT(ATTRIBUTE, COMMAND) SHELL_CMD_HANDLER(ATTRIBUTE, COMMAND, [](const string &s) { return s; }) +#define SHELL_CMD_HANDLER(ATTRIBUTE, COMMAND, HANDLER) {ATTRIBUTE, {COMMAND, ShellCommandHandler(HANDLER)}}, + map> shell_command_handlers = { + #include "details_resolver_impl.h" + }; +#undef SHELL_CMD_OUTPUT +#undef SHELL_CMD_HANDLER + +#define FILE_CONTENT_HANDLER(ATTRIBUTE, FILE, HANDLER) {ATTRIBUTE, {FILE, FileContentHandler(HANDLER)}}, + map> file_content_handlers = { + #include "details_resolver_impl.h" + }; +#undef FILE_CONTENT_HANDLER +}; + +map +DetailsResolvingHanlder::Impl::getResolvedDetails() const +{ + map resolved_details; + for (auto shell_handler : shell_command_handlers) { + const string &attr = shell_handler.first; + const string &command = shell_handler.second.first; + ShellCommandHandler handler = shell_handler.second.second; + + Maybe shell_command_output = getCommandOutput(command); + if (!shell_command_output.ok()) continue; + Maybe handler_ret = handler(*shell_command_output); + if (handler_ret.ok()) resolved_details[attr] = *handler_ret; + } + + for (auto file_handler : file_content_handlers) { + const string &attr = file_handler.first; + const string &path = file_handler.second.first; + FileContentHandler handler = file_handler.second.second; + + shared_ptr in_file = make_shared(path); + if (!in_file->is_open()) { + dbgWarning(D_AGENT_DETAILS) << "Could not open file for processing. Path: " << path; + continue; + } + + dbgDebug(D_AGENT_DETAILS) << "Successfully opened file for processing. 
Path: " << path; + if (in_file->peek() != ifstream::traits_type::eof()) { + Maybe handler_ret = handler(in_file); + if (handler_ret.ok()) resolved_details[attr] = *handler_ret; + } + in_file->close(); + } + + I_AgentDetailsReporter *reporter = Singleton::Consume::by(); + reporter->addAttr(resolved_details); + + return resolved_details; +} + +Maybe +DetailsResolvingHanlder::Impl::getCommandOutput(const string &cmd) +{ + I_ShellCmd *shell = Singleton::Consume::by(); + uint32_t timeout = getConfigurationWithDefault(5000, "orchestration", "Details resolver time out"); + auto result = shell->getExecOutput(cmd, timeout); + if (!result.ok()) return result; + + auto unpacked_result = result.unpack(); + if (unpacked_result.back() == '\n') unpacked_result.pop_back(); + + return unpacked_result; +} + +DetailsResolvingHanlder::DetailsResolvingHanlder() : pimpl(make_unique()) {} +DetailsResolvingHanlder::~DetailsResolvingHanlder() {} + + +map +DetailsResolvingHanlder::getResolvedDetails() const +{ + return pimpl->getResolvedDetails(); +} + +Maybe +DetailsResolvingHanlder::getCommandOutput(const string &cmd) +{ + return DetailsResolvingHanlder::Impl::getCommandOutput(cmd); +} diff --git a/components/security_apps/orchestration/details_resolver/details_resolving_handler.h b/components/security_apps/orchestration/details_resolver/details_resolving_handler.h new file mode 100755 index 0000000..7ffe122 --- /dev/null +++ b/components/security_apps/orchestration/details_resolver/details_resolving_handler.h @@ -0,0 +1,41 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __DETAILS_RESOLVING_HANDLER_H__ +#define __DETAILS_RESOLVING_HANDLER_H__ + +#include +#include + +#include "i_shell_cmd.h" +#include "i_agent_details_reporter.h" + +class DetailsResolvingHanlder + : + Singleton::Consume, + Singleton::Consume +{ +public: + DetailsResolvingHanlder(); + ~DetailsResolvingHanlder(); + + std::map getResolvedDetails() const; + + static Maybe getCommandOutput(const std::string &cmd); + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __DETAILS_RESOLVING_HANDLER_H__ diff --git a/components/security_apps/orchestration/downloader/CMakeLists.txt b/components/security_apps/orchestration/downloader/CMakeLists.txt new file mode 100755 index 0000000..b7b14b9 --- /dev/null +++ b/components/security_apps/orchestration/downloader/CMakeLists.txt @@ -0,0 +1,5 @@ +ADD_DEFINITIONS(-Wno-deprecated-declarations -Dalpine) + +add_library(orchestration_downloader curl_client.cc downloader.cc http_client.cc https_client.cc) + +add_subdirectory(downloader_ut) diff --git a/components/security_apps/orchestration/downloader/curl_client.cc b/components/security_apps/orchestration/downloader/curl_client.cc new file mode 100755 index 0000000..270e5ca --- /dev/null +++ b/components/security_apps/orchestration/downloader/curl_client.cc @@ -0,0 +1,438 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "curl_client.h" + +#include +#if defined(alpine) +#include +#else +#include +#endif //ifdef alpine +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" +#include "url_parser.h" +#include "debug.h" +#include "sasal.h" +#include "scope_exit.h" + +USE_DEBUG_FLAG(D_HTTP_REQUEST); + +using namespace std; + +SASAL_START // Orchestration - Communication +// LCOV_EXCL_START Reason: Depends on real download server. + +class CurlGlobalInit +{ +public: + CurlGlobalInit() { curl_global_init(CURL_GLOBAL_DEFAULT); } + ~CurlGlobalInit() { curl_global_cleanup(); } +}; +static CurlGlobalInit global_curl_handle; + +HttpCurl::HttpCurl( + const URLParser &_url, + ofstream &_out_file, + const string &_bearer, + const Maybe &proxy_url, + const Maybe &proxy_port, + const Maybe &proxy_auth) + : + url(_url), + out_file(_out_file), + bearer(_bearer), + curl(unique_ptr>(curl_easy_init(), curl_easy_cleanup)) +{ + string port = url.getPort(); + if (!port.empty()) + { + curl_url = url.getBaseURL().unpack() + ":" + port + url.getQuery(); + } else + { + curl_url = url.getBaseURL().unpack() + url.getQuery(); + } + + if (proxy_url.ok()) + { + //Update curl proxy + if (!proxy_port.ok()) + { + dbgWarning(D_HTTP_REQUEST) + << "Invalid proxy port, CURL default port will be used instead. 
Error: " + << proxy_port.getErr(); + proxy = proxy_url.unpack(); + } else + { + proxy = proxy_url.unpack() + ":" + to_string(proxy_port.unpack()); + } + } + if (proxy_auth.ok()) + { + I_Encryptor *encryptor = Singleton::Consume::by(); + proxy_credentials = "Proxy-Authorization: Basic " + encryptor->base64Encode(proxy_auth.unpack()); + } +} + +HttpCurl::HttpCurl(const HttpCurl &other) + : + url(other.url), + out_file(other.out_file), + bearer(other.bearer), + proxy(other.proxy), + proxy_credentials(other.proxy_credentials), + curl(unique_ptr>(curl_easy_init(), curl_easy_cleanup)) + { + } + +void +HttpCurl::setCurlOpts(long timeout, HTTP_VERSION http_version) +{ + struct curl_slist *headers = NULL; + struct curl_slist *proxy_headers = NULL; + CURL *curl_handle = curl.get(); + + //HTTP options + curl_easy_setopt(curl_handle, CURLOPT_HTTP_VERSION, http_version); + + //SSL options + curl_easy_setopt(curl_handle, CURLOPT_SSL_VERIFYPEER, 0L); + + //Header options + curl_easy_setopt(curl_handle, CURLOPT_URL, curl_url.c_str()); + curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, writeResponseCallback); + curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, &out_file); + curl_easy_setopt(curl_handle, CURLOPT_TIMEOUT, timeout); + headers = curl_slist_append(headers, "Accept: */*"); + string auth = string("Authorization: Bearer ") + bearer; + headers = curl_slist_append(headers, auth.c_str()); + headers = curl_slist_append(headers, "User-Agent: Infinity Next (a7030abf93a4c13)"); + string uuid_header = string("X-Trace-Id: ") + TraceIdGenerator::generateTraceId(); + headers = curl_slist_append(headers, "Connection: close"); + headers = curl_slist_append(headers, uuid_header.c_str()); + + //Proxy options + if (!proxy.empty()) + { + curl_easy_setopt(curl_handle, CURLOPT_PROXY, proxy.c_str()); + if (!proxy_credentials.empty()) + { + proxy_headers = curl_slist_append(proxy_headers, proxy_credentials.c_str()); + //Apply proxy headers + curl_easy_setopt(curl_handle, CURLOPT_PROXYHEADER, proxy_headers); + } + dbgTrace(D_HTTP_REQUEST) << "Using Proxy: " << proxy; + } + + //Apply headers + curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, headers); +} + +bool +HttpCurl::connect() +{ + // Response information. + long http_code; + char errorstr[CURL_ERROR_SIZE]; + CURLcode res; + stringstream response_header; + + CURL *curl_handle = curl.get(); + + auto __scope_exit = make_scope_exit( + [this] () { + out_file.flush(); + out_file.close(); + } + ); + + //Debug options + curl_easy_setopt(curl_handle, CURLOPT_VERBOSE, 1L); + curl_easy_setopt(curl_handle, CURLOPT_DEBUGFUNCTION, trace_http_request); + curl_easy_setopt(curl_handle, CURLOPT_DEBUGDATA, static_cast(&response_header)); + curl_easy_setopt(curl_handle, CURLOPT_ERRORBUFFER, errorstr); + + // Perform the request, res will get the return code + res = curl_easy_perform(curl_handle); + if (res != CURLE_OK) { + dbgWarning(D_HTTP_REQUEST) << "Failed to perform CURL request. CURL error " << string(errorstr); + dbgWarning(D_HTTP_REQUEST) << "CURL result " + string(curl_easy_strerror(res)); + print_response_header(response_header); + + return false; + } + + curl_easy_getinfo(curl_handle, CURLINFO_RESPONSE_CODE, &http_code); + + if (http_code != 200){ + dbgWarning(D_HTTP_REQUEST) << "Failed to connect. 
Error code: " + to_string(http_code); + print_response_header(response_header); + + return false; + } + + dbgTrace(D_HTTP_REQUEST) << "CURL HTTP request successfully completed."; + + return true; +} + +int +HttpCurl::trace_http_request( + CURL *, + curl_infotype type, + char *data, + size_t, + void *opq) +{ + stringstream *response_header = static_cast(opq); + switch (type) + { + case CURLINFO_HEADER_OUT: + dbgTrace(D_HTTP_REQUEST) + << "=> Sending the following HTTP request:\n" + << string(data); + break; + case CURLINFO_HEADER_IN: + if (!response_header) + { + // Should never reach this if. But just in case. + // The data will be printed at chunks in this case + dbgError(D_HTTP_REQUEST) + << "<= Received the following HTTP response header (should not reach here):\n" + << string(data); + } else + { + // The response header Will be printed at once later after curl_easy_perform. + // And after assembling all the response header chunks. + *response_header << string(data); + } + break; + default: + return 0; + } + + return 0; +} + +u_int +HttpCurl::writeResponseCallback( + const char *in_buf, + uint num_of_messages, + uint size_of_data, + ostream out_stream) +{ + const unsigned long total_bytes(num_of_messages * size_of_data); + out_stream.write(in_buf, total_bytes); + return total_bytes; +} + +void +HttpCurl::print_response_header(stringstream &stream) +{ + string line; + istringstream header_stream(stream.str()); + stringstream header_lines; + int lines_to_print = 10; + int i = 0; + + while (getline(header_stream, line) && i < lines_to_print) { + header_lines << line << '\n'; + ++i; + } + + dbgWarning(D_HTTP_REQUEST) + << "<= Received the following HTTP response header:\n" + << header_lines.str(); +} + +HttpsCurl::HttpsCurl( + const URLParser &_url, + ofstream &_out_file, + const string &_bearer, + const Maybe &proxy_url, + const Maybe &proxy_port, + const Maybe &proxy_auth, + const string &_ca_path) : + HttpCurl(_url, _out_file, _bearer, proxy_url, proxy_port, proxy_auth), + ca_path(_ca_path) {} + +HttpsCurl::HttpsCurl(const HttpsCurl &other) : + HttpCurl(other), + ca_path(other.ca_path) {} + +void +HttpsCurl::setCurlOpts(long timeout, HTTP_VERSION http_version) +{ + struct curl_slist *headers = NULL; + struct curl_slist *proxy_headers = NULL; + CURL *curl_handle = curl.get(); + + URLProtocol protocol = url.getProtocol(); + if (protocol == URLProtocol::HTTPS) + { + if (curl_url.find("https://") == string::npos) + { + //Append https:// + curl_url = "https://" + curl_url; + } + } + + //HTTP options + curl_easy_setopt(curl_handle, CURLOPT_HTTP_VERSION, http_version); + + //SSL options + if (getProfileAgentSettingWithDefault( + false, + "agent.config.message.ignoreSslValidation") == false) + { + curl_easy_setopt(curl_handle, CURLOPT_SSL_VERIFYPEER, 1L); + curl_easy_setopt(curl_handle, CURLOPT_SSL_CTX_FUNCTION, ssl_ctx_verify_certificate); + curl_easy_setopt(curl_handle, CURLOPT_SSL_CTX_DATA, static_cast(&bearer)); + } else + { + curl_easy_setopt(curl_handle, CURLOPT_SSL_VERIFYPEER, 0L); + dbgWarning(D_HTTP_REQUEST) << "Ignoring SSL validation"; + } + + //Header options + curl_easy_setopt(curl_handle, CURLOPT_URL, curl_url.c_str()); + curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, writeResponseCallback); + curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, &out_file); + curl_easy_setopt(curl_handle, CURLOPT_TIMEOUT, timeout); + curl_easy_setopt(curl_handle, CURLOPT_CAINFO, ca_path.c_str()); + headers = curl_slist_append(headers, "Accept: */*"); + string auth = string("Authorization: Bearer 
") + bearer; + headers = curl_slist_append(headers, auth.c_str()); + headers = curl_slist_append(headers, "User-Agent: Infinity Next (a7030abf93a4c13)"); + string uuid_header = string("X-Trace-Id: ") + TraceIdGenerator::generateTraceId(); + headers = curl_slist_append(headers, "Connection: close"); + headers = curl_slist_append(headers, uuid_header.c_str()); + + // Proxy options + if (!proxy.empty()) + { + curl_easy_setopt(curl_handle, CURLOPT_PROXY, proxy.c_str()); + if (!proxy_credentials.empty()) + { + proxy_headers = curl_slist_append(proxy_headers, proxy_credentials.c_str()); + //Apply proxy headers + curl_easy_setopt(curl_handle, CURLOPT_PROXYHEADER, proxy_headers); + } + dbgTrace(D_HTTP_REQUEST) << "Using Proxy : " << proxy; + } + + //Apply headers + curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, headers); +} + +int +HttpsCurl::verify_certificate(int preverify_ok, X509_STORE_CTX *ctx) +{ + switch (X509_STORE_CTX_get_error(ctx)) + { + case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT: + dbgWarning(D_HTTP_REQUEST) << "SSL verification error: X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT"; + break; + case X509_V_ERR_CERT_NOT_YET_VALID: + case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD: + dbgWarning(D_HTTP_REQUEST) << "SSL verification error: Certificate not yet valid"; + break; + case X509_V_ERR_CERT_HAS_EXPIRED: + case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD: + dbgWarning(D_HTTP_REQUEST) << "Certificate expired"; + break; + case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT: + case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN: + dbgDebug(D_HTTP_REQUEST) << "Self signed certificate in chain"; + if (getConfigurationWithDefault(false, "orchestration", "Self signed certificates acceptable")) { + preverify_ok = true; + } + break; + default: + if (!preverify_ok) { + dbgWarning(D_HTTP_REQUEST) + << "Certificate verification error number: " + << X509_STORE_CTX_get_error(ctx); + } + break; + } + + return preverify_ok; +} + +CURLcode +HttpsCurl::ssl_ctx_verify_certificate(CURL *, void *sslctx, void *opq) +{ + SSL_CTX *ctx = (SSL_CTX *) sslctx; + string *token_ptr = static_cast(opq); + if(!token_ptr) + { + dbgWarning(D_HTTP_REQUEST) << "Invalid token (bearer) was received"; + return CURLE_BAD_FUNCTION_ARGUMENT; + } + string token = *token_ptr; + + if (token.empty()) + { + return CURLE_OK; + } + + SSL_CTX_set_verify( + ctx, + SSL_VERIFY_FAIL_IF_NO_PEER_CERT | SSL_VERIFY_PEER, + verify_certificate + ); + + return CURLE_OK; +} + +string +TraceIdGenerator::generateRandomString(uint length) +{ + string dst(length, 0); + static thread_local mt19937 range(random_device{}()); + + auto randchar = [&]() -> char + { + static const char charset[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + static const size_t size = (sizeof(charset) - 1); + return charset[ range() % size ]; + }; + + generate_n(dst.begin(), length, randchar); + + return dst; +} + +string +TraceIdGenerator::generateTraceId() +{ + string part1 = generateRandomString(8); + string part2 = generateRandomString(4); + string part3 = generateRandomString(4); + string part4 = generateRandomString(4); + string part5 = generateRandomString(12); + return string(part1 + "-" + part2 + "-" + part3 + "-" + part4 + "-" + part5); +} + +SASAL_END diff --git a/components/security_apps/orchestration/downloader/curl_client.h b/components/security_apps/orchestration/downloader/curl_client.h new file mode 100755 index 0000000..14b78cb --- /dev/null +++ b/components/security_apps/orchestration/downloader/curl_client.h @@ -0,0 +1,115 @@ +// Copyright (C) 2022 Check Point Software Technologies 
Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#if defined(alpine) +#include +#else +#include +#endif //ifdef alpine + +#include +#include +#include +#include +#include + +#include "i_encryptor.h" +#include "scope_exit.h" +#include "url_parser.h" +#include "sasal.h" + +USE_DEBUG_FLAG(D_HTTP_REQUEST); + +SASAL_START // Orchestration - Communication +// LCOV_EXCL_START Reason: Depends on real download server. + +enum class HTTP_VERSION +{ + HTTP_VERSION_NONE = CURL_HTTP_VERSION_NONE, //libcurl will use whatever it thinks fit. + HTTP_VERSION_1_0 = CURL_HTTP_VERSION_1_0, + HTTP_VERSION_1_1 = CURL_HTTP_VERSION_1_1, + HTTP_VERSION_2_0 = CURL_HTTP_VERSION_2_0 +}; + +class TraceIdGenerator +{ +public: + static std::string generateTraceId(); +private: + static std::string generateRandomString(uint length); +}; + +class HttpCurl : public Singleton::Consume +{ +public: + HttpCurl( + const URLParser &_url, + std::ofstream &_out_file, + const std::string &_bearer, + const Maybe &proxy_url, + const Maybe &proxy_port, + const Maybe &proxy_auth); + + HttpCurl(const HttpCurl &other); + + virtual void setCurlOpts(long timeout = 60L, HTTP_VERSION http_version = HTTP_VERSION::HTTP_VERSION_1_1); + virtual bool connect(); + +protected: + static int trace_http_request( + CURL *handle, + curl_infotype type, + char *data, + size_t size, + void *userptr); + static u_int writeResponseCallback( + const char *in_buf, + uint num_of_messages, + uint size_of_data, + std::ostream out_stream); + void print_response_header(std::stringstream &stream); + + const URLParser& url; + std::ofstream &out_file; + std::string bearer; + std::string proxy; + std::string proxy_credentials; + std::unique_ptr> curl; + std::string curl_url; +}; + +class HttpsCurl : public HttpCurl +{ +public: + HttpsCurl( + const URLParser &_url, + std::ofstream &_out_file, + const std::string &_bearer, + const Maybe &proxy_url, + const Maybe &proxy_port, + const Maybe &proxy_auth, + const std::string &_ca_path); + + HttpsCurl(const HttpsCurl& other); + + static CURLcode ssl_ctx_verify_certificate(CURL *curl, void *ssl_ctx, void *opq); + static int verify_certificate(int preverify_ok, X509_STORE_CTX *ctx); + void setCurlOpts(long timeout = 60L, HTTP_VERSION http_version = HTTP_VERSION::HTTP_VERSION_1_1) override; + +private: + std::string ca_path; +}; + +SASAL_END diff --git a/components/security_apps/orchestration/downloader/downloader.cc b/components/security_apps/orchestration/downloader/downloader.cc new file mode 100755 index 0000000..3c8e551 --- /dev/null +++ b/components/security_apps/orchestration/downloader/downloader.cc @@ -0,0 +1,387 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
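// --- Illustrative sketch (not part of the patch): intended use of the curl clients declared in
// curl_client.h above. The URL, bearer token, and CA bundle path are placeholders; the Maybe<>
// template arguments of the proxy parameters are stripped in this extract, so std::string for the
// proxy host/credentials and uint16_t for the proxy port are assumed here.
#include <fstream>
#include "curl_client.h"
#include "maybe_res.h"
#include "url_parser.h"

static bool exampleHttpsDownload()
{
    std::ofstream out_file("/tmp/artifact.download", std::ofstream::out | std::ofstream::binary);
    URLParser url("https://downloads.example.invalid/packages/manifest");

    // No proxy configured: pass error-state Maybe values, as the orchestration code does elsewhere.
    Maybe<std::string> no_proxy_host = genError("no proxy configured");
    Maybe<uint16_t> no_proxy_port = genError("no proxy configured");
    Maybe<std::string> no_proxy_auth = genError("no proxy configured");

    HttpsCurl client(url, out_file, "<bearer-token>", no_proxy_host, no_proxy_port, no_proxy_auth, "/etc/ssl/certs/ca-bundle.pem");
    client.setCurlOpts(60L, HTTP_VERSION::HTTP_VERSION_1_1);  // 60 second timeout, HTTP/1.1
    return client.connect();                                   // false on any curl or HTTP error
}
// --- End of sketch.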
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "downloader.h" + +#include "i_orchestration_tools.h" +#include "singleton.h" +#include "http_client.h" +#include "debug.h" +#include "config.h" +#include "rest.h" +#include "sasal.h" +#include "cereal/external/rapidjson/document.h" + +#include + +using namespace std; +using namespace rapidjson; + +SASAL_START // Orchestration - Communication + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +class Downloader::Impl : Singleton::Provide::From +{ +public: + void init(); + + Maybe downloadFileFromFog( + const string &checksum, + Package::ChecksumTypes checksum_type, + const GetResourceFile &resourse_file + ) const override; + + Maybe> downloadVirtualFileFromFog( + const GetResourceFile &resourse_file, + Package::ChecksumTypes checksum_type + ) const override; + + Maybe downloadFileFromURL( + const string &url, + const string &checksum, + Package::ChecksumTypes checksum_type, + const string &service_name + ) const override; + +private: + Maybe downloadFileFromFogByHTTP( + const GetResourceFile &resourse_file, + const string &file_name + ) const; + + Maybe validateChecksum( + const string &checksum, + Package::ChecksumTypes checksum_type, + Maybe &file_path + ) const; + + Maybe getFileFromExternalURL( + const URLParser &url, + const string &file_name, + bool auth_required + ) const; + Maybe getFileFromLocal(const string &local_file_path, const string &file_name) const; + Maybe getFileFromURL(const URLParser &url, const string &file_name, bool auth_required) const; + + tuple splitQuery(const string &query) const; + string vectorToPath(const vector &vec) const; + string dir_path; +}; + +void +Downloader::Impl::init() +{ + dir_path = getConfigurationWithDefault( + "/tmp/orchestration_downloads", + "orchestration", + "Default file download path" + ); + + Singleton::Consume::by()->createDirectory(dir_path); +} + +Maybe +Downloader::Impl::downloadFileFromFog( + const string &checksum, + Package::ChecksumTypes checksum_type, + const GetResourceFile &resourse_file) const +{ + auto file_path = downloadFileFromFogByHTTP(resourse_file, resourse_file.getFileName() + ".download"); + + if (!file_path.ok()) { + return file_path; + } + + auto checksum_validation = validateChecksum(checksum, checksum_type, file_path); + if (!checksum_validation.ok()) return checksum_validation; + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->isNonEmptyFile(file_path.unpack())) { + return genError("Failed to download file " + resourse_file.getFileName()); + } + + return file_path; +} + +Maybe> +Downloader::Impl::downloadVirtualFileFromFog( + const GetResourceFile &resourse_file, + Package::ChecksumTypes) const +{ + static const string tenand_id_key = "tenantId"; + static const string policy_key = "policy"; + static const string settings_key = "settings"; + static const string tenants_key = "tenants"; + static const string error_text = "error"; + + map res; + I_UpdateCommunication *update_communication = Singleton::Consume::by(); + auto downloaded_data = 
update_communication->downloadAttributeFile(resourse_file); + if (!downloaded_data.ok()) return downloaded_data.passErr(); + + Document document; + document.Parse(downloaded_data.unpack().c_str()); + if (document.HasParseError()) return genError("JSON file is not valid."); + + const Value &tenants_data = document[tenants_key.c_str()]; + for (Value::ConstValueIterator itr = tenants_data.Begin(); itr != tenants_data.End(); ++itr) { + + auto tenant_id_obj = itr->FindMember(tenand_id_key.c_str()); + if (tenant_id_obj == itr->MemberEnd()) continue; + + string tenant_id = tenant_id_obj->value.GetString(); + + Value::ConstMemberIterator artifact_data = itr->FindMember(policy_key.c_str()); + if (artifact_data == itr->MemberEnd()) artifact_data = itr->FindMember(settings_key.c_str()); + + if (artifact_data != itr->MemberEnd()) { + string file_path = dir_path + "/" + resourse_file.getFileName() + "_" + tenant_id + ".download"; + + rapidjson::StringBuffer buffer; + rapidjson::Writer writer(buffer); + artifact_data->value.Accept(writer); + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (orchestration_tools->writeFile(buffer.GetString(), file_path)) { + res.insert({tenant_id, file_path}); + } + continue; + } + + Value::ConstMemberIterator error_data = itr->FindMember(error_text.c_str()); + if (error_data != itr->MemberEnd()) { + dbgDebug(D_ORCHESTRATOR) + << "Failed to download artifact" + << ", Tenant ID: " << tenant_id + << ", Error message: " << error_data->value.FindMember("message")->value.GetString() + << ", Error ID: " << error_data->value.FindMember("messageId")->value.GetString(); + continue; + } + } + return res; +} + +Maybe +Downloader::Impl::downloadFileFromURL( + const string &url, + const string &checksum, + Package::ChecksumTypes checksum_type, + const string &service_name) const +{ + dbgDebug(D_ORCHESTRATOR) << "Download file. URL: " << url; + + string new_url = url; + bool auth_required = false; + auto custom_url = getConfiguration("orchestration", "Custom download url"); + if (custom_url.ok()) { + auto resource_index = url.find_last_of("/"); + string error_msg = "Failed to parse custom URL. "; + if (resource_index == string::npos) { + return genError(error_msg + "URL: " + url); + } + new_url = custom_url.unpack(); + if (new_url.empty()) { + return genError(error_msg + "URL is empty"); + } + if (new_url.back() == '/') { + new_url = new_url.substr(0, new_url.size() - 1); + } + new_url.append(url.substr(resource_index)); + } + // Workaround - only in staging we need to add the auth header + static const string jwt_word = ""; + if (new_url.find(jwt_word) != string::npos) { + new_url = new_url.substr(jwt_word.length()); + auth_required = true; + } + + URLParser parsed_url(new_url); + Maybe base_url = parsed_url.getBaseURL(); + if (!base_url.ok()) return base_url; + Maybe file_path = genError("Empty path"); + string file_name = service_name + ".download"; + if (parsed_url.getProtocol() == URLProtocol::LOCAL_FILE) { + file_path = getFileFromLocal(base_url.unpack(), file_name); + } else { + file_path = getFileFromExternalURL(parsed_url, file_name, auth_required); + } + + if (!file_path.ok()) { + return file_path; + } + + auto checksum_validation = validateChecksum(checksum, checksum_type, file_path); + if (!checksum_validation.ok()) return checksum_validation; + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->isNonEmptyFile(file_path.unpack())) { + return genError("Failed to download file. 
URL: " + parsed_url.toString()); + } + + return file_path; +} + +Maybe +Downloader::Impl::validateChecksum( + const string &checksum, + Package::ChecksumTypes checksum_type, + Maybe &file_path) const +{ + if (file_path.ok()) { + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + Maybe file_checksum = orchestration_tools->calculateChecksum(checksum_type, file_path.unpack()); + if (!file_checksum.ok() || checksum != file_checksum.unpack()) { + orchestration_tools->removeFile(file_path.unpack()); + if (!file_checksum.ok()) { + return genError("Failed to calculate file checksum, with error: " + file_checksum.getErr()); + } + return genError( + "The checksum calculation is not as the expected, " + + checksum + " != " + file_checksum.unpack() + ); + } + } + return file_path; +} + +Maybe +Downloader::Impl::downloadFileFromFogByHTTP(const GetResourceFile &resourse_file, const string &file_name) const +{ + string file_path = dir_path + "/" + file_name; + + dbgInfo(D_ORCHESTRATOR) << "Downloading file from fog. File: " << resourse_file.getFileName(); + + I_UpdateCommunication *update_communication = Singleton::Consume::by(); + auto downloaded_file = update_communication->downloadAttributeFile(resourse_file); + if (!downloaded_file.ok()) return genError(downloaded_file.getErr()); + dbgInfo(D_ORCHESTRATOR) << "Download completed. File: " << resourse_file.getFileName(); + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (orchestration_tools->writeFile(downloaded_file.unpack(), file_path)) return file_path; + return genError("Failed to write the attribute file. File: " + file_name); +} + +Maybe +Downloader::Impl::getFileFromLocal(const string &local_file_path, const string &file_name) const +{ + string file_path = dir_path + "/" + file_name; + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->copyFile(local_file_path, file_path)) { + return genError("Get file from local failed. File: " + local_file_path); + } + + return file_path; +} + +// LCOV_EXCL_START Reason: Depends on real download server. +Maybe +Downloader::Impl::getFileFromURL(const URLParser &url, const string &file_path, bool auth_required) const +{ + ofstream outFile(file_path, ofstream::out | ofstream::binary); + HTTPClient http_client; + dbgInfo(D_ORCHESTRATOR) << "Downloading file. URL: " << url; + auto get_file_response = http_client.getFile(url, outFile, auth_required); + if (!get_file_response.ok()) { + Maybe error = genError("Failed to download file from " + url.getBaseURL().unpack() + + ". Error: " + get_file_response.getErr()); + dbgWarning(D_ORCHESTRATOR) << "Download failed"; + return error; + } + outFile.close(); + dbgInfo(D_ORCHESTRATOR) << "Download completed. 
URL: " << url; + return file_path; +} + +Maybe +Downloader::Impl::getFileFromExternalURL( + const URLParser &parsed_url, + const string &file_name, + bool auth_required +) const +{ + string file_path = dir_path + "/" + file_name; + auto base_url = parsed_url.getBaseURL().unpack(); + + string query_path; + string query_file; + tie(query_path, query_file) = splitQuery(parsed_url.getQuery()); + + auto try_dirs = getConfigurationWithDefault( + false, + "orchestration", + "Add tenant suffix" + ); + if (try_dirs) { + vector sub_path; + auto agent_details = Singleton::Consume::by(); + auto tenant_id = agent_details->getTenantId(); + if (!tenant_id.empty()) { + sub_path.push_back(tenant_id); + auto profile_id = agent_details->getProfileId(); + if (!profile_id.empty()) { + sub_path.push_back(profile_id); + auto agent_id = agent_details->getAgentId(); + if(!agent_id.empty()) { + sub_path.push_back(agent_id); + } + } + } + + URLParser currentUrl = parsed_url; + while (!sub_path.empty()) { + currentUrl.setQuery(query_path + vectorToPath(sub_path) + "/" + query_file); + if (getFileFromURL(currentUrl, file_path, auth_required).ok()) return file_path; + sub_path.pop_back(); + } + } + + return getFileFromURL(parsed_url, file_path, auth_required); +} + +tuple +Downloader::Impl::splitQuery(const string &query) const +{ + size_t index = query.find_last_of("/"); + if (index == string::npos) return make_tuple(string(), query); + return make_tuple(query.substr(0, index), query.substr(index + 1)); +} + +string +Downloader::Impl::vectorToPath(const vector &vec) const +{ + string s; + for (const auto &piece : vec) { s += ("/" + piece); } + return s; +} + +Downloader::Downloader() : Component("Downloader"), pimpl(make_unique()) {} + +Downloader::~Downloader() {} + +void +Downloader::init() +{ + pimpl->init(); +} + +void +Downloader::preload() +{ + registerExpectedConfiguration("orchestration", "Custom download url"); + registerExpectedConfiguration("orchestration", "Default file download path"); + registerExpectedConfiguration("orchestration", "Self signed certificates acceptable"); + registerExpectedConfiguration("orchestration", "Add tenant suffix"); +} + +SASAL_END diff --git a/components/security_apps/orchestration/downloader/downloader_ut/CMakeLists.txt b/components/security_apps/orchestration/downloader/downloader_ut/CMakeLists.txt new file mode 100755 index 0000000..a999050 --- /dev/null +++ b/components/security_apps/orchestration/downloader/downloader_ut/CMakeLists.txt @@ -0,0 +1,7 @@ +link_directories(${BOOST_ROOT}/lib) + +add_unit_test( + downloader_ut + "downloader_ut.cc" + "orchestration;orchestration_downloader;orchestration_modules;orchestration_tools;environment;config;update_communication;metric;event_is;-lcurl;-lcrypto;-lssl;-lboost_regex" +) diff --git a/components/security_apps/orchestration/downloader/downloader_ut/downloader_ut.cc b/components/security_apps/orchestration/downloader/downloader_ut/downloader_ut.cc new file mode 100755 index 0000000..de1fc42 --- /dev/null +++ b/components/security_apps/orchestration/downloader/downloader_ut/downloader_ut.cc @@ -0,0 +1,431 @@ +#include "cptest.h" +#include "config.h" +#include "config_component.h" +#include "downloader.h" +#include "enum_range.h" +#include "environment.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_time_get.h" + +#include "mock/mock_update_communication.h" +#include "mock/mock_orchestration_tools.h" + +using namespace std; +using namespace testing; + +class DownloaderTest : public Test +{ +public: + DownloaderTest() + { 
+ setConfiguration("/tmp", "orchestration", "Default file download path"); + EXPECT_CALL(mock_orchestration_tools, createDirectory("/tmp")).WillOnce(Return(true)); + downloader.init(); + } + + NiceMock mock_mainloop; + NiceMock mock_timer; + ::Environment env; + ConfigComponent config_component; + StrictMock mock_communication; + StrictMock mock_orchestration_tools; + Downloader downloader; + I_Downloader *i_downloader = Singleton::Consume::from(downloader); +}; + +TEST_F(DownloaderTest, doNothing) +{ +} + +TEST_F(DownloaderTest, downloadFileFromFog) +{ + string fog_response = "bla bla"; + string checksum = "123"; + + GetResourceFile resourse_file(GetResourceFile::ResourceFileType::VIRTUAL_SETTINGS); + + EXPECT_CALL(mock_communication, downloadAttributeFile(resourse_file)).WillOnce(Return(fog_response)); + + EXPECT_CALL( + mock_orchestration_tools, + calculateChecksum(Package::ChecksumTypes::SHA256, "/tmp/virtualSettings.download") + ).WillOnce(Return(string("123"))); + + EXPECT_CALL(mock_orchestration_tools, writeFile(fog_response, "/tmp/virtualSettings.download")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile("/tmp/virtualSettings.download")).WillOnce(Return(true)); + + Maybe downloaded_file = i_downloader->downloadFileFromFog( + checksum, + Package::ChecksumTypes::SHA256, + resourse_file + ); + + EXPECT_THAT(downloaded_file, IsValue("/tmp/virtualSettings.download")); +} + +TEST_F(DownloaderTest, downloadFileFromFogFailure) +{ + string checksum = "123"; + + Maybe fog_response(genError("Failed to download")); + GetResourceFile resourse_file(GetResourceFile::ResourceFileType::SETTINGS); + + EXPECT_CALL(mock_communication, downloadAttributeFile(resourse_file)).WillOnce(Return(fog_response)); + + Maybe downloaded_file = i_downloader->downloadFileFromFog( + checksum, + Package::ChecksumTypes::SHA256, + resourse_file + ); + + EXPECT_FALSE(downloaded_file.ok()); + EXPECT_THAT(downloaded_file, IsError("Failed to download")); +} + +TEST_F(DownloaderTest, registerConfig) +{ + string file_path_value = "/tmp"; + string signed_certificates_value = "bla"; + string config_json = + "{\n" + " \"orchestration\": {\n" + " \"Default file download path\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"" + file_path_value + "\"\n" + " }\n" + " ],\n" + " \"Self signed certificates acceptable\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"" + signed_certificates_value + "\"\n" + " }\n" + " ]\n" + " }\n" + "}\n"; + + env.preload(); + env.init(); + downloader.preload(); + istringstream stringstream(config_json); + Singleton::Consume::from(config_component)->loadConfiguration(stringstream); + + EXPECT_THAT( + getConfiguration("orchestration", "Default file download path"), + IsValue(file_path_value) + ); + + EXPECT_THAT( + getConfiguration("orchestration", "Self signed certificates acceptable"), + IsValue(signed_certificates_value) + ); + + env.fini(); +} + +TEST_F(DownloaderTest, downloadWithBadChecksum) +{ + string local_file_path = "/tmp/test_file.sh"; + string url = "file://" + local_file_path; + string dir_path = getConfigurationWithDefault( + "/tmp/orchestration_downloads", + "orchestration", + "Default file download path" + ); + string service_name = "test"; + string file_name = service_name + ".download"; + string file_path = dir_path + "/" + file_name; + string checksum = "1234"; + Package::ChecksumTypes checksum_type = Package::ChecksumTypes::MD5; + + EXPECT_CALL( + mock_orchestration_tools, + calculateChecksum(checksum_type, file_path) 
+ ).WillOnce(Return(checksum + "5")); + EXPECT_CALL(mock_orchestration_tools, copyFile(local_file_path, file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile("/tmp/test.download")).WillOnce(Return(true)); + EXPECT_THAT( + i_downloader->downloadFileFromURL(url, checksum, checksum_type, service_name), + IsError("The checksum calculation is not as the expected, 1234 != 12345") + ); +} + +TEST_F(DownloaderTest, downloadFromLocal) +{ + string local_file_path = "/tmp/test_file.sh"; + string url = "file://" + local_file_path; + string dir_path = getConfigurationWithDefault( + "/tmp/orchestration_downloads", + "orchestration", + "Default file download path" + ); + string service_name = "test"; + string file_name = service_name + ".download"; + string file_path = dir_path + "/" + file_name; + string checksum = "1234"; + Package::ChecksumTypes checksum_type = Package::ChecksumTypes::MD5; + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(checksum_type, file_path)).WillOnce(Return(checksum)); + EXPECT_CALL(mock_orchestration_tools, copyFile(local_file_path, file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(file_path)).WillOnce(Return(true)); + i_downloader->downloadFileFromURL(url, checksum, checksum_type, service_name); +} + +TEST_F(DownloaderTest, downloadEmptyFileFromFog) +{ + string fog_response = "bla bla"; + string checksum = "123"; + string service_name = "manifest"; + string empty_str = ""; + + GetResourceFile resourse_file(GetResourceFile::ResourceFileType::MANIFEST); + + EXPECT_CALL(mock_communication, downloadAttributeFile(resourse_file)).WillOnce(Return(fog_response)); + + EXPECT_CALL(mock_orchestration_tools, writeFile(fog_response, "/tmp/manifest.download")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile("/tmp/manifest.download")).WillOnce(Return(false)); + + EXPECT_CALL( + mock_orchestration_tools, + calculateChecksum(Package::ChecksumTypes::SHA256, "/tmp/manifest.download") + ).WillOnce(Return(checksum)); + + Maybe downloaded_file = i_downloader->downloadFileFromFog( + checksum, + Package::ChecksumTypes::SHA256, + resourse_file + ); + + EXPECT_FALSE(downloaded_file.ok()); + EXPECT_THAT(downloaded_file, IsError("Failed to download file manifest")); +} + +TEST_F(DownloaderTest, downloadFromCustomURL) +{ + string file_prefix = "file://"; + string file_name = "/test_file.sh"; + string local_file_path = "/tmp" + file_name; + string url = file_prefix + local_file_path; + string custom_URL = "/custom"; + setConfiguration( + string(file_prefix + custom_URL), + "orchestration", + "Custom download url" + ); + string dir_path = getConfigurationWithDefault( + "/tmp/orchestration_downloads", + "orchestration", + "Default file download path" + ); + string service_name = "test"; + string download_file_name = service_name + ".download"; + string download_file_path = dir_path + "/" + download_file_name; + string checksum = "1234"; + Package::ChecksumTypes checksum_type = Package::ChecksumTypes::MD5; + EXPECT_CALL(mock_orchestration_tools, copyFile(custom_URL + file_name, download_file_path)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(download_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(checksum_type, download_file_path)) + .WillOnce(Return(checksum)); + + i_downloader->downloadFileFromURL(url, checksum, checksum_type, service_name); +} + +TEST_F(DownloaderTest, CustomURLBackBackslash) +{ + string 
file_prefix = "file://"; + string file_name = "test_file.sh"; + string local_file_path = "/tmp/" + file_name; + string url = file_prefix + local_file_path; + string custom_URL = "/custom/"; + setConfiguration( + string(file_prefix + custom_URL), + "orchestration", + "Custom download url" + ); + string dir_path = getConfigurationWithDefault( + "/tmp/orchestration_downloads", + "orchestration", + "Default file download path" + ); + string service_name = "test"; + string download_file_name = service_name + ".download"; + string download_file_path = dir_path + "/" + download_file_name; + string checksum = "1234"; + Package::ChecksumTypes checksum_type = Package::ChecksumTypes::MD5; + EXPECT_CALL(mock_orchestration_tools, copyFile(custom_URL + file_name, download_file_path)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(download_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(checksum_type, download_file_path)) + .WillOnce(Return(checksum)); + i_downloader->downloadFileFromURL(url, checksum, checksum_type, service_name); +} + +TEST_F(DownloaderTest, EmptyCustomURL) +{ + string file_prefix = "file://"; + string file_name = "/test_file.sh"; + string local_file_path = "/tmp" + file_name; + string url = file_prefix + local_file_path; + string custom_URL = ""; + setConfiguration( + string(custom_URL), + "orchestration", + "Custom download url" + ); + string dir_path = getConfigurationWithDefault( + "/tmp/orchestration_downloads", + "orchestration", + "Default file download path" + ); + string service_name = "test"; + string download_file_name = service_name + ".download"; + string download_file_path = dir_path + "/" + download_file_name; + string checksum = "1234"; + Package::ChecksumTypes checksum_type = Package::ChecksumTypes::MD5; + EXPECT_THAT( + i_downloader->downloadFileFromURL(url, checksum, checksum_type, service_name), + IsError("Failed to parse custom URL. 
URL is empty") + ); +} + +TEST_F(DownloaderTest, download_virtual_policy) +{ + GetResourceFile resourse_file(GetResourceFile::ResourceFileType::VIRTUAL_POLICY); + + resourse_file.addTenant("0000", "1", "checksum0000"); + resourse_file.addTenant("1111", "2", "checksum1111"); + + string tenant_0000_file = + "{" + "\"waap\":\"108-005\"," + "\"accessControl\":\"Internal error, check logs\"," + "\"idk\":\"ed5ac9a6-6924-4ebc-9ace-971896ca33c5\"," + "\"something\":\"Low\"" + "}"; + + string tenant_1111_file = + "{" + "\"messageId\":\"108-005\"," + "\"message\":\"Internal error, check logs\"," + "\"referenceId\":\"ed5ac9a6-6924-4ebc-9ace-971896ca33c5\"," + "\"severity\":\"Low\"" + "}"; + + string fog_response = + "{\n" + " \"tenants\": [\n" + " {\n" + " \"tenantId\": \"0000\",\n" + " \"policy\": {\n" + " \"waap\": \"108-005\",\n" + " \"accessControl\": \"Internal error, check logs\",\n" + " \"idk\": \"ed5ac9a6-6924-4ebc-9ace-971896ca33c5\",\n" + " \"something\": \"Low\"\n" + " }\n" + " },\n" + " {\n" + " \"tenantId\": \"1111\",\n" + " \"policy\": {\n" + " \"messageId\": \"108-005\",\n" + " \"message\": \"Internal error, check logs\",\n" + " \"referenceId\": \"ed5ac9a6-6924-4ebc-9ace-971896ca33c5\",\n" + " \"severity\": \"Low\"\n" + " }\n" + " }\n" + " ]\n" + "}"; + + EXPECT_CALL(mock_communication, downloadAttributeFile(resourse_file)).WillOnce(Return(fog_response)); + + EXPECT_CALL(mock_orchestration_tools, writeFile(tenant_0000_file, "/tmp/virtualPolicy_0000.download")) + .WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, writeFile(tenant_1111_file, "/tmp/virtualPolicy_1111.download")) + .WillOnce(Return(true)); + + map expected_downloaded_files = + { + { "0000", "/tmp/virtualPolicy_0000.download" }, + { "1111", "/tmp/virtualPolicy_1111.download" } + }; + + EXPECT_EQ( + i_downloader->downloadVirtualFileFromFog( + resourse_file, + Package::ChecksumTypes::SHA256 + ), + expected_downloaded_files + ); +} + +TEST_F(DownloaderTest, download_virtual_settings) +{ + GetResourceFile resourse_file(GetResourceFile::ResourceFileType::VIRTUAL_SETTINGS); + + resourse_file.addTenant("4c721b40-85df-4364-be3d-303a10ee9789", "1", "checksum0000"); + + string tenant_0000_file = + "{" + "\"agentSettings\":[" + "{" + "\"id\":\"f0bd081b-175a-2fb6-c6de-d05d62fdcadf\"," + "\"key\":\"\"," + "\"value\":\"\"" + "}" + "]," + "\"allowOnlyDefinedApplications\":false," + "\"anyFog\":true," + "\"reverseProxy\":{" + "\"assets\":[]" + "}," + "\"upgradeMode\":\"automatic\"" + "}"; + + string fog_response = + "{\n" + " \"tenants\": [\n" + " {\n" + " \"tenantId\": \"4c721b40-85df-4364-be3d-303a10ee9789\",\n" + " \"settings\": {\n" + " \"agentSettings\": [\n" + " {\n" + " \"id\": \"f0bd081b-175a-2fb6-c6de-d05d62fdcadf\",\n" + " \"key\": \"\",\n" + " \"value\": \"\"\n" + " }\n" + " ],\n" + " \"allowOnlyDefinedApplications\": false,\n" + " \"anyFog\": true,\n" + " \"reverseProxy\": {\n" + " \"assets\": []\n" + " },\n" + " \"upgradeMode\": \"automatic\"\n" + " }\n" + " }\n" + " ]\n" + "}"; + + EXPECT_CALL(mock_communication, downloadAttributeFile(resourse_file)).WillOnce(Return(fog_response)); + + EXPECT_CALL( + mock_orchestration_tools, + writeFile(tenant_0000_file, "/tmp/virtualSettings_4c721b40-85df-4364-be3d-303a10ee9789.download") + ).WillOnce(Return(true)); + + map expected_downloaded_files = { + { "4c721b40-85df-4364-be3d-303a10ee9789", + "/tmp/virtualSettings_4c721b40-85df-4364-be3d-303a10ee9789.download" + } + }; + + EXPECT_EQ( + i_downloader->downloadVirtualFileFromFog( + resourse_file, + 
Package::ChecksumTypes::SHA256 + ), + expected_downloaded_files + ); +} diff --git a/components/security_apps/orchestration/downloader/http_client.cc b/components/security_apps/orchestration/downloader/http_client.cc new file mode 100755 index 0000000..594470d --- /dev/null +++ b/components/security_apps/orchestration/downloader/http_client.cc @@ -0,0 +1,276 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "http_client.h" + +#include "curl_client.h" +#include "downloader.h" +#include "debug.h" +#include "i_encryptor.h" +#include "url_parser.h" +#include "sasal.h" +#include "config.h" +#include "i_environment.h" +#include "orchestration_comp.h" + +#include +#include +#include +#include +#include + +using boost::asio::ip::tcp; +using namespace std; + +SASAL_START // Orchestration - Communication + +USE_DEBUG_FLAG(D_ORCHESTRATOR); +USE_DEBUG_FLAG(D_HTTP_REQUEST); + +// LCOV_EXCL_START Reason: Depends on real download server. +class ClientConnection +{ +public: + ClientConnection( + const URLParser &_url, + const Maybe &_proxy_url, + const Maybe &_proxy_port, + const Maybe &_proxy_auth, + const string &_token) + : + url(_url), + proxy_url(_proxy_url), + proxy_port(_proxy_port), + proxy_auth(_proxy_auth), + token(_token) + { + } + + Maybe + handleConnect() + { + if (!url.getBaseURL().ok()) { + return genError("Failed to handle connection. Error: " + url.getBaseURL().getErr()); + } + string server_name = url.getBaseURL().unpack(); + string port = url.getPort(); + string query = url.getQuery(); + string host = server_name; + try { + if (stoi(port) != 80) { + host = host + ":" + port; + } + } catch (const exception &err) { + return genError("Failed to parse port to a number. Port: " + port ); + } + + chrono::duration> sleep_time(60); + io_stream.expires_from_now(sleep_time); + + if (proxy_url.ok()) { + if (!proxy_port.ok()) { + return genError( + "Failed to handle connection to server. proxy domain is defined with invalid port, Error: " + + proxy_port.getErr() + ); + } + io_stream.connect(proxy_url.unpack(), to_string(proxy_port.unpack())); + } else { + io_stream.connect(server_name, port); + } + + if (!io_stream) { + return genError("Failed to handle connection to server. 
Error: " + io_stream.error().message()); + } + + string request_url = query; + if (proxy_url.ok()) { + request_url = host + query; + } + + stringstream http_request; + http_request << "GET http://" << request_url << " HTTP/1.1\r\n"; + http_request << "Host: " << host << "\r\n"; + if (!token.empty()) { + http_request << "Authorization: " << "Bearer " << token << "\r\n"; + } + http_request << "User-Agent: Infinity Next (a7030abf93a4c13)\r\n"; + + auto i_trace_env = Singleton::Consume::by(); + http_request << i_trace_env->getCurrentHeaders(); + http_request << "Accept: */*\r\n"; + + if (proxy_url.ok()) { + http_request << "Accept-Encoding: identity"; + http_request << "Connection: close\r\n"; + http_request << "Proxy-Connection: Keep-Alive\r\n"; + + if (proxy_auth.ok()) { + I_Encryptor *encryptor = Singleton::Consume::by(); + http_request << "Proxy-Authorization: Basic " + encryptor->base64Encode(proxy_auth.unpack()) + "\r\n"; + } + http_request << "\r\n"; + } else { + http_request << "Connection: close\r\n\r\n"; + } + + dbgTrace(D_HTTP_REQUEST) << "Sending the following HTTP Request: " << endl << http_request.str(); + io_stream << http_request.str(); + return Maybe(); + } + + Maybe + handleResponse(ofstream &out_file) + { + string response_http_version; + io_stream >> response_http_version; + unsigned int status_code; + io_stream >> status_code; + string status_message; + getline(io_stream, status_message); + + if (!io_stream || response_http_version.substr(0, 5) != "HTTP/") { + return genError("Invalid response"); + } + + if (status_code != 200) { + return genError("HTTP response returned with status code " + status_code); + } + + string header; + vector headers; + while (getline(io_stream, header) && header != "\r") { + headers.push_back(header); + } + + out_file << io_stream.rdbuf(); + + dbgTrace(D_HTTP_REQUEST) + << "Received HTTP Response with the following data (downloaded file will not be printed):" + << endl + << response_http_version + << " " + << status_code + << " " + << status_message + << endl + << makeSeparatedStr(headers, "\n"); + + + return Maybe(); + } + +private: + const URLParser url; + const Maybe proxy_url; + const Maybe proxy_port; + const Maybe proxy_auth; + const string &token; + boost::asio::ip::tcp::iostream io_stream; +}; + +Maybe +HTTPClient::getFile(const URLParser &url, ofstream &out_file, bool auth_required) +{ + auto message = Singleton::Consume::by(); + auto load_env_proxy = message->loadProxy(); + if (!load_env_proxy.ok()) return load_env_proxy; + + string token = ""; + if (auth_required) { + auto message = Singleton::Consume::by(); + token = message->getAccessToken(); + } + + if (url.isOverSSL()) { + auto get_file_over_ssl_res = getFileSSL(url, out_file, token); + if (!get_file_over_ssl_res.ok()) + { + //CURL fallback + dbgWarning(D_ORCHESTRATOR) << "Failed to get file over SSL. Trying via CURL (SSL)."; + return curlGetFileOverSSL(url, out_file, token); + } + return get_file_over_ssl_res; + } + auto get_file_http_res = getFileHttp(url, out_file, token); + if (!get_file_http_res.ok()) + { + //CURL fallback + dbgWarning(D_ORCHESTRATOR) << "Failed to get file over HTTP. 
Trying via CURL (HTTP)."; + return curlGetFileOverHttp(url, out_file, token); + } + + return get_file_http_res; +} + +Maybe +HTTPClient::curlGetFileOverHttp(const URLParser &url, ofstream &out_file, const string &token) +{ + try { + auto message = Singleton::Consume::by(); + + HttpCurl http_curl_client( + url, + out_file, + token, + message->getProxyDomain(ProxyProtocol::HTTPS), + message->getProxyPort(ProxyProtocol::HTTPS), + message->getProxyCredentials(ProxyProtocol::HTTPS)); + + http_curl_client.setCurlOpts(); + bool connection_ok = http_curl_client.connect(); + if (!connection_ok) + { + stringstream url_s; + url_s << url; + string err_msg = string("Failed to get file over HTTP. URL: ") + url_s.str(); + return genError(err_msg); + } + + // As this class is a temporal solution catch all exception types is enabled. + } catch (const exception &e) { + string err_msg = "Failed to get file over HTTP. Exception: " + string(e.what()); + return genError(err_msg); + } + + return Maybe(); +} + +Maybe +HTTPClient::getFileHttp(const URLParser &url, ofstream &out_file, const string &token) +{ + try { + auto message = Singleton::Consume::by(); + ClientConnection client_connection( + url, + message->getProxyDomain(ProxyProtocol::HTTP), + message->getProxyPort(ProxyProtocol::HTTP), + message->getProxyCredentials(ProxyProtocol::HTTP), + token + ); + auto handle_connect_res = client_connection.handleConnect(); + if (!handle_connect_res.ok()) return handle_connect_res; + + return client_connection.handleResponse(out_file); + + // As this class is a temporal solution catch all exception types is enabled. + } catch (const exception &e) { + string err_msg = "Failed to get file over HTTP. Exception: " + string(e.what()); + return genError(err_msg); + } + + return Maybe(); +} +// LCOV_EXCL_STOP + +SASAL_END diff --git a/components/security_apps/orchestration/downloader/http_client.h b/components/security_apps/orchestration/downloader/http_client.h new file mode 100755 index 0000000..c25c5f9 --- /dev/null +++ b/components/security_apps/orchestration/downloader/http_client.h @@ -0,0 +1,39 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __HTTP_CLIENT_H__ +#define __HTTP_CLIENT_H__ + +#include +#include "maybe_res.h" +#include "url_parser.h" +#include "i_messaging.h" + +// LCOV_EXCL_START Reason: Depends on real download server. 
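// --- Illustrative sketch (not part of the patch): typical use of the HTTPClient declared below.
// getFile() first tries the plain boost::asio implementation (or the SSL variant for https URLs)
// and falls back to libcurl when that fails. The Maybe<> payload type of getFile is stripped in
// this extract, so it is left to auto here; the URL and output path are placeholders.
#include <fstream>
#include "http_client.h"
#include "url_parser.h"

static void exampleGetFile()
{
    std::ofstream out_file("/tmp/service.download", std::ofstream::out | std::ofstream::binary);
    URLParser url("https://downloads.example.invalid/packages/agent.pkg");

    HTTPClient client;
    auto res = client.getFile(url, out_file, false);  // false: no Authorization header added
    if (!res.ok()) {
        // The downloader logs the error string and propagates the failure in this case.
    }
}
// --- End of sketch.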
+class HTTPClient : public Singleton::Consume +{ +public: + HTTPClient() = default; + + Maybe getFile(const URLParser &url, std::ofstream &out_file, bool auth_required); + +private: + std::string loadCAChainDir(); + Maybe getFileSSL(const URLParser &url, std::ofstream &out_file, const std::string &_token); + Maybe getFileHttp(const URLParser &url, std::ofstream &out_file, const std::string &_token); + Maybe curlGetFileOverSSL(const URLParser &url, std::ofstream &out_file, const std::string &_token); + Maybe curlGetFileOverHttp(const URLParser &url, std::ofstream &out_file, const std::string &_token); +}; +// LCOV_EXCL_STOP + +#endif // __HTTP_CLIENT_H__ diff --git a/components/security_apps/orchestration/downloader/https_client.cc b/components/security_apps/orchestration/downloader/https_client.cc new file mode 100755 index 0000000..fe59ee7 --- /dev/null +++ b/components/security_apps/orchestration/downloader/https_client.cc @@ -0,0 +1,619 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "http_client.h" +#include "curl_client.h" + +#include "debug.h" +#include "i_agent_details.h" +#include "i_encryptor.h" +#include "downloader.h" +#include "config.h" +#include "sasal.h" +#include "boost/uuid/uuid.hpp" +#include "boost/uuid/uuid_generators.hpp" +#include +#include "boost/uuid/uuid_io.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace boost::placeholders; +using boost::asio::ip::tcp; +using namespace std; + +SASAL_START // Orchestration - Communication + +USE_DEBUG_FLAG(D_COMMUNICATION); +USE_DEBUG_FLAG(D_HTTP_REQUEST); +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +// LCOV_EXCL_START Reason: Depends on real download server. +class BadResponseFromServer : public exception +{ +public: + BadResponseFromServer() : message("Bad response returned from server") {} + BadResponseFromServer(const string &msg) : message(msg) {} + const char * + what() const throw() + { + return message.c_str(); + } + +private: + string message; +}; + +class Client +{ +public: + Client( + ofstream &out_file, + boost::asio::io_service &io_service, + boost::asio::ssl::context &context, + const URLParser &_url, + const Maybe &_proxy_url, + const Maybe &_proxy_port, + const Maybe &_proxy_auth, + const string &_token) + : + out_file(out_file), + url(_url), + proxy_url(_proxy_url), + proxy_port(_proxy_port), + proxy_auth(_proxy_auth), + resolver_(io_service), + deadline(io_service), + socket_(io_service), + ssl_socket(socket_, context), + token(_token) + { + } + + Maybe + handleConnection() + { + ostream request_stream(&request_); + stringstream http_request; + http_request << "GET " << url.getQuery() << " HTTP/1.1\r\n"; + string host = url.getBaseURL().unpack(); + string port = url.getPort(); + int port_int; + try { + port_int = stoi(port); + } catch (const exception &err) { + dbgWarning(D_COMMUNICATION) + << "Failed to convert port number from string. 
Port: " + << port + << ", Error: " + << err.what(); + return genError("Failed to parse port to a number. Port: " + port); + } + if (port_int != 443) { + host = host + ":" + port; + } + + http_request << "Host: " << host << "\r\n"; + + if (!token.empty()) { + http_request << "Authorization: " << "Bearer " << token << "\r\n"; + } + http_request << "User-Agent: Infinity Next (a7030abf93a4c13)\r\n"; + boost::uuids::uuid correlation_id; + try { + correlation_id = uuid_random_gen(); + } catch (const boost::uuids::entropy_error &) { + dbgWarning(D_COMMUNICATION) << "Failed to generate random correlation id - entropy exception"; + } + http_request << "X-Trace-Id: " + boost::uuids::to_string(correlation_id) + "\r\n"; + http_request << "Accept: */*\r\n"; + http_request << "Connection: close\r\n\r\n"; + + request_stream << http_request.str(); + + deadline.expires_from_now(boost::posix_time::minutes(5)); + deadline.async_wait(boost::bind(&Client::checkDeadline, this, _1)); + + if (proxy_url.ok()) { + if (!proxy_port.ok()) { + dbgWarning(D_COMMUNICATION) + << "Failed to connect to proxy due to invalid port value, Error: " + << proxy_port.getErr(); + + return genError( + "Failed to handle connection to server. proxy port is invalid, Error: " + + proxy_port.getErr() + ); + } + if (port_int == 443) host = host + ":" + port; + ostream connect_request_stream(&connect_request); + stringstream proxy_request; + proxy_request << "CONNECT " << host << " HTTP/1.1\r\n"; + proxy_request << "Host: " << host << "\r\n"; + if (proxy_auth.ok()) { + I_Encryptor *encryptor = Singleton::Consume::by(); + proxy_request + << "Proxy-Authorization: Basic " + << encryptor->base64Encode(proxy_auth.unpack()) + << "\r\n"; + } + proxy_request << "\r\n"; + + dbgTrace(D_HTTP_REQUEST) << "Connecting to proxy: " << endl << proxy_request.str(); + connect_request_stream << proxy_request.str(); + + tcp::resolver::query query(proxy_url.unpack(), to_string(proxy_port.unpack())); + resolver_.async_resolve( + query, + boost::bind( + &Client::overProxyResolver, + this, + boost::asio::placeholders::error, + boost::asio::placeholders::iterator + ) + ); + } else { + tcp::resolver::query query(url.getBaseURL().unpack(), port); + resolver_.async_resolve( + query, + boost::bind( + &Client::handleResolve, + this, + boost::asio::placeholders::error, + boost::asio::placeholders::iterator + ) + ); + } + + dbgTrace(D_HTTP_REQUEST) << "Sending the following HTTP Request: " << endl << http_request.str(); + return Maybe(); + } + +private: + void + checkDeadline(const boost::system::error_code &err) + { + if (err) return; + if (deadline.expires_at() <= boost::asio::deadline_timer::traits_type::now()) { + boost::system::error_code ignored_ec = boost::asio::error::operation_aborted; + socket_.close(ignored_ec); + deadline.expires_at(boost::posix_time::pos_infin); + return; + } + deadline.async_wait(boost::bind(&Client::checkDeadline, this, _1)); + } + + void + overProxyResolver(const boost::system::error_code &err, tcp::resolver::iterator endpoint_iterator) + { + if (!err) { + boost::asio::async_connect(socket_, endpoint_iterator, + boost::bind(&Client::overProxyHandleConnect, this, + boost::asio::placeholders::error)); + } else { + string err_msg = "Failed to connect to proxy. 
Error: " + err.message(); + throw BadResponseFromServer(err_msg); + } + } + + void + overProxyHandleConnect(const boost::system::error_code &err) + { + if (!err) { + boost::asio::async_write(socket_, connect_request, + boost::bind(&Client::overProxyHandleWriteRequest, this, + boost::asio::placeholders::error)); + } else { + string err_msg = "Failed to connect to proxy. Error: " + err.message(); + throw BadResponseFromServer(err_msg); + } + } + + void + overProxyHandleWriteRequest(const boost::system::error_code &err) + { + if (!err) { + boost::asio::async_read_until( + socket_, + response_, + "\r\n", + boost::bind(&Client::overProxyHandleReadStatusLine, this, boost::asio::placeholders::error) + ); + } else { + string err_msg = "Failed to write over proxy. Error: " + err.message(); + throw BadResponseFromServer(err_msg); + } + } + + void + overProxyHandleReadStatusLine(const boost::system::error_code &err) + { + if (err) { + string err_msg = "Failed to read status line over proxy. Error: " + err.message(); + throw BadResponseFromServer(err_msg); + } + // Check that response is OK. + istream response_stream(&response_); + string response_http_version; + response_stream >> response_http_version; + unsigned int status_code; + response_stream >> status_code; + string status_message; + getline(response_stream, status_message); + if (!response_stream || response_http_version.substr(0, 5) != "HTTP/") { + throw BadResponseFromServer("Invalid response"); + return; + } + + if (status_code != 200) { + string err_msg = "Response returned with status code " + status_code; + throw BadResponseFromServer(err_msg); + } + + dbgTrace(D_HTTP_REQUEST) + << "Received HTTP Response over proxied connection with the following data:" + << endl + << response_http_version + << " " + << status_code + << " " + << status_message; + + if (getProfileAgentSettingWithDefault(false, "agent.config.message.ignoreSslValidation") == false) { + ssl_socket.set_verify_mode(boost::asio::ssl::verify_peer | boost::asio::ssl::verify_fail_if_no_peer_cert); + ssl_socket.set_verify_callback(boost::bind(&Client::verifyCertificate, this, _1, _2)); + } else { + dbgWarning(D_HTTP_REQUEST) << "Ignoring SSL validation"; + } + + ssl_socket.async_handshake( + boost::asio::ssl::stream_base::client, + boost::bind(&Client::handleHandshake, this, boost::asio::placeholders::error) + ); + } + + void + handleResolve(const boost::system::error_code &err, tcp::resolver::iterator endpoint_iterator) + { + if (!err) { + boost::asio::async_connect(ssl_socket.lowest_layer(), endpoint_iterator, + boost::bind(&Client::handleConnect, this, + boost::asio::placeholders::error)); + } else { + string message = "Failed to connect. 
Error: " + err.message(); + throw BadResponseFromServer(message); + } + } + + bool + verifyCertificate(bool preverified, boost::asio::ssl::verify_context &ctx) + { + if (!token.empty()) { + X509_STORE_CTX *cts = ctx.native_handle(); + + switch (X509_STORE_CTX_get_error(cts)) + { + case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT: + dbgWarning(D_ORCHESTRATOR) << "SSL verification error: X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT"; + break; + case X509_V_ERR_CERT_NOT_YET_VALID: + case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD: + dbgWarning(D_ORCHESTRATOR) << "SSL verification error: Certificate not yet valid"; + break; + case X509_V_ERR_CERT_HAS_EXPIRED: + case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD: + dbgWarning(D_ORCHESTRATOR) << "Certificate expired"; + break; + case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT: + case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN: + dbgDebug(D_ORCHESTRATOR) << "Self signed certificate in chain"; + if (getConfigurationWithDefault(false, "orchestration", "Self signed certificates acceptable")) { + preverified = true; + } + break; + default: + if (!preverified) { + dbgWarning(D_ORCHESTRATOR) + << "Certificate verification error number: " + << X509_STORE_CTX_get_error(cts); + } + break; + } + return preverified; + } + return true; + } + + void + handleConnect(const boost::system::error_code &err) + { + if (!err) { + if (getProfileAgentSettingWithDefault(false, "agent.config.message.ignoreSslValidation") == false) { + ssl_socket.set_verify_mode( + boost::asio::ssl::verify_peer | + boost::asio::ssl::verify_fail_if_no_peer_cert + ); + ssl_socket.set_verify_callback(boost::bind(&Client::verifyCertificate, this, _1, _2)); + } else { + dbgWarning(D_HTTP_REQUEST) << "Ignoring SSL validation"; + } + + ssl_socket.async_handshake(boost::asio::ssl::stream_base::client, + boost::bind(&Client::handleHandshake, this, + boost::asio::placeholders::error)); + } else { + string err_message = "Failed to connect. Error: " + err.message(); + throw BadResponseFromServer(err_message); + } + } + + void + handleHandshake(const boost::system::error_code &error) + { + if (!error) { + boost::asio::buffer_cast(request_.data()); + + boost::asio::async_write(ssl_socket, request_, + boost::bind(&Client::handleWriteRequest, this, + boost::asio::placeholders::error)); + } else { + string err_message = "Handshake failed. Error: " + error.message(); + throw BadResponseFromServer(err_message); + } + } + + void + handleWriteRequest(const boost::system::error_code &err) + { + if (!err) { + boost::asio::async_read_until(ssl_socket, resp, "\r\n", + boost::bind(&Client::handleReadStatusLine, this, + boost::asio::placeholders::error)); + } else { + string err_message = "Failed to handle write request. Error: " + err.message(); + throw BadResponseFromServer(err_message); + } + } + + void + handleReadStatusLine(const boost::system::error_code &err) + { + if (!err) { + istream response_stream(&resp); + string http_version; + response_stream >> http_version; + unsigned int status_code; + response_stream >> status_code; + string status_message; + getline(response_stream, status_message); + dbgTrace(D_HTTP_REQUEST) + << "Received HTTP Response with the following data:" + << endl + << http_version + << " " + << status_code; + + if (!response_stream || http_version.substr(0, 5) != "HTTP/") { + string err_message = "Invalid response"; + throw BadResponseFromServer(err_message); + } + if (status_code != 200) { + string err_message = "HTTPS response returned with status code " + to_string(status_code) + + ". 
URL: " + url.toString(); + throw BadResponseFromServer(err_message); + } + + boost::asio::async_read_until(ssl_socket, resp, "\r\n\r\n", + boost::bind(&Client::handleReadHeaders, this, + boost::asio::placeholders::error)); + } else { + dbgWarning(D_COMMUNICATION) << "Failed to read response status. Error:" << err.message(); + string err_message = "Failed to read status. Error: " + err.message(); + throw BadResponseFromServer(err_message); + } + } + + void + handleReadHeaders(const boost::system::error_code &err) + { + if (!err) { + // Process the response headers. + istream response_stream(&resp); + string header; + vector headers; + while (getline(response_stream, header) && header != "\r") { + headers.push_back(header); + } + + dbgTrace(D_HTTP_REQUEST) << "Received Response headers:" << endl << makeSeparatedStr(headers, "\n"); + // Write whatever content we already have to output. + if (resp.size() > 0) + out_file << &resp; + + // Start reading remaining data until EOF. + boost::asio::async_read(ssl_socket, resp, + boost::asio::transfer_at_least(1), + boost::bind(&Client::handleReadContent, this, + boost::asio::placeholders::error)); + } else { + dbgWarning(D_COMMUNICATION) << "Failed to read response headers. Error:" << err.message(); + string err_message = "Failed to read headers. Error: " + err.message(); + throw BadResponseFromServer(err_message); + } + } + + void + handleReadContent(const boost::system::error_code &err) + { + if (!err) { + // Write all of the data that has been read so far. + out_file << &resp; + // Continue reading remaining data until EOF. + boost::asio::async_read( + ssl_socket, + resp, + boost::asio::transfer_at_least(1), + boost::bind(&Client::handleReadContent, this, boost::asio::placeholders::error) + ); + } else if (err != boost::asio::error::eof && err != boost::asio::ssl::error::stream_truncated) { + dbgWarning(D_COMMUNICATION) << "Failed to read response body. Error:" << err.message(); + string err_message = "Failed to read content. Error: " + err.message(); + throw BadResponseFromServer(err_message); + } else if (err == boost::asio::ssl::error::stream_truncated) { + dbgError(D_COMMUNICATION) << "Had SSL warning during reading response body stage. 
Error:" << err.message(); + deadline.cancel(); + } else { + deadline.cancel(); + } + } + + ofstream &out_file; + const URLParser &url; + const Maybe proxy_url; + const Maybe proxy_port; + const Maybe proxy_auth; + tcp::resolver resolver_; + boost::asio::deadline_timer deadline; + boost::asio::ip::tcp::socket socket_; + boost::asio::ssl::stream ssl_socket; + boost::asio::streambuf request_; + boost::asio::streambuf connect_request; + boost::asio::streambuf response_; + boost::asio::streambuf resp; + const string &token; + boost::uuids::random_generator uuid_random_gen; +}; + +string +HTTPClient::loadCAChainDir() +{ + string ca_chain_dir; + auto agent_details = Singleton::Consume::by(); + auto load_ca_chain_dir = agent_details->getOpenSSLDir(); + if (load_ca_chain_dir.ok()) { + ca_chain_dir = load_ca_chain_dir.unpack(); + } + return getConfigurationWithDefault(ca_chain_dir, "message", "Certificate authority directory"); +} + +Maybe +HTTPClient::getFileSSL(const URLParser &url, ofstream &out_file, const string &token) +{ + try { + boost::asio::ssl::context ctx(boost::asio::ssl::context::sslv23); + if (!token.empty()) { + string cert_file_path = getConfigurationWithDefault( + getFilesystemPathConfig() + "/certs/fog.pem", + "message", + "Certificate chain file path" + ); + dbgTrace(D_ORCHESTRATOR) << "Http client, cert file path: " << cert_file_path; + auto trusted_ca_directory = getConfiguration("message", "Trusted CA directory"); + if (trusted_ca_directory.ok() && !trusted_ca_directory.unpack().empty()) { + ctx.add_verify_path(trusted_ca_directory.unpack()); + } else { + string cert_file_path = getConfigurationWithDefault( + getFilesystemPathConfig() + "/certs/fog.pem", + "message", + "Certificate chain file path" + ); + ctx.load_verify_file(cert_file_path); + } + } + boost::asio::io_service io_service; + auto message = Singleton::Consume::by(); + + Client client( + out_file, + io_service, + ctx, + url, + message->getProxyDomain(ProxyProtocol::HTTPS), + message->getProxyPort(ProxyProtocol::HTTPS), + message->getProxyCredentials(ProxyProtocol::HTTPS), + token + ); + + auto connection_result = client.handleConnection(); + if (!connection_result.ok()) { + return connection_result; + }; + + auto mainloop = Singleton::Consume::by(); + while (!io_service.stopped()) { + io_service.poll_one(); + mainloop->yield(true); + } + } catch (const exception &e) { + dbgWarning(D_COMMUNICATION) << "Failed to get file over HTTPS. Error:" << string(e.what()); + string error_str = "Failed to get file over HTTPS, exception: " + string(e.what()); + return genError(error_str); + } + + return Maybe(); +} + +Maybe +HTTPClient::curlGetFileOverSSL(const URLParser &url, ofstream &out_file, const string &token) +{ + try { + string cert_file_path; + if (!token.empty()) + { + cert_file_path = getConfigurationWithDefault( + getFilesystemPathConfig() + "/certs/fog.pem", + "message", + "Certificate chain file path" + ); + } + + auto message = Singleton::Consume::by(); + + HttpsCurl ssl_curl_client( + url, + out_file, + token, + message->getProxyDomain(ProxyProtocol::HTTPS), + message->getProxyPort(ProxyProtocol::HTTPS), + message->getProxyCredentials(ProxyProtocol::HTTPS), + cert_file_path); + + ssl_curl_client.setCurlOpts(); + bool connection_ok = ssl_curl_client.connect(); + if (!connection_ok) + { + stringstream url_s; + url_s << url; + string err_msg = string("Failed to get file over HTTPS. 
URL: ") + url_s.str(); + return genError(err_msg); + } + + } catch (const exception &e) { + dbgWarning(D_COMMUNICATION) << "Failed to get file over HTTPS. Error:" << string(e.what()); + string error_str = "Failed to get file over HTTPS, exception: " + string(e.what()); + return genError(error_str); + } + + return Maybe(); +} + +// LCOV_EXCL_STOP + +SASAL_END diff --git a/components/security_apps/orchestration/health_check/CMakeLists.txt b/components/security_apps/orchestration/health_check/CMakeLists.txt new file mode 100755 index 0000000..7dd884d --- /dev/null +++ b/components/security_apps/orchestration/health_check/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(health_check health_check.cc) + +add_subdirectory(health_check_ut) diff --git a/components/security_apps/orchestration/health_check/health_check.cc b/components/security_apps/orchestration/health_check/health_check.cc new file mode 100755 index 0000000..c034de4 --- /dev/null +++ b/components/security_apps/orchestration/health_check/health_check.cc @@ -0,0 +1,340 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "health_checker.h" + +#include +#include +#include +#include + +#include "config.h" +#include "log_generator.h" +#include "health_check_manager.h" + +using namespace std; +using namespace ReportIS; + +USE_DEBUG_FLAG(D_HEALTH_CHECK); + +class HealthChecker::Impl +{ +public: + void + init() + { + i_mainloop = Singleton::Consume::by(); + i_socket = Singleton::Consume::by(); + initConfig(); + initServerSocket(); + + registerConfigLoadCb( + [&]() + { + initConfig(); + initServerSocket(); + } + ); + } + + void + initServerSocket() + { + if (!enable) { + return; + } + + if (!checkInternalHealthCheckStatus()) { + reportError("Internal health check failed. Wait for restart."); + return; + } + + if (port == 0) { + string error_msg = + "Cannot initialize health check component, listening port was not provided. " + "Please provide valid port (>0)."; + reportError(error_msg); + return; + } + + if (server_sock == -1) { + i_mainloop->addOneTimeRoutine( + I_MainLoop::RoutineType::System, + [this] () { HandleProbeStartup(); }, + "Health check probe listener startup", + false + ); + } + } + + void + fini() + { + closeConnection(); + } + +private: + bool + checkInternalHealthCheckStatus() + { + dbgTrace(D_HEALTH_CHECK) << "Start agent general health check."; + + HealthCheckStatus status = + Singleton::Consume::by()->getAggregatedStatus(); + + dbgTrace(D_HEALTH_CHECK) + << "Finished agent general health check. 
Received aggregated status: " + << HealthCheckStatusReply::convertHealthCheckStatusToStr(status); + + return status != HealthCheckStatus::UNHEALTHY; + } + + void + reportError(const string &error_msg) + { + dbgWarning(D_HEALTH_CHECK) << error_msg; + LogGen( + error_msg, + Audience::SECURITY, + Severity::CRITICAL, + Priority::URGENT, + Tags::ORCHESTRATOR + ); + } + + void + closeConnection() + { + dbgDebug(D_HEALTH_CHECK) << "Closing connection"; + if (server_sock > 0) { + i_socket->closeSocket(server_sock); + server_sock = -1; + dbgDebug(D_HEALTH_CHECK) << "Server socket closed"; + } + + if (routine_id > 0 && i_mainloop->doesRoutineExist(routine_id)) { + i_mainloop->stop(routine_id); + routine_id = 0; + } + + for (auto socket_routine : client_sockets_routines) { + auto routine = socket_routine.first; + if (routine > 0 && i_mainloop->doesRoutineExist(routine)) { + i_mainloop->stop(routine); + } + auto socket = socket_routine.second; + + if (socket > 0) { + i_socket->closeSocket(socket); + } + } + client_sockets_routines.clear(); + } + + void + initCloudVendorConfig() + { + static const map> ip_port_defaults_map = { + {"Azure", make_pair("168.63.129.16", 8117)}, + {"Aws", make_pair("", 8117)} + }; + auto cloud_vendor_maybe = getSetting("reverseProxy", "cloudVendorName"); + if (cloud_vendor_maybe.ok()) { + const string cloud_vendor = cloud_vendor_maybe.unpack(); + auto value = ip_port_defaults_map.find(cloud_vendor); + if (value != ip_port_defaults_map.end()) { + const pair &ip_port_pair = value->second; + ip_address = ip_port_pair.first; + port = ip_port_pair.second; + enable = true; + } + } + + ip_address = getProfileAgentSettingWithDefault( + ip_address, + "agent.config.orchestration.healthCheckProbe.IP" + ); + port = getProfileAgentSettingWithDefault(port, "agent.config.orchestration.healthCheckProbe.port"); + enable = getProfileAgentSettingWithDefault(enable, "agent.config.orchestration.healthCheckProbe.enable"); + + ip_address = getConfigurationWithDefault(ip_address, "Health Check", "Probe IP"); + port = getConfigurationWithDefault(port, "Health Check", "Probe port"); + enable = getConfigurationWithDefault(enable, "Health Check", "Probe enabled"); + } + + void + initConfig() + { + auto prev_ip_address = ip_address; + auto prev_port = port; + + initCloudVendorConfig(); + + max_connections = getProfileAgentSettingWithDefault( + 10, + "agent.config.orchestration.healthCheckProbe.maximunConnections" + ); + max_connections = getConfigurationWithDefault( + max_connections, + "Health Check", + "Probe maximun open connections" + ); + + max_retry_interval = getProfileAgentSettingWithDefault( + 600, + "agent.config.orchestration.healthCheckProbe.socketReopenPeriod" + ); + max_retry_interval = getConfigurationWithDefault( + max_retry_interval, + "Health Check", + "Probe socket reopen period" + ); + if (!enable) { + if (server_sock != -1) closeConnection(); + return; + } + + if (prev_ip_address != ip_address || prev_port != port) { + if (server_sock != -1) closeConnection(); + } + } + + void + HandleProbeStartup() + { + size_t next_retry_interval = 1; + while (server_sock == -1) { + next_retry_interval = + next_retry_interval < max_retry_interval ? next_retry_interval*2 : max_retry_interval; + auto socket = i_socket->genSocket( + I_Socket::SocketType::TCP, + false, + true, + "0.0.0.0:" + to_string(port) + ); + if (socket.ok()) { + dbgInfo(D_HEALTH_CHECK) << "Successfully created probe listener." 
+ << " port: " + << port; + server_sock = socket.unpack(); + } else { + dbgWarning(D_HEALTH_CHECK) + << "Failed to set up socket:" + << ", Error: " + << socket.getErr() + << ", trying again to set up socket in " + << next_retry_interval + << " seconds"; + i_mainloop->yield(chrono::seconds(next_retry_interval)); + } + } + routine_id = i_mainloop->addFileRoutine( + I_MainLoop::RoutineType::RealTime, + server_sock, + [this] () { handleConnection(); }, + "Health check probe server", + true + ); + } + + void + handleConnection() + { + if (open_connections_counter >= max_connections) { + dbgDebug(D_HEALTH_CHECK) + << "Cannot serve new client, reached maximun open connections bound which is:" + << open_connections_counter + << "maximun allowed: " + << max_connections; + return; + } + Maybe accepted_socket = i_socket->acceptSocket(server_sock, false, ip_address); + if (!accepted_socket.ok()) { + dbgWarning(D_HEALTH_CHECK) + << "Failed to accept a new client socket: " + << accepted_socket.getErr(); + return; + } + + auto new_client_socket = accepted_socket.unpack(); + if (new_client_socket <= 0) { + i_socket->closeSocket(new_client_socket); + dbgWarning(D_HEALTH_CHECK) + << "Failed to initialize communication, generated client socket is OK yet negative"; + return; + } + + dbgDebug(D_HEALTH_CHECK) << "Successfully accepted client, client fd: " << new_client_socket; + open_connections_counter++; + auto curr_routine = i_mainloop->addFileRoutine( + I_MainLoop::RoutineType::RealTime, + new_client_socket, + [this] () + { + auto curr_routine_id = i_mainloop->getCurrentRoutineId().unpack(); + auto curr_client_socket = client_sockets_routines[curr_routine_id]; + auto data_recieved = i_socket->receiveData(curr_client_socket, sizeof(uint8_t), false); + if (!data_recieved.ok()) { + dbgDebug(D_HEALTH_CHECK) << "Connection with client closed, client fd: " << curr_client_socket; + open_connections_counter--; + i_socket->closeSocket(curr_client_socket); + client_sockets_routines.erase(curr_routine_id); + i_mainloop->stop(); + } + }, + "Health check probe connection handler", + true + ); + client_sockets_routines[curr_routine] = new_client_socket; + } + + bool enable; + uint max_retry_interval; + unordered_map client_sockets_routines; + bool is_first_run = true; + uint open_connections_counter = 0; + uint max_connections = 0; + string ip_address = ""; + uint port = 0; + I_Socket::socketFd server_sock = -1; + I_MainLoop::RoutineID routine_id = 0; + I_MainLoop *i_mainloop = nullptr; + I_Socket *i_socket = nullptr; + I_Health_Check_Manager *i_health_check_manager = nullptr; +}; + +HealthChecker::HealthChecker() : Component("HealthChecker"), pimpl(make_unique()) {} +HealthChecker::~HealthChecker() {} + +void +HealthChecker::preload() +{ + registerExpectedConfiguration("Health Check", "Probe maximun open connections"); + registerExpectedConfiguration("Health Check", "Probe enabled"); + registerExpectedConfiguration("Health Check", "Probe IP"); + registerExpectedConfiguration("Health Check", "Probe port"); + registerExpectedConfiguration("Health Check", "Probe socket reopen period"); + registerExpectedSetting("reverseProxy", "cloudVendorName"); +} + +void +HealthChecker::init() +{ + pimpl->init(); +} + +void +HealthChecker::fini() +{ + pimpl->fini(); +} diff --git a/components/security_apps/orchestration/health_check/health_check_ut/CMakeLists.txt b/components/security_apps/orchestration/health_check/health_check_ut/CMakeLists.txt new file mode 100755 index 0000000..1a63a29 --- /dev/null +++ 
b/components/security_apps/orchestration/health_check/health_check_ut/CMakeLists.txt @@ -0,0 +1,7 @@ +link_directories(${BOOST_ROOT}/lib) + +add_unit_test( + health_check_ut + "health_check_ut.cc" + "health_check;mainloop;singleton;agent_details;config;logging;metric;event_is;health_check_manager;-lboost_regex" +) diff --git a/components/security_apps/orchestration/health_check/health_check_ut/health_check_ut.cc b/components/security_apps/orchestration/health_check/health_check_ut/health_check_ut.cc new file mode 100755 index 0000000..9a7c952 --- /dev/null +++ b/components/security_apps/orchestration/health_check/health_check_ut/health_check_ut.cc @@ -0,0 +1,260 @@ +#include "health_checker.h" + +#include "cptest.h" +#include "agent_details.h" +#include "mock/mock_logging.h" +#include "mock/mock_time_get.h" +#include "mock/mock_socket_is.h" +#include "mock/mock_mainloop.h" +#include "health_check_manager.h" + +#include "config.h" +#include "config_component.h" +#include "singleton.h" +#include "environment.h" + +using namespace std; +using namespace testing; + +USE_DEBUG_FLAG(D_HEALTH_CHECK); + +class HealthCheckerTest : public testing::Test +{ +public: + HealthCheckerTest() + { + setConfiguration(true, "Health Check", "Probe enabled"); + i_health_check_manager = Singleton::Consume::from(health_check_manager); + Debug::setUnitTestFlag(D_HEALTH_CHECK, Debug::DebugLevel::TRACE); + Debug::setNewDefaultStdout(&capture_debug); + } + + ~HealthCheckerTest() + { + Debug::setNewDefaultStdout(&cout); + + if (server_socket > 0) { + EXPECT_THAT(capture_debug.str(), HasSubstr("Server socket closed")); + EXPECT_CALL(mock_socket, closeSocket(server_socket)); + } + health_checker.fini(); + } + + ostringstream capture_debug; + StrictMock mock_mainloop; + NiceMock mock_time_get; + ::Environment env; + NiceMock mock_log; + AgentDetails agent_details; + StrictMock mock_socket; + I_Socket::socketFd server_socket = -1; + Context ctx; + ConfigComponent config; + HealthChecker health_checker; + I_MainLoop::Routine connection_handler_routine; + I_MainLoop::Routine client_connection_handler_routine; + I_MainLoop::Routine handle_probe_routine; + //StrictMock mock_health_check_manager; + HealthCheckManager health_check_manager; + I_Health_Check_Manager *i_health_check_manager; +}; + +TEST_F(HealthCheckerTest, empty) +{ +} + +TEST_F(HealthCheckerTest, load_policy) +{ + health_checker.preload(); + health_checker.init(); + + stringstream config; + config << "{}"; + EXPECT_TRUE(Singleton::Consume::from()->loadConfiguration(config)); +} + +TEST_F(HealthCheckerTest, clientConnection) +{ + string ip = "1.2.3.4"; + setConfiguration(ip, "Health Check", "Probe IP"); + uint port = 11600; + setConfiguration(port, "Health Check", "Probe port"); + + EXPECT_CALL( + mock_mainloop, + addOneTimeRoutine(I_MainLoop::RoutineType::System, _, _, false) + ).WillOnce(DoAll(SaveArg<1>(&handle_probe_routine), Return(0))); + + EXPECT_CALL( + mock_socket, + genSocket(I_Socket::SocketType::TCP, false, true, _) + ).WillRepeatedly(Return(1)); + + EXPECT_CALL( + mock_mainloop, + addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true) + ).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0))); + + int socket = 1; + EXPECT_CALL(mock_socket, acceptSocket(1, false, ip)).WillOnce(Return(socket)); + EXPECT_CALL(mock_mainloop, getCurrentRoutineId()).WillRepeatedly(Return(0)); + EXPECT_CALL(mock_socket, receiveData(_, 1, false)).WillOnce(Return(vector())); + EXPECT_CALL(mock_socket, closeSocket(socket)).Times(2); + 
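    // The calls below drive the component by hand rather than through a live mainloop: init()
    // registers the startup routine captured above as handle_probe_routine, invoking that routine
    // opens the probe listener and registers the connection handler, the first
    // connection_handler_routine() call accepts the mocked client (re-saving the per-client
    // handler into the same variable), and the second call runs that per-client read path before
    // fini() tears the sockets down.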
health_checker.init(); + handle_probe_routine(); + connection_handler_routine(); + connection_handler_routine(); + health_checker.fini(); +} + +TEST_F(HealthCheckerTest, loadFromDynamicConfiguration) +{ + uint port = 11600; + + EXPECT_CALL( + mock_socket, + genSocket(I_Socket::SocketType::TCP, false, true, _) + ).WillRepeatedly(Return(1)); + + EXPECT_CALL( + mock_mainloop, + addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true) + ).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0))); + + health_checker.init(); + health_checker.preload(); + EXPECT_THAT( + capture_debug.str(), + HasSubstr( + "Cannot initialize health check component, " + "listening port was not provided. Please provide valid port (>0)." + ) + ); + + setConfiguration(string("1.2.3.4"), "Health Check", "Probe IP"); + setConfiguration(port, "Health Check", "Probe port"); +} + +TEST_F(HealthCheckerTest, connectionsLimit) +{ + string ip = "1.2.3.4"; + setConfiguration(ip, "Health Check", "Probe IP"); + uint port = 11600; + setConfiguration(port, "Health Check", "Probe port"); + uint a = 0; + setConfiguration(a, "Health Check", "Probe maximun open connections"); + + EXPECT_CALL( + mock_mainloop, + addOneTimeRoutine(I_MainLoop::RoutineType::System, _, _, false) + ).WillOnce(DoAll(SaveArg<1>(&handle_probe_routine), Return(0))); + + EXPECT_CALL( + mock_socket, + genSocket(I_Socket::SocketType::TCP, false, true, _) + ).WillRepeatedly(Return(1)); + + EXPECT_CALL( + mock_mainloop, + addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true) + ).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0))); + + EXPECT_CALL(mock_mainloop, doesRoutineExist(_)).WillRepeatedly(Return(false)); + EXPECT_CALL(mock_socket, acceptSocket(1, false, ip)).WillRepeatedly(Return(1)); + EXPECT_CALL(mock_socket, receiveData(_, 1, false)).WillRepeatedly(Return(vector())); + EXPECT_CALL(mock_socket, closeSocket(_)).WillRepeatedly(Return()); + health_checker.init(); + handle_probe_routine(); + connection_handler_routine(); + + EXPECT_THAT( + capture_debug.str(), HasSubstr("Cannot serve new client, reached maximun open connections") + ); +} + +TEST_F(HealthCheckerTest, disablingAfterEnabled) +{ + string ip = "1.2.3.4"; + setConfiguration(ip, "Health Check", "Probe IP"); + uint port = 11600; + setConfiguration(port, "Health Check", "Probe port"); + + EXPECT_CALL( + mock_mainloop, + addOneTimeRoutine(I_MainLoop::RoutineType::System, _, _, false) + ).WillOnce(DoAll(SaveArg<1>(&handle_probe_routine), Return(0))); + + EXPECT_CALL( + mock_socket, + genSocket(I_Socket::SocketType::TCP, false, true, _) + ).WillRepeatedly(Return(1)); + + EXPECT_CALL( + mock_mainloop, + addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true) + ).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0))); + + int socket = 1; + EXPECT_CALL(mock_socket, acceptSocket(1, false, ip)).WillOnce(Return(socket)); + EXPECT_CALL(mock_mainloop, getCurrentRoutineId()).WillRepeatedly(Return(0)); + EXPECT_CALL(mock_socket, receiveData(_, 1, false)).WillOnce(Return(vector())); + EXPECT_CALL(mock_socket, closeSocket(socket)).Times(2); + health_checker.init(); + handle_probe_routine(); + connection_handler_routine(); + connection_handler_routine(); + setConfiguration(false, "Health Check", "Probe enabled"); +} + +TEST_F(HealthCheckerTest, noPort) +{ + health_checker.init(); + health_checker.preload(); + + EXPECT_THAT( + capture_debug.str(), + HasSubstr( + "Cannot initialize health check component, " + "listening port was not 
provided. Please provide valid port (>0)." + ) + ); +} + +TEST_F(HealthCheckerTest, changePortIpConfig) +{ + string ip = "1.2.3.4"; + setConfiguration(ip, "Health Check", "Probe IP"); + uint port = 11600; + setConfiguration(port, "Health Check", "Probe port"); + + EXPECT_CALL( + mock_mainloop, + addOneTimeRoutine(I_MainLoop::RoutineType::System, _, _, false) + ).WillOnce(DoAll(SaveArg<1>(&handle_probe_routine), Return(0))); + + EXPECT_CALL( + mock_socket, + genSocket(I_Socket::SocketType::TCP, false, true, _) + ).WillRepeatedly(Return(1)); + + EXPECT_CALL( + mock_mainloop, + addFileRoutine(I_MainLoop::RoutineType::RealTime, _, _, _, true) + ).WillRepeatedly(DoAll(SaveArg<2>(&connection_handler_routine), Return(0))); + + int socket = 1; + EXPECT_CALL(mock_socket, acceptSocket(1, false, ip)).WillOnce(Return(socket)); + EXPECT_CALL(mock_mainloop, getCurrentRoutineId()).WillRepeatedly(Return(0)); + EXPECT_CALL(mock_socket, receiveData(_, 1, false)).Times(2).WillRepeatedly(Return(vector())); + EXPECT_CALL(mock_socket, closeSocket(socket)).Times(2); + health_checker.init(); + handle_probe_routine(); + connection_handler_routine(); + connection_handler_routine(); + setConfiguration(false, "Health Check", "Probe enabled"); + string new_ip = "1.1.1.1"; + setConfiguration(new_ip, "Health Check", "Probe IP"); + uint new_port = 11111; + setConfiguration(new_port, "Health Check", "Probe port"); + connection_handler_routine(); +} diff --git a/components/security_apps/orchestration/hybrid_mode_telemetry.cc b/components/security_apps/orchestration/hybrid_mode_telemetry.cc new file mode 100755 index 0000000..52cc834 --- /dev/null +++ b/components/security_apps/orchestration/hybrid_mode_telemetry.cc @@ -0,0 +1,59 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "hybrid_mode_telemetry.h" +#include "debug.h" +#include "orchestration_comp.h" +#include "i_shell_cmd.h" +#include + +using namespace std; + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +static inline string & +trim(string &in) +{ + in.erase(in.begin(), find_if(in.begin(), in.end(), not1(ptr_fun(isspace)))); + in.erase(find_if(in.rbegin(), in.rend(), not1(ptr_fun(isspace))).base(), in.end()); + return in; +} + +void +HybridModeMetric::upon(const HybridModeMetricEvent &) +{ + auto shell_cmd = Singleton::Consume::by(); + auto maybe_cmd_output = shell_cmd->getExecOutput( + getFilesystemPathConfig() + "/watchdog/cp-nano-watchdog --restart_count" + ); + + // get wd process restart count + if (!maybe_cmd_output.ok()) { + dbgWarning(D_ORCHESTRATOR) + << "Watchdog was unable to provide the process restart count. 
Error: " + << maybe_cmd_output.getErr(); + return; + } + string cmd_output = maybe_cmd_output.unpack(); + trim(cmd_output); + dbgDebug(D_ORCHESTRATOR) << "Watchdog process counter: " << cmd_output; + + try { + wd_process_restart.report(stoi(cmd_output)); + dbgDebug(D_ORCHESTRATOR) << "Succesfully reported Watchdog process counter: " << cmd_output; + } catch (invalid_argument &) { + dbgWarning(D_ORCHESTRATOR) << "counter value is not a number: " << cmd_output; + } catch (...) { + dbgWarning(D_ORCHESTRATOR) << "Reporting counter value failed with unexpected error"; + } +} diff --git a/components/security_apps/orchestration/include/fog_authenticator.h b/components/security_apps/orchestration/include/fog_authenticator.h new file mode 100755 index 0000000..d07c224 --- /dev/null +++ b/components/security_apps/orchestration/include/fog_authenticator.h @@ -0,0 +1,301 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __FOG_AUTHENTICATOR_H__ +#define __FOG_AUTHENTICATOR_H__ + +#include +#include +#include +#include +#include +#include +#include "cereal/archives/json.hpp" + +#include "i_update_communication.h" +#include "i_orchestration_tools.h" +#include "i_agent_details.h" +#include "i_orchestration_status.h" +#include "i_messaging.h" +#include "i_mainloop.h" +#include "i_encryptor.h" +#include "i_details_resolver.h" +#include "i_rest_api.h" +#include "i_time_get.h" +#include "i_encryptor.h" +#include "maybe_res.h" + +class FogAuthenticator + : + public I_UpdateCommunication, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ + class AccessToken + { + public: + AccessToken(const std::string &token, std::chrono::seconds expiration); + + std::chrono::seconds getRemainingTime() const; + + const std::string & getToken() const { return token; } + uint getExpiration() const { return expiration.count(); } + + private: + std::string token; + std::chrono::seconds expiration; + std::chrono::microseconds received_time; + }; + + class AccessTokenProvider : public ServerRest + { + public: + void doCall() override; + static std::function()> getAccessToken; + + private: + S2C_PARAM(std::string, token); + S2C_PARAM(uint, expiration); + }; + +public: + class RegistrationData + { + enum class AuthenticationType { Token, PresharedSecret, COUNT }; + + public: + RegistrationData() = default; + RegistrationData(const RegistrationData &) = default; + RegistrationData(const std::string &_env_token); + + void serialize(cereal::JSONOutputArchive &out_ar) const; + void serialize(cereal::JSONInputArchive &in_ar); + + private: + AuthenticationType type; + std::string data; + }; + + FogAuthenticator() = default; + ~FogAuthenticator() = default; + + virtual void init(); + + static void preload(); + + Maybe authenticateAgent() override; + void setAddressExtenesion(const std::string &extension) override; + +protected: + class UserCredentials + { + 
public: + UserCredentials() = default; + UserCredentials(const std::string &client_id, const std::string &shared_secret); + + std::string getClientId() const { return client_id; } + std::string getSharedSecret() const { return shared_secret; } + + void serialize(cereal::JSONOutputArchive &out_ar) const; + void serialize(cereal::JSONInputArchive &in_ar); + + private: + std::string client_id; + std::string shared_secret; + }; + + void loadRequiredSecurityApps(); + Maybe getAccessToken(const UserCredentials &credentials) const; + Maybe + registerAgent( + const RegistrationData ®_data, + const std::string &name, + const std::string &type, + const std::string &platform, + const std::string &architecture + ) const; + + void initRestAPI(); + Maybe getCredentials(); + + bool saveCredentialsToFile(const UserCredentials &credentials) const; + Maybe getCredentialsFromFile() const; + Maybe getRegistrationData(); + + std::string base64Encode(const std::string &in) const; + std::string buildBasicAuthHeader(const std::string &username, const std::string &pass) const; + std::string buildOAuth2Header(const std::string &token) const; + + // This apps which the orchestrations requires them from Fog. + std::vector required_security_apps; + std::string fog_address_ex = ""; + std::string filesystem_prefix = ""; + std::string otp = ""; + Maybe credentials = genError("User credentials are empty"); + Maybe access_token = genError("Access token was not received yet"); + Maybe reg_data = genError("Registration data is empty"); + I_MainLoop::RoutineID routine = 0; +}; + +class AdditionalMetaData +{ +public: + AdditionalMetaData() = default; + AdditionalMetaData(const AdditionalMetaData &) = default; + + AdditionalMetaData & + operator<<(const std::pair &data) + { + additional_data.insert(data); + return *this; + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + for (auto &data : additional_data) { + out_ar(cereal::make_nvp(data.first, data.second)); + } + } + +private: + std::map additional_data; +}; + +class RegistrationRequest : public ClientRest +{ +private: + class MetaData + { + public: + MetaData() = default; + MetaData( + const std::string &_name, + const std::string &_type, + const std::string &_platform, + const std::string &_architecture, + const std::string &_agent_version) + : + name(_name), + type(_type), + platform(_platform), + architecture(_architecture), + agent_version(_agent_version) + { + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("agentName", name), + cereal::make_nvp("agentType", type), + cereal::make_nvp("platform", platform), + cereal::make_nvp("architecture", architecture), + cereal::make_nvp("agentVersion", agent_version), + cereal::make_nvp("additionalMetaData", additional_metadata) + ); + } + + AdditionalMetaData & + operator<<(const std::pair &data) + { + return additional_metadata << data; + } + + private: + std::string name; + std::string type; + std::string platform; + std::string architecture; + std::string agent_version; + AdditionalMetaData additional_metadata; + }; + +public: + RegistrationRequest( + const FogAuthenticator::RegistrationData ®_data, + const std::string &name, + const std::string &type, + const std::string &platform, + const std::string &architecture, + const std::string &agent_version) + : + authenticationData({ reg_data }), + metaData(MetaData(name, type, platform, architecture, agent_version)) + { + } + + AdditionalMetaData & + operator<<(const std::pair &data) + { + return metaData.get() << 
data; + } + + std::string getClientId() const { return client_id; } + std::string getSharedSecret() const { return shared_secret; } + std::string getAgentId() const { return agentId; } + std::string getProfileId() const { return profileId; } + std::string getTenantId() const { return tenantId; } + +private: + C2S_PARAM(std::vector, authenticationData); + C2S_PARAM(MetaData, metaData); + + S2C_PARAM(std::string, client_id); + S2C_PARAM(std::string, shared_secret); + S2C_PARAM(std::string, tenantId); + S2C_PARAM(std::string, profileId); + S2C_PARAM(std::string, agentId); +}; + +class PolicyVersionPatchRequest : public ClientRest +{ +public: + PolicyVersionPatchRequest(const std::string &_policy_version) + : + policy_version(_policy_version) + { + } + +private: + C2S_LABEL_PARAM(std::string, policy_version, "policyVersion"); +}; + +class TokenRequest : public ClientRest +{ +public: + std::string getAccessToken() const { return access_token; } + std::string getTokenType() const { return token_type; } + std::string getUserId() const { return user_id; } + std::string getScope() const { return scope; } + std::string getJTI() const { return jti; } + int getExpirationTime() const { return expires_in; } + +private: + S2C_PARAM(int, expires_in); + S2C_PARAM(std::string, jti); + S2C_PARAM(std::string, scope); + S2C_PARAM(std::string, token_type); + S2C_PARAM(std::string, access_token); + S2C_LABEL_PARAM(std::string, user_id, "uuid"); +}; + +#endif // __FOG_AUTHENTICATOR_H__ diff --git a/components/security_apps/orchestration/include/fog_communication.h b/components/security_apps/orchestration/include/fog_communication.h new file mode 100755 index 0000000..b93dac2 --- /dev/null +++ b/components/security_apps/orchestration/include/fog_communication.h @@ -0,0 +1,45 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
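// A minimal illustration of how the registration types declared above fit together, assuming the
// additional metadata pairs are plain string key/value pairs; every literal below is a
// placeholder rather than a value used by the orchestration code:
//
//     FogAuthenticator::RegistrationData reg_data("env-token-placeholder");
//     RegistrationRequest request(reg_data, "agent-name", "Embedded", "linux", "x86_64", "1.0.0");
//     request << std::make_pair("exampleKey", "exampleValue");
//     // ... after the REST round trip, the server-to-client fields become readable:
//     const std::string agent_id = request.getAgentId();    // also: getProfileId(), getTenantId(),
//                                                           // getClientId(), getSharedSecret()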
+ +#ifndef __FOG_COMMUNICATION_H__ +#define __FOG_COMMUNICATION_H__ + +#include +#include +#include +#include +#include "cereal/archives/json.hpp" + +#include "i_update_communication.h" +#include "fog_authenticator.h" +#include "i_orchestration_tools.h" +#include "i_agent_details.h" +#include "i_orchestration_status.h" +#include "i_messaging.h" +#include "i_mainloop.h" +#include "i_encryptor.h" +#include "i_details_resolver.h" +#include "i_rest_api.h" +#include "i_time_get.h" +#include "i_encryptor.h" +#include "maybe_res.h" + +class FogCommunication : public FogAuthenticator +{ +public: + Maybe getUpdate(CheckUpdateRequest &request) override; + Maybe downloadAttributeFile(const GetResourceFile &resourse_file) override; + Maybe sendPolicyVersion(const std::string &policy_version) const override; +}; + +#endif // __FOG_COMMUNICATION_H__ diff --git a/components/security_apps/orchestration/include/get_status_rest.h b/components/security_apps/orchestration/include/get_status_rest.h new file mode 100755 index 0000000..ba3978d --- /dev/null +++ b/components/security_apps/orchestration/include/get_status_rest.h @@ -0,0 +1,91 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __GET_STATUS_RES_H__ +#define __GET_STATUS_RES_H__ + +#include "i_messaging_downloader.h" +#include "i_messaging.h" +#include "i_mainloop.h" +#include "i_shell_cmd.h" +#include "i_encryptor.h" +#include "i_orchestration_status.h" +#include "i_rest_api.h" +#include "i_orchestration_tools.h" +#include "i_downloader.h" +#include "i_service_controller.h" +#include "i_manifest_controller.h" +#include "i_update_communication.h" +#include "i_details_resolver.h" +#include "i_shell_cmd.h" +#include "i_agent_details.h" +#include "i_environment.h" +#include "i_tenant_manager.h" +#include "i_package_handler.h" +#include "component.h" + +class getStatusRest : public ServerRest +{ +public: + void + doCall() override + { + auto i_orch_status = Singleton::Consume::by(); + + policies = ""; + settings = ""; + for (auto &policy: i_orch_status->getServicePolicies()) { + policies = policies.get() + "\n " + policy.first + ": " + policy.second; + } + for (auto &setting: i_orch_status->getServiceSettings()) { + settings = settings.get() + "\n " + setting.first + ": " + setting.second; + } + + last_update_attempt = i_orch_status->getLastUpdateAttempt(); + last_update = i_orch_status->getUpdateTime(); + last_update_status = i_orch_status->getUpdateStatus(); + policy_version = i_orch_status->getPolicyVersion(); + last_policy_update = i_orch_status->getLastPolicyUpdate(); + last_manifest_update = i_orch_status->getLastManifestUpdate(); + last_settings_update = i_orch_status->getLastSettingsUpdate(); + registration_status = i_orch_status->getRegistrationStatus(); + manifest_status = i_orch_status->getManifestStatus(); + upgrade_mode = i_orch_status->getUpgradeMode(); + fog_address = i_orch_status->getFogAddress(); + agent_id = i_orch_status->getAgentId(); + profile_id = i_orch_status->getProfileId(); + tenant_id = 
i_orch_status->getTenantId(); + registration_details = i_orch_status->getRegistrationDetails(); + } + +private: + S2C_LABEL_PARAM(std::string, last_update_attempt, "Last update attempt"); + S2C_LABEL_PARAM(std::string, last_update, "Last update"); + S2C_LABEL_PARAM(std::string, last_update_status, "Last update status"); + S2C_LABEL_PARAM(std::string, policy_version, "Policy version"); + S2C_LABEL_PARAM(std::string, last_policy_update, "Last policy update"); + S2C_LABEL_PARAM(std::string, last_manifest_update, "Last manifest update"); + S2C_LABEL_PARAM(std::string, last_settings_update, "Last settings update"); + S2C_LABEL_PARAM(std::string, registration_status, "Registration status"); + S2C_LABEL_PARAM(std::string, manifest_status, "Manifest status"); + S2C_LABEL_PARAM(std::string, upgrade_mode, "Upgrade mode"); + S2C_LABEL_PARAM(std::string, fog_address, "Fog address"); + S2C_LABEL_PARAM(std::string, agent_id, "Agent ID"); + S2C_LABEL_PARAM(std::string, profile_id, "Profile ID"); + S2C_LABEL_PARAM(std::string, tenant_id, "Tenant ID"); + S2C_LABEL_PARAM(std::string, registration_details, "Registration details"); + S2C_LABEL_PARAM(std::string, policies, "Service policy"); + S2C_LABEL_PARAM(std::string, settings, "Service settings"); +}; + +#endif // __GET_STATUS_RES_H__ diff --git a/components/security_apps/orchestration/include/hybrid_communication.h b/components/security_apps/orchestration/include/hybrid_communication.h new file mode 100755 index 0000000..9c504aa --- /dev/null +++ b/components/security_apps/orchestration/include/hybrid_communication.h @@ -0,0 +1,58 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __HYBRID_COMMUNICATION_H__ +#define __HYBRID_COMMUNICATION_H__ + +#include +#include +#include +#include +#include "cereal/archives/json.hpp" + +#include "singleton.h" +#include "i_update_communication.h" +#include "fog_authenticator.h" +#include "i_k8s_policy_gen.h" +#include "i_orchestration_tools.h" +#include "i_agent_details.h" +#include "i_orchestration_status.h" +#include "i_messaging.h" +#include "i_mainloop.h" +#include "i_encryptor.h" +#include "i_details_resolver.h" +#include "i_rest_api.h" +#include "i_time_get.h" +#include "i_encryptor.h" +#include "maybe_res.h" + +class HybridCommunication + : + public FogAuthenticator, + Singleton::Consume +{ +public: + virtual void init() override; + Maybe getUpdate(CheckUpdateRequest &request) override; + Maybe downloadAttributeFile(const GetResourceFile &resourse_file) override; + Maybe sendPolicyVersion(const std::string &policy_version) const override; + std::string getChecksum(const std::string &policy_version); + +private: + Maybe getNewVersion(); + + std::string curr_version; + std::string curr_policy; +}; + +#endif // __HYBRID_COMMUNICATION_H__ diff --git a/components/security_apps/orchestration/include/local_communication.h b/components/security_apps/orchestration/include/local_communication.h new file mode 100755 index 0000000..d7f625a --- /dev/null +++ b/components/security_apps/orchestration/include/local_communication.h @@ -0,0 +1,43 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __LOCAL_COMMUNICATION_H__ +#define __LOCAL_COMMUNICATION_H__ + +#include "i_update_communication.h" +#include "i_orchestration_tools.h" +#include "maybe_res.h" + +class LocalCommunication + : + public I_UpdateCommunication, + Singleton::Consume +{ +public: + static void preload(); + + void init(); + + Maybe authenticateAgent() override; + Maybe getUpdate(CheckUpdateRequest &request) override; + + Maybe downloadAttributeFile(const GetResourceFile &resourse_file) override; + void setAddressExtenesion(const std::string &extension) override; + Maybe sendPolicyVersion(const std::string &policy_version) const override; + +private: + std::string getChecksum(const std::string &file_path); + std::string filesystem_prefix = ""; +}; + +#endif // __LOCAL_COMMUNICATION_H__ diff --git a/components/security_apps/orchestration/include/mock/mock_details_resolver.h b/components/security_apps/orchestration/include/mock/mock_details_resolver.h new file mode 100644 index 0000000..cc0cfd5 --- /dev/null +++ b/components/security_apps/orchestration/include/mock/mock_details_resolver.h @@ -0,0 +1,46 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __MOCK_DETAILS_RESOLVER_H__ +#define __MOCK_DETAILS_RESOLVER_H__ + +#include + +#include "i_details_resolver.h" +#include "cptest.h" +#include "maybe_res.h" + +std::ostream & +operator<<(std::ostream &os, const Maybe> &) +{ + return os; +} + +class MockDetailsResolver + : + public Singleton::Provide::From> +{ +public: + MOCK_METHOD0(getHostname, Maybe()); + MOCK_METHOD0(getPlatform, Maybe()); + MOCK_METHOD0(getArch, Maybe()); + MOCK_METHOD0(getAgentVersion, std::string()); + MOCK_METHOD0(isReverseProxy, bool()); + MOCK_METHOD0(isKernelVersion3OrHigher, bool()); + MOCK_METHOD0(isGwNotVsx, bool()); + MOCK_METHOD0(getResolvedDetails, std::map()); + MOCK_METHOD0(isVersionEqualOrAboveR8110, bool()); + MOCK_METHOD0(parseNginxMetadata, Maybe>()); +}; + +#endif // __MOCK_DETAILS_RESOLVER_H__ diff --git a/components/security_apps/orchestration/include/mock/mock_downloader.h b/components/security_apps/orchestration/include/mock/mock_downloader.h new file mode 100755 index 0000000..8e7479b --- /dev/null +++ b/components/security_apps/orchestration/include/mock/mock_downloader.h @@ -0,0 +1,42 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __MOCK_DOWNLOADER_H__ +#define __MOCK_DOWNLOADER_H__ + +#include "cptest.h" +#include "i_downloader.h" + +#include + +class MockDownloader : + public Singleton::Provide::From> +{ +public: + MOCK_CONST_METHOD3( + downloadFileFromFog, + Maybe(const std::string &, Package::ChecksumTypes, const GetResourceFile &) + ); + + MOCK_CONST_METHOD2( + downloadVirtualFileFromFog, + Maybe>(const GetResourceFile &, Package::ChecksumTypes) + ); + + MOCK_CONST_METHOD4( + downloadFileFromURL, + Maybe(const std::string &, const std::string &, Package::ChecksumTypes, const std::string &) + ); +}; + +#endif // __MOCK_DOWNLOADER_H__ diff --git a/components/security_apps/orchestration/include/mock/mock_manifest_controller.h b/components/security_apps/orchestration/include/mock/mock_manifest_controller.h new file mode 100755 index 0000000..ea147ad --- /dev/null +++ b/components/security_apps/orchestration/include/mock/mock_manifest_controller.h @@ -0,0 +1,28 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __MOCK_MANIFEST_CONTROLLER_H__ +#define __MOCK_MANIFEST_CONTROLLER_H__ + +#include "i_manifest_controller.h" +#include "cptest.h" + +class MockManifestController : + public Singleton::Provide::From> +{ +public: + MOCK_METHOD1(updateManifest, bool(const std::string &)); + MOCK_METHOD0(loadAfterSelfUpdate, bool()); +}; + +#endif // __MOCK_MANIFEST_CONTROLLER_H__ diff --git a/components/security_apps/orchestration/include/mock/mock_messaging_downloader.h b/components/security_apps/orchestration/include/mock/mock_messaging_downloader.h new file mode 100755 index 0000000..61d405a --- /dev/null +++ b/components/security_apps/orchestration/include/mock/mock_messaging_downloader.h @@ -0,0 +1,39 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __MOCK_MESSAGING_DOWNLOADER_H__ +#define __MOCK_MESSAGING_DOWNLOADER_H__ + +#include "cptest.h" +#include + +#include "i_messaging_downloader.h" + +class MockMessagingDownloader + : + public Singleton::Provide::From> +{ +public: + MOCK_METHOD4( + downloadFile, + bool( + const std::string &, + const std::string &, + OnCompleteCB, + const unsigned int + ) + ); +}; + + +#endif // __MOCK_MESSAGING_DOWNLOADER_H__ diff --git a/components/security_apps/orchestration/include/mock/mock_orchestration_status.h b/components/security_apps/orchestration/include/mock/mock_orchestration_status.h new file mode 100644 index 0000000..740cfa5 --- /dev/null +++ b/components/security_apps/orchestration/include/mock/mock_orchestration_status.h @@ -0,0 +1,63 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
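// The mock headers in this directory share one pattern: deriving from
// Singleton::Provide<Interface>::From<MockProvider<...>> registers the test-local object as the
// singleton provider for that interface, so production code that resolves the interface through
// Singleton::Consume ends up talking to the mock. A purely illustrative use inside a test
// fixture, with a placeholder address:
//
//     StrictMock<MockOrchestrationStatus> mock_status;
//     EXPECT_CALL(mock_status, setFogAddress("https://fog.example.com"));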
+ +#ifndef __MOCK_ORCHESTRATION_STATUS_H__ +#define __MOCK_ORCHESTRATION_STATUS_H__ + +#include "i_orchestration_status.h" +#include "cptest.h" + +class MockOrchestrationStatus + : + public Singleton::Provide::From> +{ +public: + MOCK_METHOD0(writeStatusToFile, void()); + MOCK_METHOD0(recoverFields, void()); + MOCK_METHOD1(setUpgradeMode, void(const std::string &)); + MOCK_METHOD1(setAgentType, void(const std::string &)); + MOCK_METHOD1(setRegistrationStatus, void(const std::string &)); + MOCK_METHOD1(setFogAddress, void(const std::string &)); + MOCK_METHOD1(setPolicyVersion, void(const std::string &)); + MOCK_METHOD1(setIsConfigurationUpdated, void(EnumArray config_types)); + MOCK_METHOD0(setLastUpdateAttempt, void()); + MOCK_METHOD3(setAgentDetails, void(const std::string &, const std::string &, const std::string &)); + MOCK_METHOD3(setFieldStatus, + void(const OrchestrationStatusFieldType &, const OrchestrationStatusResult &, const std::string &)); + MOCK_METHOD4(setRegistrationDetails, + void(const std::string &, const std::string &, const std::string &, const std::string &) + ); + MOCK_METHOD3(setServiceConfiguration, + void(const std::string &, const std::string &, const OrchestrationStatusConfigType &) + ); + MOCK_CONST_METHOD0(getLastUpdateAttempt, const std::string&()); + MOCK_CONST_METHOD0(getUpdateStatus, const std::string&()); + MOCK_CONST_METHOD0(getUpdateTime, const std::string&()); + MOCK_CONST_METHOD0(getLastManifestUpdate, const std::string&()); + MOCK_CONST_METHOD0(getPolicyVersion, const std::string&()); + MOCK_CONST_METHOD0(getLastPolicyUpdate, const std::string&()); + MOCK_CONST_METHOD0(getLastSettingsUpdate, const std::string&()); + MOCK_CONST_METHOD0(getUpgradeMode, const std::string&()); + MOCK_CONST_METHOD0(getFogAddress, const std::string&()); + MOCK_CONST_METHOD0(getRegistrationStatus, const std::string&()); + MOCK_CONST_METHOD0(getAgentId, const std::string&()); + MOCK_CONST_METHOD0(getProfileId, const std::string&()); + MOCK_CONST_METHOD0(getTenantId, const std::string&()); + MOCK_CONST_METHOD0(getManifestStatus, const std::string&()); + MOCK_CONST_METHOD0(getManifestError, const std::string&()); + MOCK_CONST_METHOD0(getServicePolicies, const std::map&()); + MOCK_CONST_METHOD0(getServiceSettings, const std::map&()); + MOCK_CONST_METHOD0(getRegistrationDetails, const std::string()); +}; + +#endif // __MOCK_ORCHESTRATION_STATUS_H__ diff --git a/components/security_apps/orchestration/include/mock/mock_orchestration_tools.h b/components/security_apps/orchestration/include/mock/mock_orchestration_tools.h new file mode 100755 index 0000000..3738aae --- /dev/null +++ b/components/security_apps/orchestration/include/mock/mock_orchestration_tools.h @@ -0,0 +1,58 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
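// The ostream operator<< templates further down in this header appear to exist only so that
// googletest can format vector- and map-typed arguments when an expectation on
// MockOrchestrationTools fails; they intentionally print nothing and simply return the stream
// unchanged.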
+ +#ifndef __MOCK_ORCHESTRATION_TOOLS_H__ +#define __MOCK_ORCHESTRATION_TOOLS_H__ + +#include "cptest.h" +#include "i_orchestration_tools.h" + +template +std::ostream & +operator<<(std::ostream &os, const std::vector &) +{ + return os; +} + +template +std::ostream & +operator<<(std::ostream &os, const std::map &) +{ + return os; +} + +class MockOrchestrationTools + : + public Singleton::Provide::From> +{ +public: + MOCK_CONST_METHOD1(loadPackagesFromJson, Maybe>(const std::string &)); + MOCK_CONST_METHOD2(packagesToJsonFile, bool(const std::map &, const std::string &)); + MOCK_CONST_METHOD1(isNonEmptyFile, bool(const std::string &)); + MOCK_CONST_METHOD1(readFile, Maybe(const std::string &)); + MOCK_CONST_METHOD2(writeFile, bool(const std::string &, const std::string &)); + MOCK_CONST_METHOD1(removeFile, bool(const std::string &)); + MOCK_CONST_METHOD2(copyFile, bool(const std::string &, const std::string &)); + MOCK_CONST_METHOD2(calculateChecksum, Maybe(Package::ChecksumTypes, const std::string &)); + MOCK_CONST_METHOD2( + jsonObjectSplitter, + Maybe>(const std::string &, const std::string &) + ); + MOCK_CONST_METHOD1(doesFileExist, bool(const std::string &)); + MOCK_CONST_METHOD1(createDirectory, bool(const std::string &)); + MOCK_CONST_METHOD1(doesDirectoryExist, bool(const std::string &)); + MOCK_CONST_METHOD1(executeCmd, bool(const std::string &)); + MOCK_CONST_METHOD1(base64Encode, std::string(const std::string &)); + MOCK_CONST_METHOD1(base64Decode, std::string(const std::string &)); +}; +#endif // __MOCK_ORCHESTRATION_TOOLS_H__ diff --git a/components/security_apps/orchestration/include/mock/mock_package_handler.h b/components/security_apps/orchestration/include/mock/mock_package_handler.h new file mode 100755 index 0000000..e47dc3a --- /dev/null +++ b/components/security_apps/orchestration/include/mock/mock_package_handler.h @@ -0,0 +1,31 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
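The tools interface mocked above returns Maybe-wrapped values for operations that can fail (readFile, calculateChecksum, jsonObjectSplitter, and so on). As a rough sketch of how a caller might consume such a result, assuming Maybe exposes the ok()/unpack() pair used elsewhere in this patch, and with the interface name, file path, and helper function all hypothetical:

    // Illustrative only: `tools` would normally be obtained from the orchestration
    // environment rather than constructed directly, and the path is made up.
    std::string
    readPolicyOrEmpty(I_OrchestrationTools *tools)
    {
        Maybe<std::string> content = tools->readFile("/etc/cp/conf/example-policy.json");
        if (!content.ok()) return "";
        return content.unpack();
    }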
+ +#ifndef __MOCK_PACKAGE_HANDLER_H__ +#define __MOCK_PACKAGE_HANDLER_H__ + +#include "i_package_handler.h" +#include "cptest.h" + +class MockPackageHandler : + public Singleton::Provide::From> +{ +public: + MOCK_CONST_METHOD3(installPackage, bool(const std::string &, const std::string &, bool)); + MOCK_CONST_METHOD3(uninstallPackage, bool(const std::string &, const std::string &, const std::string &)); + MOCK_CONST_METHOD2(preInstallPackage, bool(const std::string &, const std::string &)); + MOCK_CONST_METHOD2(postInstallPackage, bool(const std::string &, const std::string &)); + MOCK_CONST_METHOD2(updateSavedPackage, bool(const std::string &, const std::string &)); + MOCK_CONST_METHOD2(shouldInstallPackage, bool(const std::string &, const std::string &)); +}; +#endif // __MOCK_PACKAGE_HANDLER_H__ diff --git a/components/security_apps/orchestration/include/mock/mock_service_controller.h b/components/security_apps/orchestration/include/mock/mock_service_controller.h new file mode 100755 index 0000000..d67e384 --- /dev/null +++ b/components/security_apps/orchestration/include/mock/mock_service_controller.h @@ -0,0 +1,62 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __MOCK_SERVICE_CONTROLLER_H__ +#define __MOCK_SERVICE_CONTROLLER_H__ + +#include "i_service_controller.h" + +#include "cptest.h" +#include + +class MockServiceController : + public Singleton::Provide::From> + +{ +public: + MOCK_CONST_METHOD0(getPolicyVersion, const std::string &()); + + MOCK_CONST_METHOD0(getUpdatePolicyVersion, const std::string &()); + + MOCK_METHOD4( + updateServiceConfiguration, + bool( + const std::string &new_policy_path, + const std::string &new_settings_path, + const std::vector &new_data_files, + const std::string &tenant_id + ) + ); + + MOCK_METHOD1(isServiceInstalled, bool(const std::string &service_name)); + + MOCK_METHOD4( + registerServiceConfig, + void( + const std::string &service_name, + PortNumber listening_port, + const std::vector &expected_configurations, + const std::string &id + ) + ); + + typedef std::map ServicePortMap; + MOCK_METHOD0(getServiceToPortMap, ServicePortMap()); + MOCK_METHOD2(updateReconfStatus, void(int id, ReconfStatus status)); + MOCK_METHOD4( + startReconfStatus, + void(int id, ReconfStatus status, const std::string &serivce_name, const std::string &service_id) + ); +}; + +#endif // __MOCK_SERVICE_CONTROLLER_H__ diff --git a/components/security_apps/orchestration/include/mock/mock_update_communication.h b/components/security_apps/orchestration/include/mock/mock_update_communication.h new file mode 100755 index 0000000..3557c70 --- /dev/null +++ b/components/security_apps/orchestration/include/mock/mock_update_communication.h @@ -0,0 +1,37 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __MOCK_UPDATE_COMMUNICATION_H__ +#define __MOCK_UPDATE_COMMUNICATION_H__ + +#include "i_update_communication.h" +#include "cptest.h" + +std::ostream & +operator<<(std::ostream &os, const CheckUpdateRequest &) +{ + return os; +} + +class MockUpdateCommunication : + public Singleton::Provide::From> +{ +public: + MOCK_METHOD0(authenticateAgent, Maybe()); + MOCK_METHOD1(getUpdate, Maybe(CheckUpdateRequest &)); + MOCK_METHOD1(downloadAttributeFile, Maybe(const GetResourceFile &)); + MOCK_METHOD1(setAddressExtenesion, void(const std::string &)); + MOCK_CONST_METHOD1(sendPolicyVersion, Maybe(const std::string &)); +}; + +#endif // __MOCK_UPDATE_COMMUNICATION_H__ diff --git a/components/security_apps/orchestration/include/orchestration_policy.h b/components/security_apps/orchestration/include/orchestration_policy.h new file mode 100755 index 0000000..581cfaf --- /dev/null +++ b/components/security_apps/orchestration/include/orchestration_policy.h @@ -0,0 +1,38 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __ORCHESTRATION_POLICY_H__ +#define __ORCHESTRATION_POLICY_H__ + +#include +#include "cereal/archives/json.hpp" + +class OrchestrationPolicy +{ +public: + const std::string & getFogAddress() const; + const unsigned long & getSleepInterval() const; + const unsigned long & getErrorSleepInterval() const; + + void serialize(cereal::JSONInputArchive & archive); + + bool operator==(const OrchestrationPolicy &other) const; + bool operator!=(const OrchestrationPolicy &other) const; + +private: + std::string fog_address; + unsigned long sleep_interval; + unsigned long error_sleep_interval; +}; + +#endif // __ORCHESTRATION_POLICY_H__ diff --git a/components/security_apps/orchestration/k8s_policy_gen/CMakeLists.txt b/components/security_apps/orchestration/k8s_policy_gen/CMakeLists.txt new file mode 100644 index 0000000..63c8a99 --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/CMakeLists.txt @@ -0,0 +1 @@ +add_library(k8s_policy_gen k8s_policy_gen.cc) diff --git a/components/security_apps/orchestration/k8s_policy_gen/include/appsec_practice_section.h b/components/security_apps/orchestration/k8s_policy_gen/include/appsec_practice_section.h new file mode 100644 index 0000000..5174ead --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/include/appsec_practice_section.h @@ -0,0 +1,768 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __APPSEC_PRACTICE_SECTION_H__ +#define __APPSEC_PRACTICE_SECTION_H__ + +#include +#include +#include +#include + +#include "config.h" +#include "debug.h" +#include "customized_cereal_map.h" +#include "k8s_policy_common.h" +#include "triggers_section.h" +#include "trusted_sources_section.h" + +USE_DEBUG_FLAG(D_K8S_POLICY); + +class AppSecWebBotsURI +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec Web Bots URI"; + parseAppsecJSONKey("uri", uri, archive_in); + } + + const std::string & getURI() const { return uri; } + +private: + std::string uri; +}; + +std::ostream & +operator<<(std::ostream &os, const AppSecWebBotsURI &obj) +{ + os << obj.getURI(); + return os; +} + +std::ostream & +operator<<(std::ostream &os, const std::vector &obj) +{ + os << "[" << std::endl; + makeSeparatedStr(obj, ","); + os << std::endl << "]"; + return os; +} + +class AppSecPracticeAntiBot +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec Web Bots"; + parseAppsecJSONKey>("injected-URIs", injected_uris, archive_in); + parseAppsecJSONKey>("validated-URIs", validated_uris, archive_in); + parseAppsecJSONKey("override-mode", override_mode, archive_in, "Inactive"); + } + + void + save(cereal::JSONOutputArchive &out_ar) const + { + std::vector injected; + std::vector validated; + for (const AppSecWebBotsURI &uri : getInjectedURIs()) injected.push_back(uri.getURI()); + for (const AppSecWebBotsURI &uri : getValidatedURIs()) injected.push_back(uri.getURI()); + out_ar( + cereal::make_nvp("injected", injected), + cereal::make_nvp("validated", validated) + ); + } + + const std::vector & getInjectedURIs() const { return injected_uris; } + const std::vector & getValidatedURIs() const { return validated_uris; } + const std::string & getOverrideMode() const { return override_mode; } + +private: + std::string override_mode; + std::vector injected_uris; + std::vector validated_uris; +}; + +std::ostream & +operator<<(std::ostream &os, const AppSecPracticeAntiBot &obj) +{ + os + << "injected-URIs: " + << obj.getInjectedURIs() + << " validated-URIs: " + << obj.getValidatedURIs() + << ", override_mode: " + << obj.getOverrideMode(); + return os; +} + +class AppSecWebAttackProtections +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec Web Attack Protections"; + parseAppsecJSONKey("csrf-protection", csrf_protection, archive_in, "Inactive"); + parseAppsecJSONKey("error-disclosure", error_disclosure, archive_in, "Inactive"); + parseAppsecJSONKey("open-redirect", open_redirect, archive_in, "Inactive"); + parseAppsecJSONKey("non-valid-http-methods", non_valid_http_methods, archive_in, false); + } + + const std::string + getCsrfProtectionMode() const + { + if (key_to_practices_val.find(csrf_protection) == key_to_practices_val.end()) { + dbgError(D_K8S_POLICY) + << "Failed to find a value for " + << csrf_protection + << ". 
Setting CSRF protection to Inactive"; + return "Inactive"; + } + return key_to_practices_val.at(csrf_protection); + } + + const std::string & getErrorDisclosureMode() const { return error_disclosure; } + + bool getNonValidHttpMethods() const { return non_valid_http_methods; } + + const std::string + getOpenRedirectMode() const + { + if (key_to_practices_val.find(open_redirect) == key_to_practices_val.end()) { + dbgError(D_K8S_POLICY) + << "Failed to find a value for " + << open_redirect + << ". Setting Open Redirect mode to Inactive"; + return "Inactive"; + } + return key_to_practices_val.at(open_redirect); + } + +private: + std::string csrf_protection; + std::string open_redirect; + std::string error_disclosure; + bool non_valid_http_methods; +}; + +std::ostream & +operator<<(std::ostream &os, const AppSecWebAttackProtections &obj) +{ + os + << " csrf-protection: " + << obj.getCsrfProtectionMode() + << " error-disclosure: " + << obj.getErrorDisclosureMode() + << " non-valid-http-methods: " + << obj.getNonValidHttpMethods() + << " open-redirect: " + << obj.getOpenRedirectMode(); + return os; +} + +class AppSecPracticeWebAttacks +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec practice spec"; + parseAppsecJSONKey("protections", protections, archive_in); + parseAppsecJSONKey("minimum-confidence", minimum_confidence, archive_in, "critical"); + parseAppsecJSONKey("override-mode", mode, archive_in, "Unset"); + parseAppsecJSONKey("max-body-size-kb", max_body_size_kb, archive_in, 1000000); + parseAppsecJSONKey("max-header-size-bytes", max_header_size_bytes, archive_in, 102400); + parseAppsecJSONKey("max-object-depth", max_object_depth, archive_in, 40); + parseAppsecJSONKey("max-url-size-bytes", max_url_size_bytes, archive_in, 32768); + } + + int getMaxBodySizeKb() const { return max_body_size_kb; } + int getMaxHeaderSizeBytes() const { return max_header_size_bytes; } + int getMaxObjectDepth() const { return max_object_depth; } + int getMaxUrlSizeBytes() const { return max_url_size_bytes; } + const std::string & getMinimumConfidence() const { return minimum_confidence; } + const AppSecWebAttackProtections & getprotections() const { return protections; } + + const std::string & + getMode(const std::string &default_mode = "Inactive") const + { + if (mode == "Unset" || (key_to_practices_val.find(mode) == key_to_practices_val.end())) { + dbgError(D_K8S_POLICY) << "Couldn't find a value for key: " << mode << ". 
Returning " << default_mode; + return default_mode; + } + return key_to_practices_val.at(mode); + } + +private: + int max_body_size_kb; + int max_header_size_bytes; + int max_object_depth; + int max_url_size_bytes; + std::string minimum_confidence; + std::string mode; + AppSecWebAttackProtections protections; +}; + +std::ostream & +operator<<(std::ostream &os, const AppSecPracticeWebAttacks &obj) +{ + os + << "mode: " + << obj.getMode() + << " max-body-size-kb: " + << obj.getMaxBodySizeKb() + << " max-header-size-bytes: " + << obj.getMaxHeaderSizeBytes() + << " max-object-depth: " + << obj.getMaxObjectDepth() + << " max-url-size-bytes: " + << obj.getMaxUrlSizeBytes() + << " minimum-confidence: " + << obj.getMinimumConfidence() + << " protections: " + << obj.getprotections(); + return os; +} + +class AppSecPracticeSnortSignatures +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec Snort Signatures practice"; + parseAppsecJSONKey("override-mode", override_mode, archive_in, "Inactive"); + parseAppsecJSONKey>("configmap", config_map, archive_in); + } + + const std::string & getOverrideMode() const { return override_mode; } + + const std::vector & getConfigMap() const { return config_map; } + +private: + std::string override_mode; + std::vector config_map; +}; + +std::ostream & +operator<<(std::ostream &os, const AppSecPracticeSnortSignatures &obj) +{ + os + << "override mode: " + << obj.getOverrideMode() + << ". Config map: [" << std::endl + << makeSeparatedStr(obj.getConfigMap(), ",") + << std::endl << "]"; + return os; +} + +class AppSecPracticeOpenSchemaAPI +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSecPracticeOpenSchemaAPI practice"; + parseAppsecJSONKey("override-mode", override_mode, archive_in, "Inactive"); + parseAppsecJSONKey>("configmap", config_map, archive_in); + } + + const std::string & getOverrideMode() const { return override_mode; } + + const std::vector & getConfigMap() const { return config_map; } + +private: + std::string override_mode; + std::vector config_map; +}; + +std::ostream & +operator<<(std::ostream &os, const AppSecPracticeOpenSchemaAPI &obj) +{ + os + << "override mode: " + << obj.getOverrideMode() + << ". 
Config map: [" << std::endl + << makeSeparatedStr(obj.getConfigMap(), ",") + << std::endl << "]"; + return os; +} + +class AppSecPracticeSpec +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec practice spec"; + parseAppsecJSONKey( + "openapi-schema-validation", + openapi_schema_validation, + archive_in + ); + parseAppsecJSONKey("snort-signatures", snort_signatures, archive_in); + parseAppsecJSONKey("web-attacks", web_attacks, archive_in); + parseAppsecJSONKey("anti-bot", anti_bot, archive_in); + } + + const AppSecPracticeOpenSchemaAPI & getOpenSchemaValidation() const { return openapi_schema_validation; } + const AppSecPracticeSnortSignatures & getSnortSignatures() const { return snort_signatures; } + const AppSecPracticeWebAttacks & getWebAttacks() const { return web_attacks; } + const AppSecPracticeAntiBot & getAntiBot() const { return anti_bot; } + +private: + AppSecPracticeOpenSchemaAPI openapi_schema_validation; + AppSecPracticeSnortSignatures snort_signatures; + AppSecPracticeWebAttacks web_attacks; + AppSecPracticeAntiBot anti_bot; +}; + +std::ostream & +operator<<(std::ostream &os, const AppSecPracticeSpec &obj) +{ + os + << "Open Schema API:" << std::endl + << obj.getOpenSchemaValidation() + << std::endl << "Snort Signatures:" << std::endl + << obj.getOpenSchemaValidation() + << std::endl << "Web Attacks:" << std::endl + << obj.getWebAttacks() + << std::endl << "Web Bots:" << std::endl + << obj.getAntiBot(); + return os; +} + +class PracticeAdvancedConfig +{ +public: + PracticeAdvancedConfig(const AppSecPracticeSpec &parsed_appsec_spec) + : + http_header_max_size(parsed_appsec_spec.getWebAttacks().getMaxHeaderSizeBytes()), + http_illegal_methods_allowed(0), + http_request_body_max_size(parsed_appsec_spec.getWebAttacks().getMaxBodySizeKb()), + json_max_object_depth(parsed_appsec_spec.getWebAttacks().getMaxObjectDepth()), + url_max_size(parsed_appsec_spec.getWebAttacks().getMaxUrlSizeBytes()) + {} + + void + save(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("httpHeaderMaxSize", http_header_max_size), + cereal::make_nvp("httpIllegalMethodsAllowed", http_illegal_methods_allowed), + cereal::make_nvp("httpRequestBodyMaxSize", http_request_body_max_size), + cereal::make_nvp("jsonMaxObjectDepth", json_max_object_depth), + cereal::make_nvp("urlMaxSize", url_max_size) + ); + } + + void setIllegalMethodsAllowed(int val) { http_illegal_methods_allowed = val; }; + +private: + int http_header_max_size; + int http_illegal_methods_allowed; + int http_request_body_max_size; + int json_max_object_depth; + int url_max_size; +}; + +class TriggersInWaapSection +{ +public: + TriggersInWaapSection(const LogTriggerSection &log_section) + : + trigger_type("log"), + id(log_section.getTriggerId()), + name(log_section.getTriggerName()), + log(log_section) + {} + + void + save(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("$triggerType", trigger_type), + cereal::make_nvp("id", id), + cereal::make_nvp("name", name), + cereal::make_nvp("log", log) + ); + } + +private: + std::string trigger_type; + std::string id; + std::string name; + LogTriggerSection log; +}; + +class AppSecOverride +{ +public: + AppSecOverride(const SourcesIdentifiers &parsed_trusted_sources) + { + std::string source_ident = parsed_trusted_sources.getSourceIdent(); + std::map behavior = {{"httpSourceId", source_ident}}; + parsed_behavior.push_back(behavior); + parsed_match = {{"operator", "BASIC"}, {"tag", "sourceip"}, 
{"value", "0.0.0.0/0"}}; + } + + void + save(cereal::JSONOutputArchive &out_ar) const + { + std::string parameter_type = "TrustedSource"; + out_ar( + cereal::make_nvp("parsedBehavior", parsed_behavior), + cereal::make_nvp("parsedMatch", parsed_match) + ); + } +private: + std::vector> parsed_behavior; + std::map parsed_match; +}; + +class WebAppSection +{ +public: + WebAppSection( + const std::string &_application_urls, + const std::string &_asset_id, + const std::string &_asset_name, + const std::string &_rule_id, + const std::string &_rule_name, + const std::string &_practice_id, + const std::string &_practice_name, + const AppSecPracticeSpec &parsed_appsec_spec, + const LogTriggerSection &parsed_log_trigger, + const std::string &default_mode, + const AppSecTrustedSources &parsed_trusted_sources) + : + application_urls(_application_urls), + asset_id(_asset_id), + asset_name(_asset_name), + rule_id(_rule_id), + rule_name(_rule_name), + practice_id(_practice_id), + practice_name(_practice_name), + context("practiceId(" + practice_id +")"), + web_attack_mitigation_severity(parsed_appsec_spec.getWebAttacks().getMinimumConfidence()), + web_attack_mitigation_mode(parsed_appsec_spec.getWebAttacks().getMode(default_mode)), + practice_advanced_config(parsed_appsec_spec), + anti_bots(parsed_appsec_spec.getAntiBot()), + trusted_sources({parsed_trusted_sources}) + { + web_attack_mitigation = true; + web_attack_mitigation_action = + web_attack_mitigation_severity == "critical" ? "low" : + web_attack_mitigation_severity == "high" ? "balanced" : + web_attack_mitigation_severity == "medium" ? "high" : + "Error"; + + triggers.push_back(TriggersInWaapSection(parsed_log_trigger)); + for (const SourcesIdentifiers &source_ident : parsed_trusted_sources.getSourcesIdentifiers()) { + overrides.push_back(AppSecOverride(source_ident)); + } + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + std::string disabled_str = "Disabled"; + std::string detect_str = "Detect"; + std::vector empty_list; + out_ar( + cereal::make_nvp("context", context), + cereal::make_nvp("webAttackMitigation", web_attack_mitigation), + cereal::make_nvp("webAttackMitigationSeverity", web_attack_mitigation_severity), + cereal::make_nvp("webAttackMitigationAction", web_attack_mitigation_action), + cereal::make_nvp("webAttackMitigationMode", web_attack_mitigation_mode), + cereal::make_nvp("practiceAdvancedConfig", practice_advanced_config), + cereal::make_nvp("csrfProtection", disabled_str), + cereal::make_nvp("openRedirect", disabled_str), + cereal::make_nvp("errorDisclosure", disabled_str), + cereal::make_nvp("practiceId", practice_id), + cereal::make_nvp("practiceName", practice_name), + cereal::make_nvp("assetId", asset_id), + cereal::make_nvp("assetName", asset_name), + cereal::make_nvp("ruleId", rule_id), + cereal::make_nvp("ruleName", rule_name), + cereal::make_nvp("triggers", triggers), + cereal::make_nvp("applicationUrls", application_urls), + cereal::make_nvp("overrides", overrides), + cereal::make_nvp("trustedSources", trusted_sources), + cereal::make_nvp("waapParameters", empty_list), + cereal::make_nvp("botProtection", false), + cereal::make_nvp("antiBot", anti_bots), + cereal::make_nvp("botProtection_v2", detect_str) + ); + } + + const std::string & getPracticeId() const { return practice_id; } + + bool + operator<(const WebAppSection &other) const + { + return getPracticeId() < other.getPracticeId(); + } + +private: + std::string application_urls; + std::string asset_id; + std::string asset_name; + std::string 
rule_id; + std::string rule_name; + std::string practice_id; + std::string practice_name; + std::string context; + std::string web_attack_mitigation_action; + std::string web_attack_mitigation_severity; + std::string web_attack_mitigation_mode; + bool web_attack_mitigation; + std::vector triggers; + PracticeAdvancedConfig practice_advanced_config; + AppSecPracticeAntiBot anti_bots; + std::vector trusted_sources; + std::vector overrides; +}; + +class WebAPISection +{ +public: + WebAPISection( + const std::string &_application_urls, + const std::string &_asset_id, + const std::string &_asset_name, + const std::string &_rule_id, + const std::string &_rule_name, + const std::string &_practice_id, + const std::string &_practice_name, + const std::string &_web_attack_mitigation_action, + const std::string &_web_attack_mitigation_severity, + const std::string &_web_attack_mitigation_mode, + bool _web_attack_mitigation, + const AppSecPracticeSpec &parsed_appsec_spec) + : + application_urls(_application_urls), + asset_id(_asset_id), + asset_name(_asset_name), + rule_id(_rule_id), + rule_name(_rule_name), + practice_id(_practice_id), + practice_name(_practice_name), + context("practiceId(" + practice_id +")"), + web_attack_mitigation_action(_web_attack_mitigation_action), + web_attack_mitigation_severity(_web_attack_mitigation_severity), + web_attack_mitigation_mode(_web_attack_mitigation_mode), + web_attack_mitigation(_web_attack_mitigation), + practice_advanced_config(parsed_appsec_spec) + {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + std::string disabled_str = "Disabled"; + std::vector empty_list; + out_ar( + cereal::make_nvp("application_urls", application_urls), + cereal::make_nvp("asset_id", asset_id), + cereal::make_nvp("asset_name", asset_name), + cereal::make_nvp("context", context), + cereal::make_nvp("practiceAdvancedConfig", practice_advanced_config), + cereal::make_nvp("practice_id", practice_id), + cereal::make_nvp("practice_name", practice_name), + cereal::make_nvp("ruleId", rule_id), + cereal::make_nvp("ruleName", rule_name), + cereal::make_nvp("schemaValidation", false), + cereal::make_nvp("schemaValidation_v2", disabled_str), + cereal::make_nvp("web_attack_mitigation", web_attack_mitigation), + cereal::make_nvp("web_attack_mitigation_action", web_attack_mitigation_action), + cereal::make_nvp("web_attack_mitigation_severity", web_attack_mitigation_severity), + cereal::make_nvp("web_attack_mitigation_mode", web_attack_mitigation_mode), + cereal::make_nvp("oas", empty_list), + cereal::make_nvp("trustedSources", empty_list), + cereal::make_nvp("triggers", empty_list), + cereal::make_nvp("waapParameters", empty_list), + cereal::make_nvp("overrides", empty_list) + ); + } + + const std::string & getPracticeId() const { return practice_id; } + +private: + std::string application_urls; + std::string asset_id; + std::string asset_name; + std::string rule_id; + std::string rule_name; + std::string practice_id; + std::string practice_name; + std::string context; + std::string web_attack_mitigation_action; + std::string web_attack_mitigation_severity; + std::string web_attack_mitigation_mode; + bool web_attack_mitigation; + PracticeAdvancedConfig practice_advanced_config; +}; + +class AppSecRulebase +{ +public: + AppSecRulebase( + std::vector _webApplicationPractices, + std::vector _webAPIPractices) + : + webApplicationPractices(_webApplicationPractices), + webAPIPractices(_webAPIPractices) {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + 
cereal::make_nvp("WebAPISecurity", webAPIPractices), + cereal::make_nvp("WebApplicationSecurity", webApplicationPractices) + ); + } + +private: + std::vector webApplicationPractices; + std::vector webAPIPractices; +}; + +class AppSecWrapper +{ +public: + AppSecWrapper(const AppSecRulebase &_app_sec) + : + app_sec_rulebase(_app_sec) + {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar(cereal::make_nvp("WAAP", app_sec_rulebase)); + } + +private: + AppSecRulebase app_sec_rulebase; +}; + + +class ParsedRule +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec ParsedRule"; + parseAppsecJSONKey>("exceptions", exceptions, archive_in); + parseAppsecJSONKey>("triggers", log_triggers, archive_in); + parseAppsecJSONKey>("practices", practices, archive_in); + parseAppsecJSONKey("mode", mode, archive_in); + parseAppsecJSONKey("custom-response", custom_response, archive_in); + parseAppsecJSONKey("source-identifiers", source_identifiers, archive_in); + parseAppsecJSONKey("trusted-sources", trusted_sources, archive_in); + try { + archive_in(cereal::make_nvp("host", host)); + } catch (const cereal::Exception &e) + {} // The default ParsedRule does not hold a host, so no error handling + } + + const std::vector & getExceptions() const { return exceptions; } + + const std::vector & getLogTriggers() const { return log_triggers; } + + const std::vector & getPractices() const { return practices; } + + const std::string & getHost() const { return host; } + + const std::string & getMode() const { return mode; } + + void setMode(const std::string &_mode) { mode = _mode; }; + + const std::string & getCustomResponse() const { return custom_response; } + + const std::string & getSourceIdentifiers() const { return source_identifiers; } + + const std::string & getTrustedSources() const { return trusted_sources; } + +private: + std::vector exceptions; + std::vector log_triggers; + std::vector practices; + std::string host; + std::string mode; + std::string custom_response; + std::string source_identifiers; + std::string trusted_sources; +}; + +std::ostream & +operator<<(std::ostream &os, const ParsedRule &obj) +{ + os + << "host: " + << obj.getHost() + << std::endl << "log trigger: " + << makeSeparatedStr(obj.getLogTriggers(), ",") + << std::endl << "mode: " + << obj.getMode() + << std::endl << "practices: " + << makeSeparatedStr(obj.getPractices(), ",") + << std::endl << "web responce: " + << obj.getCustomResponse() + << std::endl << " Exceptions: [" << std::endl + << makeSeparatedStr(obj.getExceptions(), ",") + << std::endl << "]"; + return os; +} + +class AppsecPolicySpec : Singleton::Consume +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec policy spec"; + parseAppsecJSONKey("default", default_rule, archive_in); + auto default_mode_annot = + Singleton::Consume::by()->get("default mode annotation"); + if (default_mode_annot.ok() && !default_mode_annot.unpack().empty() && default_rule.getMode().empty()) { + default_rule.setMode(default_mode_annot.unpack()); + } + parseAppsecJSONKey>("specific-rules", specific_rules, archive_in); + } + + const ParsedRule & getDefaultRule() const { return default_rule; } + + const std::vector & getSpecificRules() const { return specific_rules; } + +private: + ParsedRule default_rule; + std::vector specific_rules; +}; + +std::ostream & +operator<<(std::ostream &os, const AppsecPolicySpec &obj) +{ + os + << "Default Rule: " + << 
obj.getDefaultRule() + << std::endl <<"Specific Rules: [" << std::endl + << makeSeparatedStr(obj.getSpecificRules(), ",") + << std::endl << "]"; + return os; +} + +#endif // __APPSEC_PRACTICE_SECTION_H__ diff --git a/components/security_apps/orchestration/k8s_policy_gen/include/exceptions_section.h b/components/security_apps/orchestration/k8s_policy_gen/include/exceptions_section.h new file mode 100644 index 0000000..8359aba --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/include/exceptions_section.h @@ -0,0 +1,313 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __EXCEPTPIONS_SECTION_H__ +#define __EXCEPTPIONS_SECTION_H__ + +#include +#include +#include +#include +#include + +#include "config.h" +#include "debug.h" +#include "rest.h" +#include "k8s_policy_common.h" + +USE_DEBUG_FLAG(D_K8S_POLICY); + +class AppsecExceptionSpec +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec exception spec"; + parseAppsecJSONKey("action", action, archive_in); + parseAppsecJSONKey>("countryCode", country_code, archive_in); + parseAppsecJSONKey>("countryName", country_name, archive_in); + parseAppsecJSONKey>("hostName", host_name, archive_in); + parseAppsecJSONKey>("paramName", param_name, archive_in); + parseAppsecJSONKey>("paramValue", param_value, archive_in); + parseAppsecJSONKey>("protectionName", protection_name, archive_in); + parseAppsecJSONKey>("sourceIdentifier", source_identifier, archive_in); + parseAppsecJSONKey>("sourceIp", source_ip, archive_in); + parseAppsecJSONKey>("url", url, archive_in); + } + + const std::string & getAction() const { return action; } + const std::vector & getCountryCode() const { return country_code; } + const std::vector & getCountryName() const { return country_name; } + const std::vector & getHostName() const { return host_name; } + const std::vector & getParamName() const { return param_name; } + const std::vector & getParamValue() const { return param_value; } + const std::vector & getProtectionName() const { return protection_name; } + const std::vector & getSourceIdentifier() const { return source_identifier; } + const std::vector & getSourceIp() const { return source_ip; } + const std::vector & getUrl() const { return url; } + +private: + std::string action; + std::vector country_code; + std::vector country_name; + std::vector host_name; + std::vector param_name; + std::vector param_value; + std::vector protection_name; + std::vector source_identifier; + std::vector source_ip; + std::vector url; +}; + +std::ostream & +operator<<(std::ostream &os, const AppsecExceptionSpec &obj) +{ + os + << "action: " + << makeSeparatedStr(obj.getAction(), ",") + << "countryCode: " + << makeSeparatedStr(obj.getCountryCode(), ",") + << "countryName: " + << makeSeparatedStr(obj.getCountryName(), ",") + << "hostName: " + << makeSeparatedStr(obj.getHostName(), ",") + << "paramName: " + << makeSeparatedStr(obj.getParamName(), ",") + << "paramValue: " + << 
makeSeparatedStr(obj.getParamValue(), ",") + << "protectionName: " + << makeSeparatedStr(obj.getProtectionName(), ",") + << "sourceIdentifier: " + << makeSeparatedStr(obj.getSourceIdentifier(), ",") + << "sourceIp: " + << makeSeparatedStr(obj.getSourceIp(), ",") + << "url: " + << makeSeparatedStr(obj.getUrl(), ","); + + return os; +} + +class ExceptionMatch +{ +public: + ExceptionMatch(const AppsecExceptionSpec &parsed_exception) + : + match_type(MatchType::Operator), + op("and") + { + if (!parsed_exception.getCountryCode().empty()) { + items.push_back(ExceptionMatch("countryCode", parsed_exception.getCountryCode())); + } + if (!parsed_exception.getCountryName().empty()) { + items.push_back(ExceptionMatch("countryName", parsed_exception.getCountryName())); + } + if (!parsed_exception.getHostName().empty()) { + items.push_back(ExceptionMatch("hostName", parsed_exception.getHostName())); + } + if (!parsed_exception.getParamName().empty()) { + items.push_back(ExceptionMatch("paramName", parsed_exception.getParamName())); + } + if (!parsed_exception.getParamValue().empty()) { + items.push_back(ExceptionMatch("paramValue", parsed_exception.getParamValue())); + } + if (!parsed_exception.getProtectionName().empty()) { + items.push_back(ExceptionMatch("protectionName", parsed_exception.getProtectionName())); + } + if (!parsed_exception.getSourceIdentifier().empty()) { + items.push_back(ExceptionMatch("sourceIdentifier", parsed_exception.getSourceIdentifier())); + } + if (!parsed_exception.getSourceIp().empty()) { + items.push_back(ExceptionMatch("sourceIp", parsed_exception.getSourceIp())); + } + if (!parsed_exception.getUrl().empty()) { + items.push_back(ExceptionMatch("url", parsed_exception.getUrl())); + } + } + + ExceptionMatch(const std::string &_key, const std::vector &_value) + : + match_type(MatchType::Condition), + key(_key), + op("in"), + value(_value) + {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + switch (match_type) { + case (MatchType::Condition): { + std::string type_str = "condition"; + out_ar( + cereal::make_nvp("key", key), + cereal::make_nvp("op", op), + cereal::make_nvp("type", type_str), + cereal::make_nvp("value", value) + ); + break; + } + case (MatchType::Operator): { + std::string type_str = "operator"; + out_ar( + cereal::make_nvp("op", op), + cereal::make_nvp("type", type_str), + cereal::make_nvp("items", items) + ); + break; + } + default: { + dbgError(D_K8S_POLICY) << "No match for exception match type: " << static_cast(match_type); + } + } + } + +private: + MatchType match_type; + std::string key; + std::string op; + std::vector value; + std::vector items; +}; + +class ExceptionBehavior +{ +public: + ExceptionBehavior( + const std::string &_key, + const std::string &_value) + : + key(_key), + value(_value) + { + try { + id = to_string(boost::uuids::random_generator()()); + } catch (const boost::uuids::entropy_error &e) { + dbgWarning(D_K8S_POLICY) << "Failed to generate exception behavior UUID. 
Error: " << e.what(); + } + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("key", key), + cereal::make_nvp("value", value), + cereal::make_nvp("id", id) + ); + } + + const std::string getBehaviorId() const { return id; } + +private: + std::string key; + std::string id; + std::string value; +}; + +class InnerException +{ +public: + InnerException( + ExceptionBehavior _behavior, + ExceptionMatch _match) + : + behavior(_behavior), + match(_match) {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("behavior", behavior), + cereal::make_nvp("match", match) + ); + } + + const std::string getBehaviorId() const { return behavior.getBehaviorId(); } + + bool + operator<(const InnerException &other) const + { + return getBehaviorId() < other.getBehaviorId(); + } + +private: + ExceptionBehavior behavior; + ExceptionMatch match; +}; + +class ExceptionsRulebase +{ +public: + ExceptionsRulebase( + std::vector _exceptions) + : + exceptions(_exceptions) + { + std::string context_id_str = ""; + for (const InnerException exception : exceptions) { + std::string curr_id = "parameterId(" + exception.getBehaviorId() + "), "; + context_id_str += curr_id; + } + context_id_str = context_id_str.substr(0, context_id_str.size() - 2); + context = "Any(" + context_id_str + ")"; + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("context", context), + cereal::make_nvp("exceptions", exceptions) + ); + } + +private: + std::string context; + std::vector exceptions; +}; + +class ExceptionsWrapper +{ +public: + class Exception + { + public: + Exception(const std::vector &_exception) : exception(_exception) {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar(cereal::make_nvp("exception", exception)); + } + + private: + std::vector exception; + }; + ExceptionsWrapper(const std::vector &_exception) : exception_rulebase(Exception(_exception)) + {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("rulebase", exception_rulebase) + ); + } + +private: + Exception exception_rulebase; +}; + +#endif // __EXCEPTPIONS_SECTION_H__ diff --git a/components/security_apps/orchestration/k8s_policy_gen/include/ingress_data.h b/components/security_apps/orchestration/k8s_policy_gen/include/ingress_data.h new file mode 100644 index 0000000..ece21a6 --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/include/ingress_data.h @@ -0,0 +1,224 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __INGRESS_DATA_H__ +#define __INGRESS_DATA_H__ + +#include +#include + +#include "config.h" +#include "debug.h" +#include "rest.h" +#include "cereal/archives/json.hpp" + +USE_DEBUG_FLAG(D_K8S_POLICY); + +class IngressMetadata +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "IngressMetadata load"; + parseAppsecJSONKey("name", name, archive_in); + parseAppsecJSONKey("resourceVersion", resourceVersion, archive_in); + parseAppsecJSONKey("namespace", namespace_name, archive_in); + parseAppsecJSONKey>("annotations", annotations, archive_in); + } + + const std::string & getName() const { return name; } + const std::string & getResourceVersion() const { return resourceVersion; } + const std::string & getNamespace() const { return namespace_name; } + const std::map & getAnnotations() const { return annotations; } + +private: + std::string name; + std::string resourceVersion; + std::string namespace_name; + std::map annotations; +}; + +class IngressRulePath +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading ingress defined rule path"; + parseAppsecJSONKey("path", path, archive_in); + } + + const std::string & getPath() const { return path; } + +private: + std::string path; +}; + +std::ostream & +operator<<(std::ostream &os, const IngressRulePath &obj) +{ + os << obj.getPath(); + return os; +} + +class IngressRulePathsWrapper +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading ingress defined rule path wrapper"; + parseAppsecJSONKey>("paths", paths, archive_in); + } + + const std::vector & getRulePaths() const { return paths; } + +private: + std::vector paths; +}; + +class IngressDefinedRule +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading ingress defined rule"; + parseAppsecJSONKey("host", host, archive_in); + parseAppsecJSONKey("http", paths_wrapper, archive_in); + } + + const std::string & getHost() const { return host; } + const IngressRulePathsWrapper & getPathsWrapper() const { return paths_wrapper; } + +private: + std::string host; + IngressRulePathsWrapper paths_wrapper; +}; + +std::ostream & +operator<<(std::ostream &os, const IngressDefinedRule &obj) +{ + os + << "host: " + << obj.getHost() + << ", paths: [" << std::endl + << makeSeparatedStr(obj.getPathsWrapper().getRulePaths(), ",") + << std::endl << "]"; + return os; +} + +class DefaultBackend +{ +public: + void + load(cereal::JSONInputArchive &) + { + dbgTrace(D_K8S_POLICY) << "Loading Default Backend"; + is_exists = true; + } + + bool isExists() const { return is_exists; } + +private: + bool is_exists = false; +}; + +class IngressSpec +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading single ingress spec"; + parseAppsecJSONKey("ingressClassName", ingress_class_name, archive_in); + parseAppsecJSONKey>("rules", rules, archive_in); + parseAppsecJSONKey("defaultBackend", default_backend, archive_in); + } + + const std::string & getIngressClassName() const { return ingress_class_name; } + const std::vector & getRules() const { return rules; } + bool isDefaultBackendExists() const { return default_backend.isExists(); } + +private: + std::string ingress_class_name; + std::vector rules; + DefaultBackend default_backend; +}; + +std::ostream & +operator<<(std::ostream &os, const IngressSpec &obj) +{ + os + << "Ingress Spec - ingressClassName: " + << obj.getIngressClassName() 
+ << ", rules: [" << std::endl + << makeSeparatedStr(obj.getRules(), ",") + << std::endl << "]"; + return os; +} + +class SingleIngressData +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading single ingress data"; + parseAppsecJSONKey("metadata", metadata, archive_in); + parseAppsecJSONKey("spec", spec, archive_in); + } + + const IngressMetadata & getMetadata() const { return metadata; } + const IngressSpec & getSpec() const { return spec; } + +private: + IngressMetadata metadata; + IngressSpec spec; +}; + + +class IngressData : public ClientRest +{ +public: + bool + loadJson(const std::string &json) + { + std::string modified_json = json; + modified_json.pop_back(); + std::stringstream in; + in.str(modified_json); + dbgTrace(D_K8S_POLICY) << "Loading ingress data"; + try { + cereal::JSONInputArchive in_ar(in); + in_ar( + cereal::make_nvp("apiVersion", apiVersion), + cereal::make_nvp("items", items) + ); + } catch (cereal::Exception &e) { + dbgError(D_K8S_POLICY) << "Failed to load ingress data JSON. Error: " << e.what(); + return false; + } + return true; + } + + const std::string & getapiVersion() const { return apiVersion; } + const std::vector & getItems() const { return items; } + +private: + std::string apiVersion; + std::vector items; +}; + +#endif // __INGRESS_DATA_H__ diff --git a/components/security_apps/orchestration/k8s_policy_gen/include/k8s_policy_common.h b/components/security_apps/orchestration/k8s_policy_gen/include/k8s_policy_common.h new file mode 100644 index 0000000..f7ebde3 --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/include/k8s_policy_common.h @@ -0,0 +1,103 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
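As a quick orientation for how the ingress structures above are meant to be walked after parsing, here is an illustrative sketch; the api_response payload would come from the Kubernetes API and is not shown, and the function name is hypothetical.

    #include "ingress_data.h"

    // Illustrative only: `api_response` is a hypothetical raw response body.
    void
    logIngressHosts(const std::string &api_response)
    {
        IngressData ingresses;
        if (!ingresses.loadJson(api_response)) return;

        for (const SingleIngressData &item : ingresses.getItems()) {
            for (const IngressDefinedRule &rule : item.getSpec().getRules()) {
                dbgTrace(D_K8S_POLICY) << "Ingress rule for host: " << rule.getHost();
            }
        }
    }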
+ +#ifndef __K8S_POLICY_COMMON_H__ +#define __K8S_POLICY_COMMON_H__ + +#include +#include +#include +#include + +#include "config.h" +#include "debug.h" +#include "rest.h" + +USE_DEBUG_FLAG(D_K8S_POLICY); + +enum class PracticeType { WebApplication, WebAPI }; +enum class TriggerType { Log, WebUserResponse }; +enum class MatchType { Condition, Operator }; + +static const std::unordered_map string_to_match_type = { + { "condition", MatchType::Condition }, + { "operator", MatchType::Operator } +}; + +static const std::unordered_map string_to_practice_type = { + { "WebApplication", PracticeType::WebApplication }, + { "WebAPI", PracticeType::WebAPI } +}; + +static const std::unordered_map string_to_trigger_type = { + { "log", TriggerType::Log }, + { "WebUserResponse", TriggerType::WebUserResponse } +}; + +static const std::unordered_map key_to_practices_val = { + { "prevent-learn", "Prevent"}, + { "detect-learn", "Detect"}, + { "prevent", "Prevent"}, + { "detect", "Detect"}, + { "inactive", "Inactive"} +}; + +template +void +parseAppsecJSONKey( + const std::string &key_name, + T &value, + cereal::JSONInputArchive &archive_in, + const T &default_value = T()) +{ + try { + archive_in(cereal::make_nvp(key_name, value)); + } catch (const cereal::Exception &e) { + archive_in.setNextName(nullptr); + value = default_value; + dbgDebug(D_K8S_POLICY) + << "Could not parse the required key. Key: " + << key_name + << ", Error: " + << e.what(); + } +} + +template +class AppsecSpecParser : public ClientRest +{ +public: + bool + loadJson(const std::string &json) + { + std::string modified_json = json; + modified_json.pop_back(); + std::stringstream ss; + ss.str(modified_json); + try { + cereal::JSONInputArchive in_ar(ss); + in_ar(cereal::make_nvp("spec", spec)); + } catch (cereal::Exception &e) { + dbgError(D_K8S_POLICY) << "Failed to load spec JSON. Error: " << e.what(); + return false; + } + return true; + } + + const T & getSpec() const { return spec; } + +private: + T spec; +}; + +#endif // __K8S_POLICY_COMMON_H__ diff --git a/components/security_apps/orchestration/k8s_policy_gen/include/rules_config_section.h b/components/security_apps/orchestration/k8s_policy_gen/include/rules_config_section.h new file mode 100644 index 0000000..6044b13 --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/include/rules_config_section.h @@ -0,0 +1,391 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
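The parseAppsecJSONKey helper above is the workhorse for all of the load() methods in this patch: it fills the target from the named JSON key and quietly falls back to a default when the key is absent or malformed. A small sketch of a custom load() built on it follows; the class, key names, and defaults are hypothetical, and the defaults are passed as typed values so the template parameter can be deduced.

    // Illustrative only: a hypothetical spec fragment built on parseAppsecJSONKey.
    class ExampleSpecFragment
    {
    public:
        void
        load(cereal::JSONInputArchive &archive_in)
        {
            // Falls back to the supplied defaults when the keys are missing.
            parseAppsecJSONKey("mode", mode, archive_in, std::string("detect"));
            parseAppsecJSONKey("max-entries", max_entries, archive_in, 100);
        }

        const std::string & getMode() const { return mode; }
        int getMaxEntries() const { return max_entries; }

    private:
        std::string mode;
        int max_entries = 0;
    };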
+ +#ifndef __RULES_CONFIG_SECTION_H__ +#define __RULES_CONFIG_SECTION_H__ + +#include +#include +#include +#include +#include +#include + +#include "config.h" +#include "debug.h" +#include "k8s_policy_common.h" + +USE_DEBUG_FLAG(D_K8S_POLICY); + +class AssetUrlParser +{ +public: + std::string query_string, asset_uri, protocol, asset_url, port; + + AssetUrlParser() + {} + + AssetUrlParser(const std::string &asset) + { + parse(asset); + } + +private: + static AssetUrlParser + parse(const std::string &uri) + { + AssetUrlParser result; + + using iterator_t = std::string::const_iterator; + + if (uri.length() == 0) return result; + + iterator_t uri_end = uri.end(); + + // get query start + iterator_t query_start = std::find(uri.begin(), uri_end, '?'); + + // protocol + iterator_t protocol_start = uri.begin(); + iterator_t protocol_end = std::find(protocol_start, uri_end, ':'); //"://"); + + if (protocol_end != uri_end) { + std::string http_protocol = &*(protocol_end); + if ((http_protocol.length() > 3) && (http_protocol.substr(0, 3) == "://")) { + result.protocol = std::string(protocol_start, protocol_end); + protocol_end += 3; // :// + } else { + protocol_end = uri.begin(); // no protocol + } + } else { + protocol_end = uri.begin(); // no protocol + } + + // URL + iterator_t host_start = protocol_end; + iterator_t path_start = std::find(host_start, uri_end, '/'); + + iterator_t host_end = std::find(protocol_end, (path_start != uri_end) ? path_start : query_start, ':'); + + result.asset_url = std::string(host_start, host_end); + + // port + if ((host_end != uri_end) && ((&*(host_end))[0] == ':')) { // we have a port + host_end++; + iterator_t portEnd = (path_start != uri_end) ? path_start : query_start; + result.port = std::string(host_end, portEnd); + } + + // URI + if (path_start != uri_end) result.asset_uri = std::string(path_start, query_start); + + // query + if (query_start != uri_end) result.query_string = std::string(query_start, uri.end()); + + return result; + } // Parse +}; // uri + +class PracticeSection +{ +public: + PracticeSection(const std::string &_id, const std::string &_type, const std::string &_practice_name) + { + auto maybe_type = string_to_practice_type.find(_type); + if (maybe_type == string_to_practice_type.end()) { + dbgError(D_K8S_POLICY) << "Illegal pracrtice type: " << _type; + return; + } + + type = _type; + name = _practice_name; + id = _id; + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("practiceId", id), + cereal::make_nvp("practiceName", name), + cereal::make_nvp("practiceType", type) + ); + } + + const std::string & getPracticeId() const { return id; } + const std::string & getPracticeName() const { return name; } + +private: + std::string id; + std::string name; + std::string type; +}; + +class ParametersSection +{ +public: + ParametersSection( + const std::string &_id, + const std::string &_name) + : + name(_name), + id(_id) + { + if (_id.empty() && _name.empty()) { + dbgError(D_K8S_POLICY) << "Illegal Parameter values. 
Name and ID are empty"; + return; + } + } + + const std::string & getId() const { return id; } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("parameterId", id), + cereal::make_nvp("parameterName", name), + cereal::make_nvp("parameterType", type) + ); + } + +private: + std::string name; + std::string id; + std::string type = "Exception"; +}; + +class RulesTriggerSection +{ +public: + RulesTriggerSection( + const std::string &_name, + const std::string &_id, + const std::string &_type) + : + name(_name), + id(_id) + { + if (_name.empty() && _id.empty()) { + dbgError(D_K8S_POLICY) << "Illegal values for trigger. Name and ID are empty"; + return; + } + auto maybe_type = string_to_trigger_type.find(_type); + if (maybe_type == string_to_trigger_type.end()) { + dbgError(D_K8S_POLICY) << "Illegal trigger type in rule: " << _type; + return; + } + type = _type; + } + + const std::string & getId() const { return id; } + const std::string & getName() const { return id; } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("triggerId", id), + cereal::make_nvp("triggerName", name), + cereal::make_nvp("triggerType", type) + ); + } + +private: + std::string name; + std::string id; + std::string type; +}; + +class RulesConfigRulebase +{ +public: + RulesConfigRulebase() + {} + + RulesConfigRulebase( + const std::string &_name, + const std::string &_url, + const std::string &_uri, + std::vector _practices, + std::vector _parameters, + std::vector _triggers) + : + name(_name), + practices(_practices), + parameters(_parameters), + triggers(_triggers) + { + try { + id = to_string(boost::uuids::random_generator()()); + bool any = _name == "Any" && _url == "Any" && _uri == "Any"; + if (_uri != "/") { + context = any ? "All()" : "Any(" + "All(" + "Any(" + "EqualHost(" + _url + ")" + ")," + "EqualListeningPort(80)" + + std::string(_uri.empty() ? "" : ",BeginWithUri(" + _uri + ")") + + ")," + "All(" + "Any(" + "EqualHost(" + _url + ")" + ")," + "EqualListeningPort(443)" + + std::string(_uri.empty() ? "" : ",BeginWithUri(" + _uri + ")") + + ")" + ")"; + } else { + context = any ? "All()" : "Any(" + "All(" + "Any(" + "EqualHost(" + _url + ")" + ")," + "EqualListeningPort(80)" + ")," + "All(" + "Any(" + "EqualHost(" + _url + ")" + ")," + "EqualListeningPort(443)" + ")" + ")"; + } + } catch (const boost::uuids::entropy_error &e) { + dbgWarning(D_K8S_POLICY) << "Failed to generate rule UUID. 
Error: " << e.what(); + } + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + std::string empty_str = ""; + out_ar( + cereal::make_nvp("assetId", id), + cereal::make_nvp("assetName", name), + cereal::make_nvp("ruleId", id), + cereal::make_nvp("ruleName", name), + cereal::make_nvp("context", context), + cereal::make_nvp("priority", 1), + cereal::make_nvp("isCleanup", false), + cereal::make_nvp("parameters", parameters), + cereal::make_nvp("practices", practices), + cereal::make_nvp("triggers", triggers), + cereal::make_nvp("zoneId", empty_str), + cereal::make_nvp("zoneName", empty_str) + ); + } + + const std::string & getRuleId() const { return id; } + const std::string & getAssetName() const { return name; } + const std::string & getRuleName() const { return name; } + const std::string & getAsstId() const { return id; } + const std::string & getPracticeId() const { return practices[0].getPracticeId(); } + const std::string & getPracticeName() const { return practices[0].getPracticeName(); } + const std::vector & getPractice() const { return practices; } + const std::vector & getParameters() const { return parameters; } + const std::vector & getTriggers() const { return triggers; } + + +private: + std::string context; + std::string id; + std::string name; + std::vector practices; + std::vector parameters; + std::vector triggers; +}; + +class RulesConfigWrapper +{ +public: + class RulesConfig + { + public: + RulesConfig(const std::vector &_rules_config) + : + rules_config(_rules_config) + { + sort(rules_config.begin(), rules_config.end(), sortBySpecific); + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("rulesConfig", rules_config) + ); + } + + private: + static bool + sortBySpecific(const RulesConfigRulebase &first, const RulesConfigRulebase &second) + { + return sortBySpecificAux(first.getAssetName(), second.getAssetName()); + } + + static bool + sortBySpecificAux(const std::string &first, const std::string &second) + { + if (first.empty()) return false; + if (second.empty()) return true; + + AssetUrlParser first_parsed = AssetUrlParser(first); + AssetUrlParser second_parsed = AssetUrlParser(second); + + // sort by URL + if (first_parsed.asset_url == "*" && second_parsed.asset_url != "*") return false; + if (second_parsed.asset_url == "*" && first_parsed.asset_url != "*") return true; + + // sort by port + if (first_parsed.port == "*" && second_parsed.port != "*") return false; + if (second_parsed.port == "*" && first_parsed.port != "*") return true; + + // sort by URI + if (first_parsed.asset_uri == "*" && second_parsed.asset_uri != "*") return false; + if (second_parsed.asset_uri == "*" && first_parsed.asset_uri != "*") return true; + + if (first_parsed.asset_uri.empty()) return false; + if (second_parsed.asset_uri.empty()) return true; + + if (second_parsed.asset_uri.find(first_parsed.asset_uri) != std::string::npos) return false; + if (first_parsed.asset_uri.find(second_parsed.asset_uri) != std::string::npos) return true; + + if (first_parsed.asset_url.empty()) return false; + if (second_parsed.asset_url.empty()) return false; + + return second < first; + } + + std::vector rules_config; + }; + + RulesConfigWrapper(const std::vector &_rules_config) + : + rules_config_rulebase(RulesConfig(_rules_config)) + {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("rulebase", rules_config_rulebase) + ); + } + +private: + RulesConfig rules_config_rulebase; +}; + +#endif // 
__RULES_CONFIG_SECTION_H__ diff --git a/components/security_apps/orchestration/k8s_policy_gen/include/settings_section.h b/components/security_apps/orchestration/k8s_policy_gen/include/settings_section.h new file mode 100644 index 0000000..23b09a8 --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/include/settings_section.h @@ -0,0 +1,121 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __SETTINGS_SECTION_H__ +#define __SETTINGS_SECTION_H__ + +#include +#include +#include +#include + +#include "config.h" +#include "debug.h" +#include "k8s_policy_common.h" + +USE_DEBUG_FLAG(D_K8S_POLICY); + +class AgentSettingsSection +{ +public: + AgentSettingsSection( + const std::string &_key, + const std::string &_value) + : + key(_key), + value(_value) + { + try { + id = to_string(boost::uuids::random_generator()()); + } catch (const boost::uuids::entropy_error &e) { + dbgWarning(D_K8S_POLICY) << "Failed to generate agent setting UUID. Error: " << e.what(); + } + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("id", id), + cereal::make_nvp("key", key), + cereal::make_nvp("value", value) + ); + } + + const std::string & getSettingId() const { return id; } + +private: + std::string id; + std::string key; + std::string value; +}; + +class SettingsRulebase +{ +public: + SettingsRulebase(std::vector _agentSettings) : agentSettings(_agentSettings) {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + std::string profile_type = "Kubernetes"; + std::string upgrade_mode = "automatic"; + out_ar( + cereal::make_nvp("agentSettings", agentSettings), + cereal::make_nvp("agentType", profile_type), + cereal::make_nvp("allowOnlyDefinedApplications", false), + cereal::make_nvp("anyFog", true), + cereal::make_nvp("maxNumberOfAgents", 10), + cereal::make_nvp("upgradeMode", upgrade_mode) + ); + } + +private: + std::vector agentSettings; +}; + +class SettingsWrapper +{ +public: + SettingsWrapper(SettingsRulebase _agent) : agent(_agent) + { + try { + id = to_string(boost::uuids::random_generator()()); + } catch (const boost::uuids::entropy_error &e) { + dbgWarning(D_K8S_POLICY) << "Failed to generate Settings Wrapper UUID. 
Error: " << e.what(); + } + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("profileType", profileType), + cereal::make_nvp("tokenType", isToken), + cereal::make_nvp("tokenType", tokenType), + cereal::make_nvp("name", name), + cereal::make_nvp("id", id), + cereal::make_nvp("agent", agent) + ); + } + +private: + std::string profileType = "agent"; + bool isToken = true; + std::string tokenType = "sameToken"; + std::string id; + std::string name = "Kubernetes Agents"; + SettingsRulebase agent; +}; + +#endif // __SETTINGS_SECTION_H__ diff --git a/components/security_apps/orchestration/k8s_policy_gen/include/snort_section.h b/components/security_apps/orchestration/k8s_policy_gen/include/snort_section.h new file mode 100644 index 0000000..d5b7167 --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/include/snort_section.h @@ -0,0 +1,79 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __SNORT_SECTION_H__ +#define __SNORT_SECTION_H__ + +#include +#include +#include +#include + +#include "config.h" +#include "debug.h" + +USE_DEBUG_FLAG(D_K8S_POLICY); + +class AgentSettingsSection +{ +public: + AgentSettingsSection(std::string _key, std::string _value) : key(_key), value(_value) + { + try { + id = to_string(boost::uuids::random_generator()()); + } catch (const boost::uuids::entropy_error &e) { + dbgWarning(D_K8S_POLICY) << "Failed to generate agent setting UUID. Error: " << e.what(); + } + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("id", id), + cereal::make_nvp("key", key), + cereal::make_nvp("value", value) + ); + } + +private: + std::string id; + std::string key; + std::string value; +}; + +class IpsSnortSigsRulebase +{ +public: + IpsSnortSigsRulebase(std::vector _agentSettings) : agentSettings(_agentSettings) {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + std::string profile_type = "KubernetesProfile"; + std::string upgrade_mode = "automatic"; + out_ar( + cereal::make_nvp("agentSettings", agentSettings), + cereal::make_nvp("agentType", profile_type), + cereal::make_nvp("allowOnlyDefinedApplications", false), + cereal::make_nvp("anyFog", true), + cereal::make_nvp("maxNumberOfAgents", 10), + cereal::make_nvp("upgradeMode", upgrade_mode) + ); + } + +private: + std::vector agentSettings; +}; + +#endif // __SNORT_SECTION_H__ diff --git a/components/security_apps/orchestration/k8s_policy_gen/include/triggers_section.h b/components/security_apps/orchestration/k8s_policy_gen/include/triggers_section.h new file mode 100644 index 0000000..8761f37 --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/include/triggers_section.h @@ -0,0 +1,625 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __TRIGGERS_SECTION_H__ +#define __TRIGGERS_SECTION_H__ + +#include +#include +#include +#include + +#include "config.h" +#include "debug.h" +#include "k8s_policy_common.h" + +USE_DEBUG_FLAG(D_K8S_POLICY); + +class LogTriggerSection +{ +public: + LogTriggerSection() + {} + + LogTriggerSection( + const std::string &_name, + const std::string &_verbosity, + const std::string &_extendloggingMinSeverity, + bool _extendlogging, + bool _logToAgent, + bool _logToCef, + bool _logToCloud, + bool _logToSyslog, + bool _responseBody, + bool _tpDetect, + bool _tpPrevent, + bool _webBody, + bool _webHeaders, + bool _webRequests, + bool _webUrlPath, + bool _webUrlQuery, + int _cefPortNum, + const std::string &_cefIpAddress, + int _syslogPortNum, + const std::string &_syslogIpAddress, + bool _beautify_logs) + : + name(_name), + verbosity(_verbosity), + extendloggingMinSeverity(_extendloggingMinSeverity), + extendlogging(_extendlogging), + logToAgent(_logToAgent), + logToCef(_logToCef), + logToCloud(_logToCloud), + logToSyslog(_logToSyslog), + responseBody(_responseBody), + tpDetect(_tpDetect), + tpPrevent(_tpPrevent), + webBody(_webBody), + webHeaders(_webHeaders), + webRequests(_webRequests), + webUrlPath(_webUrlPath), + webUrlQuery(_webUrlQuery), + cefPortNum (_cefPortNum), + cefIpAddress (_cefIpAddress), + syslogPortNum (_syslogPortNum), + syslogIpAddress (_syslogIpAddress), + beautify_logs(_beautify_logs) + { + try { + id = to_string(boost::uuids::random_generator()()); + context = "triggerId(" + id + ")"; + } catch (const boost::uuids::entropy_error &e) { + dbgWarning(D_K8S_POLICY) << "Failed to generate log trigger UUID. 
Error: " << e.what(); + } + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + std::string trigger_type = "log"; + std::string urlForSyslog = syslogIpAddress + ":" + std::to_string(syslogPortNum); + std::string urlForCef = cefIpAddress + ":" + std::to_string(cefPortNum); + out_ar( + cereal::make_nvp("context", context), + cereal::make_nvp("triggerName", name), + cereal::make_nvp("triggerType", trigger_type), + cereal::make_nvp("verbosity", verbosity), + cereal::make_nvp("acAllow", false), + cereal::make_nvp("acDrop", false), + cereal::make_nvp("complianceViolations", false), + cereal::make_nvp("complianceWarnings", false), + cereal::make_nvp("extendloggingMinSeverity", extendloggingMinSeverity), + cereal::make_nvp("extendlogging", extendlogging), + cereal::make_nvp("logToAgent", logToAgent), + cereal::make_nvp("logToCef", logToCef), + cereal::make_nvp("logToCloud", logToCloud), + cereal::make_nvp("logToSyslog", logToSyslog), + cereal::make_nvp("responseBody", responseBody), + cereal::make_nvp("responseCode", false), + cereal::make_nvp("tpDetect", tpDetect), + cereal::make_nvp("tpPrevent", tpPrevent), + cereal::make_nvp("webBody", webBody), + cereal::make_nvp("webHeaders", webHeaders), + cereal::make_nvp("webRequests", webRequests), + cereal::make_nvp("webUrlPath", webUrlPath), + cereal::make_nvp("webUrlQuery", webUrlQuery), + cereal::make_nvp("urlForSyslog", urlForSyslog), + cereal::make_nvp("urlForCef", urlForCef), + cereal::make_nvp("formatLoggingOutput", beautify_logs) + ); + } + + const std::string & getTriggerId() const { return id; } + const std::string & getTriggerName() const { return name; } + +private: + std::string id; + std::string name; + std::string context; + std::string verbosity; + std::string extendloggingMinSeverity; + bool extendlogging; + bool logToAgent; + bool logToCef; + bool logToCloud; + bool logToSyslog; + bool responseBody; + bool tpDetect; + bool tpPrevent; + bool webBody; + bool webHeaders; + bool webRequests; + bool webUrlPath; + bool webUrlQuery; + int cefPortNum; + std::string cefIpAddress; + int syslogPortNum; + std::string syslogIpAddress; + bool beautify_logs; +}; + +class WebUserResponseTriggerSection +{ +public: + WebUserResponseTriggerSection( + const std::string &_name, + const std::string &_details_level, + const std::string &_response_body, + int _response_code, + const std::string &_response_title) + : + name(_name), + context(), + details_level(_details_level), + response_body(_response_body), + response_title(_response_title), + response_code(_response_code) + { + try { + id = to_string(boost::uuids::random_generator()()); + context = "triggerId(" + id + ")"; + } catch (const boost::uuids::entropy_error &e) { + dbgWarning(D_K8S_POLICY) << "Failed to generate webUserResponse trigger UUID. 
Error: " << e.what(); + } + } + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("context", context), + cereal::make_nvp("triggerName", name), + cereal::make_nvp("details level", details_level), + cereal::make_nvp("response body", response_body), + cereal::make_nvp("response code", response_code), + cereal::make_nvp("response title", response_title) + ); + } + + const std::string & getTriggerId() const { return id; } + const std::string & getTriggerName() const { return name; } + +private: + std::string id; + std::string name; + std::string context; + std::string details_level; + std::string response_body; + std::string response_title; + int response_code; +}; + +class AppSecWebUserResponseSpec +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec web user response spec"; + parseAppsecJSONKey("http-response-code", httpResponseCode, archive_in, 403); + parseAppsecJSONKey("mode", mode, archive_in, "block-page"); + if (mode == "block-page") { + parseAppsecJSONKey( + "message-body", + messageBody, + archive_in, + "Openappsec's Application Security has detected an attack and blocked it." + ); + parseAppsecJSONKey( + "message-title", + messageTitle, + archive_in, + "Attack blocked by web application protection" + ); + } + } + + int getHttpResponseCode() const { return httpResponseCode; } + const std::string & getMessageBody() const { return messageBody; } + const std::string & getMessageTitle() const { return messageTitle; } + const std::string & getMode() const { return mode; } + +private: + int httpResponseCode; + std::string messageBody; + std::string messageTitle; + std::string mode; +}; + +std::ostream & +operator<<(std::ostream &os, const AppSecWebUserResponseSpec &obj) +{ + os + << "mode: " + << obj.getMode() + << "," << std::endl << "message-title: " + << obj.getMessageTitle() + << "," << std::endl << "message-body: " + << obj.getMessageBody() + << "," << std::endl << "http-response-code: " + << obj.getHttpResponseCode(); + return os; +} + +class TriggersRulebase +{ +public: + TriggersRulebase( + std::vector _logTriggers, + std::vector _webUserResponseTriggers) + : + logTriggers(_logTriggers), + webUserResponseTriggers(_webUserResponseTriggers) {} + + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("log", logTriggers), + cereal::make_nvp("webUserResponse", webUserResponseTriggers) + ); + } + +private: + std::vector logTriggers; + std::vector webUserResponseTriggers; +}; + +class AppsecTriggerAccessControlLogging +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec Trigger - Access Control Logging"; + parseAppsecJSONKey("allow-events", allow_events, archive_in, false); + parseAppsecJSONKey("drop-events", drop_events, archive_in, false); + } + + bool isAllowEvents() const { return allow_events; } + bool isDropEvents() const { return drop_events; } + +private: + bool allow_events = false; + bool drop_events = false; +}; + +std::ostream & +operator<<(std::ostream &os, const AppsecTriggerAccessControlLogging &obj) +{ + os + << "AppSec Trigger - Access Control Logging: " + << "isAllowEvents: " + << obj.isAllowEvents() + << " , isDropEvents: " + << obj.isDropEvents(); + return os; +} + +class AppsecTriggerAdditionalSuspiciousEventsLogging : public ClientRest +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec Trigger - Additional 
Suspicious Events Logging"; + parseAppsecJSONKey("enabled", enabled, archive_in, true); + parseAppsecJSONKey("response-body", response_body, archive_in, false); + parseAppsecJSONKey("minimum-severity", minimum_severity, archive_in, "high"); + } + + bool isEnabled() const { return enabled; } + bool isResponseBody() const { return response_body; } + const std::string & getMinimumSeverity() const { return minimum_severity; } + +private: + bool enabled = true; + bool response_body = false; + std::string minimum_severity = "high"; +}; + +std::ostream & +operator<<(std::ostream &os, const AppsecTriggerAdditionalSuspiciousEventsLogging &obj) +{ + os + << "AppsecTriggerAdditionalSuspiciousEventsLogging: " + << "Enabled: " + << obj.isEnabled() + << " response_body: " + << obj.isResponseBody() + << " minimum_severity: " + << obj.getMinimumSeverity(); + return os; +} + +class AppsecTriggerLogging : public ClientRest +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec Trigger Logging"; + parseAppsecJSONKey("all-web-requests", all_web_requests, archive_in, false); + parseAppsecJSONKey("detect-events", detect_events, archive_in, false); + parseAppsecJSONKey("prevent-events", prevent_events, archive_in, true); + } + + bool isAllWebRequests() const { return all_web_requests; } + + bool isDetectEvents() const { return detect_events; } + + bool isPreventEvents() const { return prevent_events; } + +private: + bool all_web_requests = false; + bool detect_events = false; + bool prevent_events = true; +}; + +std::ostream & +operator<<(std::ostream &os, const AppsecTriggerLogging &obj) +{ + os + << "AppsecTriggerLogging: " + << "all_web_requests: " + << obj.isAllWebRequests() + << ", detect_events: " + << obj.isDetectEvents() + << ", prevent_events: " + << obj.isPreventEvents(); + return os; +} + +class AppsecTriggerExtendedLogging : public ClientRest +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec Trigger Extended Logging"; + parseAppsecJSONKey("http-headers", http_headers, archive_in, false); + parseAppsecJSONKey("request-body", request_body, archive_in, false); + parseAppsecJSONKey("url-path", url_path, archive_in, false); + parseAppsecJSONKey("url-query", url_query, archive_in, false); + } + + bool isHttpHeaders() const { return http_headers; } + bool isRequestBody() const { return request_body; } + bool isUrlPath() const { return url_path; } + bool isUrlQuery() const { return url_query; } + +private: + bool http_headers = false; + bool request_body = false; + bool url_path = false; + bool url_query = false; +}; + +std::ostream & +operator<<(std::ostream &os, const AppsecTriggerExtendedLogging &obj) +{ + os + << "AppsecTriggerExtendedLogging: " + << "http_headers: " + << obj.isHttpHeaders() + << ", request_body: " + << obj.isRequestBody() + << ", url_path: " + << obj.isUrlPath() + << ", url_query: " + << obj.isUrlQuery(); + return os; +} + +class LoggingService +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + parseAppsecJSONKey("address", address, archive_in); + parseAppsecJSONKey("proto", proto, archive_in); + parseAppsecJSONKey("port", port, archive_in, 514); + } + + const std::string & getAddress() const { return address; } + const std::string & getProto() const { return proto; } + int getPort() const { return port; } + +private: + std::string address; + std::string proto; + int port = 514; +}; + +class StdoutLogging +{ +public: + StdoutLogging() : format("json") 
{} + + void + load(cereal::JSONInputArchive &archive_in) + { + parseAppsecJSONKey("format", format, archive_in, "json"); + } + + const std::string & getFormat() const { return format; } + +private: + std::string format; +}; + +class AppsecTriggerLogDestination : public ClientRest +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgError(D_K8S_POLICY) << "AppsecTriggerLogDestination load"; + // TBD: support "file" + parseAppsecJSONKey("cloud", cloud, archive_in, false); + + StdoutLogging stdout_log; + parseAppsecJSONKey("stdout", stdout_log, archive_in); + agent_local = !(stdout_log.getFormat().empty()); + beautify_logs = stdout_log.getFormat() == "json-formatted"; + parseAppsecJSONKey("syslog-service", syslog_service, archive_in); + parseAppsecJSONKey("cef-service", cef_service, archive_in); + } + + int getCefServerUdpPort() const { return getCefServiceData().getPort(); } + int getSyslogServerUdpPort() const { return getSyslogServiceData().getPort(); } + bool isAgentLocal() const { return agent_local; } + bool shouldBeautifyLogs() const { return beautify_logs; } + + bool getCloud() const { return cloud; } + bool isCefNeeded() const { return !getCefServiceData().getAddress().empty(); } + bool isSyslogNeeded() const { return !getSyslogServiceData().getAddress().empty(); } + const std::string & getSyslogServerIpv4Address() const { return getSyslogServiceData().getAddress(); } + const std::string & getCefServerIpv4Address() const { return getCefServiceData().getAddress(); } + +private: + const LoggingService & getSyslogServiceData() const { return syslog_service; } + const LoggingService & getCefServiceData() const { return cef_service; } + + bool cloud = false; + bool agent_local = true; + bool beautify_logs = true; + LoggingService syslog_service; + LoggingService cef_service; +}; + +std::ostream & +operator<<(std::ostream &os, const AppsecTriggerLogDestination &obj) +{ + os + << "AppSec Trigger Log Destination:" << std::endl + << "agent_local: " + << obj.isAgentLocal() + << ", beautify_logs: " + << obj.shouldBeautifyLogs() + << ", cef_server_udp_port: " + << obj.getCefServerUdpPort() + << ", syslog_server_udp_port: " + << obj.getSyslogServerUdpPort() + << ", cef_service: " + << obj.isCefNeeded() + << ", cloud: " + << obj.getCloud() + << ", syslog: " + << obj.isSyslogNeeded() + << ", syslog_server_ipv4_address: " + << obj.getSyslogServerIpv4Address() + << ", cef_server_ipv4_address: " + << obj.getCefServerIpv4Address(); + return os; +} + +class AppsecTriggerSpec +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading AppSec trigger spec"; + parseAppsecJSONKey( + "access-control-logging", + access_control_logging, + archive_in + ); + parseAppsecJSONKey( + "additional-suspicious-events-logging", + additional_suspicious_events_logging, + archive_in + ); + parseAppsecJSONKey("appsec-logging", appsec_logging, archive_in); + parseAppsecJSONKey("extended-logging", extended_logging, archive_in); + parseAppsecJSONKey("log-destination", log_destination, archive_in); + } + + const AppsecTriggerAccessControlLogging & + getAppsecTriggerAccessControlLogging() const + { + return access_control_logging; + } + + const AppsecTriggerAdditionalSuspiciousEventsLogging & + getAppsecTriggerAdditionalSuspiciousEventsLogging() const + { + return additional_suspicious_events_logging; + } + + const AppsecTriggerLogging & + getAppsecTriggerLogging() const + { + return appsec_logging; + } + + const AppsecTriggerExtendedLogging & + 
getAppsecTriggerExtendedLogging() const + { + return extended_logging; + } + + const AppsecTriggerLogDestination & + getAppsecTriggerLogDestination() const + { + return log_destination; + } + +private: + AppsecTriggerAccessControlLogging access_control_logging; + AppsecTriggerAdditionalSuspiciousEventsLogging additional_suspicious_events_logging; + AppsecTriggerLogging appsec_logging; + AppsecTriggerExtendedLogging extended_logging; + AppsecTriggerLogDestination log_destination; +}; + +std::ostream & +operator<<(std::ostream &os, const AppsecTriggerSpec &obj) +{ + os + << "AppSec Access Control Logging:" << std::endl + << obj.getAppsecTriggerAccessControlLogging() + << std::endl << "AppSec Additional Suspicious Events Logging:" << std::endl + << obj.getAppsecTriggerAdditionalSuspiciousEventsLogging() + << std::endl << "AppSec Trigger Logging:" << std::endl + << obj.getAppsecTriggerLogging() + << std::endl << "AppSec Trigger Extended Logging:" << std::endl + << obj.getAppsecTriggerExtendedLogging() + << std::endl << "AppSec Trigger Log Destination:" << std::endl + << obj.getAppsecTriggerLogDestination(); + return os; +} + +class TriggersWrapper +{ +public: + TriggersWrapper(const TriggersRulebase &_triggers) : triggers_rulebase(_triggers) + {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("rulebase", triggers_rulebase) + ); + } + +private: + TriggersRulebase triggers_rulebase; +}; + +#endif // __TRIGGERS_SECTION_H__ diff --git a/components/security_apps/orchestration/k8s_policy_gen/include/trusted_sources_section.h b/components/security_apps/orchestration/k8s_policy_gen/include/trusted_sources_section.h new file mode 100755 index 0000000..318c6d4 --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/include/trusted_sources_section.h @@ -0,0 +1,186 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
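The section classes introduced above all follow the same cereal pattern: each one exposes a serialize(cereal::JSONOutputArchive &) method, and nesting the wrapper objects produces the final policy JSON, which is how dumpPolicyToFile() in k8s_policy_gen.cc later writes the document. A minimal sketch of that pattern using the trigger classes from triggers_section.h; the main() harness, the include list, and the sample values are illustrative assumptions and are not part of this patch:

    #include <iostream>
    #include <sstream>
    #include <cereal/archives/json.hpp>
    #include <cereal/types/string.hpp>
    #include <cereal/types/vector.hpp>
    #include "triggers_section.h"  // WebUserResponseTriggerSection, TriggersRulebase, TriggersWrapper

    int main()
    {
        // One custom-response trigger and no log triggers (names and values are hypothetical).
        WebUserResponseTriggerSection web_res(
            "my-custom-response", "block-page", "Request blocked", 403, "Attack blocked");
        TriggersRulebase rulebase({}, {web_res});
        TriggersWrapper wrapper(rulebase);

        std::stringstream ss;
        {
            cereal::JSONOutputArchive archive(ss);  // cereal finalizes the JSON when the archive is destroyed
            wrapper.serialize(archive);
        }
        // Prints a document of the form {"rulebase": {"log": [...], "webUserResponse": [...]}}
        std::cout << ss.str() << std::endl;
        return 0;
    }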
+ +#ifndef __TRUSTED_SOURCES_SECTION_H__ +#define __TRUSTED_SOURCES_SECTION_H__ + +#include +#include +#include +#include +#include + +#include "config.h" +#include "debug.h" +#include "k8s_policy_common.h" + +USE_DEBUG_FLAG(D_K8S_POLICY); + +class TrustedSourcesSpec +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading trusted sources spec"; + parseAppsecJSONKey("minNumOfSources", min_num_of_sources, archive_in, 3); + parseAppsecJSONKey>("sourcesIdentifiers", sources_identifiers, archive_in); + } + + int + getMinNumOfSources() const + { + return min_num_of_sources; + } + + const std::vector & + getSourcesIdentifiers() const + { + return sources_identifiers; + } + +private: + int min_num_of_sources; + std::vector sources_identifiers; +}; + +std::ostream & +operator<<(std::ostream &os, const TrustedSourcesSpec &obj) +{ + os + << "Min number of sources: " + << obj.getMinNumOfSources() + << ", SourceIdentifiers: [" + << makeSeparatedStr(obj.getSourcesIdentifiers(), ",") + << "]"; + return os; +} + +class SourcesIdentifiers +{ +public: + SourcesIdentifiers(const std::string &_source_identifier, const std::string &_value) + : + source_identifier(_source_identifier), + value(_value) + {} + + void + save(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("sourceIdentifier", source_identifier), + cereal::make_nvp("value", value) + ); + } + + const std::string & + getSourceIdent() const + { + return source_identifier; + } + +private: + std::string source_identifier; + std::string value; +}; + +class SourceIdentifierSpec +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgTrace(D_K8S_POLICY) << "Loading trusted sources spec"; + parseAppsecJSONKey("sourceIdentifier", source_identifier, archive_in); + parseAppsecJSONKey>("value", value, archive_in); + } + + const std::string & + getSourceIdentifier() const + { + return source_identifier; + } + + const std::vector & + getValues() const + { + return value; + } + +private: + std::string source_identifier; + std::vector value; +}; + +std::ostream & +operator<<(std::ostream &os, const SourceIdentifierSpec &obj) +{ + os + << "sourceIdentifier: " + << obj.getSourceIdentifier() + << ", values: [" + << makeSeparatedStr(obj.getValues(), ",") + << "]"; + return os; +} + +class AppSecTrustedSources +{ +public: + AppSecTrustedSources() + {} + + AppSecTrustedSources( + const std::string &_name, + int _num_of_sources, + const std::vector &_sources_identifiers) + : + name(_name), + num_of_sources(_num_of_sources), + sources_identifiers(_sources_identifiers) + { + try { + id = to_string(boost::uuids::random_generator()()); + } catch (const boost::uuids::entropy_error &e) { + dbgWarning(D_K8S_POLICY) << "Failed to generate Trusted Sources ID. 
Error: " << e.what(); + } + } + + void + save(cereal::JSONOutputArchive &out_ar) const + { + std::string parameter_type = "TrustedSource"; + out_ar( + cereal::make_nvp("id", id), + cereal::make_nvp("name", name), + cereal::make_nvp("numOfSources", num_of_sources), + cereal::make_nvp("sourcesIdentifiers", sources_identifiers), + cereal::make_nvp("parameterType", parameter_type) + ); + } + + const std::vector & + getSourcesIdentifiers() const + { + return sources_identifiers; + } + +private: + std::string id; + std::string name; + int num_of_sources; + std::vector sources_identifiers; +}; + +#endif // __TRUSTED_SOURCES_SECTION_H__ diff --git a/components/security_apps/orchestration/k8s_policy_gen/k8s_policy_gen.cc b/components/security_apps/orchestration/k8s_policy_gen/k8s_policy_gen.cc new file mode 100644 index 0000000..9eef08a --- /dev/null +++ b/components/security_apps/orchestration/k8s_policy_gen/k8s_policy_gen.cc @@ -0,0 +1,1167 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "k8s_policy_gen.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rest.h" +#include "debug.h" +#include "config.h" +#include "connkey.h" +#include "url_parser.h" +#include "i_messaging.h" +#include "i_agent_details.h" +#include "customized_cereal_map.h" +#include "include/appsec_practice_section.h" +#include "include/ingress_data.h" +#include "include/settings_section.h" +#include "include/triggers_section.h" +#include "include/k8s_policy_common.h" +#include "include/exceptions_section.h" +#include "include/rules_config_section.h" +#include "include/trusted_sources_section.h" + +using namespace std; + +USE_DEBUG_FLAG(D_K8S_POLICY); + +const static string policy_path = "/tmp/k8s.policy"; +const static string open_appsec_io = "openappsec.io/"; +const static string policy_key = "policy"; +const static string syslog_key = "syslog"; +const static string mode_key = "mode"; + +class SecurityAppsWrapper +{ +public: + SecurityAppsWrapper( + const AppSecWrapper &_waap, + const TriggersWrapper &_trrigers, + const RulesConfigWrapper &_rules, + const ExceptionsWrapper &_exceptions, + const string &_policy_version) + : + waap(_waap), + trrigers(_trrigers), + rules(_rules), + exceptions(_exceptions), + policy_version(_policy_version) {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const + { + out_ar( + cereal::make_nvp("waap", waap), + cereal::make_nvp("triggers", trrigers), + cereal::make_nvp("rules", rules), + cereal::make_nvp("exceptions", exceptions), + cereal::make_nvp("version", policy_version) + ); + } + +private: + AppSecWrapper waap; + TriggersWrapper trrigers; + RulesConfigWrapper rules; + ExceptionsWrapper exceptions; + string policy_version; +}; + +class K8sPolicyWrapper +{ +public: + K8sPolicyWrapper( + const SettingsWrapper &_settings, + const SecurityAppsWrapper &_security_apps) + : + settings(_settings), + security_apps(_security_apps) {} + + void + serialize(cereal::JSONOutputArchive &out_ar) const 
+ { + security_apps.serialize(out_ar); + } + +private: + SettingsWrapper settings; + SecurityAppsWrapper security_apps; +}; + +class NamespaceMetadata +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + dbgInfo(D_K8S_POLICY) << "NamespaceMetadata load"; + parseAppsecJSONKey("name", name, archive_in); + parseAppsecJSONKey("uid", uid, archive_in); + } + + const std::string & getName() const { return name; } + const std::string & getUID() const { return uid; } + +private: + std::string name; + std::string uid; +}; + +class SingleNamespaceData +{ +public: + void + load(cereal::JSONInputArchive &archive_in) + { + parseAppsecJSONKey("metadata", metadata, archive_in); + } + + const NamespaceMetadata & getMetadata() const { return metadata; } + +private: + NamespaceMetadata metadata; +}; + +class NamespaceData : public ClientRest +{ +public: + bool + loadJson(const std::string &json) + { + dbgTrace(D_K8S_POLICY) << "Loading namespace data"; + std::string modified_json = json; + modified_json.pop_back(); + std::stringstream in; + in.str(modified_json); + try { + cereal::JSONInputArchive in_ar(in); + in_ar( + cereal::make_nvp("items", items) + ); + } catch (cereal::Exception &e) { + dbgError(D_K8S_POLICY) << "Failed to load namespace data JSON. Error: " << e.what(); + return false; + } + return true; + } + + const std::vector & getItems() const { return items; } + +private: + std::vector items; +}; + +class K8sPolicyGenerator::Impl + : + public Singleton::Provide::From, + public Singleton::Consume, + public Singleton::Consume, + public Singleton::Consume, + public Singleton::Consume +{ +public: + void + init() + { + token = retrieveToken(); + if (token.empty()) return; + dbgTrace(D_K8S_POLICY) << "Initializing K8S policy generator"; + conn_flags.setFlag(MessageConnConfig::SECURE_CONN); + conn_flags.setFlag(MessageConnConfig::IGNORE_SSL_VALIDATION); + + messaging = Singleton::Consume::by(); + + Singleton::Consume::by()->addOneTimeRoutine( + I_MainLoop::RoutineType::Offline, + [this] () + { + ScopedContext ctx; + ctx.registerValue("k8s_env", true); + while(!getClusterId()) { + Singleton::Consume::by()->yield(chrono::seconds(1)); + } + return; + }, + "Get k8s cluster ID" + ); + } + + const string & getPolicyPath(void) const override { return policy_path; } + + string + parsePolicy(const string &policy_version) + { + ScopedContext ctx; + ctx.registerValue("k8s_env", true); + + IngressData ingress; + bool res = messaging->sendObject( + ingress, + I_Messaging::Method::GET, + "kubernetes.default.svc", + 443, + conn_flags, + "/apis/networking.k8s.io/v1/ingresses", + "Authorization: Bearer " + token + "\nConnection: close" + ); + + if(!res) { + // TBD: Error handling : INXT-31444 + dbgError(D_K8S_POLICY) << "Failed to retrieve K8S Ingress configurations"; + return ""; + } + + set generated_apps; + set parsed_web_apps_set; + vector parsed_web_apps; + vector parsed_rules; + vector parsed_log_triggers; + set parsed_exeptions; + vector parsed_web_user_res; + map practice_map; + map log_triggers_map; + map exception_map; + map web_user_res_map; + map trusted_sources_map; + map> source_identifiers_map; + RulesConfigRulebase cleanup_rule; + string cleanup_rule_mode = "Inactive"; + + dbgTrace(D_K8S_POLICY) << "Received Ingress apiVersion: " << ingress.getapiVersion(); + dbgTrace(D_K8S_POLICY) << "Ingress items ammount: " << ingress.getItems().size(); + // TBD: break to methods : INXT-31445 + for (const SingleIngressData &item : ingress.getItems()) { + dbgTrace(D_K8S_POLICY) + << "Metadata name is: 
" + << item.getMetadata().getName() + << ", Namespace is: " + << item.getMetadata().getNamespace() + << ", Spec: " + << item.getSpec(); + + set> specific_assets_from_ingress; + for (const IngressDefinedRule &rule : item.getSpec().getRules()) { + string url = rule.getHost(); + for (const IngressRulePath &uri : rule.getPathsWrapper().getRulePaths()) { + specific_assets_from_ingress.insert({url, uri.getPath()}); + dbgTrace(D_K8S_POLICY) + << "Inserting Host data to the specific asset set:" + << "URL: '" + << url + << "' uri: '" + << uri.getPath() + << "'"; + } + } + + string asset; + string annotation_type; + string annotation_name; + string policy_annotation; + string syslog_address; + string syslog_port; + string mode_annotation; + for (const pair &annotation : item.getMetadata().getAnnotations()) { + string annotation_key = annotation.first; + string annotation_val = annotation.second; + if (annotation_key.find(open_appsec_io) != string::npos) { + if (annotation_key.find(policy_key) != string::npos) policy_annotation = annotation_val; + if (annotation_key.find(syslog_key) != string::npos) { + bool has_port = annotation_val.find(":"); + syslog_address = annotation_val.substr(0, annotation_val.find(":")); + syslog_port = has_port ? annotation_val.substr(annotation_val.find(":") + 1) : ""; + } + if (annotation_key.find(mode_key) != string::npos) { + mode_annotation = annotation_val; + ctx.registerValue("default mode annotation", mode_annotation); + } + } + } + if (policy_annotation.empty()) { + dbgInfo(D_K8S_POLICY) << "No policy was found in this ingress"; + continue; + } + + dbgTrace(D_K8S_POLICY) << "Trying to parse policy for " << policy_annotation; + AppsecSpecParser appsec_policy; + res = messaging->sendObject(appsec_policy, + I_Messaging::Method::GET, + "kubernetes.default.svc", + 443, + conn_flags, + "/apis/openappsec.io/v1beta1/policies/" + policy_annotation, + "Authorization: Bearer " + token + "\nConnection: close"); + if(!res) { + dbgError(D_K8S_POLICY) << "Failed to retrieve AppSec policy"; + return ""; + } + dbgTrace(D_K8S_POLICY) << "Succeessfully retrieved AppSec policy: " << appsec_policy.getSpec(); + + vector specific_rules = appsec_policy.getSpec().getSpecificRules(); + ParsedRule default_rule = appsec_policy.getSpec().getDefaultRule(); + + for (const ParsedRule &parsed_rule : specific_rules) { + string asset_name = parsed_rule.getHost(); + dbgTrace(D_K8S_POLICY) << "Handling specific rule for asset: " << asset_name; + + string practice_annotation_name; + // TBD: support multiple practices + if (parsed_rule.getPractices().size() > 0 && !parsed_rule.getPractices()[0].empty()) { + practice_annotation_name = parsed_rule.getPractices()[0]; + } else if (default_rule.getPractices().size() > 0 && !default_rule.getPractices()[0].empty()) { + practice_annotation_name = default_rule.getPractices()[0]; + } + + string trigger_annotation_name; + // TBD: support multiple triggers + if (parsed_rule.getLogTriggers().size() > 0 && !parsed_rule.getLogTriggers()[0].empty()) { + trigger_annotation_name = parsed_rule.getLogTriggers()[0]; + } else if (default_rule.getLogTriggers().size() > 0 && !default_rule.getLogTriggers()[0].empty()) { + trigger_annotation_name = default_rule.getLogTriggers()[0]; + } + + string exception_annotation_name; + // TBD: support multiple exceptions + if (parsed_rule.getExceptions().size() > 0 && !parsed_rule.getExceptions()[0].empty()) { + exception_annotation_name = parsed_rule.getExceptions()[0]; + } else if (default_rule.getExceptions().size() > 0 && 
!default_rule.getExceptions()[0].empty()) { + exception_annotation_name = default_rule.getExceptions()[0]; + } + + string web_user_res_annotation_name = + parsed_rule.getCustomResponse().empty() ? + default_rule.getCustomResponse() : + parsed_rule.getCustomResponse(); + + string source_identifiers_annotation_name = + parsed_rule.getSourceIdentifiers().empty() ? + default_rule.getSourceIdentifiers() : + parsed_rule.getSourceIdentifiers(); + + string trusted_sources_annotation_name = + parsed_rule.getTrustedSources ().empty() ? + default_rule.getTrustedSources() : + parsed_rule.getTrustedSources(); + + string url = asset_name.substr(0, asset_name.find("/")); + string uri = asset_name.substr(asset_name.find("/")); + if (specific_assets_from_ingress.find({url, uri}) != specific_assets_from_ingress.end()) { + // Erasing the current asset from the specific assets, because it won't have default policy + specific_assets_from_ingress.erase({url, uri}); + } + + vector> web_user_res_vec; + if (!extractExceptions(exception_annotation_name, exception_map, parsed_exeptions)) { + dbgWarning(D_K8S_POLICY) + << "Failed extracting exceptions. Exception name: " + << exception_annotation_name; + return ""; + } + + if (!extractTriggers( + trigger_annotation_name, + log_triggers_map, + parsed_log_triggers, + syslog_address, + syslog_port) + ) { + dbgWarning(D_K8S_POLICY) + << "Failed extracting triggers. Trigger name: " + << trigger_annotation_name; + return ""; + } + + if (!extractWebUserResponse( + web_user_res_annotation_name, + web_user_res_map, + web_user_res_vec, + parsed_web_user_res) + ) { + dbgWarning(D_K8S_POLICY) + << "Failed extracting custom response. Custom response name: " + << web_user_res_annotation_name; + return ""; + } + + AppSecTrustedSources parsed_trusted_sources; + if (!extractTrustedSources( + asset_name, + trusted_sources_annotation_name, + source_identifiers_annotation_name, + trusted_sources_map, + source_identifiers_map, + parsed_trusted_sources) + ) { + dbgWarning(D_K8S_POLICY) + << "Failed extracting trused sources. 
Trusted source name: " + << trusted_sources_annotation_name + << ", Source identifiers annotation name: " + << source_identifiers_annotation_name; + return ""; + } + + if (!practice_annotation_name.empty() && practice_map.count(practice_annotation_name) == 0) { + AppsecSpecParser appsec_practice; + res = messaging->sendObject(appsec_practice, + I_Messaging::Method::GET, + "kubernetes.default.svc", + 443, + conn_flags, + "/apis/openappsec.io/v1beta1/practices/" + practice_annotation_name, + "Authorization: Bearer " + token + "\nConnection: close"); + if(!res) { + dbgError(D_K8S_POLICY) << "Failed to retrieve AppSec practice for asset " << asset_name; + return ""; + } + practice_map.emplace(practice_annotation_name, appsec_practice.getSpec()); + dbgTrace(D_K8S_POLICY) + << "Successfully retrieved AppSec practice" + << practice_annotation_name + << appsec_practice.getSpec(); + } + + string log_trigger_id; + LogTriggerSection log_trigger_annotation; + if (log_triggers_map.count(trigger_annotation_name) > 0) { + log_trigger_id = log_triggers_map.at(trigger_annotation_name).getTriggerId(); + log_trigger_annotation = log_triggers_map.at(trigger_annotation_name); + } + string exception_id; + if (exception_map.count(exception_annotation_name) > 0) { + exception_id = exception_map.at(exception_annotation_name).getBehaviorId(); + } + RulesConfigRulebase rules_config = createMultiRulesSections( + url, + uri, + practice_annotation_name, + "WebApplication", + trigger_annotation_name, + log_trigger_id, + "log", + web_user_res_vec, + asset_name, + exception_annotation_name, + exception_id + ); + string port = "80"; + string full_url = asset_name == "Any" ? "" : url + "/" + uri + ":" + port; + string asset_id = rules_config.getAsstId(); + string practice_id = rules_config.getPracticeId(); + + if (!generated_apps.count(full_url)) { + WebAppSection web_app = WebAppSection( + full_url, + asset_id, + asset_name, + asset_id, + asset_name, + practice_id, + practice_annotation_name, + practice_map.at(practice_annotation_name), + log_trigger_annotation, + default_rule.getMode(), + parsed_trusted_sources + ); + + parsed_web_apps_set.insert(web_app); + parsed_rules.push_back(rules_config); + generated_apps.insert(full_url); + } + } + + string exception_name; + if (!default_rule.getExceptions().empty()) { + exception_name = default_rule.getExceptions()[0]; + if (!extractExceptions(exception_name, exception_map, parsed_exeptions)) return ""; + } + + string trigger_name; + if (!default_rule.getLogTriggers().empty()) { + trigger_name = default_rule.getLogTriggers()[0]; + if (!extractTriggers( + trigger_name, + log_triggers_map, + parsed_log_triggers, + syslog_address, + syslog_port)) return ""; + } + + vector> default_web_user_res_vec; + string web_user_res_annotation_name = default_rule.getCustomResponse(); + if (!extractWebUserResponse( + web_user_res_annotation_name, + web_user_res_map, + default_web_user_res_vec, + parsed_web_user_res) + ) return ""; + + AppSecTrustedSources default_parsed_trusted_sources; + string trusted_sources_annotation_name = default_rule.getTrustedSources(); + string source_identifiers_annotation_name = default_rule.getSourceIdentifiers(); + if (!extractTrustedSources( + "Any", + trusted_sources_annotation_name, + source_identifiers_annotation_name, + trusted_sources_map, + source_identifiers_map, + default_parsed_trusted_sources) + ) { + dbgWarning(D_K8S_POLICY) + << "Failed extracting trused sources. 
Trusted source name: " + << trusted_sources_annotation_name + << ", Source identifiers annotation name: " + << source_identifiers_annotation_name; + return ""; + } + + string practice_name; + if (!default_rule.getPractices().empty()) { + practice_name = default_rule.getPractices()[0]; + } + if (!practice_name.empty() && practice_map.count(practice_name) == 0) { + AppsecSpecParser appsec_practice; + res = messaging->sendObject(appsec_practice, + I_Messaging::Method::GET, + "kubernetes.default.svc", + 443, + conn_flags, + "/apis/openappsec.io/v1beta1/practices/" + practice_name, + "Authorization: Bearer " + token + "\nConnection: close"); + if(!res) { + dbgError(D_K8S_POLICY) << "Failed to retrieve AppSec practice for the dafult practice"; + return ""; + } + practice_map.emplace(practice_name, appsec_practice.getSpec()); + dbgTrace(D_K8S_POLICY) + << "Successfully retrieved AppSec practice" + << practice_name + << appsec_practice.getSpec(); + } + + if (item.getSpec().isDefaultBackendExists()) { + dbgTrace(D_K8S_POLICY) << "Default Backend exists in the ingress"; + bool should_create_rule = false; + if (cleanup_rule_mode != "Prevent") { + if (default_rule.getMode().find("prevent") != string::npos) { + cleanup_rule_mode = "Prevent"; + should_create_rule = true; + } + } else if (cleanup_rule_mode == "Inactive") { + if (default_rule.getMode().find("detect") != string::npos) { + cleanup_rule_mode = "Detect"; + should_create_rule = true; + } + } + + if (should_create_rule) { + dbgTrace(D_K8S_POLICY) << "Cleanup rule mode: " << cleanup_rule_mode; + specific_assets_from_ingress.insert({"Any", "Any"}); + } + } + + // TBD: fix this to support multiple exceptions! + for (const pair &asset : specific_assets_from_ingress) { + string log_trigger_id; + LogTriggerSection log_trigger_section; + if (log_triggers_map.count(trigger_name) > 0) { + log_trigger_id = log_triggers_map.at(trigger_name).getTriggerId(); + log_trigger_section = log_triggers_map.at(trigger_name); + } + string exception_id; + if ( + !default_rule.getExceptions().empty() && exception_map.count(default_rule.getExceptions()[0]) > 0 + ) { + exception_id = exception_map.at(default_rule.getExceptions()[0]).getBehaviorId(); + } + string asset_name = asset.first == "Any" && asset.second == "Any" ? 
"Any" : asset.first + asset.second; + RulesConfigRulebase default_rule_config = createMultiRulesSections( + asset.first, + asset.second, + practice_name, + "WebApplication", + trigger_name, + log_trigger_id, + "log", + default_web_user_res_vec, + asset_name, + exception_name, + exception_id + ); + if (asset_name == "Any") { + cleanup_rule = default_rule_config; + } else { + parsed_rules.push_back(default_rule_config); + } + + string asset_id = default_rule_config.getAsstId(); + string practice_id = default_rule_config.getPracticeId(); + + if (!generated_apps.count(asset.first + asset.second)) { + WebAppSection web_app = WebAppSection( + asset.first + asset.second, + asset_id, + "Any", + asset_id, + "Any", + practice_id, + practice_name, + practice_map.at(practice_name), + log_trigger_section, + default_rule.getMode(), + default_parsed_trusted_sources + ); + parsed_web_apps_set.insert(web_app); + generated_apps.insert(asset.first + asset.second); + } + } + } + + if (cleanup_rule_mode != "Inactive") { + dbgTrace(D_K8S_POLICY) << "Pushing a cleanup rule"; + parsed_rules.push_back(cleanup_rule); + } + + for (const auto & parsed_web_app : parsed_web_apps_set) { + parsed_web_apps.push_back(parsed_web_app); + } + + dbgTrace(D_K8S_POLICY) + << "Policy creation summery:" << endl + << "Web applications ammount: " + << parsed_web_apps.size() + << endl << "Rules ammount: " + << parsed_rules.size() + << endl << "Triggers ammount: " + << parsed_log_triggers.size() + << endl << "Web user response ammount: " + << parsed_web_user_res.size(); + + TriggersWrapper triggers_section(TriggersRulebase(parsed_log_triggers, parsed_web_user_res)); + AppSecWrapper waap_section = createMultipleAppSecSections(parsed_web_apps); + RulesConfigWrapper rules_config_section(parsed_rules); + + ExceptionsWrapper exceptions_section = createExceptionSection(parsed_exeptions); + SecurityAppsWrapper security_app_section = SecurityAppsWrapper( + waap_section, + triggers_section, + rules_config_section, + exceptions_section, + policy_version + ); + + SettingsWrapper profiles_section = createProfilesSection(); + K8sPolicyWrapper k8s_policy = K8sPolicyWrapper(profiles_section, security_app_section); + + return dumpPolicyToFile(k8s_policy); + } + + SettingsWrapper + createProfilesSection() + { + string agent_settings_key = "agent.test.k8s.policy"; + string agent_settings_value = "k8s policy"; + AgentSettingsSection agent_setting_1 = AgentSettingsSection(agent_settings_key, agent_settings_value); + + SettingsRulebase settings_rulebase_1 = SettingsRulebase({agent_setting_1}); + return SettingsWrapper(settings_rulebase_1); + } + + LogTriggerSection + createLogTriggersSection( + const string &trigger_name, + bool is_syslog = false, + const string &syslog_port = string(), + const AppsecTriggerSpec &trigger_spec = AppsecTriggerSpec()) + { + string verbosity = "Standard"; + string extendLoggingMinSeverity = + trigger_spec.getAppsecTriggerAdditionalSuspiciousEventsLogging().getMinimumSeverity(); + bool tpDetect = trigger_spec.getAppsecTriggerLogging().isDetectEvents(); + bool tpPrevent = trigger_spec.getAppsecTriggerLogging().isPreventEvents(); + bool webRequests = trigger_spec.getAppsecTriggerLogging().isAllWebRequests(); + bool webUrlPath = trigger_spec.getAppsecTriggerExtendedLogging().isUrlPath(); + bool webUrlQuery = trigger_spec.getAppsecTriggerExtendedLogging().isUrlQuery(); + bool webHeaders = trigger_spec.getAppsecTriggerExtendedLogging().isHttpHeaders(); + bool webBody = trigger_spec.getAppsecTriggerExtendedLogging().isRequestBody(); 
+ bool logToCloud = trigger_spec.getAppsecTriggerLogDestination().getCloud(); + bool logToAgent = trigger_spec.getAppsecTriggerLogDestination().isAgentLocal(); + bool beautify_logs = trigger_spec.getAppsecTriggerLogDestination().shouldBeautifyLogs(); + bool logToCef = trigger_spec.getAppsecTriggerLogDestination().isCefNeeded(); + bool logToSyslog = is_syslog ? is_syslog : trigger_spec.getAppsecTriggerLogDestination().isSyslogNeeded(); + bool responseBody = trigger_spec.getAppsecTriggerAdditionalSuspiciousEventsLogging().isResponseBody(); + bool extendLogging = trigger_spec.getAppsecTriggerAdditionalSuspiciousEventsLogging().isEnabled(); + int cefPortNum = logToCef ? trigger_spec.getAppsecTriggerLogDestination().getCefServerUdpPort() : 0; + string cefIpAddress = + logToCef ? trigger_spec.getAppsecTriggerLogDestination().getCefServerIpv4Address() : ""; + int syslogPortNum; + try { + syslogPortNum = + is_syslog ? + stoi(syslog_port) : + logToSyslog ? + trigger_spec.getAppsecTriggerLogDestination().getSyslogServerUdpPort() : + 514; + } catch (const exception &err) { + dbgWarning(D_K8S_POLICY) + << "Failed to convert port number from string. Port: " + << syslog_port + << ". Setting default value 514"; + syslogPortNum = 514; + } + string syslogIpAddress = + is_syslog ? + trigger_name : + logToSyslog ? + trigger_spec.getAppsecTriggerLogDestination().getSyslogServerIpv4Address() : + ""; + + LogTriggerSection log( + trigger_name, + verbosity, + extendLoggingMinSeverity, + extendLogging, + logToAgent, + logToCef, + logToCloud, + logToSyslog, + responseBody, + tpDetect, + tpPrevent, + webBody, + webHeaders, + webRequests, + webUrlPath, + webUrlQuery, + cefPortNum, + cefIpAddress, + syslogPortNum, + syslogIpAddress, + beautify_logs + ); + return log; + } + + WebUserResponseTriggerSection + createWebUserResponseTriggerSection( + const string &trigger_name, + const AppSecWebUserResponseSpec &trigger_spec) + { + string mode = trigger_spec.getMode(); + string response_body = trigger_spec.getMessageBody(); + string response_title = trigger_spec.getMessageTitle(); + int response_code = trigger_spec.getHttpResponseCode(); + + WebUserResponseTriggerSection web_user_res( + trigger_name, + mode, + response_body, + response_code, + response_title + ); + + return web_user_res; + } + + ExceptionsWrapper + createExceptionSection(const set &_exeptions) + { + vector exeptions(_exeptions.begin(), _exeptions.end()); + ExceptionsRulebase exception_1(exeptions); + return ExceptionsWrapper({exception_1}); + } + + RulesConfigRulebase + createMultiRulesSections( + const string &url, + const string &uri, + const string &practice_name, + const string &practice_type, + const string &trigger_name, + const string &trigger_id, + const string &trigger_type, + const vector> &web_user_res_vec, + const string &asset_name, + const string &exception_name, + const string &exception_id) + { + string practice_id; + if (practice_name_to_id_map.count(practice_name)) { + practice_id = practice_name_to_id_map[practice_name]; + } else { + try { + practice_id = to_string(boost::uuids::random_generator()()); + } catch (const boost::uuids::entropy_error &e) { + dbgWarning(D_K8S_POLICY) << "Failed to generate Practice ID. 
Error: " << e.what(); + //TBD: return Maybe as part of future error handling + } + } + + PracticeSection practice = PracticeSection(practice_id, practice_type, practice_name); + ParametersSection exception_param = ParametersSection(exception_id, exception_name); + + vector triggers; + if (!trigger_id.empty()) { + triggers.push_back(RulesTriggerSection(trigger_name, trigger_id, trigger_type)); + } + for (const pair &web_user_res : web_user_res_vec) { + triggers.push_back(RulesTriggerSection(web_user_res.first, web_user_res.second, "WebUserResponse")); + } + + RulesConfigRulebase rules_config_1 = RulesConfigRulebase( + asset_name, + url, + uri, + {practice}, + {exception_param}, + triggers + ); + return rules_config_1; + } + + AppSecWrapper + createMultipleAppSecSections(vector &web_apps) + { + AppSecRulebase app_sec_rulebase = AppSecRulebase(web_apps, {}); + return AppSecWrapper(app_sec_rulebase); + } + +private: + I_Messaging* messaging = nullptr; + Flags conn_flags; + const string cluster_url = "https://kubernetes.default.svc"; + const string service_account = "/var/run/secrets/kubernetes.io/serviceaccount"; + const string cacert_path = service_account + "/ca.crt"; + string token; + map practice_name_to_id_map; + + bool + getClusterId() + { + dbgTrace(D_K8S_POLICY) << "Getting cluster UID"; + NamespaceData namespaces_data; + bool res = messaging->sendObject( + namespaces_data, + I_Messaging::Method::GET, + "kubernetes.default.svc", + 443, + conn_flags, + "/api/v1/namespaces/", + "Authorization: Bearer " + token + "\nConnection: close" + ); + + if(!res) { + dbgError(D_K8S_POLICY) << "Failed to retrieve K8S namespace data"; + return false; + } + + string uid; + for (const SingleNamespaceData &ns : namespaces_data.getItems()) { + if (ns.getMetadata().getName() == "kube-system") { + uid = ns.getMetadata().getUID(); + dbgTrace(D_K8S_POLICY) << "Found k8s cluster UID: " << uid; + I_Environment *env = Singleton::Consume::by(); + env->getConfigurationContext().registerValue( + "k8sClusterId", + uid, + EnvKeyAttr::LogSection::SOURCE + ); + Singleton::Consume::by()->setClusterId(uid); + return true; + } + } + return false; + } + + const string + dumpPolicyToFile(const K8sPolicyWrapper &k8s_policy) const + { + stringstream ss; + { + cereal::JSONOutputArchive ar(ss); + k8s_policy.serialize(ar); + } + string policy_str = ss.str(); + ofstream policy_file("/tmp/k8s.policy"); + policy_file << policy_str; + policy_file.close(); + return policy_str; + } + + string + readFileContent(const string&file_path) + { + ifstream file(file_path); + stringstream buffer; + + buffer << file.rdbuf(); + + return buffer.str(); + } + + string + retrieveToken() + { + return readFileContent(service_account + "/token"); + } + + bool + extractExceptions( + const string &exception_annotation_name, + map &exception_map, + set &parsed_exeptions) + { + if (!exception_annotation_name.empty() && exception_map.count(exception_annotation_name) == 0) { + dbgTrace(D_K8S_POLICY) << "Trying to retrieve exceptions for " << exception_annotation_name; + AppsecSpecParser> appsec_exception; + bool res = messaging->sendObject(appsec_exception, + I_Messaging::Method::GET, + "kubernetes.default.svc", + 443, + conn_flags, + "/apis/openappsec.io/v1beta1/exceptions/" + exception_annotation_name, + "Authorization: Bearer " + token + "\nConnection: close"); + if(!res) { + dbgError(D_K8S_POLICY) << "Failed to retrieve AppSec exception"; + return false; + } + dbgTrace(D_K8S_POLICY) + << "Successfuly retrieved AppSec exceptions for " + << 
exception_annotation_name; + + for (const AppsecExceptionSpec &parsed_exeption : appsec_exception.getSpec()) { + ExceptionMatch exception_match(parsed_exeption); + string behavior = + parsed_exeption.getAction() == "skip" ? + "ignore" : + parsed_exeption.getAction(); + ExceptionBehavior exception_behavior("action", behavior); + InnerException inner_exception(exception_behavior, exception_match); + exception_map.emplace(exception_annotation_name, inner_exception); + parsed_exeptions.insert(inner_exception); + } + } + return true; + } + + bool + extractTriggers( + const string &trigger_annotation_name, + map &log_triggers_map, + vector &parsed_log_triggers, + const string &syslog_address = string(), + const string &syslog_port = string()) + { + if (trigger_annotation_name.empty() && !syslog_address.empty()) { + if (!IPAddr::isValidIPAddr(syslog_address)) { + dbgWarning(D_K8S_POLICY) << "Syslog address is invalid. Address: " << syslog_address; + return false; + } + dbgTrace(D_K8S_POLICY) + << "Creating default syslog log section with syslog service address: " + << syslog_address + << ", Port: " + << syslog_port; + + LogTriggerSection log_triggers_section = + createLogTriggersSection(syslog_address, true, syslog_port); + log_triggers_map.emplace(trigger_annotation_name, log_triggers_section); + parsed_log_triggers.push_back(log_triggers_section); + } else if (!trigger_annotation_name.empty() && log_triggers_map.count(trigger_annotation_name) == 0) { + dbgTrace(D_K8S_POLICY) << "Trying to retrieve triggers for " << trigger_annotation_name; + AppsecSpecParser appsec_trigger; + bool res = messaging->sendObject(appsec_trigger, + I_Messaging::Method::GET, + "kubernetes.default.svc", + 443, + conn_flags, + "/apis/openappsec.io/v1beta1/logtriggers/" + trigger_annotation_name, + "Authorization: Bearer " + token + "\nConnection: close"); + if(!res) { + dbgError(D_K8S_POLICY) << "Failed to retrieve AppSec triggers"; + return false; + } + dbgTrace(D_K8S_POLICY) + << "Successfuly retrieved AppSec exceptions for " + << trigger_annotation_name + << ":\n" + << appsec_trigger.getSpec(); + + LogTriggerSection log_triggers_section = + createLogTriggersSection(trigger_annotation_name, false, "", appsec_trigger.getSpec()); + log_triggers_map.emplace(trigger_annotation_name, log_triggers_section); + parsed_log_triggers.push_back(log_triggers_section); + } + return true; + } + + bool + extractTrustedSources( + const string &asset_name, + const string &trusted_sources_name, + const string &source_identifiers_name, + map &trusted_sources_map, + map> &source_identifiers_map, + AppSecTrustedSources &parsedTrustedSources) + { + if (trusted_sources_name.empty() && source_identifiers_name.empty()) return true; + if (trusted_sources_name.empty() ^ source_identifiers_name.empty()) { + dbgInfo(D_K8S_POLICY) + << "Trusted Sources or Source Identifier were not provided. 
Truster Sources: " + << trusted_sources_name + << ", Source Identidier: " + << source_identifiers_name; + return false; + } + + AppsecSpecParser trusted_sources_from_ingress; + AppsecSpecParser> source_identifier_from_ingress; + + // Parsing trusted sources from the k8s API + if (!trusted_sources_map.count(trusted_sources_name)) { + dbgTrace(D_K8S_POLICY) << "Trying to retrieve trusted sources for: " << trusted_sources_name; + bool res = messaging->sendObject(trusted_sources_from_ingress, + I_Messaging::Method::GET, + "kubernetes.default.svc", + 443, + conn_flags, + "/apis/openappsec.io/v1beta1/trustedsources/" + trusted_sources_name, + "Authorization: Bearer " + token + "\nConnection: close"); + if(!res) { + dbgError(D_K8S_POLICY) << "Failed to retrieve trusted sources"; + return false; + } + trusted_sources_map[trusted_sources_name] = trusted_sources_from_ingress.getSpec(); + } + + // Parsing source identifiers from the k8s API + if (!source_identifiers_map.count(source_identifiers_name)) { + dbgTrace(D_K8S_POLICY) << "Trying to retrieve sources identifiers for: " << source_identifiers_name; + bool res = messaging->sendObject(source_identifier_from_ingress, + I_Messaging::Method::GET, + "kubernetes.default.svc", + 443, + conn_flags, + "/apis/openappsec.io/v1beta1/sourcesidentifiers/" + source_identifiers_name, + "Authorization: Bearer " + token + "\nConnection: close"); + if(!res) { + dbgError(D_K8S_POLICY) << "Failed to retrieve trusted sources"; + return false; + } + source_identifiers_map[source_identifiers_name] = source_identifier_from_ingress.getSpec(); + } + + // Generating the (Trusted Sources X Source Identifiers) matrix + vector generated_trusted_json; + for (const SourceIdentifierSpec &src_ident : source_identifiers_map[source_identifiers_name]) { + for (const string &trusted_src : trusted_sources_map[trusted_sources_name].getSourcesIdentifiers()) { + if (src_ident.getValues().empty()) { + generated_trusted_json.push_back(SourcesIdentifiers(src_ident.getSourceIdentifier(), trusted_src)); + } else { + for (const string &val : src_ident.getValues()) { + string src_key = src_ident.getSourceIdentifier() + ":" + val; + generated_trusted_json.push_back(SourcesIdentifiers(src_key, trusted_src)); + } + } + } + } + + parsedTrustedSources = AppSecTrustedSources( + asset_name, + trusted_sources_map[trusted_sources_name].getMinNumOfSources(), + generated_trusted_json + ); + + return true; + } + + bool + extractWebUserResponse( + const string &web_user_res_annotation_name, + map &web_user_res_map, + vector> &web_user_res_vec, + vector &parsed_web_user_res) + { + if(!web_user_res_annotation_name.empty()) { + dbgTrace(D_K8S_POLICY) << "Trying to retrieve web user response for: " << web_user_res_annotation_name; + AppsecSpecParser appsec_web_user_res; + bool res = messaging->sendObject(appsec_web_user_res, + I_Messaging::Method::GET, + "kubernetes.default.svc", + 443, + conn_flags, + "/apis/openappsec.io/v1beta1/customresponses/" + web_user_res_annotation_name, + "Authorization: Bearer " + token + "\nConnection: close"); + if(!res) { + dbgError(D_K8S_POLICY) << "Failed to retrieve appsec web user res"; + return false; + } + + if(web_user_res_map.count(web_user_res_annotation_name) == 0) { + WebUserResponseTriggerSection web_user_res_section = createWebUserResponseTriggerSection( + web_user_res_annotation_name, + appsec_web_user_res.getSpec()); + + web_user_res_map.emplace(web_user_res_annotation_name, web_user_res_section); + parsed_web_user_res.push_back(web_user_res_section); + 
web_user_res_vec.push_back( + pair( + web_user_res_section.getTriggerName(), + web_user_res_section.getTriggerId() + ) + ); + } else { + web_user_res_vec.push_back( + pair( + web_user_res_map.at(web_user_res_annotation_name).getTriggerName(), + web_user_res_map.at(web_user_res_annotation_name).getTriggerId() + ) + ); + } + + dbgTrace(D_K8S_POLICY) + << "Successfuly retrieved AppSec web user response for: " + << web_user_res_annotation_name + << ":\n" + << appsec_web_user_res.getSpec(); + } + return true; + } +}; + + +K8sPolicyGenerator::K8sPolicyGenerator() + : + Component("K8sPolicyGenerator"), + pimpl(make_unique()) {} + +K8sPolicyGenerator::~K8sPolicyGenerator() {} + +void +K8sPolicyGenerator::init() +{ + pimpl->init(); +} + +void +K8sPolicyGenerator::preload() +{} diff --git a/components/security_apps/orchestration/manifest_controller/CMakeLists.txt b/components/security_apps/orchestration/manifest_controller/CMakeLists.txt new file mode 100755 index 0000000..e850fe7 --- /dev/null +++ b/components/security_apps/orchestration/manifest_controller/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(manifest_controller manifest_controller.cc manifest_diff_calculator.cc manifest_handler.cc) + +add_subdirectory(manifest_controller_ut) diff --git a/components/security_apps/orchestration/manifest_controller/manifest_controller.cc b/components/security_apps/orchestration/manifest_controller/manifest_controller.cc new file mode 100755 index 0000000..11bb740 --- /dev/null +++ b/components/security_apps/orchestration/manifest_controller/manifest_controller.cc @@ -0,0 +1,445 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
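+
+// Overview of the component defined in this file, based on the code below:
+// ManifestController applies a newly downloaded manifest file. It filters out
+// ignored and corrupted packages, uninstalls packages that are no longer
+// tracked, builds an installation queue for new service packages, and writes
+// the on-disk manifest according to what was actually installed. The
+// orchestration package itself is handled separately, because the running
+// service is replaced during its own upgrade (see loadAfterSelfUpdate).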
+ +#include "manifest_controller.h" + +#include "config.h" +#include "debug.h" +#include "sasal.h" +#include "environment.h" +#include "version.h" +#include "log_generator.h" +#include "orchestration_comp.h" + +using namespace std; +using namespace ReportIS; + +SASAL_START // Orchestration - Manifest Handler + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +class IgnoredPackages +{ +public: + void + load(istream &input, char delim) + { + string ignored_package; + while (getline(input, ignored_package, delim)) + { + if (ignored_package == "all") { + ignore_packages.clear(); + ignore_packages.insert(ignored_package); + dbgInfo(D_ORCHESTRATOR) << "Will ignore updates for all packages"; + break; + } else if (ignored_package == "none") { + ignore_packages.clear(); + dbgInfo(D_ORCHESTRATOR) << "Will not ignore updates of any packages"; + break; + } + + if (ignored_package.size() > 0) { + ignore_packages.insert(ignored_package); + dbgInfo(D_ORCHESTRATOR) << "Updates for package " << ignored_package << " will be ignored"; + } + } + } + + void + load(const string &raw_value) + { + string token; + istringstream tokenStream(raw_value); + load(tokenStream, ','); + } + + const set & operator*() const { return ignore_packages; } + +private: + set ignore_packages; +}; + +class ManifestController::Impl : Singleton::Provide::From +{ +public: + void init(); + + bool updateManifest(const string &new_manifest_file) override; + bool loadAfterSelfUpdate() override; + +private: + bool changeManifestFile(const string &new_manifest_file); + + bool + handlePackage( + const Package &updated_package, + map ¤t_packages, + const map &new_packages, + map &corrupted_packages + ); + + ManifestDiffCalculator manifest_diff_calc; + ManifestHandler manifest_handler; + + string manifest_file_path; + string corrupted_file_list; + string temp_ext; + string backup_ext; + string packages_dir; + string orch_service_name; + set ignore_packages; +}; + +void +ManifestController::Impl::init() +{ + manifest_diff_calc.init(); + manifest_handler.init(); + + dbgTrace(D_ORCHESTRATOR) << "Manifest controller, file system path prefix: " << getFilesystemPathConfig(); + + manifest_file_path = getConfigurationWithDefault( + getFilesystemPathConfig() + "/conf/manifest.json", + "orchestration", + "Manifest file path" + ); + corrupted_file_list = getConfigurationWithDefault( + getFilesystemPathConfig() + "/conf/corrupted_packages.json", + "orchestration", + "Manifest corrupted files path" + ); + temp_ext = getConfigurationWithDefault("_temp", "orchestration", "Temp file extension"); + backup_ext = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + packages_dir = getConfigurationWithDefault( + getFilesystemPathConfig() + "/packages", + "orchestration", + "Packages directory" + ); + orch_service_name = getConfigurationWithDefault("orchestration", "orchestration", "Service name"); + + auto ignore_packages_path = getConfigurationWithDefault( + getFilesystemPathConfig() + "/conf/ignore-packages.txt", + "orchestration", + "Ignore packages list file path" + ); + + if (Singleton::Consume::by()->doesFileExist(ignore_packages_path)) { + try { + ifstream input_stream(ignore_packages_path); + if (!input_stream) { + dbgWarning(D_ORCHESTRATOR) + << "Cannot open the file with ignored packages. 
" + << "File: " << ignore_packages_path; + } else { + IgnoredPackages packages_to_ignore; + packages_to_ignore.load(input_stream, '\n'); + ignore_packages = *packages_to_ignore; + + input_stream.close(); + } + } catch (ifstream::failure &f) { + dbgWarning(D_ORCHESTRATOR) + << "Cannot read the file with ignored packages." + << " File: " << ignore_packages_path + << " Error: " << f.what(); + } + } +} + +bool +ManifestController::Impl::updateManifest(const string &new_manifest_file) +{ + auto i_env = Singleton::Consume::by(); + auto span_scope = i_env->startNewSpanScope(Span::ContextType::CHILD_OF); + + dbgDebug(D_ORCHESTRATOR) << "Starting to update manifest file"; + auto ignored_settings_packages = getProfileAgentSetting("orchestration.IgnoredPackagesList"); + set packages_to_ignore = ignore_packages; + if (ignored_settings_packages.ok()) packages_to_ignore = *(*ignored_settings_packages); + + auto orchestration_tools = Singleton::Consume::by(); + + if (packages_to_ignore.count("all") > 0) { + dbgTrace(D_ORCHESTRATOR) << "Nothing to update (\"ignore all\" turned on)"; + + if (!orchestration_tools->copyFile(new_manifest_file, manifest_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to copy a new manifest file"; + return false; + } + return true; + } + + Maybe> parsed_manifest = orchestration_tools->loadPackagesFromJson(new_manifest_file); + if (!parsed_manifest.ok()) { + dbgWarning(D_ORCHESTRATOR) << "Failed to parse the new manifest file. File: " << new_manifest_file; + return false; + } + + map new_packages = parsed_manifest.unpack(); + map current_packages; + parsed_manifest = orchestration_tools->loadPackagesFromJson(manifest_file_path); + + if (!parsed_manifest.ok()){ + dbgWarning(D_ORCHESTRATOR) << "Can not parse the current manifest file, start with new one."; + } else { + current_packages = parsed_manifest.unpack(); + } + + // Remove any update of all ignore packages + for (const auto &ignore_package : packages_to_ignore) { + dbgInfo(D_ORCHESTRATOR) << "Ignoring a package from the manifest. Package name: " << ignore_package; + if (new_packages.count(ignore_package) > 0) { + // Get the change as-is of the ignore package - it won"t update the service + current_packages[ignore_package] = new_packages[ignore_package]; + } else { + // Remove the ignore package from the current manifest file - it won't uninstall the service + current_packages.erase(ignore_package); + } + } + + map corrupted_packages; + parsed_manifest = orchestration_tools->loadPackagesFromJson(corrupted_file_list); + + if (!parsed_manifest.ok()){ + dbgWarning(D_ORCHESTRATOR) << "Can not parse corrupted services file, start with new one."; + } else { + corrupted_packages = parsed_manifest.unpack(); + } + + bool all_cleaned = true; + bool uninstall_done = false; + // Removes all the untracked packages. new_packages will be cleaned from already installed packages + auto packages_to_remove = manifest_diff_calc.filterUntrackedPackages(current_packages, new_packages); + for (auto remove_package = packages_to_remove.begin(); remove_package != packages_to_remove.end();) { + bool uninstall_response = true; + if (remove_package->second.isInstallable().ok()) { + uninstall_response = manifest_handler.uninstallPackage(remove_package->second); + } + + if (!uninstall_response) { + dbgWarning(D_ORCHESTRATOR) + << "Failed to uninstall package. 
Package: " << remove_package->second.getName(); + all_cleaned = false; + remove_package++; + } else { + uninstall_done = true; + current_packages.erase(remove_package->first); + remove_package = packages_to_remove.erase(remove_package); + } + } + + if (uninstall_done) { + if (!orchestration_tools->packagesToJsonFile(current_packages, manifest_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to update manifest file. File: " + << manifest_file_path; + } else { + dbgInfo(D_ORCHESTRATOR) << "Manifest file was updated successfully. File: " + << manifest_file_path; + } + } + + bool no_change = new_packages.size() == 0; + // Both new_packages & corrupted_packages will be updated based on updated manifest + bool no_corrupted_package = manifest_diff_calc.filterCorruptedPackages(new_packages, corrupted_packages); + + auto orchestration_service = new_packages.find("orchestration"); + if (orchestration_service != new_packages.end()) { + // Orchestration needs special handling as manifest should be backup differently + return handlePackage( + orchestration_service->second, + current_packages, + new_packages, + corrupted_packages + ); + } + auto wlp_standalone_service = new_packages.find("wlpStandalone"); + if (wlp_standalone_service != new_packages.end()) { + // wlpStandalone needs special handling as manifest should be backup differently + return handlePackage( + wlp_standalone_service->second, + current_packages, + new_packages, + corrupted_packages + ); + } + + bool all_installed = true; + bool any_installed = false; + + dbgDebug(D_ORCHESTRATOR) << "Starting to handle " << new_packages.size() <<" new packages"; + for (auto &new_package : new_packages) { + + if (new_package.second.getType() != Package::PackageType::Service) continue; + + size_t prev_size = corrupted_packages.size(); + bool handling_response = handlePackage( + new_package.second, + current_packages, + new_packages, + corrupted_packages + ); + + // During handlePackage function, package installation might fail so it will be added to + // corrupted_packages. Corrupted file needs to be updated accordingly + if (prev_size < corrupted_packages.size() && + !orchestration_tools->packagesToJsonFile(corrupted_packages, corrupted_file_list)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to update corrupted packages list."; + } + + // Orchestration needs special handling as manifest should be backup differently + if (new_package.first.compare(orch_service_name) == 0) { + return handling_response; + } + + any_installed = any_installed || handling_response; + all_installed = all_installed && handling_response; + } + + bool manifest_file_update = true; + + if (all_installed && (any_installed || no_change) && no_corrupted_package) { + manifest_file_update = changeManifestFile(new_manifest_file); + } else if (any_installed) { + manifest_file_update = orchestration_tools->packagesToJsonFile(current_packages, manifest_file_path); + } + return all_installed && manifest_file_update && no_corrupted_package && all_cleaned; +} + +// Orchestration package needs a special handling. Old service will die during the upgrade +// so we need to keep temporary manifest file to prevent overwriting. Once Orchestration upgrade +// finish, we return to regular path. 
+bool +ManifestController::Impl::loadAfterSelfUpdate() +{ + dbgDebug(D_ORCHESTRATOR) << "Starting load after the self update function"; + string temp_manifest_path = manifest_file_path + temp_ext; + auto orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->doesFileExist(temp_manifest_path)) { + return true; + } + + dbgDebug(D_ORCHESTRATOR) << "Orchestration updated itself"; + // Run post installation test + auto package_handler = Singleton::Consume::by(); + string current_file = packages_dir + "/" + orch_service_name + "/" + orch_service_name; + if (!package_handler->postInstallPackage(orch_service_name, current_file + temp_ext)) { + dbgWarning(D_ORCHESTRATOR) << "Failed in post install test. Package: " << orch_service_name; + return false; + } + dbgDebug(D_ORCHESTRATOR) << "Post installation test for the self update package succeed"; + + if (!changeManifestFile(temp_manifest_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to change manifest file after update the orchestration service."; + return false; + } + dbgDebug(D_ORCHESTRATOR) << "Update the temporary manifest to be the running manifest"; + + string backup_file = current_file + backup_ext; + string backup_temp_file = backup_file + temp_ext; + + if (!package_handler->updateSavedPackage(orch_service_name, current_file + temp_ext)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to update the saved package. Package: " << orch_service_name; + return false; + } + + return true; +} + +bool +ManifestController::Impl::changeManifestFile(const string &new_manifest_file) +{ + dbgDebug(D_ORCHESTRATOR) << "Backup the old manifest file"; + auto orchestration_tools = Singleton::Consume::by(); + + if (orchestration_tools->doesFileExist(manifest_file_path)) { + if (!orchestration_tools->copyFile(manifest_file_path, + manifest_file_path + backup_ext)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to backup the old manifest file"; + } + } + + dbgDebug(D_ORCHESTRATOR) << "Writing new manifest to file"; + if (!orchestration_tools->copyFile(new_manifest_file, manifest_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed write new manifest to file"; + return false; + } + + if (!orchestration_tools->isNonEmptyFile(manifest_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to get manifest file data"; + return false; + } + + dbgInfo(D_ORCHESTRATOR) << "Manifest file has been updated."; + + if (!orchestration_tools->removeFile(new_manifest_file)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to remove new manifest file. Path: " << new_manifest_file; + } + return true; +} + +bool +ManifestController::Impl::handlePackage( + const Package &package, + map ¤t_packages, + const map &new_packages, + map &corrupted_packages) +{ + auto i_env = Singleton::Consume::by(); + auto span_scope = i_env->startNewSpanScope(Span::ContextType::CHILD_OF); + dbgDebug(D_ORCHESTRATOR) << "Handling package. Package: " << package.getName(); + + if (!package.isInstallable().ok()) { + string report_msg = + "Skipping installation of " + package.getName() + ". Reason: " + package.isInstallable().getErr(); + dbgWarning(D_ORCHESTRATOR) << report_msg; + LogGen(report_msg, Audience::SECURITY, Severity::CRITICAL, Priority::HIGH, Tags::ORCHESTRATOR); + current_packages.insert(make_pair(package.getName(), package)); + return true; + } + + vector installation_queue; + + if (!manifest_diff_calc.buildInstallationQueue(package, installation_queue, current_packages, new_packages)) { + dbgWarning(D_ORCHESTRATOR) << "Failed building installation queue. 
Package: " << package.getName(); + return false; + } + + vector> downloaded_files; + + if (!manifest_handler.downloadPackages(installation_queue, downloaded_files)) return false; + if (!manifest_handler.installPackages(downloaded_files, current_packages, corrupted_packages)) { + LogGen( + "Failed to install package: " + package.getName(), + Audience::SECURITY, + Severity::CRITICAL, + Priority::HIGH, + Tags::ORCHESTRATOR + ); + return false; + } + + dbgInfo(D_ORCHESTRATOR) << "Package was installed successfully. Package: " << package.getName(); + return true; +} + +ManifestController::ManifestController() : Component("ManifestController"), pimpl(make_unique()) {} + +ManifestController::~ManifestController() {} + +void +ManifestController::init() +{ + pimpl->init(); +} + +SASAL_END diff --git a/components/security_apps/orchestration/manifest_controller/manifest_controller_ut/CMakeLists.txt b/components/security_apps/orchestration/manifest_controller/manifest_controller_ut/CMakeLists.txt new file mode 100755 index 0000000..6797cc7 --- /dev/null +++ b/components/security_apps/orchestration/manifest_controller/manifest_controller_ut/CMakeLists.txt @@ -0,0 +1,7 @@ +link_directories(${BOOST_ROOT}/lib) + +add_unit_test( + manifest_controller_ut + "manifest_controller_ut.cc" + "manifest_controller;logging;orchestration_modules;agent_details;agent_details_reporter;version;config;metric;event_is;-lboost_regex" +) diff --git a/components/security_apps/orchestration/manifest_controller/manifest_controller_ut/manifest_controller_ut.cc b/components/security_apps/orchestration/manifest_controller/manifest_controller_ut/manifest_controller_ut.cc new file mode 100755 index 0000000..1de6dde --- /dev/null +++ b/components/security_apps/orchestration/manifest_controller/manifest_controller_ut/manifest_controller_ut.cc @@ -0,0 +1,2431 @@ +#include "manifest_controller.h" + +#include + +#include "cptest.h" +#include "orchestration_tools.h" +#include "config.h" +#include "config_component.h" +#include "mock/mock_package_handler.h" +#include "mock/mock_downloader.h" +#include "mock/mock_orchestration_tools.h" +#include "mock/mock_orchestration_status.h" +#include "mock/mock_logging.h" +#include "environment.h" +#include "mock/mock_shell_cmd.h" +#include "agent_details.h" +#include "mock/mock_time_get.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_agent_details.h" +#include "mock/mock_details_resolver.h" + +using namespace std; +using namespace testing; + +// Loading for multimap +template ::value> = cereal::traits::sfinae> +inline void +load(Archive &ar, map &packages) +{ + packages.clear(); + auto hint = packages.begin(); + while (true) + { + try { + Package value; + ar(value); + hint = packages.emplace_hint(hint, move(value.getName()), move(Package(value))); + } catch (const cereal::Exception &) { + break; + } + } +} + +class ManifestControllerTest : public Test +{ +public: + ManifestControllerTest() + { + env.preload(); + env.init(); + i_env = Singleton::Consume::from(env); + i_env->startNewTrace(); + Debug::setUnitTestFlag(D_ORCHESTRATOR, Debug::DebugLevel::TRACE); + const string ignore_packages_file = "/etc/cp/conf/ignore-packages.txt"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(ignore_packages_file)).WillOnce(Return(false)); + manifest_controller.init(); + manifest_file_path = getConfigurationWithDefault( + "/etc/cp/conf/manifest.json", + "orchestration", + "Manifest file path" + ); + corrupted_file_list = getConfigurationWithDefault( + "/etc/cp/conf/corrupted_packages.json", + 
"orchestration", + "Manifest corrupted files path" + ); + temp_ext = getConfigurationWithDefault("_temp", "orchestration", "Temp file extension"); + backup_ext = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + file_name = "new_manifest.json"; + packages_dir = getConfigurationWithDefault("/etc/cp/packages", "orchestration", "Packages directory"); + orch_service_name = getConfigurationWithDefault("orchestration", "orchestration", "Service name"); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput("cpprod_util CPPROD_IsConfigured CPwaap", _, _) + ).WillRepeatedly(Return(string("1"))); + } + + ~ManifestControllerTest() + { + i_env->finishSpan(); + i_env->finishTrace(); + env.fini(); + } + + void load(string &manifest, map &ret) + { + std::stringstream os(manifest); + cereal::JSONInputArchive archive_in(os); + archive_in(ret); + } + + string manifest_file_path; + string corrupted_file_list; + string temp_ext; + string backup_ext; + string file_name = "new_manifest.json"; + string packages_dir; + string orch_service_name; + string old_manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + map new_services; + map old_services; + map corrupted_packages; + + NiceMock mock_time_get; + NiceMock mock_mainloop; + ::Environment env; + ConfigComponent config; + I_Environment *i_env; + AgentDetails agent_details; + + NiceMock mock_log; + StrictMock mock_package_handler; + StrictMock mock_downloader; + StrictMock mock_orchestration_tools; + StrictMock mock_status; + StrictMock mock_details_resolver; + NiceMock mock_shell_cmd; + ManifestController manifest_controller; + I_ManifestController *i_manifest_controller = Singleton::Consume::from(manifest_controller); +}; + +TEST_F(ManifestControllerTest, constructorTest) +{ +} + +TEST_F(ManifestControllerTest, createNewManifest) +{ + new_services.clear(); + old_services.clear(); + + string manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + //mock_downloader + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + + //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my", "/tmp/temp_file", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("my", 
"/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my/my")).Times(2).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, badChecksum) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d0aa8568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + //mock_downloader + Maybe err(genError("Empty")); + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d0aa8568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(err)); + + //mock_orchestration_tools + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my/my")).WillOnce(Return(false)); + + string hostname = "hostname"; + string empty_err; + EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(empty_err)); + EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return( Maybe(hostname))); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _) + ); + EXPECT_FALSE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, updateManifest) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " 
\"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + //mock_downloader + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + + //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my", "/tmp/temp_file", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).Times(2).WillRepeatedly(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL( + mock_orchestration_tools, + doesFileExist("/etc/cp/packages/my/my") + ).Times(4).WillRepeatedly(Return(false)); + EXPECT_CALL(mock_orchestration_tools, + copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)) + .Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).Times(2).WillRepeatedly(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); + + manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"77ecfeb6d5ec73a596ff406713f4f5d1f233adb6\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "77ecfeb6d5ec73a596ff406713f4f5d1f233adb6", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + + //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my", 
"/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my", "/tmp/temp_file", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + + //mock_orchestration_tools + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, selfUpdate) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "orchestration" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + string temp_orc_file = "/etc/cp/packages/orchestration/orchestration_temp"; + EXPECT_CALL(mock_status, writeStatusToFile()); + EXPECT_CALL(mock_package_handler, preInstallPackage(orch_service_name, temp_orc_file)).WillOnce(Return(true)); + EXPECT_CALL( + mock_package_handler, + installPackage(orch_service_name, temp_orc_file, _) + ).WillOnce(Return(true)); + EXPECT_CALL( + mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages) + ); + + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + string temp_manifest_path = manifest_file_path + temp_ext; + EXPECT_CALL(mock_orchestration_tools, packagesToJsonFile(new_services, temp_manifest_path)).WillOnce(Return(true)); + + string path = packages_dir + "/" + orch_service_name + "/" + + orch_service_name; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(path)).Times(2).WillOnce(Return(false)); + + EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file", path + + temp_ext)).WillOnce(Return(true)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, loadAfterNoSelfUpdate) +{ + string temp_path = manifest_file_path + temp_ext; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(temp_path)).WillOnce(Return(false)); + EXPECT_TRUE(i_manifest_controller->loadAfterSelfUpdate()); +} + +TEST_F(ManifestControllerTest, failureWhileLoadAfterSelfUpdate) +{ + string temp_path = manifest_file_path + temp_ext; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(temp_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, manifest_file_path + + backup_ext)).WillOnce(Return(true)); + 
EXPECT_CALL(mock_orchestration_tools, copyFile(temp_path, manifest_file_path)).WillOnce(Return(false)); + string path = packages_dir + "/" + orch_service_name + "/" + orch_service_name + temp_ext; + EXPECT_CALL(mock_package_handler, postInstallPackage(orch_service_name, path)).WillOnce(Return(true)); + + EXPECT_FALSE(i_manifest_controller->loadAfterSelfUpdate()); +} + +TEST_F(ManifestControllerTest, successLoadAfteSelfUpdate) +{ + string temp_path = manifest_file_path + temp_ext; + string current_file = packages_dir + "/" + orch_service_name + "/" + orch_service_name; + string backup_file = current_file + backup_ext; + string backup_temp_file = backup_file + temp_ext; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(temp_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, manifest_file_path + + backup_ext)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(temp_path, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(temp_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage(orch_service_name, current_file + temp_ext)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage(orch_service_name, current_file + temp_ext)) + .WillOnce(Return(true)); + + EXPECT_TRUE(i_manifest_controller->loadAfterSelfUpdate()); +} + +TEST_F(ManifestControllerTest, updateWhileErrorPackageExist) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + string corrupted_packages_manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + load(manifest, new_services); + load(old_manifest, old_services); + load(corrupted_packages_manifest, corrupted_packages); + + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_FALSE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, removeCurrentErrorPackage) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my\"," + " \"version\": \"c\"," + 
" \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + string corrupted_packages_manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d0000000000000\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + load(manifest, new_services); + load(old_manifest, old_services); + load(corrupted_packages_manifest, corrupted_packages); + + //mock_downloader + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my", "/tmp/temp_file", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my/my")).Times(2).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + + corrupted_packages.clear(); + EXPECT_CALL(mock_orchestration_tools, packagesToJsonFile(corrupted_packages, + corrupted_file_list)).WillOnce(Return(true)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, selfUpdateWithOldCopy) +{ + new_services.clear(); + old_services.clear(); + corrupted_packages.clear(); + + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": 
\"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "orchestration" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + string temp_orc_file = "/etc/cp/packages/orchestration/orchestration_temp"; + EXPECT_CALL(mock_package_handler, preInstallPackage(orch_service_name, temp_orc_file)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage(orch_service_name, temp_orc_file, _)).WillOnce(Return(true)); + EXPECT_CALL(mock_status, writeStatusToFile()); + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + string temp_manifest_path = manifest_file_path + temp_ext; + EXPECT_CALL(mock_orchestration_tools, packagesToJsonFile(new_services, temp_manifest_path)).WillOnce(Return(true)); + + string path = packages_dir + "/" + orch_service_name + "/" + + orch_service_name; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(path)).WillOnce(Return(false)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(path, path + backup_ext + temp_ext)).WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file", path + + temp_ext)).WillOnce(Return(true)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, selfUpdateWithOldCopyWithError) +{ + new_services.clear(); + old_services.clear(); + corrupted_packages.clear(); + + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "orchestration" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + EXPECT_CALL(mock_status, writeStatusToFile()); + string hostname = "hostname"; + string empty_err; + EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(empty_err)); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _) + ); + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + string temp_manifest_path = manifest_file_path + temp_ext; + EXPECT_CALL(mock_orchestration_tools, packagesToJsonFile(new_services, temp_manifest_path)).WillOnce(Return(true)); + + string path = packages_dir + "/" + orch_service_name + "/" + + orch_service_name; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(path)).WillOnce(Return(false)).WillOnce(Return(true)); + 
EXPECT_CALL(mock_orchestration_tools, copyFile(path, path + backup_ext + temp_ext)).WillOnce(Return(false)); + EXPECT_FALSE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, installAndRemove) +{ + new_services.clear(); + old_services.clear(); + corrupted_packages.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + //mock_downloader + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + + //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my", "/tmp/temp_file", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).Times(2).WillRepeatedly(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my/my")).Times(2).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, + copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)) + .Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).Times(2).WillRepeatedly(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); + + string new_manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my1\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"77ecfeb6d5ec73a596ff406713f4f5d1f233adb6\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": 
\"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "77ecfeb6d5ec73a596ff406713f4f5d1f233adb6", + Package::ChecksumTypes::SHA1, + "my1" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + + // //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my1", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my1", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my1", "/tmp/temp_file", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("my1", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage("my1", "/tmp/temp_file")).WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, packagesToJsonFile(old_services, manifest_file_path)).WillOnce(Return(true)); + load(manifest, old_services); + load(new_manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_package_handler, uninstallPackage("my", "/etc/cp/my/my", "/etc/cp/packages/my/my")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my1/my1")).Times(2) + .WillOnce(Return(false)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, badInstall) +{ + new_services.clear(); + old_services.clear(); + corrupted_packages.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + //mock_downloader + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + + //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my", "/tmp/temp_file", _)).WillOnce(Return(false)); + + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, 
doesFileExist("/etc/cp/packages/my/my")).Times(2).WillOnce(Return(false)); + + string hostname = "hostname"; + string empty_err; + EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(empty_err)); + EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return( Maybe(hostname))); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _) + ); + + string corrupted_packages_manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + load(corrupted_packages_manifest, corrupted_packages); + EXPECT_CALL(mock_orchestration_tools, + packagesToJsonFile(corrupted_packages, corrupted_file_list)).WillOnce(Return(true)); + + EXPECT_FALSE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, failToDownloadWithselfUpdate) +{ + new_services.clear(); + old_services.clear(); + corrupted_packages.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + Maybe err(genError("Empty")); + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "orchestration" + ) + ).WillOnce(Return(err)); + + + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL( + mock_orchestration_tools, + doesFileExist("/etc/cp/packages/orchestration/orchestration") + ).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(string("hostname"))); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _) + ); + string not_error; + EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(not_error)); + EXPECT_FALSE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, requireUpdate) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": [ \"pre_orchestration\" ]" + " }," + " {" + " \"name\": \"pre_orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c806\"," + " \"package-type\": \"service\"," + " \"require\": []" 
+ " }" + " ]" + "}"; + EXPECT_CALL(mock_status, writeStatusToFile()); + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "orchestration" + ) + ).WillOnce(Return(string("/tmp/temp_file1"))); + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c806", + Package::ChecksumTypes::SHA1, + "pre_orchestration" + ) + ).WillOnce(Return(string("/tmp/temp_file2"))); + string temp_orc_file = "/etc/cp/packages/orchestration/orchestration_temp"; + EXPECT_CALL(mock_package_handler, preInstallPackage(orch_service_name, temp_orc_file)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage(orch_service_name, temp_orc_file, _)) + .WillOnce(Return(true)); + + EXPECT_CALL( + mock_package_handler, + shouldInstallPackage("pre_orchestration", "/tmp/temp_file2") + ).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("pre_orchestration", "/tmp/temp_file2")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("pre_orchestration", "/tmp/temp_file2", _)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("pre_orchestration", "/tmp/temp_file2")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage("pre_orchestration", "/tmp/temp_file2")) + .WillOnce(Return(true)); + + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)) + .WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)) + .WillOnce(Return(old_services)); + string temp_manifest_path = manifest_file_path + temp_ext; + EXPECT_CALL(mock_orchestration_tools, packagesToJsonFile(new_services, temp_manifest_path)) + .WillOnce(Return(true)); + + string path = packages_dir + "/" + orch_service_name + "/" + + orch_service_name; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(path)).Times(2).WillOnce(Return(false)); + EXPECT_CALL( + mock_orchestration_tools, + doesFileExist("/etc/cp/packages/pre_orchestration/pre_orchestration") + ).Times(2).WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file1", path + temp_ext)) + .WillOnce(Return(true)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, sharedObjectNotInstalled) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"name\": \"pre_orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c806\"," + " \"package-type\": \"shared objects\"," + " \"require\": []" + " }" + " ]" + "}"; + EXPECT_CALL(mock_status, writeStatusToFile()); + load(manifest, new_services); + EXPECT_CALL(mock_orchestration_tools, 
loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages)); + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "orchestration" + ) + ).WillOnce(Return(string("/tmp/temp_file1"))); + + string temp_manifest_path = manifest_file_path + temp_ext; + string writen_manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + map writen; + load(writen_manifest, writen); + EXPECT_CALL(mock_orchestration_tools, packagesToJsonFile(writen, temp_manifest_path)).WillOnce(Return(true)); + string temp_orc_file = "/etc/cp/packages/orchestration/orchestration_temp"; + + EXPECT_CALL(mock_package_handler, preInstallPackage(orch_service_name, + temp_orc_file)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage(orch_service_name, + temp_orc_file, _)).WillOnce(Return(true)); + + string path = packages_dir + "/" + orch_service_name + "/" + + orch_service_name; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(path)).Times(2).WillOnce(Return(false)); + + EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file1", path + + temp_ext)).WillOnce(Return(true)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, requireSharedObjectUpdate) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": [ \"pre_orchestration\" ]" + " }," + " {" + " \"name\": \"pre_orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c806\"," + " \"package-type\": \"shared objects\"," + " \"require\": []" + " }" + " ]" + "}"; + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "orchestration" + ) + ).WillOnce(Return(string("/tmp/temp_file1"))); + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c806", + Package::ChecksumTypes::SHA1, + "pre_orchestration" + ) + ).WillOnce(Return(string("/tmp/temp_file2"))); + EXPECT_CALL(mock_status, writeStatusToFile()); + string temp_orc_file = "/etc/cp/packages/orchestration/orchestration_temp"; + EXPECT_CALL(mock_package_handler, shouldInstallPackage(_, _)).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage(orch_service_name, + temp_orc_file)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, 
installPackage(orch_service_name, + temp_orc_file, _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("pre_orchestration", + "/tmp/temp_file2", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages)); + + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + string temp_manifest_path = manifest_file_path + temp_ext; + EXPECT_CALL(mock_orchestration_tools, packagesToJsonFile(new_services, temp_manifest_path)).WillOnce(Return(true)); + + string path = packages_dir + "/" + orch_service_name + "/" + + orch_service_name; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(path)).Times(2).WillOnce(Return(false)); + EXPECT_CALL( + mock_orchestration_tools, + doesFileExist("/etc/cp/packages/pre_orchestration/pre_orchestration") + ).Times(2).WillOnce(Return(false)); + + EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file1", path + + temp_ext)).WillOnce(Return(true)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, failureOnDownloadSharedObject) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": [ \"pre_orchestration\" ]" + " }," + " {" + " \"name\": \"pre_orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c806\"," + " \"package-type\": \"shared objects\"," + " \"require\": []" + " }" + " ]" + "}"; + + Maybe err = genError("error"); + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c806", + Package::ChecksumTypes::SHA1, + "pre_orchestration" + ) + ).WillOnce(Return(err)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages)); + + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL( + mock_orchestration_tools, + doesFileExist("/etc/cp/packages/pre_orchestration/pre_orchestration") + ).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(string("hostname"))); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _) + ); + string not_error; + EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(not_error)); + + EXPECT_FALSE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, multiRequireUpdate) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": 
\"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": [ \"pre_orchestration002\" ]" + " }," + " {" + " \"name\": \"pre_orchestration001\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c806\"," + " \"package-type\": \"shared objects\"," + " \"require\": []" + " }," + " {" + " \"name\": \"pre_orchestration002\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my2.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c807\"," + " \"package-type\": \"shared objects\"," + " \"require\": [ \"pre_orchestration001\" ]" + " }" + " ]" + "}"; + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "orchestration" + ) + ).WillOnce(Return(string("/tmp/temp_file1"))); + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c806", + Package::ChecksumTypes::SHA1, + "pre_orchestration001" + ) + ).WillOnce(Return(string("/tmp/temp_file2"))); + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my2.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c807", + Package::ChecksumTypes::SHA1, + "pre_orchestration002" + ) + ).WillOnce(Return(string("/tmp/temp_file3"))); + EXPECT_CALL(mock_status, writeStatusToFile()); + string temp_orc_file = "/etc/cp/packages/orchestration/orchestration_temp"; + EXPECT_CALL(mock_package_handler, shouldInstallPackage(_, _)).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage(orch_service_name, + temp_orc_file)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage(orch_service_name, + temp_orc_file, _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("pre_orchestration001", + "/tmp/temp_file2", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("pre_orchestration002", + "/tmp/temp_file3", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).WillOnce(Return(corrupted_packages)); + + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + string temp_manifest_path = manifest_file_path + temp_ext; + EXPECT_CALL(mock_orchestration_tools, packagesToJsonFile(new_services, temp_manifest_path)).WillOnce(Return(true)); + + string path = packages_dir + "/" + orch_service_name + "/" + + orch_service_name; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(path)).Times(2).WillOnce(Return(false)); + EXPECT_CALL( + mock_orchestration_tools, + doesFileExist("/etc/cp/packages/pre_orchestration001/pre_orchestration001") + ).Times(2).WillOnce(Return(false)); + EXPECT_CALL( + mock_orchestration_tools, + doesFileExist("/etc/cp/packages/pre_orchestration002/pre_orchestration002") + ).Times(2).WillOnce(Return(false)); + + EXPECT_CALL(mock_orchestration_tools, copyFile("/tmp/temp_file1", path + + 
temp_ext)).WillOnce(Return(true)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, createNewManifestWithUninstallablePackage) +{ + new_services.clear(); + old_services.clear(); + + string manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"\"," + " \"relative-path\": \"\"," + " \"name\": \"waap\"," + " \"version\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"\"," + " \"package-type\": \"service\"," + " \"status\": false,\n" + " \"message\": \"This security app isn't valid for this agent\"\n" + " }" + " ]" + "}"; + + //mock_downloader + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + + //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my", "/tmp/temp_file", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my/my")).Times(2).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerTest, updateUninstallPackage) +{ + new_services.clear(); + old_services.clear(); + string manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"\"," + " \"package-type\": \"service\"," + " \"status\": false,\n" + " 
\"message\": \"This security app isn't valid for this agent\"\n" + " }," + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(corrupted_file_list)).Times(2).WillRepeatedly(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, + copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)) + .Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).Times(2).WillRepeatedly(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my/my")).Times(2).WillOnce(Return(true)); + string hostname = "hostname"; + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); + + manifest = + "{" + " \"packages\": [" + " {" + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"77ecfeb6d5ec73a596ff406713f4f5d1f233adb6\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "77ecfeb6d5ec73a596ff406713f4f5d1f233adb6", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + + //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my", "/tmp/temp_file", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + + //mock_orchestration_tools + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, + loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +class ManifestControllerIgnorePakckgeTest : public Test +{ +public: + 
ManifestControllerIgnorePakckgeTest() + { + env.preload(); + env.init(); + i_env = Singleton::Consume::from(env); + i_env->startNewTrace(); + new_services.clear(); + old_services.clear(); + } + + void + init(const string &ignore_services = "dummy_service") + { + const string ignore_packages_file = "/tmp/ignore-packages.txt"; + setConfiguration(ignore_packages_file, "orchestration", "Ignore packages list file path"); + writeIgnoreList(ignore_packages_file, ignore_services); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(ignore_packages_file)).WillOnce(Return(true)); + manifest_controller.init(); + manifest_file_path = getConfigurationWithDefault( + "/etc/cp/conf/manifest.json", + "orchestration", + "Manifest file path" + ); + corrupted_file_list = getConfigurationWithDefault( + "/etc/cp/conf/corrupted_packages.json", + "orchestration", + "Manifest corrupted files path" + ); + temp_ext = getConfigurationWithDefault("_temp", "orchestration", "Temp file extension"); + backup_ext = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + file_name = "new_manifest.json"; + packages_dir = getConfigurationWithDefault("/etc/cp/packages", "orchestration", "Packages directory"); + orch_service_name = getConfigurationWithDefault("orchestration", "orchestration", "Service name"); + EXPECT_CALL( + mock_shell_cmd, + getExecOutput("cpprod_util CPPROD_IsConfigured CPwaap", _, _) + ).WillRepeatedly(Return(string("1"))); + } + + ~ManifestControllerIgnorePakckgeTest() + { + remove("/tmp/ignore-packages.txt"); + i_env->finishSpan(); + i_env->finishTrace(); + env.fini(); + } + + void load(string &manifest, map &ret) + { + std::stringstream os(manifest); + cereal::JSONInputArchive archive_in(os); + archive_in(ret); + } + + string manifest_file_path; + string corrupted_file_list; + string temp_ext; + string backup_ext; + string file_name = "new_manifest.json"; + string packages_dir; + string orch_service_name; + string old_manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/orchestration.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + NiceMock mock_mainloop; + NiceMock mock_timer; + ::Environment env; + I_Environment *i_env; + ConfigComponent config; + AgentDetails agent_details; + + map new_services; + map old_services; + map corrupted_packages; + + StrictMock mock_package_handler; + StrictMock mock_status; + StrictMock mock_downloader; + StrictMock mock_orchestration_tools; + NiceMock mock_shell_cmd; + + ManifestController manifest_controller; + I_ManifestController *i_manifest_controller = Singleton::Consume::from(manifest_controller); + +private: + void + writeIgnoreList(const string &path, const string &packages) + { + ofstream ignore_list_file; + ignore_list_file.open (path); + ignore_list_file << packages; + ignore_list_file.close(); + } +}; + +TEST_F(ManifestControllerIgnorePakckgeTest, constructorTest) +{ +} + +TEST_F(ManifestControllerIgnorePakckgeTest, initOnly) +{ + init(); +} + 
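init() above writes a newline-separated ignore list (for example "dummy_service\nmy") to /tmp/ignore-packages.txt and points the "Ignore packages list file path" configuration at it. As a minimal sketch only — the actual parsing happens inside ManifestController::init(), which is not shown in this diff, and the helper name below is invented — such a file could be read into a set of ignored package names like this:

    #include <fstream>
    #include <set>
    #include <string>

    // Illustrative helper, not part of the component: one package name per line,
    // matching the format written by writeIgnoreList() in the fixture above.
    std::set<std::string>
    readIgnoreList(const std::string &path)
    {
        std::set<std::string> ignored_packages;
        std::ifstream ignore_file(path);
        std::string line;
        while (std::getline(ignore_file, line)) {
            if (!line.empty()) ignored_packages.insert(line);
        }
        return ignored_packages;
    }

The tests that follow expect any package whose name lands in that set to be skipped; freezeIgnoredPackage below asserts the "Ignoring a package from the manifest" debug line for exactly those names.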
+TEST_F(ManifestControllerIgnorePakckgeTest, addAndUpdateIgnorePackage) +{ + init(); + + // Add an ignored package + string manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/orchestration.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/dummy_service.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"dummy_service\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); + + // Upate the ignored package + manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/orchestration.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/dummy_service.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"dummy_service\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"b58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + //mock_orchestration_tools + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, 
loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + + +TEST_F(ManifestControllerIgnorePakckgeTest, addIgnorePackageAndUpdateNormal) +{ + init(); + + // Add an ignored package + string manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/orchestration.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/dummy_service.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"dummy_service\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my/my")).Times(2).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); + + // Upate the normal package + manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/orchestration.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": 
\"b58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/dummy_service.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"dummy_service\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + //mock_orchestration_tools + load(manifest, new_services); + + //mock_downloader + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "b58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + + //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my", "/tmp/temp_file", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, updateSavedPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerIgnorePakckgeTest, removeIgnoredPackage) +{ + init(); + + // Add an ignored package + string manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/orchestration.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/dummy_service.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"dummy_service\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + load(manifest, new_services); + load(old_manifest, old_services); + + 
EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); + + // Remove the ignored package + manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/orchestration.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + //mock_orchestration_tools + load(manifest, new_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); +} + +TEST_F(ManifestControllerIgnorePakckgeTest, freezeIgnoredPackage) +{ + init("dummy_service\nmy"); + + Debug::setUnitTestFlag(D_CONFIG, Debug::DebugLevel::TRACE); + ostringstream capture_debug; + Debug::setNewDefaultStdout(&capture_debug); + + // Update an ignored package + string manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/orchestration.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"b58bbab8020b0e6d08568714b5e582a3adf9c805\"," + 
" \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); + + EXPECT_THAT(capture_debug.str(), HasSubstr("Ignoring a package from the manifest. Package name: my")); + EXPECT_THAT(capture_debug.str(), HasSubstr("Ignoring a package from the manifest. Package name: dummy_service")); + EXPECT_THAT( + capture_debug.str(), + Not(HasSubstr("Ignoring a package from the manifest. Package name: orchestration")) + ); + Debug::setNewDefaultStdout(&cout); +} + +TEST_F(ManifestControllerIgnorePakckgeTest, overrideIgnoredPackageFromProfileSettings) +{ + init("dummy_service\nmy"); + config.preload(); + + static const string profile_settings( + "{\n" + "\"agentSettings\": [\n" + "{\n" + "\"key\": \"orchestration.IgnoredPackagesList\",\n" + "\"value\": \"a,orchestration,c,notmy\",\n" + "\"id\": \"123\"\n" + "}\n" + "]\n" + "}\n" + ); + + istringstream ss(profile_settings); + EXPECT_TRUE(Singleton::Consume::from(config)->loadConfiguration(ss)); + + Debug::setUnitTestFlag(D_CONFIG, Debug::DebugLevel::TRACE); + ostringstream capture_debug; + Debug::setNewDefaultStdout(&capture_debug); + + // Update an ignored package + string manifest = + "{" + " \"packages\": [" + " {" + " \"download-path\": \"http://172.23.92.135/orchestration.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"orchestration\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c8051\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"download-path\": \"http://172.23.92.135/my.sh\"," + " \"relative-path\": \"\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"b58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + + //mock_downloader + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/my.sh", + "b58bbab8020b0e6d08568714b5e582a3adf9c805", + Package::ChecksumTypes::SHA1, + "my" + ) + ).WillOnce(Return(string("/tmp/temp_file"))); + + //mock_package_handler + EXPECT_CALL(mock_package_handler, shouldInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, preInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, installPackage("my", "/tmp/temp_file", _)).WillOnce(Return(true)); + EXPECT_CALL(mock_package_handler, postInstallPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + 
EXPECT_CALL(mock_package_handler, updateSavedPackage("my", "/tmp/temp_file")).WillOnce(Return(true)); + + load(manifest, new_services); + load(old_manifest, old_services); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(file_name)).WillOnce(Return(new_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file_path)).WillOnce(Return(old_services)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(corrupted_file_list)) + .WillOnce(Return(corrupted_packages)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist("/etc/cp/packages/my/my")).Times(2).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, copyFile(manifest_file_path, "/etc/cp/conf/manifest.json.bk")) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, isNonEmptyFile(manifest_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(file_name)).WillOnce(Return(true)); + + EXPECT_TRUE(i_manifest_controller->updateManifest(file_name)); + + EXPECT_THAT(capture_debug.str(), Not(HasSubstr("Ignoring a package from the manifest. Package name: my"))); + EXPECT_THAT( + capture_debug.str(), + Not(HasSubstr("Ignoring a package from the manifest. Package name: dummy_service")) + ); + EXPECT_THAT(capture_debug.str(), HasSubstr("Ignoring a package from the manifest. Package name: orchestration")); + EXPECT_THAT(capture_debug.str(), HasSubstr("Ignoring a package from the manifest. Package name: notmy")); + EXPECT_THAT(capture_debug.str(), HasSubstr("Ignoring a package from the manifest. Package name: a")); + EXPECT_THAT(capture_debug.str(), HasSubstr("Ignoring a package from the manifest. 
Package name: c")); + Debug::setNewDefaultStdout(&cout); +} + +class ManifestDownloadTest : public Test +{ +public: + ManifestDownloadTest() + { + EXPECT_CALL( + mock_orchestration_tools, + doesFileExist("/etc/cp/conf/ignore-packages.txt") + ).WillOnce(Return(false)); + + manifest_controller.init(); + } + ::Environment env; + ConfigComponent config; + + StrictMock agent_details; + StrictMock mock_orchestration_tools; + StrictMock mock_package_handler; + StrictMock mock_downloader; + StrictMock mock_status; + StrictMock mock_details_resolver; + NiceMock mock_shell_cmd; + + NiceMock mock_mainloop; + NiceMock mock_timer; + + ManifestController manifest_controller; + I_ManifestController *i_manifest_controller = Singleton::Consume::from(manifest_controller); + + void + load(const string &manifest, map &ret) + { + std::stringstream os(manifest); + cereal::JSONInputArchive archive_in(os); + archive_in(ret); + } + +private: +}; + +TEST_F(ManifestDownloadTest, download_relative_path) +{ + vector manifest_data = { + "{", + " \"packages\": [", + " {", + " \"download-path\": \"http://172.23.92.135/orchestration.sh\",", + " \"relative-path\": \"/orchestration.sh\",", + " \"name\": \"orchestration\",", + " \"version\": \"c\",", + " \"checksum-type\": \"sha1sum\",", + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\",", + " \"package-type\": \"service\",", + " \"require\": []", + " }", + " ]", + "}" + }; + + Maybe fog_domain(string("fake.checkpoint.com")); + Maybe downloaded_package(genError("Failed to download")); + + map new_packages; + map manifest_packages; + map corrupted_packages; + + CPTestTempfile manifest_file(manifest_data); + string x = manifest_file.readFile(); + + load(x, new_packages); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file.fname)).WillOnce(Return(new_packages)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson("/etc/cp/conf/manifest.json")) + .WillOnce(Return(manifest_packages)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson("/etc/cp/conf/corrupted_packages.json")) + .WillOnce(Return(corrupted_packages)); + EXPECT_CALL(agent_details, getFogDomain()).WillOnce(Return(fog_domain)); + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "https://fake.checkpoint.com/download/orchestration.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + _, + "orchestration" + ) + ).WillOnce(Return(downloaded_package)); + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/orchestration.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + _, + "orchestration" + ) + ).WillOnce(Return(downloaded_package)); + EXPECT_CALL( + mock_orchestration_tools, + doesFileExist("/etc/cp/packages/orchestration/orchestration") + ).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(string("hostname"))); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _) + ); + string not_error; + EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(not_error)); + + EXPECT_FALSE(i_manifest_controller->updateManifest(manifest_file.fname)); +} + +TEST_F(ManifestDownloadTest, download_relative_path_no_fog_domain) +{ + vector manifest_data = { + "{", + " \"packages\": [", + " {", + " \"download-path\": \"http://172.23.92.135/orchestration.sh\",", + " \"relative-path\": \"/orchestration.sh\",", + " \"name\": \"orchestration\",", + " \"version\": \"c\",", + " \"checksum-type\": \"sha1sum\",", + " \"checksum\": 
\"a58bbab8020b0e6d08568714b5e582a3adf9c805\",", + " \"package-type\": \"service\",", + " \"require\": []", + " }", + " ]", + "}" + }; + + Maybe fog_domain(genError("No fog domain")); + Maybe downloaded_package(genError("Failed to download")); + + map new_packages; + map manifest_packages; + map corrupted_packages; + + CPTestTempfile manifest_file(manifest_data); + string x = manifest_file.readFile(); + + load(x, new_packages); + + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson(manifest_file.fname)).WillOnce(Return(new_packages)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson("/etc/cp/conf/manifest.json")) + .WillOnce(Return(manifest_packages)); + EXPECT_CALL(mock_orchestration_tools, loadPackagesFromJson("/etc/cp/conf/corrupted_packages.json")) + .WillOnce(Return(corrupted_packages)); + EXPECT_CALL(agent_details, getFogDomain()).WillOnce(Return(fog_domain)); + EXPECT_CALL( + mock_orchestration_tools, + doesFileExist("/etc/cp/packages/orchestration/orchestration") + ).WillOnce(Return(false)); + string not_error; + EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(not_error)); + + EXPECT_CALL( + mock_downloader, + downloadFileFromURL( + "http://172.23.92.135/orchestration.sh", + "a58bbab8020b0e6d08568714b5e582a3adf9c805", + _, + "orchestration" + ) + ).WillOnce(Return(downloaded_package)); + EXPECT_CALL(mock_details_resolver, getHostname()).WillOnce(Return(string("hostname"))); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::FAILED, _) + ); + + EXPECT_FALSE(i_manifest_controller->updateManifest(manifest_file.fname)); +} diff --git a/components/security_apps/orchestration/manifest_controller/manifest_diff_calculator.cc b/components/security_apps/orchestration/manifest_controller/manifest_diff_calculator.cc new file mode 100755 index 0000000..80b5006 --- /dev/null +++ b/components/security_apps/orchestration/manifest_controller/manifest_diff_calculator.cc @@ -0,0 +1,144 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "manifest_diff_calculator.h" + +#include "debug.h" +#include "config.h" +#include "sasal.h" + +using namespace std; + +SASAL_START // Orchestration - Manifest Handler + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +void +ManifestDiffCalculator::init() +{ + dbgTrace(D_ORCHESTRATOR) + << "Initializing Manifest diff calculator, file system path prefix:: " + << getFilesystemPathConfig(); + + corrupted_file_path = getConfigurationWithDefault( + getFilesystemPathConfig() + "/conf/corrupted_packages.json", + "orchestration", + "Manifest corrupted files path" + ); +} + +// If one of the new packages is already installed, new_packages map is updated accordingly. 
+// This function return map which contain all packages that should be uninstalled +// based on new manifest +map +ManifestDiffCalculator::filterUntrackedPackages( + const map ¤t_packages, + map &new_packages) +{ + dbgDebug(D_ORCHESTRATOR) << "Starting to scan old packages to remove"; + map packages_to_remove; + for (auto current_package = current_packages.begin(); current_package != current_packages.end();) { + auto package = new_packages.find(current_package->first); + if (package == new_packages.end()) { + packages_to_remove.insert(pair(current_package->first, current_package->second)); + } else { + if (current_package->second == package->second) { + // if package is already installed, new_packages is updated + new_packages.erase(package); + } + } + current_package++; + } + return packages_to_remove; +} + +// If one of the new packages is already known as corrupted, new_packages map is +// updated accordingly. +// Otherwise, corrupted_packages is updated and old corrupted package is deleted. +bool +ManifestDiffCalculator::filterCorruptedPackages( + map &new_packages, + map &corrupted_packages) +{ + bool no_corrupted_package_exist = true; + bool any_corrupted_removed = false; + for (auto corrupted_package = corrupted_packages.begin(); corrupted_package != corrupted_packages.end();) { + auto package = new_packages.find(corrupted_package->first); + if (package == new_packages.end()) { + // The corrupted package is not in the new packages list, + // so it should be removed from the corrupted list. + corrupted_package = corrupted_packages.erase(corrupted_package); + any_corrupted_removed = true; + } else { + if (corrupted_package->second == package->second) { + // The corrupted package is still in the new packages list, + // so it should be removed + dbgWarning(D_ORCHESTRATOR) << "Installation package is corrupted." + << " Package: " << package->second.getName(); + new_packages.erase(package); + corrupted_package++; + no_corrupted_package_exist = false; + } else { + // New version of corrupted package was received + corrupted_package = corrupted_packages.erase(corrupted_package); + any_corrupted_removed = true; + } + } + } + if (any_corrupted_removed) { + dbgDebug(D_ORCHESTRATOR) << "Updating corrupted file. File: " << corrupted_file_path; + auto orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->packagesToJsonFile(corrupted_packages, corrupted_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to update corrupted file. 
Path: " << corrupted_file_path; + return false; + } + } + return no_corrupted_package_exist; +} + +// This function build the installation queue recursively and return true if succeeded, false otherwise +// At the beginning, installation_queue is empty and will be filled according package dependences +bool +ManifestDiffCalculator::buildInstallationQueue( + const Package &updated_package, + vector &installation_queue, + const map ¤t_packages, + const map &new_packages) +{ + vector requires = updated_package.getRequire(); + + for (size_t i = 0; i < requires.size(); i++) { + auto installed_package = current_packages.find(requires[i]); + auto new_package = new_packages.find(requires[i]); + + if (installed_package == current_packages.end() || + (new_package != new_packages.end() && *installed_package != *new_package)) { + if(!buildInstallationQueue(new_package->second, + installation_queue, + current_packages, + new_packages)) { + return false; + } + } else if (installed_package != current_packages.end()) { + dbgDebug(D_ORCHESTRATOR) << "Package is already installed. Package: " << installed_package->first; + } else if (new_package == new_packages.end()) { + dbgWarning(D_ORCHESTRATOR) << "One of the requested dependencies is corrupted or doesn't exist." + << " Package: "<< requires[i]; + return false; + } + } + installation_queue.push_back(updated_package); + return true; +} + +SASAL_END diff --git a/components/security_apps/orchestration/manifest_controller/manifest_handler.cc b/components/security_apps/orchestration/manifest_controller/manifest_handler.cc new file mode 100755 index 0000000..ce51a59 --- /dev/null +++ b/components/security_apps/orchestration/manifest_controller/manifest_handler.cc @@ -0,0 +1,384 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "manifest_handler.h" + +#include "debug.h" +#include "config.h" +#include "sasal.h" +#include "agent_details.h" +#include "orchestration_comp.h" + +using namespace std; + +SASAL_START // Orchestration - Manifest Handler + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +void +ManifestHandler::init() +{ + dbgTrace(D_ORCHESTRATOR) + << "Initializing Manifest handler, file system path prefix: " + << getFilesystemPathConfig(); + + manifest_file_path = getConfigurationWithDefault( + getFilesystemPathConfig() + "/conf/manifest.json", + "orchestration", + "Manifest file path" + ); + temp_ext = getConfigurationWithDefault("_temp", "orchestration", "Temp file extension"); + backup_ext = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + packages_dir = getConfigurationWithDefault( + getFilesystemPathConfig() + "/packages", "orchestration", + "Packages directory" + ); + orch_service_name = getConfigurationWithDefault("orchestration", "orchestration", "Service name"); + default_dir = getConfigurationWithDefault( + getFilesystemPathConfig(), + "orchestration", + "Default Check Point directory" + ); +} + +Maybe +ManifestHandler::downloadPackage(const Package &package, bool is_clean_installation) +{ + Maybe package_download_file = genError("failed to download package, Package: " + package.getName()); + Maybe fog_domain = genError("No Fog domain was found"); + if (Singleton::exists()) { + fog_domain = Singleton::Consume::by()->getFogDomain(); + } + + if (!is_clean_installation) { + I_MainLoop *i_mainloop = Singleton::Consume::by(); + auto pending_time_frame_seconds = getConfigurationWithDefault( + 60, + "orchestration", + "Download pending time frame seconds" + ); + int pending_time = rand() % pending_time_frame_seconds; + dbgInfo(D_ORCHESTRATOR) + << "Pending downloading of package " + << package.getName() + << " for " + << pending_time + << " seconds"; + chrono::microseconds pending_time_micro = chrono::seconds(pending_time); + i_mainloop->yield(pending_time_micro); + dbgTrace(D_ORCHESTRATOR) << "Proceeding to package downloading. Package name " << package.getName(); + } + + auto orchestration_downloader = Singleton::Consume::by(); + if (!package.getRelativeDownloadPath().empty() && fog_domain.ok()) { + string download_path = + "https://" + fog_domain.unpack() + "/download" + package.getRelativeDownloadPath(); + package_download_file= orchestration_downloader->downloadFileFromURL( + download_path, + package.getChecksum(), + package.getChecksumType(), + package.getName() + ); + } + + if (!package_download_file.ok()) { + package_download_file = orchestration_downloader->downloadFileFromURL( + package.getDownloadPath(), + package.getChecksum(), + package.getChecksumType(), + package.getName() + ); + } + return package_download_file; +} + +bool +ManifestHandler::downloadPackages( + const vector &packages_to_download, + vector> &downloaded_packages) +{ + auto i_env = Singleton::Consume::by(); + auto i_orch_tools = Singleton::Consume::by(); + auto span_scope = i_env->startNewSpanScope(Span::ContextType::CHILD_OF); + for (auto &package : packages_to_download) { + dbgInfo(D_ORCHESTRATOR) << "Downloading package file." 
<< " Package: " << package.getName(); + + string packages_dir = getConfigurationWithDefault( + "/etc/cp/packages", + "orchestration", + "Packages directory" + ); + + string current_installation_file = packages_dir + "/" + package.getName() + "/" + package.getName(); + bool is_clean_installation = !i_orch_tools->doesFileExist(current_installation_file); + + Maybe package_download_file = downloadPackage(package, is_clean_installation); + + if (package_download_file.ok()) { + dbgDebug(D_ORCHESTRATOR) + << "Installation package was downloaded successfully." + << " Package: " << package.getName(); + downloaded_packages.push_back(pair(package, package_download_file.unpack())); + } else { + dbgWarning(D_ORCHESTRATOR) + << "Failed to download installation package. " + << "Package: " << package.getName() + << ", Error: " << package_download_file.getErr(); + + for (auto &package_file : downloaded_packages) { + if (i_orch_tools->removeFile(package_file.second)) { + dbgDebug(D_ORCHESTRATOR) << "Corrupted downloaded package was removed. Package: " + << package_file.first.getName(); + } else { + dbgWarning(D_ORCHESTRATOR) + << "Failed to removed the download file. Package: " + << package_file.first.getName() + << ", Path: " + << package_file.second; + } + } + downloaded_packages.clear(); + string install_error; + if (is_clean_installation) { + string error_hostname_addition = ""; + auto maybe_hostname = Singleton::Consume::by()->getHostname(); + if (maybe_hostname.ok()) { + error_hostname_addition = " on host '" + maybe_hostname.unpack() + "'"; + } + install_error = + "Critical Error: Agent/Gateway was not fully deployed" + + error_hostname_addition + + " and is not enforcing a security policy. Retry installation or contact Check Point support."; + } else { + auto agent_details = Singleton::Consume::by(); + install_error = + "Warning: Agent/Gateway '" + + agent_details->getAgentId() + + "' software update failed. Agent is running previous software. 
Contact Check Point support."; + } + + auto orchestration_status = Singleton::Consume::by(); + if (orchestration_status->getManifestError().find("Gateway was not fully deployed") == string::npos) { + orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::MANIFEST, + OrchestrationStatusResult::FAILED, + install_error + ); + } + return false; + } + } + return true; +} + +bool +ManifestHandler::installPackages( + const vector> &downloaded_package_files, + map ¤t_packages, + map &corrupted_packages) +{ + auto i_env = Singleton::Consume::by(); + auto span_scope = i_env->startNewSpanScope(Span::ContextType::CHILD_OF); + // Patch - reorder packages so that accessControlApp is installed before accessControlKernel + vector> patched_downloaded_package_files; + patched_downloaded_package_files.reserve(downloaded_package_files.size()); + int ac_kernel_package_idx = -1; + int ac_app_package_idx = -1; + int i = 0; + for (auto &downloaded_package : downloaded_package_files) { + if (downloaded_package.first.getName() == "accessControlApp") { + ac_app_package_idx = i; + } else if (downloaded_package.first.getName() == "accessControlKernel") { + ac_kernel_package_idx = i; + } else { + patched_downloaded_package_files.push_back(downloaded_package); + } + i++; + } + if (ac_app_package_idx != -1) { + patched_downloaded_package_files.push_back(downloaded_package_files.at(ac_app_package_idx)); + } + if (ac_kernel_package_idx != -1) { + patched_downloaded_package_files.push_back(downloaded_package_files.at(ac_kernel_package_idx)); + } + + auto orchestration_status = Singleton::Consume::by(); + for (auto &downloaded_package : patched_downloaded_package_files) { + auto package = downloaded_package.first; + auto package_name = package.getName(); + auto package_handler_path = downloaded_package.second; + + dbgInfo(D_ORCHESTRATOR) << "Handling package installation. Package: " << package_name; + + if (package_name.compare(orch_service_name) == 0) { + orchestration_status->writeStatusToFile(); + bool self_update_status = selfUpdate(package, current_packages, package_handler_path); + if (!self_update_status) { + auto agent_details = Singleton::Consume::by(); + string install_error = + "Warning: Agent/Gateway '" + + agent_details->getAgentId() + + "' software update failed. Agent is running previous software. Contact Check Point support."; + if (orchestration_status->getManifestError().find("Gateway was not fully deployed") == string::npos) { + orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::MANIFEST, + OrchestrationStatusResult::FAILED, + install_error + ); + } + } + + return self_update_status; + } + + string packages_dir = getConfigurationWithDefault( + "/etc/cp/packages", + "orchestration", + "Packages directory" + ); + + string current_installation_file = packages_dir + "/" + package_name + "/" + package_name; + auto orchestration_tools = Singleton::Consume::by(); + bool is_clean_installation = !orchestration_tools->doesFileExist(current_installation_file); + + auto package_handler = Singleton::Consume::by(); + if (!package_handler->shouldInstallPackage(package_name, package_handler_path)) { + current_packages.insert(make_pair(package_name, package)); + dbgInfo(D_ORCHESTRATOR) + << "Skipping installation of new package with the same version as current. 
Package: " + << package_name; + continue; + } + + bool current_result = true; + bool is_service = package.getType() == Package::PackageType::Service; + if (is_service) { + current_result = package_handler->preInstallPackage(package_name, package_handler_path); + } + + current_result = current_result && package_handler->installPackage( + package_name, + package_handler_path, + false + ); + + if (current_result && is_service) { + current_result = package_handler->postInstallPackage(package_name, package_handler_path); + } + + if (current_result && is_service) { + current_result = package_handler->updateSavedPackage(package_name, package_handler_path); + } + + if (!current_result) { + string install_error; + if (is_clean_installation) { + string error_hostname_addition = ""; + auto maybe_hostname = Singleton::Consume::by()->getHostname(); + if (maybe_hostname.ok()) { + error_hostname_addition = " on host '" + maybe_hostname.unpack() + "'"; + } + install_error = + "Critical Error: Agent/Gateway was not fully deployed" + + error_hostname_addition + + " and is not enforcing a security policy. Retry installation or contact Check Point support."; + } else { + auto agent_details = Singleton::Consume::by(); + install_error = + "Warning: Agent/Gateway '" + + agent_details->getAgentId() + + "' software update failed. Agent is running previous software. Contact Check Point support."; + } + corrupted_packages.insert(make_pair(package_name, package)); + dbgWarning(D_ORCHESTRATOR) << "Failed to install package. Package: " << package_name; + + auto orchestration_status = Singleton::Consume::by(); + if (orchestration_status->getManifestError().find("Gateway was not fully deployed") == string::npos) { + orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::MANIFEST, + OrchestrationStatusResult::FAILED, + install_error + ); + } + return false; + } + + current_packages.insert(make_pair(package_name, package)); + } + return true; +} + +bool +ManifestHandler::uninstallPackage(Package &removed_package) +{ + dbgDebug(D_ORCHESTRATOR) << "Starting uninstalling. Package: " << removed_package.getName(); + string package_name = removed_package.getName(); + string package_path = default_dir + "/" + package_name + "/" + package_name; + string installation_package = packages_dir + "/" + package_name + "/" + package_name; + auto package_handler = Singleton::Consume::by(); + return package_handler->uninstallPackage(package_name, package_path, installation_package); +} + +bool +ManifestHandler::selfUpdate( + const Package &updated_package, + map ¤t_packages, + const string &installation_file) +{ + dbgInfo(D_ORCHESTRATOR) << "Updating orchestration service"; + + auto current_service = current_packages.find(updated_package.getName()); + if (current_service != current_packages.end()) { + current_service->second = updated_package; + } else { + current_packages.insert(pair(updated_package.getName(), updated_package)); + } + + string temp_manifest_path = manifest_file_path + temp_ext; + + auto orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->packagesToJsonFile(current_packages, temp_manifest_path)) { + dbgWarning(D_ORCHESTRATOR) << "Updating manifest temporary file has failed. 
File: " << temp_manifest_path; + return false; + } + + string current_file = packages_dir + "/" + orch_service_name + "/" + orch_service_name; + string backup_file = current_file + backup_ext; + + dbgDebug(D_ORCHESTRATOR) << "Saving the temporary backup file."; + if (orchestration_tools->doesFileExist(current_file)) { + dbgDebug(D_ORCHESTRATOR) << "Backup current installation package. Destination: " << backup_file; + if (!orchestration_tools->copyFile(current_file, backup_file + temp_ext)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to backup installation file. File: " << current_file; + return false; + } + } else { + dbgDebug(D_ORCHESTRATOR) << "There is no previous version for Orchestration"; + } + + string current_installation_file = current_file + temp_ext; + dbgDebug(D_ORCHESTRATOR) << "Saving the installation file: " << current_installation_file; + if (!orchestration_tools->copyFile(installation_file, current_installation_file)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to save the installation file: " << current_installation_file; + return false; + } + + dbgDebug(D_ORCHESTRATOR) << "Starting to install the orchestration: " << current_installation_file; + + auto package_handler = Singleton::Consume::by(); + return + package_handler->preInstallPackage(orch_service_name, current_installation_file) && + package_handler->installPackage(orch_service_name, current_installation_file, false); +} + +SASAL_END diff --git a/components/security_apps/orchestration/modules/CMakeLists.txt b/components/security_apps/orchestration/modules/CMakeLists.txt new file mode 100755 index 0000000..1e9d587 --- /dev/null +++ b/components/security_apps/orchestration/modules/CMakeLists.txt @@ -0,0 +1,10 @@ +add_library( + orchestration_modules + orchestration_policy.cc + url_parser.cc + package.cc + orchestration_status.cc + data.cc +) + +add_subdirectory(modules_ut) diff --git a/components/security_apps/orchestration/modules/data.cc b/components/security_apps/orchestration/modules/data.cc new file mode 100755 index 0000000..7bcc390 --- /dev/null +++ b/components/security_apps/orchestration/modules/data.cc @@ -0,0 +1,52 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "orchestrator/data.h" + +#include + +#include "sasal.h" + +SASAL_START // Orchestration - Modules + +using namespace std; +using namespace cereal; + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +static const map checksum_map = { + { "sha1sum", Data::ChecksumTypes::SHA1 }, + { "sha256sum", Data::ChecksumTypes::SHA256 }, + { "sha512sum", Data::ChecksumTypes::SHA512 }, + { "md5sum", Data::ChecksumTypes::MD5 } +}; + +void +Data::serialize(JSONInputArchive &in_archive) +{ + string checksum_type_as_string; + in_archive(make_nvp("checksumType", checksum_type_as_string)); + if (checksum_map.find(checksum_type_as_string) != checksum_map.end()) { + checksum_type = checksum_map.at(checksum_type_as_string); + } else { + dbgWarning(D_ORCHESTRATOR) << "Unsupported checksum type: " << checksum_type_as_string; + return; + } + in_archive( + make_nvp("downloadPath", download_path), + make_nvp("checksum", checksum_value), + make_nvp("version", version) + ); +} + +SASAL_END diff --git a/components/security_apps/orchestration/modules/modules_ut/CMakeLists.txt b/components/security_apps/orchestration/modules/modules_ut/CMakeLists.txt new file mode 100755 index 0000000..c99043f --- /dev/null +++ b/components/security_apps/orchestration/modules/modules_ut/CMakeLists.txt @@ -0,0 +1,7 @@ +link_directories(${BOOST_ROOT}/lib) + +add_unit_test( + orchestration_modules_ut + "orchestration_policy_ut.cc;url_parser_ut.cc;package_ut.cc;orchestration_status_ut.cc;data_ut.cc;" + "orchestration_modules;config;environment;metric;event_is;time_proxy;-lboost_regex" +) diff --git a/components/security_apps/orchestration/modules/modules_ut/data_ut.cc b/components/security_apps/orchestration/modules/modules_ut/data_ut.cc new file mode 100755 index 0000000..3556a16 --- /dev/null +++ b/components/security_apps/orchestration/modules/modules_ut/data_ut.cc @@ -0,0 +1,85 @@ +#include "orchestrator/data.h" + +#include "cereal/types/string.hpp" +#include "cereal/archives/json.hpp" +#include +#include +#include + +#include "cptest.h" +#include "customized_cereal_map.h" + +using namespace testing; +using namespace std; + +class DataTest : public Test +{ +public: + bool + load(stringstream &string_stream, Data &data) + { + try { + cereal::JSONInputArchive archive_in(string_stream); + data.serialize(archive_in); + } catch (const cereal::Exception &) { + return false; + } + return true; + } +}; + +TEST_F(DataTest, doNothing) +{ +} + +TEST_F(DataTest, serializationFromString) +{ + stringstream string_stream; + string_stream << "{" + " \"version\": \"c\"," + " \"downloadPath\": \"https://a/data.json\",\n" + " \"checksumType\": \"sha1sum\"," + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910019118f5\"" + "}"; + bool res = false; + Data data; + try { + cereal::JSONInputArchive archive_in(string_stream); + data.serialize(archive_in); + res = true; + } catch (const cereal::Exception &) { + } + EXPECT_EQ(true, res); + + EXPECT_EQ(Data::ChecksumTypes::SHA1, data.getChecksumType()); + EXPECT_EQ("8d4a5709673a05b380ba7d6567e28910019118f5", data.getChecksum()); + EXPECT_EQ("c", data.getVersion()); + EXPECT_EQ("https://a/data.json", data.getDownloadPath()); +} + +TEST_F(DataTest, serializationFromStringAsMap) +{ + stringstream string_stream; + string_stream << "{\n" + " \"ips\": {\n" + " \"version\": \"c\"," + " \"downloadPath\": \"https://a/data.json\",\n" + " \"checksumType\": \"sha1sum\"," + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910019118f5\"" + " }\n" + "}\n"; + map data; + bool res = false; + try { + cereal::JSONInputArchive 
archive_in(string_stream); + cereal::load(archive_in, data); + res = true; + } catch (const cereal::Exception &e) { + } + EXPECT_EQ(true, res); + + EXPECT_EQ(Data::ChecksumTypes::SHA1, data["ips"].getChecksumType()); + EXPECT_EQ("8d4a5709673a05b380ba7d6567e28910019118f5", data["ips"].getChecksum()); + EXPECT_EQ("c", data["ips"].getVersion()); + EXPECT_EQ("https://a/data.json", data["ips"].getDownloadPath()); +} diff --git a/components/security_apps/orchestration/modules/modules_ut/orchestration_policy_ut.cc b/components/security_apps/orchestration/modules/modules_ut/orchestration_policy_ut.cc new file mode 100755 index 0000000..fdd1454 --- /dev/null +++ b/components/security_apps/orchestration/modules/modules_ut/orchestration_policy_ut.cc @@ -0,0 +1,158 @@ +#include "orchestration_policy.h" + +#include +#include + +#include "cptest.h" +#include "cereal/types/string.hpp" + +using namespace testing; +using namespace std; + +class PolicyTest : public Test +{ +public: + PolicyTest() {} + + void + orchestrationPolicyToString(stringstream &string_stream) + { + cereal::JSONInputArchive archive_in(string_stream); + orchestration_policy.serialize(archive_in); + } + + OrchestrationPolicy orchestration_policy; +}; + +TEST_F(PolicyTest, doNothing) +{ +} + +TEST_F(PolicyTest, serialization) +{ + stringstream string_stream; + string_stream << "{" + " \"fog-address\": \"http://10.0.0.18:81/control/\"," + " \"agent-type\": \"13324sadsd2\"," + " \"pulling-interval\": 20," + " \"error-pulling-interval\": 15" + "}"; + try { + orchestrationPolicyToString(string_stream); + } catch (cereal::Exception &e) { + ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what(); + } + + EXPECT_EQ(15u, orchestration_policy.getErrorSleepInterval()); + EXPECT_EQ(20u, orchestration_policy.getSleepInterval()); + EXPECT_EQ("http://10.0.0.18:81/control/", orchestration_policy.getFogAddress()); +} + +TEST_F(PolicyTest, noAgentType) +{ + stringstream string_stream; + string_stream << "{" + " \"fog-address\": \"http://10.0.0.18:81/control/\"," + " \"agent-type\": \"\"," + " \"pulling-interval\": 20," + " \"error-pulling-interval\": 15" + "}"; + try { + orchestrationPolicyToString(string_stream); + } catch (cereal::Exception &e) { + ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what(); + } + + EXPECT_EQ(15u, orchestration_policy.getErrorSleepInterval()); + EXPECT_EQ(20u, orchestration_policy.getSleepInterval()); + EXPECT_EQ("http://10.0.0.18:81/control/", orchestration_policy.getFogAddress()); +} + +TEST_F(PolicyTest, zeroSleepIntervels) +{ + stringstream string_stream; + string_stream << "{" + " \"fog-address\": \"http://10.0.0.18:81/control/\"," + " \"agent-type\": \"13324sadsd2\"," + " \"pulling-interval\": 0," + " \"error-pulling-interval\": 0" + "}"; + try { + orchestrationPolicyToString(string_stream); + } catch (cereal::Exception &e) { + ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what(); + } + + EXPECT_EQ(0u, orchestration_policy.getErrorSleepInterval()); + EXPECT_EQ(0u, orchestration_policy.getSleepInterval()); + EXPECT_EQ("http://10.0.0.18:81/control/", orchestration_policy.getFogAddress()); +} + +TEST_F(PolicyTest, operatorEqual) +{ + stringstream string_stream; + string_stream << "{" + " \"fog-address\": \"http://10.0.0.18:81/control/\"," + " \"pulling-interval\": 20," + " \"error-pulling-interval\": 15" + "}"; + try { + orchestrationPolicyToString(string_stream); + } catch (cereal::Exception &e) { + ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what(); + } + + OrchestrationPolicy 
orchestration_copy_policy; + stringstream string_stream_copy; + string_stream_copy << "{" + " \"fog-address\": \"http://10.0.0.18:81/control/\"," + " \"pulling-interval\": 20," + " \"error-pulling-interval\": 15" + "}"; + try{ + cereal::JSONInputArchive archive_in(string_stream_copy); + orchestration_copy_policy.serialize(archive_in); + } catch (cereal::Exception &e) { + ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what(); + } + EXPECT_TRUE(orchestration_copy_policy == orchestration_policy); + EXPECT_FALSE(orchestration_copy_policy != orchestration_policy); + + OrchestrationPolicy orchestration_new_policy; + stringstream string_stream_new; + string_stream_new << "{" + " \"fog-address\": \"http://10.0.0.18:801/control/\"," + " \"pulling-interval\": 20," + " \"error-pulling-interval\": 15" + "}"; + try{ + cereal::JSONInputArchive archive_in(string_stream_new); + orchestration_new_policy.serialize(archive_in); + } catch (cereal::Exception &e) { + ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what(); + } + EXPECT_FALSE(orchestration_new_policy == orchestration_policy); + EXPECT_TRUE(orchestration_new_policy != orchestration_policy); +} + + +TEST_F(PolicyTest, newOptionalFields) +{ + stringstream string_stream; + string_stream << "{" + " \"fog-address\": \"https://fog-api-gw-agents.cloud.ngen.checkpoint.com\"," + " \"pulling-interval\": 30," + " \"error-pulling-interval\": 10," + " \"agent-type\": \"arrow\"" + "}"; + + try { + orchestrationPolicyToString(string_stream); + } catch (cereal::Exception &e) { + ASSERT_TRUE(false) << "Cereal threw an exception: " << e.what(); + } + + EXPECT_EQ(10u, orchestration_policy.getErrorSleepInterval()); + EXPECT_EQ(30u, orchestration_policy.getSleepInterval()); + EXPECT_EQ("https://fog-api-gw-agents.cloud.ngen.checkpoint.com", orchestration_policy.getFogAddress()); +} diff --git a/components/security_apps/orchestration/modules/modules_ut/orchestration_status_ut.cc b/components/security_apps/orchestration/modules/modules_ut/orchestration_status_ut.cc new file mode 100755 index 0000000..6c902ed --- /dev/null +++ b/components/security_apps/orchestration/modules/modules_ut/orchestration_status_ut.cc @@ -0,0 +1,484 @@ +#include "orchestration_status.h" + +#include +#include +#include +#include + +#include "cptest.h" +#include "config.h" +#include "config_component.h" +#include "mock/mock_time_get.h" +#include "mock/mock_orchestration_tools.h" +#include "mock/mock_agent_details.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_rest_api.h" + +using namespace testing; +using namespace std; +using namespace chrono; + +class OrchestrationStatusTest : public Test +{ +public: + ~OrchestrationStatusTest() { Debug::setNewDefaultStdout(&cout); } + + void + init() + { + Debug::setUnitTestFlag(D_ORCHESTRATOR, Debug::DebugLevel::TRACE); + Debug::setNewDefaultStdout(&capture_debug); + CPTestTempfile status_file; + file_path = status_file.fname; + setConfiguration(file_path, "orchestration", "Orchestration status path"); + // Write orchestration status to file routine + EXPECT_CALL( + mock_mainloop, + addRecurringRoutine(I_MainLoop::RoutineType::Timer, chrono::microseconds(5000000), _, _, false)) + .WillOnce(DoAll(SaveArg<2>(&routine), Return(1)) + ); + EXPECT_CALL(mock_tools, readFile(file_path)).WillOnce(Return(start_file_content)); + orchestration_status.init(); + } + + string + orchestrationStatusFileToString() + { + routine(); + ifstream status_file(file_path); + stringstream string_stream; + if (status_file.is_open()) { + string line; + bool 
is_first_line = true; + while (getline(status_file, line)) { + if (is_first_line) { + is_first_line = false; + } else { + string_stream << endl; + } + string_stream << line; + } + status_file.close(); + } + return string_stream.str(); + } + + string + buildOrchestrationStatusJSON( + const string &last_update_attempt = "None", + const string &last_update_status = "None", + const string &last_update = "None", + const string &last_manifest_update = "None", + const string &policy_version = "", + const string &last_policy_update = "None", + const string &last_settings_update = "None", + const string &upgrade_mode = "None", + const string &fog_address = "None", + const string ®istration_status = "None", + const string &manifest_status = "None", + const string ®istration_details_name = "", + const string ®istration_details_type = "", + const string ®istration_details_platform = "", + const string ®istration_details_architecture = "", + const string &agent_id = "None", + const string &profile_id = "None", + const string &tenant_id = "None" + ) + { + return "{\n" + " \"Last update attempt\": \"" + last_update_attempt + "\",\n" + " \"Last update status\": \"" + last_update_status + "\",\n" + " \"Last update\": \"" + last_update + "\",\n" + " \"Last manifest update\": \"" + last_manifest_update + "\",\n" + " \"Policy version\": \"" + policy_version + "\",\n" + " \"Last policy update\": \"" + last_policy_update + "\",\n" + " \"Last settings update\": \"" + last_settings_update + "\",\n" + " \"Upgrade mode\": \"" + upgrade_mode + "\",\n" + " \"Fog address\": \"" + fog_address + "\",\n" + " \"Registration status\": \"" + registration_status + "\",\n" + " \"Registration details\": {\n" + " \"Name\": \"" + registration_details_name + "\",\n" + " \"Type\": \"" + registration_details_type + "\",\n" + " \"Platform\": \"" + registration_details_platform + "\",\n" + " \"Architecture\": \"" + registration_details_architecture + "\"\n" + " },\n" + " \"Agent ID\": \"" + agent_id + "\",\n" + " \"Profile ID\": \"" + profile_id + "\",\n" + " \"Tenant ID\": \"" + tenant_id + "\",\n" + " \"Manifest status\": \"" + manifest_status + "\",\n" + " \"Service policy\": {},\n" + " \"Service settings\": {}\n" + "}"; + } + + ::Environment env; + ConfigComponent config; + StrictMock time; + StrictMock mock_mainloop; + ostringstream capture_debug; + StrictMock mock_tools; + StrictMock mock_agent_details; + OrchestrationStatus orchestration_status; + I_OrchestrationStatus * i_orchestration_status = + Singleton::Consume::from(orchestration_status); + string file_path; + Maybe start_file_content = genError("No file"); + I_MainLoop::Routine routine; +}; + +TEST_F(OrchestrationStatusTest, doNothing) +{ +} + +TEST_F(OrchestrationStatusTest, noFieldsValues) +{ + init(); + auto result = orchestrationStatusFileToString(); + EXPECT_EQ(buildOrchestrationStatusJSON(), result); +} + +TEST_F(OrchestrationStatusTest, recoverFields) +{ + init(); + auto result = orchestrationStatusFileToString(); + i_orchestration_status->recoverFields(); + EXPECT_EQ(orchestrationStatusFileToString(), result); +} + +TEST_F(OrchestrationStatusTest, loadFromFile) +{ + Maybe status = genError("No file");; + CPTestTempfile status_file; + file_path = status_file.fname; + setConfiguration(file_path, "orchestration", "Orchestration status path"); + // Write to file routine + EXPECT_CALL( + mock_mainloop, + addRecurringRoutine(I_MainLoop::RoutineType::Timer, chrono::microseconds(5000000), _, _, false) + ).Times(3).WillRepeatedly(DoAll(SaveArg<2>(&routine), Return(1))); 
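+ // orchestration_status.init() is invoked three times in this test, so the routine
+ // registration and the status-file read are each expected three times.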
+ + EXPECT_CALL(mock_tools, readFile(file_path)).Times(3).WillRepeatedly(Return(status)); + orchestration_status.init(); + status = orchestrationStatusFileToString(); + + orchestration_status.init(); + EXPECT_EQ(orchestrationStatusFileToString(), status.unpack()); + + EXPECT_CALL(time, getLocalTimeStr()) + .WillOnce(Return(string("attempt time"))) + .WillOnce(Return(string("current time"))); + i_orchestration_status->setLastUpdateAttempt(); + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::LAST_UPDATE, + OrchestrationStatusResult::SUCCESS + ); + + status = orchestrationStatusFileToString(); + EXPECT_EQ(buildOrchestrationStatusJSON("attempt time", "Succeeded ", "current time"), status.unpack()); + + // Write status to file + routine(); + + // Reload status from file and validate status + orchestration_status.init(); + EXPECT_EQ(buildOrchestrationStatusJSON("attempt time", "Succeeded ", "current time"), status.unpack()); +} + +TEST_F(OrchestrationStatusTest, checkUpdateStatus) +{ + init(); + EXPECT_CALL(time, getLocalTimeStr()) + .WillOnce(Return(string("attempt time"))) + .WillOnce(Return(string("current time"))); + + i_orchestration_status->setLastUpdateAttempt(); + + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::LAST_UPDATE, + OrchestrationStatusResult::SUCCESS + ); + auto result = orchestrationStatusFileToString(); + EXPECT_EQ(buildOrchestrationStatusJSON("attempt time", "Succeeded ", "current time"), result); +} + +TEST_F(OrchestrationStatusTest, recoveryFields) +{ + init(); + CPTestTempfile status({""}); + setConfiguration(status.fname, "orchestration", "Orchestration status path"); + + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::REGISTRATION, + OrchestrationStatusResult::SUCCESS + ); + const string agent_id = "AgentId"; + const string profile_id = "ProfileId"; + const string tenant_id = "TenantId"; + auto fog_addr = Maybe(string("FogDomain")); + + EXPECT_CALL(mock_agent_details, getAgentId()).WillOnce(Return(agent_id)); + EXPECT_CALL(mock_agent_details, getProfileId()).WillOnce(Return(profile_id)); + EXPECT_CALL(mock_agent_details, getTenantId()).WillOnce(Return(tenant_id)); + EXPECT_CALL(mock_agent_details, getFogDomain()).WillOnce(Return(fog_addr)); + i_orchestration_status->writeStatusToFile(); + EXPECT_THAT(capture_debug.str(), HasSubstr("Repairing status fields")); + + EXPECT_EQ(i_orchestration_status->getAgentId(), agent_id); + EXPECT_EQ(i_orchestration_status->getProfileId(), profile_id); + EXPECT_EQ(i_orchestration_status->getTenantId(), tenant_id); + EXPECT_EQ(i_orchestration_status->getFogAddress(), fog_addr.unpack()); +} + +TEST_F(OrchestrationStatusTest, updateAllLastUpdatesTypes) +{ + init(); + EXPECT_CALL(time, getLocalTimeStr()) + .WillOnce(Return(string("attempt time"))) + .WillOnce(Return(string("current time"))) + .WillOnce(Return(string("current time001"))); + + i_orchestration_status->setLastUpdateAttempt(); + + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::LAST_UPDATE, + OrchestrationStatusResult::SUCCESS + ); + i_orchestration_status->setIsConfigurationUpdated( + EnumArray(true, false, false) + ); + auto result = orchestrationStatusFileToString(); + EXPECT_EQ(buildOrchestrationStatusJSON("attempt time", "Succeeded ", "current time", "current time001"), result); + + EXPECT_CALL(time, getLocalTimeStr()) + .Times(2) + .WillRepeatedly(Return(string("current time002"))); + + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::LAST_UPDATE, + 
OrchestrationStatusResult::SUCCESS + ); + i_orchestration_status->setIsConfigurationUpdated( + EnumArray(true, true, false) + ); + result = orchestrationStatusFileToString(); + EXPECT_EQ( + buildOrchestrationStatusJSON( + "attempt time", + "Succeeded ", + "current time002", + "current time002", + "", + "current time002" + ), + result + ); + + EXPECT_CALL(time, getLocalTimeStr()) + .Times(2) + .WillRepeatedly(Return(string("current time003"))); + + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::LAST_UPDATE, + OrchestrationStatusResult::SUCCESS + ); + i_orchestration_status->setIsConfigurationUpdated( + EnumArray(true, true, true) + ); + result = orchestrationStatusFileToString(); + EXPECT_EQ( + buildOrchestrationStatusJSON( + "attempt time", + "Succeeded ", + "current time003", + "current time003", + "", + "current time003", + "current time003" + ), + result + ); +} + +TEST_F(OrchestrationStatusTest, errorInRegistrationAndMainfest) +{ + init(); + string fog_address = "http://fog.address"; + string registar_error = "Fail to registar"; + string manifest_error = "Fail to achieve manifest"; + string last_update_error = "Fail to update"; + + EXPECT_CALL(time, getLocalTimeStr()).Times(3).WillRepeatedly(Return(string("Time"))); + + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::LAST_UPDATE, + OrchestrationStatusResult::SUCCESS + ); + i_orchestration_status->setIsConfigurationUpdated( + EnumArray(true, true, true) + ); + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::LAST_UPDATE, + OrchestrationStatusResult::FAILED, + last_update_error + ); + i_orchestration_status->setIsConfigurationUpdated( + EnumArray(false, false, false) + ); + + i_orchestration_status->setUpgradeMode("Online upgrades"); + i_orchestration_status->setFogAddress(fog_address); + + i_orchestration_status->setUpgradeMode("Online upgrades"); + i_orchestration_status->setFogAddress(fog_address); + + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::REGISTRATION, + OrchestrationStatusResult::FAILED, + registar_error + ); + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::MANIFEST, + OrchestrationStatusResult::FAILED, + manifest_error + ); + EXPECT_EQ(i_orchestration_status->getManifestError(), manifest_error); + + auto result = orchestrationStatusFileToString(); + EXPECT_EQ( + buildOrchestrationStatusJSON( + "None", + "Failed. Reason: " + last_update_error, + "Time", + "Time", + "", + "Time", + "Time", + "Online upgrades", + fog_address, + "Failed. Reason: " + registar_error, + "Failed. 
Reason: " + manifest_error + ), + result + ); +} + +TEST_F(OrchestrationStatusTest, setAllFields) +{ + init(); + string fog_address = "http://fog.address"; + EXPECT_CALL(time, getLocalTimeStr()) + .Times(3) + .WillRepeatedly(Return(string("current time"))); + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::LAST_UPDATE, + OrchestrationStatusResult::SUCCESS + ); + i_orchestration_status->setIsConfigurationUpdated( + EnumArray(true, true, true) + ); + i_orchestration_status->setRegistrationDetails("name", "type", "platform", "arch"); + i_orchestration_status->setAgentDetails("id", "profile", "tenant"); + i_orchestration_status->setFogAddress("http://fog.address"); + i_orchestration_status->setPolicyVersion("12"); + i_orchestration_status->setAgentType("test_type"); + i_orchestration_status->setUpgradeMode("Test Mode"); + i_orchestration_status->setRegistrationStatus("Succeeded"); + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::REGISTRATION, + OrchestrationStatusResult::SUCCESS + ); + i_orchestration_status->setFieldStatus( + OrchestrationStatusFieldType::MANIFEST, + OrchestrationStatusResult::SUCCESS + ); + + string non_empty_conf = "{x:y}"; + string curr_mock_path = "path"; + EXPECT_CALL(mock_tools, readFile(curr_mock_path)).WillRepeatedly(Return(non_empty_conf)); + EXPECT_CALL(mock_tools, readFile(string("new_path"))).WillOnce(Return(string("{}"))); + + i_orchestration_status->setServiceConfiguration( + "service_a", "path", OrchestrationStatusConfigType::SETTINGS + ); + i_orchestration_status->setServiceConfiguration( + "service_b", "path", OrchestrationStatusConfigType::POLICY + ); + i_orchestration_status->setServiceConfiguration( + "service_c", "path", OrchestrationStatusConfigType::POLICY + ); + i_orchestration_status->setServiceConfiguration( + "service_c", "new_path", OrchestrationStatusConfigType::POLICY + ); + i_orchestration_status->setLastUpdateAttempt(); + + auto result = orchestrationStatusFileToString(); + + string expected = "{\n" + " \"Last update attempt\": \"current time\",\n" + " \"Last update status\": \"Succeeded \",\n" + " \"Last update\": \"current time\",\n" + " \"Last manifest update\": \"current time\",\n" + " \"Policy version\": \"12\",\n" + " \"Last policy update\": \"current time\",\n" + " \"Last settings update\": \"current time\",\n" + " \"Upgrade mode\": \"Test Mode\",\n" + " \"Fog address\": \"http://fog.address\",\n" + " \"Registration status\": \"Succeeded \",\n" + " \"Registration details\": {\n" + " \"Name\": \"name\",\n" + " \"Type\": \"test_type\",\n" + " \"Platform\": \"platform\",\n" + " \"Architecture\": \"arch\"\n" + " },\n" + " \"Agent ID\": \"id\",\n" + " \"Profile ID\": \"profile\",\n" + " \"Tenant ID\": \"tenant\",\n" + " \"Manifest status\": \"Succeeded \",\n" + " \"Service policy\": {\n" + " \"service_b\": \"path\"\n" + " },\n" + " \"Service settings\": {\n" + " \"service_a\": \"path\"\n" + " }\n" + "}"; + EXPECT_EQ(expected, result); + + // Now lets check load from file + routine(); + EXPECT_EQ(expected, orchestrationStatusFileToString()); + + EXPECT_CALL( + mock_mainloop, + addRecurringRoutine(I_MainLoop::RoutineType::Timer, chrono::microseconds(5000000), _, _, false)) + .WillOnce(DoAll(SaveArg<2>(&routine), Return(1))); + EXPECT_CALL(mock_tools, readFile(file_path)).Times(1).WillOnce(Return(expected)); + orchestration_status.init(); + EXPECT_EQ(expected, orchestrationStatusFileToString()); + + map service_map_a = {{"service_a", "path"}}; + map service_map_b = {{"service_b", "path"}}; + + 
string agent_details = + "\n Name: name" + "\n Type: test_type" + "\n Platform: platform" + "\n Architecture: arch"; + + EXPECT_EQ(i_orchestration_status->getLastUpdateAttempt(), "current time"); + EXPECT_EQ(i_orchestration_status->getUpdateStatus(), "Succeeded ");; + EXPECT_EQ(i_orchestration_status->getUpdateTime(), "current time"); + EXPECT_EQ(i_orchestration_status->getLastManifestUpdate(), "current time"); + EXPECT_EQ(i_orchestration_status->getPolicyVersion(), "12"); + EXPECT_EQ(i_orchestration_status->getLastPolicyUpdate(), "current time"); + EXPECT_EQ(i_orchestration_status->getLastSettingsUpdate(), "current time"); + EXPECT_EQ(i_orchestration_status->getUpgradeMode(), "Test Mode"); + EXPECT_EQ(i_orchestration_status->getFogAddress(), "http://fog.address"); + EXPECT_EQ(i_orchestration_status->getRegistrationStatus(), "Succeeded "); + EXPECT_EQ(i_orchestration_status->getAgentId(), "id"); + EXPECT_EQ(i_orchestration_status->getProfileId(), "profile"); + EXPECT_EQ(i_orchestration_status->getTenantId(), "tenant"); + EXPECT_EQ(i_orchestration_status->getManifestStatus(), "Succeeded "); + EXPECT_EQ(i_orchestration_status->getServicePolicies(), service_map_b); + EXPECT_EQ(i_orchestration_status->getServiceSettings(), service_map_a); + EXPECT_EQ(i_orchestration_status->getRegistrationDetails(), agent_details); +} diff --git a/components/security_apps/orchestration/modules/modules_ut/package_ut.cc b/components/security_apps/orchestration/modules/modules_ut/package_ut.cc new file mode 100755 index 0000000..c4cc089 --- /dev/null +++ b/components/security_apps/orchestration/modules/modules_ut/package_ut.cc @@ -0,0 +1,236 @@ +#include "package.h" + +#include "cptest.h" +#include "cereal/types/string.hpp" +#include "cereal/archives/json.hpp" +#include +#include +#include + +using namespace testing; +using namespace std; + +class PackageTest : public Test +{ +public: + PackageTest() {} + + bool + load(stringstream &string_stream, Package &package) + { + try { + cereal::JSONInputArchive archive_in(string_stream); + package.serialize(archive_in); + } catch (const cereal::Exception &) { + return false; + } + return true; + } + + void + write(const string &path, Package &package) + { + std::ofstream os(path); + cereal::JSONOutputArchive archive_out(os); + package.serialize(archive_out); + } + + string + readFile(const string &path) + { + ifstream text_file(path); + stringstream buffer; + buffer << text_file.rdbuf(); + return buffer.str(); + } +}; + +TEST_F(PackageTest, doNothing) +{ +} + +TEST_F(PackageTest, serializationFromString) +{ + stringstream string_stream; + string_stream << "{" + " \"version\": \"c\"," + " \"download-path\": \"https://a/install_orchestration.sh\",\n" + " \"relative-path\": \"/install_orchestration.sh\",\n" + " \"name\": \"orchestration\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910019118f5\"," + " \"package-type\": \"service\"," + " \"require\": []" + "}"; + Package package; + EXPECT_EQ(true, load(string_stream, package)); + + vector links = { "https://10.0.0.18/install_orchestration.sh", "ftp://172.23.92.135/policy.txt" }; + + EXPECT_EQ("orchestration", package.getName()); + EXPECT_EQ(Package::ChecksumTypes::SHA1, package.getChecksumType()); + EXPECT_EQ("8d4a5709673a05b380ba7d6567e28910019118f5", package.getChecksum()); + EXPECT_EQ("orchestration", package.getName()); + EXPECT_EQ("c", package.getVersion()); + EXPECT_EQ(Package::PackageType::Service, package.getType()); + EXPECT_TRUE(package.isInstallable().ok()); +} + 
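+// Round trip: a package loaded from JSON and serialized back out should produce
+// the same text.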
+TEST_F(PackageTest, writeAsJson) +{ + stringstream string_stream; + string_stream << "{\n" + " \"download-path\": \"https://a/install_orchestration.sh\",\n" + " \"relative-path\": \"/install_orchestration.sh\",\n" + " \"version\": \"c\",\n" + " \"name\": \"orchestration\",\n" + " \"checksum-type\": \"sha1sum\",\n" + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910019118f5\",\n" + " \"package-type\": \"service\"\n" + "}"; + Package package; + EXPECT_EQ(true, load(string_stream, package)); + + vector links = { "https://10.0.0.18/install_orchestration.sh", "ftp://172.23.92.135/policy.txt" }; + + EXPECT_EQ("orchestration", package.getName()); + EXPECT_EQ(Package::ChecksumTypes::SHA1, package.getChecksumType()); + EXPECT_EQ("8d4a5709673a05b380ba7d6567e28910019118f5", package.getChecksum()); + EXPECT_EQ("orchestration", package.getName()); + EXPECT_EQ("c", package.getVersion()); + EXPECT_EQ(Package::PackageType::Service, package.getType()); + EXPECT_TRUE(package.isInstallable().ok()); + + write("service.json", package); + string data = readFile("service.json"); + EXPECT_EQ(string_stream.str(), data); +} + +TEST_F(PackageTest, eqService) +{ + stringstream string_stream; + string_stream << "{\n" + " \"download-path\": \"https://a/install_orchestration.sh\",\n" + " \"relative-path\": \"/install_orchestration.sh\",\n" + " \"version\": \"c\",\n" + " \"name\": \"orchestration\",\n" + " \"checksum-type\": \"sha1sum\",\n" + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910019118f5\",\n" + " \"package-type\": \"service\"\n" + "}"; + Package package; + Package package2; + EXPECT_TRUE(load(string_stream, package)); + string_stream.clear(); + string_stream << "{\n" + " \"download-path\": \"https://a/install_orchestration.sh\",\n" + " \"relative-path\": \"/install_orchestration.sh\",\n" + " \"version\": \"c\",\n" + " \"name\": \"orchestration\",\n" + " \"checksum-type\": \"sha1sum\",\n" + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910000000000\",\n" + " \"package-type\": \"service\"\n" + "}"; + EXPECT_TRUE(load(string_stream, package)); + EXPECT_TRUE(package != package2); +} + +TEST_F(PackageTest, changeDir) +{ + stringstream string_stream; + string_stream << "{\n" + " \"download-path\": \"https://a/install_orchestration.sh\",\n" + " \"relative-path\": \"/install_orchestration.sh\",\n" + " \"version\": \"c\",\n" + " \"name\": \"../..\",\n" + " \"checksum-type\": \"sha1sum\",\n" + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910019118f5\",\n" + " \"package-type\": \"service\"\n" + "}"; + Package package; + EXPECT_FALSE(load(string_stream, package)); +} + +TEST_F(PackageTest, mkdirCommand) +{ + stringstream string_stream; + string_stream << "{\n" + " \"download-path\": \"https://a/install_orchestration.sh\",\n" + " \"relative-path\": \"/install_orchestration.sh\",\n" + " \"version\": \"c\",\n" + " \"name\": \"mkdir ../../something\",\n" + " \"checksum-type\": \"sha1sum\",\n" + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910019118f5\",\n" + " \"package-type\": \"service\"\n" + "}"; + Package package; + EXPECT_FALSE(load(string_stream, package)); +} + +TEST_F(PackageTest, badPackageName) +{ + stringstream string_stream; + string_stream << "{\n" + " \"download-path\": \"https://a/install_orchestration.sh\",\n" + " \"relative-path\": \"/install_orchestration.sh\",\n" + " \"version\": \"c\",\n" + " \"name\": \"- - - - - -\",\n" + " \"checksum-type\": \"sha1sum\",\n" + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910019118f5\",\n" + " \"package-type\": \"service\"\n" + "}"; + Package package; + 
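+ // A name made only of dashes and spaces must not pass package-name validation.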
EXPECT_FALSE(load(string_stream, package)); +} + +TEST_F(PackageTest, anyOrder) +{ + stringstream string_stream; + string_stream << "{\n" + " \"name\": \"asdQwe\",\n" + " \"relative-path\": \"/install_orchestration.sh\",\n" + " \"version\": \"c\",\n" + " \"download-path\": \"https://a/install_orchestration.sh\",\n" + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910019118f5\",\n" + " \"package-type\": \"service\",\n" + " \"checksum-type\": \"sha1sum\"\n" + "}"; + Package package; + EXPECT_TRUE(load(string_stream, package)); +} + +TEST_F(PackageTest, anyOrderWithRequire) +{ + stringstream string_stream; + string_stream << "{\n" + " \"require\": [],\n" + " \"name\": \"asdQwe\",\n" + " \"version\": \"c\",\n" + " \"relative-path\": \"/install_orchestration.sh\",\n" + " \"download-path\": \"https://a/install_orchestration.sh\",\n" + " \"checksum\": \"8d4a5709673a05b380ba7d6567e28910019118f5\",\n" + " \"package-type\": \"service\",\n" + " \"checksum-type\": \"sha1sum\"\n" + "}"; + Package package; + EXPECT_TRUE(load(string_stream, package)); +} + +TEST_F(PackageTest, uninstallablePackage) +{ + stringstream string_stream; + string_stream << "{\n" + " \"name\": \"waap\",\n" + " \"version\": \"\",\n" + " \"download-path\": \"\",\n" + " \"relative-path\": \"\",\n" + " \"checksum\": \"\",\n" + " \"package-type\": \"service\",\n" + " \"checksum-type\": \"sha1sum\",\n" + " \"status\": false,\n" + " \"message\": \"This security app isn't valid for this agent\"\n" + "}"; + Package package; + EXPECT_TRUE(load(string_stream, package)); + EXPECT_THAT(package.isInstallable(), IsError("This security app isn't valid for this agent")); +} diff --git a/components/security_apps/orchestration/modules/modules_ut/url_parser_ut.cc b/components/security_apps/orchestration/modules/modules_ut/url_parser_ut.cc new file mode 100755 index 0000000..eaa465e --- /dev/null +++ b/components/security_apps/orchestration/modules/modules_ut/url_parser_ut.cc @@ -0,0 +1,117 @@ +#include "url_parser.h" + +#include "cptest.h" +#include "mock/mock_orchestration_tools.h" + +#include +#include + +using namespace testing; +using namespace std; + +class URLParserTest : public Test +{ +public: + URLParserTest() {} + + StrictMock mock_orchestration_tools; +}; + +TEST_F(URLParserTest, doNothing) +{ +} + +TEST_F(URLParserTest, parseHTTP) +{ + URLParser link("http://172.23.92.180:180/something"); + + EXPECT_FALSE(link.isOverSSL()); + EXPECT_EQ("180", link.getPort()); + EXPECT_EQ("/something", link.getQuery()); +} + +TEST_F(URLParserTest, parseHTTPS) +{ + URLParser link("https://172.23.92.180:180/something"); + + EXPECT_TRUE(link.isOverSSL()); + EXPECT_EQ("180", link.getPort()); + EXPECT_EQ("/something", link.getQuery()); +} + +TEST_F(URLParserTest, parseAWS) +{ + URLParser link("https://a58efa94efdf711e8a6540620a59b447-1878332922.eu-west-1.elb.amazonaws.com/"); + + EXPECT_TRUE(link.isOverSSL()); + EXPECT_EQ("443", link.getPort()); + EXPECT_EQ("a58efa94efdf711e8a6540620a59b447-1878332922.eu-west-1.elb.amazonaws.com", link.getBaseURL().unpack()); + EXPECT_EQ("", link.getQuery()); +} + +TEST_F(URLParserTest, parseAWSWithoutSlash) +{ + URLParser link("https://a58efa94efdf711e8a6540620a59b447-1878332922.eu-west-1.elb.amazonaws.com"); + + EXPECT_TRUE(link.isOverSSL()); + EXPECT_EQ("443", link.getPort()); + EXPECT_EQ("a58efa94efdf711e8a6540620a59b447-1878332922.eu-west-1.elb.amazonaws.com", link.getBaseURL().unpack()); + EXPECT_EQ("", link.getQuery()); +} + +TEST_F(URLParserTest, protocolIsMissing) +{ + // HTTPS is set by default when protocol is not 
present in URL. + URLParser link("a58efa94efdf711e8a6540620a59b447-1878332922.eu-west-1.elb.amazonaws.com"); + + EXPECT_EQ(link.getBaseURL().unpack(), "a58efa94efdf711e8a6540620a59b447-1878332922.eu-west-1.elb.amazonaws.com"); + EXPECT_TRUE(link.isOverSSL()); + EXPECT_EQ("443", link.getPort()); + EXPECT_EQ("", link.getQuery()); +} + +TEST_F(URLParserTest, parseBadURL) +{ + URLParser link("http://this_is_not_https_site.com/something"); + + EXPECT_FALSE(link.isOverSSL()); + EXPECT_EQ("80", link.getPort()); + EXPECT_EQ("this_is_not_https_site.com", link.getBaseURL().unpack()); + EXPECT_EQ("/something", link.getQuery()); +} + +TEST_F(URLParserTest, parseNothing) +{ + URLParser link(""); + EXPECT_FALSE(link.getBaseURL().ok()); + EXPECT_TRUE(link.isOverSSL()); + EXPECT_EQ("443", link.getPort()); + EXPECT_EQ("", link.getQuery()); +} + +TEST_F(URLParserTest, copyCtr) +{ + URLParser link(""); + URLParser copy_link = link; + EXPECT_TRUE(copy_link.isOverSSL()); + EXPECT_EQ("443", copy_link.getPort()); + EXPECT_EQ("", copy_link.getQuery()); +} + +TEST_F(URLParserTest, printTest) +{ + string url_path = "this_is_test_url"; + URLParser link(url_path); + EXPECT_EQ("https://" + url_path + ":443", link.toString()); + stringstream ss; + ss << link; + EXPECT_EQ("https://" + url_path + ":443", ss.str()); +} +TEST_F(URLParserTest, setQuery) +{ + string url_path = "this_is_test_url/test.sh"; + URLParser link(url_path); + EXPECT_EQ("https://" + url_path + ":443", link.toString()); + link.setQuery("/new-query"); + EXPECT_EQ("https://this_is_test_url/new-query:443", link.toString()); +} diff --git a/components/security_apps/orchestration/modules/orchestration_policy.cc b/components/security_apps/orchestration/modules/orchestration_policy.cc new file mode 100755 index 0000000..c28b241 --- /dev/null +++ b/components/security_apps/orchestration/modules/orchestration_policy.cc @@ -0,0 +1,64 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "orchestration_policy.h" + +#include "sasal.h" + +SASAL_START // Orchestration - Modules + +using namespace std; +using namespace cereal; + +const string & +OrchestrationPolicy::getFogAddress() const +{ + return fog_address; +} + +const unsigned long & +OrchestrationPolicy::getSleepInterval() const +{ + return sleep_interval; +} + +const unsigned long & +OrchestrationPolicy::getErrorSleepInterval() const +{ + return error_sleep_interval; +} + +void +OrchestrationPolicy::serialize(JSONInputArchive &archive) +{ + // Split it, so the order doesn't matter. 
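+ // Reading one field per archive() call lets cereal accept the keys in any order.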
+ archive(make_nvp("fog-address", fog_address)); + archive(make_nvp("pulling-interval", sleep_interval)); + archive(make_nvp("error-pulling-interval", error_sleep_interval)); +} + +bool +OrchestrationPolicy::operator==(const OrchestrationPolicy &other) const +{ + return error_sleep_interval == other.error_sleep_interval && + sleep_interval == other.sleep_interval && + fog_address == other.fog_address; +} + +bool +OrchestrationPolicy::operator!=(const OrchestrationPolicy &other) const +{ + return !((*this) == other); +} + +SASAL_END diff --git a/components/security_apps/orchestration/modules/orchestration_status.cc b/components/security_apps/orchestration/modules/orchestration_status.cc new file mode 100755 index 0000000..cda2561 --- /dev/null +++ b/components/security_apps/orchestration/modules/orchestration_status.cc @@ -0,0 +1,685 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "orchestration_status.h" + +#include +#include +#include + +#include "debug.h" +#include "config.h" +#include "sasal.h" + +using namespace cereal; +using namespace std; +using namespace chrono; + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +SASAL_START // Orchestration - Modules + +class RegistrationDetails +{ +public: + RegistrationDetails() = default; + RegistrationDetails(const RegistrationDetails &) = default; + RegistrationDetails(RegistrationDetails &&) = default; + RegistrationDetails( + string name, + string type, + string platform, + string architecture) + : + name(name), + type(type), + platform(platform), + architecture(architecture) + {} + + void + serialize(cereal::JSONOutputArchive &archive) + { + if (type == "InfinityNextGateway") { + type = "AppSecGateway"; + } + archive( + cereal::make_nvp("Name", name), + cereal::make_nvp("Type", type), + cereal::make_nvp("Platform", platform), + cereal::make_nvp("Architecture", architecture) + ); + } + + void + serialize(cereal::JSONInputArchive &archive) + { + archive( + cereal::make_nvp("Name", name), + cereal::make_nvp("Type", type), + cereal::make_nvp("Platform", platform), + cereal::make_nvp("Architecture", architecture) + ); + if (type == "InfinityNextGateway") { + type = "AppSecGateway"; + } + } + + RegistrationDetails & operator=(const RegistrationDetails &) = default; + RegistrationDetails & operator=(RegistrationDetails &&) = default; + void setAgentType(const string &_type) { type = _type; } + + string + toString() const + { + return + "\n Name: " + name + + "\n Type: " + type + + "\n Platform: " + platform + + "\n Architecture: " + architecture; + } + +private: + string name; + string type; + string platform; + string architecture; +}; + +class Status +{ +public: + Status() = default; + Status(const Status &) = default; + Status(Status &&) = default; + + Status & operator=(Status &&from) = default; + Status & operator=(const Status &from) + { + last_update_status = from.last_update_status; + last_update_time = from.last_update_time; + last_update_attempt = from.last_update_attempt; + last_manifest_update = 
from.last_manifest_update; + policy_version = from.policy_version; + last_policy_update = from.last_policy_update; + last_settings_update = from.last_settings_update; + upgrade_mode = from.upgrade_mode; + fog_address = from.fog_address; + registration_status = from.registration_status; + manifest_status = from.manifest_status; + agent_id = from.agent_id; + profile_id = from.profile_id; + tenant_id = from.tenant_id; + registration_details = from.registration_details; + service_policies = from.service_policies; + service_settings = from.service_settings; + return *this; + } + + const string & getLastUpdateAttempt() const { return last_update_attempt; } + const string & getUpdateStatus() const { return last_update_status; } + const string & getUpdateTime() const { return last_update_time; } + const string & getLastManifestUpdate() const { return last_manifest_update; } + const string & getPolicyVersion() const { return policy_version; } + const string & getLastPolicyUpdate() const { return last_policy_update; } + const string & getLastSettingsUpdate() const { return last_settings_update; } + const string & getUpgradeMode() const { return upgrade_mode; } + const string & getFogAddress() const { return fog_address; } + const string & getRegistrationStatus() const { return registration_status; } + const string & getAgentId() const { return agent_id; } + const string & getProfileId() const { return profile_id; } + const string & getTenantId() const { return tenant_id; } + const string & getManifestStatus() const { return manifest_status; } + const string & getManifestError() const { return manifest_error; } + const RegistrationDetails & getRegistrationDetails() const { return registration_details; } + const map & getServicePolicies() const { return service_policies; } + const map & getServiceSettings() const { return service_settings; } + + void + insertServicePolicy(const string &key, const string &value) + { + service_policies.insert(make_pair(key, value)); + } + + void + eraseServicePolicy(const string &key) + { + service_policies.erase(key); + } + + void + insertServiceSetting(const string &key, const string &value) + { + service_settings.insert(make_pair(key, value)); + } + + void + eraseServiceSetting(const string &key) + { + service_settings.erase(key); + } + + void + setIsConfigurationUpdated( + EnumArray config_types, + const string ¤t_time + ) + { + if (config_types[OrchestrationStatusConfigType::MANIFEST]) last_manifest_update = current_time; + if (config_types[OrchestrationStatusConfigType::POLICY]) last_policy_update = current_time; + if (config_types[OrchestrationStatusConfigType::SETTINGS]) last_settings_update = current_time; + } + + void + setPolicyVersion(const string &_policy_version) + { + policy_version = _policy_version; + } + + void + setRegistrationStatus(const string &_reg_status) + { + registration_status = _reg_status; + } + + void + setUpgradeMode(const string &_upgrade_mode) + { + upgrade_mode = _upgrade_mode; + } + + void + setAgentType(const string &_agent_type) + { + registration_details.setAgentType(_agent_type); + } + + void + setAgentDetails( + const string &_agent_id, + const string &_profile_id, + const string &_tenant_id) + { + agent_id = _agent_id; + profile_id = _profile_id; + tenant_id = _tenant_id; + } + + void + setLastUpdateAttempt(const string &_last_update_attempt) + { + last_update_attempt = _last_update_attempt; + } + + void + setFogAddress(const string &_fog_address) + { + fog_address = _fog_address; + } + + void + setRegistrationDetails( + const 
string &name, + const string &type, + const string &platform, + const string &arch) + { + registration_details = RegistrationDetails(name, type, platform, arch); + } + + void + setManifestStatus(const string &_manifest_status) + { + manifest_status = _manifest_status; + } + + void + setManifestError(const string &error) + { + manifest_error = error; + } + + void + setLastUpdateTime(const string &_last_update_time) + { + last_update_time = _last_update_time; + } + + void + setLastUpdateStatus(const string &_last_update_status) + { + last_update_status = _last_update_status; + } + + void + initValues() + { + last_update_attempt = "None"; + last_update_time = "None"; + last_update_status = "None"; + last_manifest_update = "None"; + last_policy_update = "None"; + last_settings_update = "None"; + fog_address = "None"; + agent_id = "None"; + profile_id = "None"; + tenant_id = "None"; + registration_status = "None"; + manifest_status = "None"; + upgrade_mode = "None"; + } + + void + recoverFields() + { + auto success_status = "Succeeded"; + if (fog_address == "None" && registration_status.find(success_status) != string::npos) { + auto agent_details = Singleton::Consume::by(); + dbgWarning(D_ORCHESTRATOR) << "Repairing status fields"; + agent_id = agent_details->getAgentId(); + profile_id = agent_details->getProfileId(); + tenant_id = agent_details->getTenantId(); + auto maybe_fog_domain = agent_details->getFogDomain(); + if (maybe_fog_domain.ok()) { + fog_address = maybe_fog_domain.unpack(); + } else { + fog_address = "None"; + } + } + } + + void + serialize(cereal::JSONOutputArchive &archive) + { + recoverFields(); + archive(cereal::make_nvp("Last update attempt", last_update_attempt)); + archive(cereal::make_nvp("Last update status", last_update_status)); + archive(cereal::make_nvp("Last update", last_update_time)); + archive(cereal::make_nvp("Last manifest update", last_manifest_update)); + archive(cereal::make_nvp("Policy version", policy_version)); + archive(cereal::make_nvp("Last policy update", last_policy_update)); + archive(cereal::make_nvp("Last settings update", last_settings_update)); + archive(cereal::make_nvp("Upgrade mode", upgrade_mode)); + archive(cereal::make_nvp("Fog address", fog_address)); + archive(cereal::make_nvp("Registration status", registration_status)); + archive(cereal::make_nvp("Registration details", registration_details)); + archive(cereal::make_nvp("Agent ID", agent_id)); + archive(cereal::make_nvp("Profile ID", profile_id)); + archive(cereal::make_nvp("Tenant ID", tenant_id)); + archive(cereal::make_nvp("Manifest status", manifest_status)); + archive(cereal::make_nvp("Service policy", service_policies)); + archive(cereal::make_nvp("Service settings", service_settings)); + } + + void + serialize(cereal::JSONInputArchive &archive) + { + archive(cereal::make_nvp("Last update attempt", last_update_attempt)); + archive(cereal::make_nvp("Last update status", last_update_status)); + archive(cereal::make_nvp("Last update", last_update_time)); + archive(cereal::make_nvp("Last manifest update", last_manifest_update)); + try { + archive(cereal::make_nvp("Policy version", policy_version)); + } catch (...) { + archive.setNextName(nullptr); + } + + archive(cereal::make_nvp("Last policy update", last_policy_update)); + archive(cereal::make_nvp("Last settings update", last_settings_update)); + + // Optional param (upgrade - new parameter) + bool is_upgrade_mode = false; + try { + archive(cereal::make_nvp("Upgrade mode", upgrade_mode)); + is_upgrade_mode = true; + } catch (...) 
{ + archive.setNextName(nullptr); + } + + if (!is_upgrade_mode) { + try { + archive(cereal::make_nvp("Update mode", upgrade_mode)); + } catch (...) { + archive.setNextName(nullptr); + } + } + + archive(cereal::make_nvp("Fog address", fog_address)); + archive(cereal::make_nvp("Registration status", registration_status)); + archive(cereal::make_nvp("Registration details", registration_details)); + archive(cereal::make_nvp("Agent ID", agent_id)); + archive(cereal::make_nvp("Profile ID", profile_id)); + archive(cereal::make_nvp("Tenant ID", tenant_id)); + archive(cereal::make_nvp("Manifest status", manifest_status)); + archive(cereal::make_nvp("Service policy", service_policies)); + archive(cereal::make_nvp("Service settings", service_settings)); + } + +private: + string last_update_time; + string last_update_status; + string last_update_attempt; + string last_manifest_update; + string policy_version; + string last_policy_update; + string last_settings_update; + string upgrade_mode; + string fog_address; + string registration_status; + string manifest_status; + string manifest_error; + string agent_id; + string profile_id; + string tenant_id; + RegistrationDetails registration_details; + map service_policies; + map service_settings; +}; + +class OrchestrationStatus::Impl : Singleton::Provide::From +{ +public: + void + writeStatusToFile() + { + auto orchestrations_status_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/orchestrations_status.json", + "orchestration", + "Orchestration status path" + ); + auto write_result = + orchestration_tools->objectToJsonFile(status, orchestrations_status_path); + if (!write_result) { + dbgWarning(D_ORCHESTRATOR) << "Failed to write Orchestration status. File: " << orchestrations_status_path; + } + dbgTrace(D_ORCHESTRATOR) << "Orchestration status file has been updated. 
File: " << orchestrations_status_path; + } + + void + recoverFields() override + { + status.recoverFields(); + } + + void + setServiceConfiguration( + const string &service_name, + const string &path, + const OrchestrationStatusConfigType &configuration_file_type + ) + { + if (shouldPolicyStatusBeIgnored(service_name, path)) return; + + switch (configuration_file_type) { + case OrchestrationStatusConfigType::POLICY: + status.insertServicePolicy(service_name, path); + return; + case OrchestrationStatusConfigType::SETTINGS: + status.insertServiceSetting(service_name, path); + return; + case OrchestrationStatusConfigType::MANIFEST: + dbgAssert(false) << "Manifest is not a service configuration file type"; + break; + case OrchestrationStatusConfigType::DATA: + return; + case OrchestrationStatusConfigType::COUNT: + break; + } + dbgAssert(false) << "Unknown configuration file type"; + } + + void + init() + { + time = Singleton::Consume::by(); + orchestration_tools = Singleton::Consume::by(); + initValues(); + loadFromFile(); + + filesystem_prefix = getFilesystemPathConfig(); + dbgTrace(D_ORCHESTRATOR) + << "Initializing Orchestration status, file system path prefix: " + << filesystem_prefix; + + map service_policies_copy = status.getServicePolicies(); + for (const pair &policy: service_policies_copy) { + setServiceConfiguration(policy.first, policy.second, OrchestrationStatusConfigType::POLICY); + } + + auto mainloop = Singleton::Consume::by(); + mainloop->addRecurringRoutine( + I_MainLoop::RoutineType::Timer, + seconds(5), + [this] () + { + dbgTrace(D_ORCHESTRATOR) << "Write Orchestration status file "; + writeStatusToFile(); + }, + "Write Orchestration status file" + ); + } + +private: + void initValues(); + bool shouldPolicyStatusBeIgnored(const string &service_name, const string &path); + + void + loadFromFile() + { + auto orchestrations_status_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/orchestrations_status.json", + "orchestration", + "Orchestration status path" + ); + Maybe maybe_status_file = + orchestration_tools->jsonFileToObject(orchestrations_status_path); + if (!maybe_status_file.ok()) { + dbgTrace(D_ORCHESTRATOR) + << "Failed to load Orchestration status, start with clear status." + << " Error: " << maybe_status_file.getErr(); + return; + } + + status = maybe_status_file.unpack(); + + dbgInfo(D_ORCHESTRATOR) << "Orchestration status loaded from file." 
<< " File: " << orchestrations_status_path; + } + + const string & getLastUpdateAttempt() const override { return status.getLastUpdateAttempt(); } + const string & getUpdateStatus() const override { return status.getUpdateStatus(); } + const string & getUpdateTime() const override { return status.getUpdateTime(); } + const string & getLastManifestUpdate() const override { return status.getLastManifestUpdate(); } + const string & getPolicyVersion() const override { return status.getPolicyVersion(); } + const string & getLastPolicyUpdate() const override { return status.getLastPolicyUpdate(); } + const string & getLastSettingsUpdate() const override { return status.getLastSettingsUpdate(); } + const string & getUpgradeMode() const override { return status.getUpgradeMode(); } + const string & getFogAddress() const override { return status.getFogAddress(); } + const string & getRegistrationStatus() const override { return status.getRegistrationStatus(); } + const string & getAgentId() const override { return status.getAgentId(); } + const string & getProfileId() const override { return status.getProfileId(); } + const string & getTenantId() const override { return status.getTenantId(); } + const string & getManifestStatus() const override { return status.getManifestStatus(); } + const string & getManifestError() const override { return status.getManifestError(); } + const string getRegistrationDetails() const override { return status.getRegistrationDetails().toString(); } + const map & getServicePolicies() const override { return status.getServicePolicies(); } + const map & getServiceSettings() const override { return status.getServiceSettings(); } + + void + setIsConfigurationUpdated(EnumArray config_types) override + { + status.setIsConfigurationUpdated(config_types, time->getLocalTimeStr()); + } + + void + setPolicyVersion(const string &_policy_version) override + { + status.setPolicyVersion(_policy_version); + } + + void + setRegistrationStatus(const string &_reg_status) override + { + status.setRegistrationStatus(_reg_status); + } + + void + setUpgradeMode(const string &_upgrade_mode) override + { + status.setUpgradeMode(_upgrade_mode); + } + + void + setAgentType(const string &_agent_type) override + { + status.setAgentType(_agent_type); + } + + void + setAgentDetails( + const string &_agent_id, + const string &_profile_id, + const string &_tenant_id) override + { + status.setAgentDetails(_agent_id, _profile_id, _tenant_id); + } + + void + setLastUpdateAttempt() override + { + status.setLastUpdateAttempt(time->getLocalTimeStr()); + } + + void + setFogAddress(const string &_fog_address) override + { + status.setFogAddress(_fog_address); + } + + void + setFieldStatus( + const OrchestrationStatusFieldType &field_type_status, + const OrchestrationStatusResult &status_result, + const string &failure_reason) override + { + string field_value = status_string_map.at(status_result) + " " + failure_reason; + switch (field_type_status) { + case OrchestrationStatusFieldType::REGISTRATION: + status.setRegistrationStatus(field_value); + return; + case OrchestrationStatusFieldType::MANIFEST: + status.setManifestStatus(field_value); + status.setManifestError(failure_reason); + return; + case OrchestrationStatusFieldType::LAST_UPDATE: + if (status_result == OrchestrationStatusResult::SUCCESS) { + status.setLastUpdateTime(time->getLocalTimeStr()); + } + if (status.getUpdateStatus() != field_value) { + writeStatusToFile(); + } + status.setLastUpdateStatus(field_value); + return; + case 
OrchestrationStatusFieldType::COUNT: + break; + } + } + + void + setRegistrationDetails( + const string &name, + const string &type, + const string &platform, + const string &arch) override + { + status.setRegistrationDetails(name, type, platform, arch); + } + + OrchestrationStatus::Impl & operator=(OrchestrationStatus::Impl &&from) = default; + OrchestrationStatus::Impl & operator=(const OrchestrationStatus::Impl &from) = default; + + const map status_string_map = { + { OrchestrationStatusResult::SUCCESS, "Succeeded" }, + { OrchestrationStatusResult::FAILED, "Failed. Reason:" } + }; + + Status status; + I_TimeGet *time; + I_OrchestrationTools *orchestration_tools; + string filesystem_prefix; + +}; + +void +OrchestrationStatus::Impl::initValues() +{ + status.initValues(); +} + +bool +OrchestrationStatus::Impl::shouldPolicyStatusBeIgnored( + const string &service_name, + const string &path) +{ + vector default_status_ingored_policies = { + "rules", + "zones", + "triggers", + "parameters", + "orchestration", + "webUserResponse", + "kubernetescalico", + "activeContextConfig" + }; + + auto status_ingored_policies = getSettingWithDefault>( + default_status_ingored_policies, + "orchestration", + "Orchestration status ignored policies" + ); + + auto config_content = orchestration_tools->readFile(path); + + if (!config_content.ok() || config_content.unpack().empty()) { + dbgDebug(D_ORCHESTRATOR) << "Can not read the policy for " << service_name; + return true; + } + + auto find_exist_iterator = status.getServicePolicies().find(service_name); + auto find_ignored_iterator = find(status_ingored_policies.begin(), status_ingored_policies.end(), service_name); + + if (config_content.unpack() == "{}") { + dbgDebug(D_ORCHESTRATOR) << "Skipping status print for an empty policy file. Policy name: " << service_name; + if (find_exist_iterator != status.getServicePolicies().end()) { + status.eraseServicePolicy(service_name); + } + return true; + } else if (find_ignored_iterator != status_ingored_policies.end()) { + dbgDebug(D_ORCHESTRATOR) + << "Skipping status print for the policy from a list of ignored policies. Policy name: " + << service_name; + if (find_exist_iterator != status.getServicePolicies().end()) { + status.eraseServicePolicy(service_name); + } + return true; + } + return false; +} + +void +OrchestrationStatus::init() { pimpl->init(); } + +OrchestrationStatus::OrchestrationStatus() : Component("OrchestrationStatus"), pimpl(make_unique()) {} + +OrchestrationStatus::~OrchestrationStatus() {} + +SASAL_END diff --git a/components/security_apps/orchestration/modules/package.cc b/components/security_apps/orchestration/modules/package.cc new file mode 100755 index 0000000..3ea9468 --- /dev/null +++ b/components/security_apps/orchestration/modules/package.cc @@ -0,0 +1,133 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
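+// Package models a single entry of the orchestration manifest: its download path,
+// version, checksum and checksum type, package type ("service" or "shared objects")
+// and any required packages. The cereal serialize() overloads below write and read
+// these fields as JSON, and on load validate the package name, the checksum type and
+// the package type.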
+ +#include "package.h" + +#include + +#include "sasal.h" + +SASAL_START // Orchestration - Modules + +using namespace std; +using namespace cereal; + +const map checksumMap = { + { "sha1sum", Package::ChecksumTypes::SHA1 }, + { "sha256sum", Package::ChecksumTypes::SHA256 }, + { "sha512sum", Package::ChecksumTypes::SHA512 }, + { "md5sum", Package::ChecksumTypes::MD5 }, +}; + +const map packageTypeMap = { + { "service", Package::PackageType::Service }, + { "shared objects", Package::PackageType::SharedObject }, +}; + +bool +Package::operator==(const Package &other) const +{ + return checksum_type == other.getChecksumType() && checksum_value == other.getChecksum(); +} + +bool +Package::operator!=(const Package &other) const +{ + return !((*this) == other); +} + +void +Package::serialize(JSONOutputArchive & out_archive) const +{ + string type = mapTypeToString(package_type, packageTypeMap); + string checksum_type_as_string = mapTypeToString(checksum_type, checksumMap); + out_archive(make_nvp("download-path", download_path)); + out_archive(make_nvp("relative-path", relative_path)); + out_archive(make_nvp("version", version)); + out_archive(make_nvp("name", name)); + out_archive(make_nvp("checksum-type", checksum_type_as_string)); + out_archive(make_nvp("checksum", checksum_value)); + out_archive(make_nvp("package-type", type)); + + if (require_packages.size() > 0) { + out_archive(make_nvp("require", require_packages)); + } + + if (!installable.ok()) { + out_archive(make_nvp("status", installable.ok())); + out_archive(make_nvp("message", installable.getErr())); + } +} + +void +Package::serialize(JSONInputArchive & in_archive) +{ + string type; + string checksum_type_as_string; + in_archive(make_nvp("download-path", download_path)); + in_archive(make_nvp("version", version)); + in_archive(make_nvp("name", name)); + in_archive(make_nvp("checksum-type", checksum_type_as_string)); + in_archive(make_nvp("checksum", checksum_value)); + in_archive(make_nvp("package-type", type)); + + try { + in_archive(make_nvp("relative-path", relative_path)); + } catch (...) { + in_archive.setNextName(nullptr); + } + + try { + in_archive(make_nvp("require", require_packages)); + } catch (...) { + in_archive.setNextName(nullptr); + } + + bool is_installable = true; + try { + in_archive(make_nvp("status", is_installable)); + } catch (...) { + in_archive.setNextName(nullptr); + } + + if (!is_installable) { + string error_message; + try { + in_archive(make_nvp("message", error_message)); + } catch (...) { + in_archive.setNextName(nullptr); + } + installable = genError(error_message); + } + + for (auto &character : name) { + // Name Validation: should include only: decimal digit / letter / '.' / '_' / '-' + if (!isalnum(character) && character != '.' 
&& character != '_' && character != '-') { + throw Exception(name + " is invalid package name"); + } + } + + auto checksum_type_value = checksumMap.find(checksum_type_as_string); + if (checksum_type_value == checksumMap.end()) { + throw Exception(checksum_type_as_string + " isn't a valid checksum type at " + name); + } + checksum_type = checksum_type_value->second; + + auto package_type_value = packageTypeMap.find(type); + if (package_type_value == packageTypeMap.end()) { + throw Exception(checksum_type_as_string + " isn't a valid package type at " + name); + } + package_type = package_type_value->second; +} + +SASAL_END diff --git a/components/security_apps/orchestration/modules/url_parser.cc b/components/security_apps/orchestration/modules/url_parser.cc new file mode 100755 index 0000000..573507b --- /dev/null +++ b/components/security_apps/orchestration/modules/url_parser.cc @@ -0,0 +1,149 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "url_parser.h" + +#include + +#include "singleton.h" +#include "common.h" +#include "maybe_res.h" +#include "sasal.h" + +using namespace std; + +SASAL_START // Orchestration - Modules + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +ostream & +operator<<(ostream &os, const URLParser &url) +{ + return os << url.toString(); +} + +ostream & +operator<<(ostream &os, const URLProtocol &protocol) +{ + switch(protocol) { + case URLProtocol::HTTP: { + return os << "http://"; + } + case URLProtocol::HTTPS: { + return os << "https://"; + } + case URLProtocol::LOCAL_FILE: { + return os << "file://"; + } + default: { + dbgAssert(false) << "Unsupported protocol " << static_cast(protocol); + return os; + } + } +} + +URLParser::URLParser(const string &url) +{ + parseURL(url); +} + +Maybe +URLParser::getBaseURL() const +{ + if (base_url.empty()) return genError("Error: URL not found"); + return base_url; +} + +void +URLParser::parseURL(const string &url) +{ + string url_builder; + protocol = parseProtocol(url); + switch(protocol) { + case URLProtocol::HTTP: { + dbgDebug(D_ORCHESTRATOR) << "Protocol of " << url << " is HTTP"; + port = "80"; + over_ssl = false; + url_builder = url.substr(7); + break; + } + case URLProtocol::HTTPS: { + dbgDebug(D_ORCHESTRATOR) << "Protocol of " << url << " is HTTPS"; + if (url.find("https://") != string::npos) { + url_builder = url.substr(8); + } else { + url_builder = url; + } + port = "443"; + over_ssl = true; + break; + } + case URLProtocol::LOCAL_FILE: { + dbgDebug(D_ORCHESTRATOR) << "Protocol of " << url << " is local file."; + base_url = url.substr(7); + return; + } + default: { + dbgAssert(false) << "URL protocol is not supported. 
Protocol: " << static_cast(protocol); + return; + } + } + + size_t link_extension_position = url_builder.find_first_of("/"); + if (link_extension_position != string::npos) { + query = url_builder.substr(link_extension_position); + url_builder = url_builder.substr(0, link_extension_position); + } + + size_t port_position = url_builder.find_last_of(":"); + string link = url_builder; + if (port_position != string::npos) { + link = url_builder.substr(0, port_position); + port = url_builder.substr(port_position + 1); + } + + if (!link.empty()) base_url = link; + if (!query.empty() && query.back() == '/') query.pop_back(); +} + +URLProtocol +URLParser::parseProtocol(const string &url) const +{ + if (url.find("http://") != string::npos) { + return URLProtocol::HTTP; + } else if (url.find("https://") != string::npos) { + return URLProtocol::HTTPS; + } else if (url.find("file://") != string::npos){ + return URLProtocol::LOCAL_FILE; + } + + dbgWarning(D_ORCHESTRATOR) + << "No supported protocol in URL, HTTPS default value is used. URL: " << url; + return URLProtocol::HTTPS; +} + +void +URLParser::setQuery(const string &new_query) +{ + query = new_query; +} + +string +URLParser::toString() const +{ + stringstream s_build; + s_build << protocol << base_url << query << ":" << port; + return s_build.str(); +} + +SASAL_END diff --git a/components/security_apps/orchestration/orchestration_comp.cc b/components/security_apps/orchestration/orchestration_comp.cc new file mode 100755 index 0000000..7cea246 --- /dev/null +++ b/components/security_apps/orchestration/orchestration_comp.cc @@ -0,0 +1,1629 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
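+// OrchestrationComp implements the agent's main update loop. init() registers the
+// REST calls and the health-check listener and schedules run(), which retries start()
+// (policy load, Fog address setup, agent authentication) until it succeeds and then
+// repeatedly calls checkUpdate() to fetch and apply manifest, policy, settings and
+// data updates, increasing the sleep interval after repeated failures.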
+ +#include "orchestration_comp.h" + +#include +#include +#include + +#include "common.h" +#include "singleton.h" +#include "config.h" +#include "version.h" +#include "log_generator.h" +#include "downloader.h" +#include "package_handler.h" +#include "orchestration_policy.h" +#include "service_controller.h" +#include "manifest_controller.h" +#include "url_parser.h" +#include "i_messaging.h" +#include "sasal.h" +#include "agent_details_report.h" +#include "maybe_res.h" +#include "customized_cereal_map.h" +#include "orchestrator/data.h" +#include "health_check_status/health_check_status.h" +#include "get_status_rest.h" +#include "hybrid_mode_telemetry.h" +#include "telemetry.h" + +SASAL_START // Orchestration - Main + +using namespace std; +using namespace chrono; +using namespace ReportIS; + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +class HealthCheckStatusListener : public Listener +{ +public: + void upon(const HealthCheckStatusEvent &) override {} + + HealthCheckStatusReply + respond(const HealthCheckStatusEvent &) override + { + return HealthCheckStatusReply(comp_name, status, extended_status); + } + + string getListenerName() const override { return "HealthCheckStatusListener"; } + + void + setStatus( + HealthCheckStatus _status, + OrchestrationStatusFieldType _status_field_type, + const string &_status_description = "Success") + { + string status_field_type_str = convertOrchestrationStatusFieldTypeToStr(_status_field_type); + extended_status[status_field_type_str] = _status_description; + field_types_status[status_field_type_str] = _status; + + switch(_status) { + case HealthCheckStatus::UNHEALTHY: { + status = HealthCheckStatus::UNHEALTHY; + return; + } + case HealthCheckStatus::DEGRADED: { + for (const auto &type_status : field_types_status) { + if ((type_status.first != status_field_type_str) + && (type_status.second == HealthCheckStatus::UNHEALTHY)) + { + return; + } + } + status = HealthCheckStatus::DEGRADED; + return; + } + case HealthCheckStatus::HEALTHY: { + for (const auto &type_status : field_types_status) { + if ((type_status.first != status_field_type_str) + && (type_status.second == HealthCheckStatus::UNHEALTHY + || type_status.second == HealthCheckStatus::DEGRADED) + ) + { + return; + } + status = HealthCheckStatus::HEALTHY; + } + return; + } + case HealthCheckStatus::IGNORED: { + return; + } + } + } + +private: + string + convertOrchestrationStatusFieldTypeToStr(OrchestrationStatusFieldType type) + { + switch (type) { + case OrchestrationStatusFieldType::REGISTRATION : return "Registration"; + case OrchestrationStatusFieldType::MANIFEST : return "Manifest"; + case OrchestrationStatusFieldType::LAST_UPDATE : return "Last Update"; + case OrchestrationStatusFieldType::COUNT : return "Count"; + } + + dbgError(D_ORCHESTRATOR) << "Trying to convert unknown orchestration status field to string."; + return ""; + } + + string comp_name = "Orchestration"; + HealthCheckStatus status = HealthCheckStatus::IGNORED; + map extended_status; + map field_types_status; +}; + +class OrchestrationComp::Impl +{ +public: + explicit Impl() {} + + void + init() + { + filesystem_prefix = getFilesystemPathConfig(); + dbgTrace(D_ORCHESTRATOR) + << "Initializing Orchestration component, file system path prefix: " + << filesystem_prefix; + Singleton::Consume::by()->readAgentDetails(); + setAgentDetails(); + doEncrypt(); + health_check_status_listener.registerListener(); + + auto rest = Singleton::Consume::by(); + rest->addRestCall(RestAction::SHOW, "orchestration-status"); + rest->addRestCall(RestAction::ADD, 
"proxy"); + // Main loop of the Orchestration. + Singleton::Consume::by()->addOneTimeRoutine( + I_MainLoop::RoutineType::RealTime, + [this] () { run(); }, + "Orchestration runner", + true + ); + hybrid_mode_metric.init( + "Watchdog Metrics", + ReportIS::AudienceTeam::AGENT_CORE, + ReportIS::IssuingEngine::AGENT_CORE, + chrono::minutes(10), + true, + ReportIS::Audience::INTERNAL + ); + hybrid_mode_metric.registerListener(); + } + + void + fini() + { + Singleton::Consume::by()->writeStatusToFile(); + } + +private: + Maybe + start() + { + bool enforce_policy_flag = false; + Maybe maybe_policy = genError("Empty policy"); + string policy_version = ""; + auto orchestration_policy_file = getPolicyConfigPath("orchestration", Config::ConfigFileType::Policy); + + auto orchestration_tools = Singleton::Consume::by(); + if (orchestration_tools->doesFileExist(orchestration_policy_file)) { + maybe_policy = loadOrchestrationPolicy(); + if (!maybe_policy.ok()) { + dbgWarning(D_ORCHESTRATOR) << "Failed to load Orchestration policy. Error: " << maybe_policy.getErr(); + enforce_policy_flag = true; + } + dbgDebug(D_ORCHESTRATOR) << "Orchestration is restarting"; + + auto policy_file_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/policy.json", + "orchestration", + "Policy file path" + ); + auto settings_file_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/settings.json", + "orchestration", + "Settings file path" + ); + + auto service_controller = Singleton::Consume::by(); + if (!service_controller->updateServiceConfiguration(policy_file_path, settings_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to load the policy and settings"; + } + + policy_version = service_controller->getPolicyVersion(); + Singleton::Consume::by()->setPolicyVersion(policy_version); + } else { + dbgDebug(D_ORCHESTRATOR) << "Orchestration is running for the first time"; + enforce_policy_flag = true; + } + + if (enforce_policy_flag) { + // Trying to create the Orchestration policy from the general policy file + maybe_policy = enforceOrchestrationPolicy(); + if (!maybe_policy.ok()) { + return genError(maybe_policy.getErr()); + } + reloadConfiguration(); + } + + char *fog_address_env = getenv("FOG_ADDRESS"); + string fog_address = fog_address_env ? string(fog_address_env) : maybe_policy.unpack().getFogAddress(); + + if (updateFogAddress(fog_address)) { + policy = maybe_policy.unpack(); + } else { + return genError("Failed to set fog address from policy"); + } + + auto update_communication = Singleton::Consume::by(); + auto authentication_res = update_communication->authenticateAgent(); + if (authentication_res.ok() && !policy_version.empty()) { + auto path_policy_version = update_communication->sendPolicyVersion(policy_version); + if (!path_policy_version.ok()) { + dbgWarning(D_ORCHESTRATOR) << path_policy_version.getErr(); + } + } + + auto agent_mode = Singleton::Consume::by()->getOrchestrationMode(); + if (agent_mode == OrchestrationMode::HYBRID) { + return Maybe(); + } + return authentication_res; + } + + Maybe + loadOrchestrationPolicy() + { + auto maybe_policy = loadDefaultOrchestrationPolicy(); + if (!maybe_policy.ok()) { + dbgWarning(D_ORCHESTRATOR) << "Failed to load Orchestration Policy. 
Trying to load from backup."; + maybe_policy = loadOrchestrationPolicyFromBackup(); + } + return maybe_policy; + } + + Maybe + loadDefaultOrchestrationPolicy() + { + auto orchestration_policy_file = getPolicyConfigPath("orchestration", Config::ConfigFileType::Policy); + + auto orchestration_tools = Singleton::Consume::by(); + auto maybe_policy = orchestration_tools->jsonFileToObject(orchestration_policy_file); + if (maybe_policy.ok()) { + return maybe_policy; + } + + return genError("Failed to load default Orchestration policy. Error: " + maybe_policy.getErr()); + } + + Maybe + loadOrchestrationPolicyFromBackup() + { + auto orchestration_policy_file = getPolicyConfigPath("orchestration", Config::ConfigFileType::Policy); + + auto backup_ext = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + auto orchestration_tools = Singleton::Consume::by(); + auto maybe_policy = orchestration_tools->jsonFileToObject( + orchestration_policy_file + backup_ext + ); + + if (maybe_policy.ok()) { + if (!recoverBackupOrchestrationPolicy()) { + dbgWarning(D_ORCHESTRATOR) + << "Succeed to load policy from backup, " + << "but failed to write it to Orchestration default policy file."; + } + + return maybe_policy; + } + + return genError("Failed to load Orchestration policy from backup."); + } + + Maybe + enforceOrchestrationPolicy() + { + Maybe maybe_policy(genError("Failed to enforce Orchestration policy")); + auto policy_file_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/policy.json", + "orchestration", + "Policy file path" + ); + auto orchestration_policy_file = getPolicyConfigPath("orchestration", Config::ConfigFileType::Policy); + + auto settings_file_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/settings.json", + "orchestration", + "Settings file path" + ); + + dbgInfo(D_ORCHESTRATOR) + << "Enforcing new configuration. Policy file: " + << policy_file_path + << ", Settings file: " + << settings_file_path; + + auto orchestration_tools = Singleton::Consume::by(); + auto service_controller = Singleton::Consume::by(); + if (service_controller->updateServiceConfiguration(policy_file_path, settings_file_path)) { + maybe_policy = orchestration_tools->jsonFileToObject(orchestration_policy_file); + } else { + dbgWarning(D_ORCHESTRATOR) << "Failed to enforce Orchestration policy. File: " << policy_file_path; + } + + if (!maybe_policy.ok()) { + dbgDebug(D_ORCHESTRATOR) << "Enforcing policy file from backup"; + string backup_ext = getConfigurationWithDefault( + ".bk", + "orchestration", + "Backup file extension" + ); + + dbgInfo(D_ORCHESTRATOR) << "Recovering the policy file from backup."; + if (!orchestration_tools->copyFile(policy_file_path + backup_ext, policy_file_path)) { + return genError("Failed to copy orchestration policy from backup policy.json file."); + } + // Try to use the backup policy.json file and re-write the services's policies. + if (service_controller->updateServiceConfiguration(policy_file_path, settings_file_path)) { + maybe_policy= orchestration_tools->jsonFileToObject(orchestration_policy_file); + } + } + + if (maybe_policy.ok()) { + return maybe_policy; + } + + dbgDebug(D_ORCHESTRATOR) << "Failed to load Orchestration policy. 
Error: " << maybe_policy.getErr(); + return genError("Failed to load Orchestration policy from the main policy."); + } + + bool + recoverBackupOrchestrationPolicy() + { + auto conf_path = getPolicyConfigPath("orchestration", Config::ConfigFileType::Policy); + + string backup_ext = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + string backup_orchestration_conf_file = conf_path + backup_ext; + + auto orchestration_tools = Singleton::Consume::by(); + return orchestration_tools->copyFile(backup_orchestration_conf_file, conf_path); + } + + Maybe + handleManifestUpdate(const OrchManifest &orch_manifest) + { + if (!orch_manifest.ok()) return Maybe(); + + // Handling manifest update. + dbgInfo(D_ORCHESTRATOR) << "There is a new manifest file."; + GetResourceFile resource_file(GetResourceFile::ResourceFileType::MANIFEST); + Maybe new_manifest_file = + Singleton::Consume::by()->downloadFileFromFog( + orch_manifest.unpack(), + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE, + resource_file + ); + + auto orch_status = Singleton::Consume::by(); + auto service_controller = Singleton::Consume::by(); + auto agent_details = Singleton::Consume::by(); + static int service_to_port_size = service_controller->getServiceToPortMap().size(); + if (!new_manifest_file.ok()) { + string install_error; + if (!service_to_port_size) { + string error_hostname_addition = ""; + auto maybe_hostname = Singleton::Consume::by()->getHostname(); + if (maybe_hostname.ok()) { + error_hostname_addition = " on host '" + maybe_hostname.unpack() + "'"; + } + install_error = + "Critical Error: Agent/Gateway was not fully deployed" + + error_hostname_addition + + " and is not enforcing a security policy. Retry installation or contact Check Point support."; + } else { + install_error = + "Warning: Agent/Gateway '" + + agent_details->getAgentId() + + "' software update failed. Agent is running previous software. Contact Check Point support."; + } + dbgTrace(D_ORCHESTRATOR) + << "Manifest failed to be updated. Error: " + << new_manifest_file.getErr() + << " Presenting the next message to the user: " + << install_error; + orch_status->setFieldStatus( + OrchestrationStatusFieldType::MANIFEST, + OrchestrationStatusResult::FAILED, + install_error + ); + + health_check_status_listener.setStatus( + HealthCheckStatus::UNHEALTHY, + OrchestrationStatusFieldType::MANIFEST, + install_error + ); + + return genError(install_error); + } + + auto manifest_controller = Singleton::Consume::by(); + if (!manifest_controller->updateManifest(new_manifest_file.unpack())) { + string install_error = + "Warning: Agent/Gateway '" + + agent_details->getAgentId() + + "' software update failed. Agent is running previous software. Contact Check Point support."; + string current_error = orch_status->getManifestError(); + if (current_error.find("Gateway was not fully deployed") == string::npos) { + orch_status->setFieldStatus( + OrchestrationStatusFieldType::MANIFEST, + OrchestrationStatusResult::FAILED, + install_error + ); + } else { + install_error = current_error; + } + dbgTrace(D_ORCHESTRATOR) + << "Manifest failed to be updated. 
Presenting the next message to the user: " + << install_error; + + health_check_status_listener.setStatus( + HealthCheckStatus::UNHEALTHY, + OrchestrationStatusFieldType::MANIFEST, + install_error + ); + + return genError(install_error); + } + + orch_status->setFieldStatus( + OrchestrationStatusFieldType::MANIFEST, + OrchestrationStatusResult::SUCCESS + ); + health_check_status_listener.setStatus( + HealthCheckStatus::HEALTHY, + OrchestrationStatusFieldType::MANIFEST + ); + + ifstream restart_watchdog_orch(filesystem_prefix + "/orchestration/restart_watchdog"); + if (restart_watchdog_orch.good()) { + ofstream restart_watchdog("/tmp/restart_watchdog", ofstream::out); + restart_watchdog.close(); + remove((filesystem_prefix + "/orchestration/restart_watchdog").c_str()); + restart_watchdog_orch.close(); + } + + string manifest_success_notification_message( + "Agent/Gateway '" + + agent_details->getAgentId() + + "' software update succeeded. Agent is running latest software." + ); + LogGen manifest_success_notification( + manifest_success_notification_message, + ReportIS::Level::ACTION, + ReportIS::Audience::SECURITY, + ReportIS::Severity::INFO, + ReportIS::Priority::LOW, + ReportIS::Tags::ORCHESTRATOR + ); + manifest_success_notification.addToOrigin(LogField("eventTopic", "Agent Profiles")); + manifest_success_notification << LogField("notificationId", "4165c3b1-e9bc-44c3-888b-863e204c1bfb"); + + return Maybe(); + } + + Maybe + handlePolicyUpdate(const OrchPolicy &new_policy, const string &settings_path, const vector &data_updates) + { + if (!new_policy.ok()) return Maybe(); + // Handling policy update. + dbgInfo(D_ORCHESTRATOR) << "There is a new policy file."; + GetResourceFile resource_file(GetResourceFile::ResourceFileType::POLICY); + Maybe new_policy_file = + Singleton::Consume::by()->downloadFileFromFog( + new_policy.unpack(), + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE, + resource_file + ); + if (!new_policy_file.ok()) { + return genError("Failed to download the new policy file. Error: " + new_policy_file.getErr()); + } + + // Calculate the changes between the existing policy to the new one. + auto service_controller = Singleton::Consume::by(); + string old_policy_version = service_controller->getPolicyVersion(); + bool res = service_controller->updateServiceConfiguration( + new_policy_file.unpack(), + settings_path, + data_updates + ); + + if (!res) { + string updated_policy_version = service_controller->getUpdatePolicyVersion(); + string error_str = + "Failed to update services' policy configuration files. Previous version: " + + old_policy_version + + ". 
New version: " + + updated_policy_version; + + auto policy_file = getConfigurationWithDefault( + filesystem_prefix + "/conf/policy.json", + "orchestration", + "Policy file path" + ); + auto setting_file = getConfigurationWithDefault( + filesystem_prefix + "/conf/settings.json", + "orchestration", + "Settings file path" + ); + + service_controller->updateServiceConfiguration(policy_file, setting_file, data_updates); + LogGen( + error_str, + Audience::SECURITY, + Severity::CRITICAL, + Priority::HIGH, + Tags::ORCHESTRATOR + ) + << LogField("policyVersion", updated_policy_version) + << LogField("previousPolicyVersion", old_policy_version); + + return genError(error_str); + } + + // Reload the orchestration policy, in case of the policy updated + auto orchestration_policy = loadDefaultOrchestrationPolicy(); + if (!orchestration_policy.ok()) { + return genError("Failed to load new Orchestration policy file."); + } + + if (!updateFogAddress(orchestration_policy.unpack().getFogAddress())) { + dbgWarning(D_ORCHESTRATOR) << "Failed to update the new Fog address."; + if (!updateFogAddress(policy.getFogAddress())) { + dbgWarning(D_ORCHESTRATOR) << "Failed to restore the old Fog address."; + } + return genError("Failed to load Orchestration new policy file, fog update failed."); + } + + policy = orchestration_policy.unpack(); + + string new_policy_version = service_controller->getPolicyVersion(); + Singleton::Consume::by()->setPolicyVersion(new_policy_version); + auto update_communication = Singleton::Consume::by(); + auto path_policy_version = update_communication->sendPolicyVersion(new_policy_version); + if (!path_policy_version.ok()) { + dbgWarning(D_ORCHESTRATOR) << path_policy_version.getErr(); + } + + reloadConfiguration(); + if (getProfileAgentSettingWithDefault(false, "agent.config.orchestration.reportAgentDetail")) { + reportAgentDetailsMetaData(); + } + + dbgTrace(D_ORCHESTRATOR) + << "Update policy" + << " from version: " + old_policy_version + << " to version: " + new_policy_version; + LogGen( + "Agent's policy has been updated", + Audience::SECURITY, + Severity::INFO, + Priority::LOW, + Tags::ORCHESTRATOR, + Notification::POLICY_UPDATE + ) << LogField("policyVersion", new_policy_version) << LogField("fromVersion", old_policy_version); + + Singleton::Consume::by()->addOneTimeRoutine( + I_MainLoop::RoutineType::Offline, + [this, new_policy_version] () + { + chrono::microseconds curr_time = Singleton::Consume::by()->getWalltime(); + AudienceTeam audience_team = AudienceTeam::NONE; + auto i_env = Singleton::Consume::by(); + auto team = i_env->get("Audience Team"); + if (team.ok()) audience_team = *team; + + Report policy_update_message( + "Agent's policy has been updated", + curr_time, + Type::EVENT, + Level::LOG, + LogLevel::INFO, + Audience::INTERNAL, + audience_team, + Severity::INFO, + Priority::LOW, + chrono::seconds(0), + LogField("agentId", Singleton::Consume::by()->getAgentId()), + Tags::ORCHESTRATOR + ); + policy_update_message.addToOrigin(LogField("policyVersion", new_policy_version)); + + LogRest policy_update_message_client_rest(policy_update_message); + + Singleton::Consume::by()->sendObjectWithPersistence( + policy_update_message_client_rest, + I_Messaging::Method::POST, + "/api/v1/agents/events", + "", + true, + MessageTypeTag::REPORT + ); + }, + "Send policy update report" + ); + + dbgInfo(D_ORCHESTRATOR) << "Policy update report was successfully sent to fog"; + + return Maybe(); + } + + Maybe + handleDataUpdate(const OrchData &orch_data, vector &data_updates) + { + if 
(!orch_data.ok()) return Maybe(); + + auto service_name = Singleton::Consume::by()->get("Service Name"); + if (service_name.ok() && *service_name == "WLP Standalone") { + dbgInfo(D_ORCHESTRATOR) << "Skipping download of Data file update"; + return Maybe(); + } + + dbgInfo(D_ORCHESTRATOR) << "There is a new data file."; + const string data_file_dir = filesystem_prefix + "/conf/data"; + + auto orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->doesDirectoryExist(data_file_dir)) { + orchestration_tools->createDirectory(data_file_dir); + } + const auto data_file_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/data.json", + "orchestration", + "Data file path" + ); + GetResourceFile resource_file(GetResourceFile::ResourceFileType::DATA); + Maybe new_data_files = Singleton::Consume::by()->downloadFileFromFog( + orch_data.unpack(), + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE, + resource_file + ); + + if (!new_data_files.ok()) { + return genError("Failed to download new data file, Error: " + new_data_files.getErr()); + } + auto new_data_file_input = orchestration_tools->readFile(new_data_files.unpack()); + if (!new_data_file_input.ok()) { + return genError("Failed to read new data file, Error: " + new_data_file_input.getErr()); + } + + map parsed_data; + dbgDebug(D_ORCHESTRATOR) << "Parsing data from " << new_data_files.unpack(); + try { + stringstream is(new_data_file_input.unpack()); + cereal::JSONInputArchive archive_in(is); + cereal::load(archive_in, parsed_data); + } catch (exception &e) { + dbgDebug(D_ORCHESTRATOR) + << "Failed to load data from JSON file. Error: " + << e.what() + << ". Content: " + << new_data_files.unpack(); + return genError(e.what()); + } + + for (const auto &data_file : parsed_data) { + const string data_file_save_path = getPolicyConfigPath(data_file.first, Config::ConfigFileType::Data); + Maybe new_data_file = + Singleton::Consume::by()->downloadFileFromURL( + data_file.second.getDownloadPath(), + data_file.second.getChecksum(), + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE, + "data_" + data_file.first + ); + + if (!new_data_file.ok()) { + dbgWarning(D_ORCHESTRATOR) << "Failed to download the " << data_file.first << " data file."; + return new_data_file.passErr(); + } + auto data_new_checksum = getChecksum(new_data_file.unpack()); + if (data_new_checksum != data_file.second.getChecksum()) { + stringstream current_error; + current_error << "No match for the checksums of the expected and the downloaded data file:" + << " Expected checksum: " + << data_file.second.getChecksum() + << ". 
Downloaded checksum: " + << data_new_checksum; + + dbgWarning(D_ORCHESTRATOR) << current_error.str(); + return genError(current_error.str()); + } + if (!orchestration_tools->copyFile(new_data_file.unpack(), data_file_save_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to copy a new data file to " << data_file_save_path; + } + + data_updates.push_back(data_file.first); + } + if (!orchestration_tools->copyFile(new_data_files.unpack(), data_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to copy a new agents' data file to " << data_file_path; + } + + return Maybe(); + } + + Maybe + handleSettingsUpdate(const OrchSettings &orch_settings, string &settings_file_path) + { + if (!orch_settings.ok()) return Maybe(); + + dbgInfo(D_ORCHESTRATOR) << "There is a new settings file."; + GetResourceFile resource_file(GetResourceFile::ResourceFileType::SETTINGS); + Maybe new_settings_file = + Singleton::Consume::by()->downloadFileFromFog( + orch_settings.unpack(), + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE, + resource_file + ); + + if (!new_settings_file.ok()) { + dbgWarning(D_ORCHESTRATOR) + << "Failed to download the new settings file. Error: " + << new_settings_file.getErr(); + return genError("Failed to download the new settings file. Error: " + new_settings_file.getErr()); + } + + auto res = updateSettingsFile(*new_settings_file); + if (res.ok()) { + settings_file_path = *res; + reloadConfiguration(); + return Maybe(); + } + + return res.passErr(); + } + + Maybe + checkUpdate() + { + auto span_scope = + Singleton::Consume::by()->startNewSpanScope(Span::ContextType::NEW); + auto manifest_checksum = getChecksum( + getConfigurationWithDefault( + filesystem_prefix + "/conf/manifest.json", + "orchestration", + "Manifest file path" + ) + ); + auto settings_checksum = getChecksum( + getConfigurationWithDefault( + filesystem_prefix + "/conf/settings.json", + "orchestration", + "Settings file path" + ) + ); + auto policy_checksum = getChecksum( + getConfigurationWithDefault( + filesystem_prefix + "/conf/policy.json", + "orchestration", + "Policy file path" + ) + ); + auto data_checksum = getChecksum( + getConfigurationWithDefault( + filesystem_prefix + "/conf/data.json", + "orchestration", + "Data file path" + ) + ); + + auto policy_version = Singleton::Consume::by()->getPolicyVersion(); + + dbgDebug(D_ORCHESTRATOR) << "Sending check update request"; + + CheckUpdateRequest request( + manifest_checksum, + policy_checksum, + settings_checksum, + data_checksum, + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE_STR, + policy_version + ); + + auto greedy_update = getProfileAgentSettingWithDefault(false, "orchestration.multitenancy.greedymode"); + greedy_update = getConfigurationWithDefault(greedy_update, "orchestration", "Multitenancy Greedy mode"); + + if (!greedy_update) { + auto tenant_manager = Singleton::Consume::by(); + for (auto const &active_tenant: tenant_manager->fetchActiveTenants()) { + auto virtual_policy_data = getPolicyTenantData(active_tenant); + request.addTenantPolicy(virtual_policy_data); + request.addTenantSettings(getSettingsTenantData(active_tenant, virtual_policy_data.getVersion())); + } + } else { + request.setGreedyMode(); + } + + auto update_communication = Singleton::Consume::by(); + auto response = update_communication->getUpdate(request); + + auto orch_status = Singleton::Consume::by(); + orch_status->setLastUpdateAttempt(); + auto upgrade_mode = getSetting("upgradeMode"); + auto agent_type = getSetting("agentType"); + if (upgrade_mode.ok()) { + 
orch_status->setUpgradeMode(upgrade_mode.unpack()); + } + if (agent_type.ok()) { + orch_status->setAgentType(agent_type.unpack()); + } + + HybridModeMetricEvent().notify(); + + if (!response.ok()) { + orch_status->setFieldStatus( + OrchestrationStatusFieldType::LAST_UPDATE, + OrchestrationStatusResult::FAILED, + response.getErr() + ); + + return genError(response.getErr()); + } + + return handleUpdate(request); + } + + Maybe + convertOrchestrationConfigTypeToString(OrchestrationStatusConfigType type) + { + switch (type) { + case OrchestrationStatusConfigType::DATA: return string("Data"); + case OrchestrationStatusConfigType::SETTINGS: return string("Settings"); + case OrchestrationStatusConfigType::MANIFEST: return string("Manifest"); + case OrchestrationStatusConfigType::POLICY: return string("Policy"); + default: { + return genError( + "Cannot convert OrchestrationStatusConfigType to string. Type: " + + to_string(static_cast(type)) + ); + } + } + } + + Maybe + handleUpdate(const CheckUpdateRequest &response) + { + auto span_scope = + Singleton::Consume::by()->startNewSpanScope(Span::ContextType::CHILD_OF); + auto agent_details = Singleton::Consume::by(); + dbgDebug(D_ORCHESTRATOR) << "Starting to handle check update response"; + + OrchManifest orch_manifest = response.getManifest(); + OrchPolicy orch_policy = response.getPolicy(); + OrchSettings orch_settings = response.getSettings(); + OrchData orch_data = response.getData(); + + auto orch_status = Singleton::Consume::by(); + orch_status->setFieldStatus( + OrchestrationStatusFieldType::LAST_UPDATE, + OrchestrationStatusResult::SUCCESS + ); + orch_status->setIsConfigurationUpdated( + EnumArray( + orch_manifest.ok(), orch_policy.ok(), orch_settings.ok(), orch_data.ok() + ) + ); + + EnumArray> update_results; + + string settings_path = ""; + update_results[OrchestrationStatusConfigType::SETTINGS] = handleSettingsUpdate(orch_settings, settings_path); + + vector data_updates; + update_results[OrchestrationStatusConfigType::DATA] = handleDataUpdate(orch_data, data_updates); + + update_results[OrchestrationStatusConfigType::POLICY] = handlePolicyUpdate( + orch_policy, + settings_path, + data_updates + ); + + if (!orch_policy.ok() && (data_updates.size() > 0 || settings_path != "")) { + auto service_controller = Singleton::Consume::by(); + bool res = service_controller->updateServiceConfiguration( + "", + settings_path, + data_updates + ); + + if (!res) { + dbgWarning(D_ORCHESTRATOR) << "Failed to update new service configuration"; + } + } + + update_results[OrchestrationStatusConfigType::MANIFEST] = handleManifestUpdate(orch_manifest); + if (!update_results[OrchestrationStatusConfigType::MANIFEST].ok()) { + string current_error = orch_status->getManifestError(); + string recommended_fix; + string msg; + bool is_deploy_error = current_error.find("Critical") != string::npos; + if (is_deploy_error) { + string error_hostname_addition = ""; + auto maybe_hostname = Singleton::Consume::by()->getHostname(); + if (maybe_hostname.ok()) { + error_hostname_addition = " on host '" + maybe_hostname.unpack() + "'"; + } + msg = + "Agent/Gateway was not fully deployed" + + error_hostname_addition + + " and is not enforcing a security policy."; + recommended_fix = "Retry installation or contact Check Point support."; + } else if (current_error.find("Warning") != string::npos) { + msg = + "Agent/Gateway '" + + agent_details->getAgentId() + + "' software update failed. 
Agent is running previous software."; + recommended_fix = "Contact Check Point support."; + } + if (!msg.empty() && !recommended_fix.empty()) { + LogGen manifest_error_notification( + msg, + ReportIS::Level::ACTION, + ReportIS::Audience::SECURITY, + is_deploy_error ? ReportIS::Severity::CRITICAL : ReportIS::Severity::HIGH, + ReportIS::Priority::URGENT, + ReportIS::Tags::ORCHESTRATOR + ); + manifest_error_notification.addToOrigin(LogField("eventTopic", "Agent Profiles")); + manifest_error_notification << LogField("eventRemediation", recommended_fix); + if (is_deploy_error) { + manifest_error_notification << LogField("notificationId", "4165c3b1-e9bc-44c3-888b-863e204c1bfb"); + } + } + } + + handleVirtualFiles(response.getVirtualSettings(), response.getVirtualPolicy(), data_updates); + + string maybe_errors; + for (OrchestrationStatusConfigType update_type : makeRange()) { + if (update_results[update_type].ok()) continue; + auto type_str = convertOrchestrationConfigTypeToString(update_type); + if (!type_str.ok()) { + continue; + } + if (maybe_errors != "") maybe_errors += ", "; + maybe_errors += (*type_str + " error: " + update_results[update_type].getErr()); + } + + if (maybe_errors != "") return genError(maybe_errors); + return Maybe(); + } + + void + handleVirtualFiles( + const Maybe> &updated_settings_tenants, + const Maybe> &updated_policy_tenants, + const vector &new_data_files) + { + if (!updated_policy_tenants.ok()) return; + + // Sorting files by tenant id; + unordered_map> sorted_files; + + // Download virtual policy + bool is_empty = true; + GetResourceFile resource_v_policy_file(GetResourceFile::ResourceFileType::VIRTUAL_POLICY); + for (const auto &tenant: *updated_policy_tenants) { + if (!tenant.getVersion().empty()) { + is_empty = false; + resource_v_policy_file.addTenant(tenant.getTenantID(), tenant.getVersion(), tenant.getChecksum()); + } + } + + if (!is_empty) { + auto new_virtual_policy_files = + Singleton::Consume::by()->downloadVirtualFileFromFog( + resource_v_policy_file, + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE + ); + if (new_virtual_policy_files.ok()) { + for (const auto &tenant_file: *new_virtual_policy_files) { + sorted_files[tenant_file.first].push_back(tenant_file.second); + } + } + } + + if (updated_settings_tenants.ok()) { + // Download virtual settings + is_empty = true; + GetResourceFile resource_v_settings_file(GetResourceFile::ResourceFileType::VIRTUAL_SETTINGS); + for (const auto &tenant: *updated_settings_tenants) { + if (!tenant.getVersion().empty()) { + is_empty = false; + resource_v_settings_file.addTenant( + tenant.getTenantID(), + tenant.getVersion(), + tenant.getChecksum() + ); + } + } + + if (!is_empty) { + auto new_virtual_settings_files = + Singleton::Consume::by()->downloadVirtualFileFromFog( + resource_v_settings_file, + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE + ); + if (new_virtual_settings_files.ok()) { + for (const auto &tenant_file: *new_virtual_settings_files) { + sorted_files[tenant_file.first].push_back(tenant_file.second); + } + } + } + } + + for (const auto downloade_files: sorted_files) { + auto files = downloade_files.second; + string policy_file = files[0]; + string setting_file = ""; + if (files.size() > 1) { + setting_file = files[1]; + auto handled_settings = updateSettingsFile(setting_file, downloade_files.first); + if (handled_settings.ok()) setting_file = *handled_settings; + } + + Singleton::Consume::by()->updateServiceConfiguration( + policy_file, + setting_file, + new_data_files, + downloade_files.first + ); + } + } 
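+ // The helpers below copy a downloaded settings file into the (optionally per-tenant)
+ // configuration directory, and compute the checksum and version values reported to
+ // the Fog for each tenant's policy and settings files.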
+ + Maybe + updateSettingsFile(const string &new_settings_file, const string &tenant_id = "") + { + // Handling settings update. + auto conf_dir = getConfigurationWithDefault( + filesystem_prefix + "/conf/", + "orchestration", + "Conf dir" + ) + (tenant_id != "" ? "tenant_" + tenant_id + "_" : ""); + + auto orchestration_tools = Singleton::Consume::by(); + string settings_file_path = conf_dir + "settings.json"; + if (!orchestration_tools->copyFile(new_settings_file, settings_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to update the settings."; + return genError("Failed to update the settings"); + } + + return settings_file_path; + } + + CheckUpdateRequest::Tenants + getPolicyTenantData(const string &tenant_id) + { + string dir = getConfigurationWithDefault( + filesystem_prefix + "/conf", + "orchestration", + "Configuration directory" + ); + + string policy_file = dir + "/tenant_" + tenant_id + "/policy.json"; + + string policy_file_checksum = getChecksum(policy_file); + string policy_file_version= getVersion(policy_file); + + return CheckUpdateRequest::Tenants(tenant_id, policy_file_checksum, policy_file_version); + } + + CheckUpdateRequest::Tenants + getSettingsTenantData(const string &tenant_id, const string &policy_version) + { + string dir = getConfigurationWithDefault( + filesystem_prefix + "/conf", + "orchestration", + "Configuration directory" + ); + + string settings_file = dir + "/tenant_" + tenant_id + "_settings.json"; + string settings_file_checksum = getChecksum(settings_file); + + return CheckUpdateRequest::Tenants(tenant_id, settings_file_checksum, policy_version); + } + + string + getChecksum(const string &file_path) + { + auto orchestration_tools = Singleton::Consume::by(); + Maybe file_checksum = orchestration_tools->calculateChecksum( + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE, + file_path + ); + + if (!file_checksum.ok()) return ""; + return file_checksum.unpack(); + } + + string + getVersion(const string &file_path) + { + string version; + auto orchestration_tools = Singleton::Consume::by(); + Maybe file_data = orchestration_tools->readFile(file_path); + + if (file_data.ok()) { + try { + stringstream in; + in.str(*file_data); + cereal::JSONInputArchive ar(in); + ar(cereal::make_nvp("version", version)); + } catch (...) {} + } + // Must be removed. 
+ if (version.empty()) return "1"; + + return version; + } + + void + encryptOldFile(const string &old_path, const string &new_path) + { + auto orchestration_tools = Singleton::Consume::by(); + auto file_data = orchestration_tools->readFile(old_path); + if (file_data.ok()) { + auto encryptor = Singleton::Consume::by(); + auto decoded_data = encryptor->base64Decode(file_data.unpack()); + if (!orchestration_tools->writeFile(decoded_data, new_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to encrypt files"; + } else { + // Removing clear data files after encrypting + orchestration_tools->removeFile(old_path); + } + } + } + + void + encryptToFile(const string &data, const string &file) + { + auto orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->writeFile(data, file)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to encrypt files"; + } + } + + void + reportAgentDetailsMetaData() + { + I_DetailsResolver *i_details_resolver = Singleton::Consume::by(); + AgentDataReport agent_data_report; + agent_data_report << AgentReportFieldWithLabel("agent_version", i_details_resolver->getAgentVersion()); + + auto platform = i_details_resolver->getPlatform(); + if (platform.ok()) agent_data_report.setPlatform(*platform); + + auto arch = i_details_resolver->getArch(); + if (arch.ok()) agent_data_report.setArchitecture(*arch); + + for (const pair details : i_details_resolver->getResolvedDetails()) { + agent_data_report << details; + } + + agent_data_report.setAgentVersion(i_details_resolver->getAgentVersion()); + + auto nginx_data = i_details_resolver->parseNginxMetadata(); + if (nginx_data.ok()) { + string nginx_version; + string config_opt; + string cc_opt; + tie(config_opt, cc_opt, nginx_version) = nginx_data.unpack(); + agent_data_report + << make_pair("nginxVersion", nginx_version) + << make_pair("configureOpt", config_opt) + << make_pair("extraCompilerOpt", cc_opt); + } else { + dbgDebug(D_ORCHESTRATOR) << nginx_data.getErr(); + } + + if (i_details_resolver->isReverseProxy()) { + agent_data_report << AgentReportFieldWithLabel("reverse_proxy", "true"); + } + + if (i_details_resolver->isKernelVersion3OrHigher()) { + agent_data_report << AgentReportFieldWithLabel("isKernelVersion3OrHigher", "true"); + } + + if (i_details_resolver->isGwNotVsx()) { + agent_data_report << AgentReportFieldWithLabel("isGwNotVsx", "true"); + } + + if (i_details_resolver->isVersionEqualOrAboveR8110()) { + agent_data_report << AgentReportFieldWithLabel("isVersionEqualOrAboveR8110", "true"); + } + +#if defined(gaia) || defined(smb) + if (i_details_resolver->compareCheckpointVersion(8100, greater_equal())) { + agent_data_report << AgentReportFieldWithLabel("isCheckpointVersionGER81", "true"); + } +#endif // gaia || smb + } + + void + doEncrypt() + { + static const string data1 = "This is fake"; + static const string data2 = "0000 is fake"; + static const string data3 = "This is 3333"; + + auto data_path = getConfigurationWithDefault( + filesystem_prefix + "/data/", + "encryptor", + "Data files directory" + ); + encryptOldFile( + getConfigurationWithDefault( + filesystem_prefix + "/conf/user-cred.json", + "message", + "User Credentials Path" + ), + data_path + user_cred_file_name + ); + + encryptToFile(data1, data_path + data1_file_name); + encryptToFile(data2, data_path + data4_file_name); + encryptToFile(data3, data_path + data6_file_name); + } + + void + run() + { + int sleep_interval = policy.getErrorSleepInterval(); + Maybe start_state(genError("Not running yet.")); + while (!(start_state = start()).ok()) { 
+ dbgError(D_ORCHESTRATOR) << "Failed to start the Orchestration. Error: " << start_state.getErr(); + health_check_status_listener.setStatus( + HealthCheckStatus::UNHEALTHY, + OrchestrationStatusFieldType::REGISTRATION, + start_state.getErr() + ); + sleep_interval = getConfigurationWithDefault( + 20, + "orchestration", + "Default sleep interval" + ); + Singleton::Consume::by()->yield(seconds(sleep_interval)); + } + + Singleton::Consume::by()->yield(chrono::seconds(1)); + + health_check_status_listener.setStatus( + HealthCheckStatus::HEALTHY, + OrchestrationStatusFieldType::REGISTRATION + ); + + LogGen( + "Check Point Orchestration nano service successfully started", + Audience::SECURITY, + Severity::INFO, + Priority::LOW, + Tags::ORCHESTRATOR) + << LogField("agentType", "Orchestration") + << LogField("agentVersion", Version::get()); + + reportAgentDetailsMetaData(); + + if (!Singleton::Consume::by()->loadAfterSelfUpdate()) { + // Should restore from backup + dbgWarning(D_ORCHESTRATOR) << "Failed to load Orchestration after self-update"; + health_check_status_listener.setStatus( + HealthCheckStatus::UNHEALTHY, + OrchestrationStatusFieldType::LAST_UPDATE, + "Failed to load Orchestration after self-update" + ); + } else { + health_check_status_listener.setStatus( + HealthCheckStatus::HEALTHY, + OrchestrationStatusFieldType::MANIFEST + ); + } + + bool is_new_success = false; + while (true) { + static int failure_count = 0; + Singleton::Consume::by()->startNewTrace(false); + auto check_update_result = checkUpdate(); + if (!check_update_result.ok()) { + failure_count++; + is_new_success = false; + sleep_interval = policy.getErrorSleepInterval(); + int failure_multiplier = 1; + if (failure_count >= 10) { + failure_count = 10; + failure_multiplier = 10; + } else if (failure_count >= 3) { + failure_multiplier = 2; + } + sleep_interval *= failure_multiplier; + dbgWarning(D_ORCHESTRATOR) + << "Failed during check update from Fog. Error: " + << check_update_result.getErr() + << ", new check will be every: " + << sleep_interval << " seconds"; + + health_check_status_listener.setStatus( + HealthCheckStatus::UNHEALTHY, + OrchestrationStatusFieldType::LAST_UPDATE, + "Failed during check update from Fog. Error: " + check_update_result.getErr() + ); + } else { + failure_count = 0; + dbgDebug(D_ORCHESTRATOR) << "Check update process completed successfully"; + health_check_status_listener.setStatus( + HealthCheckStatus::HEALTHY, + OrchestrationStatusFieldType::LAST_UPDATE + ); + sleep_interval = policy.getSleepInterval(); + if (!is_new_success) { + dbgInfo(D_ORCHESTRATOR) + << "Check update process completed successfully, new check will be every: " + << sleep_interval << " seconds"; + is_new_success = true; + } + } + + dbgDebug(D_ORCHESTRATOR) << "Next check for update will be in: " << sleep_interval << " seconds"; + Singleton::Consume::by()->finishTrace(); + Singleton::Consume::by()->yield(seconds(sleep_interval)); + } + } + + // LCOV_EXCL_START Reason: future changes will be done + void + restoreToBackup() + { + dbgWarning(D_ORCHESTRATOR) << "Reverting to the latest Orchestration service backup installation package."; + + // Copy the backup installation package to the running installation package. 
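+        // Note (added comment, describing the code below): the source and destination
+        // paths are assembled from configuration defaults; a missing backup package or
+        // a failed copy/install step is treated as fatal via dbgAssert.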
+ auto packages_dir = getConfigurationWithDefault( + filesystem_prefix + "/packages", + "orchestration", + "Packages directory" + ); + auto service_name = getConfigurationWithDefault("orchestration", "orchestration", "Service name"); + auto orchestration_dir = packages_dir + "/" + service_name; + auto current_installation_file = orchestration_dir + "/" + service_name; + auto backup_ext = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + auto backup_installation_file = current_installation_file + backup_ext; + auto temp_ext = getConfigurationWithDefault("_temp", "orchestration", "Temp file extension"); + + auto orchestration_tools = Singleton::Consume::by(); + dbgAssert(orchestration_tools->doesFileExist(backup_installation_file)) + << "There is no backup installation package"; + + dbgAssert(orchestration_tools->copyFile(backup_installation_file, current_installation_file)) + << "Failed to copy backup installation package"; + + // Copy the backup manifest file to the default manifest file path. + auto manifest_file_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/manifest.json", + "orchestration", + "Manifest file path" + ); + + if (!orchestration_tools->copyFile(manifest_file_path + backup_ext, manifest_file_path + temp_ext)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to restore manifest backup file."; + } + + auto package_handler = Singleton::Consume::by(); + // Install the backup orchestration service installation package. + dbgAssert(package_handler->preInstallPackage(service_name, current_installation_file)) + << "Failed to restore from backup, pre install test failed"; + dbgAssert(package_handler->installPackage(service_name, current_installation_file, true)) + << "Failed to restore from backup, installation failed"; + } + // LCOV_EXCL_STOP + + bool + shouldReconnectToFog( + const string &fog, + const uint16_t port, + const bool is_secure) + { + auto agent_details = Singleton::Consume::by(); + return + agent_details->getSSLFlag() != is_secure || + !agent_details->getFogPort().ok() || agent_details->getFogPort().unpack() != port || + !agent_details->getFogDomain().ok() || agent_details->getFogDomain().unpack() != fog; + } + + bool + updateFogAddress(const string &fog_addr) + { + auto orch_status = Singleton::Consume::by(); + auto agent_details = Singleton::Consume::by(); + auto orchestration_mode = getOrchestrationMode(); + agent_details->setOrchestrationMode(orchestration_mode); + if (orchestration_mode == OrchestrationMode::OFFLINE) { + orch_status->setUpgradeMode("Offline upgrades"); + orch_status->setRegistrationStatus("Offline mode"); + orch_status->setFogAddress(""); + if (agent_details->writeAgentDetails()) { + dbgDebug(D_ORCHESTRATOR) << "Agent details was successfully saved"; + } else { + dbgWarning(D_COMMUNICATION) << "Failed to save agent details to a file"; + } + return true; + } + + if (fog_addr.empty()) return false; // Fog address could not be empty on online update mode + + auto fog_params = parseURLParams(fog_addr); + if (!fog_params.ok()) { + dbgWarning(D_ORCHESTRATOR) << "Failed to update Fog address, Error: " << fog_params.getErr(); + return false; + } + string fog_domain; + string fog_query; + uint16_t fog_port = 0; + bool encrypted_fog_connection; + tie(fog_domain, fog_query, fog_port, encrypted_fog_connection) = fog_params.unpack(); + + auto message = Singleton::Consume::by(); + + if (!shouldReconnectToFog( + fog_domain, + fog_port, + encrypted_fog_connection + )) { + dbgDebug(D_ORCHESTRATOR) << "Skipping reconnection 
to the Fog - Fog details did not change"; + return true; + } + + if (message->setActiveFog(fog_domain, fog_port, encrypted_fog_connection, MessageTypeTag::GENERIC)) { + agent_details->setFogPort(fog_port); + agent_details->setFogDomain(fog_domain); + agent_details->setSSLFlag(encrypted_fog_connection); + + if (agent_details->writeAgentDetails()) { + dbgDebug(D_ORCHESTRATOR) << "Agent details was successfully saved"; + } else { + dbgWarning(D_COMMUNICATION) << "Failed to save agent details to a file"; + } + + auto update_communication = Singleton::Consume::by(); + update_communication->setAddressExtenesion(fog_query); + orch_status->setFogAddress(fog_addr); + return true; + } + + dbgWarning(D_ORCHESTRATOR) << "Failed to connect to the Fog, Address: " << fog_addr; + return false; + } + + // Returns Base URL, Query, Port, SSL + Maybe> + parseURLParams(const string &url) + { + URLParser url_parser(url); + auto fog_base_url = url_parser.getBaseURL(); + + if (!fog_base_url.ok()) return genError("Failed to parse address. Address: " + url); + + auto fog_port = url_parser.getPort(); + uint16_t port; + try { + port = stoi(fog_port); + } catch (const exception& err) { + return genError("Failed to parse port. Port: " + fog_port + ", Error:" + err.what()); + } + + return make_tuple( + fog_base_url.unpack(), + url_parser.getQuery(), + port, + url_parser.isOverSSL() + ); + } + + OrchestrationMode + getOrchestrationMode() + { + string orchestration_mode = getConfigurationFlag("orchestration-mode"); + if ( + orchestration_mode == "online_mode" || + orchestration_mode == "hybrid_mode" || + orchestration_mode == "offline_mode" + ) { + dbgTrace(D_ORCHESTRATOR) << "Orchestraion mode: " << orchestration_mode; + if (orchestration_mode == "online_mode") { + return OrchestrationMode::ONLINE; + } else if (orchestration_mode == "hybrid_mode") { + return OrchestrationMode::HYBRID; + } else { + return OrchestrationMode::OFFLINE; + } + } else if (orchestration_mode == ""){ + dbgInfo(D_ORCHESTRATOR) << + "Orchestraion mode was not found in configuration file, continue in online mode"; + } else { + dbgError(D_ORCHESTRATOR) + << "Unexpected orchestration mode found in configuration file: " + << orchestration_mode; + } + return OrchestrationMode::ONLINE; + } + + void + setAgentDetails() + { + static const string openssl_dir_cmd = "openssl version -d | cut -d\" \" -f2 | cut -d\"\\\"\" -f2"; + auto i_shell_cmd = Singleton::Consume::by(); + auto result = i_shell_cmd->getExecOutput(openssl_dir_cmd); + if (result.ok()) { + string val_openssl_dir = result.unpack(); + if (val_openssl_dir.back() == '\n') val_openssl_dir.pop_back(); + dbgTrace(D_ORCHESTRATOR) + << "Adding OpenSSL default directory to agent details. Directory: " + << val_openssl_dir; + + auto agent_details = Singleton::Consume::by(); + agent_details->setOpenSSLDir(val_openssl_dir + "/certs"); + agent_details->setOrchestrationMode(getOrchestrationMode()); + agent_details->writeAgentDetails(); + } else { + dbgWarning(D_ORCHESTRATOR) + << "Failed to load OpenSSL default certificate authority. 
Error: " + << result.getErr(); + } + } + + class AddProxyRest : public ServerRest + { + public: + void + doCall() override + { + auto agent_details = Singleton::Consume::by(); + agent_details->setProxy(proxy.get()); + agent_details->writeAgentDetails(); + } + + private: + C2S_PARAM(string, proxy); + }; + + const uint16_t default_fog_dport = 443; + OrchestrationPolicy policy; + HealthCheckStatusListener health_check_status_listener; + HybridModeMetric hybrid_mode_metric; + + string filesystem_prefix = ""; +}; + +OrchestrationComp::OrchestrationComp() + : + Component("OrchestrationComp"), + pimpl(make_unique()) +{ +} + +OrchestrationComp::~OrchestrationComp() {} + +void +OrchestrationComp::init() +{ + pimpl->init(); +} + +void +OrchestrationComp::fini() +{ + pimpl->fini(); +} + +void +OrchestrationComp::preload() +{ + Singleton::Consume::by()->registerValue("Is Orchestrator", true); + + registerExpectedConfiguration("orchestration", "Backup file extension"); + registerExpectedConfiguration("orchestration", "Multitenancy Greedy mode"); + registerExpectedConfiguration("orchestration", "Service name"); + registerExpectedConfiguration("orchestration", "Packages directory"); + registerExpectedConfiguration("orchestration", "Manifest file path"); + registerExpectedConfiguration("orchestration", "Settings file path"); + registerExpectedConfiguration("orchestration", "Data file path"); + registerExpectedConfiguration("orchestration", "Policy file path"); + registerExpectedConfiguration("orchestration", "Configuration path"); + registerExpectedConfiguration("orchestration", "Configuration directory"); + registerExpectedConfiguration("orchestration", "Default Check Point directory"); + registerExpectedConfiguration("orchestration", "Configuration file extension"); + registerExpectedConfiguration("orchestration", "Policy file extension"); + registerExpectedConfiguration("orchestration", "Temp file extension"); + registerExpectedConfiguration("orchestration", "Services ports file"); + registerExpectedConfiguration("orchestration", "Orchestration status path"); + registerExpectedConfiguration("orchestration", "Ignore packages list file path"); + registerExpectedConfiguration("orchestration", "Supported practices file path"); + registerExpectedConfiguration("orchestration", "Nginx metadata temp file"); + registerExpectedConfiguration("orchestration", "Default sleep interval"); + registerExpectedConfiguration("orchestration", "Reconfiguration timeout seconds"); + registerExpectedConfiguration("orchestration", "Download pending time frame seconds"); + registerExpectedSetting>("orchestration", "Orchestration status ignored policies"); + registerExpectedSetting("agentType"); + registerExpectedSetting("upgradeMode"); + registerExpectedConfigFile("orchestration", Config::ConfigFileType::Policy); +} + +SASAL_END diff --git a/components/security_apps/orchestration/orchestration_tools/CMakeLists.txt b/components/security_apps/orchestration/orchestration_tools/CMakeLists.txt new file mode 100755 index 0000000..8eab4de --- /dev/null +++ b/components/security_apps/orchestration/orchestration_tools/CMakeLists.txt @@ -0,0 +1,5 @@ +ADD_DEFINITIONS(-Wno-deprecated-declarations) + +add_library(orchestration_tools orchestration_tools.cc) + +add_subdirectory(orchestration_tools_ut) diff --git a/components/security_apps/orchestration/orchestration_tools/orchestration_tools.cc b/components/security_apps/orchestration/orchestration_tools/orchestration_tools.cc new file mode 100755 index 0000000..eadc2ba --- /dev/null +++ 
b/components/security_apps/orchestration/orchestration_tools/orchestration_tools.cc @@ -0,0 +1,475 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "orchestration_tools.h" + +#include "openssl/md5.h" +#include "openssl/sha.h" +#include "cereal/external/rapidjson/document.h" +#include "cereal/types/vector.hpp" +#include "cereal/types/set.hpp" + +#include +#include +#include + +#include "sasal.h" + +SASAL_START // Orchestration - Tools + +using namespace std; +using namespace rapidjson; + +static const string base64_base_str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +class OrchestrationTools::Impl : Singleton::Provide::From +{ +public: + bool packagesToJsonFile(const map &packages, const string &path) const override; + Maybe> loadPackagesFromJson(const string &path) const override; + + Maybe> + jsonObjectSplitter( + const string &json, + const string &tenant_id) const override; + + Maybe readFile(const string &path) const override; + bool writeFile(const string &text, const string &path) const override; + bool removeFile(const string &path) const override; + bool copyFile(const string &src_path, const string &dst_path) const override; + bool doesFileExist(const string &file_path) const override; + bool createDirectory(const string &directory_path) const override; + bool doesDirectoryExist(const string &dir_path) const override; + bool executeCmd(const string &cmd) const override; + bool isNonEmptyFile(const string &path) const override; + + Maybe calculateChecksum(Package::ChecksumTypes checksum_type, const string &path) const override; + + string base64Encode(const string &input) const override; + string base64Decode(const string &input) const override; + +private: + string calculateFileMd5(ifstream &file) const; + string calculateSHA256Sum(ifstream &file) const; + string calculateSHA1Sum(ifstream &file) const; + string calculateSHA512Sum(ifstream &file) const; +}; + +using packageName = I_OrchestrationTools::packageName; +using packageDetails = I_OrchestrationTools::packageDetails; + +static bool +checkExistence(const string &path, bool is_dir) +{ + try { + struct stat info; + if (stat(path.c_str(), &info) != 0) return false; + int flag = is_dir ? S_IFDIR : S_IFREG; + return info.st_mode & flag; + } catch (exception &e) { + return false; + } +} + +bool +OrchestrationTools::Impl::doesFileExist(const string &file_path) const +{ + return checkExistence(file_path, false); +} + +bool +OrchestrationTools::Impl::doesDirectoryExist(const string &dir_path) const +{ + return checkExistence(dir_path, true); +} + +bool +OrchestrationTools::Impl::writeFile(const string &text, const string &path) const +{ + dbgDebug(D_ORCHESTRATOR) << "Writing file: text = " << text << ", path = " << path; + if (path.find('/') != string::npos) { + string dir_path = path.substr(0, path.find_last_of('/')); + if (!createDirectory(dir_path)) { + dbgDebug(D_ORCHESTRATOR) << "Failed to write file because directory creation failed. 
file: " + << path; + return false; + } + } + try { + ofstream fout(path); + fout << text; + return true; + } catch (const ofstream::failure &e) { + dbgDebug(D_ORCHESTRATOR) << "Error while writing file in " << path << ", " << e.what(); + } + return false; +} + +bool +OrchestrationTools::Impl::isNonEmptyFile(const string &path) const +{ + if (!doesFileExist(path)) { + dbgDebug(D_ORCHESTRATOR) << "Cannot read file, file does not exist. File: " << path; + return false; + } + + try { + ifstream text_file(path); + if (!text_file) { + dbgDebug(D_ORCHESTRATOR) << "Cannot open file. File: " << path; + return false; + } + + char buf[1]; + text_file.read(buf, 1); + return text_file.gcount() != 0; + } catch (const ifstream::failure &e) { + dbgDebug(D_ORCHESTRATOR) << "Error while reading file " << path << ", " << e.what(); + } + + return false; +} + +Maybe +OrchestrationTools::Impl::readFile(const string &path) const +{ + if (!doesFileExist(path)) { + dbgDebug(D_ORCHESTRATOR) << "Cannot read file, file does not exist. File: " << path; + return genError("File " + path + " does not exist."); + } + try { + ifstream text_file(path); + if (!text_file) { + return genError("Cannot open file. File: " + path); + } + stringstream buffer; + buffer << text_file.rdbuf(); + return buffer.str(); + } catch (const ifstream::failure &e) { + dbgDebug(D_ORCHESTRATOR) << "Error while reading file " << path << ", " << e.what(); + return genError("Error while reading file " + path + ", " + e.what()); + } +} + +bool +OrchestrationTools::Impl::removeFile(const string &path) const +{ + if (remove(path.c_str()) != 0) { + dbgDebug(D_ORCHESTRATOR) << "Error deleting file. File: " << path; + return false; + } else { + dbgDebug(D_ORCHESTRATOR) << "Successfully deleted the file " << path; + } + return true; +} + +Maybe +OrchestrationTools::Impl::calculateChecksum(Package::ChecksumTypes checksum_type, const string &path) const +{ + if (!doesFileExist(path)) { + dbgDebug(D_ORCHESTRATOR) << "Cannot read file, file does not exist. File: " << path; + return genError("File " + path + " does not exist."); + } + try { + ifstream file(path); + if (!file) { + return genError("Cannot open file. File: " + path); + } + + switch (checksum_type) { + case Package::ChecksumTypes::MD5: + return calculateFileMd5(file); + case Package::ChecksumTypes::SHA256: + return calculateSHA256Sum(file); + case Package::ChecksumTypes::SHA1: + return calculateSHA1Sum(file); + case Package::ChecksumTypes::SHA512: + return calculateSHA512Sum(file); + } + } catch (const ifstream::failure &e) { + dbgDebug(D_ORCHESTRATOR) << "Error while reading file " << path << ", " << e.what(); + return genError("Error while reading file " + path + ", " + e.what()); + } + + dbgAssert(false) << "Checksum type is not supported. Checksum type: " << static_cast(checksum_type); + return genError("Unsupported checksum type"); +} + +bool +OrchestrationTools::Impl::copyFile(const string &src_path, const string &dst_path) const +{ + if (!doesFileExist(src_path)) { + dbgDebug(D_ORCHESTRATOR) << "Failed to copy file. File does not exist: " << src_path; + return false; + } + + if (src_path.compare(dst_path) == 0) { + dbgDebug(D_ORCHESTRATOR) << "Source path is equal to the destination path. Path: " << src_path; + return true; + } + + if (dst_path.find('/') != string::npos) { + string dir_path = dst_path.substr(0, dst_path.find_last_of('/')); + if (!createDirectory(dir_path)) { + dbgDebug(D_ORCHESTRATOR) << "Failed to copy file. 
Directory creation failed: " << dir_path; + return false; + } + } + + try { + ifstream src(src_path, ios::binary); + ofstream dest(dst_path, ios::binary); + dest << src.rdbuf(); + return true; + } catch (const ios_base::failure &e) { + dbgDebug(D_ORCHESTRATOR) << "Failed to copy file " << src_path << " to " << dst_path << ", " << e.what(); + } + return false; +} + +Maybe> +OrchestrationTools::Impl::jsonObjectSplitter(const string &json, const string &tenant_id) const +{ + Document document; + map parsed; + + document.Parse(json.c_str()); + if (document.HasParseError()) return genError("JSON file is not valid."); + + for (Value::MemberIterator itr = document.MemberBegin(); itr != document.MemberEnd(); ++itr) { + + if (!tenant_id.empty() && itr->value.IsObject()) { + + itr->value.AddMember( + Value("tenantID"), + Value(tenant_id.c_str(), tenant_id.size()), + document.GetAllocator() + ); + } + + rapidjson::StringBuffer buffer; + rapidjson::Writer writer(buffer); + itr->value.Accept(writer); + parsed.insert({itr->name.GetString(), buffer.GetString()}); + } + return parsed; +} + +bool +OrchestrationTools::Impl::packagesToJsonFile(const map &packages, const string &path) const +{ + try { + ofstream os(path); + cereal::JSONOutputArchive archive_out(os); + vector packges_vector; + for (auto p: packages) { + packges_vector.push_back(p.second); + } + archive_out(cereal::make_nvp("packages", packges_vector)); + } catch (cereal::Exception &e) { + dbgDebug(D_ORCHESTRATOR) << "Failed to write vector of packages to JSON file " << path << ", " << e.what(); + return false; + } + return true; +} + +Maybe> +OrchestrationTools::Impl::loadPackagesFromJson(const string &path) const +{ + dbgDebug(D_ORCHESTRATOR) << "Parsing packages from " << path; + try { + ifstream is(path); + cereal::JSONInputArchive archive_in(is); + vector packages_vector; + archive_in(packages_vector); + map packages; + for (auto p: packages_vector) { + packages[p.getName()] = p; + } + return packages; + } catch (const exception &e) { + dbgDebug(D_ORCHESTRATOR) << "Failed to load vector of packages from JSON file " << path << ", " << e.what(); + return genError(e.what()); + } +} + +bool +OrchestrationTools::Impl::createDirectory(const string &directory_path) const +{ + string dir; + struct stat info; + for (size_t i = 0; i < directory_path.size(); i++) { + dir.push_back(directory_path[i]); + if (directory_path[i] == '/' || i + 1 == directory_path.size()) { + if (stat(dir.c_str(), &info) != 0) { + if(mkdir(dir.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)) { + dbgDebug(D_ORCHESTRATOR) << "Failed to create directory " << directory_path; + return false; + } + } + } + } + return true; +} + +bool +OrchestrationTools::Impl::executeCmd(const string &cmd) const +{ + int ret = system(cmd.c_str()); + if (ret != 0) { + dbgDebug(D_ORCHESTRATOR) << "System command failed, " + cmd; + return false; + } + return true; +} + +string +OrchestrationTools::Impl::calculateFileMd5(ifstream &file) const +{ + MD5_CTX md5_Context; + MD5_Init(&md5_Context); + + char read_buf[512]; + while (file) { + file.read(read_buf, 512); + auto size = file.gcount(); + if (!size) break; + MD5_Update(&md5_Context, read_buf, size); + } + + unsigned char digest[16]; + MD5_Final(digest, &md5_Context); + + stringstream out; + for (int i = 0; i < MD5_DIGEST_LENGTH; ++i) { + out << setfill('0') << setw(2) << hex << (unsigned int)digest[i]; + } + return out.str(); +} + +string +OrchestrationTools::Impl::calculateSHA256Sum(ifstream &file) const +{ + unsigned char 
hash[SHA256_DIGEST_LENGTH]; + SHA256_CTX sha256; + SHA256_Init(&sha256); + + char read_buf[512]; + while (file) { + file.read(read_buf, 512); + auto size = file.gcount(); + if (!size) break; + SHA256_Update(&sha256, read_buf, size); + } + + SHA256_Final(hash, &sha256); + stringstream string_stream; + for (int i = 0; i < SHA256_DIGEST_LENGTH; i++) { + string_stream << hex << setw(2) << setfill('0') << (int)hash[i]; + } + return string_stream.str(); +} + +string +OrchestrationTools::Impl::calculateSHA1Sum(ifstream &file) const +{ + unsigned char hash[SHA_DIGEST_LENGTH]; + SHA_CTX sha1; + SHA1_Init(&sha1); + + char read_buf[512]; + while (file) { + file.read(read_buf, 512); + auto size = file.gcount(); + if (!size) break; + SHA1_Update(&sha1, read_buf, size); + } + + SHA1_Final(hash, &sha1); + stringstream string_stream; + for (int i = 0; i < SHA_DIGEST_LENGTH; i++) { + string_stream << hex << setw(2) << setfill('0') << (int)hash[i]; + } + return string_stream.str(); +} + +string +OrchestrationTools::Impl::calculateSHA512Sum(ifstream &file) const +{ + unsigned char hash[SHA512_DIGEST_LENGTH]; + SHA512_CTX sha512; + SHA512_Init(&sha512); + + char read_buf[512]; + while (file) { + file.read(read_buf, 512); + auto size = file.gcount(); + if (!size) break; + SHA512_Update(&sha512, read_buf, size); + } + + SHA512_Final(hash, &sha512); + stringstream string_stream; + for (int i = 0; i < SHA512_DIGEST_LENGTH; i++) { + string_stream << hex << setw(2) << setfill('0') << (int)hash[i]; + } + return string_stream.str(); +} + +string +OrchestrationTools::Impl::base64Encode(const string &input) const +{ + string out; + int val = 0, val_base = -6; + for (unsigned char c : input) { + val = (val << 8) + c; + val_base += 8; + while (val_base >= 0) { + out.push_back(base64_base_str[(val >> val_base) & 0x3F]); + val_base -= 6; + } + } + // -6 indicates the number of bits to take from each character + // (6 bits is enough to present a range of 0 to 63) + if (val_base > -6) out.push_back(base64_base_str[((val << 8) >> (val_base + 8)) & 0x3F]); + while (out.size() % 4) out.push_back('='); + return out; +} + +string +OrchestrationTools::Impl::base64Decode(const string &input) const +{ + string out; + vector T(256, -1); + for (int i = 0; i < 64; i++) { + T[base64_base_str[i]] = i; + } + + int val = 0, valb = -8; + for (unsigned char c : input) { + if (T[c] == -1) break; + val = (val << 6) + T[c]; + valb += 6; + if (valb >= 0) { + out.push_back(char((val >> valb) & 0xFF)); + valb -= 8; + } + } + return out; +} + +OrchestrationTools::OrchestrationTools() : Component("OrchestrationTools"), pimpl(make_unique()) {} + +OrchestrationTools::~OrchestrationTools() {} + +SASAL_END diff --git a/components/security_apps/orchestration/orchestration_tools/orchestration_tools_ut/CMakeLists.txt b/components/security_apps/orchestration/orchestration_tools/orchestration_tools_ut/CMakeLists.txt new file mode 100755 index 0000000..f96fe39 --- /dev/null +++ b/components/security_apps/orchestration/orchestration_tools/orchestration_tools_ut/CMakeLists.txt @@ -0,0 +1,7 @@ +link_directories(${ng_module_osrc_openssl_path}/lib) + +add_unit_test( + orchestration_tools_ut + "orchestration_tools_ut.cc" + "orchestration_modules;orchestration_tools;singleton;-lcrypto;" +) diff --git a/components/security_apps/orchestration/orchestration_tools/orchestration_tools_ut/orchestration_tools_ut.cc b/components/security_apps/orchestration/orchestration_tools/orchestration_tools_ut/orchestration_tools_ut.cc new file mode 100755 index 0000000..6422b4e --- 
/dev/null +++ b/components/security_apps/orchestration/orchestration_tools/orchestration_tools_ut/orchestration_tools_ut.cc @@ -0,0 +1,263 @@ +#include "orchestration_tools.h" + +#include "cptest.h" + +using namespace std; +using namespace testing; + +class OrchestrationToolsTest : public Test +{ +public: + OrchestrationToolsTest() : manifest_file("manifest.json") + { + } + + void + cleanSpaces(string &str) + { + str.erase(remove(str.begin(), str.end(), ' '), str.end()); + } + + OrchestrationTools orchestration_tools; + I_OrchestrationTools *i_orchestration_tools = Singleton::Consume::from(orchestration_tools); + string manifest_file = "manifest.json"; + string manifest_text = "{" + " \"packages\": [" + " {" + " \"download-path\": \"https://a/install_orchestration.sh\"," + " \"relative-path\": \"/install_orchestration.sh\"," + " \"name\": \"l4_firewall\"," + " \"version\": \"b\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"206afe939eb53168d70fbb777afb4e814097c4dc\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }," + " {" + " \"name\": \"orchestration\"," + " \"download-path\": \"https://a/install_orchestration.sh\"," + " \"relative-path\": \"/install_orchestration.sh\"," + " \"version\": \"c\"," + " \"checksum-type\": \"md5sum\"," + " \"checksum\": \"04417eef36f93cec4ca7a435bdcd004508dbaa83\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; +}; + +TEST_F(OrchestrationToolsTest, doNothing) +{ +} + +TEST_F(OrchestrationToolsTest, writeReadTextToFile) +{ + EXPECT_TRUE(i_orchestration_tools->writeFile(manifest_text, manifest_file)); + EXPECT_TRUE(i_orchestration_tools->doesFileExist(manifest_file)); + EXPECT_TRUE(i_orchestration_tools->isNonEmptyFile(manifest_file)); + EXPECT_EQ(manifest_text, i_orchestration_tools->readFile(manifest_file).unpack()); + + EXPECT_FALSE(i_orchestration_tools->isNonEmptyFile("no_such_file")); +} + +TEST_F(OrchestrationToolsTest, loadPackagesFromJsonTest) +{ + EXPECT_TRUE(i_orchestration_tools->writeFile("blabla", "in_test.json")); + string file_name = "in_test.json"; + Maybe> packages = i_orchestration_tools->loadPackagesFromJson(file_name); + EXPECT_FALSE(packages.ok()); + + Maybe value = i_orchestration_tools->readFile(manifest_file); + packages = i_orchestration_tools->loadPackagesFromJson(manifest_file); + EXPECT_TRUE(packages.ok()); + EXPECT_EQ(2u, packages.unpack().size()); + EXPECT_TRUE(packages.unpack().find("orchestration") != packages.unpack().end()); + EXPECT_TRUE(packages.unpack().find("l4_firewall") != packages.unpack().end()); + EXPECT_TRUE(packages.unpack().find("Hello World") == packages.unpack().end()); +} + +TEST_F(OrchestrationToolsTest, copyFile) +{ + EXPECT_TRUE(i_orchestration_tools->writeFile("blabla", "in_test.json")); + EXPECT_TRUE(i_orchestration_tools->copyFile("in_test.json", "cpy_test.json")); + EXPECT_EQ("blabla", i_orchestration_tools->readFile("cpy_test.json").unpack()); + EXPECT_FALSE(i_orchestration_tools->copyFile("NOT_EXISTS_FILE", "cpy2_test.json")); + auto read_unexists_file = i_orchestration_tools->readFile("cpy2_test.json"); + EXPECT_FALSE(read_unexists_file.ok()); + EXPECT_THAT(read_unexists_file, IsError("File cpy2_test.json does not exist.")); +} + +TEST_F(OrchestrationToolsTest, checksumTest) +{ + EXPECT_EQ("df5ea29924d39c3be8785734f13169c6", + i_orchestration_tools->calculateChecksum(Package::ChecksumTypes::MD5, "in_test.json").unpack()); + EXPECT_EQ("ccadd99b16cd3d200c22d6db45d8b6630ef3d936767127347ec8a76ab992c2ea", + 
i_orchestration_tools->calculateChecksum(Package::ChecksumTypes::SHA256, "in_test.json").unpack()); + EXPECT_EQ("bb21158c733229347bd4e681891e213d94c685be", + i_orchestration_tools->calculateChecksum(Package::ChecksumTypes::SHA1, "in_test.json").unpack()); + EXPECT_EQ("d1c2e12cfeababc8b95daf6902e210b170992e68fd1c1f19565a40cf0099c6e2cb559" + "b85d7c14ea05b4dca0a790656d003ccade9286827cffdf8e664fd271499", + i_orchestration_tools->calculateChecksum(Package::ChecksumTypes::SHA512, "in_test.json").unpack()); + EXPECT_NE( + "12342", + i_orchestration_tools->calculateChecksum(Package::ChecksumTypes::SHA256, "in_test.json").unpack() + ); +} + +TEST_F(OrchestrationToolsTest, removeTestFiles) +{ + EXPECT_TRUE(i_orchestration_tools->doesFileExist(manifest_file)); + EXPECT_TRUE(i_orchestration_tools->removeFile(manifest_file)); + EXPECT_FALSE(i_orchestration_tools->doesFileExist(manifest_file)); + + EXPECT_TRUE(i_orchestration_tools->doesFileExist(string("in_test.json"))); + EXPECT_TRUE(i_orchestration_tools->removeFile(string("in_test.json"))); + EXPECT_FALSE(i_orchestration_tools->doesFileExist(string("in_test.json"))); + + EXPECT_TRUE(i_orchestration_tools->doesFileExist(string("cpy_test.json"))); + EXPECT_TRUE(i_orchestration_tools->removeFile(string("cpy_test.json"))); + EXPECT_FALSE(i_orchestration_tools->doesFileExist(string("cpy_test.json"))); + + EXPECT_FALSE(i_orchestration_tools->removeFile(string("test.json"))); +} + +TEST_F(OrchestrationToolsTest, jsonObjectSplitter) +{ + string update_text = "{" + " \"manifest\":" + " {" + " \"checksaum\":\"12e307c8f0aab4f51a160d5fb2396de1ca9da5b9\"," + " \"download-options\": [" + " \"http://172.23.92.135/manifest_file.txt\"" + " ]" + " }," + " \"policy\":" + " {" + " \"checksum\":\"82e307c8f0aab4f51a160d5fb2396de1ca9da5b9\"," + " \"download-opations\": [" + " \"http://172.23.92.135/policy_file.txt\"," + " \"ftp://172.23.92.135/policy_file.txt\"" + " ]" + " }," + " \"version\": \"10\"" + "}"; + + string manifest = "{" + " \"checksaum\":\"12e307c8f0aab4f51a160d5fb2396de1ca9da5b9\"," + " \"download-options\": [" + " \"http://172.23.92.135/manifest_file.txt\"" + " ]" + " }"; + + string policy = "{" + " \"checksum\":\"82e307c8f0aab4f51a160d5fb2396de1ca9da5b9\"," + " \"download-opations\": [" + " \"http://172.23.92.135/policy_file.txt\"," + " \"ftp://172.23.92.135/policy_file.txt\"" + " ]" + " }"; + + Maybe> parsed = i_orchestration_tools->jsonObjectSplitter(update_text, ""); + EXPECT_TRUE(parsed.ok()); + cleanSpaces(manifest); + EXPECT_EQ(manifest, parsed.unpack().find("manifest")->second); + cleanSpaces(policy); + EXPECT_EQ(policy, parsed.unpack().find("policy")->second); + string policy_value = parsed.unpack().find("policy")->second; + EXPECT_TRUE(policy_value.find("82e307c8f0aab4f51a160d5fb2396de1ca9da5b9") != string::npos); + + string invalid_json = "{" + " \"manifest\":" + " {" + " \"checksaum\":\"12e307c8f0aab4f51a160d5fb2396de1ca9da5b9\"," + " \"download-options\": [" + " \"http://172.23.92.135/manifest_file.txt\"" + " ]"; + parsed = i_orchestration_tools->jsonObjectSplitter(invalid_json, ""); + EXPECT_FALSE(parsed.ok()); +} + +TEST_F(OrchestrationToolsTest, jsonFileToPackages) +{ + stringstream string_stream; + string_stream << "{" + " \"packages\": [" + " {" + " \"download-path\": \"https://a/install_orchestration.sh\"," + " \"relative-path\": \"/install_orchestration.sh\"," + " \"name\": \"nano-agent\"," + " \"version\": \"24452\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " 
\"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + i_orchestration_tools->writeFile(string_stream.str(), "packages_tmp.json"); + Maybe> packages = i_orchestration_tools->loadPackagesFromJson("packages_tmp.json"); + EXPECT_TRUE(packages.ok()); + EXPECT_TRUE(packages.unpack().find("nano-agent") != packages.unpack().end()); +} + +TEST_F(OrchestrationToolsTest, packagesToJsonFile) +{ + stringstream string_stream; + string_stream << "{" + " \"packages\": [" + " {" + " \"download-path\": \"https://a/install_orchestration.sh\"," + " \"relative-path\": \"/install_orchestration.sh\"," + " \"name\": \"my\"," + " \"version\": \"c\"," + " \"checksum-type\": \"sha1sum\"," + " \"checksum\": \"a58bbab8020b0e6d08568714b5e582a3adf9c805\"," + " \"package-type\": \"service\"," + " \"require\": []" + " }" + " ]" + "}"; + i_orchestration_tools->writeFile(string_stream.str(), "packages.json"); + Maybe> packages = i_orchestration_tools->loadPackagesFromJson("packages.json"); + EXPECT_TRUE(packages.ok()); + EXPECT_TRUE(i_orchestration_tools->packagesToJsonFile(packages.unpack(), "packages.json")); + auto file_content = i_orchestration_tools->readFile("packages.json").unpack(); + EXPECT_TRUE(file_content.find("a58bbab8020b0e6d08568714b5e582a3adf9c805") != string::npos); +} + +TEST_F(OrchestrationToolsTest, executeCommand) +{ + EXPECT_TRUE(i_orchestration_tools->executeCmd("exit 0")); + EXPECT_FALSE(i_orchestration_tools->executeCmd("exit 1")); +} + +TEST_F(OrchestrationToolsTest, createDirectory) +{ + string path = "/tmp/temp_dir"; + EXPECT_TRUE(i_orchestration_tools->createDirectory(path)); + EXPECT_TRUE(i_orchestration_tools->doesDirectoryExist(path)); + // get True after the directory already exists + EXPECT_TRUE(i_orchestration_tools->createDirectory(path)); +} + +TEST_F(OrchestrationToolsTest, base64DecodeEncode) +{ + string clear_text = "{\n" + " \"token\": \"77f380c5-9397-4e53-bb78-7c9df8f80a03\",\n" + " \"expired\": false\n" + "}"; + string base64_text = "ewogICAidG9rZW4iOiAiNzdmMzgwYzUtOTM5Ny00ZTUzLWJiNzgtN2M5Z"\ + "GY4ZjgwYTAzIiwKICAgImV4cGlyZWQiOiBmYWxzZQp9"; + EXPECT_EQ(clear_text, i_orchestration_tools->base64Decode(base64_text)); + EXPECT_EQ(base64_text, i_orchestration_tools->base64Encode(clear_text)); + + string test_str = ""; + EXPECT_EQ(test_str, i_orchestration_tools->base64Decode(i_orchestration_tools->base64Encode(test_str))); + test_str = "TEStsr fassaf saf"; + EXPECT_EQ(test_str, i_orchestration_tools->base64Decode(i_orchestration_tools->base64Encode(test_str))); + test_str = "T24122142sfsavs!@!%"; + EXPECT_EQ(test_str, i_orchestration_tools->base64Decode(i_orchestration_tools->base64Encode(test_str))); + test_str = "\nsdlsakdsad\nsdaslds"; + EXPECT_EQ(test_str, i_orchestration_tools->base64Decode(i_orchestration_tools->base64Encode(test_str))); +} diff --git a/components/security_apps/orchestration/orchestration_ut/CMakeLists.txt b/components/security_apps/orchestration/orchestration_ut/CMakeLists.txt new file mode 100755 index 0000000..07d9ae1 --- /dev/null +++ b/components/security_apps/orchestration/orchestration_ut/CMakeLists.txt @@ -0,0 +1,15 @@ +link_directories(${ng_module_osrc_openssl_path}/lib) +link_directories(${ng_module_osrc_curl_path}/lib) +link_directories(${BOOST_ROOT}/lib) + +add_unit_test( + orchestration_ut + "orchestration_ut.cc" + 
"orchestration;rest;manifest_controller;service_controller;orchestration_downloader;agent_details;package_handler;orchestration_modules;orchestration_tools;environment;config;logging;version;shell_cmd;message;update_communication;agent_details_reporter;connkey;encryptor;metric;ip_utilities;event_is;-lcrypto;-lboost_filesystem;-lboost_regex;-lssl" +) + +add_unit_test( + orchestration_multitenant_ut + "orchestration_multitenant_ut.cc" + "orchestration;rest;manifest_controller;service_controller;orchestration_downloader;agent_details;package_handler;orchestration_modules;orchestration_tools;environment;config;logging;version;shell_cmd;message;update_communication;agent_details_reporter;connkey;encryptor;metric;ip_utilities;event_is;-lcrypto;-lboost_filesystem;-lboost_regex;-lssl;curl" +) diff --git a/components/security_apps/orchestration/orchestration_ut/orchestration_multitenant_ut.cc b/components/security_apps/orchestration/orchestration_ut/orchestration_multitenant_ut.cc new file mode 100644 index 0000000..8b3ebf5 --- /dev/null +++ b/components/security_apps/orchestration/orchestration_ut/orchestration_multitenant_ut.cc @@ -0,0 +1,445 @@ +#include "orchestration_comp.h" + +#include "cptest.h" +#include "mock/mock_encryptor.h" +#include "mock/mock_orchestration_tools.h" +#include "mock/mock_downloader.h" +#include "mock/mock_manifest_controller.h" +#include "mock/mock_service_controller.h" +#include "mock/mock_orchestration_status.h" +#include "mock/mock_update_communication.h" +#include "mock/mock_details_resolver.h" +#include "mock/mock_agent_details_reporter.h" +#include "mock/mock_logging.h" +#include "mock/mock_shell_cmd.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_messaging.h" +#include "mock/mock_time_get.h" +#include "mock/mock_rest_api.h" +#include "mock/mock_tenant_manager.h" +#include "mock/mock_messaging_downloader.h" +#include "config.h" +#include "config_component.h" +#include "agent_details.h" + +using namespace testing; +using namespace std; + +class OrchestrationMultitenancyTest : public Test +{ +public: + OrchestrationMultitenancyTest() : config(Singleton::Consume::from(config_comp)) + { + EXPECT_CALL( + rest, + mockRestCall(RestAction::SET, "new-configuration", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationMultitenancyTest::setNewConfiguration))); + + EXPECT_CALL(tenant_manager, getTimeoutVal()).WillOnce(Return(chrono::microseconds(0))); + EXPECT_CALL( + mock_ml, + addRecurringRoutine(I_MainLoop::RoutineType::System, _, _, _, _) + ).WillRepeatedly(Return(0)); + EXPECT_CALL( + mock_ml, + addOneTimeRoutine(I_MainLoop::RoutineType::System, _, "Configuration update registration", false) + ).WillOnce(Return(0)); + + config_comp.preload(); + config_comp.init(); + } + + void + init() + { + EXPECT_CALL(mock_service_controller, isServiceInstalled("Access Control")).WillRepeatedly(Return(false)); + + // This Holding the Main Routine of the Orchestration. 
+ EXPECT_CALL( + mock_ml, + addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Orchestration runner", true) + ).WillOnce(DoAll(SaveArg<1>(&routine), Return(1))); + + EXPECT_CALL(mock_shell_cmd, getExecOutput("openssl version -d | cut -d\" \" -f2 | cut -d\"\\\"\" -f2", _, _)) + .WillOnce(Return(string("OpenSSL certificates Directory"))); + + EXPECT_CALL(rest, mockRestCall(RestAction::SHOW, "orchestration-status", _)).WillOnce( + WithArg<2>(Invoke(this, &OrchestrationMultitenancyTest::setRestStatus))); + + doEncrypt(); + orchestration_comp.init(); + } + + bool + restHandler(const unique_ptr &rest_ptr) + { + rest_handler = rest_ptr->getRest(); + return true; + } + + void + doEncrypt() + { + Maybe err = genError("No file exist"); + EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/conf/user-cred.json")).WillOnce(Return(err)); + + EXPECT_CALL(mock_orchestration_tools, writeFile("This is fake", "/etc/cp/data/data1.a")).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, writeFile("0000 is fake", "/etc/cp/data/data4.a")).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, writeFile("This is 3333", "/etc/cp/data/data6.a")).WillOnce(Return(true)); + } + + void + expectDetailsResolver() + { + Maybe> no_nginx(genError("No nginx")); + EXPECT_CALL(mock_details_resolver, getPlatform()).WillOnce(Return(string("linux"))); + EXPECT_CALL(mock_details_resolver, getArch()).WillOnce(Return(string("x86_64"))); + EXPECT_CALL(mock_details_resolver, isReverseProxy()).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, isKernelVersion3OrHigher()).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, isGwNotVsx()).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, isVersionEqualOrAboveR8110()).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, parseNginxMetadata()).WillOnce(Return(no_nginx)); + EXPECT_CALL(mock_details_resolver, getAgentVersion()) + .WillOnce(Return("1.1.1")) + .WillOnce(Return("1.1.1")); + + map resolved_mgmt_details({{"kernel_version", "4.4.0-87-generic"}}); + EXPECT_CALL(mock_details_resolver, getResolvedDetails()).WillOnce(Return(resolved_mgmt_details)); + } + + void + runRoutine() + { + routine(); + } + + void + preload() + { + orchestration_comp.preload(); + } + + void + waitForRestCall() + { + EXPECT_CALL(rest, mockRestCall(RestAction::SHOW, "orchestration-status", _)).WillRepeatedly(Return(true)); + } + + void + performSetNewConfiguration(const string &file_path) + { + stringstream rest_call_parameters; + rest_call_parameters + << "{\"configuration_file_paths\": [" + << (file_path == "" ? 
file_path : (string("\"") + file_path + string("\""))) + << "] }"; + set_new_configuration->performRestCall(rest_call_parameters); + } + + bool + declareVariable(const unique_ptr &p) + { + set_new_configuration = p->getRest(); + return true; + } + + ::Environment env; + OrchestrationComp orchestration_comp; + AgentDetails agent_details; + ConfigComponent config_comp; + Config::I_Config *config; + + unique_ptr set_new_configuration; + unique_ptr rest_status; + unique_ptr rest_handler; + unique_ptr declare_variable; + + StrictMock mock_ml; + StrictMock mock_encryptor; + StrictMock mock_orchestration_tools; + StrictMock mock_downloader; + StrictMock mock_shell_cmd; + StrictMock mock_message; + StrictMock rest; + StrictMock mock_service_controller; + StrictMock mock_manifest_controller; + StrictMock mock_update_communication; + StrictMock mock_messaging_downloader; + StrictMock tenant_manager; + + NiceMock mock_status; + NiceMock mock_time_get; + NiceMock mock_details_resolver; + NiceMock mock_agent_reporter; + NiceMock mock_log; + + +private: + bool + setNewConfiguration(const unique_ptr &p) + { + set_new_configuration = p->getRest(); + return true; + } + + bool setRestStatus(const unique_ptr &p) + { + rest_status = p->getRest(); + return true; + } + + I_MainLoop::Routine routine; + I_MainLoop::Routine status_routine; +}; + +TEST_F(OrchestrationMultitenancyTest, init) +{ +} + +TEST_F(OrchestrationMultitenancyTest, handle_virtual_resource) +{ + string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy"; + string manifest_file_path = "/etc/cp/conf/manifest.json"; + string setting_file_path = "/etc/cp/conf/settings.json"; + string policy_file_path = "/etc/cp/conf/policy.json"; + string data_file_path = "/etc/cp/conf/data.json"; + + string host_address = "1.2.3.5"; + string manifest_checksum= "manifest"; + string policy_checksum= "policy"; + string settings_checksum= "settings"; + string data_checksum= "data"; + + string first_policy_version = ""; + string host_url = "https://" + host_address + "/"; + + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationMultitenancyTest::restHandler))); + waitForRestCall(); + init(); + expectDetailsResolver(); + + Maybe response( + string( + "{\n" + " \"fog-address\": \"" + host_url + "\",\n" + " \"agent-type\": \"test\",\n" + " \"pulling-interval\": 25,\n" + " \"error-pulling-interval\": 15\n" + "}" + ) + ); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response)); + EXPECT_CALL(mock_message, setActiveFog(host_address, 443, true, MessageTypeTag::GENERIC)).WillOnce(Return(true)); + EXPECT_CALL(mock_update_communication, setAddressExtenesion("")); + EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe())); + EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path)) + .WillOnce(Return(manifest_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path)) + .WillOnce(Return(settings_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path)) + .WillOnce(Return(policy_checksum)); + + EXPECT_CALL(mock_orchestration_tools, 
calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path)) + .WillOnce(Return(data_checksum)); + + EXPECT_CALL(mock_service_controller, getPolicyVersion()) + .Times(2).WillRepeatedly(ReturnRef(first_policy_version)); + + vector active_tenants = { "1236", "1235" }; + EXPECT_CALL(tenant_manager, fetchActiveTenants()).WillOnce(Return(active_tenants)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(_, "/etc/cp/conf/tenant_1236/policy.json")) + .WillOnce(Return(string("checksum_policy_tenant_1236"))); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(_, "/etc/cp/conf/tenant_1235/policy.json")) + .WillOnce(Return(string("checksum_policy_tenant_1235"))); + + EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/conf/tenant_1236/policy.json")) + .WillOnce(Return(string("{}"))); + + EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/conf/tenant_1235/policy.json")) + .WillOnce(Return(string("{}"))); + + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(_, "/etc/cp/conf/tenant_1236_settings.json")) + .WillOnce(Return(string("checksum_settings_tenant_1236"))); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(_, "/etc/cp/conf/tenant_1235_settings.json")) + .WillOnce(Return(string("checksum_settings_tenant_1235"))); + + EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce( + Invoke( + [&](CheckUpdateRequest &req) + { + EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum)); + EXPECT_THAT(req.getSettings(), IsValue(settings_checksum)); + EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum)); + EXPECT_THAT(req.getData(), IsValue(data_checksum)); + + string update_response = + "{\n" + " \"manifest\": \"\",\n" + " \"policy\": \"\",\n" + " \"settings\": \"\",\n" + " \"data\": \"\",\n" + " \"virtualPolicy\": {\n" + " \"tenants\": [\n" + " {\n" + " \"tenantId\": \"1236\",\n" + " \"checksum\": \"new_checksum_policy_tenant_1236\",\n" + " \"version\": \"1\"\n" + " },\n" + " {\n" + " \"tenantId\": \"1235\",\n" + " \"checksum\": \"new_checksum_policy_tenant_1235\",\n" + " \"version\": \"1\"\n" + " }\n" + " ]\n" + " },\n" + " \"virtualSettings\": {\n" + " \"tenants\": [\n" + " {\n" + " \"tenantId\": \"1236\",\n" + " \"checksum\": \"new_checksum_settings_tenant_1236\",\n" + " \"version\": \"1\"\n" + " },\n" + " {\n" + " \"tenantId\": \"1235\",\n" + " \"checksum\": \"new_checksum_settings_tenant_1235\",\n" + " \"version\": \"1\"\n" + " }\n" + " ]\n" + " }\n" + "}"; + + EXPECT_TRUE(req.loadJson(update_response)); + + return Maybe(); + } + ) + ); + + GetResourceFile policy_file(GetResourceFile::ResourceFileType::VIRTUAL_POLICY); + policy_file.addTenant("1236", "1", "new_checksum_policy_tenant_1236"); + policy_file.addTenant("1235", "1", "new_checksum_policy_tenant_1235"); + + map download_policy_res = { + { "1236", "/tmp/orchestration_downloads/virtualPolicy_1236.download" }, + { "1235", "/tmp/orchestration_downloads/virtualPolicy_1235.download" } + }; + + GetResourceFile settings_file(GetResourceFile::ResourceFileType::VIRTUAL_SETTINGS); + settings_file.addTenant("1236", "1", "new_checksum_settings_tenant_1236"); + settings_file.addTenant("1235", "1", "new_checksum_settings_tenant_1235"); + + map download_settings_res = { + { "1236", "/tmp/orchestration_downloads/virtualSettings_1236.download" }, + { "1235", "/tmp/orchestration_downloads/virtualSettings_1235.download" } + }; + + EXPECT_CALL( + mock_downloader, + downloadVirtualFileFromFog(_, Package::ChecksumTypes::SHA256) + ).WillOnce( + WithArg<0>( + Invoke( + [&] (const GetResourceFile 
&resourse_file) + { + EXPECT_EQ(resourse_file, policy_file); + return download_policy_res; + } + ) + ) + ).WillOnce( + WithArg<0>( + Invoke( + [&] (const GetResourceFile &resourse_file) + { + EXPECT_EQ(resourse_file, settings_file); + return download_settings_res; + } + ) + ) + ); + + EXPECT_CALL( + mock_orchestration_tools, + copyFile( + "/tmp/orchestration_downloads/virtualSettings_1236.download", + "/etc/cp/conf/tenant_1236_settings.json" + ) + ).WillOnce(Return(true)); + + EXPECT_CALL( + mock_orchestration_tools, + copyFile( + "/tmp/orchestration_downloads/virtualSettings_1235.download", + "/etc/cp/conf/tenant_1235_settings.json" + ) + ).WillOnce(Return(true)); + + vector expected_data_types = {}; + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration( + "/etc/cp/conf/policy.json", + "/etc/cp/conf/settings.json", + expected_data_types, + "" + ) + ).WillOnce(Return(true)); + + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration( + "/tmp/orchestration_downloads/virtualPolicy_1236.download", + "/etc/cp/conf/tenant_1236_settings.json", + expected_data_types, + "1236" + ) + ).WillOnce(Return(true)); + + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration( + "/tmp/orchestration_downloads/virtualPolicy_1235.download", + "/etc/cp/conf/tenant_1235_settings.json", + expected_data_types, + "1235" + ) + ).WillOnce(Return(true)); + + EXPECT_CALL(mock_ml, yield(A())) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(1000000, microseconds.count()); + } + ) + ) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(25000000, microseconds.count()); + throw invalid_argument("stop while loop"); + } + ) + ); + EXPECT_CALL( + mock_shell_cmd, + getExecOutput(_, _, _) + ).WillRepeatedly(Return(string("daniel\n1\n"))); + try { + runRoutine(); + } catch (const invalid_argument& e) {} +} diff --git a/components/security_apps/orchestration/orchestration_ut/orchestration_ut.cc b/components/security_apps/orchestration/orchestration_ut/orchestration_ut.cc new file mode 100755 index 0000000..0d9a781 --- /dev/null +++ b/components/security_apps/orchestration/orchestration_ut/orchestration_ut.cc @@ -0,0 +1,1705 @@ +#include "orchestration_comp.h" + +#include "cptest.h" +#include "mock/mock_encryptor.h" +#include "mock/mock_orchestration_tools.h" +#include "mock/mock_downloader.h" +#include "mock/mock_manifest_controller.h" +#include "mock/mock_service_controller.h" +#include "mock/mock_orchestration_status.h" +#include "mock/mock_update_communication.h" +#include "mock/mock_details_resolver.h" +#include "mock/mock_agent_details_reporter.h" +#include "mock/mock_logging.h" +#include "mock/mock_shell_cmd.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_messaging.h" +#include "mock/mock_time_get.h" +#include "mock/mock_rest_api.h" +#include "mock/mock_messaging_downloader.h" +#include "mock/mock_tenant_manager.h" +#include "config.h" +#include "config_component.h" +#include "agent_details.h" +#include "customized_cereal_map.h" +#include "health_check_status/health_check_status.h" + +using namespace testing; +using namespace std; + +class OrchestrationTest : public testing::TestWithParam +{ +public: + OrchestrationTest() + { + EXPECT_CALL(rest, mockRestCall(RestAction::SET, "new-configuration", _)) + .WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::setNewConfiguration)) + ); + + EXPECT_CALL(mock_ml, addOneTimeRoutine(I_MainLoop::RoutineType::System, _, _, false)).WillOnce(Return(0)); + EXPECT_CALL( + 
mock_ml, + addRecurringRoutine(I_MainLoop::RoutineType::System, _, _, _, _) + ).WillRepeatedly(Return(0)); + + config_comp.preload(); + config_comp.init(); + } + + void + init() + { + // This Holding the Main Routine of the Orchestration. + EXPECT_CALL( + mock_ml, + addOneTimeRoutine(I_MainLoop::RoutineType::RealTime, _, "Orchestration runner", true) + ).WillOnce(DoAll(SaveArg<1>(&routine), Return(1))); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput("openssl version -d | cut -d\" \" -f2 | cut -d\"\\\"\" -f2", _, _) + ).WillOnce(Return(string("OpenSSL certificates Directory"))); + + EXPECT_CALL(mock_service_controller, isServiceInstalled("Access Control")).WillRepeatedly( + InvokeWithoutArgs( + []() + { + static int count = 0; + if (count > 0) return false; + count++; + return true; + } + ) + ); + + map empty_service_to_port_map; + EXPECT_CALL(mock_service_controller, getServiceToPortMap()).WillRepeatedly(Return(empty_service_to_port_map)); + + EXPECT_CALL(rest, mockRestCall(RestAction::SHOW, "orchestration-status", _)).WillOnce( + WithArg<2>(Invoke(this, &OrchestrationTest::setRestStatus)) + ); + + string message_body; + EXPECT_CALL(mock_message, mockSendPersistentMessage( + false, + _, + I_Messaging::Method::POST, + "/api/v1/agents/events", + _, + _, + MessageTypeTag::REPORT + )).WillRepeatedly(DoAll(SaveArg<1>(&message_body), Return(Maybe(string(""))))); + + doEncrypt(); + orchestration_comp.init(); + } + + bool + restHandler(const unique_ptr &rest_ptr) + { + rest_handler = rest_ptr->getRest(); + return true; + } + + void + doEncrypt() + { + Maybe err = genError("No file exist"); + EXPECT_CALL(mock_orchestration_tools, readFile("/etc/cp/conf/user-cred.json")).WillOnce(Return(err)); + + EXPECT_CALL(mock_orchestration_tools, writeFile("This is fake", "/etc/cp/data/data1.a")).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, writeFile("0000 is fake", "/etc/cp/data/data4.a")).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, writeFile("This is 3333", "/etc/cp/data/data6.a")).WillOnce(Return(true)); + } + + void + expectDetailsResolver() + { + Maybe> no_nginx(genError("No nginx")); + EXPECT_CALL(mock_details_resolver, getPlatform()).WillOnce(Return(string("linux"))); + EXPECT_CALL(mock_details_resolver, getArch()).WillOnce(Return(string("x86_64"))); + EXPECT_CALL(mock_details_resolver, isReverseProxy()).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, isKernelVersion3OrHigher()).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, isGwNotVsx()).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, isVersionEqualOrAboveR8110()).WillOnce(Return(false)); + EXPECT_CALL(mock_details_resolver, parseNginxMetadata()).WillOnce(Return(no_nginx)); + EXPECT_CALL(mock_details_resolver, getAgentVersion()) + .WillOnce(Return("1.1.1")) + .WillOnce(Return("1.1.1")); + + map resolved_mgmt_details({{"kernel_version", "4.4.0-87-generic"}}); + EXPECT_CALL(mock_details_resolver, getResolvedDetails()).WillOnce(Return(resolved_mgmt_details)); + } + + string + buildOrchestrationStatusJSON( + const string &last_update_attempt = "None", + const string &last_update_status = "None", + const string &last_update = "None", + const string &last_manifest_update = "None", + const string &policy_version = "", + const string &last_policy_update = "None", + const string &last_settings_update = "None", + const string &update_mode = "None", + const string &fog_address = "None", + const string ®istration_status = "None", + const string &manifest_status = "None", + const 
string ®istration_details_name = "", + const string ®istration_details_type = "", + const string ®istration_details_platform = "", + const string ®istration_details_architecture = "", + const string &agent_id = "None", + const string &profile_id = "None", + const string &tenant_id = "None", + const string &service_policy = "", + const string &service_settings = "" + ) + { + string ans = "{\n" + " \"Last update attempt\": \"" + last_update_attempt + "\",\n" + " \"Last update status\": \"" + last_update_status + "\",\n" + " \"Last update\": \"" + last_update + "\",\n" + " \"Last manifest update\": \"" + last_manifest_update + "\",\n" + " \"Policy version\": \"" + policy_version + "\",\n" + " \"Last policy update\": \"" + last_policy_update + "\",\n" + " \"Last settings update\": \"" + last_settings_update + "\",\n" + " \"Upgrade mode\": \"" + update_mode + "\",\n" + " \"Fog address\": \"" + fog_address + "\",\n" + " \"Registration status\": \"" + registration_status + "\",\n" + " \"Registration details\": {\n" + " \"Name\": \"" + registration_details_name + "\",\n" + " \"Type\": \"" + registration_details_type + "\",\n" + " \"Platform\": \"" + registration_details_platform + "\",\n" + " \"Architecture\": \"" + registration_details_architecture + "\"\n" + " },\n" + " \"Agent ID\": \"" + agent_id + "\",\n" + " \"Profile ID\": \"" + profile_id + "\",\n" + " \"Tenant ID\": \"" + tenant_id + "\",\n" + " \"Manifest status\": \"" + manifest_status + "\",\n" + " \"Service policy\": {},\n" + " \"Service settings\": {}\n" + "}"; + + if (!service_policy.empty()) { + string empty_policy = " \"Service policy\": {},\n"; + ans.replace( + ans.find(empty_policy), + empty_policy.size(), + " \"Service policy\": {\n" + " " + service_policy + "\n" + " },\n" + ); + } + if (!service_settings.empty()) { + string empty_settings = " \"Service settings\": {}\n"; + ans.replace( + ans.find(empty_settings), + empty_settings.size(), + " \"Service settings\": {\n" + " " + service_settings + "\n" + " }\n" + ); + } + return ans; + } + + void + runRoutine() + { + routine(); + } + + void + runStatusRoutine() + { + status_routine(); + } + + void + preload() + { + env.preload(); + orchestration_comp.preload(); + } + + string + readFromFile(const string &path) + { + ifstream text_file(path); + stringstream buffer; + buffer << text_file.rdbuf(); + return buffer.str(); + } + + void + waitForRestCall() + { + EXPECT_CALL(rest, mockRestCall(RestAction::SHOW, "orchestration-status", _)).WillRepeatedly(Return(true)); + } + + bool + declareVariable(const unique_ptr &p) + { + set_new_configuration = p->getRest(); + return true; + } + + unique_ptr rest_handler; + unique_ptr declare_variable; + StrictMock mock_ml; + NiceMock mock_time_get; + ::Environment env; + string first_policy_version = ""; + string host_address = "1.2.3.5"; + string host_url = "https://" + host_address + "/"; + ConfigComponent config_comp; + StrictMock mock_encryptor; + NiceMock mock_log; + unique_ptr set_new_configuration; + unique_ptr rest_status; + StrictMock mock_orchestration_tools; + StrictMock mock_downloader; + StrictMock mock_messaging_downloader; + StrictMock mock_shell_cmd; + StrictMock mock_message; + StrictMock rest; + StrictMock mock_service_controller; + StrictMock mock_manifest_controller; + StrictMock mock_update_communication; + StrictMock mock_status; + StrictMock mock_details_resolver; + NiceMock mock_agent_reporter; + NiceMock tenant_manager; + OrchestrationComp orchestration_comp; + AgentDetails agent_details; + +private: + bool + 
setNewConfiguration(const unique_ptr &p) + { + set_new_configuration = p->getRest(); + return true; + } + + bool setRestStatus(const unique_ptr &p) + { + rest_status = p->getRest(); + return true; + } + + I_MainLoop::Routine routine; + I_MainLoop::Routine status_routine; +}; + +TEST_F(OrchestrationTest, doNothing) +{ +} + +TEST_F(OrchestrationTest, register_config) +{ + EXPECT_CALL(rest, mockRestCall(RestAction::ADD, "declare-boolean-variable", _)) + .WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::declareVariable))); + + preload(); + env.init(); + + string config_json = + "{\n" + " \"orchestration\": {\n" + " \"Backup file extension\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"1\"\n" + " }\n" + " ],\n" + " \"Service name\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"3\"\n" + " }\n" + " ],\n" + " \"Packages directory\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"5\"\n" + " }\n" + " ],\n" + " \"Manifest file path\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"7\"\n" + " }\n" + " ],\n" + " \"Settings file path\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"8\"\n" + " }\n" + " ],\n" + " \"Configuration path\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"9\"\n" + " }\n" + " ],\n" + " \"Policy file path\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"10\"\n" + " }\n" + " ],\n" + " \"Configuration directory\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"11\"\n" + " }\n" + " ],\n" + " \"Default Check Point directory\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"12\"\n" + " }\n" + " ],\n" + " \"Configuration file extension\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"13\"\n" + " }\n" + " ],\n" + " \"Policy file extension\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"14\"\n" + " }\n" + " ],\n" + " \"Temp file extension\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"15\"\n" + " }\n" + " ],\n" + " \"Orchestration status path\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"16\"\n" + " }\n" + " ],\n" + " \"Data file path\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"17\"\n" + " }\n" + " ]\n" + " }\n" + "}"; + + istringstream ss(config_json); + Singleton::Consume::from(config_comp)->loadConfiguration(ss); + EXPECT_THAT(getConfiguration("orchestration", "Backup file extension"), IsValue("1")); + EXPECT_THAT(getConfiguration("orchestration", "Service name"), IsValue("3")); + EXPECT_THAT(getConfiguration("orchestration", "Packages directory"), IsValue("5")); + EXPECT_THAT(getConfiguration("orchestration", "Manifest file path"), IsValue("7")); + EXPECT_THAT(getConfiguration("orchestration", "Settings file path"), IsValue("8")); + EXPECT_THAT(getConfiguration("orchestration", "Configuration path"), IsValue("9")); + EXPECT_THAT(getConfiguration("orchestration", "Policy file path"), IsValue("10")); + EXPECT_THAT(getConfiguration("orchestration", "Configuration directory"), IsValue("11")); + EXPECT_THAT(getConfiguration("orchestration", "Default Check Point directory"), IsValue("12")); + EXPECT_THAT(getConfiguration("orchestration", "Configuration file extension"), IsValue("13")); + EXPECT_THAT(getConfiguration("orchestration", "Policy file extension"), IsValue("14")); + EXPECT_THAT(getConfiguration("orchestration", "Temp file extension"), IsValue("15")); + EXPECT_THAT(getConfiguration("orchestration", "Orchestration status path"), IsValue("16")); + 
EXPECT_THAT(getConfiguration("orchestration", "Data file path"), IsValue("17")); + env.fini(); +} + +TEST_F(OrchestrationTest, orchestrationPolicyUpdate) +{ + waitForRestCall(); + preload(); + + EXPECT_CALL( + mock_ml, + addOneTimeRoutine(I_MainLoop::RoutineType::Offline, _, "Send policy update report", _) + ).WillOnce(Return(1)); + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + EXPECT_CALL(mock_status, setFogAddress(host_url)); + + init(); + + string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy"; + string manifest_file_path = "/etc/cp/conf/manifest.json"; + string setting_file_path = "/etc/cp/conf/settings.json"; + string policy_file_path = "/etc/cp/conf/policy.json"; + string data_file_path = "/etc/cp/conf/data.json"; + string host_address = "1.2.3.5"; + string new_host_address = "6.2.3.5"; + string new_host_url = "https://" + new_host_address + "/test/"; + string new_policy_path = "/some-path"; + + string manifest_checksum = "manifest"; + string policy_checksum = "policy"; + string settings_checksum = "settings"; + string data_checksum = "data"; + string new_policy_checksum= "111111"; + + string second_val = "12"; + string third_val = "13"; + + Maybe policy_response( + string( + "{\n" + " \"fog-address\": \"" + host_url + "\",\n" + " \"agent-type\": \"test\",\n" + " \"pulling-interval\": 25,\n" + " \"error-pulling-interval\": 15\n" + "}" + ) + ); + + Maybe new_policy_response( + string( + "{\n" + " \"fog-address\": \"" + new_host_url + "\",\n" + " \"agent-type\": \"test\",\n" + " \"pulling-interval\": 25,\n" + " \"error-pulling-interval\": 15\n" + "}" + ) + ); + + EXPECT_CALL(mock_status, setFogAddress(new_host_url)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)) + .WillOnce(Return(policy_response)) + .WillOnce(Return(new_policy_response)); + EXPECT_CALL(mock_message, setActiveFog(host_address, 443, true, MessageTypeTag::GENERIC)).WillOnce(Return(true)); + EXPECT_CALL(mock_update_communication, setAddressExtenesion("")); + EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe())); + expectDetailsResolver(); + EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path)) + .WillOnce(Return(manifest_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path)) + .WillOnce(Return(settings_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path)) + .WillOnce(Return(policy_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path)) + .WillOnce(Return(data_checksum)); + + EXPECT_CALL(mock_service_controller, getPolicyVersion()) + .Times(4) + .WillOnce(ReturnRef(first_policy_version)) + .WillOnce(ReturnRef(first_policy_version)) + .WillOnce(ReturnRef(second_val)) + .WillOnce(ReturnRef(third_val) + ); + EXPECT_CALL(mock_status, setPolicyVersion(first_policy_version)); + EXPECT_CALL(mock_status, setPolicyVersion(third_val)); + EXPECT_CALL(mock_update_communication, sendPolicyVersion("13")).Times(1).WillOnce(Return(Maybe())); + + EXPECT_CALL(mock_update_communication, 
getUpdate(_)).WillOnce( + Invoke( + [&](CheckUpdateRequest &req) + { + EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum)); + EXPECT_THAT(req.getSettings(), IsValue(settings_checksum)); + EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum)); + EXPECT_THAT(req.getData(), IsValue(data_checksum)); + req = CheckUpdateRequest("", new_policy_checksum, "", "", "", ""); + return Maybe(); + } + ) + ); + + GetResourceFile policy_file(GetResourceFile::ResourceFileType::POLICY); + EXPECT_CALL( + mock_downloader, + downloadFileFromFog(new_policy_checksum, Package::ChecksumTypes::SHA256, policy_file) + ).WillOnce(Return(Maybe(new_policy_path))); + + vector expected_data_types = {}; + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "") + ).WillOnce(Return(true)); + + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration(new_policy_path, "", expected_data_types, "") + ).WillOnce(Return(true)); + + EXPECT_CALL( + mock_message, + setActiveFog(new_host_address, 443, true, MessageTypeTag::GENERIC) + ).WillOnce(Return(true)); + EXPECT_CALL(mock_update_communication, setAddressExtenesion("/test")); + EXPECT_CALL(mock_status, setLastUpdateAttempt()); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "") + ); + EXPECT_CALL(mock_status, setIsConfigurationUpdated(A>()) + ).WillOnce( + Invoke( + [](EnumArray arr) + { + EXPECT_EQ(arr[OrchestrationStatusConfigType::MANIFEST], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::POLICY], true); + EXPECT_EQ(arr[OrchestrationStatusConfigType::SETTINGS], false); + } + ) + ); + + EXPECT_CALL(mock_ml, yield(A())) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(1000000, microseconds.count()); + } + ) + ) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(25000000, microseconds.count()); + throw invalid_argument("stop while loop"); + } + ) + ); + EXPECT_CALL( + mock_shell_cmd, + getExecOutput(_, _, _) + ).WillRepeatedly(Return(string("daniel\n1\n"))); + try { + runRoutine(); + } catch (const invalid_argument& e) {} +} + +TEST_F(OrchestrationTest, startOrchestrationPoliceWithFailures) +{ + waitForRestCall(); + preload(); + Maybe msg_err = genError("Failed to send message"); + EXPECT_CALL(mock_status, setFogAddress(host_url)); + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + init(); + string orchestration_policy_file_path = getPolicyConfigPath("orchestration", Config::ConfigFileType::Policy); + string orchestration_policy_file_path_bk = orchestration_policy_file_path + ".bk"; + string manifest_file_path = "/etc/cp/conf/manifest.json"; + string setting_file_path = "/etc/cp/conf/settings.json"; + string policy_file_path = "/etc/cp/conf/policy.json"; + string data_file_path = "/etc/cp/conf/data.json"; + + string host_address = "1.2.3.5"; + string manifest_checksum = "manifest"; + string policy_checksum = "policy"; + string settings_checksum = "settings"; + string data_checksum = "data"; + + Maybe response( + string( + "{\n" + " \"fog-address\": \"" + host_url + "\",\n" + " \"agent-type\": \"test\",\n" + " \"pulling-interval\": 25,\n" + " \"error-pulling-interval\": 15\n" + "}" + ) + ); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, 
readFile(orchestration_policy_file_path)) + .WillOnce(Return(Maybe(genError("Failed")))) + .WillOnce(Return(response)); + + EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path_bk)).WillOnce( + Return(Maybe(genError("Failed"))) + ); + + vector expected_data_types = {}; + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "") + ).Times(2).WillRepeatedly(Return(true)); + + EXPECT_CALL(mock_message, setActiveFog(host_address, 443, true, MessageTypeTag::GENERIC)).WillOnce(Return(true)); + EXPECT_CALL(mock_update_communication, setAddressExtenesion("")); + EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe())); + expectDetailsResolver(); + EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path)) + .WillOnce(Return(manifest_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path)) + .WillOnce(Return(settings_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path)) + .WillOnce(Return(policy_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path)) + .WillOnce(Return(data_checksum)); + + EXPECT_CALL(mock_service_controller, getPolicyVersion()) + .Times(2).WillRepeatedly(ReturnRef(first_policy_version)); + EXPECT_CALL(mock_status, setPolicyVersion(first_policy_version)); + + EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce( + Invoke( + [&](CheckUpdateRequest &req) + { + EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum)); + EXPECT_THAT(req.getSettings(), IsValue(settings_checksum)); + EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum)); + EXPECT_THAT(req.getData(), IsValue(data_checksum)); + req = CheckUpdateRequest("", "", "", "", "", ""); + return Maybe(); + } + ) + ); + + EXPECT_CALL(mock_status, setLastUpdateAttempt()); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "") + ); + + EXPECT_CALL(mock_status, setIsConfigurationUpdated(A>()) + ).WillOnce( + Invoke( + [](EnumArray arr) + { + EXPECT_EQ(arr[OrchestrationStatusConfigType::MANIFEST], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::POLICY], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::SETTINGS], false); + } + ) + ); + + EXPECT_CALL(mock_ml, yield(A())) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(1000000, microseconds.count()); + } + ) + ) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(25000000, microseconds.count()); + throw invalid_argument("stop while loop"); + } + ) + ); + EXPECT_CALL( + mock_shell_cmd, + getExecOutput(_, _, _) + ).WillRepeatedly(Return(string("daniel\n1\n"))); + try { + runRoutine(); + } catch (const invalid_argument& e) {} +} + +TEST_F(OrchestrationTest, loadOrchestrationPolicyFromBackup) +{ + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + waitForRestCall(); + init(); + string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy"; + string orchestration_policy_file_path_bk = orchestration_policy_file_path + ".bk"; + string manifest_file_path = 
"/etc/cp/conf/manifest.json"; + string setting_file_path = "/etc/cp/conf/settings.json"; + string policy_file_path = "/etc/cp/conf/policy.json"; + string data_file_path = "/etc/cp/conf/data.json"; + + string host_address = "1.2.3.5"; + string manifest_checksum = "manifest"; + string policy_checksum = "policy"; + string settings_checksum = "settings"; + string data_checksum = "data"; + + Maybe response( + string( + "{\n" + " \"fog-address\": \"https://1.2.3.5/\",\n" + " \"agent-type\": \"test\",\n" + " \"pulling-interval\": 25,\n" + " \"error-pulling-interval\": 15\n" + "}" + ) + ); + + EXPECT_CALL(mock_status, setFogAddress(host_url)); + + vector expected_data_types = {}; + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "") + ).WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)) + .WillOnce(Return(Maybe(genError("Failed")))); + EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path_bk)).WillOnce(Return(response)); + EXPECT_CALL( + mock_orchestration_tools, + copyFile(orchestration_policy_file_path_bk, orchestration_policy_file_path) + ).WillOnce(Return(true)); + EXPECT_CALL(mock_message, setActiveFog(host_address, 443, true, MessageTypeTag::GENERIC)).WillOnce(Return(true)); + EXPECT_CALL(mock_update_communication, setAddressExtenesion("")); + EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe())); + expectDetailsResolver(); + EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path)) + .WillOnce(Return(manifest_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path)) + .WillOnce(Return(settings_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path)) + .WillOnce(Return(policy_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path)) + .WillOnce(Return(data_checksum)); + + EXPECT_CALL(mock_service_controller, getPolicyVersion()) + .Times(2).WillRepeatedly(ReturnRef(first_policy_version)); + EXPECT_CALL(mock_status, setPolicyVersion(first_policy_version)); + EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce( + Invoke( + [&](CheckUpdateRequest &req) + { + EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum)); + EXPECT_THAT(req.getSettings(), IsValue(settings_checksum)); + EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum)); + EXPECT_THAT(req.getData(), IsValue(data_checksum)); + req = CheckUpdateRequest("", "", "", "", "", ""); + return Maybe(); + } + ) + ); + + EXPECT_CALL(mock_status, setLastUpdateAttempt()); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "") + ); + EXPECT_CALL(mock_status, setIsConfigurationUpdated(A>()) + ).WillOnce( + Invoke( + [](EnumArray arr) + { + EXPECT_EQ(arr[OrchestrationStatusConfigType::MANIFEST], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::POLICY], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::SETTINGS], false); + } + ) + ); + EXPECT_CALL(mock_ml, yield(A())) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) 
+ { + EXPECT_EQ(1000000, microseconds.count()); + } + ) + ) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(25000000, microseconds.count()); + throw invalid_argument("stop while loop"); + } + ) + ); + EXPECT_CALL( + mock_shell_cmd, + getExecOutput(_, _, _) + ).WillRepeatedly(Return(string("daniel\n1\n"))); + try { + runRoutine(); + } catch (const invalid_argument& e) {} +} + +TEST_F(OrchestrationTest, newServicePolicyUpdate) +{ + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + waitForRestCall(); + init(); +} + +TEST_F(OrchestrationTest, manifestUpdate) +{ + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + waitForRestCall(); + init(); + string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy"; + string manifest_file_path = "/etc/cp/conf/manifest.json"; + string setting_file_path = "/etc/cp/conf/settings.json"; + string policy_file_path = "/etc/cp/conf/policy.json"; + string data_file_path = "/etc/cp/conf/data.json"; + + string host_address = "1.2.3.5"; + string manifest_checksum= "manifest"; + string policy_checksum= "policy"; + string settings_checksum= "settings"; + string data_checksum = "data"; + + EXPECT_CALL(mock_status, setFogAddress(host_url)); + + Maybe response( + string( + "{\n" + " \"fog-address\": \"" + host_url + "\",\n" + " \"agent-type\": \"test\",\n" + " \"pulling-interval\": 25,\n" + " \"error-pulling-interval\": 15\n" + "}" + ) + ); + + vector expected_data_types = {}; + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "") + ).WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response)); + EXPECT_CALL(mock_message, setActiveFog(host_address, 443, true, MessageTypeTag::GENERIC)).WillOnce(Return(true)); + EXPECT_CALL(mock_update_communication, setAddressExtenesion("")); + EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe())); + expectDetailsResolver(); + EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path)) + .WillOnce(Return(manifest_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path)) + .WillOnce(Return(settings_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path)) + .WillOnce(Return(policy_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path)) + .WillOnce(Return(data_checksum)); + + EXPECT_CALL(mock_service_controller, getPolicyVersion()) + .Times(2).WillRepeatedly(ReturnRef(first_policy_version)); + EXPECT_CALL(mock_status, setPolicyVersion(first_policy_version)); + EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce( + Invoke( + [&](CheckUpdateRequest &req) + { + EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum)); + EXPECT_THAT(req.getSettings(), IsValue(settings_checksum)); + EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum)); + EXPECT_THAT(req.getData(), 
IsValue(data_checksum)); + req = CheckUpdateRequest("new check sum", "", "", "", "", ""); + return Maybe(); + } + ) + ); + + EXPECT_CALL(mock_status, setLastUpdateAttempt()); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "") + ); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::MANIFEST, OrchestrationStatusResult::SUCCESS, "") + ); + EXPECT_CALL(mock_status, setIsConfigurationUpdated(A>()) + ).WillOnce( + Invoke( + [](EnumArray arr) + { + EXPECT_EQ(arr[OrchestrationStatusConfigType::MANIFEST], true); + EXPECT_EQ(arr[OrchestrationStatusConfigType::POLICY], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::SETTINGS], false); + } + ) + ); + + GetResourceFile manifest_file(GetResourceFile::ResourceFileType::MANIFEST); + EXPECT_CALL(mock_downloader, + downloadFileFromFog( + string("new check sum"), + Package::ChecksumTypes::SHA256, + manifest_file + ) + ).WillOnce(Return(Maybe(string("manifest path")))); + EXPECT_CALL(mock_manifest_controller, updateManifest(string("manifest path"))).WillOnce(Return(true)); + EXPECT_CALL(mock_ml, yield(A())) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(1000000, microseconds.count()); + } + ) + ) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(25000000, microseconds.count()); + throw invalid_argument("stop while loop"); + } + ) + ); + EXPECT_CALL( + mock_shell_cmd, + getExecOutput(_, _, _) + ).WillRepeatedly(Return(string("daniel\n1\n"))); + try { + runRoutine(); + } catch (const invalid_argument& e) {} +} + +TEST_F(OrchestrationTest, loadFromOrchestrationPolicy) +{ + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + waitForRestCall(); + init(); +} + +TEST_F(OrchestrationTest, loadFromOrchestrationBackupPolicy) +{ + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + waitForRestCall(); + init(); +} + +TEST_F(OrchestrationTest, getBadPolicyUpdate) +{ + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + waitForRestCall(); + init(); + string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy"; + string manifest_file_path = "/etc/cp/conf/manifest.json"; + string setting_file_path = "/etc/cp/conf/settings.json"; + string policy_file_path = "/etc/cp/conf/policy.json"; + string data_file_path = "/etc/cp/conf/data.json"; + + string manifest_checksum = "manifest"; + string policy_checksum = "policy"; + string settings_checksum = "settings"; + string data_checksum = "data"; + + Maybe response( + string( + "{\n" + " \"fog-address\": \"" + host_url + "\",\n" + " \"agent-type\": \"test\",\n" + " \"pulling-interval\": 25,\n" + " \"error-pulling-interval\": 15\n" + "}" + ) + ); + EXPECT_CALL(mock_status, setFogAddress(host_url)); + + vector expected_data_types = {}; + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "") + ).Times(2).WillRepeatedly(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response)); + EXPECT_CALL(mock_message, 
setActiveFog(host_address, 443, true, MessageTypeTag::GENERIC)).WillOnce(Return(true)); + EXPECT_CALL(mock_update_communication, setAddressExtenesion("")); + + EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe())); + expectDetailsResolver(); + EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path)) + .WillOnce(Return(manifest_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path)) + .WillOnce(Return(settings_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path)) + .WillOnce(Return(policy_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path)) + .WillOnce(Return(data_checksum)); + + Maybe new_policy_checksum(string("111111")); + + GetResourceFile policy_file(GetResourceFile::ResourceFileType::POLICY); + EXPECT_CALL( + mock_downloader, + downloadFileFromFog( + string("111111"), + Package::ChecksumTypes::SHA256, + policy_file + ) + ).WillOnce(Return(Maybe(string("policy path")))); + string manifest = ""; + string policy = "111111"; + string setting = ""; + + string second_val = "12"; + string third_val = "13"; + EXPECT_CALL(mock_service_controller, getPolicyVersion()) + .Times(3) + .WillOnce(ReturnRef(first_policy_version)) + .WillOnce(ReturnRef(first_policy_version)) + .WillOnce(ReturnRef(second_val) + ); + EXPECT_CALL(mock_status, setPolicyVersion(first_policy_version)); + EXPECT_CALL(mock_status, setLastUpdateAttempt()); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "") + ); + EXPECT_CALL(mock_status, setIsConfigurationUpdated(A>()) + ).WillOnce( + Invoke( + [](EnumArray arr) + { + EXPECT_EQ(arr[OrchestrationStatusConfigType::MANIFEST], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::POLICY], true); + EXPECT_EQ(arr[OrchestrationStatusConfigType::SETTINGS], false); + } + ) + ); + + EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce( + Invoke( + [&](CheckUpdateRequest &req) + { + EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum)); + EXPECT_THAT(req.getSettings(), IsValue(settings_checksum)); + EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum)); + req = CheckUpdateRequest(manifest, policy, setting, "", "", ""); + return Maybe(); + } + ) + ); + + EXPECT_CALL(mock_service_controller, getUpdatePolicyVersion()).Times(1).WillOnce(ReturnRef(third_val)); + + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration(string("policy path"), "", expected_data_types, "")).WillOnce(Return(false) + ); + + EXPECT_CALL(mock_ml, yield(A())) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(1000000, microseconds.count()); + } + ) + ) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(15000000, microseconds.count()); + throw invalid_argument("stop while loop"); + } + ) + ); + EXPECT_CALL( + mock_shell_cmd, + getExecOutput(_, _, _) + ).WillRepeatedly(Return(string("daniel\n1\n"))); + try { + runRoutine(); + } catch (const invalid_argument& e) {} +} + +TEST_F(OrchestrationTest, failedDownloadSettings) +{ + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + waitForRestCall(); + 
init(); + string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy"; + string manifest_file_path = "/etc/cp/conf/manifest.json"; + string setting_file_path = "/etc/cp/conf/settings.json"; + string policy_file_path = "/etc/cp/conf/policy.json"; + string data_file_path = "/etc/cp/conf/data.json"; + + string host_address = "1.2.3.5"; + string manifest_checksum = "manifest-checksum"; + string policy_checksum = "policy-checksum"; + string settings_checksum = "settings-checksum"; + string data_checksum = "data"; + + Maybe response( + string( + "{\n" + " \"fog-address\": \"" + host_url + "\",\n" + " \"agent-type\": \"test\",\n" + " \"pulling-interval\": 25,\n" + " \"error-pulling-interval\": 15\n" + "}" + ) + ); + EXPECT_CALL(mock_status, setFogAddress(host_url)); + + vector expected_data_types = {}; + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "") + ).WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(response)); + EXPECT_CALL(mock_message, setActiveFog(host_address, 443, true, MessageTypeTag::GENERIC)).WillOnce(Return(true)); + EXPECT_CALL(mock_update_communication, setAddressExtenesion("")); + + EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe())); + expectDetailsResolver(); + EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path)) + .WillOnce(Return(manifest_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path)) + .WillOnce(Return(settings_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path)) + .WillOnce(Return(policy_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path)) + .WillOnce(Return(data_checksum)); + + Maybe new_policy_checksum(string("111111")); + + EXPECT_CALL(mock_service_controller, getPolicyVersion()) + .Times(2).WillRepeatedly(ReturnRef(first_policy_version)); + EXPECT_CALL(mock_status, setPolicyVersion(first_policy_version)); + EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce( + Invoke( + [&](CheckUpdateRequest &req) + { + EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum)); + EXPECT_THAT(req.getSettings(), IsValue(settings_checksum)); + EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum)); + req = CheckUpdateRequest(manifest_checksum, policy_checksum, settings_checksum, "", "", ""); + return Maybe(); + } + ) + ); + + EXPECT_CALL(mock_status, setLastUpdateAttempt()); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "") + ).Times(1); + + string manifest_err = + "Critical Error: Agent/Gateway was not fully deployed on host 'hostname' " + "and is not enforcing a security policy. 
Retry installation or contact Check Point support."; + EXPECT_CALL( + mock_status, + setFieldStatus( + OrchestrationStatusFieldType::MANIFEST, + OrchestrationStatusResult::FAILED, + manifest_err + ) + ).Times(1); + EXPECT_CALL(mock_details_resolver, getHostname()).Times(2).WillRepeatedly(Return(string("hostname"))); + EXPECT_CALL(mock_status, getManifestError()).WillOnce(ReturnRef(manifest_err)); + + EXPECT_CALL(mock_status, setIsConfigurationUpdated(A>()) + ).WillOnce( + Invoke( + [](EnumArray arr) + { + EXPECT_EQ(arr[OrchestrationStatusConfigType::MANIFEST], true); + EXPECT_EQ(arr[OrchestrationStatusConfigType::POLICY], true); + EXPECT_EQ(arr[OrchestrationStatusConfigType::SETTINGS], true); + } + ) + ); + Maybe download_error = genError("Failed to download"); + GetResourceFile settings_file(GetResourceFile::ResourceFileType::SETTINGS); + GetResourceFile policy_file(GetResourceFile::ResourceFileType::POLICY); + GetResourceFile manifest_file(GetResourceFile::ResourceFileType::MANIFEST); + + EXPECT_CALL(mock_downloader, + downloadFileFromFog( + string("manifest-checksum"), + Package::ChecksumTypes::SHA256, + manifest_file + ) + ).WillOnce(Return(download_error)); + EXPECT_CALL(mock_downloader, + downloadFileFromFog( + string("policy-checksum"), + Package::ChecksumTypes::SHA256, + policy_file + ) + ).WillOnce(Return(download_error)); + EXPECT_CALL(mock_downloader, + downloadFileFromFog( + string("settings-checksum"), + Package::ChecksumTypes::SHA256, + settings_file + ) + ).WillOnce(Return(download_error)); + + EXPECT_CALL(mock_ml, yield(A())) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(1000000, microseconds.count()); + } + ) + ) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(15000000, microseconds.count()); + throw invalid_argument("stop while loop"); + } + ) + ); + EXPECT_CALL( + mock_shell_cmd, + getExecOutput(_, _, _) + ).WillRepeatedly(Return(string("daniel\n1\n"))); + try { + runRoutine(); + } catch (const invalid_argument& e) {} +} + +TEST_P(OrchestrationTest, orchestrationFirstRun) +{ + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + waitForRestCall(); + init(); + string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy"; + string manifest_file_path = "/etc/cp/conf/manifest.json"; + string setting_file_path = "/etc/cp/conf/settings.json"; + string policy_file_path = "/etc/cp/conf/policy.json"; + string data_file_path = "/etc/cp/conf/data.json"; + + string host_address = "1.2.3.5"; + string manifest_checksum = "manifest"; + string policy_checksum = "policy"; + string settings_checksum = "settings"; + string data_checksum = "data"; + + string manifest = ""; + string policy = ""; + string setting = ""; + + Maybe response( + string( + "{\n" + " \"fog-address\": \"" + host_url + "\",\n" + " \"agent-type\": \"test\",\n" + " \"pulling-interval\": 25,\n" + " \"error-pulling-interval\": 15\n" + "}" + ) + ); + + EXPECT_CALL(mock_status, setFogAddress(host_url)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)) + .WillOnce(Return(response)); + EXPECT_CALL(mock_message, setActiveFog(host_address, 443, true, MessageTypeTag::GENERIC)). + Times(1). 
+ WillRepeatedly(Return(true)); + EXPECT_CALL(mock_update_communication, setAddressExtenesion("")); + + EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe())); + expectDetailsResolver(); + EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path)) + .WillOnce(Return(manifest_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path)) + .WillOnce(Return(settings_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path)) + .WillOnce(Return(policy_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path)) + .WillOnce(Return(data_checksum)); + + EXPECT_CALL(mock_status, setLastUpdateAttempt()); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "") + ); + EXPECT_CALL(mock_status, setIsConfigurationUpdated(A>()) + ).WillOnce( + Invoke( + [](EnumArray arr) + { + EXPECT_EQ(arr[OrchestrationStatusConfigType::MANIFEST], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::POLICY], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::SETTINGS], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::DATA], false); + } + ) + ); + EXPECT_CALL(mock_service_controller, getPolicyVersion()).WillOnce(ReturnRef(first_policy_version)); + EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce( + Invoke( + [&](CheckUpdateRequest &req) + { + EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum)); + EXPECT_THAT(req.getSettings(), IsValue(settings_checksum)); + EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum)); + req = CheckUpdateRequest(manifest, policy, setting, "", "", ""); + return Maybe(); + } + ) + ); + vector expected_data_types = {}; + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration(policy_file_path, setting_file_path, expected_data_types, "") + ).WillOnce(Return(true)); + + EXPECT_CALL(mock_ml, yield(A())) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(1000000, microseconds.count()); + } + ) + ) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(25000000, microseconds.count()); + throw invalid_argument("stop while loop"); + } + ) + ); + EXPECT_CALL( + mock_shell_cmd, + getExecOutput(_, _, _) + ).WillRepeatedly(Return(string("daniel\n1\n"))); + try { + runRoutine(); + } catch (const invalid_argument& e) {} + EXPECT_CALL(mock_status, writeStatusToFile()); + + vector reply; + bool is_named_query = GetParam(); + if (is_named_query) { + auto all_comps_status_reply = HealthCheckStatusEvent().performNamedQuery(); + for (auto &elem : all_comps_status_reply) { + reply.push_back(elem.second); + } + } else { + reply = HealthCheckStatusEvent().query(); + } + + ASSERT_EQ(reply.size(), 1); + EXPECT_EQ(reply[0].getCompName(), "Orchestration"); + EXPECT_EQ(reply[0].getStatus(), HealthCheckStatus::HEALTHY); + + HealthCheckStatusEvent().notify(); + + orchestration_comp.fini(); +} + +INSTANTIATE_TEST_CASE_P(getBadPolicyUpdate, OrchestrationTest, ::testing::Values(false, true)); + +TEST_F(OrchestrationTest, GetRestOrchStatus) +{ + string test_str = "Test"; + string agent_details = + " Name: name" + " Type: test_type" + " Platform: platform" + " Architecture: arch"; + string ans = "{\n" + " \"Last 
update attempt\": \"" + test_str + "\",\n" + " \"Last update\": \"" + test_str + "\",\n" + " \"Last update status\": \"" + test_str + "\",\n" + " \"Policy version\": \"" + test_str + "\",\n" + " \"Last policy update\": \"" + test_str + "\",\n" + " \"Last manifest update\": \"" + test_str + "\",\n" + " \"Last settings update\": \"" + test_str + "\",\n" + " \"Registration status\": \"" + test_str + "\",\n" + " \"Manifest status\": \"" + test_str + "\",\n" + " \"Upgrade mode\": \"" + test_str + "\",\n" + " \"Fog address\": \"" + test_str + "\",\n" + " \"Agent ID\": \"" + test_str + "\",\n" + " \"Profile ID\": \"" + test_str + "\",\n" + " \"Tenant ID\": \"" + test_str + "\",\n" + " \"Registration details\": \"" + agent_details +"\",\n" + " \"Service policy\": \"\\n service_a: path\",\n" + " \"Service settings\": \"\\n service_b: path\"\n" + "}"; + + map service_map_a = {{"service_a", "path"}}; + map service_map_b = {{"service_b", "path"}}; + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + EXPECT_CALL(mock_status, getLastUpdateAttempt()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getUpdateStatus()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getUpdateTime()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getLastManifestUpdate()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getPolicyVersion()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getLastPolicyUpdate()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getLastSettingsUpdate()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getUpgradeMode()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getFogAddress()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getRegistrationStatus()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getAgentId()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getProfileId()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getTenantId()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getManifestStatus()).WillOnce(ReturnRef(test_str)); + EXPECT_CALL(mock_status, getServicePolicies()).WillOnce(ReturnRef(service_map_a)); + EXPECT_CALL(mock_status, getServiceSettings()).WillOnce(ReturnRef(service_map_b)); + EXPECT_CALL(mock_status, getRegistrationDetails()).WillOnce(Return(agent_details)); + init(); + stringstream ss("{}"); + auto output = rest_status->performRestCall(ss); + EXPECT_EQ(output.ok(), true); + EXPECT_EQ(output.unpack(), ans); +} + +TEST_F(OrchestrationTest, set_proxy) +{ + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + waitForRestCall(); + + init(); + stringstream is; + string proxy_url = "http://some-proxy.com:8080"; + is << "{\"proxy\": \""+ proxy_url +"\"}"; + rest_handler->performRestCall(is); + auto maybe_proxy = agent_details.getProxy(); + EXPECT_TRUE(maybe_proxy.ok()); + EXPECT_EQ(maybe_proxy.unpack(), proxy_url); +} + +TEST_F(OrchestrationTest, dataUpdate) +{ + EXPECT_CALL( + rest, + mockRestCall(RestAction::ADD, "proxy", _) + ).WillOnce(WithArg<2>(Invoke(this, &OrchestrationTest::restHandler))); + waitForRestCall(); + init(); + string orchestration_policy_file_path = "/etc/cp/conf/orchestration/orchestration.policy"; + string manifest_file_path = "/etc/cp/conf/manifest.json"; + string setting_file_path = "/etc/cp/conf/settings.json"; + string policy_file_path = 
"/etc/cp/conf/policy.json"; + string data_file_path = "/etc/cp/conf/data.json"; + + string host_address = "1.2.3.5"; + string manifest_checksum= "manifest"; + string policy_checksum= "policy"; + string settings_checksum= "settings"; + string data_checksum = "data"; + + string data_download_path = "https://a/data.json"; + string data_checksum_type = "sha1sum"; + string data_instance_checksum = "8d4a5709673a05b380ba7d6567e28910019118f5"; + + EXPECT_CALL(mock_status, setFogAddress(host_url)); + + Maybe policy_response( + string( + "{\n" + " \"fog-address\": \"" + host_url + "\",\n" + " \"agent-type\": \"test\",\n" + " \"pulling-interval\": 25,\n" + " \"error-pulling-interval\": 15\n" + "}" + ) + ); + + Maybe data_response( + string( + "{\n" + " \"ips\": {\n" + " \"version\": \"c\",\n" + " \"downloadPath\": \"" + data_download_path + "\",\n" + " \"checksumType\": \"" + data_checksum_type + "\",\n" + " \"checksum\": \"" + data_instance_checksum + "\"\n" + " }\n" + "}\n" + ) + ); + + vector expected_empty_data_types = {}; + ExpectationSet expectation_set = EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration(policy_file_path, setting_file_path, expected_empty_data_types, "") + ).WillOnce(Return(true)); + + vector expected_ips_data_types = { "ips" }; + EXPECT_CALL( + mock_service_controller, + updateServiceConfiguration("", "", expected_ips_data_types, "") + ).After(expectation_set).WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, doesDirectoryExist("/etc/cp/conf/data")).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(orchestration_policy_file_path)).WillOnce(Return(policy_response)); + EXPECT_CALL(mock_orchestration_tools, readFile(data_file_path + ".download")).WillOnce(Return(data_response)); + + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_message, setActiveFog(host_address, 443, true, MessageTypeTag::GENERIC)).WillOnce(Return(true)); + EXPECT_CALL(mock_update_communication, setAddressExtenesion("")); + EXPECT_CALL(mock_update_communication, authenticateAgent()).WillOnce(Return(Maybe())); + EXPECT_CALL(mock_manifest_controller, loadAfterSelfUpdate()).WillOnce(Return(false)); + expectDetailsResolver(); + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, manifest_file_path)) + .WillOnce(Return(manifest_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, setting_file_path)) + .WillOnce(Return(settings_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, policy_file_path)) + .WillOnce(Return(policy_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, data_file_path)) + .WillOnce(Return(data_checksum)); + + EXPECT_CALL(mock_orchestration_tools, calculateChecksum(Package::ChecksumTypes::SHA256, "/path/ips")) + .WillOnce(Return(data_instance_checksum)); + + EXPECT_CALL(mock_service_controller, getPolicyVersion()) + .Times(2).WillRepeatedly(ReturnRef(first_policy_version)); + EXPECT_CALL(mock_status, setPolicyVersion(first_policy_version)); + EXPECT_CALL(mock_update_communication, getUpdate(_)).WillOnce( + Invoke( + [&](CheckUpdateRequest &req) + { + EXPECT_THAT(req.getPolicy(), IsValue(policy_checksum)); + EXPECT_THAT(req.getSettings(), IsValue(settings_checksum)); + EXPECT_THAT(req.getManifest(), IsValue(manifest_checksum)); + EXPECT_THAT(req.getData(), 
IsValue(data_checksum)); + req = CheckUpdateRequest("", "", "", "new data", "", ""); + return Maybe(); + } + ) + ); + + EXPECT_CALL(mock_status, setLastUpdateAttempt()); + EXPECT_CALL( + mock_status, + setFieldStatus(OrchestrationStatusFieldType::LAST_UPDATE, OrchestrationStatusResult::SUCCESS, "") + ); + EXPECT_CALL(mock_status, setIsConfigurationUpdated(A>()) + ).WillOnce( + Invoke( + [](EnumArray arr) + { + EXPECT_EQ(arr[OrchestrationStatusConfigType::MANIFEST], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::POLICY], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::SETTINGS], false); + EXPECT_EQ(arr[OrchestrationStatusConfigType::DATA], true); + } + ) + ); + + EXPECT_CALL(mock_ml, yield(A())) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(1000000, microseconds.count()); + } + ) + ) + .WillOnce( + Invoke( + [] (chrono::microseconds microseconds) + { + EXPECT_EQ(25000000, microseconds.count()); + throw invalid_argument("stop while loop"); + } + ) + ); + + string new_data_file_path = data_file_path + ".download"; + GetResourceFile data_file(GetResourceFile::ResourceFileType::DATA); + EXPECT_CALL(mock_downloader, + downloadFileFromFog( + string("new data"), + Package::ChecksumTypes::SHA256, + data_file + ) + ).WillOnce(Return(Maybe(string(new_data_file_path)))); + + EXPECT_CALL(mock_downloader, + downloadFileFromURL( + data_download_path, + data_instance_checksum, + Package::ChecksumTypes::SHA256, + "data_ips" + ) + ).WillOnce(Return(Maybe(string("/path/ips")))); + + EXPECT_CALL( + mock_orchestration_tools, + copyFile(new_data_file_path, data_file_path) + ); + EXPECT_CALL( + mock_orchestration_tools, + copyFile("/path/ips", "/etc/cp/conf/data/ips.data") + ); + EXPECT_CALL( + mock_shell_cmd, + getExecOutput(_, _, _) + ).WillRepeatedly(Return(string("daniel\n1\n"))); + + try { + runRoutine(); + } catch (const invalid_argument& e) {} +} diff --git a/components/security_apps/orchestration/package_handler/CMakeLists.txt b/components/security_apps/orchestration/package_handler/CMakeLists.txt new file mode 100755 index 0000000..17af19f --- /dev/null +++ b/components/security_apps/orchestration/package_handler/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(package_handler package_handler.cc) + +add_subdirectory(package_handler_ut) diff --git a/components/security_apps/orchestration/package_handler/package_handler.cc b/components/security_apps/orchestration/package_handler/package_handler.cc new file mode 100755 index 0000000..f2423f6 --- /dev/null +++ b/components/security_apps/orchestration/package_handler/package_handler.cc @@ -0,0 +1,508 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "package_handler.h" +#include "config.h" +#include "sasal.h" +#include "i_shell_cmd.h" + +#include +#include + +SASAL_START // Orchestration - Updates Control + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +using namespace std; + +#ifdef smb +static const string InstallEnvPrefix = "TMPDIR=/storage/tmp "; +#else +static const string InstallEnvPrefix = ""; +#endif + +enum class PackageHandlerActions { + INSTALL, + UNINSTALL, + PREINSTALL, + POSTINSTALL, + UNREGISTER, + GET_VERSION +}; + +class AdditionalFlagsConfiguration +{ +public: + AdditionalFlagsConfiguration() : flags() {} + + void + load(cereal::JSONInputArchive &ar) + { + try { + ar(cereal::make_nvp("flags", flags)); + } catch (cereal::Exception &) { + ar.setNextName(nullptr); + } + } + + const vector & getFlags() const { return flags; } + +private: + vector flags; +}; + +class PackageHandler::Impl : Singleton::Provide::From +{ +public: + void + init() + { + filesystem_prefix = getFilesystemPathConfig(); + dbgTrace(D_ORCHESTRATOR) << "Initializing Packet handler, file system path prefix: " << filesystem_prefix; + } + bool shouldInstallPackage(const string &package_name, const string &install_file_path) const override; + + bool installPackage(const string &package_name, const string &install_file_path, bool restore_mode) const override; + + bool + uninstallPackage( + const string &package_name, + const string &package_path, + const string &install_file_path + ) const override; + + bool preInstallPackage(const string &package_name, const string &install_file_path) const override; + + bool postInstallPackage(const string &package_name, const string &install_file_path) const override; + + bool updateSavedPackage(const string &package_name, const string &install_file_path) const override; + +private: + void + revertPackage( + const string &package_name, + bool restore_mode, + const string ¤t_installation_file, + const string &backup_installation_file + ) const; + + bool setExecutionMode(const string &install_file_path) const; + + string filesystem_prefix; +}; + +static string +packageHandlerActionsToString(PackageHandlerActions action) +{ + switch(action) { + case PackageHandlerActions::INSTALL: { + string installation_mode = " --install"; + auto trusted_ca_directory = getConfiguration("message", "Trusted CA directory"); + if (trusted_ca_directory.ok() && !trusted_ca_directory.unpack().empty()) { + installation_mode += " --certs-dir "; + installation_mode += trusted_ca_directory.unpack(); + } + AdditionalFlagsConfiguration additional_flags = getConfigurationWithDefault( + AdditionalFlagsConfiguration(), + "orchestration", + "additional flags" + ); + for (const auto &flag : additional_flags.getFlags()) { + installation_mode += " " + flag; + } + + return installation_mode; + } + case PackageHandlerActions::UNINSTALL: { + return string(" --uninstall"); + } + case PackageHandlerActions::PREINSTALL: { + return string(" --pre_install_test"); + } + case PackageHandlerActions::POSTINSTALL: { + return string(" --post_install_test"); + } + case PackageHandlerActions::UNREGISTER: { + return string(" --un-register "); + } + case PackageHandlerActions::GET_VERSION: { + return string(" --version"); + } + } + + dbgAssert(false) << "Package handler action is not supported. 
Action: " << static_cast(action); + return string(); +} + +void +PackageHandler::init() +{ + pimpl->init(); +} + +void +PackageHandler::preload() +{ + registerExpectedConfiguration("orchestration", "Debug mode"); + registerExpectedConfiguration("orchestration", "additional flags"); + registerExpectedConfiguration("orchestration", "Shell command execution time out"); +} + +bool +PackageHandler::Impl::setExecutionMode(const string &install_file_path) const +{ + return (chmod(install_file_path.c_str(), S_IRUSR | S_IWUSR | S_IXUSR) == 0); +} + +bool +PackageHandler::Impl::shouldInstallPackage(const string &package_name, const string &install_file_path) const +{ + string packages_dir = getConfigurationWithDefault( + filesystem_prefix + "/packages", + "orchestration", + "Packages directory" + ); + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + string current_installation_file = packages_dir + "/" + package_name + "/" + package_name; + if (!orchestration_tools->doesFileExist(current_installation_file)) { + dbgDebug(D_ORCHESTRATOR) << "Clean installation - package should be installed. Package name: " << package_name; + return true; + } + + setExecutionMode(current_installation_file); + setExecutionMode(install_file_path); + + dbgDebug(D_ORCHESTRATOR) << "Checking if new and current packages has different versions"; + + uint timeout = getConfigurationWithDefault(5000, "orchestration", "Shell command execution time out"); + static const string action = packageHandlerActionsToString(PackageHandlerActions::GET_VERSION); + + I_ShellCmd *shell_cmd = Singleton::Consume::by(); + Maybe current_package_version = shell_cmd->getExecOutput(current_installation_file + action, timeout); + Maybe new_package_version = shell_cmd->getExecOutput(install_file_path + action, timeout); + + if (!current_package_version.ok()) { + dbgWarning(D_ORCHESTRATOR) + << "Failed to get version of current package - Upgrade will be executed. Package name: " + << package_name + << ", Error: " + << current_package_version.getErr(); + return true; + } + + if (!new_package_version.ok()) { + dbgWarning(D_ORCHESTRATOR) + << "Failed to get version of new package - Upgrade will be executed. Package name: " + << package_name + << ", Error: " + << new_package_version.getErr(); + return true; + } + + bool should_install = current_package_version.unpack() != new_package_version.unpack(); + + dbgInfo(D_ORCHESTRATOR) + << "Version for both new and current version successfully extracted. Package name: " + << package_name + << ", Current version: " + << current_package_version.unpack() + << ", New version: " + << new_package_version.unpack() + << ", Should install: " + << (should_install ? "yes" : "no"); + + return should_install; +} + +bool +PackageHandler::Impl::installPackage( + const string &package_name, + const string &install_file_path, + bool restore_mode = false) const +{ + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->doesFileExist(install_file_path)) { + dbgWarning(D_ORCHESTRATOR) + << "Installation file is not valid for update. 
File path: " + << install_file_path + << " , package: " + << package_name; + return false; + } + + string packages_dir = getConfigurationWithDefault( + filesystem_prefix + "/packages", + "orchestration", + "Packages directory" + ); + string backup_extension = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + string current_installation_file = packages_dir + "/" + package_name + "/" + package_name; + string backup_installation_file = current_installation_file + backup_extension; + + if (restore_mode) { + dbgDebug(D_ORCHESTRATOR) << "Installing package: " << package_name << " from backup."; + } else { + dbgDebug(D_ORCHESTRATOR) << "Installing package: " << package_name; + } + + dbgDebug(D_ORCHESTRATOR) << "Changing permissions to execute installation file " << install_file_path; + if (!setExecutionMode(install_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to change permission for the installation file of " << package_name; + return false; + } + + dbgDebug(D_ORCHESTRATOR) << "Start running installation file. Package: " + << package_name + << ", path: " + << install_file_path; + auto action = packageHandlerActionsToString(PackageHandlerActions::INSTALL); + bool cmd_result = orchestration_tools->executeCmd(InstallEnvPrefix + install_file_path + action); + if (!cmd_result) { + dbgWarning(D_ORCHESTRATOR) << "Failed installing package: " << package_name; + revertPackage(package_name, restore_mode, current_installation_file, backup_installation_file); + return false; + } + + // In restore mode, we should exit to prevent infinite loop + if (restore_mode) return true; + + if ( + !orchestration_tools->doesFileExist(current_installation_file) && + !orchestration_tools->copyFile(install_file_path, current_installation_file) + ) { + dbgWarning(D_ORCHESTRATOR) + << "Failed to save installation file. File: " + << install_file_path + << ". 
Target path: " + << current_installation_file; + return false; + } + + dbgDebug(D_ORCHESTRATOR) << "Backup installation file to " << backup_installation_file; + if (!orchestration_tools->copyFile(current_installation_file, backup_installation_file)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to backup installation file: " << current_installation_file; + return false; + } + + return true; +} + +void +PackageHandler::Impl::revertPackage( + const string &package_name, + bool restore_mode, + const string ¤t_installation_file, + const string &backup_installation_file) const +{ + string orch_service_name = getConfigurationWithDefault( + "orchestration", + "orchestration", + "Service name" + ); + string packages_dir = getConfigurationWithDefault( + filesystem_prefix + "/packages", + "orchestration", + "Packages directory" + ); + if (package_name == orch_service_name) { + string manifest_file_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/manifest.json", + "orchestration", + "Manifest file path" + ); + string temp_extension = getConfigurationWithDefault("_temp", "orchestration", "Temp file extension"); + string temp_manifest_file(manifest_file_path + temp_extension); + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + orchestration_tools->removeFile(temp_manifest_file); + } + + if (restore_mode) return; + + // First we try to recover to last running package and then to + // the backup (2 recent versions are kept) + if (!installPackage(package_name, current_installation_file, true)) { + dbgWarning(D_ORCHESTRATOR) + << "Failed to recover from current installation package," + << " trying to use backup package. Current package: " + << current_installation_file; + if (!installPackage(package_name, backup_installation_file, true)) { + dbgWarning(D_ORCHESTRATOR) + << "Failed to recover from backup installation package. Backup package: " + << backup_installation_file; + } else { + dbgInfo(D_ORCHESTRATOR) + << "Installation of the backup package succeeded. Backup package: " + << backup_installation_file; + } + } else { + dbgInfo(D_ORCHESTRATOR) + << "Installation of the latest package succeeded. Current package: " + << current_installation_file; + } +} + +bool +PackageHandler::Impl::uninstallPackage( + const string &package_name, + const string &package_path, + const string &install_file_path) const +{ + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->doesFileExist(install_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Installation file does not exist. File: " << install_file_path; + return false; + } + + string watchdog_path = getConfigurationWithDefault( + filesystem_prefix, + "orchestration", + "Default Check Point directory" + ) + "/watchdog/cp-nano-watchdog"; + auto action = packageHandlerActionsToString(PackageHandlerActions::UNREGISTER); + if (!orchestration_tools->executeCmd(InstallEnvPrefix + watchdog_path + action + package_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to unregister package from watchdog. Package: " << package_name; + return false; + } + + if (!setExecutionMode(install_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to change package permission. Package: " << package_name; + return false; + } + + action = packageHandlerActionsToString(PackageHandlerActions::UNINSTALL); + if (!orchestration_tools->executeCmd(InstallEnvPrefix + install_file_path + action)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to uninstall package. 
Package: " << package_name; + return false; + } + + if (!orchestration_tools->removeFile(install_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to remove installation package files. Package: " << package_name; + } + + string backup_ext = getConfigurationWithDefault( + ".bk", + "orchestration", + "Backup file extension" + ); + + if (!orchestration_tools->removeFile(install_file_path + backup_ext)) { + dbgDebug(D_ORCHESTRATOR) << "Failed to remove backup installation package files. Package: " << package_name; + } + + dbgInfo(D_ORCHESTRATOR) << "Package was uninstalled successfully. Package: " << package_name; + return true; +} + +bool +PackageHandler::Impl::preInstallPackage(const string &package_name, const string &install_file_path) const +{ + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->doesFileExist(install_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Installation file does not exist. File: " << install_file_path; + return false; + } + + if (!setExecutionMode(install_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to change package permission. Package: " << package_name; + return false; + } + + auto action = packageHandlerActionsToString(PackageHandlerActions::PREINSTALL); + auto cmd_result = orchestration_tools->executeCmd(InstallEnvPrefix + install_file_path + action); + if (!cmd_result) { + dbgWarning(D_ORCHESTRATOR) << "Failed during pre installation test. Package: " << package_name; + return false; + } + + dbgInfo(D_ORCHESTRATOR) << "Pre installation test passed successfully. Package: " << package_name; + return true; +} + +bool +PackageHandler::Impl::postInstallPackage(const string &package_name, const string &install_file_path) const +{ + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->doesFileExist(install_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Installation file does not exist. File: " << install_file_path; + return false; + } + + if (!setExecutionMode(install_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to change package permission. Package: " << package_name; + return false; + } + + auto action = packageHandlerActionsToString(PackageHandlerActions::POSTINSTALL); + auto cmd_result = orchestration_tools->executeCmd(InstallEnvPrefix + install_file_path + action); + if (!cmd_result) { + dbgWarning(D_ORCHESTRATOR) << "Failed during post installation test. Package: " << package_name; + string backup_extension = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + string packages_dir = getConfigurationWithDefault( + filesystem_prefix + "/packages", + "orchestration", + "Packages directory" + ); + string current_installation_file = packages_dir + "/" + package_name + "/" + package_name; + revertPackage(package_name, false, current_installation_file, current_installation_file + backup_extension); + return false; + } + dbgInfo(D_ORCHESTRATOR) << "Post installation test passed successfully. 
Package: " << package_name; + return true; +} + +bool +PackageHandler::Impl::updateSavedPackage(const string &package_name, const string &install_file_path) const +{ + string packages_dir = getConfigurationWithDefault( + filesystem_prefix + "/packages", + "orchestration", + "Packages directory" + ); + string backup_extension = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + string temp_extension = getConfigurationWithDefault("_temp", "orchestration", "Temp file extension"); + string current_installation_file = packages_dir + "/" + package_name + "/" + package_name; + string current_installation_file_backup = current_installation_file + backup_extension; + string tmp_backup = current_installation_file_backup + temp_extension; + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + // Step 1 - save current installation file backup to temporary file. + orchestration_tools->copyFile(current_installation_file_backup, tmp_backup); + // Step 2 - save current installation file to the backuop file. + orchestration_tools->copyFile(current_installation_file, current_installation_file_backup); + dbgDebug(D_ORCHESTRATOR) << "Saving the installation file. " + << "From: " << install_file_path << ", " + << " To: " << current_installation_file; + // Step 3 - save the new installation file to the saved package. + if (!orchestration_tools->copyFile(install_file_path, current_installation_file)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to save installation file. File: " << install_file_path; + // Step 3.1 - Revet the backup package + orchestration_tools->copyFile(tmp_backup, current_installation_file_backup); + return false; + } + // Step 4 - remove the current package file + if (!orchestration_tools->removeFile(install_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to remove temporary installation file. 
File: " << install_file_path; + } + // Step 5 - remove the temporary backup file + orchestration_tools->removeFile(tmp_backup); + + return true; +} + +PackageHandler::PackageHandler() : Component("PackageHandler"), pimpl(make_unique()) {} + +PackageHandler::~PackageHandler() {} + +SASAL_END diff --git a/components/security_apps/orchestration/package_handler/package_handler_ut/CMakeLists.txt b/components/security_apps/orchestration/package_handler/package_handler_ut/CMakeLists.txt new file mode 100755 index 0000000..4eb1822 --- /dev/null +++ b/components/security_apps/orchestration/package_handler/package_handler_ut/CMakeLists.txt @@ -0,0 +1,8 @@ +link_directories(${ng_module_osrc_openssl_path}/lib) +link_directories(${BOOST_ROOT}/lib) + +add_unit_test( + package_handler_ut + "package_handler_ut.cc" + "package_handler;orchestration_tools;orchestration_modules;singleton;logging;config;metric;event_is;-lcrypto;-lboost_filesystem;-lboost_regex" +) diff --git a/components/security_apps/orchestration/package_handler/package_handler_ut/package_handler_ut.cc b/components/security_apps/orchestration/package_handler/package_handler_ut/package_handler_ut.cc new file mode 100755 index 0000000..8a6f864 --- /dev/null +++ b/components/security_apps/orchestration/package_handler/package_handler_ut/package_handler_ut.cc @@ -0,0 +1,404 @@ +#include "package_handler.h" + +#include "cptest.h" +#include "config.h" +#include "config_component.h" +#include "mock/mock_orchestration_tools.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_time_get.h" +#include "mock/mock_shell_cmd.h" + +#include + +using namespace std; +using namespace testing; + +class PackageHandlerTest : public Test +{ +public: + PackageHandlerTest() + : + package_dir("/tmp/packages"), + backup_ext(".bk") + { + setConfiguration(package_dir, "orchestration", "Packages directory"); + setConfiguration(backup_ext, "orchestration", "Backup file extension"); + setConfiguration("/tmp", "orchestration", "Default Check Point directory"); + + writeFile("#!/bin/bash\necho \"bb\"\nexit 1", "/tmp/bad.sh"); + writeFile("#!/bin/bash\necho \"bb\"", "/tmp/packages/good/good"); + writeFile("#!/bin/bash\necho \"bb\"", "/tmp/good.sh"); + writeFile("#!/bin/bash\necho \"bb\"", "/tmp/packages/a/a"); + package_handler.init(); + } + + ~PackageHandlerTest() + { + namespace fs = boost::filesystem; + fs::path path_to_clean(package_dir); + if (fs::is_directory(path_to_clean)) { + for (fs::directory_iterator iter(path_to_clean); iter != fs::directory_iterator(); ++iter) { + fs::remove_all(iter->path()); + } + fs::remove_all(package_dir); + } + } + + void + preload() + { + package_handler.preload(); + } + + bool + writeFile(const string &text, const string &path) const + { + if (path.find('/') != string::npos) { + try { + string dir_path = path.substr(0, path.find_last_of('/')); + boost::filesystem::create_directories(dir_path); + } catch (const boost::filesystem::filesystem_error& e) { + return false; + } + } + try { + ofstream fout(path); + fout << text; + return true; + } catch (const boost::filesystem::filesystem_error& e) { + } + return false; + } + + string package_dir; + string backup_ext; + ::Environment env; + ConfigComponent config; + NiceMock mock_orchestration_tools; + PackageHandler package_handler; + I_PackageHandler *i_package_handler = Singleton::Consume::from(package_handler); + NiceMock mock_mainloop; + NiceMock mock_timer; + StrictMock mock_shell; +}; + +TEST_F(PackageHandlerTest, doNothing) +{ +} + +TEST_F(PackageHandlerTest, registerExpectedConfig) 
+{ + env.preload(); + env.init(); + + preload(); + string config_json = + "{\n" + " \"orchestration\": {\n" + " \"Debug mode\": [\n" + " {\n" + " \"value\": true\n" + " }\n" + " ]\n" + " }\n" + "}"; + + istringstream string_stream(config_json); + Singleton::Consume::from(config)->loadConfiguration(string_stream); + EXPECT_THAT(getConfiguration("orchestration", "Debug mode"), IsValue(true)); + env.fini(); +} + +TEST_F(PackageHandlerTest, useAdditionalFlags) +{ + env.preload(); + env.init(); + preload(); + registerExpectedConfiguration("orchestration", "Packages directory"); + registerExpectedConfiguration("orchestration", "Backup file extension"); + registerExpectedConfiguration("orchestration", "Default Check Point directory"); + + string config_json = + "{\n" + " \"orchestration\": {\n" + " \"additional flags\": [\n" + " {\n" + " \"flags\": [\n" + " \"--flag1\",\n" + " \"--flag2\"\n" + " ]\n" + " }\n" + " ],\n" + " \"Packages directory\": [ { \"value\": \"" + package_dir + "\"}],\n" + " \"Backup file extension\": [ { \"value\": \"" + backup_ext + "\"}],\n" + " \"Default Check Point directory\": [ { \"value\": \"/tmp\"}]" + " }\n" + "}"; + istringstream string_stream(config_json); + Singleton::Consume::from(config)->loadConfiguration(string_stream); + + string script_path = "/tmp/good.sh"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + string package_file = package_dir + "/a/a"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(package_file)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(package_file, package_file + backup_ext)).WillOnce(Return(true)); + + string install_command = script_path + " --install --flag1 --flag2"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(install_command)).WillOnce(Return(true)); + EXPECT_TRUE(i_package_handler->installPackage("a", script_path, false)); + + env.fini(); +} + +TEST_F(PackageHandlerTest, fileNotExist) +{ + EXPECT_CALL(mock_orchestration_tools, doesFileExist("test.json")).WillOnce(Return(false)); + EXPECT_NE(true, i_package_handler->installPackage("", "test.json", false)); +} + +TEST_F(PackageHandlerTest, goodInstall) +{ + string script_path = "/tmp/good.sh"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + string package_file = package_dir + "/a/a"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(package_file)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(package_file, package_file + backup_ext)).WillOnce(Return(true)); + + string command = script_path + " --install"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + EXPECT_TRUE(i_package_handler->installPackage("a", script_path, false)); +} + +TEST_F(PackageHandlerTest, badInstall) +{ + string package_name = "a"; + string package_file = package_dir + "/" + package_name + "/" + package_name; + string script_path = "/tmp/bad.sh"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(package_file)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(package_file + backup_ext)).WillOnce(Return(false)); + string command = script_path + " --install"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(false)); + EXPECT_FALSE(i_package_handler->installPackage(package_name, script_path, false)); +} + +TEST_F(PackageHandlerTest, orcInstallErrorWhileCopyCurrent) +{ + 
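+    // Installation itself succeeds, but copying the current package into its backup slot fails,
+    // so installPackage() is expected to report failure.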
string script_path = "/tmp/good.sh"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + string package_file = package_dir + "/a/a"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(package_file)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(package_file, package_file + backup_ext)).WillOnce(Return(false)); + + string command = script_path + " --install"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + EXPECT_FALSE(i_package_handler->installPackage("a", script_path, false)); +} + +TEST_F(PackageHandlerTest, orcInstallErrorWhileRemovingNew) +{ + string script_path = "/tmp/good.sh"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + string package_file = package_dir + "/a/a"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(package_file)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(package_file, package_file + backup_ext)).WillOnce(Return(true)); + + string command = script_path + " --install"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + EXPECT_TRUE(i_package_handler->installPackage("a", script_path, false)); +} +TEST_F(PackageHandlerTest, badInstallAndRecovery) +{ + string package_name = "a"; + string package_file = package_dir + "/" + package_name + "/" + package_name; + string script_path = "/tmp/bad.sh"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(package_file)).WillOnce(Return(true)); + + string command = script_path + " --install"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(false)); + + command = package_file + " --install"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + + EXPECT_FALSE(i_package_handler->installPackage(package_name, script_path, false)); +} + +TEST_F(PackageHandlerTest, badOrcInstallAndRecoveryWithDefualValuesChange) +{ + setConfiguration("good", "orchestration", "Service name"); + string manifest_file_path = getConfigurationWithDefault("/etc/cp/conf/manifest.json", + "orchestration", "Manifest file path"); + string temp_ext = getConfigurationWithDefault("_temp", "orchestration", "Temp file extension"); + string temp_manifest_file = manifest_file_path + temp_ext; + string package_file = package_dir + "/good/good"; + + EXPECT_CALL(mock_orchestration_tools, doesFileExist("/tmp/bad.sh")).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(package_file)).WillOnce(Return(true)); + + string command = "/tmp/bad.sh --install"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(false)); + + command = package_file + " --install"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + + EXPECT_FALSE(i_package_handler->installPackage("good", "/tmp/bad.sh", false)); +} + +TEST_F(PackageHandlerTest, shouldInstall) +{ + string old_script_path = "/tmp/packages/my-script/my-script"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(old_script_path)).WillOnce(Return(true)); + string new_script_path = "/tmp/new-script.sh"; + string version_command = " --version"; + EXPECT_CALL(mock_shell, getExecOutput(old_script_path + version_command, 5000, _)).WillOnce(Return(string("a"))); + EXPECT_CALL(mock_shell, getExecOutput(new_script_path + version_command, 5000, _)).WillOnce(Return(string("b"))); + + 
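+    // The two version queries return different strings ("a" vs. "b"), so an upgrade is expected.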
EXPECT_TRUE(i_package_handler->shouldInstallPackage("my-script", new_script_path)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(old_script_path)).WillOnce(Return(false)); + EXPECT_TRUE(i_package_handler->shouldInstallPackage("my-script", new_script_path)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(old_script_path)).WillOnce(Return(true)); + EXPECT_CALL( + mock_shell, + getExecOutput(old_script_path + version_command, 5000, _) + ).WillOnce(Return(Maybe(genError("Failed")))); + EXPECT_CALL(mock_shell, getExecOutput(new_script_path + version_command, 5000, _)).WillOnce(Return(string("a"))); + EXPECT_TRUE(i_package_handler->shouldInstallPackage("my-script", new_script_path)); +} + +TEST_F(PackageHandlerTest, shouldNotInstall) +{ + string old_script_path = "/tmp/packages/my-script/my-script"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(old_script_path)).WillOnce(Return(true)); + string version_command = " --version"; + EXPECT_CALL(mock_shell, getExecOutput(old_script_path + version_command, 5000, _)).WillOnce(Return(string("a"))); + string new_script_path = "/tmp/new-script.sh"; + EXPECT_CALL(mock_shell, getExecOutput(new_script_path + version_command, 5000, _)).WillOnce(Return(string("a"))); + EXPECT_FALSE(i_package_handler->shouldInstallPackage("my-script", new_script_path)); +} + +TEST_F(PackageHandlerTest, badPreInstall) +{ + string script_path = "/tmp/bad.sh"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(false)); + EXPECT_FALSE(i_package_handler->preInstallPackage("a", script_path)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + string command = script_path + " --pre_install_test"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(false)); + EXPECT_FALSE(i_package_handler->preInstallPackage("a", script_path)); +} + +TEST_F(PackageHandlerTest, goodPreInstall) +{ + string script_path = "/tmp/good.sh"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + string command = script_path + " --pre_install_test"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + EXPECT_TRUE(i_package_handler->preInstallPackage("a", script_path)); +} + +TEST_F(PackageHandlerTest, badPostInstall) +{ + string script_path = "/tmp/bad.sh"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(false)); + EXPECT_FALSE(i_package_handler->postInstallPackage("a", script_path)); + + string package_file = package_dir + "/a/a"; + string command = script_path + " --post_install_test"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(false)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(package_file)).WillOnce(Return(true)); + command = package_file + " --install"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + + EXPECT_FALSE(i_package_handler->postInstallPackage("a", script_path)); +} + +TEST_F(PackageHandlerTest, goodPostInstall) +{ + string script_path = "/tmp/good.sh"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + string package_file = package_dir + "/a/a"; + string command = script_path + " --post_install_test"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + EXPECT_TRUE(i_package_handler->postInstallPackage("a", 
script_path)); +} + +TEST_F(PackageHandlerTest, badUninstall) +{ + string script_path = "/tmp/good.sh"; + string watchdog_dir = "/tmp/watchdog"; + string watchdog_path = watchdog_dir + "/cp-nano-watchdog"; + string package_file = package_dir + "/a/a"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(false)); + EXPECT_FALSE(i_package_handler->uninstallPackage("a", package_file, script_path)); + + string command = watchdog_path + " --un-register " + package_file; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(false)); + EXPECT_FALSE(i_package_handler->uninstallPackage("a", package_file, script_path)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + command = script_path + " --uninstall"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(false)); + EXPECT_FALSE(i_package_handler->uninstallPackage("a", package_file, script_path)); +} + +TEST_F(PackageHandlerTest, goodUninstall) +{ + string script_path = "/tmp/good.sh"; + string watchdog_dir = "/tmp/watchdog"; + string watchdog_path = watchdog_dir + "/cp-nano-watchdog"; + string package_file = package_dir + "/a/a"; + EXPECT_CALL(mock_orchestration_tools, doesFileExist(script_path)).WillOnce(Return(true)); + + string command = watchdog_path + " --un-register " + package_file; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + + command = script_path + " --uninstall"; + EXPECT_CALL(mock_orchestration_tools, executeCmd(command)).WillOnce(Return(true)); + EXPECT_TRUE(i_package_handler->uninstallPackage("a", package_file, script_path)); +} + +TEST_F(PackageHandlerTest, badupdateSavedPackage) +{ + string script_path = "/tmp/good.sh"; + string package_file = package_dir + "/a/a"; + string package_file_backup = package_dir + "/a/a.bk"; + string package_file_backup_temp = package_dir + "/a/a.bk_temp"; + EXPECT_CALL(mock_orchestration_tools, + copyFile(package_file_backup, package_file_backup_temp)).Times(2).WillRepeatedly(Return(false)); + EXPECT_CALL(mock_orchestration_tools, + copyFile(package_file, package_file_backup)).Times(2).WillRepeatedly(Return(false)); + + EXPECT_CALL(mock_orchestration_tools, copyFile(script_path, package_file)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, + copyFile(package_file_backup_temp, package_file_backup)).WillOnce(Return(false)); + EXPECT_FALSE(i_package_handler->updateSavedPackage("a", script_path)); + + EXPECT_CALL(mock_orchestration_tools, copyFile(script_path, package_file)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(script_path)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, removeFile(package_file_backup_temp)).WillOnce(Return(false)); + EXPECT_TRUE(i_package_handler->updateSavedPackage("a", script_path)); +} + +TEST_F(PackageHandlerTest, goodupdateSavedPackage) +{ + string script_path = "/tmp/good.sh"; + string package_file = package_dir + "/a/a"; + string package_file_backup = package_dir + "/a/a.bk"; + string package_file_backup_temp = package_dir + "/a/a.bk_temp"; + EXPECT_CALL(mock_orchestration_tools, + copyFile(package_file_backup, package_file_backup_temp)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, copyFile(package_file, 
package_file_backup)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, copyFile(script_path, package_file)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(script_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile(package_file_backup_temp)).WillOnce(Return(true)); + + EXPECT_TRUE(i_package_handler->updateSavedPackage("a", script_path)); +} diff --git a/components/security_apps/orchestration/service_controller/CMakeLists.txt b/components/security_apps/orchestration/service_controller/CMakeLists.txt new file mode 100755 index 0000000..30aa88e --- /dev/null +++ b/components/security_apps/orchestration/service_controller/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(service_controller service_controller.cc) + +add_subdirectory(service_controller_ut) diff --git a/components/security_apps/orchestration/service_controller/service_controller.cc b/components/security_apps/orchestration/service_controller/service_controller.cc new file mode 100755 index 0000000..ba7f235 --- /dev/null +++ b/components/security_apps/orchestration/service_controller/service_controller.cc @@ -0,0 +1,937 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "service_controller.h" + +#include +#include +#include +#include + +#include "config.h" +#include "debug.h" +#include "sasal.h" +#include "rest.h" +#include "connkey.h" +#include "i_messaging.h" +#include "common.h" +#include "log_generator.h" +#include "i_orchestration_tools.h" +#include "customized_cereal_map.h" + +SASAL_START // Orchestration - Updates Control + +using namespace std; +using namespace ReportIS; + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +class SendConfigurations : public ClientRest +{ +public: + SendConfigurations(int _id, const string &ver) : id(_id), policy_version(ver) {} + + BOTH_PARAM(int, id); + S2C_PARAM(bool, error); + S2C_PARAM(bool, finished); + S2C_OPTIONAL_PARAM(string, error_message); + C2S_PARAM(string, policy_version); +}; + +class ServiceReconfStatusMonitor : Singleton::Consume, public ServerRest +{ +public: + void + doCall() override + { + auto service_controller = Singleton::Consume::by(); + if (!finished.get()) { + service_controller->updateReconfStatus(id.get(), ReconfStatus::IN_PROGRESS); + dbgTrace(D_ORCHESTRATOR) + << "Request for service reconfiguration, with id " + << id.get() + << ", is still in progress."; + return; + } + if (error.get()) { + service_controller->updateReconfStatus(id.get(), ReconfStatus::FAILED); + dbgError(D_ORCHESTRATOR) + << "Request for service reconfiguration, with id " + << id.get() + << ", failed to complete." + << (error_message.isActive() ? 
" Error: " + error_message.get() : ""); + return; + } + service_controller->updateReconfStatus(id.get(), ReconfStatus::SUCCEEDED); + dbgInfo(D_ORCHESTRATOR) + << "Request for service reconfiguration, with id " + << id.get() + << ", successfully accomplished."; + return; + } + +private: + C2S_PARAM(int, id); + C2S_PARAM(bool, error); + C2S_PARAM(bool, finished); + C2S_OPTIONAL_PARAM(string, error_message); +}; + +bool +ServiceDetails::isServiceActive() const +{ + stringstream watchdog_status_cmd; + watchdog_status_cmd + << getFilesystemPathConfig() + << "/watchdog/cp-nano-watchdog --status --verbose --service " + << service_name; + + if (!service_id.empty() && service_id != service_name) { + string uuid = ""; + if (service_id.find("_") != string::npos) { + string fid = service_id.substr(0, service_id.find("_")); + uuid = service_id.substr(service_id.find("_") + 1, service_id.size()); + watchdog_status_cmd << " --family " << fid << " --id " << uuid; + } else { + uuid = service_id; + watchdog_status_cmd << " --id " << uuid; + } + } + + dbgDebug(D_ORCHESTRATOR) + << "Executing service status check via watchdog api. Service name: " + << service_name + << ", Watchdog command: " + << watchdog_status_cmd.str(); + + I_ShellCmd *shell_cmd = Singleton::Consume::by(); + Maybe service_status = shell_cmd->getExecOutput(watchdog_status_cmd.str()); + + if (!service_status.ok()) { + dbgWarning(D_ORCHESTRATOR) + << "Changing service status to inactive after failure to its status from watchdog. Service name: " + << service_name; + return false; + } + + dbgDebug(D_ORCHESTRATOR) + << "Successfully retrieved service status from watchdog. Service name: " + << service_name + << ", Watchdog output: " + << *service_status; + + string status = service_status.unpack(); + for_each(status.begin(), status.end(), [](char &c) { c = ::tolower(c); }); + + bool is_registered = status.find("not-registered") == string::npos && status.find("registered") != string::npos; + bool is_running = status.find("not-running") == string::npos && status.find("running") != string::npos; + + dbgInfo(D_ORCHESTRATOR) + << "Successfully set service status. Service name: " + << service_name + << ", Status: " + << ((is_registered && is_running) ? 
"active" : "inactive"); + + return is_registered && is_running; +} + +template +void +ServiceDetails::serialize(Archive &ar) +{ + ar(cereal::make_nvp("Service name", service_name)); + ar(cereal::make_nvp("Service ID", service_id)); + ar(cereal::make_nvp("Service port", service_port)); + ar(cereal::make_nvp("Relevant configs", relevant_configs)); +} + +ReconfStatus +ServiceDetails::sendNewConfigurations(int configuration_id, const string &policy_version) +{ + SendConfigurations new_config(configuration_id, policy_version); + + I_Messaging *messaging = Singleton::Consume::by(); + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::ONE_TIME_CONN); + bool res = messaging->sendObject( + new_config, + I_Messaging::Method::POST, + "127.0.0.1", + service_port, + conn_flags, + "/set-new-configuration" + ); + if (!res) { + if(!isServiceActive()) { + dbgDebug(D_ORCHESTRATOR) << "Service " << service_name << " is inactive"; + return ReconfStatus::INACTIVE; + } + dbgDebug(D_ORCHESTRATOR) << "Service " << service_name << " didn't respond to new configuration request"; + return ReconfStatus::FAILED; + } + auto service_details = Singleton::Consume::by(); + + if (new_config.finished.get()) { + if (!new_config.error.get()) { + service_details->startReconfStatus(new_config.id.get(), ReconfStatus::SUCCEEDED, service_name, service_id); + dbgDebug(D_ORCHESTRATOR) << "Loading service configuration succeeded for service " << service_name; + return ReconfStatus::SUCCEEDED; + } else { + string log_name = "Agent could not update policy to version " + + service_details->getUpdatePolicyVersion() + + ". " + + (new_config.error_message.isActive() ? "Additional details: " + new_config.error_message.get() : ""); + LogGen( + log_name, + Audience::SECURITY, + Severity::CRITICAL, + Priority::HIGH, + Tags::ORCHESTRATOR + ) + << LogField("ServiceName", service_name) + << LogField("policyVersion", service_details->getPolicyVersion()); + + service_details->startReconfStatus(new_config.id.get(), ReconfStatus::FAILED, service_name, service_id); + dbgDebug(D_ORCHESTRATOR) + << "Loading service configuration failed for service " + << service_name + << " with error: " + << (new_config.error_message.isActive() ? new_config.error_message.get() : ""); + return ReconfStatus::FAILED; + } + } + dbgDebug(D_ORCHESTRATOR) << "Loading service configuration is in progress for service: " << service_name; + service_details->startReconfStatus(new_config.id.get(), ReconfStatus::IN_PROGRESS, service_name, service_id); + return ReconfStatus::IN_PROGRESS; +} + +void +SetNanoServiceConfig::doCall() +{ + dbgFlow(D_ORCHESTRATOR) + << "Received registration request from service. Service name: " + << service_name.get() + << ", service listening port: " + << service_listening_port.get(); + + I_ServiceController *i_service_controller = Singleton::Consume::from(); + i_service_controller->registerServiceConfig( + service_name, + service_listening_port, + expected_configurations, + service_id.isActive() ? 
service_id.get() : service_name.get() + ); + + status = true; +} + +class ServiceController::Impl + : + Singleton::Provide::From, + Singleton::Consume +{ +public: + void init(); + + bool + updateServiceConfiguration( + const string &new_policy_path, + const string &new_settings_path, + const vector &new_data_files, + const string &tenant_id + ) override; + + bool isServiceInstalled(const string &service_name) override; + + void registerServiceConfig( + const string &service_name, + PortNumber listening_port, + const vector &relevant_configurations, + const string &service_id + ) override; + + const string & getPolicyVersion() const override; + const string & getUpdatePolicyVersion() const override; + void updateReconfStatus(int id, ReconfStatus status) override; + void startReconfStatus( + int id, + ReconfStatus status, + const string &service_name, + const string &service_id + ) override; + +private: + void cleanUpVirtualFiles(); + void refreshPendingServices(); + + bool sendSignalForServices(const set &nano_services_to_update, const string &policy_version); + + bool updateServiceConfigurationFile( + const string &configuration_name, + const string &configuration_file_path, + const string &new_configuration_path); + + ReconfStatus getUpdatedReconfStatus(); + Maybe getServiceDetails(const string &service_name); + map getServiceToPortMap(); + + template + void serializeRegisterServices(Archive &ar) { ar(pending_services); } + + void loadRegisteredServicesFromFile(); + void writeRegisteredServicesToFile(); + + int configuration_id = 0; + map registered_services; + map pending_services; + string policy_version; + string update_policy_version; + string settings_path; + map services_reconf_status; + map services_reconf_names; + map services_reconf_ids; + string filesystem_prefix; +}; + +class GetServicesPorts : public ServerRest +{ +public: + void + doCall() + { + stringstream output; + auto ports_map = Singleton::Consume::from()->getServiceToPortMap(); + for (auto const& entry: ports_map) { + string service = entry.first; + replace(service.begin(), service.end(), ' ', '-'); + output << service << ":"; + output << entry.second << ","; + } + ports_list = output.str(); + } + + S2C_PARAM(string, ports_list); +}; + +Maybe +ServiceController::Impl::getServiceDetails(const string &service_id) +{ + auto iter = registered_services.find(service_id); + if (iter != registered_services.end()) return iter->second; + + return genError("did not find service details for the provided service name. service id: " + service_id); +} + +ReconfStatus +ServiceController::Impl::getUpdatedReconfStatus() +{ + ReconfStatus res = ReconfStatus::SUCCEEDED; + + for(auto &service_and_reconf_status : services_reconf_status) { + string service_id = services_reconf_ids[service_and_reconf_status.first]; + auto maybe_service = getServiceDetails(service_id); + + if (!maybe_service.ok()) { + dbgWarning(D_ORCHESTRATOR) << "Unable to get service details. Error: " << maybe_service.getErr(); + continue; + } + + if (!maybe_service.unpack().isServiceActive()) { + dbgInfo(D_ORCHESTRATOR) + << "Service is not active, removing from registered services list. 
Service: " + << services_reconf_names[service_and_reconf_status.first] + << "ID: " + << service_id; + registered_services.erase(service_id); + service_and_reconf_status.second = ReconfStatus::INACTIVE; + writeRegisteredServicesToFile(); + + continue; + } + + if (res < service_and_reconf_status.second) res = service_and_reconf_status.second; + } + return res; +} + +void +ServiceController::Impl::init() +{ + auto rest = Singleton::Consume::by(); + rest->addRestCall(RestAction::SET, "nano-service-config"); + rest->addRestCall(RestAction::SHOW, "all-service-ports"); + rest->addRestCall(RestAction::SET, "reconf-status"); + + Singleton::Consume::by()->addRecurringRoutine( + I_MainLoop::RoutineType::System, + chrono::seconds( + getConfigurationWithDefault( + 86400, + "orchestration", + "Cleanup virtual tenant seconds interval" + ) + ), + [this] () { cleanUpVirtualFiles(); }, + "Cleanup virtual tenants" + ); + + filesystem_prefix = getFilesystemPathConfig(); + + loadRegisteredServicesFromFile(); +} + +void +ServiceController::Impl::loadRegisteredServicesFromFile() +{ + auto registered_services_file = getConfigurationWithDefault( + filesystem_prefix + "/conf/orchestrations_registered_services.json", + "orchestration", + "Orchestration registered services" + ); + auto maybe_registered_services_str = Singleton::Consume::by()-> + readFile(registered_services_file); + if (!maybe_registered_services_str.ok()) { + dbgTrace(D_ORCHESTRATOR) + << "could not read file. File: " + << registered_services_file + << " Error: " << maybe_registered_services_str.getErr(); + return; + } + + stringstream ss(maybe_registered_services_str.unpack()); + cereal::JSONInputArchive ar(ss); + ar(cereal::make_nvp("Registered Services", pending_services)); + + dbgInfo(D_ORCHESTRATOR) + << "Orchestration pending services loaded from file." + << " File: " + << registered_services_file + << ". Registered Services:"; + + for (const auto &id_service_pair : pending_services) { + const auto &service = id_service_pair.second; + dbgInfo(D_ORCHESTRATOR) + << "Service name: " + << service.getServiceName() + << ", Service ID: " + << service.getServiceID() + << ", Service port: " + << service.getPort(); + } +} + +void +ServiceController::Impl::writeRegisteredServicesToFile() +{ + dbgFlow(D_ORCHESTRATOR); + auto registered_services_file = getConfigurationWithDefault( + filesystem_prefix + "/conf/orchestrations_registered_services.json", + "orchestration", + "Orchestration registered services" + ); + + ofstream ss(registered_services_file); + cereal::JSONOutputArchive ar(ss); + ar(cereal::make_nvp("Registered Services", registered_services)); + + dbgInfo(D_ORCHESTRATOR) + << "Orchestration registered services file has been updated. File: " + << registered_services_file + << ". 
Registered Services:"; + + for (const auto &id_service_pair : registered_services) { + const auto &service = id_service_pair.second; + dbgInfo(D_ORCHESTRATOR) + << "Service name: " + << service.getServiceName() + << ", Service ID: " + << service.getServiceID() + << ", Service port: " + << service.getPort(); + } +} + +void +ServiceController::Impl::cleanUpVirtualFiles() +{ + const string file_list_cmd = + "ls " + + getConfigurationWithDefault( + filesystem_prefix + "/conf", + "orchestration", + "Configuration directory" + ) + + " | grep 'tenant_*' | cut -d '_' -f 2"; + + auto shell_cmd = Singleton:: Consume::by(); + auto tenant_manager = Singleton::Consume::by(); + + auto result = shell_cmd->getExecOutput(file_list_cmd); + if (!result.ok()) return; + + set tenants_on_agent; + + istringstream parsig(*result); + while (!parsig.eof()) { + string tenant_id; + getline(parsig, tenant_id); + if (!tenant_id.empty()) tenants_on_agent.insert(tenant_id); + } + + for (const auto &active_tenant: tenant_manager->fetchActiveTenants()) { + tenants_on_agent.erase(active_tenant); + } + + for (const auto &none_active_tenant: tenants_on_agent) { + // remove files; + string settings_file = filesystem_prefix + "/conf/"+ none_active_tenant + "_settings.json"; + string tenant_dir = filesystem_prefix + "/conf/tenant_"+ none_active_tenant; + + Singleton::Consume::by()->removeFile(settings_file); + rmdir(tenant_dir.c_str()); + } +} + +map +ServiceController::Impl::getServiceToPortMap() +{ + map ports_map; + for (auto const& entry: registered_services) { + const string &service = entry.first; + PortNumber port = entry.second.getPort(); + ports_map[service] = port; + } + + for (auto const& entry: pending_services) { + const string &service = entry.first; + PortNumber port = entry.second.getPort(); + ports_map[service] = port; + } + + return ports_map; +} + +void +ServiceController::Impl::registerServiceConfig( + const string &service_name, + PortNumber listening_port, + const vector &relevant_configurations, + const string &service_id) +{ + ServiceDetails service_config( + service_name, + listening_port, + relevant_configurations, + service_id + ); + + pending_services.erase(service_config.getServiceID()); + pending_services.insert({service_config.getServiceID(), service_config}); +} + +bool +ServiceController::Impl::isServiceInstalled(const string &service_name) +{ + return + registered_services.find(service_name) != registered_services.end() || + pending_services.find(service_name) != pending_services.end(); +} + +void +ServiceController::Impl::refreshPendingServices() +{ + dbgFlow(D_ORCHESTRATOR); + if (pending_services.empty()) return; + for (const auto &service : pending_services) { + registered_services.erase(service.first); + registered_services.insert({service.first, service.second}); + dbgDebug(D_ORCHESTRATOR) << "Successfully registered service. Name: " << service.first; + } + pending_services.clear(); + + writeRegisteredServicesToFile(); +} + +bool +ServiceController::Impl::updateServiceConfiguration( + const string &new_policy_path, + const string &new_settings_path, + const vector &new_data_files, + const string &tenant_id) +{ + dbgFlow(D_ORCHESTRATOR) + << "new_policy_path: " + << new_policy_path + << ", new_settings_path: " + << new_settings_path + << ", new_data_files: " + << makeSeparatedStr(new_data_files, ",") + << ". 
tenant_id: " + << tenant_id; + + if (!new_settings_path.empty()) { + settings_path = new_settings_path; + } + + refreshPendingServices(); + + set nano_services_to_update; + for (const auto &service : registered_services) { + if (new_settings_path != "") { + nano_services_to_update.insert(service.first); + continue; + } + + for (const string &data : new_data_files) { + dbgTrace(D_ORCHESTRATOR) << "data: " << data; + if (service.second.isConfigurationRelevant(data)) { + dbgTrace(D_ORCHESTRATOR) + << "data has relevant configuration, will update the service: " + << service.first; + nano_services_to_update.insert(service.first); + break; + } + } + } + + if (new_policy_path == "") { + dbgDebug(D_ORCHESTRATOR) << "Policy file was not updated. Sending reload command regarding settings and data"; + + return sendSignalForServices(nano_services_to_update, ""); + } + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + Maybe loaded_json = orchestration_tools->readFile(new_policy_path); + if (!loaded_json.ok()) { + dbgWarning(D_ORCHESTRATOR) + << "Failed to load new file: " + << new_policy_path + << ". Error: " + << loaded_json.getErr(); + + return false; + } + + auto all_security_policies = orchestration_tools->jsonObjectSplitter(loaded_json.unpack(), tenant_id); + + if (!all_security_policies.ok()) { + dbgWarning(D_ORCHESTRATOR) + << "Failed to parse json file: " + << new_policy_path + << ". Error: " + << all_security_policies.getErr(); + + return false; + } + + bool was_policy_updated = true; + const string version_param = "version"; + string version_value; + + for (auto &single_policy : all_security_policies.unpack()) { + if (single_policy.first == version_param) { + version_value = single_policy.second; + version_value.erase(remove(version_value.begin(), version_value.end(), '\"'), version_value.end()); + update_policy_version = version_value; + continue; + } + + dbgDebug(D_ORCHESTRATOR) << "Starting to update policy file. Policy type: " << single_policy.first; + + string dir = getConfigurationWithDefault( + filesystem_prefix + "/conf", + "orchestration", + "Configuration directory" + ); + + if (tenant_id != "") { + dir = dir + "/tenant_" + tenant_id; + if (!orchestration_tools->doesDirectoryExist(dir)) { + if (orchestration_tools->createDirectory(dir)) { + dbgTrace(D_ORCHESTRATOR) << "Created new configuration directory for tenant " << tenant_id; + } else { + dbgError(D_ORCHESTRATOR) << "Failed to create configuration directory for tenant "<< tenant_id; + return false; + } + } + } + + string policy_file_path = getPolicyConfigPath(single_policy.first, Config::ConfigFileType::Policy, tenant_id); + + auto update_config_result = updateServiceConfigurationFile( + single_policy.first, + policy_file_path, + single_policy.second + ); + + if (!update_config_result) { + dbgWarning(D_ORCHESTRATOR) << "Failed to update policy file. Policy name: " << single_policy.first; + was_policy_updated = false; + continue; + } + + dbgInfo(D_ORCHESTRATOR) << "Successfully updated policy file. 
Policy name: " << single_policy.first; + + auto orc_status = Singleton::Consume::by(); + orc_status->setServiceConfiguration( + single_policy.first, + policy_file_path, + OrchestrationStatusConfigType::POLICY + ); + + if (tenant_id != "") { + auto instances = Singleton::Consume::by()->getInstances(tenant_id); + for (const auto &instance_id: instances) { + auto relevant_service = registered_services.find(instance_id); + if (relevant_service == registered_services.end()) { + dbgWarning(D_ORCHESTRATOR) << "Could not find registered service. Service Id: " << instance_id; + continue; + } + if (relevant_service->second.isConfigurationRelevant(single_policy.first)) { + nano_services_to_update.insert(instance_id); + } + } + } else { + for (const auto &service : registered_services) { + if (service.second.isConfigurationRelevant(single_policy.first)) { + nano_services_to_update.insert(service.first); + } + } + } + } + + was_policy_updated &= sendSignalForServices(nano_services_to_update, version_value); + + dbgTrace(D_ORCHESTRATOR) << "was_policy_updated: " << (was_policy_updated ? "true" : "false"); + + if (was_policy_updated) { + string config_file_path; + string base_path = filesystem_prefix + "/conf/" + (tenant_id != "" ? "tenant_" + tenant_id + "/" : ""); + config_file_path = getConfigurationWithDefault( + base_path + "policy.json", + "orchestration", + "Policy file path" + ); + + if (new_policy_path.compare(config_file_path) == 0) { + dbgDebug(D_ORCHESTRATOR) << "Enforcing the default policy file"; + policy_version = version_value; + + return true; + } + + string backup_ext = getConfigurationWithDefault(".bk", "orchestration", "Backup file extension"); + + // Save the new configuration file. + if (!orchestration_tools->copyFile(new_policy_path, config_file_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to save the policy file."; + return false; + } + + // Backup the current configuration file. + uint max_backup_attempts = 3; + bool is_backup_succeed = false; + string backup_file = config_file_path + backup_ext; + I_MainLoop *mainloop = Singleton::Consume::by(); + + for (size_t i = 0; i < max_backup_attempts; i++) { + if (orchestration_tools->copyFile(new_policy_path, backup_file)) { + is_backup_succeed = true; + break; + } + mainloop->yield(false); + } + + if (!is_backup_succeed) { + dbgWarning(D_ORCHESTRATOR) << "Failed to back up the policy file."; + return false; + } + + policy_version = version_value; + } + + return was_policy_updated; +} + +bool +ServiceController::Impl::sendSignalForServices( + const set &nano_services_to_update, + const string &policy_version) +{ + dbgFlow(D_ORCHESTRATOR); + for (auto &service_id : nano_services_to_update) { + auto nano_service = registered_services.find(service_id); + if (nano_service == registered_services.end()) { + dbgWarning(D_ORCHESTRATOR) << "Could not find registered service. 
Service Id: " << service_id; + continue; + } + + ++configuration_id; + auto reconf_status = nano_service->second.sendNewConfigurations(configuration_id, policy_version); + + if (reconf_status == ReconfStatus::INACTIVE) { + dbgWarning(D_ORCHESTRATOR) << "Erasing details regarding inactive service " << service_id; + registered_services.erase(service_id); + writeRegisteredServicesToFile(); + } + + if (reconf_status == ReconfStatus::FAILED) { + dbgDebug(D_ORCHESTRATOR) << "The reconfiguration failed for serivce " << service_id; + services_reconf_status.clear(); + services_reconf_names.clear(); + return false; + } + } + + int reconf_timeout = getConfigurationWithDefault(600, "orchestration", "Reconfiguration timeout seconds"); + auto timer = Singleton::Consume::by(); + auto current_timeout = timer->getMonotonicTime() + chrono::seconds(reconf_timeout); + while(timer->getMonotonicTime() < current_timeout) { + switch (getUpdatedReconfStatus()) { + case ReconfStatus::SUCCEEDED: { + dbgDebug(D_ORCHESTRATOR) << "The reconfiguration was successfully completed for all the services"; + services_reconf_status.clear(); + services_reconf_names.clear(); + return true; + } + case ReconfStatus::IN_PROGRESS: { + dbgTrace(D_ORCHESTRATOR) << "Reconfiguration in progress..."; + Singleton::Consume::by()->yield(chrono::seconds(2)); + break; + } + case ReconfStatus::FAILED: { + for(auto &status : services_reconf_status) { + if (status.second == ReconfStatus::FAILED) { + dbgDebug(D_ORCHESTRATOR) + << "The reconfiguration failed for serivce " + << services_reconf_names[status.first]; + } + } + services_reconf_status.clear(); + services_reconf_names.clear(); + return false; + } + case ReconfStatus::INACTIVE: { + dbgError(D_ORCHESTRATOR) << "Reached inactive state in the middle of reconfiguration!"; + services_reconf_status.clear(); + services_reconf_names.clear(); + return false; + } + } + } + + dbgDebug(D_ORCHESTRATOR) << "The reconfiguration has reached a timeout"; + services_reconf_status.clear(); + services_reconf_names.clear(); + return false; +} + +bool +ServiceController::Impl::updateServiceConfigurationFile( + const string &configuration_name, + const string &configuration_file_path, + const string &new_configuration_path) +{ + + dbgFlow(D_ORCHESTRATOR) << "Updating configuration. Config Name: " << configuration_name; + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (orchestration_tools->doesFileExist(configuration_file_path)) { + Maybe old_configuration = orchestration_tools->readFile(configuration_file_path); + if (old_configuration.ok()) { + bool service_changed = old_configuration.unpack().compare(new_configuration_path) != 0; + if (service_changed == false) { + dbgDebug(D_ORCHESTRATOR) << "There is no update for policy file: " << configuration_file_path; + return true; + } + dbgDebug(D_ORCHESTRATOR) + << "Starting to update " << configuration_file_path << " to " << new_configuration_path; + string old_configuration_backup_path = configuration_file_path + getConfigurationWithDefault( + ".bk", + "orchestration", + "Backup file extension" + ); + if (orchestration_tools->copyFile(configuration_file_path, old_configuration_backup_path)) { + dbgDebug(D_ORCHESTRATOR) << "Backup of policy file has been created in: " << configuration_file_path; + } else { + dbgWarning(D_ORCHESTRATOR) << "Failed to backup policy file"; + return false; + } + } else { + dbgWarning(D_ORCHESTRATOR) + << "Failed to read current policy file " + << configuration_file_path + << ". 
Error: " + << old_configuration.getErr(); + + return false; + } + } + + if (orchestration_tools->writeFile(new_configuration_path, configuration_file_path)) { + dbgDebug(D_ORCHESTRATOR) << "New policy file has been saved in: " << configuration_file_path; + } else { + dbgWarning(D_ORCHESTRATOR) << "Failed to save new policy file"; + return false; + } + + dbgInfo(D_ORCHESTRATOR) << "Successfully updated policy file: " << configuration_file_path; + + return true; +} + +ServiceController::ServiceController() : Component("ServiceController"), pimpl(make_unique()) {} + +ServiceController::~ServiceController() {} + +void +ServiceController::init() +{ + pimpl->init(); +} + +const string & +ServiceController::Impl::getPolicyVersion() const +{ + return policy_version; +} + +const string & +ServiceController::Impl::getUpdatePolicyVersion() const +{ + return update_policy_version; +} + +void +ServiceController::Impl::updateReconfStatus(int id, ReconfStatus status) +{ + if (services_reconf_status.find(id) == services_reconf_status.end()) { + dbgError(D_ORCHESTRATOR) << "Service reconfiguration monitor received illegal id :" << id; + return; + } + services_reconf_status[id] = status; +} + +void +ServiceController::Impl::startReconfStatus( + int id, + ReconfStatus status, + const string &service_name, + const string &service_id) +{ + services_reconf_status.emplace(id, status); + services_reconf_names.emplace(id, service_name); + services_reconf_ids.emplace(id, service_id); +} + +SASAL_END diff --git a/components/security_apps/orchestration/service_controller/service_controller_ut/CMakeLists.txt b/components/security_apps/orchestration/service_controller/service_controller_ut/CMakeLists.txt new file mode 100755 index 0000000..60bd44b --- /dev/null +++ b/components/security_apps/orchestration/service_controller/service_controller_ut/CMakeLists.txt @@ -0,0 +1,7 @@ +link_directories(${BOOST_ROOT}/lib) + +add_unit_test( + service_controller_ut + "service_controller_ut.cc" + "service_controller;rest;config;environment;metric;event_is;shell_cmd;orchestration_modules;logging;agent_details;-lboost_regex" +) diff --git a/components/security_apps/orchestration/service_controller/service_controller_ut/service_controller_ut.cc b/components/security_apps/orchestration/service_controller/service_controller_ut/service_controller_ut.cc new file mode 100755 index 0000000..f598a76 --- /dev/null +++ b/components/security_apps/orchestration/service_controller/service_controller_ut/service_controller_ut.cc @@ -0,0 +1,1604 @@ +#include "cptest.h" +#include +#include "orchestration_tools.h" +#include +#include +#include +#include "service_controller.h" +#include "config.h" +#include "config_component.h" +#include "mock/mock_orchestration_tools.h" +#include "mock/mock_orchestration_status.h" +#include "mock/mock_time_get.h" +#include "mock/mock_rest_api.h" +#include "mock/mock_messaging.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_logging.h" +#include "mock/mock_shell_cmd.h" +#include "mock/mock_tenant_manager.h" + +using namespace testing; +using namespace std; + +class ServiceControllerTest : public Test +{ +public: + ServiceControllerTest() + { + Debug::setUnitTestFlag(D_ORCHESTRATOR, Debug::DebugLevel::NOISE); + Debug::setNewDefaultStdout(&capture_debug); + + CPTestTempfile status_file; + registered_services_file_path = status_file.fname; + setConfiguration(registered_services_file_path, "orchestration", "Orchestration registered services"); + + EXPECT_CALL(time, getWalltimeStr(_)).WillRepeatedly(Return("time")); 
+ EXPECT_CALL(time, getMonotonicTime()).WillRepeatedly(Return(chrono::microseconds(1))); + + EXPECT_CALL(mock_rest_api, mockRestCall(RestAction::SET, "nano-service-config", _)).WillOnce( + WithArg<2>(Invoke(this, &ServiceControllerTest::setNanoServiceConfig)) + ); + + EXPECT_CALL(mock_rest_api, mockRestCall(RestAction::SET, "new-configuration", _)).WillOnce( + WithArg<2>(Invoke(this, &ServiceControllerTest::setNanoServiceConfig)) + ); + EXPECT_CALL(mock_ml, addRecurringRoutine(I_MainLoop::RoutineType::System, _, _, _, _)).WillOnce(Return(2)); + + EXPECT_CALL(tenant_manager, getTimeoutVal()).WillOnce(Return(chrono::seconds(1))); + EXPECT_CALL(mock_ml, addOneTimeRoutine(I_MainLoop::RoutineType::System, _, _, false)).WillOnce(Return(1)); + config.init(); + + EXPECT_CALL(mock_rest_api, mockRestCall(RestAction::SHOW, "all-service-ports", _)).WillOnce( + WithArg<2>(Invoke(this, &ServiceControllerTest::getServicesPorts)) + ); + + EXPECT_CALL(mock_rest_api, mockRestCall(RestAction::SET, "reconf-status", _)).WillOnce( + WithArg<2>(Invoke(this, &ServiceControllerTest::setReconfStatus)) + ); + + EXPECT_CALL( + mock_ml, + addRecurringRoutine(_, _, _, "Cleanup virtual tenants", _) + ).WillOnce(DoAll(SaveArg<2>(&v_tenants_cleanup), Return(0))); + + Maybe err = genError("Cannot read file, file does not exist"); + EXPECT_CALL( + mock_orchestration_tools, + readFile(registered_services_file_path) + ).WillOnce(Return(err)); + + configuration_dir = getConfigurationWithDefault( + "/etc/cp/conf", + "orchestration", + "Configuration directory" + ); + policy_extension = getConfigurationWithDefault( + ".policy", + "orchestration", + "Configuration file extension" + ); + settings_extension = getConfigurationWithDefault( + ".conf", + "orchestration", + "Configuration file extension" + ); + backup_extension = getConfigurationWithDefault( + ".bk", + "orchestration", + "Backup file extension" + ); + l4_firewall_policy_path = "/etc/cp/conf/l4_firewall/l4_firewall" + policy_extension; + l4_firewall_settings_path = configuration_dir + "/l4_firewall/l4_firewall" + settings_extension; + l4_firewall_debug_path = configuration_dir + "/l4_firewall/l4_firewall_debug" + settings_extension; + file_name= "in_test.json"; + policy_file_path = getConfigurationWithDefault( + "/etc/cp/conf/policy.json", + "orchestration", + "Policy file path" + ); + settings_file_path = getConfigurationWithDefault( + "/etc/cp/conf/settings.json", + "orchestration", + "Settings file path" + ); + + service_controller.init(); + + registerNewService(); + } + + bool setNanoServiceConfig(const unique_ptr &p) { set_nano_service_config = p->getRest(); return true; } + bool getServicesPorts(const unique_ptr &p) { get_services_ports = p->getRest(); return true; } + bool setReconfStatus(const unique_ptr &p) { set_reconf_status = p->getRest(); return true; } + bool setNewConfiguration(const unique_ptr &p) { set_new_configuration = p->getRest(); return true; } + + ~ServiceControllerTest() + { + Debug::setNewDefaultStdout(&cout); + } + + void + registerNewService() + { + stringstream new_service_registration; + new_service_registration + << "{" + << " \"service_name\": \"mock access control\"," + << " \"service_listening_port\":" + to_string(l4_firewall_service_port) + "," + << " \"expected_configurations\": [\"l4_firewall\", \"non updated capability\"]," + << " \"service_id\": \"family1_id2\"," + << " \"general_settings\": \"path_to_settings\"," + << " \"debug_settings\": \"path_to_debug\"" + << "}"; + + auto registration_res = 
set_nano_service_config->performRestCall(new_service_registration); + ASSERT_TRUE(registration_res.ok()); + + i_service_controller = Singleton::Consume::from(service_controller); + EXPECT_TRUE(i_service_controller->isServiceInstalled("family1_id2")); + EXPECT_FALSE(i_service_controller->isServiceInstalled("I am not installed")); + } + + string + orchestrationRegisteredServicesFileToString(const string &file_name) + { + ifstream status_file(file_name); + stringstream string_stream; + if (status_file.is_open()) { + string line; + bool is_first_line = true; + while (getline(status_file, line)) { + if (is_first_line) { + is_first_line = false; + } else { + string_stream << endl; + } + string_stream << line; + } + status_file.close(); + } + return string_stream.str(); + } + + const uint16_t l4_firewall_service_port = 8888; + const uint16_t waap_service_port = 7777; + ::Environment env; + ConfigComponent config; + string configuration_dir; + string policy_extension; + string settings_extension; + string backup_extension; + string l4_firewall_policy_path; + string l4_firewall_settings_path; + string l4_firewall_debug_path; + string file_name; + string registered_services_file_path; + string policy_file_path; + string settings_file_path; + string services_port; + StrictMock time; + StrictMock mock_rest_api; + StrictMock mock_message; + StrictMock mock_ml; + StrictMock mock_shell_cmd; + StrictMock mock_orchestration_status; + StrictMock mock_orchestration_tools; + StrictMock tenant_manager; + NiceMock mock_log; + ServiceController service_controller; + I_ServiceController *i_service_controller; + unique_ptr set_nano_service_config; + unique_ptr get_services_ports; + unique_ptr set_reconf_status; + unique_ptr set_new_configuration; + + I_MainLoop::Routine v_tenants_cleanup; + ostringstream capture_debug; + string version_value = "1.0.2"; + string old_version = "1.0.1"; +}; + +TEST_F(ServiceControllerTest, doNothing) +{ +} + +TEST_F(ServiceControllerTest, UpdateConfiguration) +{ + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, writeFile(l4_firewall, l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_status, + setServiceConfiguration("l4_firewall", l4_firewall_policy_path, OrchestrationStatusConfigType::POLICY)); + + EXPECT_EQ(i_service_controller->getPolicyVersion(), 
""); + + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path + backup_extension)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path)).WillOnce(Return(true)); + + string general_settings_path = "/my/settings/path"; + string reply_msg = "{\"id\": 1, \"error\": false, \"finished\": true, \"error_message\": \"\"}"; + + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::ONE_TIME_CONN); + EXPECT_CALL( + mock_message, + sendMessage( + true, + "{\n \"id\": 1,\n \"policy_version\": \"1.0.2\"\n}", + I_Messaging::Method::POST, + string("127.0.0.1"), + l4_firewall_service_port, + conn_flags, + string("/set-new-configuration"), + string(), + _, + MessageTypeTag::GENERIC + ) + ).WillOnce(Return(Maybe(reply_msg))); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput( + "/etc/cp/watchdog/cp-nano-watchdog --status --verbose --service mock access control" + " --family family1 --id id2", + _, + _ + ) + ).WillRepeatedly(Return(string("registered and running"))); + + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(file_name, general_settings_path)); + EXPECT_EQ(i_service_controller->getPolicyVersion(), version_value); + EXPECT_EQ(i_service_controller->getUpdatePolicyVersion(), version_value); +} + +TEST_F(ServiceControllerTest, writeRegisteredServicesFromFile) +{ + EXPECT_EQ(orchestrationRegisteredServicesFileToString(registered_services_file_path), string("")); + + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + string expected_json = "{\n" + " \"Registered Services\": {\n" + " \"family1_id2\": {\n" + " \"Service name\": \"mock access control\",\n" + " \"Service ID\": \"family1_id2\",\n" + " \"Service port\": 8888,\n" + " \"Relevant configs\": [\n" + " \"non updated capability\",\n" + " \"l4_firewall\"\n" + " ]\n" + " }\n" + " }\n" + "}"; + + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, writeFile(l4_firewall, l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_status, + setServiceConfiguration("l4_firewall", l4_firewall_policy_path, OrchestrationStatusConfigType::POLICY)); + + EXPECT_EQ(i_service_controller->getPolicyVersion(), ""); + + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path + backup_extension)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, 
policy_file_path)).WillOnce(Return(true)); + + string general_settings_path = "/my/settings/path"; + string reply_msg = "{\"id\": 1, \"error\": false, \"finished\": true, \"error_message\": \"\"}"; + + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::ONE_TIME_CONN); + EXPECT_CALL( + mock_message, + sendMessage( + true, + "{\n \"id\": 1,\n \"policy_version\": \"1.0.2\"\n}", + I_Messaging::Method::POST, + string("127.0.0.1"), + l4_firewall_service_port, + conn_flags, + string("/set-new-configuration"), + string(), + _, + MessageTypeTag::GENERIC + ) + ).WillOnce(Return(Maybe(reply_msg))); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput( + "/etc/cp/watchdog/cp-nano-watchdog --status --verbose --service mock access control" + " --family family1 --id id2", + _, + _ + ) + ).WillRepeatedly(Return(string("registered and running"))); + + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(file_name, general_settings_path)); + EXPECT_EQ(i_service_controller->getPolicyVersion(), version_value); + EXPECT_EQ(i_service_controller->getUpdatePolicyVersion(), version_value); + EXPECT_EQ(orchestrationRegisteredServicesFileToString(registered_services_file_path), expected_json); +} + +TEST_F(ServiceControllerTest, readRegisteredServicesFromFile) +{ + int family1_id3_port = 1111; + string registered_services_json = "{\n" + " \"Registered Services\": {\n" + " \"family1_id3\": {\n" + " \"Service name\": \"mock access control\",\n" + " \"Service ID\": \"family1_id3\",\n" + " \"Service port\": 1111,\n" + " \"Relevant configs\": [\n" + " \"non updated capability\",\n" + " \"l4_firewall\"\n" + " ]\n" + " }\n" + " }\n" + "}"; + EXPECT_CALL(mock_rest_api, mockRestCall(RestAction::SET, "nano-service-config", _)).WillOnce( + WithArg<2>(Invoke(this, &ServiceControllerTest::setNanoServiceConfig)) + ); + + EXPECT_CALL(mock_rest_api, mockRestCall(RestAction::SET, "new-configuration", _)).WillOnce( + WithArg<2>(Invoke(this, &ServiceControllerTest::setNanoServiceConfig)) + ); + EXPECT_CALL(mock_ml, addRecurringRoutine(I_MainLoop::RoutineType::System, _, _, _, _)).WillOnce(Return(2)); + + EXPECT_CALL(tenant_manager, getTimeoutVal()).WillOnce(Return(chrono::seconds(1))); + EXPECT_CALL(mock_ml, addOneTimeRoutine(I_MainLoop::RoutineType::System, _, _, false)).WillOnce(Return(1)); + config.init(); + + EXPECT_CALL(mock_rest_api, mockRestCall(RestAction::SHOW, "all-service-ports", _)).WillOnce( + WithArg<2>(Invoke(this, &ServiceControllerTest::getServicesPorts)) + ); + + EXPECT_CALL(mock_rest_api, mockRestCall(RestAction::SET, "reconf-status", _)).WillOnce( + WithArg<2>(Invoke(this, &ServiceControllerTest::setReconfStatus)) + ); + + EXPECT_CALL( + mock_ml, + addRecurringRoutine(_, _, _, "Cleanup virtual tenants", _) + ).WillOnce(DoAll(SaveArg<2>(&v_tenants_cleanup), Return(0))); + + EXPECT_CALL( + mock_orchestration_tools, + readFile(registered_services_file_path) + ).WillOnce(Return(registered_services_json)); + + service_controller.init(); + + auto services_to_port_map = i_service_controller->getServiceToPortMap(); + EXPECT_EQ(services_to_port_map.find("family1_id3")->second, family1_id3_port); +} + +TEST_F(ServiceControllerTest, noPolicyUpdate) +{ + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": 
[{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(l4_firewall_policy_path)).WillOnce(Return(l4_firewall)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path + backup_extension)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_status, + setServiceConfiguration("l4_firewall", l4_firewall_policy_path, OrchestrationStatusConfigType::POLICY)); + + string reply_msg = "{\"id\": 1, \"error\": false, \"finished\": true, \"error_message\": \"\"}"; + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::ONE_TIME_CONN); + EXPECT_CALL( + mock_message, + sendMessage( + true, + "{\n \"id\": 1,\n \"policy_version\": \"1.0.2\"\n}", + I_Messaging::Method::POST, + string("127.0.0.1"), + l4_firewall_service_port, + conn_flags, + string("/set-new-configuration"), + string(), + _, + MessageTypeTag::GENERIC + ) + ).WillOnce(Return(Maybe(reply_msg))); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput( + "/etc/cp/watchdog/cp-nano-watchdog --status --verbose --service mock access control" + " --family family1 --id id2", + _, + _ + ) + ).WillRepeatedly(Return(string("registered and running"))); + + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(file_name, "")); + EXPECT_EQ(i_service_controller->getPolicyVersion(), version_value); +} + +TEST_F(ServiceControllerTest, SettingsAndPolicyUpdateCombinations) +{ + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(false)); + 
EXPECT_CALL(mock_orchestration_tools, writeFile(l4_firewall, l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_status, + setServiceConfiguration("l4_firewall", l4_firewall_policy_path, OrchestrationStatusConfigType::POLICY)); + + EXPECT_EQ(i_service_controller->getPolicyVersion(), ""); + + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path + backup_extension)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path)).WillOnce(Return(true)); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput( + "/etc/cp/watchdog/cp-nano-watchdog --status --verbose --service mock access control" + " --family family1 --id id2", + _, + _ + ) + ).WillRepeatedly(Return(string("registered and running"))); + + string general_settings_path = "/my/settings/path"; + string reply_msg1 = "{\"id\": 1, \"error\": false, \"finished\": true, \"error_message\": \"\"}"; + + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::ONE_TIME_CONN); + EXPECT_CALL( + mock_message, + sendMessage( + true, + "{\n \"id\": 1,\n \"policy_version\": \"1.0.2\"\n}", + I_Messaging::Method::POST, + string("127.0.0.1"), + l4_firewall_service_port, + conn_flags, + string("/set-new-configuration"), + string(), + _, + MessageTypeTag::GENERIC + ) + ).WillOnce(Return(Maybe(reply_msg1))); + + // both policy and settings now being updated + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(file_name, general_settings_path)); + EXPECT_EQ(i_service_controller->getPolicyVersion(), version_value); + EXPECT_EQ(i_service_controller->getUpdatePolicyVersion(), version_value); + + // Only settings now being updated + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(l4_firewall_policy_path)).WillOnce(Return(l4_firewall)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path + backup_extension)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_status, + setServiceConfiguration("l4_firewall", l4_firewall_policy_path, OrchestrationStatusConfigType::POLICY)); + general_settings_path += "/new"; + + string reply_msg2 = "{\"id\": 2, \"error\": false, \"finished\": true, \"error_message\": \"\"}"; + Flags conn_flags2; + conn_flags2.setFlag(MessageConnConfig::ONE_TIME_CONN); + EXPECT_CALL( + mock_message, + sendMessage( + true, + "{\n \"id\": 2,\n \"policy_version\": \"1.0.2\"\n}", + I_Messaging::Method::POST, + string("127.0.0.1"), + l4_firewall_service_port, + conn_flags2, + string("/set-new-configuration"), + string(), + _, + MessageTypeTag::GENERIC + ) + ).WillRepeatedly(Return(Maybe(reply_msg2))); + + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(file_name, general_settings_path)); + EXPECT_EQ(i_service_controller->getPolicyVersion(), version_value); +} + +TEST_F(ServiceControllerTest, backup) +{ + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " 
{" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + string old_configuration = "{" + " \"version\": \"" + old_version + "\"" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"reject\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(l4_firewall_policy_path)).WillOnce(Return(old_configuration)); + EXPECT_CALL(mock_orchestration_status, + setServiceConfiguration("l4_firewall", l4_firewall_policy_path, OrchestrationStatusConfigType::POLICY)); + + EXPECT_CALL( + mock_orchestration_tools, + copyFile(l4_firewall_policy_path, l4_firewall_policy_path + backup_extension) + ).WillOnce(Return(true)); + EXPECT_CALL( + mock_orchestration_tools, + writeFile(l4_firewall, l4_firewall_policy_path)).WillOnce(Return(true) + ); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path + backup_extension)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path)).WillOnce(Return(true)); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput( + "/etc/cp/watchdog/cp-nano-watchdog --status --verbose --service mock access control" + " --family family1 --id id2", + _, + _ + ) + ).WillRepeatedly(Return(string("registered and running"))); + + string reply_msg = "{\"id\": 1, \"error\": false, \"finished\": true, \"error_message\": \"\"}"; + EXPECT_CALL( + mock_message, + sendMessage( + _, + _, + _, + "127.0.0.1", + l4_firewall_service_port, + _, + "/set-new-configuration", + _, + _, + MessageTypeTag::GENERIC + ) + ).WillOnce(Return(Maybe(reply_msg))); + + EXPECT_EQ(i_service_controller->getPolicyVersion(), ""); + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(file_name, "")); + EXPECT_EQ(i_service_controller->getPolicyVersion(), version_value); +} + +TEST_F(ServiceControllerTest, backupAttempts) +{ + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": 
[\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + string old_configuration = "{" + " \"version\": \"" + old_version + "\"" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"reject\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(l4_firewall_policy_path)).WillOnce(Return(old_configuration)); + EXPECT_CALL(mock_orchestration_status, + setServiceConfiguration("l4_firewall", l4_firewall_policy_path, OrchestrationStatusConfigType::POLICY)); + + EXPECT_CALL( + mock_orchestration_tools, + copyFile(l4_firewall_policy_path, l4_firewall_policy_path + backup_extension) + ).WillOnce(Return(true)); + + EXPECT_CALL( + mock_orchestration_tools, + writeFile(l4_firewall, l4_firewall_policy_path)).WillOnce(Return(true) + ); + + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path + backup_extension)) + .WillOnce(Return(false)) + .WillOnce(Return(false)) + .WillOnce(Return(true)); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput( + "/etc/cp/watchdog/cp-nano-watchdog --status --verbose --service mock access control" + " --family family1 --id id2", + _, + _ + ) + ).WillRepeatedly(Return(string("registered and running"))); + + string reply_msg = "{\"id\": 1, \"error\": false, \"finished\": true, \"error_message\": \"\"}"; + EXPECT_CALL( + mock_message, + sendMessage( + _, + _, + _, + "127.0.0.1", + l4_firewall_service_port, + _, + "/set-new-configuration", + _, + _, + MessageTypeTag::GENERIC + ) + ).WillOnce(Return(Maybe(reply_msg))); + + EXPECT_CALL(mock_ml, yield(false)).Times(2); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path)).WillOnce(Return(true)); + EXPECT_EQ(i_service_controller->getPolicyVersion(), ""); + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(file_name, "")); + EXPECT_EQ(i_service_controller->getPolicyVersion(), version_value); +} + +TEST_F(ServiceControllerTest, MultiUpdateConfiguration) +{ + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }," + " \"orchestration\":" + " {" + " \"fog-address\": \"http://10.0.0.18:81/control/\"," + " \"agent-type\": \"13324sadsd2\"," + " \"proxy\": \"\"," + " \"pulling-interval\": 10," + " \"error-pulling-interval\": 15" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + 
" \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + string orchestration = "{" + " \"fog-address\": \"http://10.0.0.18:81/control/\"," + " \"agent-type\": \"13324sadsd2\"," + " \"proxy\": \"\"," + " \"pulling-interval\": 10," + " \"error-pulling-interval\": 15" + " }"; + + Maybe> json_parser_return = map({ + {"version", version_value}, + {"l4_firewall", l4_firewall}, + {"orchestration", orchestration} + }); + string orchestration_policy_path = configuration_dir + "/orchestration/orchestration" + policy_extension; + string orchestration_settings_path = configuration_dir + "/orchestration/orchestration" + settings_extension; + + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(orchestration_policy_path)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_status, + setServiceConfiguration("l4_firewall", l4_firewall_policy_path, OrchestrationStatusConfigType::POLICY)); + EXPECT_CALL(mock_orchestration_status, + setServiceConfiguration("orchestration", orchestration_policy_path, OrchestrationStatusConfigType::POLICY)); + + EXPECT_CALL(mock_orchestration_tools, writeFile(l4_firewall, l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, writeFile(orchestration, orchestration_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path + backup_extension)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path)).WillOnce(Return(true)); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput( + "/etc/cp/watchdog/cp-nano-watchdog --status --verbose --service mock access control" + " --family family1 --id id2", + _, + _ + ) + ).WillRepeatedly(Return(string("registered and running"))); + + string reply_msg = "{\"id\": 1, \"error\": false, \"finished\": true, \"error_message\": \"\"}"; + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::ONE_TIME_CONN); + EXPECT_CALL( + mock_message, + sendMessage( + true, + "{\n \"id\": 1,\n \"policy_version\": \"1.0.2\"\n}", + I_Messaging::Method::POST, + string("127.0.0.1"), + l4_firewall_service_port, + conn_flags, + string("/set-new-configuration"), + string(), + _, + MessageTypeTag::GENERIC + ) + ).WillOnce(Return(Maybe(reply_msg))); + + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(file_name, "")); +} + +class TestSendRequestToService : public ClientRest +{ +public: + C2S_PARAM(string, mock_rest_request_body_tag); +}; + +TEST_F(ServiceControllerTest, badJsonFile) +{ + Maybe err = genError("Error"); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).Times(1).WillRepeatedly(Return(err)); + EXPECT_FALSE(i_service_controller->updateServiceConfiguration(file_name, "")); +} + +TEST_F(ServiceControllerTest, emptyServices) +{ + Maybe> json_parser_return = map(); + string empty_string = ""; + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).Times(1).WillRepeatedly(Return(empty_string)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(empty_string, _)).Times(1).WillRepeatedly( + 
Return(json_parser_return) + ); + + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path + backup_extension)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path)).WillOnce(Return(true)); + + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(file_name, "")); +} + +TEST_F(ServiceControllerTest, failingWhileLoadingCurrentConfiguration) +{ + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + Maybe err = genError("Error"); + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(l4_firewall_policy_path)).WillOnce(Return(err)); + EXPECT_FALSE(i_service_controller->updateServiceConfiguration(file_name, "")); +} + +TEST_F(ServiceControllerTest, failingWhileCopyingCurrentConfiguration) +{ + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + string old_configuration = "{" + " \"version\": \"" + old_version + "\"" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"reject\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).Times(1).WillRepeatedly(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)).Times(1).WillRepeatedly( + Return(json_parser_return) + ); + EXPECT_CALL(mock_orchestration_tools, 
doesFileExist(l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(l4_firewall_policy_path)).WillOnce(Return(old_configuration)); + EXPECT_CALL( + mock_orchestration_tools, + copyFile(l4_firewall_policy_path, l4_firewall_policy_path + backup_extension) + ).WillOnce(Return(false)); + + EXPECT_EQ(i_service_controller->getPolicyVersion(), ""); + EXPECT_FALSE(i_service_controller->updateServiceConfiguration(file_name, "")); + EXPECT_EQ(i_service_controller->getPolicyVersion(), ""); +} + +TEST_F(ServiceControllerTest, ErrorUpdateConfigurationRest) +{ + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + EXPECT_CALL(time, getWalltime()).WillRepeatedly(Return(chrono::microseconds(0))); + + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, writeFile(l4_firewall, l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL( + mock_orchestration_status, + setServiceConfiguration("l4_firewall", l4_firewall_policy_path, OrchestrationStatusConfigType::POLICY) + ); + + EXPECT_EQ(i_service_controller->getPolicyVersion(), ""); + + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::ONE_TIME_CONN); + EXPECT_CALL( + mock_message, + sendMessage( + true, + "{\n \"id\": 1,\n \"policy_version\": \"1.0.2\"\n}", + I_Messaging::Method::POST, + string("127.0.0.1"), + l4_firewall_service_port, + conn_flags, + string("/set-new-configuration"), + string(), + _, + MessageTypeTag::GENERIC + ) + ).WillOnce(Return(Maybe(genError("")))); + + EXPECT_TRUE(i_service_controller->isServiceInstalled("family1_id2")); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput( + "/etc/cp/watchdog/cp-nano-watchdog --status --verbose --service mock access control" + " --family family1 --id id2", + _, + _ + ) + ).WillRepeatedly(Return(string("not-registered"))); + EXPECT_CALL( + mock_orchestration_tools, + copyFile(file_name, policy_file_path) + ).WillOnce(Return(false)); + + EXPECT_FALSE(i_service_controller->updateServiceConfiguration(file_name, "")); + EXPECT_THAT( + capture_debug.str(), + HasSubstr("Service mock access control is inactive") + ); + EXPECT_FALSE(i_service_controller->isServiceInstalled("family1_id2")); + EXPECT_NE(i_service_controller->getPolicyVersion(), version_value); + EXPECT_EQ(i_service_controller->getPolicyVersion(), ""); + EXPECT_EQ(i_service_controller->getUpdatePolicyVersion(), version_value); +} + 
+TEST_F(ServiceControllerTest, errorWhileWrtingNewConfiguration) +{ + string new_configuration = "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + string old_configuration = "{" + " \"version\": \"" + old_version + "\"" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"reject\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).Times(1).WillRepeatedly(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)).Times(1).WillRepeatedly( + Return(json_parser_return) + ); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, readFile(l4_firewall_policy_path)).WillOnce(Return(old_configuration)); + EXPECT_CALL( + mock_orchestration_tools, + copyFile(l4_firewall_policy_path, l4_firewall_policy_path + backup_extension) + ).WillOnce(Return(true)); + + EXPECT_CALL( + mock_orchestration_tools, + writeFile(l4_firewall, l4_firewall_policy_path)).WillOnce(Return(false) + ); + + EXPECT_FALSE(i_service_controller->updateServiceConfiguration(file_name, "")); +} + +TEST_F(ServiceControllerTest, testPortsRest) +{ + stringstream empty_json; + empty_json << "{}"; + auto res = get_services_ports->performRestCall(empty_json); + ASSERT_TRUE(res.ok()); + EXPECT_THAT(res.unpack(), HasSubstr("family1_id2:8888")); +} + +TEST_F(ServiceControllerTest, testMultitenantConfFiles) +{ + map> tenant_files_input = { + {"tenant1", make_pair("/etc/cp/conf/tenant1_policy.json", "/etc/cp/conf/tenant1_settings.json")}, + {"tenant2", make_pair("/etc/cp/conf/tenant2_policy.json", "")} + }; + + vector ids = {"family1_id2"}; + vector empty_ids; + + EXPECT_CALL(tenant_manager, getInstances("tenant1")).WillOnce(Return(ids)); + EXPECT_CALL(tenant_manager, getInstances("tenant2")).WillOnce(Return(empty_ids)); + + string reply_msg = "{\"id\": 1, \"error\": false, \"finished\": true, \"error_message\": \"\"}"; + EXPECT_CALL( + mock_message, + sendMessage( + true, + "{\n \"id\": 1,\n \"policy_version\": \"1.0.2\"\n}", + _, + string("127.0.0.1"), + l4_firewall_service_port, + _, + string("/set-new-configuration"), + _, + _, + _ + ) + ).WillOnce(Return(Maybe(reply_msg))); + + for(auto entry : tenant_files_input) { + auto tenant = entry.first; + auto files = entry.second; + string conf_file_name = files.first; + string settings_file_name = files.second; + + string new_configuration = "{" + " \"version\": \"" + 
version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + string l4_firewall_policy_path_new = + configuration_dir + "/tenant_" + tenant + "/l4_firewall/l4_firewall" + policy_extension; + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + + EXPECT_CALL(mock_orchestration_tools, readFile(conf_file_name)).WillOnce(Return(new_configuration)); + + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, tenant)) + .WillOnce(Return(json_parser_return)); + + EXPECT_CALL(mock_orchestration_tools, doesDirectoryExist(configuration_dir + "/tenant_" + tenant)) + .WillOnce(Return(false)); + + EXPECT_CALL(mock_orchestration_tools, createDirectory(configuration_dir + "/tenant_" + tenant)) + .WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path_new)).WillOnce(Return(false)); + + EXPECT_CALL(mock_orchestration_tools, writeFile(l4_firewall, l4_firewall_policy_path_new)) + .WillOnce(Return(true)); + + EXPECT_CALL(mock_orchestration_status, setServiceConfiguration( + "l4_firewall", l4_firewall_policy_path_new, OrchestrationStatusConfigType::POLICY) + ); + + string new_policy_file_path = "/etc/cp/conf/tenant_" + tenant + "/" + "policy.json"; + EXPECT_CALL(mock_orchestration_tools, copyFile(conf_file_name, new_policy_file_path + backup_extension)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(conf_file_name, new_policy_file_path)).WillOnce(Return(true)); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput( + "/etc/cp/watchdog/cp-nano-watchdog --status --verbose --service mock access control" + " --family family1 --id id2", + _, + _ + ) + ).WillRepeatedly(Return(string("registered and running"))); + + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(conf_file_name, settings_file_name, {}, tenant)); + } +} + +TEST_F(ServiceControllerTest, cleanup_virtual_files) +{ + string agent_tenants_files = + "111111\n" + "222222\n" + "333333\n"; + + vector active_tenants = { + "222222" + }; + + EXPECT_CALL(mock_shell_cmd, getExecOutput("ls /etc/cp/conf | grep 'tenant_*' | cut -d '_' -f 2", _, _)) + .WillOnce(Return(agent_tenants_files)); + + EXPECT_CALL(tenant_manager, fetchActiveTenants()).WillOnce(Return(active_tenants)); + + EXPECT_CALL(mock_orchestration_tools, removeFile("/etc/cp/conf/111111_settings.json")).WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, removeFile("/etc/cp/conf/333333_settings.json")).WillOnce(Return(true)); + + v_tenants_cleanup(); +} + +TEST_F(ServiceControllerTest, test_delayed_reconf) +{ + string new_configuration = + "{" + " \"version\": \"" + version_value + "\"" + " \"l4_firewall\":" + " {" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " 
\"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + " }" + "}"; + + string l4_firewall = + "{" + " \"app\": \"netfilter\"," + " \"l4_firewall_rules\": [" + " {" + " \"name\": \"allow_statefull_conns\"," + " \"flags\": [\"established\"]," + " \"action\": \"accept\"" + " }," + " {" + " \"name\": \"icmp drop\"," + " \"flags\": [\"log\"]," + " \"services\": [{\"name\":\"icmp\"}]," + " \"action\": \"drop\"" + " }" + " ]" + "}"; + + setConfiguration(60, "orchestration", "Reconfiguration timeout seconds"); + + Maybe> json_parser_return = + map({{"l4_firewall", l4_firewall}, {"version", version_value}}); + EXPECT_CALL(mock_orchestration_tools, readFile(file_name)).WillOnce(Return(new_configuration)); + EXPECT_CALL(mock_orchestration_tools, jsonObjectSplitter(new_configuration, _)) + .WillOnce(Return(json_parser_return)); + EXPECT_CALL(mock_orchestration_tools, doesFileExist(l4_firewall_policy_path)).WillOnce(Return(false)); + EXPECT_CALL(mock_orchestration_tools, writeFile(l4_firewall, l4_firewall_policy_path)). + WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_status, + setServiceConfiguration("l4_firewall", l4_firewall_policy_path, OrchestrationStatusConfigType::POLICY)); + + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path + backup_extension)) + .WillOnce(Return(true)); + EXPECT_CALL(mock_orchestration_tools, copyFile(file_name, policy_file_path)).WillOnce(Return(true)); + EXPECT_CALL(mock_ml, yield(false)).Times(AnyNumber()); + + EXPECT_CALL( + mock_shell_cmd, + getExecOutput( + "/etc/cp/watchdog/cp-nano-watchdog --status --verbose --service mock access control" + " --family family1 --id id2", + _, + _ + ) + ).WillRepeatedly(Return(string("registered and running"))); + + string general_settings_path = "/my/settings/path"; + string reply_msg = "{\"id\": 1, \"error\": false, \"finished\": false, \"error_message\": \"\"}"; + stringstream reconf_status; + reconf_status + << "{" + << " \"id\": 1," + << " \"finished\": true," + << " \"error\": false," + << " \"error_message\": \"\"" + << "}"; + + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::ONE_TIME_CONN); + EXPECT_CALL( + mock_message, + sendMessage( + true, + "{\n \"id\": 1,\n \"policy_version\": \"1.0.2\"\n}", + I_Messaging::Method::POST, + string("127.0.0.1"), + l4_firewall_service_port, + conn_flags, + string("/set-new-configuration"), + string(), + _, + MessageTypeTag::GENERIC + ) + ).WillOnce(Return(Maybe(reply_msg))); + + auto func = [&] (chrono::microseconds) { set_reconf_status->performRestCall(reconf_status); }; + EXPECT_CALL(mock_ml, yield(chrono::microseconds(2000000))).WillOnce(Invoke(func)); + + + EXPECT_TRUE(i_service_controller->updateServiceConfiguration(file_name, general_settings_path)); + EXPECT_EQ(i_service_controller->getPolicyVersion(), version_value); + EXPECT_EQ(i_service_controller->getUpdatePolicyVersion(), version_value); +} diff --git a/components/security_apps/orchestration/update_communication/CMakeLists.txt b/components/security_apps/orchestration/update_communication/CMakeLists.txt new file mode 100755 index 0000000..e88e689 --- /dev/null +++ b/components/security_apps/orchestration/update_communication/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(update_communication update_communication.cc hybrid_communication.cc fog_communication.cc fog_authenticator.cc local_communication.cc) + +add_subdirectory(update_communication_ut) diff --git 
a/components/security_apps/orchestration/update_communication/fog_authenticator.cc b/components/security_apps/orchestration/update_communication/fog_authenticator.cc
new file mode 100755
index 0000000..361e40d
--- /dev/null
+++ b/components/security_apps/orchestration/update_communication/fog_authenticator.cc
@@ -0,0 +1,572 @@
+// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fog_communication.h"
+#include "rest.h"
+#include "config.h"
+#include "log_generator.h"
+#include "agent_details.h"
+#include "version.h"
+#include "sasal.h"
+
+#include
+#include
+#include
+
+SASAL_START // Orchestration - Communication
+
+using namespace std;
+using namespace cereal;
+using HTTPMethod = I_Messaging::Method;
+
+USE_DEBUG_FLAG(D_ORCHESTRATOR);
+
+function<Maybe<FogAuthenticator::AccessToken>()> FogAuthenticator::AccessTokenProvider::getAccessToken = nullptr;
+
+FogAuthenticator::AccessToken::AccessToken(const string &_token, chrono::seconds _expiration)
+        :
+    token(_token),
+    expiration(_expiration)
+{
+    received_time = Singleton::Consume<I_TimeGet>::by<FogAuthenticator>()->getMonotonicTime();
+}
+
+chrono::seconds
+FogAuthenticator::AccessToken::getRemainingTime() const
+{
+    return
+        expiration -
+        chrono::duration_cast<chrono::seconds>(
+            Singleton::Consume<I_TimeGet>::by<FogAuthenticator>()->getMonotonicTime() - received_time
+        );
+}
+
+void
+FogAuthenticator::AccessTokenProvider::doCall()
+{
+    if (getAccessToken != nullptr) {
+        auto access_token = getAccessToken();
+        if (access_token.ok()) {
+            auto encryptor = Singleton::Consume<I_Encryptor>::by<FogAuthenticator>();
+            token = encryptor->obfuscateXorBase64(access_token.unpack().getToken());
+            expiration = access_token.unpack().getRemainingTime().count();
+        }
+    }
+}
+
+FogAuthenticator::RegistrationData::RegistrationData(const string &token)
+        :
+    type(AuthenticationType::Token),
+    data(token)
+{
+}
+
+FogAuthenticator::UserCredentials::UserCredentials(const string &_client_id, const string &_shared_secret)
+        :
+    client_id(_client_id),
+    shared_secret(_shared_secret)
+{
+}
+
+void
+FogAuthenticator::UserCredentials::serialize(JSONOutputArchive &out_ar) const
+{
+    out_ar(
+        make_nvp("client_id", client_id),
+        make_nvp("shared_secret", shared_secret)
+    );
+}
+
+void
+FogAuthenticator::UserCredentials::serialize(JSONInputArchive &in_ar)
+{
+    in_ar(
+        make_nvp("client_id", client_id),
+        make_nvp("shared_secret", shared_secret)
+    );
+
+    if (client_id.empty() || shared_secret.empty()) {
+        throw cereal::Exception("Agent credentials can't be empty.");
+    }
+}
+
+void
+FogAuthenticator::RegistrationData::serialize(JSONInputArchive &in_ar)
+{
+    string type_as_string;
+    static const map<string, AuthenticationType> StringToAuthenticationType {
+        { "token", AuthenticationType::Token },
+        { "presharedsecret", AuthenticationType::PresharedSecret }
+    };
+
+    in_ar(
+        make_nvp("registration type", type_as_string),
+        make_nvp("registration data", data)
+    );
+
+    if (type_as_string.empty()) throw cereal::Exception("registration type can't be empty.");
+    if (data.empty()) throw cereal::Exception("registration data can't be empty.");
+
+    auto auth_type = StringToAuthenticationType.find(type_as_string);
+    if 
(auth_type == StringToAuthenticationType.end()) throw cereal::Exception("Unsupported registration type."); + type = auth_type->second; +} + +void +FogAuthenticator::RegistrationData::serialize(JSONOutputArchive &out_ar) const +{ + static const EnumArray AuthenticationTypeString { + "token", + "presharedsecret" + }; + + out_ar( + make_nvp("authenticationMethod", AuthenticationTypeString[type]), + make_nvp("data", data) + ); +} + +Maybe +FogAuthenticator::registerAgent( + const FogAuthenticator::RegistrationData ®_data, + const string &name, + const string &type, + const string &platform, + const string &architecture) const +{ + dbgInfo(D_ORCHESTRATOR) << "Starting agent registration to fog"; + + auto details_resolver = Singleton::Consume::by(); + RegistrationRequest request( + reg_data, + name, + type, + platform, + architecture, + details_resolver->getAgentVersion() + ); + + request << make_pair("agent_version", details_resolver->getAgentVersion()); + + if (required_security_apps.size() > 0) { + request << make_pair("require", makeSeparatedStr(required_security_apps, ";")); + } + + auto nginx_data = details_resolver->parseNginxMetadata(); + + if (nginx_data.ok()) { + string nginx_version; + string config_opt; + string cc_opt; + tie(config_opt, cc_opt, nginx_version) = nginx_data.unpack(); + request << make_pair("nginxVersion", nginx_version); + request << make_pair("configureOpt", config_opt); + request << make_pair("extraCompilerOpt", cc_opt); + } else { + dbgDebug(D_ORCHESTRATOR) << nginx_data.getErr(); + } + + for (const pair details : details_resolver->getResolvedDetails()) { + request << details; + } + + if (details_resolver->isReverseProxy()) { + request << make_pair("reverse_proxy", "true"); + } + + if (details_resolver->isKernelVersion3OrHigher()) { + request << make_pair("isKernelVersion3OrHigher", "true"); + } + + if (details_resolver->isGwNotVsx()) { + request << make_pair("isGwNotVsx", "true"); + } + + if (details_resolver->isVersionEqualOrAboveR8110()) { + request << make_pair("isVersionEqualOrAboveR8110", "true"); + } + +#if defined(gaia) || defined(smb) + if (details_resolver->compareCheckpointVersion(8100, std::greater_equal())) { + request << make_pair("isCheckpointVersionGER81", "true"); + } +#endif // gaia || smb + + auto fog_messaging = Singleton::Consume::by(); + if (fog_messaging->sendObject(request, HTTPMethod::POST, fog_address_ex + "/agents")) { + dbgDebug(D_ORCHESTRATOR) << "Agent has registered successfully."; + auto i_agent_details = Singleton::Consume::by(); + i_agent_details->setAgentId(request.getAgentId()); + i_agent_details->setProfileId(request.getProfileId()); + i_agent_details->setTenantId(request.getTenantId()); + i_agent_details->writeAgentDetails(); + + auto orc_status = Singleton::Consume::by(); + orc_status->setAgentDetails(request.getAgentId(), request.getProfileId(), request.getTenantId()); + return UserCredentials(request.getClientId(), request.getSharedSecret()); + } + + LogGen log( + "We suggest to check that your Agent Profile is defined and enforced", + ReportIS::Audience::SECURITY, + ReportIS::Severity::INFO, + ReportIS::Priority::MEDIUM, + LogField("source", "fog_communication"), + ReportIS::Tags::ORCHESTRATOR + ); + + return genError("Failed to register agent with the Fog"); +} + +Maybe +FogAuthenticator::getAccessToken(const UserCredentials &user_credentials) const +{ + dbgDebug(D_ORCHESTRATOR) << "Requesting token from fog."; + static const string grant_type_string = "/oauth/token?grant_type=client_credentials"; + TokenRequest request = 
TokenRequest(); + + auto fog_messaging = Singleton::Consume::by(); + auto sending_result = fog_messaging->sendObject( + request, + HTTPMethod::POST, + fog_address_ex + grant_type_string, + buildBasicAuthHeader(user_credentials.getClientId(), user_credentials.getSharedSecret()) + ); + + if (sending_result) { + auto data_path = getConfigurationWithDefault( + filesystem_prefix + "/data/", + "encryptor", + "Data files directory" + ); + auto orchestration_tools = Singleton::Consume::by(); + if (!orchestration_tools->writeFile(request.getAccessToken(), data_path + session_token_file_name)) { + return genError("Failed to write new access token to file"); + } + + dbgInfo(D_ORCHESTRATOR) << "New access token was saved"; + fog_messaging->loadAccessToken(); + + return AccessToken(request.getAccessToken(), chrono::seconds(request.getExpirationTime())); + } + + return genError("Failed to get access token."); +} + +Maybe +FogAuthenticator::getRegistrationData() +{ + if (!otp.empty()) { + reg_data = RegistrationData(otp); + return reg_data; + } + + const char *env_otp = getenv("NANO_AGENT_TOKEN"); + if (env_otp) { + dbgInfo(D_ORCHESTRATOR) << "Loading registration token from environment"; + return RegistrationData(env_otp); + } + if (reg_data.ok()) { + dbgInfo(D_ORCHESTRATOR) << "Loading registration token from cache"; + return reg_data; + } + + auto reg_data_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/registration-data.json", + "orchestration", + "Registration data Path" + ); + + dbgDebug(D_ORCHESTRATOR) << "Loading registration data from " << reg_data_path; + auto orchestration_tools = Singleton::Consume::by(); + auto raw_reg_data = orchestration_tools->readFile(reg_data_path); + if (!raw_reg_data.ok()) return genError(raw_reg_data.getErr()); + + dbgTrace(D_ORCHESTRATOR) << "Successfully loaded the registration data"; + auto decoded_reg_data = orchestration_tools->base64Decode(raw_reg_data.unpack()); + reg_data = orchestration_tools->jsonStringToObject(decoded_reg_data); + + if (reg_data.ok()) { + dbgTrace(D_ORCHESTRATOR) << "Registration token has been converted to an object"; + } + + return reg_data; +} + +bool +FogAuthenticator::saveCredentialsToFile(const UserCredentials &user_credentials) const +{ + auto data_path = getConfigurationWithDefault( + filesystem_prefix + "/data/", + "encryptor", + "Data files directory" + ); + + auto orchestration_tools = Singleton::Consume::by(); + auto cred_str = orchestration_tools->objectToJson(user_credentials); + if (!cred_str.ok()) { + dbgWarning(D_ORCHESTRATOR) << "Failed to parse user credentials to JSON. 
Error: " << cred_str.getErr(); + return false; + } + + return orchestration_tools->writeFile(cred_str.unpack(), data_path + user_cred_file_name); +} + +void +FogAuthenticator::initRestAPI() +{ + AccessTokenProvider::getAccessToken = [this] () { + return access_token; + }; + + auto rest = Singleton::Consume::by(); + rest->addRestCall(RestAction::SHOW, "access-token"); +} + +Maybe +FogAuthenticator::getCredentialsFromFile() const +{ + auto data_path = getConfigurationWithDefault( + filesystem_prefix + "/data/", + "encryptor", + "Data files directory" + ); + + auto orchestration_tools = Singleton::Consume::by(); + auto encrypted_cred = orchestration_tools->readFile(data_path + user_cred_file_name); + if (!encrypted_cred.ok()) return genError(encrypted_cred.getErr()); + + dbgTrace(D_ORCHESTRATOR) << "Read the user credentials from the file"; + return orchestration_tools->jsonStringToObject(encrypted_cred.unpack()); +} + +Maybe +FogAuthenticator::getCredentials() +{ + auto maybe_credentials = getCredentialsFromFile(); + if (maybe_credentials.ok()) { + return maybe_credentials; + } + + auto reg_data = getRegistrationData(); + if (!reg_data.ok()) { + return genError("Failed to load a valid registration token, Error: " + reg_data.getErr()); + } + + auto details_resolver = Singleton::Consume::by(); + Maybe name = details_resolver->getHostname(); + if (!name.ok()) return name.passErr(); + + Maybe platform = details_resolver->getPlatform(); + if (!platform.ok()) return platform.passErr(); + + Maybe arch = details_resolver->getArch(); + if (!arch.ok()) return arch.passErr(); + + string type = getConfigurationWithDefault("Embedded", "orchestration", "Agent type"); + maybe_credentials = registerAgent(reg_data.unpack(), *name, type, *platform, *arch); + + auto orc_status = Singleton::Consume::by(); + orc_status->setRegistrationDetails(*name, type, *platform, *arch); + + if (!maybe_credentials.ok()) return maybe_credentials; + + auto credentials = maybe_credentials.unpack(); + auto token_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/registration-data.json", + "orchestration", + "Registration data Path" + ); + + auto orchestration_tools = Singleton::Consume::by(); + if (saveCredentialsToFile(credentials)) { + if (!orchestration_tools->removeFile(token_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to remove one time token file"; + } + return credentials; + } + + dbgWarning(D_ORCHESTRATOR) << "Failed to save credentials to file"; + Singleton::Consume::by()->addOneTimeRoutine( + I_MainLoop::RoutineType::Offline, + [this, credentials, token_path] () + { + auto orchestration_tools = Singleton::Consume::by(); + static uint retry_counter = 1; + while (!saveCredentialsToFile(credentials)) { + dbgTrace(D_ORCHESTRATOR) << "Failed to save credentials to file, retry number: " << retry_counter++; + Singleton::Consume::by()->yield(chrono::seconds(60)); + } + + if (!orchestration_tools->removeFile(token_path)) { + dbgWarning(D_ORCHESTRATOR) << "Failed to remove one time token file"; + } + }, + "Fog credential save to file" + ); + + return credentials; +} + +string +FogAuthenticator::buildBasicAuthHeader(const string &username, const string &pass) const +{ + auto orchestration_tools = Singleton::Consume::by(); + auto auth_encode = orchestration_tools->base64Encode(username + ":" + pass); + return "Authorization: Basic " + auth_encode + "\r\n"; +} + +string +FogAuthenticator::buildOAuth2Header(const string &token) const +{ + return "Authorization: Bearer " + token + "\r\n"; +} + +void 
+FogAuthenticator::setAddressExtenesion(const std::string &extension) +{ + fog_address_ex = extension; +} + + +Maybe +FogAuthenticator::authenticateAgent() +{ + const int min_expiration_time = 10; + if (!credentials.ok()) { + dbgDebug(D_ORCHESTRATOR) << "Getting Agent credentials."; + + auto orc_status = Singleton::Consume::by(); + credentials = getCredentials(); + if (!credentials.ok()) { + orc_status->setFieldStatus( + OrchestrationStatusFieldType::REGISTRATION, + OrchestrationStatusResult::FAILED, + credentials.getErr() + ); + return genError(credentials.getErr()); + } + orc_status->setFieldStatus( + OrchestrationStatusFieldType::REGISTRATION, + OrchestrationStatusResult::SUCCESS + ); + } + + auto mainloop = Singleton::Consume::by(); + if (!mainloop->doesRoutineExist(routine)) { + routine = mainloop->addOneTimeRoutine( + I_MainLoop::RoutineType::RealTime, + [this, min_expiration_time] () + { + uint expiration_time; + uint pre_expire_time = 0; + do { + expiration_time = 20; + auto orc_status = Singleton::Consume::by(); + access_token = getAccessToken(credentials.unpack()); + if (access_token.ok()) { + pre_expire_time = getConfigurationWithDefault( + 120, + "fog communication", + "Time (seconds) to renew token prior its expiration" + ); + expiration_time = access_token.unpack().getExpiration(); + dbgInfo(D_ORCHESTRATOR) << "New token was received, expiration time: " << expiration_time; + orc_status->setFieldStatus( + OrchestrationStatusFieldType::REGISTRATION, + OrchestrationStatusResult::SUCCESS + ); + } else { + dbgWarning(D_ORCHESTRATOR) + << "Failed to receive access token. Error: " << access_token.getErr(); + orc_status->setFieldStatus( + OrchestrationStatusFieldType::REGISTRATION, + OrchestrationStatusResult::FAILED, + access_token.getErr() + ); + } + int next_session_req = max( + static_cast(expiration_time - pre_expire_time), + min_expiration_time + ); + dbgDebug(D_ORCHESTRATOR) + << "Schedule the next re-activate session token. 
Seconds: " + << next_session_req; + Singleton::Consume::by()->yield(chrono::seconds(next_session_req)); + } while (1); + }, + "Fog communication token periodic update", + true + ); + // Wait for the access token mainloop + mainloop->yield(chrono::seconds(min_expiration_time + 1)); + } + + if (!access_token.ok()) return genError(access_token.getErr()); + return Maybe(); +} + +void +FogAuthenticator::preload() +{ + registerExpectedConfiguration("orchestration", "Agent type"); + registerExpectedConfiguration("orchestration", "OTP Token Path"); + registerExpectedConfiguration("orchestration", "User Credentials Path"); + registerExpectedConfiguration("fog communication", "Time (seconds) to renew token prior its expiration"); +} + +void +FogAuthenticator::loadRequiredSecurityApps() +{ + auto required_apps_file_path = getConfigurationWithDefault( + filesystem_prefix + "/conf/support-practices.txt", + "orchestration", + "Supported practices file path" + ); + + auto orchestration_tools = Singleton::Consume::by(); + if (orchestration_tools->doesFileExist(required_apps_file_path)) { + try { + ifstream input_stream(required_apps_file_path); + if (!input_stream) { + dbgDebug(D_ORCHESTRATOR) + << "Cannot open the file with required security apps" + << "File: " << required_apps_file_path; + return; + } + + string required_security_app; + while (getline(input_stream, required_security_app)) { + required_security_apps.push_back(required_security_app); + } + input_stream.close(); + + } catch (const ifstream::failure &exception) { + dbgWarning(D_ORCHESTRATOR) + << "Cannot read the file with required security app lists." + << " File: " << required_apps_file_path + << " Error: " << exception.what(); + } + } +} + +void +FogAuthenticator::init() +{ + filesystem_prefix = getFilesystemPathConfig(); + dbgTrace(D_ORCHESTRATOR) << "Initializing Fog communication, file system path prefix: " << filesystem_prefix; + loadRequiredSecurityApps(); + initRestAPI(); +} + +SASAL_END diff --git a/components/security_apps/orchestration/update_communication/fog_communication.cc b/components/security_apps/orchestration/update_communication/fog_communication.cc new file mode 100755 index 0000000..65cf396 --- /dev/null +++ b/components/security_apps/orchestration/update_communication/fog_communication.cc @@ -0,0 +1,89 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
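A note on the token-renewal loop in FogAuthenticator::authenticateAgent above: the next renewal is scheduled at the token expiration minus a configurable pre-expiration margin, clamped to a small floor so a failed request retries quickly. The sketch below is illustrative only; computeNextRenewalDelay() is a hypothetical helper, and only the numeric defaults (120s margin, 20s fallback, 10s floor) come from the code above.

#include <algorithm>
#include <chrono>

// Hypothetical helper mirroring the delay computation in the renewal routine above.
std::chrono::seconds
computeNextRenewalDelay(bool token_received, int token_expiration_sec, int pre_expire_sec)
{
    const int min_expiration_time = 10;                                 // floor used above
    int expiration_time = token_received ? token_expiration_sec : 20;  // short fallback on failure
    int next = std::max(expiration_time - pre_expire_sec, min_expiration_time);
    return std::chrono::seconds(next);
}

// Example: a 3600s token with the default 120s margin is renewed after 3480s;
// when no token is received the delay collapses to the 10-20 second range.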
+ +#include "fog_communication.h" +#include "rest.h" +#include "config.h" +#include "log_generator.h" +#include "agent_details.h" +#include "version.h" +#include "sasal.h" + +#include +#include +#include + +SASAL_START // Orchestration - Communication + +using namespace std; +using namespace cereal; +using HTTPMethod = I_Messaging::Method; + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +Maybe +FogCommunication::getUpdate(CheckUpdateRequest &request) +{ + if (!access_token.ok()) return genError("Acccess Token not available."); + + auto unpacked_access_token = access_token.unpack().getToken(); + static const string check_update_str = "/api/v2/agents/resources"; + auto request_status = Singleton::Consume::by()->sendObject( + request, + HTTPMethod::POST, + fog_address_ex + check_update_str, + buildOAuth2Header(unpacked_access_token) + ); + + if (!request_status) { + dbgDebug(D_ORCHESTRATOR) << "Failed to get response after check update request."; + return genError("Failed to request updates"); + } + dbgDebug(D_ORCHESTRATOR) << "Got response after check update request."; + return Maybe(); +} + +Maybe +FogCommunication::downloadAttributeFile(const GetResourceFile &resourse_file) +{ + if (!access_token.ok()) return genError("Acccess Token not available."); + + auto unpacked_access_token = access_token.unpack().getToken(); + + static const string file_attribute_str = "/api/v2/agents/resources/"; + Maybe attribute_file = Singleton::Consume::by()->downloadFile( + resourse_file, + resourse_file.getRequestMethod(), + fog_address_ex + file_attribute_str + resourse_file.getFileName(), + buildOAuth2Header(unpacked_access_token) // Header + ); + + return attribute_file; +} + +Maybe +FogCommunication::sendPolicyVersion(const string &policy_version) const +{ + PolicyVersionPatchRequest request(policy_version); + auto fog_messaging = Singleton::Consume::by(); + if (fog_messaging->sendNoReplyObject(request, HTTPMethod::PATCH, fog_address_ex + "/agents")) { + dbgInfo(D_ORCHESTRATOR) + << "Patch request was sent successfully to the fog." + << " Policy version: " + << policy_version; + return Maybe(); + } + return genError("Failed to patch policy version"); +} + +SASAL_END diff --git a/components/security_apps/orchestration/update_communication/hybrid_communication.cc b/components/security_apps/orchestration/update_communication/hybrid_communication.cc new file mode 100755 index 0000000..946af58 --- /dev/null +++ b/components/security_apps/orchestration/update_communication/hybrid_communication.cc @@ -0,0 +1,128 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
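For reference alongside FogCommunication below: FogAuthenticator builds two kinds of Authorization headers, HTTP Basic for the client-credentials token request and OAuth2 Bearer for the subsequent update and download calls. The sketch below restates that construction in isolation; base64Encode() stands in for I_OrchestrationTools::base64Encode and is only forward-declared here, and the function names are illustrative rather than the member functions themselves.

#include <string>

std::string base64Encode(const std::string &input);   // assumed helper (provided by orchestration tools)

// "Authorization: Basic <base64(client_id:shared_secret)>\r\n", used for the /oauth/token request
std::string makeBasicAuthHeader(const std::string &client_id, const std::string &secret)
{
    return "Authorization: Basic " + base64Encode(client_id + ":" + secret) + "\r\n";
}

// "Authorization: Bearer <access token>\r\n", used for update and download requests
std::string makeBearerHeader(const std::string &token)
{
    return "Authorization: Bearer " + token + "\r\n";
}

// Example: client id "id" with secret "secret" yields "Authorization: Basic aWQ6c2VjcmV0\r\n".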
+ +#include "hybrid_communication.h" +#include "rest.h" +#include "config.h" +#include "log_generator.h" +#include "agent_details.h" +#include "version.h" +#include "sasal.h" + +#include +#include +#include + +SASAL_START // Orchestration - Communication + +using namespace std; +using HTTPMethod = I_Messaging::Method; + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +void +HybridCommunication::init() +{ + FogAuthenticator::init(); + dbgTrace(D_ORCHESTRATOR) << "Initializing the Hybrid Communication Component"; + if (getConfigurationFlag("otp") != "") { + otp = getConfigurationFlag("otp"); + } else { + otp = "cp-3fb5c718-5e39-47e6-8d5e-99b4bc5660b74b4b7fc8-5312-451d-a763-aaf7872703c0"; + } +} + +string +HybridCommunication::getChecksum(const string &policy_version) +{ + dbgFlow(D_ORCHESTRATOR) << "Checking the policy Checksum"; + string clean_plicy_version = policy_version; + if (!clean_plicy_version.empty() && clean_plicy_version[clean_plicy_version.size() - 1] == '\n') + clean_plicy_version.erase(clean_plicy_version.size() - 1); + + curr_policy = Singleton::Consume::by()->parsePolicy(clean_plicy_version); + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + Maybe file_checksum = orchestration_tools->calculateChecksum( + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE, + Singleton::Consume::by()->getPolicyPath() + ); + + if (!file_checksum.ok()) { + dbgWarning(D_ORCHESTRATOR) << "Failed the policy checksum calculation"; + return ""; + } + return file_checksum.unpack(); +} + +Maybe +HybridCommunication::getNewVersion() +{ + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + return orchestration_tools->readFile("/etc/cp/conf/k8s-policy-check.trigger"); +} + +Maybe +HybridCommunication::getUpdate(CheckUpdateRequest &request) +{ + dbgFlow(D_ORCHESTRATOR) << "Getting policy update in an Hybrid Communication"; + auto maybe_new_version = getNewVersion(); + if (!maybe_new_version.ok() || maybe_new_version == curr_version) { + request = CheckUpdateRequest("", "", "", "", "", ""); + dbgDebug(D_ORCHESTRATOR) << "No new version is currently available"; + return Maybe(); + } + + auto policy_checksum = request.getPolicy(); + + auto offline_policy_checksum = getChecksum(maybe_new_version.unpack()); + + string policy_response = ""; + + if (!policy_checksum.ok() || offline_policy_checksum != policy_checksum.unpack()) { + policy_response = offline_policy_checksum; + } + + dbgDebug(D_ORCHESTRATOR) + << "Local update response: " + << " policy: " + << (policy_response.empty() ? 
"has no change," : "has new update," ); + + request = CheckUpdateRequest("", policy_response, "", "", "", ""); + curr_version = *maybe_new_version; + + return Maybe(); +} + +Maybe +HybridCommunication::downloadAttributeFile(const GetResourceFile &resourse_file) +{ + auto file_name = resourse_file.getFileName(); + + if (file_name.compare("policy") == 0) { + return curr_policy; + } + + dbgWarning(D_ORCHESTRATOR) << "Failed downloading the attribute files"; + return string(""); +} + +Maybe +HybridCommunication::sendPolicyVersion(const string &policy_version) const +{ + dbgFlow(D_ORCHESTRATOR); + policy_version.empty(); + return Maybe(); +} + +SASAL_END diff --git a/components/security_apps/orchestration/update_communication/local_communication.cc b/components/security_apps/orchestration/update_communication/local_communication.cc new file mode 100755 index 0000000..61b39a0 --- /dev/null +++ b/components/security_apps/orchestration/update_communication/local_communication.cc @@ -0,0 +1,187 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "local_communication.h" +#include "config.h" +#include "sasal.h" + +SASAL_START // Orchestration - Communication + +using namespace std; + +USE_DEBUG_FLAG(D_ORCHESTRATOR); + +void +LocalCommunication::init() +{ + filesystem_prefix = getFilesystemPathConfig(); + dbgTrace(D_ORCHESTRATOR) << "Initializing Local communication, file system path prefix: " << filesystem_prefix; +} + +void +LocalCommunication::preload() +{ + registerExpectedConfiguration("orchestration", "Offline manifest file path"); + registerExpectedConfiguration("orchestration", "Offline settings file path"); + registerExpectedConfiguration("orchestration", "Offline policy file path"); + registerExpectedConfiguration("orchestration", "Offline Data file path"); +} + +Maybe +LocalCommunication::authenticateAgent() +{ + return Maybe(); +} + +string +LocalCommunication::getChecksum(const string &file_path) +{ + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + Maybe file_checksum = orchestration_tools->calculateChecksum( + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE, + file_path + ); + + if (!file_checksum.ok()) return ""; + return file_checksum.unpack(); +} + +Maybe +LocalCommunication::getUpdate(CheckUpdateRequest &request) +{ + auto manifest_checksum = request.getManifest(); + auto policy_checksum = request.getPolicy(); + auto settings_checksum =request.getSettings(); + auto data_checksum = request.getData(); + + auto offline_manifest_checksum = getChecksum( + getConfigurationWithDefault( + filesystem_prefix + "/conf/offline_manifest.json", + "orchestration", + "Offline Manifest file path" + ) + ); + auto offline_policy_checksum = getChecksum( + getConfigurationWithDefault( + filesystem_prefix + "/conf/offline_policy.json", + "orchestration", + "Offline Policy file path" + ) + ); + auto offline_settings_checksum = getChecksum( + getConfigurationWithDefault( + filesystem_prefix + "/conf/offline_settings.json", + "orchestration", + "Offline 
Settings file path" + ) + ); + auto offline_data_checksum = getChecksum( + getConfigurationWithDefault( + filesystem_prefix + "/conf/data/offline_data.json", + "orchestration", + "Offline Data file path" + ) + ); + + string manifest_response = ""; + string policy_response = ""; + string settings_response = ""; + string data_response = ""; + + if (!manifest_checksum.ok() || offline_manifest_checksum != manifest_checksum.unpack()) { + manifest_response = offline_manifest_checksum; + } + + if (!policy_checksum.ok() || offline_policy_checksum != policy_checksum.unpack()) { + policy_response = offline_policy_checksum; + } + + if (!settings_checksum.ok() || offline_settings_checksum != settings_checksum.unpack()) { + settings_response = offline_settings_checksum; + } + + if (!data_checksum.ok() || offline_data_checksum != data_checksum.unpack()) { + data_response = offline_data_checksum; + } + + dbgDebug(D_ORCHESTRATOR) << "Local update response, " + << " manifest: " << (manifest_response.empty() ? "has no change," : "has new update,") + << " policy: " << (policy_response.empty() ? "has no change," : "has new update," ) + << " settings: " << (settings_response.empty() ? "has no change" : "has new update") + << " data: " << (data_response.empty() ? "has no change" : "has new update"); + + request = CheckUpdateRequest(manifest_response, policy_response, settings_response, data_response, "", ""); + return Maybe(); +} + +Maybe +LocalCommunication::downloadAttributeFile(const GetResourceFile &resourse_file) +{ + auto file_name = resourse_file.getFileName(); + + I_OrchestrationTools *orchestration_tools = Singleton::Consume::by(); + if (file_name.compare("policy") == 0) { + return orchestration_tools->readFile(getConfigurationWithDefault( + filesystem_prefix + "/conf/offline_policy.json", + "orchestration", + "Offline Policy file path" + )); + } + if (file_name.compare("manifest") == 0) { + return orchestration_tools->readFile(getConfigurationWithDefault( + filesystem_prefix + "/conf/offline_manifest.json", + "orchestration", + "Offline Manifest file path" + )); + } + if (file_name.compare("settings") == 0) { + return orchestration_tools->readFile(getConfigurationWithDefault( + filesystem_prefix + "/conf/offline_settings.json", + "orchestration", + "Offline Settings file path" + )); + } + if (file_name.compare("virtualSettings") == 0) { + return orchestration_tools->readFile(getConfigurationWithDefault( + filesystem_prefix + "/conf/offline_virtual_manifest.json", + "orchestration", + "Offline virtual Manifest file path" + )); + } + if (file_name.compare("virtualPolicy") == 0) { + return orchestration_tools->readFile(getConfigurationWithDefault( + filesystem_prefix + "/conf/offline_virtual_settings.json", + "orchestration", + "Offline virtual Settings file path" + )); + } + + dbgError(D_ORCHESTRATOR) << "Unknown resourse file name " << file_name; + return genError("Failed to detect resourse file name " + file_name); +} + +void +LocalCommunication::setAddressExtenesion(const string &) +{ + dbgTrace(D_ORCHESTRATOR) << "Agent in offline mode, no need for address setting"; + return; +} + +Maybe +LocalCommunication::sendPolicyVersion(const string &) const +{ + dbgTrace(D_ORCHESTRATOR) << "Agent in offline mode, no need to send policy version"; + return Maybe(); +} + +SASAL_END diff --git a/components/security_apps/orchestration/update_communication/update_communication.cc b/components/security_apps/orchestration/update_communication/update_communication.cc new file mode 100755 index 0000000..a5ac217 --- 
/dev/null +++ b/components/security_apps/orchestration/update_communication/update_communication.cc @@ -0,0 +1,148 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "update_communication.h" + +#include +#include +#include + +#include "rest.h" +#include "config.h" +#include "log_generator.h" +#include "agent_details.h" +#include "version.h" +#include "sasal.h" +#include "i_encryptor.h" +#include "fog_authenticator.h" +#include "fog_communication.h" +#include "local_communication.h" +#include "hybrid_communication.h" + +SASAL_START // Orchestration - Communication + +using namespace std; + +USE_DEBUG_FLAG(D_ORCHESTRATOR); +class UpdateCommunication::Impl + : + public ServerRest, + Singleton::Provide::From +{ +public: + void + doCall() override + { + Singleton::Consume::by()->stopAll(); + status = "Operation mode had changed successfully"; + } + + void + preload() + { + FogAuthenticator::preload(); + LocalCommunication::preload(); + } + + void + init() + { + auto rest = Singleton::Consume::by(); + rest->addRestCall(RestAction::SET, "orchestration-mode"); + setMode(); + } + + Maybe + authenticateAgent() + { + return i_update_comm_impl->authenticateAgent(); + } + + Maybe + getUpdate(CheckUpdateRequest &request) override + { + return i_update_comm_impl->getUpdate(request); + } + + Maybe + sendPolicyVersion(const string &policy_version) const override + { + return i_update_comm_impl->sendPolicyVersion(policy_version); + } + + Maybe + downloadAttributeFile(const GetResourceFile &resourse_file) override + { + return i_update_comm_impl->downloadAttributeFile(resourse_file); + } + + void + setAddressExtenesion(const string &extension) override + { + i_update_comm_impl->setAddressExtenesion(extension); + } + + void + fini() + { + i_update_comm_impl = nullptr; + } + +private: + void + setMode() + { + if (getConfigurationFlag("orchestration-mode") == "offline_mode") { + i_update_comm_impl = make_unique(); + LocalCommunication *local_comm = static_cast(i_update_comm_impl.get()); + local_comm->init(); + return; + } else if (getConfigurationFlag("orchestration-mode") == "hybrid_mode") { + i_update_comm_impl = make_unique(); + HybridCommunication *local_comm = static_cast(i_update_comm_impl.get()); + local_comm->init(); + return; + } + + i_update_comm_impl = make_unique(); + FogCommunication *fog_comm = static_cast(i_update_comm_impl.get()); + fog_comm->init(); + } + + std::unique_ptr i_update_comm_impl = nullptr; + S2C_LABEL_PARAM(string, status, "status"); +}; + +UpdateCommunication::UpdateCommunication() : Component("UpdateCommunication"), pimpl(make_unique()) {} + +UpdateCommunication::~UpdateCommunication() {} + +void +UpdateCommunication::preload() +{ + pimpl->preload(); +} + +void +UpdateCommunication::init() +{ + pimpl->init(); +} + +void +UpdateCommunication::fini() +{ + pimpl->fini(); +} + +SASAL_END diff --git a/components/security_apps/orchestration/update_communication/update_communication_ut/CMakeLists.txt 
b/components/security_apps/orchestration/update_communication/update_communication_ut/CMakeLists.txt new file mode 100755 index 0000000..997e434 --- /dev/null +++ b/components/security_apps/orchestration/update_communication/update_communication_ut/CMakeLists.txt @@ -0,0 +1,7 @@ +link_directories(${BOOST_ROOT}/lib) + +add_unit_test( + update_communication_ut + "local_communication_ut.cc" + "rest;version;orchestration_modules;update_communication;singleton;config;metric;event_is;logging;agent_details;-lboost_regex;" +) diff --git a/components/security_apps/orchestration/update_communication/update_communication_ut/local_communication_ut.cc b/components/security_apps/orchestration/update_communication/update_communication_ut/local_communication_ut.cc new file mode 100755 index 0000000..22440be --- /dev/null +++ b/components/security_apps/orchestration/update_communication/update_communication_ut/local_communication_ut.cc @@ -0,0 +1,233 @@ +#include + +#include "local_communication.h" +#include "cptest.h" +#include "mock/mock_orchestration_tools.h" +#include "config.h" +#include "config_component.h" +#include "orchestration_status.h" +#include "mock/mock_mainloop.h" +#include "mock/mock_time_get.h" + +using namespace std; +using namespace testing; + +ostream & +operator<<(ostream &os, const tuple &) +{ + return os; +} + +class LocalCommunicationTest: public Test +{ +public: + LocalCommunicationTest() + { + local_communication.init(); + } + + void + preload() + { + local_communication.preload(); + } + + Maybe + authenticateAgent() + { + return local_communication.authenticateAgent(); + } + + Maybe + sendPolicyVersion(const string &version) + { + return local_communication.sendPolicyVersion(version); + } + + Maybe + downloadAttributeFile(const GetResourceFile &resourse_file) + { + return local_communication.downloadAttributeFile(resourse_file); + } + + void + setAddressExtenesion(const string &ext) + { + local_communication.setAddressExtenesion(ext); + } + + Maybe + checkUpdate(CheckUpdateRequest &request) + { + return local_communication.getUpdate(request); + } + + NiceMock mock_mainloop; + NiceMock mock_timer; + ::Environment env; + ConfigComponent config_comp; + StrictMock mock_orc_tools; + OrchestrationStatus orc_status; + +private: + LocalCommunication local_communication; +}; + +TEST_F(LocalCommunicationTest, doNothing) +{ +} + +TEST_F(LocalCommunicationTest, registerConfig) +{ + env.preload(); + env.init(); + + preload(); + string config_json = + "{\n" + " \"orchestration\": {\n" + " \"Offline manifest file path\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"ABC\"\n" + " }\n" + " ],\n" + " \"Offline policy file path\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"qwe\"\n" + " }\n" + " ],\n" + " \"Offline settings file path\": [\n" + " {\n" + " \"context\": \"All()\",\n" + " \"value\": \"CCCC\"\n" + " }\n" + " ]\n" + " }\n" + "}"; + istringstream ss(config_json); + Singleton::Consume::from(config_comp)->loadConfiguration(ss); + + EXPECT_THAT(getConfiguration("orchestration", "Offline manifest file path"), IsValue("ABC")); + EXPECT_THAT(getConfiguration("orchestration", "Offline policy file path"), IsValue("qwe")); + EXPECT_THAT(getConfiguration("orchestration", "Offline settings file path"), IsValue("CCCC")); + + env.fini(); +} + +TEST_F(LocalCommunicationTest, authenticateAgent) +{ + auto authenticat_res = authenticateAgent(); + EXPECT_TRUE(authenticat_res.ok()); +} + +TEST_F(LocalCommunicationTest, downloadManifest) +{ + string new_manifest_string = 
"new manifest"; + EXPECT_CALL(mock_orc_tools, readFile("/etc/cp/conf/offline_manifest.json")).WillOnce(Return(new_manifest_string)); + GetResourceFile resourse_file(GetResourceFile::ResourceFileType::MANIFEST); + auto downloaded_string = downloadAttributeFile(resourse_file); + EXPECT_TRUE(downloaded_string.ok()); + EXPECT_EQ(downloaded_string.unpack(), new_manifest_string); +} + +TEST_F(LocalCommunicationTest, checkUpdateWithNoUpdate) +{ + Maybe manifest_checksum(string("1")); + Maybe policy_checksum(string("2")); + Maybe settings_checksum(string("3")); + Maybe data_checksum(string("4")); + EXPECT_CALL(mock_orc_tools, calculateChecksum( + Package::ChecksumTypes::SHA256, "/etc/cp/conf/offline_manifest.json")).WillOnce(Return(manifest_checksum)); + EXPECT_CALL(mock_orc_tools, calculateChecksum( + Package::ChecksumTypes::SHA256, "/etc/cp/conf/offline_policy.json")).WillOnce(Return(policy_checksum)); + EXPECT_CALL(mock_orc_tools, calculateChecksum( + Package::ChecksumTypes::SHA256, "/etc/cp/conf/offline_settings.json")).WillOnce(Return(settings_checksum)); + EXPECT_CALL(mock_orc_tools, calculateChecksum( + Package::ChecksumTypes::SHA256, "/etc/cp/conf/data/offline_data.json")).WillOnce(Return(data_checksum)); + + CheckUpdateRequest request( + *manifest_checksum, + *policy_checksum, + *settings_checksum, + *data_checksum, + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE_STR, + "123" + ); + + auto update_response = checkUpdate(request); + EXPECT_TRUE(update_response.ok()); + + Maybe manifest = request.getManifest(); + EXPECT_FALSE(manifest.ok()); + + Maybe policy = request.getPolicy(); + EXPECT_FALSE(policy.ok()); + + Maybe settings = request.getSettings(); + EXPECT_FALSE(settings.ok()); + + Maybe data = request.getData(); + EXPECT_FALSE(data.ok()); +} + +TEST_F(LocalCommunicationTest, checkUpdateWithPolicyUpdate) +{ + Maybe manifest_checksum(string("1")); + Maybe policy_checksum(string("2")); + Maybe new_policy_checksum(string("22")); + Maybe settings_checksum(string("3")); + Maybe data_checksum(string("4")); + + EXPECT_CALL( + mock_orc_tools, + calculateChecksum(Package::ChecksumTypes::SHA256, "/etc/cp/conf/offline_manifest.json") + ).WillOnce(Return(manifest_checksum)); + EXPECT_CALL( + mock_orc_tools, + calculateChecksum(Package::ChecksumTypes::SHA256, "/etc/cp/conf/offline_policy.json") + ).WillOnce(Return(new_policy_checksum)); + EXPECT_CALL( + mock_orc_tools, + calculateChecksum(Package::ChecksumTypes::SHA256, "/etc/cp/conf/offline_settings.json") + ).WillOnce(Return(settings_checksum)); + EXPECT_CALL( + mock_orc_tools, + calculateChecksum(Package::ChecksumTypes::SHA256, "/etc/cp/conf/data/offline_data.json") + ).WillOnce(Return(data_checksum)); + + CheckUpdateRequest request( + *manifest_checksum, + *policy_checksum, + *settings_checksum, + *data_checksum, + I_OrchestrationTools::SELECTED_CHECKSUM_TYPE_STR, + "123" + ); + + auto update_response = checkUpdate(request); + EXPECT_TRUE(update_response.ok()); + + Maybe manifest = request.getManifest(); + EXPECT_FALSE(manifest.ok()); + + EXPECT_THAT(request.getPolicy(), IsValue("22")); + + Maybe settings = request.getSettings(); + EXPECT_FALSE(settings.ok()); + + Maybe data = request.getData(); + EXPECT_FALSE(data.ok()); +} + +TEST_F(LocalCommunicationTest, setAddressExtenesion) +{ + setAddressExtenesion("Test"); +} + +TEST_F(LocalCommunicationTest, sendPolicyVersion) +{ + auto res = sendPolicyVersion("12"); + EXPECT_TRUE(res.ok()); +} diff --git a/components/security_apps/waap/CMakeLists.txt b/components/security_apps/waap/CMakeLists.txt new 
file mode 100755 index 0000000..47b7116 --- /dev/null +++ b/components/security_apps/waap/CMakeLists.txt @@ -0,0 +1,13 @@ +add_library(waap + waap_component.cc + waap_component_impl.cc + first_request_object.cc +) + +add_subdirectory(waap_clib) +add_subdirectory(reputation) + +include_directories(include) +include_directories(reputation) + +install(DIRECTORY resources DESTINATION http_transaction_handler_service USE_SOURCE_PERMISSIONS) diff --git a/components/security_apps/waap/first_request_object.cc b/components/security_apps/waap/first_request_object.cc new file mode 100755 index 0000000..0bf3dcd --- /dev/null +++ b/components/security_apps/waap/first_request_object.cc @@ -0,0 +1,57 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "first_request_object.h" +#include "tag_and_enum_management.h" + +FirstRequestNotificationObject::FirstRequestNotificationObject( + std::string asset_id, + std::string asset_name, + ReportIS::Severity severity +): + m_asset_id(asset_id), + m_asset_name(asset_name), + m_severity(severity) +{} + +FirstRequestNotificationObject::~FirstRequestNotificationObject() +{} + +void FirstRequestNotificationObject::serialize(cereal::JSONOutputArchive& ar) const +{ + ar.setNextName("notificationConsumerData"); + ar.startNode(); + ar.setNextName("firstRequestNotificationConsumers"); + ar.startNode(); + ar(cereal::make_nvp("assetId", m_asset_id)); + ar(cereal::make_nvp("assetName", m_asset_name)); + ar(cereal::make_nvp("originalEventSeverity", TagAndEnumManagement::convertToString(m_severity))); + ar.finishNode(); + ar.finishNode(); +} + +std::string FirstRequestNotificationObject::toString() const +{ + std::stringstream ss; + { + cereal::JSONOutputArchive ar(ss); + serialize(ar); + } + + return ss.str(); +} + +std::ostream& operator<<(std::ostream& os, const FirstRequestNotificationObject& obj) +{ + return os << obj.toString(); +} diff --git a/components/security_apps/waap/first_request_object.h b/components/security_apps/waap/first_request_object.h new file mode 100755 index 0000000..19ea482 --- /dev/null +++ b/components/security_apps/waap/first_request_object.h @@ -0,0 +1,42 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __FIRST_REQUEST_NOTIFICATION_OBJECT_H__ +#define __FIRST_REQUEST_NOTIFICATION_OBJECT_H__ + +#include +#include +#include "cereal/archives/json.hpp" +#include "report/report.h" + +class FirstRequestNotificationObject +{ +public: + explicit FirstRequestNotificationObject( + std::string asset_id, + std::string asset_name, + ReportIS::Severity severity + ); + virtual ~FirstRequestNotificationObject(); + void serialize(cereal::JSONOutputArchive& ar) const; + + friend std::ostream& operator<<(std::ostream& os, const FirstRequestNotificationObject& obj); + +private: + std::string toString() const; + + std::string m_asset_id; + std::string m_asset_name; + ReportIS::Severity m_severity; +}; +#endif diff --git a/components/security_apps/waap/include/WaapDefines.h b/components/security_apps/waap/include/WaapDefines.h new file mode 100755 index 0000000..7910fbc --- /dev/null +++ b/components/security_apps/waap/include/WaapDefines.h @@ -0,0 +1,30 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#define BACKUP_DIRECTORY_PATH "/etc/cp/conf/waap/" +// reduce from 2048 in order to accomodate in 10K max log size in Kibana +#define MAX_LOG_FIELD_SIZE 1536 +// maximum bytes response body log field size can reduce from request body log +#define MIN_RESP_BODY_LOG_FIELD_SIZE (std::size_t{500}) +// size of clean values LRU cache +#define SIGS_APPLY_CLEAN_CACHE_CAPACITY 4096 +// size of suspicious values LRU cache +#define SIGS_APPLY_SUSPICIOUS_CACHE_CAPACITY 4096 +// size of SampleType cache capacity +#define SIGS_SAMPLE_TYPE_CACHE_CAPACITY 4096 + +// ScoreBuilder pool names +#define KEYWORDS_SCORE_POOL_BASE "base_scores" +#define KEYWORDS_SCORE_POOL_HEADERS "headers_scores" diff --git a/components/security_apps/waap/include/i_deepAnalyzer.h b/components/security_apps/waap/include/i_deepAnalyzer.h new file mode 100755 index 0000000..1931061 --- /dev/null +++ b/components/security_apps/waap/include/i_deepAnalyzer.h @@ -0,0 +1,24 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +class IWaf2Transaction; +class IWaapConfig; +struct AnalysisResult; + +class I_DeepAnalyzer { +public: + virtual AnalysisResult analyzeData(IWaf2Transaction* waf2Trans, const IWaapConfig* pSitePolicy) = 0; + virtual ~I_DeepAnalyzer() {}; +}; diff --git a/components/security_apps/waap/include/i_ignoreSources.h b/components/security_apps/waap/include/i_ignoreSources.h new file mode 100755 index 0000000..3e5cf15 --- /dev/null +++ b/components/security_apps/waap/include/i_ignoreSources.h @@ -0,0 +1,26 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __I_IGNORE_SOURCES_H__ +#define __I_IGNORE_SOURCES_H__ +#include +#include + +class I_IgnoreSources +{ +public: + virtual std::vector* getSourcesToIgnore() = 0; + virtual bool ready() = 0; +}; + +#endif diff --git a/components/security_apps/waap/include/i_indicatorsFilter.h b/components/security_apps/waap/include/i_indicatorsFilter.h new file mode 100755 index 0000000..83dc437 --- /dev/null +++ b/components/security_apps/waap/include/i_indicatorsFilter.h @@ -0,0 +1,39 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include "../waap_clib/WaapKeywords.h" +#include "i_serialize.h" +#include +#include + +class IWaf2Transaction; + +class I_IndicatorsFilter{ +public: + virtual ~I_IndicatorsFilter() { } + + // filters indicators from keywords vector + virtual void filterKeywords( + const std::string &key, + Waap::Keywords::KeywordsSet& keywords, + Waap::Keywords::KeywordsVec& filteredKeywords) = 0; + + // register keyword for a specific key + virtual void registerKeywords(const std::string& key, Waap::Keywords::KeywordsSet& keyword, + IWaf2Transaction* pTransaction) = 0; + + // returns true if the keyword based on the key should be filtered out + virtual bool shouldFilterKeyword(const std::string &key, const std::string &keyword) const = 0; +}; diff --git a/components/security_apps/waap/include/i_serialize.h b/components/security_apps/waap/include/i_serialize.h new file mode 100755 index 0000000..cc73ba3 --- /dev/null +++ b/components/security_apps/waap/include/i_serialize.h @@ -0,0 +1,281 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include "i_time_get.h" +#include "i_encryptor.h" +#include "rest.h" +#include "i_messaging.h" +#include "i_mainloop.h" +#include "i_agent_details.h" + +static const uint max_send_obj_retries = 3; + +USE_DEBUG_FLAG(D_WAAP); + +class RestGetFile : public ClientRest +{ +public: + // decrypts and load json + bool loadJson(const std::string& json); + // gen json and encrypt + Maybe genJson() const; +}; + +struct FileMetaData +{ + std::string filename; + std::string modified; +}; + +class RemoteFilesList : public ClientRest +{ +public: + RemoteFilesList(); + + // parses xml instead of json + // extracts a file list in + bool loadJson(const std::string& xml); + + const std::vector& getFilesMetadataList() const; + const std::vector& getFilesList() const; + +private: + RestParam> files; + std::vector filesPathsList; +}; + + +class I_Serializable { +public: + virtual void serialize(std::ostream& stream) = 0; + virtual void deserialize(std::istream& stream) = 0; +}; + +class I_RemoteSyncSerialize { +public: + virtual bool postData() = 0; + virtual void pullData(const std::vector& files) = 0; + virtual void processData() = 0; + virtual void postProcessedData() = 0; + virtual void pullProcessedData(const std::vector& files) = 0; + virtual void updateState(const std::vector& files) = 0; +}; + +class I_Backup { +public: + // open stream and serialize data + virtual void saveData() = 0; + // open stream and deserialize data + virtual void restore() = 0; +}; + +class SerializeToFileBase : + public I_Backup, + public I_Serializable +{ +public: + SerializeToFileBase(std::string filePath); + virtual ~SerializeToFileBase(); + + virtual void saveData(); + virtual void restore(); + virtual void setFilePath(const std::string &new_file_path); + +protected: + // saved file name for testing + std::string m_filePath; +private: + void loadFromFile(std::string filePath); +}; + +class SerializeToFilePeriodically : public SerializeToFileBase +{ +public: + SerializeToFilePeriodically(std::chrono::seconds pollingIntervals, std::string filePath); + virtual ~SerializeToFilePeriodically(); + + void setInterval(std::chrono::seconds newInterval); + +protected: + void backupWorker(); + +private: + std::chrono::microseconds m_lastSerialization; + std::chrono::seconds m_interval; +}; + +class WaapComponent; + +class SerializeToLocalAndRemoteSyncBase : public I_RemoteSyncSerialize, public SerializeToFileBase +{ +public: + SerializeToLocalAndRemoteSyncBase(std::chrono::minutes interval, + std::chrono::seconds waitForSync, + const std::string& filePath, + const std::string& remotePath, + const std::string& assetId, + const std::string& owner); + virtual ~SerializeToLocalAndRemoteSyncBase(); + + virtual void restore(); + + virtual void syncWorker(); + + void setInterval(std::chrono::seconds newInterval); + std::chrono::seconds getIntervalDuration() const; + void setRemoteSyncEnabled(bool enabled); +protected: + void mergeProcessedFromRemote(); + std::string getPostDataUrl(); + std::string getUri(); + size_t 
getIntervalsCount(); + + template + bool sendObject(T &obj, I_Messaging::Method method, std::string uri) + { + I_Messaging *messaging = Singleton::Consume::by(); + I_AgentDetails *agentDetails = Singleton::Consume::by(); + if (agentDetails->getOrchestrationMode() == OrchestrationMode::OFFLINE) { + dbgDebug(D_WAAP) << "offline mode not sending object"; + return false; + } + if (agentDetails->getOrchestrationMode() == OrchestrationMode::HYBRID) { + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::EXTERNAL); + std::string tenant_header = "X-Tenant-Id: " + agentDetails->getTenantId(); + + return messaging->sendObject( + obj, + method, + getSharedStorageHost(), + 80, + conn_flags, + uri, + tenant_header, + nullptr, + MessageTypeTag::WAAP_LEARNING); + } + return messaging->sendObject( + obj, + method, + uri, + "", + nullptr, + true, + MessageTypeTag::WAAP_LEARNING); + } + + template + bool sendObjectWithRetry(T &obj, I_Messaging::Method method, std::string uri) + { + I_MainLoop *mainloop = Singleton::Consume::by(); + for (uint i = 0; i < max_send_obj_retries; i++) + { + if (sendObject(obj, method, uri)) + { + dbgTrace(D_WAAP) << + "object sent successfully after " << i << " retry attempts"; + return true; + } + dbgWarning(D_WAAP) << "Failed to send object. Attempt: " << i; + mainloop->yield(true); + } + dbgError(D_WAAP) << "Failed to send object, reached maximum attempts: " << + max_send_obj_retries; + return false; + } + + template + bool sendNoReplyObject(T &obj, I_Messaging::Method method, std::string uri) + { + I_Messaging *messaging = Singleton::Consume::by(); + I_AgentDetails *agentDetails = Singleton::Consume::by(); + if (agentDetails->getOrchestrationMode() == OrchestrationMode::OFFLINE) { + dbgDebug(D_WAAP) << "offline mode not sending object"; + return false; + } + if (agentDetails->getOrchestrationMode() == OrchestrationMode::HYBRID) { + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::EXTERNAL); + std::string tenant_header = "X-Tenant-Id: " + agentDetails->getTenantId(); + return messaging->sendNoReplyObject( + obj, + method, + getSharedStorageHost(), + 80, + conn_flags, + uri, + tenant_header, + nullptr, + MessageTypeTag::WAAP_LEARNING); + } + return messaging->sendNoReplyObject( + obj, + method, + uri, + "", + nullptr, + true, + MessageTypeTag::WAAP_LEARNING); + } + + template + bool sendNoReplyObjectWithRetry(T &obj, I_Messaging::Method method, std::string uri) + { + I_MainLoop *mainloop= Singleton::Consume::by(); + for (uint i = 0; i < max_send_obj_retries; i++) + { + if (sendNoReplyObject(obj, method, uri)) + { + dbgTrace(D_WAAP) << + "object sent successfully after " << i << " retry attempts"; + return true; + } + dbgWarning(D_WAAP) << "Failed to send object. 
Attempt: " << i; + mainloop->yield(true); + } + dbgError(D_WAAP) << "Failed to send object, reached maximum attempts: " << + max_send_obj_retries; + return false; + } + + const std::string m_remotePath; // Created from tenentId + / + assetId + / + class + std::chrono::seconds m_interval; + std::string m_owner; + +private: + bool localSyncAndProcess(); + void updateStateFromRemoteService(); + RemoteFilesList getProcessedFilesList(); + RemoteFilesList getRemoteProcessedFilesList(); + std::string getWindowId(); + bool isBase(); + std::string getLearningHost(); + std::string getSharedStorageHost(); + + I_MainLoop* m_pMainLoop; + std::chrono::microseconds m_waitForSync; + uint m_workerRoutineId; + size_t m_daysCount; + size_t m_windowsCount; + size_t m_intervalsCounter; + bool m_remoteSyncEnabled; + const std::string m_assetId; + std::string m_type; + std::string m_lastProcessedModified; + Maybe m_shared_storage_host; + Maybe m_learning_host; +}; diff --git a/components/security_apps/waap/include/i_transaction.h b/components/security_apps/waap/include/i_transaction.h new file mode 100755 index 0000000..6f731bc --- /dev/null +++ b/components/security_apps/waap/include/i_transaction.h @@ -0,0 +1,144 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
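The send helpers in i_serialize.h above share one retry pattern: attempt the send up to max_send_obj_retries times and yield to the mainloop between attempts. The sketch below reduces that pattern to its control flow; trySend() and yieldToMainloop() are assumed stand-ins for the messaging call and I_MainLoop::yield(true).

bool trySend();          // assumed: one send attempt, true on success
void yieldToMainloop();  // assumed: wraps I_MainLoop::yield(true)

bool sendWithRetry()
{
    static const unsigned max_send_obj_retries = 3;   // same limit as above
    for (unsigned attempt = 0; attempt < max_send_obj_retries; ++attempt) {
        if (trySend()) return true;   // success: stop retrying
        yieldToMainloop();            // let other routines run before the next attempt
    }
    return false;                     // caller logs the failure and gives up
}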
+ +#pragma once + +#include "../waap_clib/WaapDecision.h" +#include "../include/WaapDefines.h" +#include "../waap_clib/Csrf.h" +#include "../waap_clib/Waf2Util.h" +#include "../waap_clib/WaapOpenRedirect.h" +#include "../waap_clib/FpMitigation.h" +#include "../waap_clib/DeepParser.h" +#include "http_inspection_events.h" + +enum HeaderType { + UNKNOWN_HEADER, + HOST_HEADER, + USER_AGENT_HEADER, + COOKIE_HEADER, + REFERER_HEADER, + CONTENT_TYPE_HEADER, + CLEAN_HEADER, + OTHER_KNOWN_HEADERS +}; + +struct AnalysisResult; +class WaapAssetState; + +struct Waf2TransactionFlags { + bool endResponseHeadersCalled; + bool requestDataPushStarted; + bool responseDataPushStarted; + + Waf2TransactionFlags(): + endResponseHeadersCalled(false), + requestDataPushStarted(false), + responseDataPushStarted(false) + { + } +}; + +class IWaf2Transaction { +public: + virtual ~IWaf2Transaction() {} + virtual uint64_t getIndex() const = 0; + virtual void setIndex(uint64_t index) = 0; + virtual std::shared_ptr getAssetState() = 0; + virtual IWaapConfig* getSiteConfig() = 0; + virtual DeepParser& getDeepParser() = 0; + virtual bool get_ignoreScore() const = 0; + virtual void addNote(const std::string ¬e) = 0; + virtual bool shouldIgnoreOverride(const Waf2ScanResult &res) = 0; + virtual bool reportScanResult(const Waf2ScanResult &res) = 0; + virtual const std::string getHost() const = 0; + virtual Waap::OpenRedirect::State &getOpenRedirectState() = 0; + virtual const std::string getLocation() const = 0; + virtual const std::string getUserAgent() const = 0; + virtual const std::string getParam() const = 0; + virtual const std::vector getKeywordMatches() const = 0; + virtual const std::vector getKeywordsCombinations() const = 0; + virtual const std::string getContentTypeStr() const = 0; + virtual Waap::Util::ContentType getContentType() const = 0; + virtual const std::string getKeywordMatchesStr() const = 0; + virtual const std::string getSample() const = 0; + virtual const std::string getLastScanSample() const = 0; + virtual const std::string& getLastScanParamName() const = 0; + virtual const std::string getMethod() const = 0; + virtual const std::string getHdrContent(std::string hdrName) const = 0; + virtual const WaapDecision &getWaapDecision() const = 0; + virtual const std::string& getRemoteAddr() const = 0; + virtual const std::string getUri() const = 0; + virtual const std::string getUriStr() const = 0; + virtual const std::string& getSourceIdentifier() const = 0; + virtual double getScore() const = 0; + virtual const std::vector getScoreArray() const = 0; + virtual Waap::CSRF::State& getCsrfState() = 0; + virtual ngx_http_cp_verdict_e getUserLimitVerdict() = 0; + virtual const std::string getUserLimitVerdictStr() const = 0; + virtual const std::string getViolatedUserLimitTypeStr() const = 0; + virtual void checkShouldInject() = 0; + virtual void completeInjectionResponseBody(std::string& strInjection) = 0; + virtual void sendLog() = 0; + virtual bool decideAfterHeaders() = 0; + virtual int decideFinal( + int mode, + AnalysisResult &transactionResult, + const std::string &poolName=KEYWORDS_SCORE_POOL_BASE, + PolicyCounterType fpClassification = UNKNOWN_TYPE) = 0; + virtual bool decideResponse() = 0; + virtual void clearAllInjectionReasons() = 0; + virtual bool shouldInspectResponse() = 0; + virtual bool shouldInjectResponse() = 0; + virtual bool shouldInjectCSRF() = 0; + virtual bool shouldInjectSecurityHeaders() = 0; + virtual void handleSecurityHeadersInjection( + std::vector>& injectHeaderStrs) = 0; + virtual 
void disableShouldInjectSecurityHeaders() = 0; + virtual void handleCsrfHeaderInjection(std::string& injectStr) = 0; + virtual bool findHtmlTagToInject(const char* data, int data_len, int& pos) = 0; + virtual bool isHtmlType(const char* data, int data_len) = 0; + + virtual HeaderType detectHeaderType(const char* name, int name_len) = 0; + + virtual void start() = 0; + virtual void set_transaction_time(const char* log_time) = 0; + virtual void set_transaction_remote(const char* remote_addr, int remote_port) = 0; + virtual void set_transaction_local(const char* local_addr, int local_port) = 0; + + // Request + virtual void set_method(const char* method) = 0; + virtual void set_uri(const char* uri) = 0; + virtual void start_request_hdrs() = 0; + virtual void add_request_hdr(const char* name, int name_len, const char* value, int value_len) = 0; + virtual void end_request_hdrs() = 0; + virtual void start_request_body() = 0; + virtual void add_request_body_chunk(const char* data, int data_len) = 0; + virtual void end_request_body() = 0; + virtual void end_request() = 0; + + // Response + virtual void start_response(int response_status, int http_version) = 0; + virtual void start_response_hdrs() = 0; + virtual void add_response_hdr(const char* name, int name_len, const char* value, int value_len) = 0; + virtual void end_response_hdrs() = 0; + virtual void start_response_body() = 0; + virtual void add_response_body_chunk(const char* data, int data_len) = 0; + virtual void end_response_body() = 0; + virtual void end_response() = 0; + + virtual void collectFoundPatterns() = 0; + virtual ReportIS::Severity computeEventSeverityFromDecision() const = 0; + virtual void finish() = 0; + virtual Waf2TransactionFlags &getTransactionFlags() = 0; +}; diff --git a/components/security_apps/waap/include/i_waapConfig.h b/components/security_apps/waap/include/i_waapConfig.h new file mode 100755 index 0000000..27eecbc --- /dev/null +++ b/components/security_apps/waap/include/i_waapConfig.h @@ -0,0 +1,69 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
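To make the large IWaf2Transaction interface above easier to follow, here is one plausible order in which a driver could feed a single HTTP transaction through it. This is a sketch under assumptions: tx points at some concrete implementation, the literal header and body values are placeholders, and the real attachment hooks may call these methods in a different order.

void feedTransaction(IWaf2Transaction *tx)
{
    tx->start();
    tx->set_transaction_remote("203.0.113.7", 54321);
    tx->set_method("POST");
    tx->set_uri("/login");

    tx->start_request_hdrs();
    tx->add_request_hdr("Host", 4, "example.com", 11);
    tx->end_request_hdrs();
    bool block_early = tx->decideAfterHeaders();   // a verdict may exist before any body is seen

    tx->start_request_body();
    tx->add_request_body_chunk("user=admin", 10);
    tx->end_request_body();
    tx->end_request();

    tx->start_response(200, 11);                   // status code, HTTP version encoding assumed
    tx->start_response_hdrs();
    tx->add_response_hdr("Content-Type", 12, "text/html", 9);
    tx->end_response_hdrs();
    tx->end_response();                            // response body omitted in this sketch

    tx->sendLog();
    tx->finish();
    (void)block_early;
}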
+ +#pragma once + +#include "../waap_clib/WaapOverride.h" +#include "../waap_clib/WaapTrigger.h" +#include "../waap_clib/TrustedSources.h" +#include "../waap_clib/WaapParameters.h" +#include "../waap_clib/WaapOpenRedirectPolicy.h" +#include "../waap_clib/WaapErrorDisclosurePolicy.h" +#include "../waap_clib/CsrfPolicy.h" +#include "../waap_clib/UserLimitsPolicy.h" +#include "../waap_clib/RateLimiting.h" +#include "../waap_clib/SecurityHeadersPolicy.h" + +#include + +enum class BlockingLevel { + NO_BLOCKING = 0, + LOW_BLOCKING_LEVEL, + MEDIUM_BLOCKING_LEVEL, + HIGH_BLOCKING_LEVEL +}; + +enum class AttackMitigationMode +{ + DISABLED = 0, + LEARNING, + PREVENT, + UNKNOWN +}; +class IWaapConfig { +public: + virtual const std::string& get_AssetId() const = 0; + virtual const std::string& get_AssetName() const = 0; + virtual const BlockingLevel& get_BlockingLevel() const = 0; + virtual const std::string& get_PracticeId() const = 0; + virtual const std::string& get_PracticeName() const = 0; + virtual const std::string& get_PracticeSubType() const = 0; + virtual const std::string& get_RuleId() const = 0; + virtual const std::string& get_RuleName() const = 0; + virtual const bool& get_WebAttackMitigation() const = 0; + virtual const std::string& get_WebAttackMitigationAction() const = 0; + + virtual const std::shared_ptr& get_OverridePolicy() const = 0; + virtual const std::shared_ptr& get_TriggerPolicy() const = 0; + virtual const std::shared_ptr& get_TrustedSourcesPolicy() const = 0; + virtual const std::shared_ptr& get_WaapParametersPolicy() const = 0; + virtual const std::shared_ptr& get_OpenRedirectPolicy() const = 0; + virtual const std::shared_ptr& get_ErrorDisclosurePolicy() const = 0; + virtual const std::shared_ptr& get_CsrfPolicy() const = 0; + virtual const std::shared_ptr& get_RateLimitingPolicy() const = 0; + virtual const std::shared_ptr& get_ErrorLimitingPolicy() const = 0; + virtual const std::shared_ptr& get_SecurityHeadersPolicy() const = 0; + virtual const std::shared_ptr& get_UserLimitsPolicy() const = 0; + + virtual void printMe(std::ostream& os) const = 0; +}; diff --git a/components/security_apps/waap/include/i_waap_asset_state.h b/components/security_apps/waap/include/i_waap_asset_state.h new file mode 100755 index 0000000..c0b6f5b --- /dev/null +++ b/components/security_apps/waap/include/i_waap_asset_state.h @@ -0,0 +1,27 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
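The BlockingLevel and AttackMitigationMode enums above are the knobs IWaapConfig exposes for how aggressively the engine should act. As a small illustration, the hypothetical helper below maps a textual level to the enum; the string values and the include path are assumptions for the example, not taken from the policy format in this repository.

#include <string>
#include "i_waapConfig.h"   // assumed include path for the enums above

BlockingLevel blockingLevelFromString(const std::string &level)
{
    if (level == "low") return BlockingLevel::LOW_BLOCKING_LEVEL;
    if (level == "medium") return BlockingLevel::MEDIUM_BLOCKING_LEVEL;
    if (level == "high") return BlockingLevel::HIGH_BLOCKING_LEVEL;
    return BlockingLevel::NO_BLOCKING;   // unknown or empty: do not block
}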
+ +#pragma once + +#include "WaapEnums.h" + +class I_WaapAssetState { +public: + virtual void updateScores() = 0; + virtual std::string getSignaturesScoresFilePath() const = 0; + virtual std::string getSignaturesFilterDir() const = 0; + virtual bool isKeywordOfType(const std::string& keyword, ParamType type) const = 0; + virtual bool isBinarySampleType(const std::string & sample) const = 0; + virtual bool isWBXMLSampleType(const std::string & sample) const = 0; + virtual std::set getSampleType(const std::string& sample) const = 0; +}; diff --git a/components/security_apps/waap/include/picojson.h b/components/security_apps/waap/include/picojson.h new file mode 100755 index 0000000..7485335 --- /dev/null +++ b/components/security_apps/waap/include/picojson.h @@ -0,0 +1,1178 @@ +// +// Copyright 2009-2010 Cybozu Labs, Inc. +// Copyright 2011-2014 Kazuho Oku +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +#ifndef picojson_h +#define picojson_h + +// LCOV_EXCL_START Reason: External library header. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// for isnan/isinf +#if __cplusplus >= 201103L +#include +#else +extern "C" { +#ifdef _MSC_VER +#include +#elif defined(__INTEL_COMPILER) +#include +#else +#include +#endif +} +#endif + +#ifndef PICOJSON_USE_RVALUE_REFERENCE +#if (defined(__cpp_rvalue_references) && __cpp_rvalue_references >= 200610) || (defined(_MSC_VER) && _MSC_VER >= 1600) +#define PICOJSON_USE_RVALUE_REFERENCE 1 +#else +#define PICOJSON_USE_RVALUE_REFERENCE 0 +#endif +#endif // PICOJSON_USE_RVALUE_REFERENCE + +// experimental support for int64_t (see README.mkdn for detail) +#ifdef PICOJSON_USE_INT64 +#define __STDC_FORMAT_MACROS +#include +#include +#endif + +// to disable the use of localeconv(3), set PICOJSON_USE_LOCALE to 0 +#ifndef PICOJSON_USE_LOCALE +#define PICOJSON_USE_LOCALE 1 +#endif +#if PICOJSON_USE_LOCALE +extern "C" { +#include +} +#endif + +#ifndef PICOJSON_ASSERT +#define PICOJSON_ASSERT(e) \ + do { \ + if (!(e)) \ + throw std::runtime_error(#e); \ + } while (0) +#endif + +#ifdef _MSC_VER +#define SNPRINTF _snprintf_s +#pragma warning(push) +#pragma warning(disable : 4244) // conversion from int to char +#pragma warning(disable : 4127) // conditional expression is constant +#pragma warning(disable : 4702) // unreachable code +#else +#define SNPRINTF snprintf +#endif + +namespace picojson { + + +enum { +#ifndef PICOJSON_USE_INT64 + null_type, + boolean_type, + number_type, + string_type, + array_type, + object_type +#else + null_type, + boolean_type, + number_type, + string_type, + array_type, + object_type, + int64_type +#endif +}; + +enum { INDENT_WIDTH = 2 }; + +struct null {}; + +class value { +public: + typedef std::vector array; + typedef std::map object; + union _storage { + bool boolean_; + double number_; +#ifdef PICOJSON_USE_INT64 + int64_t int64_; +#endif + std::string *string_; + array *array_; + object *object_; + }; + +protected: + int type_; + _storage u_; + +public: + value(); + value(int type, bool); + explicit value(bool b); +#ifdef PICOJSON_USE_INT64 + explicit value(int64_t i); +#endif + explicit value(double n); + explicit value(const std::string &s); + explicit value(const array &a); + explicit value(const object &o); +#if PICOJSON_USE_RVALUE_REFERENCE + explicit value(std::string &&s); + explicit value(array &&a); + explicit value(object &&o); +#endif + explicit value(const char *s); + value(const char *s, size_t len); + ~value(); + value(const value &x); + value &operator=(const value &x); +#if PICOJSON_USE_RVALUE_REFERENCE + value(value &&x) throw(); + value &operator=(value &&x) throw(); +#endif + void swap(value &x) throw(); + template bool is() const; + template const T &get() const; + template T &get(); + template void set(const T &); +#if PICOJSON_USE_RVALUE_REFERENCE + template void set(T &&); +#endif + bool evaluate_as_boolean() const; + const value &get(const size_t idx) const; + const value &get(const std::string &key) const; + value &get(const size_t idx); + value &get(const std::string &key); + + bool contains(const size_t idx) const; + bool contains(const std::string &key) const; + std::string to_str() const; + template void serialize(Iter os, bool prettify = false) const; + std::string serialize(bool prettify = false) const; + +private: + template value(const T *); // intentionally defined to block implicit conversion of pointer to bool + template static void _indent(Iter os, int indent); + template void _serialize(Iter os, int 
indent) const; + std::string _serialize(int indent) const; + void clear(); +}; + +typedef value::array array; +typedef value::object object; + +inline value::value() : type_(null_type), u_() { +} + +inline value::value(int type, bool) : type_(type), u_() { + switch (type) { +#define INIT(p, v) \ + case p##type: \ + u_.p = v; \ + break; + INIT(boolean_, false); + INIT(number_, 0.0); +#ifdef PICOJSON_USE_INT64 + INIT(int64_, 0); +#endif + INIT(string_, new std::string()); + INIT(array_, new array()); + INIT(object_, new object()); +#undef INIT + default: + break; + } +} + +inline value::value(bool b) : type_(boolean_type), u_() { + u_.boolean_ = b; +} + +#ifdef PICOJSON_USE_INT64 +inline value::value(int64_t i) : type_(int64_type), u_() { + u_.int64_ = i; +} +#endif + +inline value::value(double n) : type_(number_type), u_() { + if ( +#ifdef _MSC_VER + !_finite(n) +#elif __cplusplus >= 201103L || !(defined(isnan) && defined(isinf)) + std::isnan(n) || std::isinf(n) +#else + isnan(n) || isinf(n) +#endif + ) { + throw std::overflow_error(""); + } + u_.number_ = n; +} + +inline value::value(const std::string &s) : type_(string_type), u_() { + u_.string_ = new std::string(s); +} + +inline value::value(const array &a) : type_(array_type), u_() { + u_.array_ = new array(a); +} + +inline value::value(const object &o) : type_(object_type), u_() { + u_.object_ = new object(o); +} + +#if PICOJSON_USE_RVALUE_REFERENCE +inline value::value(std::string &&s) : type_(string_type), u_() { + u_.string_ = new std::string(std::move(s)); +} + +inline value::value(array &&a) : type_(array_type), u_() { + u_.array_ = new array(std::move(a)); +} + +inline value::value(object &&o) : type_(object_type), u_() { + u_.object_ = new object(std::move(o)); +} +#endif + +inline value::value(const char *s) : type_(string_type), u_() { + u_.string_ = new std::string(s); +} + +inline value::value(const char *s, size_t len) : type_(string_type), u_() { + u_.string_ = new std::string(s, len); +} + +inline void value::clear() { + switch (type_) { +#define DEINIT(p) \ + case p##type: \ + delete u_.p; \ + break; + DEINIT(string_); + DEINIT(array_); + DEINIT(object_); +#undef DEINIT + default: + break; + } +} + +inline value::~value() { + clear(); +} + +inline value::value(const value &x) : type_(x.type_), u_() { + switch (type_) { +#define INIT(p, v) \ + case p##type: \ + u_.p = v; \ + break; + INIT(string_, new std::string(*x.u_.string_)); + INIT(array_, new array(*x.u_.array_)); + INIT(object_, new object(*x.u_.object_)); +#undef INIT + default: + u_ = x.u_; + break; + } +} + +inline value &value::operator=(const value &x) { + if (this != &x) { + value t(x); + swap(t); + } + return *this; +} + +#if PICOJSON_USE_RVALUE_REFERENCE +inline value::value(value &&x) throw() : type_(null_type), u_() { + swap(x); +} +inline value &value::operator=(value &&x) throw() { + swap(x); + return *this; +} +#endif +inline void value::swap(value &x) throw() { + std::swap(type_, x.type_); + std::swap(u_, x.u_); +} + +#define IS(ctype, jtype) \ + template <> inline bool value::is() const { \ + return type_ == jtype##_type; \ + } +IS(null, null) +IS(bool, boolean) +#ifdef PICOJSON_USE_INT64 +IS(int64_t, int64) +#endif +IS(std::string, string) +IS(array, array) +IS(object, object) +#undef IS +template <> inline bool value::is() const { + return type_ == number_type; +#ifdef PICOJSON_USE_INT64 + return type_ == number_type || type_ == int64_type; +#endif +} + +#define GET(ctype, var) \ + template <> inline const ctype &value::get() const { \ + 
PICOJSON_ASSERT("type mismatch! call is() before get()" && is()); \ + return var; \ + } \ + template <> inline ctype &value::get() { \ + PICOJSON_ASSERT("type mismatch! call is() before get()" && is()); \ + return var; \ + } +GET(bool, u_.boolean_) +GET(std::string, *u_.string_) +GET(array, *u_.array_) +GET(object, *u_.object_) +#ifdef PICOJSON_USE_INT64 +GET(double, + (type_ == int64_type && + (const_cast(this)->type_ = number_type, const_cast(this)->u_.number_ = u_.int64_), + u_.number_)) +GET(int64_t, u_.int64_) +#else +GET(double, u_.number_) +#endif +#undef GET + +#define SET(ctype, jtype, setter) \ + template <> inline void value::set(const ctype &_val) { \ + clear(); \ + type_ = jtype##_type; \ + setter \ + } +SET(bool, boolean, u_.boolean_ = _val;) +SET(std::string, string, u_.string_ = new std::string(_val);) +SET(array, array, u_.array_ = new array(_val);) +SET(object, object, u_.object_ = new object(_val);) +SET(double, number, u_.number_ = _val;) +#ifdef PICOJSON_USE_INT64 +SET(int64_t, int64, u_.int64_ = _val;) +#endif +#undef SET + +#if PICOJSON_USE_RVALUE_REFERENCE +#define MOVESET(ctype, jtype, setter) \ + template <> inline void value::set(ctype && _val) { \ + clear(); \ + type_ = jtype##_type; \ + setter \ + } +MOVESET(std::string, string, u_.string_ = new std::string(std::move(_val));) +MOVESET(array, array, u_.array_ = new array(std::move(_val));) +MOVESET(object, object, u_.object_ = new object(std::move(_val));) +#undef MOVESET +#endif + +inline bool value::evaluate_as_boolean() const { + switch (type_) { + case null_type: + return false; + case boolean_type: + return u_.boolean_; + case number_type: + return u_.number_ != 0; +#ifdef PICOJSON_USE_INT64 + case int64_type: + return u_.int64_ != 0; +#endif + case string_type: + return !u_.string_->empty(); + default: + return true; + } +} + +inline const value &value::get(const size_t idx) const { + static value s_null; + PICOJSON_ASSERT(is()); + return idx < u_.array_->size() ? (*u_.array_)[idx] : s_null; +} + +inline value &value::get(const size_t idx) { + static value s_null; + PICOJSON_ASSERT(is()); + return idx < u_.array_->size() ? (*u_.array_)[idx] : s_null; +} + +inline const value &value::get(const std::string &key) const { + static value s_null; + PICOJSON_ASSERT(is()); + object::const_iterator i = u_.object_->find(key); + return i != u_.object_->end() ? i->second : s_null; +} + +inline value &value::get(const std::string &key) { + static value s_null; + PICOJSON_ASSERT(is()); + object::iterator i = u_.object_->find(key); + return i != u_.object_->end() ? i->second : s_null; +} + +inline bool value::contains(const size_t idx) const { + PICOJSON_ASSERT(is()); + return idx < u_.array_->size(); +} + +inline bool value::contains(const std::string &key) const { + PICOJSON_ASSERT(is()); + object::const_iterator i = u_.object_->find(key); + return i != u_.object_->end(); +} + +inline std::string value::to_str() const { + switch (type_) { + case null_type: + return "null"; + case boolean_type: + return u_.boolean_ ? "true" : "false"; +#ifdef PICOJSON_USE_INT64 + case int64_type: { + char buf[sizeof("-9223372036854775808")]; + SNPRINTF(buf, sizeof(buf), "%" PRId64, u_.int64_); + return buf; + } +#endif + case number_type: { + char buf[256]; + double tmp; + SNPRINTF( + buf, + sizeof(buf), + fabs(u_.number_) < (1ULL << 53) && modf(u_.number_, &tmp) == 0 ? 
"%.f" : "%.17g", + u_.number_ + ); +#if PICOJSON_USE_LOCALE + char *decimal_point = localeconv()->decimal_point; + if (strcmp(decimal_point, ".") != 0) { + size_t decimal_point_len = strlen(decimal_point); + for (char *p = buf; *p != '\0'; ++p) { + if (strncmp(p, decimal_point, decimal_point_len) == 0) { + return std::string(buf, p) + "." + (p + decimal_point_len); + } + } + } +#endif + return buf; + } + case string_type: + return *u_.string_; + case array_type: + return "array"; + case object_type: + return "object"; + default: + PICOJSON_ASSERT(0); +#ifdef _MSC_VER + __assume(0); +#endif + } + return std::string(); +} + +template void copy(const std::string &s, Iter oi) { + std::copy(s.begin(), s.end(), oi); +} + +template struct serialize_str_char { + Iter oi; + void operator()(char c) { + switch (c) { +#define MAP(val, sym) \ + case val: \ + copy(sym, oi); \ + break; + MAP('"', "\\\""); + MAP('\\', "\\\\"); + MAP('/', "\\/"); + MAP('\b', "\\b"); + MAP('\f', "\\f"); + MAP('\n', "\\n"); + MAP('\r', "\\r"); + MAP('\t', "\\t"); +#undef MAP + default: + if (static_cast(c) < 0x20 || c == 0x7f) { + char buf[7]; + SNPRINTF(buf, sizeof(buf), "\\u%04x", c & 0xff); + copy(buf, buf + 6, oi); + } else { + *oi++ = c; + } + break; + } + } +}; + +template void serialize_str(const std::string &s, Iter oi) { + *oi++ = '"'; + serialize_str_char process_char = {oi}; + std::for_each(s.begin(), s.end(), process_char); + *oi++ = '"'; +} + +template void value::serialize(Iter oi, bool prettify) const { + return _serialize(oi, prettify ? 0 : -1); +} + +inline std::string value::serialize(bool prettify) const { + return _serialize(prettify ? 0 : -1); +} + +template void value::_indent(Iter oi, int indent) { + *oi++ = '\n'; + for (int i = 0; i < indent * INDENT_WIDTH; ++i) { + *oi++ = ' '; + } +} + +template void value::_serialize(Iter oi, int indent) const { + switch (type_) { + case string_type: + serialize_str(*u_.string_, oi); + break; + case array_type: { + *oi++ = '['; + if (indent != -1) { + ++indent; + } + for (array::const_iterator i = u_.array_->begin(); i != u_.array_->end(); ++i) { + if (i != u_.array_->begin()) { + *oi++ = ','; + } + if (indent != -1) { + _indent(oi, indent); + } + i->_serialize(oi, indent); + } + if (indent != -1) { + --indent; + if (!u_.array_->empty()) { + _indent(oi, indent); + } + } + *oi++ = ']'; + break; + } + case object_type: { + *oi++ = '{'; + if (indent != -1) { + ++indent; + } + for (object::const_iterator i = u_.object_->begin(); i != u_.object_->end(); ++i) { + if (i != u_.object_->begin()) { + *oi++ = ','; + } + if (indent != -1) { + _indent(oi, indent); + } + serialize_str(i->first, oi); + *oi++ = ':'; + if (indent != -1) { + *oi++ = ' '; + } + i->second._serialize(oi, indent); + } + if (indent != -1) { + --indent; + if (!u_.object_->empty()) { + _indent(oi, indent); + } + } + *oi++ = '}'; + break; + } + default: + copy(to_str(), oi); + break; + } + if (indent == 0) { + *oi++ = '\n'; + } +} + +inline std::string value::_serialize(int indent) const { + std::string s; + _serialize(std::back_inserter(s), indent); + return s; +} + +template class input { +protected: + Iter cur_, end_; + bool consumed_; + int line_; + +public: + input(const Iter &first, const Iter &last) : cur_(first), end_(last), consumed_(false), line_(1) { + } + int getc() { + if (consumed_) { + if (*cur_ == '\n') { + ++line_; + } + ++cur_; + } + if (cur_ == end_) { + consumed_ = false; + return -1; + } + consumed_ = true; + return *cur_ & 0xff; + } + void ungetc() { + consumed_ = false; + } + Iter 
cur() const { + if (consumed_) { + input *self = const_cast *>(this); + self->consumed_ = false; + ++self->cur_; + } + return cur_; + } + int line() const { + return line_; + } + void skip_ws() { + while (1) { + int ch = getc(); + if (!(ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r')) { + ungetc(); + break; + } + } + } + bool expect(const int expected) { + skip_ws(); + if (getc() != expected) { + ungetc(); + return false; + } + return true; + } + bool match(const std::string &pattern) { + for (std::string::const_iterator pi(pattern.begin()); pi != pattern.end(); ++pi) { + if (getc() != *pi) { + ungetc(); + return false; + } + } + return true; + } +}; + +template inline int _parse_quadhex(input &in) { + int uni_ch = 0, hex; + for (int i = 0; i < 4; i++) { + if ((hex = in.getc()) == -1) { + return -1; + } + if ('0' <= hex && hex <= '9') { + hex -= '0'; + } else if ('A' <= hex && hex <= 'F') { + hex -= 'A' - 0xa; + } else if ('a' <= hex && hex <= 'f') { + hex -= 'a' - 0xa; + } else { + in.ungetc(); + return -1; + } + uni_ch = uni_ch * 16 + hex; + } + return uni_ch; +} + +template inline bool _parse_codepoint(String &out, input &in) { + int uni_ch; + if ((uni_ch = _parse_quadhex(in)) == -1) { + return false; + } + if (0xd800 <= uni_ch && uni_ch <= 0xdfff) { + if (0xdc00 <= uni_ch) { + // a second 16-bit of a surrogate pair appeared + return false; + } + // first 16-bit of surrogate pair, get the next one + if (in.getc() != '\\' || in.getc() != 'u') { + in.ungetc(); + return false; + } + int second = _parse_quadhex(in); + if (!(0xdc00 <= second && second <= 0xdfff)) { + return false; + } + uni_ch = ((uni_ch - 0xd800) << 10) | ((second - 0xdc00) & 0x3ff); + uni_ch += 0x10000; + } + if (uni_ch < 0x80) { + out.push_back(static_cast(uni_ch)); + } else { + if (uni_ch < 0x800) { + out.push_back(static_cast(0xc0 | (uni_ch >> 6))); + } else { + if (uni_ch < 0x10000) { + out.push_back(static_cast(0xe0 | (uni_ch >> 12))); + } else { + out.push_back(static_cast(0xf0 | (uni_ch >> 18))); + out.push_back(static_cast(0x80 | ((uni_ch >> 12) & 0x3f))); + } + out.push_back(static_cast(0x80 | ((uni_ch >> 6) & 0x3f))); + } + out.push_back(static_cast(0x80 | (uni_ch & 0x3f))); + } + return true; +} + +template inline bool _parse_string(String &out, input &in) { + while (1) { + int ch = in.getc(); + if (ch < ' ') { + in.ungetc(); + return false; + } else if (ch == '"') { + return true; + } else if (ch == '\\') { + if ((ch = in.getc()) == -1) { + return false; + } + switch (ch) { +#define MAP(sym, val) \ + case sym: \ + out.push_back(val); \ + break; + MAP('"', '\"'); + MAP('\\', '\\'); + MAP('/', '/'); + MAP('b', '\b'); + MAP('f', '\f'); + MAP('n', '\n'); + MAP('r', '\r'); + MAP('t', '\t'); +#undef MAP + case 'u': + if (!_parse_codepoint(out, in)) { + return false; + } + break; + default: + return false; + } + } else { + out.push_back(static_cast(ch)); + } + } + return false; +} + +template inline bool _parse_array(Context &ctx, input &in) { + if (!ctx.parse_array_start()) { + return false; + } + size_t idx = 0; + if (in.expect(']')) { + return ctx.parse_array_stop(idx); + } + do { + if (!ctx.parse_array_item(in, idx)) { + return false; + } + idx++; + } while (in.expect(',')); + return in.expect(']') && ctx.parse_array_stop(idx); +} + +template inline bool _parse_object(Context &ctx, input &in) { + if (!ctx.parse_object_start()) { + return false; + } + if (in.expect('}')) { + return true; + } + do { + std::string key; + if (!in.expect('"') || !_parse_string(key, in) || !in.expect(':')) { + return false; + } + 
if (!ctx.parse_object_item(in, key)) { + return false; + } + } while (in.expect(',')); + return in.expect('}'); +} + +template inline std::string _parse_number(input &in) { + std::string num_str; + while (1) { + int ch = in.getc(); + if (('0' <= ch && ch <= '9') || ch == '+' || ch == '-' || ch == 'e' || ch == 'E') { + num_str.push_back(static_cast(ch)); + } else if (ch == '.') { +#if PICOJSON_USE_LOCALE + num_str += localeconv()->decimal_point; +#else + num_str.push_back('.'); +#endif + } else { + in.ungetc(); + break; + } + } + return num_str; +} + +template inline bool _parse(Context &ctx, input &in) { + in.skip_ws(); + int ch = in.getc(); + switch (ch) { +#define IS(ch, text, op) \ + case ch: \ + if (in.match(text) && op) { \ + return true; \ + } else { \ + return false; \ + } + IS('n', "ull", ctx.set_null()); + IS('f', "alse", ctx.set_bool(false)); + IS('t', "rue", ctx.set_bool(true)); +#undef IS + case '"': + return ctx.parse_string(in); + case '[': + return _parse_array(ctx, in); + case '{': + return _parse_object(ctx, in); + default: + if (('0' <= ch && ch <= '9') || ch == '-') { + double f; + char *endp; + in.ungetc(); + std::string num_str(_parse_number(in)); + if (num_str.empty()) { + return false; + } +#ifdef PICOJSON_USE_INT64 + { + errno = 0; + intmax_t ival = strtoimax(num_str.c_str(), &endp, 10); + if (errno == 0 && std::numeric_limits::min() <= ival && + ival <= std::numeric_limits::max() && + endp == num_str.c_str() + num_str.size()) { + ctx.set_int64(ival); + return true; + } + } +#endif + f = strtod(num_str.c_str(), &endp); + if (endp == num_str.c_str() + num_str.size()) { + ctx.set_number(f); + return true; + } + return false; + } + break; + } + in.ungetc(); + return false; +} + +class deny_parse_context { +public: + bool set_null() { + return false; + } + bool set_bool(bool) { + return false; + } +#ifdef PICOJSON_USE_INT64 + bool set_int64(int64_t) { + return false; + } +#endif + bool set_number(double) { + return false; + } + template bool parse_string(input &) { + return false; + } + bool parse_array_start() { + return false; + } + template bool parse_array_item(input &, size_t) { + return false; + } + bool parse_array_stop(size_t) { + return false; + } + bool parse_object_start() { + return false; + } + template bool parse_object_item(input &, const std::string &) { + return false; + } +}; + +class default_parse_context { +protected: + value *out_; + +public: + default_parse_context(value *out) : out_(out) { + } + bool set_null() { + *out_ = value(); + return true; + } + bool set_bool(bool b) { + *out_ = value(b); + return true; + } +#ifdef PICOJSON_USE_INT64 + bool set_int64(int64_t i) { + *out_ = value(i); + return true; + } +#endif + bool set_number(double f) { + *out_ = value(f); + return true; + } + template bool parse_string(input &in) { + *out_ = value(string_type, false); + return _parse_string(out_->get(), in); + } + bool parse_array_start() { + *out_ = value(array_type, false); + return true; + } + template bool parse_array_item(input &in, size_t) { + array &a = out_->get(); + a.push_back(value()); + default_parse_context ctx(&a.back()); + return _parse(ctx, in); + } + bool parse_array_stop(size_t) { + return true; + } + bool parse_object_start() { + *out_ = value(object_type, false); + return true; + } + template bool parse_object_item(input &in, const std::string &key) { + object &o = out_->get(); + default_parse_context ctx(&o[key]); + return _parse(ctx, in); + } + +private: + default_parse_context(const default_parse_context &); + default_parse_context 
&operator=(const default_parse_context &); +}; + +class null_parse_context { +public: + struct dummy_str { + void push_back(int) { + } + }; + +public: + null_parse_context() { + } + bool set_null() { + return true; + } + bool set_bool(bool) { + return true; + } +#ifdef PICOJSON_USE_INT64 + bool set_int64(int64_t) { + return true; + } +#endif + bool set_number(double) { + return true; + } + template bool parse_string(input &in) { + dummy_str s; + return _parse_string(s, in); + } + bool parse_array_start() { + return true; + } + template bool parse_array_item(input &in, size_t) { + return _parse(*this, in); + } + bool parse_array_stop(size_t) { + return true; + } + bool parse_object_start() { + return true; + } + template bool parse_object_item(input &in, const std::string &) { + return _parse(*this, in); + } + +private: + null_parse_context(const null_parse_context &); + null_parse_context &operator=(const null_parse_context &); +}; + +// obsolete, use the version below +template inline std::string +parse(value &out, Iter &pos, const Iter &last) { + std::string err; + pos = parse(out, pos, last, &err); + return err; +} + +template inline Iter +_parse(Context &ctx, const Iter &first, const Iter &last, std::string *err) { + input in(first, last); + if (!_parse(ctx, in) && err != NULL) { + char buf[64]; + SNPRINTF(buf, sizeof(buf), "syntax error at line %d near: ", in.line()); + *err = buf; + while (1) { + int ch = in.getc(); + if (ch == -1 || ch == '\n') { + break; + } else if (ch >= ' ') { + err->push_back(static_cast(ch)); + } + } + } + return in.cur(); +} + +template inline Iter parse(value &out, const Iter &first, const Iter &last, std::string *err) { + default_parse_context ctx(&out); + return _parse(ctx, first, last, err); +} + +inline std::string parse(value &out, const std::string &s) { + std::string err; + parse(out, s.begin(), s.end(), &err); + return err; +} + +inline std::string parse(value &out, std::istream &is) { + std::string err; + parse(out, std::istreambuf_iterator(is.rdbuf()), std::istreambuf_iterator(), &err); + return err; +} + +template struct last_error_t { static std::string s; }; +template std::string last_error_t::s; + +inline void set_last_error(const std::string &s) { + last_error_t::s = s; +} + +inline const std::string &get_last_error() { + return last_error_t::s; +} + +inline bool operator==(const value &x, const value &y) { + if (x.is()) + return y.is(); +#define PICOJSON_CMP(type) \ + if (x.is()) \ + return y.is() && x.get() == y.get() + PICOJSON_CMP(bool); + PICOJSON_CMP(double); + PICOJSON_CMP(std::string); + PICOJSON_CMP(array); + PICOJSON_CMP(object); +#undef PICOJSON_CMP + PICOJSON_ASSERT(0); +#ifdef _MSC_VER + __assume(0); +#endif + return false; +} + +inline bool operator!=(const value &x, const value &y) { + return !(x == y); +} +} + +#if !PICOJSON_USE_RVALUE_REFERENCE +namespace std { +template <> inline void swap(picojson::value &x, picojson::value &y) { + x.swap(y); +} +} +#endif + +inline std::istream &operator>>(std::istream &is, picojson::value &x) { + picojson::set_last_error(std::string()); + const std::string err(picojson::parse(x, is)); + if (!err.empty()) { + picojson::set_last_error(err); + is.setstate(std::ios::failbit); + } + return is; +} + +inline std::ostream &operator<<(std::ostream &os, const picojson::value &x) { + x.serialize(std::ostream_iterator(os)); + return os; +} +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +// LCOV_EXCL_STOP Reason: External library header. 
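For orientation, a minimal usage sketch of the picojson API defined above: parse a JSON document into a picojson::value, inspect it, and re-serialize. This is illustrative only and follows the upstream picojson interface; the include path is an assumption.

```cpp
// Illustrative only: minimal use of the picojson parse/inspect/serialize API.
#include <iostream>
#include <string>
#include "picojson.h"

int main()
{
    picojson::value v;
    std::string err = picojson::parse(v, "{\"name\": \"waap\", \"ids\": [1, 2, 3]}");
    if (!err.empty()) {
        std::cerr << "parse error: " << err << std::endl;
        return 1;
    }
    if (v.is<picojson::object>() && v.contains("ids")) {
        // get(key) returns a reference to the nested value
        for (const picojson::value &id : v.get("ids").get<picojson::array>()) {
            std::cout << id.to_str() << std::endl;            // prints 1, 2, 3
        }
    }
    std::cout << v.serialize(true) << std::endl;              // pretty-printed JSON
    return 0;
}
```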
+#endif diff --git a/components/security_apps/waap/include/reputation_features_events.h b/components/security_apps/waap/include/reputation_features_events.h new file mode 100755 index 0000000..13eac5d --- /dev/null +++ b/components/security_apps/waap/include/reputation_features_events.h @@ -0,0 +1,100 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __REPUTATION_FEATURES_EVENTS_H__ +#define __REPUTATION_FEATURES_EVENTS_H__ + +#include "event.h" +#include "http_inspection_events.h" + +using ResponseCode = uint16_t; +class ReputationFeaturesEntry; + +class TearDownEvent : public Event +{ +public: + TearDownEvent(ReputationFeaturesEntry *pEntry) : m_pEntry(pEntry) + { + + } + + ReputationFeaturesEntry * + getEntry() const + { + return m_pEntry; + } + +private: + ReputationFeaturesEntry *m_pEntry; +}; + +class IdentifiersEvent : public Event +{ +public: + IdentifiersEvent(const std::string &sourceId, const std::string &assetId) + : + m_sourceId(sourceId), + m_assetId(assetId) + { } + + const std::string & + getSourceId() const + { + return m_sourceId; + } + + const std::string & + getAssetId() const + { + return m_assetId; + } + +private: + const std::string m_sourceId; + const std::string m_assetId; +}; + +class DetectionEvent : public Event +{ +public: + DetectionEvent(const std::string &location, const std::vector &indicators) + : + m_location(location), + m_indicators(indicators) + { } + + // LCOV_EXCL_START - sync functions, can only be tested once the sync module exists + + DetectionEvent() {} + template + void + serialize(T &ar) + { + ar(m_location, m_indicators); + } + + // LCOV_EXCL_STOP + + + const std::string& + getLocation() const + { + return m_location; + } + +private: + std::string m_location; + std::vector m_indicators; +}; + +#endif // __REPUTATION_FEATURES_EVENTS_H__ diff --git a/components/security_apps/waap/reputation/CMakeLists.txt b/components/security_apps/waap/reputation/CMakeLists.txt new file mode 100755 index 0000000..4ae0c70 --- /dev/null +++ b/components/security_apps/waap/reputation/CMakeLists.txt @@ -0,0 +1,3 @@ +include_directories(../include) + +add_library(reputation reputation_features_agg.cc) diff --git a/components/security_apps/waap/reputation/reputation_features_agg.cc b/components/security_apps/waap/reputation/reputation_features_agg.cc new file mode 100755 index 0000000..daed4af --- /dev/null +++ b/components/security_apps/waap/reputation/reputation_features_agg.cc @@ -0,0 +1,379 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "reputation_features_agg.h" + +#include +#include + +#include "i_mainloop.h" +#include "i_time_get.h" +#include "i_serialize.h" +#include "../waap_clib/Waf2Util.h" +#include "customized_cereal_map.h" + +USE_DEBUG_FLAG(D_WAAP_REPUTATION); + +using namespace std; + +template +class DefaultListener : public Listener +{ +public: + DefaultListener(EventVerdict defaultVerdict = EventVerdict(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT)) + : + m_default_verdict(defaultVerdict) + {} + + EventVerdict + respond(const EventType &event) + { + this->upon(event); + return m_default_verdict; + } + +private: + EventVerdict m_default_verdict; +}; + +class ReputationFeaturesAgg::Impl + : + public Listener, + public Listener, + public Listener, + public DefaultListener, + public DefaultListener, + public DefaultListener +{ +public: + Impl() + : + DefaultListener(EventVerdict(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT)), + m_agg_entries() + { + } + + void reportReputationFeatures(); + + void + init() + { + registerListener(); + I_MainLoop* i_mainLoop = Singleton::Consume::by(); + I_MainLoop::Routine routine = [this]() { reportReputationFeatures(); }; + i_mainLoop->addOneTimeRoutine(I_MainLoop::RoutineType::Offline, routine, "report reputation features"); + } + + void + fini() + { + unregisterListener(); + } + + void upon(const IdentifiersEvent &event) override; + void upon(const DetectionEvent &event) override; + void upon(const TearDownEvent &event) override; + + void upon(const NewHttpTransactionEvent &event) override; + void upon(const HttpRequestHeaderEvent &event) override; + void upon(const ResponseCodeEvent &event) override; + + string getListenerName() const { return "reputationFeaturesAgg"; } + +private: + map> m_agg_entries; +}; + +void +SourceReputationFeaturesAgg::addEntry(const ReputationFeaturesEntry &entry) +{ + m_requests++; + if (m_wall_time_hour == 0) { + chrono::hours wallTimeHour = chrono::duration_cast(entry.getTime()); + m_wall_time_hour = wallTimeHour.count(); + } + addMethod(entry.getMethod()); + addResponseCode(entry.getResponseCode()); + addDetections(entry.getDetections()); + addUri(entry.getUri()); + addHeaders(entry); + dbgTrace(D_WAAP_REPUTATION) << "aggregated request from: " << m_wall_time_hour % 24 << + " count: " << m_requests << " for source: " << entry.getSourceId() << " on asset: " << entry.getAssetId(); +} + +string +SourceReputationFeaturesAgg::extractCookieKey(const string &cookie_seg) +{ + size_t pos = cookie_seg.find("="); + return cookie_seg.substr(0, pos); +} + +void +SourceReputationFeaturesAgg::addHeaders(const ReputationFeaturesEntry &entry) +{ + const auto &headers = entry.getHeaders(); + + const auto &user_agent_header_itr = headers.find("user-agent"); + if (user_agent_header_itr != headers.cend()) { + m_unique_user_agent.insert(user_agent_header_itr->second); + } + + const auto &referer_header_itr = headers.find("referer"); + if (referer_header_itr == headers.cend()) { + m_referer_count.na++; + } else { + const string &uri = referer_header_itr->second; + size_t scheme_end_pos = uri.find("://") + 3; + size_t authority_end_pos = uri.find("/", scheme_end_pos + 1); + string authority = uri.substr(scheme_end_pos + 1, authority_end_pos); + if (authority.find(entry.getHost()) != string::npos) { + m_referer_count.external_host++; + } else { + m_referer_count.internal_host++; + } + } + + const auto &cookie_header_itr = 
headers.find("cookie"); + if (cookie_header_itr == headers.cend()) { + return; + } + const string &cookie = cookie_header_itr->second; + const vector &cookie_split = split(cookie, ';'); + for (const auto& cookie_seg : cookie_split) + { + const string &key = extractCookieKey(cookie_seg); + m_unique_cookies.insert(key); + } +} + +void +SourceReputationFeaturesAgg::addDetections(const vector &detections) +{ + for (const auto &detect : detections) { + m_hit_count_per_location[detect.getLocation()]++; + } +} + +void +SourceReputationFeaturesAgg::addUri(const string &uri) +{ + size_t pos = uri.find_first_of("?;"); + if (pos == string::npos) { + m_unique_uris.insert(uri); + return; + } + string uri_path = uri.substr(0, pos); + m_unique_uris.insert(uri_path); +} + +void +SourceReputationFeaturesAgg::addMethod(const string &method) +{ + m_method_count[method]++; +} + +void +SourceReputationFeaturesAgg::addResponseCode(const ResponseCode &responseCode) +{ + if (responseCode >= 500) { + m_response_code_count.response_5xx++; + } else if (responseCode >= 400) { + m_response_code_count.response_4xx++; + } else if (responseCode >= 300) { + m_response_code_count.response_3xx++; + } else if (responseCode >= 200) { + m_response_code_count.response_2xx++; + } else if (responseCode >= 100) { + m_response_code_count.response_1xx++; + } else { + m_response_code_count.response_na++; + } +} + +class ReputationFeaturesReport : public RestGetFile +{ + using SourceAggPerAsset = map>; +public: + ReputationFeaturesReport(SourceAggPerAsset &entries) : + reputation_entries(entries) + { + } + +private: + C2S_PARAM(SourceAggPerAsset, reputation_entries); +}; + +void +ReputationFeaturesAgg::Impl::upon(const IdentifiersEvent &event) +{ + I_Table *pTable = Singleton::Consume().by(); + if (!pTable->hasState()) + { + dbgWarning(D_WAAP_REPUTATION) << "reputation entry state is missing"; + return; + } + ReputationFeaturesEntry &entry = pTable->getState(); + + entry.m_assetId = event.getAssetId(); + entry.m_sourceId = event.getSourceId(); + dbgTrace(D_WAAP_REPUTATION) << "assign identifiers to reputation entry. src: " << event.getSourceId() << + ", asset: " << event.getAssetId(); +} + +void +ReputationFeaturesAgg::Impl::upon(const DetectionEvent &event) +{ + I_Table *pTable = Singleton::Consume().by(); + if (!pTable->hasState()) + { + dbgWarning(D_WAAP_REPUTATION) << "reputation entry state is missing"; + return; + } + ReputationFeaturesEntry &entry = pTable->getState(); + + entry.m_detections.push_back(event); + dbgTrace(D_WAAP_REPUTATION) << "add a detection event. 
detection location: " << event.getLocation(); +} + +void +ReputationFeaturesAgg::Impl::upon(const TearDownEvent &event) +{ + dbgDebug(D_WAAP_REPUTATION) << "aggregating reputation entry data"; + ReputationFeaturesEntry *entry = event.getEntry(); + + SourceReputationFeaturesAgg &srvAgg = m_agg_entries[entry->getAssetId()][entry->getSourceId()]; + srvAgg.addEntry(*entry); +} + +void +ReputationFeaturesAgg::Impl::upon(const NewHttpTransactionEvent &event) +{ + dbgDebug(D_WAAP_REPUTATION) << "new transaction"; + I_Table *pTable = Singleton::Consume().by(); + if (pTable->hasState()) + { + dbgDebug(D_WAAP_REPUTATION) << "reputation entry state already exists"; + return; + } + if (!pTable->createState()) + { + dbgError(D_WAAP_REPUTATION) << "failed to create reputation entry state"; + return; + } + + if (!pTable->hasState()) + { + dbgWarning(D_WAAP_REPUTATION) << "reputation entry state is missing"; + return; + } + ReputationFeaturesEntry& entry = pTable->getState(); + + I_TimeGet *timeGet = Singleton::Consume::by(); + auto currentTime = timeGet->getWalltime(); + entry.m_wallTime = currentTime; + entry.m_method = event.getHttpMethod(); + entry.m_uri = event.getURI(); + entry.m_host = event.getDestinationHost(); + dbgTrace(D_WAAP_REPUTATION) << "created a new reputation entry state"; +} + +void +ReputationFeaturesAgg::Impl::upon(const HttpRequestHeaderEvent &event) +{ + I_Table *pTable = Singleton::Consume().by(); + if (!pTable->hasState()) + { + dbgWarning(D_WAAP_REPUTATION) << "reputation entry state is missing"; + return; + } + ReputationFeaturesEntry &entry = pTable->getState(); + std::string key = event.getKey(); + boost::algorithm::to_lower(key); + entry.m_headers[key] = event.getValue(); + dbgTrace(D_WAAP_REPUTATION) << "add header: " << string(event.getKey()); +} + +void +ReputationFeaturesAgg::Impl::upon(const ResponseCodeEvent &event) +{ + I_Table *pTable = Singleton::Consume().by(); + if (!pTable->hasState()) + { + dbgWarning(D_WAAP_REPUTATION) << "reputation entry state is missing"; + return; + } + ReputationFeaturesEntry &entry = pTable->getState(); + entry.m_responseCode = event.getResponseCode(); + dbgTrace(D_WAAP_REPUTATION) << "add response code: " << entry.getResponseCode(); +} + +void +ReputationFeaturesAgg::Impl::reportReputationFeatures() +{ + I_TimeGet *timeGet = Singleton::Consume::by(); + I_Messaging *msg = Singleton::Consume::by(); + I_AgentDetails *agentDetails = Singleton::Consume::by(); + I_MainLoop *i_mainLoop = Singleton::Consume::by(); + + string tenantId = agentDetails->getTenantId(); + string agentId = agentDetails->getAgentId(); + if (Singleton::exists()) + { + I_InstanceAwareness *instance = Singleton::Consume::by(); + Maybe uniqueId = instance->getUniqueID(); + if (uniqueId.ok()) + { + agentId += "/" + uniqueId.unpack(); + } + } + while (true) + { + auto currentTime = timeGet->getWalltime(); + chrono::microseconds remainingTime = chrono::hours(1) - (currentTime % chrono::hours(1)); + i_mainLoop->yield(remainingTime); + + dbgDebug(D_WAAP_REPUTATION) << "sending features report"; + + ReputationFeaturesReport report(m_agg_entries); + m_agg_entries.clear(); + string uri = "/storage/waap/" + tenantId + "/reputation/" + + to_string(chrono::duration_cast(currentTime).count()) + + "/" + agentId + "/data.data"; + msg->sendObjectWithPersistence(report, + I_Messaging::Method::PUT, + uri, + "", + true, + MessageTypeTag::WAAP_LEARNING); + } +} + +ReputationFeaturesAgg::ReputationFeaturesAgg() : Component("ReputationComp"), pimpl(make_unique()) +{ +} + 
+ReputationFeaturesAgg::~ReputationFeaturesAgg() +{ +} + +void +ReputationFeaturesAgg::init() +{ + pimpl->init(); +} + +void +ReputationFeaturesAgg::fini() +{ + pimpl->fini(); +} diff --git a/components/security_apps/waap/reputation/reputation_features_agg.h b/components/security_apps/waap/reputation/reputation_features_agg.h new file mode 100755 index 0000000..c774b6d --- /dev/null +++ b/components/security_apps/waap/reputation/reputation_features_agg.h @@ -0,0 +1,221 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __REPUTATION_FEATURES_AGG_H__ +#define __REPUTATION_FEATURES_AGG_H__ + +#include + +#include "reputation_features_events.h" +#include "component.h" +#include "table_opaque.h" +#include "i_table.h" +#include "i_agent_details.h" +#include "i_instance_awareness.h" + +class ReputationFeaturesEntry : public TableOpaqueSerialize +{ +public: + ReputationFeaturesEntry() + : + TableOpaqueSerialize(this), + m_wallTime(), + m_sourceId(), + m_assetId(), + m_method(), + m_uri(), + m_responseCode(), + m_detections() + { + } + + ~ReputationFeaturesEntry() + { + TearDownEvent(this).notify(); + } + + // LCOV_EXCL_START - sync functions, can only be tested once the sync module exists + + template + void serialize(T &ar, uint) + { + ar(m_wallTime, + m_sourceId, + m_assetId, + m_method, + m_uri, + m_host, + m_responseCode, + m_detections, + m_headers); + } + + static std::string name() { return "ReputationFeatures"; } + static std::unique_ptr prototype() { return std::make_unique(); } + static uint currVer() { return 0; } + static uint minVer() { return 0; } + + // LCOV_EXCL_STOP + + const std::chrono::microseconds & getTime() const { return m_wallTime; } + const std::string & getSourceId() const { return m_sourceId; } + const std::string & getAssetId() const { return m_assetId; } + const std::string & getMethod() const { return m_method; } + const std::string & getUri() const { return m_uri; } + const std::string & getHost() const { return m_host; } + const ResponseCode & getResponseCode() const { return m_responseCode; } + const std::vector & getDetections() const { return m_detections; } + const std::map & getHeaders() const { return m_headers; } + + friend class ReputationFeaturesAgg; + +private: + std::chrono::microseconds m_wallTime; + std::string m_sourceId; + std::string m_assetId; + std::string m_method; + std::string m_uri; + std::string m_host; + ResponseCode m_responseCode; + std::vector m_detections; + std::map m_headers; +}; + +typedef struct ResponseCodeCounters +{ + size_t response_na; + size_t response_1xx; + size_t response_2xx; + size_t response_3xx; + size_t response_4xx; + size_t response_5xx; + + ResponseCodeCounters() + : + response_na(0), + response_1xx(0), + response_2xx(0), + response_3xx(0), + response_4xx(0), + response_5xx(0) + { + } + + template + void serialize(Archive &ar) + { + ar( + cereal::make_nvp("response_NA", response_na), + cereal::make_nvp("response_1xx", response_1xx), + cereal::make_nvp("response_2xx", response_2xx), + 
cereal::make_nvp("response_3xx", response_3xx), + cereal::make_nvp("response_4xx", response_4xx), + cereal::make_nvp("response_5xx", response_5xx) + ); + } +} ResponseCodeCounters; + +typedef struct RefererCounters +{ + size_t na; + size_t internal_host; + size_t external_host; + + RefererCounters() + : + na(0), + internal_host(0), + external_host(0) + { + } + + template + void + serialize(Archive &ar) + { + ar( + cereal::make_nvp("referer_NA", na), + cereal::make_nvp("internal_host", internal_host), + cereal::make_nvp("external_host", external_host) + ); + } +} RefererCounters; + +class SourceReputationFeaturesAgg +{ +public: + SourceReputationFeaturesAgg() : m_wall_time_hour(0), m_requests(0) + { + } + + template + void + serialize(Archive &ar) + { + ar( + cereal::make_nvp("wall_time_hour", m_wall_time_hour), + cereal::make_nvp("requests_count", m_requests), + cereal::make_nvp("hits_per_location", m_hit_count_per_location), + cereal::make_nvp("method_counters", m_method_count), + cereal::make_nvp("response_code_counters", m_response_code_count), + cereal::make_nvp("referer_counters", m_referer_count), + cereal::make_nvp("uris", m_unique_uris), + cereal::make_nvp("user_agents", m_unique_user_agent), + cereal::make_nvp("cookies", m_unique_cookies) + ); + } + + void addEntry(const ReputationFeaturesEntry &entry); + +private: + std::string extractCookieKey(const std::string &cookie_seg); + void addHeaders(const ReputationFeaturesEntry &entry); + void addDetections(const std::vector &detections); + void addUri(const std::string &uri); + void addMethod(const std::string &method); + void addResponseCode(const ResponseCode &responseCode); + + size_t m_wall_time_hour; + size_t m_requests; + std::map m_hit_count_per_location; + std::map m_method_count; + ResponseCodeCounters m_response_code_count; + RefererCounters m_referer_count; + std::set m_unique_uris; + std::set m_unique_user_agent; + std::set m_unique_cookies; +}; + +class ReputationFeaturesAgg + : + public Component, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume, + Singleton::Consume +{ +public: + ReputationFeaturesAgg(); + ~ReputationFeaturesAgg(); + + void init() override; + void fini() override; + +private: + class Impl; + std::unique_ptr pimpl; +}; + +#endif // __REPUTATION_FEATURES_AGG_H__ diff --git a/components/security_apps/waap/resources/1.data b/components/security_apps/waap/resources/1.data new file mode 100755 index 0000000..dd4fa9d --- /dev/null +++ b/components/security_apps/waap/resources/1.data @@ -0,0 +1,26379 @@ +{ + "allowed_text_re": "^([\\w\\s+-]|%20|%09)*$", + "attack_types_map": { + "code_execution_fast_reg_0": [ + "Remote Code Execution" + ], + "code_execution_fast_reg_1": [ + "Remote Code Execution" + ], + "code_execution_fast_reg_2": [ + "Remote Code Execution" + ], + "code_execution_fast_reg_3": [ + "Remote Code Execution" + ], + "comment_ev_fast_reg_0": [ + "Evasion Techniques" + ], + "comment_newline_bypass_regex_0": [ + "SQL Injection" + ], + "detect_evasion_high_acuracy_0": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_1": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_10": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_11": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_12": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_13": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_14": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_15": [ + "Evasion 
Techniques" + ], + "detect_evasion_high_acuracy_16": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_17": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_18": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_19": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_2": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_20": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_21": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_22": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_23": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_3": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_4": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_5": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_6": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_7": [ + "Evasion Techniques" + ], + "detect_evasion_high_acuracy_9": [ + "Evasion Techniques" + ], + "encoding_chars108a": [ + "Evasion Techniques" + ], + "encoding_chars_2": [ + "Evasion Techniques" + ], + "encoding_chars_3": [ + "Evasion Techniques" + ], + "encoding_chars_4": [ + "Evasion Techniques" + ], + "encoding_charsfb50": [ + "Evasion Techniques" + ], + "evasion": [ + "Evasion Techniques" + ], + "evasion_high_acuracy557d": [ + "Evasion Techniques" + ], + "evasion_high_acuracy90a4": [ + "Evasion Techniques" + ], + "evasion_high_acuracyfb73": [ + "Evasion Techniques" + ], + "evasion_wildcard_regex_0": [ + "Evasion Techniques" + ], + "evasion_wildcard_regex_1": [ + "Evasion Techniques" + ], + "evasion_wildcard_regex_2": [ + "Evasion Techniques" + ], + "evasion_wildcard_regex_3": [ + "Evasion Techniques" + ], + "evasion_wildcard_regex_4": [ + "Evasion Techniques" + ], + "evasion_wildcard_regex_5": [ + "Evasion Techniques" + ], + "evasion_wildcard_regex_6": [ + "Evasion Techniques" + ], + "evasion_wildcard_regex_7": [ + "Evasion Techniques" + ], + "evasion_wildcard_regex_8": [ + "Evasion Techniques" + ], + "evasion_wildcard_regex_9": [ + "Evasion Techniques" + ], + "fn_name_pass_regex_0": [ + "Remote Code Execution" + ], + "general_injection_regex_0": [ + "SQL Injection" + ], + "generic_keywords": [ + "General" + ], + "generic_keywordsce36": [ + "General" + ], + "hi_acur_fast_reg_evasion48ff": [ + "Evasion Techniques" + ], + "hi_acur_fast_reg_evasion9228": [ + "Evasion Techniques" + ], + "hi_acur_fast_reg_evasion_0": [ + "Evasion Techniques" + ], + "hi_acur_fast_reg_evasion_1": [ + "Evasion Techniques" + ], + "hi_acur_fast_reg_evasion_2": [ + "Evasion Techniques" + ], + "hi_acur_fast_reg_evasion_3": [ + "Evasion Techniques" + ], + "high_acuracy1246": [ + "General" + ], + "high_acuracy2c17": [ + "General" + ], + "high_acuracy34a1": [ + "General" + ], + "high_acuracy3afe": [ + "General" + ], + "high_acuracy496d": [ + "General" + ], + "high_acuracy65ab": [ + "General" + ], + "high_acuracy6a8c": [ + "General" + ], + "high_acuracy_0": [ + "General" + ], + "high_acuracy_1": [ + "General" + ], + "high_acuracy_10": [ + "General" + ], + "high_acuracy_12": [ + "General" + ], + "high_acuracy_13": [ + "General" + ], + "high_acuracy_15": [ + "General" + ], + "high_acuracy_16": [ + "General" + ], + "high_acuracy_17": [ + "General" + ], + "high_acuracy_18": [ + "General" + ], + "high_acuracy_2": [ + "General" + ], + "high_acuracy_3": [ + "General" + ], + "high_acuracy_4": [ + "General" + ], + "high_acuracy_5": [ + "General" + ], + "high_acuracy_7": [ + "General" + ], + "high_acuracy_code_exec035f": [ + "Remote Code 
Execution" + ], + "high_acuracy_code_exec038e": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec043f": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec048d": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec063e": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec0668": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec09c1": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec0b2c": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec0b93": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec11b0": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec12a1": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec1457": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec1f6f": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec20f9": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec2880": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec2c66": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec2e22": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec2ed8": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec2f16": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec30f1": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec315e": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec323a": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec32bf": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec3afe": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec3bfa": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec3c96": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec4543": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec461a": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec4753": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec485a": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec5015": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec50a4": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec50f8": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec560f": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec562c": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec56df": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec5729": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec575a": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec59c8": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec5aa6": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec5ff3": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec6274": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec6338": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec6d5b": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec6db9": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec7065": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec778e": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec78ab": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec78da": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec79c4": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec7b77": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec7c99": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec7e9f": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec8386": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec8ef9": [ + "Remote Code Execution" + ], + 
"high_acuracy_code_exec91f3": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec99db": [ + "Remote Code Execution" + ], + "high_acuracy_code_exec9e8c": [ + "Remote Code Execution" + ], + "high_acuracy_code_execa46c": [ + "Remote Code Execution" + ], + "high_acuracy_code_execa58d": [ + "Remote Code Execution" + ], + "high_acuracy_code_execa7da": [ + "Remote Code Execution" + ], + "high_acuracy_code_execa88a": [ + "Remote Code Execution" + ], + "high_acuracy_code_execa9f6": [ + "Remote Code Execution" + ], + "high_acuracy_code_execaa33": [ + "Remote Code Execution" + ], + "high_acuracy_code_execaf62": [ + "Remote Code Execution" + ], + "high_acuracy_code_execb269": [ + "Remote Code Execution" + ], + "high_acuracy_code_execbb03": [ + "Remote Code Execution" + ], + "high_acuracy_code_execbb0c": [ + "Remote Code Execution" + ], + "high_acuracy_code_execbcbe": [ + "Remote Code Execution" + ], + "high_acuracy_code_execbcd6": [ + "Remote Code Execution" + ], + "high_acuracy_code_execc123": [ + "Remote Code Execution" + ], + "high_acuracy_code_execc50f": [ + "Remote Code Execution" + ], + "high_acuracy_code_execca3c": [ + "Remote Code Execution" + ], + "high_acuracy_code_execce3b": [ + "Remote Code Execution" + ], + "high_acuracy_code_execd061": [ + "Remote Code Execution" + ], + "high_acuracy_code_execd217": [ + "Remote Code Execution" + ], + "high_acuracy_code_execd415": [ + "Remote Code Execution" + ], + "high_acuracy_code_execdaf9": [ + "Remote Code Execution" + ], + "high_acuracy_code_exece0bc": [ + "Remote Code Execution" + ], + "high_acuracy_code_execea23": [ + "Remote Code Execution" + ], + "high_acuracy_code_execf01b": [ + "Remote Code Execution" + ], + "high_acuracy_code_execf08e": [ + "Remote Code Execution" + ], + "high_acuracy_code_execf565": [ + "Remote Code Execution" + ], + "high_acuracy_code_execf7a4": [ + "Remote Code Execution" + ], + "high_acuracy_code_execf877": [ + "Remote Code Execution" + ], + "high_acuracy_code_execfccf": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_1": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_12": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_16": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_18": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_20": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_25": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_26": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_4": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_46": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_49": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_51": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_53": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_54": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_59": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_6": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_66": [ + "Remote Code Execution" + ], + "high_acuracy_code_execution_9": [ + "Remote Code Execution" + ], + "high_acuracy_fast_reg_xss0315": [ + "Cross Site Scripting" + ], + "high_acuracy_fast_reg_xss2841": [ + "Cross Site Scripting" + ], + "high_acuracy_fast_reg_xss3420": [ + "Cross Site Scripting" + ], + "high_acuracy_fast_reg_xss52a6": [ + "Cross Site Scripting" + ], + "high_acuracy_fast_reg_xss5e81": [ + "Cross Site Scripting" + ], + "high_acuracy_fast_reg_xss_0": [ + 
"Cross Site Scripting" + ], + "high_acuracy_fast_reg_xssd0fb": [ + "Cross Site Scripting" + ], + "high_acuracy_fast_reg_xxe_0": [ + "XML External Entity" + ], + "high_acuracya652": [ + "General" + ], + "high_acuracyb269": [ + "General" + ], + "high_acuracyc37d": [ + "General" + ], + "ldap_injection": [ + "LDAP Injection" + ], + "ldap_injection_0": [ + "LDAP Injection" + ], + "ldap_injection_1": [ + "LDAP Injection" + ], + "ldap_injection_regex_0": [ + "LDAP Injection" + ], + "ldap_injection_regex_1": [ + "LDAP Injection" + ], + "ldap_injection_regex_2": [ + "LDAP Injection" + ], + "ldap_injection_regex_3": [ + "LDAP Injection" + ], + "ldap_injection_regex_4": [ + "LDAP Injection" + ], + "ldap_injection_regex_5": [ + "LDAP Injection" + ], + "ldap_injection_regex_6": [ + "LDAP Injection" + ], + "ldap_injectionf0b9": [ + "LDAP Injection" + ], + "longtext": [ + "General" + ], + "mail_attacks_0": [ + "Remote Code Execution" + ], + "mail_attacks_1": [ + "Remote Code Execution" + ], + "mail_attacks_2": [ + "Remote Code Execution" + ], + "mail_attacks_3": [ + "Remote Code Execution" + ], + "mail_attacks_4": [ + "Remote Code Execution" + ], + "medium_acuracy": [ + "General" + ], + "medium_acuracy_0": [ + "General" + ], + "medium_acuracy_1": [ + "General" + ], + "no_sql_fast_reg0397": [ + "SQL Injection" + ], + "no_sql_fast_reg7acf": [ + "SQL Injection" + ], + "no_sql_fast_rega323": [ + "SQL Injection" + ], + "no_sql_fast_regb128": [ + "SQL Injection" + ], + "os_cmd_ev_fast_reg642a": [ + "Remote Code Execution", + "Evasion Techniques" + ], + "os_cmd_exec_medium_acuracy": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy065c": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy0f71": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy30f1": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_0": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_1": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_10": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_11": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_12": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_13": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_14": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_15": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_16": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_17": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_18": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_19": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_2": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_20": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_21": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_22": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_4": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_6": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_8": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracy_9": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracyb605": [ + "Remote Code Execution" + ], + "os_cmd_exec_medium_acuracyd71b": [ + "Remote Code Execution" + ], + "os_cmd_exec_susp_dir_patts": [ + "Remote Code Execution" + ], + "os_cmd_exec_susp_dir_patts_0": [ + "Remote Code Execution" + ], + "os_cmd_exec_susp_dir_patts_1": [ + "Remote Code Execution" + ], + "os_cmd_exec_susp_dir_patts_2": [ 
+ "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_reg0e76": [ + "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_reg32ff": [ + "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_reg4c37": [ + "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_reg7f90": [ + "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_reg_0": [ + "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_reg_10": [ + "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_reg_2": [ + "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_reg_4": [ + "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_reg_8": [ + "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_reg_9": [ + "Remote Code Execution" + ], + "os_cmd_high_acuracy_fast_regd6e8": [ + "Remote Code Execution" + ], + "os_cmd_sep_medium_acuracy": [ + "Remote Code Execution" + ], + "os_cmd_sep_medium_acuracy_0": [ + "Remote Code Execution" + ], + "os_cmd_sep_medium_acuracy_1": [ + "Remote Code Execution" + ], + "os_cmd_sep_medium_acuracy_2": [ + "Remote Code Execution" + ], + "os_cmd_sep_medium_acuracy_3": [ + "Remote Code Execution" + ], + "os_cmd_sep_medium_acuracy_4": [ + "Remote Code Execution" + ], + "os_cmd_sep_medium_acuracy_5": [ + "Remote Code Execution" + ], + "os_cmd_sep_medium_acuracy_6": [ + "Remote Code Execution" + ], + "os_cmd_sep_medium_acuracy_7": [ + "Remote Code Execution" + ], + "os_commands": [ + "Remote Code Execution" + ], + "os_commands272f": [ + "Remote Code Execution" + ], + "os_commands2cf4": [ + "Remote Code Execution" + ], + "os_commands2ea6": [ + "Remote Code Execution" + ], + "os_commands8744": [ + "Remote Code Execution" + ], + "os_commands_0": [ + "Remote Code Execution" + ], + "os_commands_1": [ + "Remote Code Execution" + ], + "os_commands_10": [ + "Remote Code Execution" + ], + "os_commands_11": [ + "Remote Code Execution" + ], + "os_commands_12": [ + "Remote Code Execution" + ], + "os_commands_13": [ + "Remote Code Execution" + ], + "os_commands_14": [ + "Remote Code Execution" + ], + "os_commands_15": [ + "Remote Code Execution" + ], + "os_commands_16": [ + "Remote Code Execution" + ], + "os_commands_17": [ + "Remote Code Execution" + ], + "os_commands_18": [ + "Remote Code Execution" + ], + "os_commands_19": [ + "Remote Code Execution" + ], + "os_commands_2": [ + "Remote Code Execution" + ], + "os_commands_20": [ + "Remote Code Execution" + ], + "os_commands_21": [ + "Remote Code Execution" + ], + "os_commands_22": [ + "Remote Code Execution" + ], + "os_commands_23": [ + "Remote Code Execution" + ], + "os_commands_24": [ + "Remote Code Execution" + ], + "os_commands_25": [ + "Remote Code Execution" + ], + "os_commands_26": [ + "Remote Code Execution" + ], + "os_commands_27": [ + "Remote Code Execution" + ], + "os_commands_3": [ + "Remote Code Execution" + ], + "os_commands_4": [ + "Remote Code Execution" + ], + "os_commands_5": [ + "Remote Code Execution" + ], + "os_commands_6": [ + "Remote Code Execution" + ], + "os_commands_7": [ + "Remote Code Execution" + ], + "os_commands_8": [ + "Remote Code Execution" + ], + "os_commands_9": [ + "Remote Code Execution" + ], + "os_commandsbdd9": [ + "Remote Code Execution" + ], + "os_commandsdd75": [ + "Remote Code Execution" + ], + "os_commandsefda": [ + "Remote Code Execution" + ], + "os_commandsff70": [ + "Remote Code Execution" + ], + "path_traversal": [ + "Path Traversal" + ], + "path_traversal65ba": [ + "Path Traversal" + ], + "path_traversal6625": [ + "Path Traversal" + ], + "path_traversal913d": [ + "Path Traversal" + ], + 
"path_traversal_0": [ + "Path Traversal" + ], + "path_traversal_1": [ + "Path Traversal" + ], + "path_traversal_2": [ + "Path Traversal" + ], + "path_traversal_3": [ + "Path Traversal" + ], + "php_info_parameters0669": [ + "Remote Code Execution" + ], + "php_info_parameters3f14": [ + "Remote Code Execution" + ], + "php_info_parameters3fdf": [ + "Remote Code Execution" + ], + "php_info_parameters41db": [ + "Remote Code Execution" + ], + "php_info_parameters8851": [ + "Remote Code Execution" + ], + "php_info_parametersc394": [ + "Remote Code Execution" + ], + "php_info_parametersda77": [ + "Remote Code Execution" + ], + "php_info_parameterse68c": [ + "Remote Code Execution" + ], + "php_proto_wrappers_fast_reg022a": [ + "Remote Code Execution" + ], + "php_proto_wrappers_fast_reg151a": [ + "Remote Code Execution" + ], + "php_proto_wrappers_fast_reg1e0f": [ + "Remote Code Execution" + ], + "php_proto_wrappers_fast_reg4f07": [ + "Remote Code Execution" + ], + "php_proto_wrappers_fast_reg6c96": [ + "Remote Code Execution" + ], + "php_proto_wrappers_fast_reg7c51": [ + "Remote Code Execution" + ], + "php_proto_wrappers_fast_regba9d": [ + "Remote Code Execution" + ], + "php_proto_wrappers_fast_regbc7e": [ + "Remote Code Execution" + ], + "php_proto_wrappers_fast_reged42": [ + "Remote Code Execution" + ], + "quotes_ev_fast_reg_0": [ + "Evasion Techniques" + ], + "quotes_ev_fast_reg_1": [ + "Evasion Techniques" + ], + "quotes_ev_fast_reg_2": [ + "Evasion Techniques" + ], + "quotes_ev_fast_reg_3": [ + "Evasion Techniques" + ], + "quotes_ev_fast_reg_4": [ + "Evasion Techniques" + ], + "quotes_ev_fast_regab4e": [ + "Evasion Techniques" + ], + "quotes_space_ev_fast_reg9d52": [ + "Evasion Techniques" + ], + "regex_code_execution_0": [ + "Remote Code Execution" + ], + "regex_code_execution_1": [ + "Remote Code Execution" + ], + "regex_code_execution_10": [ + "Remote Code Execution" + ], + "regex_code_execution_2": [ + "Remote Code Execution" + ], + "regex_code_execution_3": [ + "Remote Code Execution" + ], + "regex_code_execution_4": [ + "Remote Code Execution" + ], + "regex_code_execution_5": [ + "Remote Code Execution" + ], + "regex_code_execution_6": [ + "Remote Code Execution" + ], + "regex_code_execution_7": [ + "Remote Code Execution" + ], + "regex_code_execution_8": [ + "Remote Code Execution" + ], + "regex_code_execution_9": [ + "Remote Code Execution" + ], + "regex_high_acuracy_crlf_inj_0": [ + "Evasion Techniques" + ], + "regex_high_acuracy_crlf_inj_1": [ + "Evasion Techniques" + ], + "regex_high_acuracy_crlf_inj_2": [ + "Evasion Techniques" + ], + "regex_high_acuracy_crlf_inj_3": [ + "Evasion Techniques" + ], + "regex_postfix_0": [ + "SQL Injection" + ], + "regex_postfix_1": [ + "SQL Injection" + ], + "regex_prefix_0": [ + "SQL Injection" + ], + "regex_prefix_1": [ + "SQL Injection" + ], + "regex_sqli_0": [ + "SQL Injection" + ], + "regex_sqli_1": [ + "SQL Injection" + ], + "regex_sqli_10": [ + "SQL Injection" + ], + "regex_sqli_11": [ + "SQL Injection" + ], + "regex_sqli_12": [ + "SQL Injection" + ], + "regex_sqli_13": [ + "SQL Injection" + ], + "regex_sqli_14": [ + "SQL Injection" + ], + "regex_sqli_15": [ + "SQL Injection" + ], + "regex_sqli_16": [ + "SQL Injection" + ], + "regex_sqli_17": [ + "SQL Injection" + ], + "regex_sqli_18": [ + "SQL Injection" + ], + "regex_sqli_19": [ + "SQL Injection" + ], + "regex_sqli_2": [ + "SQL Injection" + ], + "regex_sqli_20": [ + "SQL Injection" + ], + "regex_sqli_21": [ + "SQL Injection" + ], + "regex_sqli_22": [ + "SQL Injection" + ], + "regex_sqli_23": [ 
+ "SQL Injection" + ], + "regex_sqli_24": [ + "SQL Injection" + ], + "regex_sqli_25": [ + "SQL Injection" + ], + "regex_sqli_26": [ + "SQL Injection" + ], + "regex_sqli_27": [ + "SQL Injection" + ], + "regex_sqli_28": [ + "SQL Injection" + ], + "regex_sqli_29": [ + "SQL Injection" + ], + "regex_sqli_3": [ + "SQL Injection" + ], + "regex_sqli_30": [ + "SQL Injection" + ], + "regex_sqli_31": [ + "SQL Injection" + ], + "regex_sqli_32": [ + "SQL Injection" + ], + "regex_sqli_4": [ + "SQL Injection" + ], + "regex_sqli_5": [ + "SQL Injection" + ], + "regex_sqli_6": [ + "SQL Injection" + ], + "regex_sqli_7": [ + "SQL Injection" + ], + "regex_sqli_8": [ + "SQL Injection" + ], + "regex_sqli_9": [ + "SQL Injection" + ], + "regex_xss_0": [ + "Cross Site Scripting" + ], + "regex_xss_1": [ + "Cross Site Scripting" + ], + "regex_xss_2": [ + "Cross Site Scripting" + ], + "regex_xss_3": [ + "Cross Site Scripting" + ], + "regex_xss_4": [ + "Cross Site Scripting" + ], + "regex_xss_5": [ + "Cross Site Scripting" + ], + "regex_xss_6": [ + "Cross Site Scripting" + ], + "regex_xss_7": [ + "Cross Site Scripting" + ], + "regex_xss_8": [ + "Cross Site Scripting" + ], + "regex_xss_evasion_0": [ + "Cross Site Scripting", + "Evasion Techniques" + ], + "regex_xxe_0": [ + "XML External Entity" + ], + "sqli_blind": [ + "SQL Injection" + ], + "sqli_blind8a50": [ + "SQL Injection" + ], + "sqli_blind_0": [ + "SQL Injection" + ], + "sqli_blinda6e0": [ + "SQL Injection" + ], + "sqli_detection_evasion": [ + "Evasion Techniques", + "SQL Injection" + ], + "sqli_detection_evasion_0": [ + "Evasion Techniques", + "SQL Injection" + ], + "sqli_detection_evasion_1": [ + "Evasion Techniques", + "SQL Injection" + ], + "sqli_fast_reg6210": [ + "SQL Injection" + ], + "sqli_fast_reg7dbf": [ + "SQL Injection" + ], + "sqli_fast_reg_0": [ + "SQL Injection" + ], + "sqli_fast_reg_1": [ + "SQL Injection" + ], + "sqli_fast_reg_2": [ + "SQL Injection" + ], + "sqli_fast_reg_3": [ + "SQL Injection" + ], + "sqli_fast_reg_4": [ + "SQL Injection" + ], + "sqli_fast_reg_5": [ + "SQL Injection" + ], + "sqli_fast_rega9c5": [ + "SQL Injection" + ], + "sqli_generic": [ + "SQL Injection" + ], + "sqli_generic035a": [ + "SQL Injection" + ], + "sqli_generic062d": [ + "SQL Injection" + ], + "sqli_generic0cd1": [ + "SQL Injection" + ], + "sqli_generic0ceb": [ + "SQL Injection" + ], + "sqli_generic108f": [ + "SQL Injection" + ], + "sqli_generic155c": [ + "SQL Injection" + ], + "sqli_generic20ed": [ + "SQL Injection" + ], + "sqli_generic2717": [ + "SQL Injection" + ], + "sqli_generic2a58": [ + "SQL Injection" + ], + "sqli_generic2bdb": [ + "SQL Injection" + ], + "sqli_generic2c9a": [ + "SQL Injection" + ], + "sqli_generic332b": [ + "SQL Injection" + ], + "sqli_generic3928": [ + "SQL Injection" + ], + "sqli_generic39a4": [ + "SQL Injection" + ], + "sqli_generic3c80": [ + "SQL Injection" + ], + "sqli_generic3f67": [ + "SQL Injection" + ], + "sqli_generic4271": [ + "SQL Injection" + ], + "sqli_generic4c86": [ + "SQL Injection" + ], + "sqli_generic4d35": [ + "SQL Injection" + ], + "sqli_generic4fa4": [ + "SQL Injection" + ], + "sqli_generic4ffb": [ + "SQL Injection" + ], + "sqli_generic502b": [ + "SQL Injection" + ], + "sqli_generic506a": [ + "SQL Injection" + ], + "sqli_generic559b": [ + "SQL Injection" + ], + "sqli_generic587a": [ + "SQL Injection" + ], + "sqli_generic6a8c": [ + "SQL Injection" + ], + "sqli_generic7598": [ + "SQL Injection" + ], + "sqli_generic936a": [ + "SQL Injection" + ], + "sqli_generic95c8": [ + "SQL Injection" + ], + "sqli_generic9dce": [ + 
"SQL Injection" + ], + "sqli_generic9ef6": [ + "SQL Injection" + ], + "sqli_generic_10": [ + "SQL Injection" + ], + "sqli_generic_11": [ + "SQL Injection" + ], + "sqli_generic_13": [ + "SQL Injection" + ], + "sqli_generic_14": [ + "SQL Injection" + ], + "sqli_generic_16": [ + "SQL Injection" + ], + "sqli_generic_18": [ + "SQL Injection" + ], + "sqli_generic_21": [ + "SQL Injection" + ], + "sqli_generic_22": [ + "SQL Injection" + ], + "sqli_generic_25": [ + "SQL Injection" + ], + "sqli_generic_27": [ + "SQL Injection" + ], + "sqli_generic_28": [ + "SQL Injection" + ], + "sqli_generic_33": [ + "SQL Injection" + ], + "sqli_generic_37": [ + "SQL Injection" + ], + "sqli_generic_38": [ + "SQL Injection" + ], + "sqli_generic_4": [ + "SQL Injection" + ], + "sqli_generic_41": [ + "SQL Injection" + ], + "sqli_generic_42": [ + "SQL Injection" + ], + "sqli_generic_47": [ + "SQL Injection" + ], + "sqli_generic_5": [ + "SQL Injection" + ], + "sqli_generic_50": [ + "SQL Injection" + ], + "sqli_generic_58": [ + "SQL Injection" + ], + "sqli_generic_59": [ + "SQL Injection" + ], + "sqli_generic_60": [ + "SQL Injection" + ], + "sqli_generic_61": [ + "SQL Injection" + ], + "sqli_generic_63": [ + "SQL Injection" + ], + "sqli_generic_65": [ + "SQL Injection" + ], + "sqli_generic_8": [ + "SQL Injection" + ], + "sqli_genericadd9": [ + "SQL Injection" + ], + "sqli_genericb11c": [ + "SQL Injection" + ], + "sqli_genericb28b": [ + "SQL Injection" + ], + "sqli_genericb844": [ + "SQL Injection" + ], + "sqli_genericb981": [ + "SQL Injection" + ], + "sqli_genericba4a": [ + "SQL Injection" + ], + "sqli_genericba83": [ + "SQL Injection" + ], + "sqli_genericbb99": [ + "SQL Injection" + ], + "sqli_genericc23f": [ + "SQL Injection" + ], + "sqli_genericc98e": [ + "SQL Injection" + ], + "sqli_genericccae": [ + "SQL Injection" + ], + "sqli_genericce64": [ + "SQL Injection" + ], + "sqli_genericd47d": [ + "SQL Injection" + ], + "sqli_genericd497": [ + "SQL Injection" + ], + "sqli_genericdb7b": [ + "SQL Injection" + ], + "sqli_genericdd00": [ + "SQL Injection" + ], + "sqli_genericdd3b": [ + "SQL Injection" + ], + "sqli_generice043": [ + "SQL Injection" + ], + "sqli_generice54c": [ + "SQL Injection" + ], + "sqli_genericea23": [ + "SQL Injection" + ], + "sqli_genericec02": [ + "SQL Injection" + ], + "sqli_medium_acuracy": [ + "SQL Injection" + ], + "sqli_medium_acuracy_0": [ + "SQL Injection" + ], + "sqli_medium_acuracy_1": [ + "SQL Injection" + ], + "sqli_medium_acuracy_2": [ + "SQL Injection" + ], + "sqli_medium_acuracy_3": [ + "SQL Injection" + ], + "sqli_medium_acuracy_4": [ + "SQL Injection" + ], + "sqli_medium_acuracy_5": [ + "SQL Injection" + ], + "sqli_medium_acuracy_6": [ + "SQL Injection" + ], + "sqli_medium_acuracy_7": [ + "SQL Injection" + ], + "ssti": [ + "Remote Code Execution" + ], + "ssti2553": [ + "Remote Code Execution" + ], + "ssti2c4d": [ + "Remote Code Execution" + ], + "ssti32bf": [ + "Remote Code Execution" + ], + "ssti3c17": [ + "Remote Code Execution" + ], + "ssti43a8": [ + "Remote Code Execution" + ], + "ssti6c6e": [ + "Remote Code Execution" + ], + "ssti8e8b": [ + "Remote Code Execution" + ], + "ssti9606": [ + "Remote Code Execution" + ], + "ssti_0": [ + "Remote Code Execution" + ], + "ssti_12": [ + "Remote Code Execution" + ], + "ssti_15": [ + "Remote Code Execution" + ], + "ssti_16": [ + "Remote Code Execution" + ], + "ssti_18": [ + "Remote Code Execution" + ], + "ssti_2": [ + "Remote Code Execution" + ], + "ssti_20": [ + "Remote Code Execution" + ], + "ssti_22": [ + "Remote Code Execution" + ], + 
"ssti_24": [ + "Remote Code Execution" + ], + "ssti_5": [ + "Remote Code Execution" + ], + "ssti_6": [ + "Remote Code Execution" + ], + "ssti_fast_reg2d28": [ + "Remote Code Execution" + ], + "ssti_fast_reg_0": [ + "Remote Code Execution" + ], + "ssti_fast_reg_1": [ + "Remote Code Execution" + ], + "ssti_fast_reg_2": [ + "Remote Code Execution" + ], + "ssti_fast_reg_3": [ + "Remote Code Execution" + ], + "ssti_fast_reg_4": [ + "Remote Code Execution" + ], + "ssti_fast_regc807": [ + "Remote Code Execution" + ], + "ssti_fast_regdcd5": [ + "Remote Code Execution" + ], + "sstib08a": [ + "Remote Code Execution" + ], + "sstib9c5": [ + "Remote Code Execution" + ], + "ssticd8d": [ + "Remote Code Execution" + ], + "sstid1c2": [ + "Remote Code Execution" + ], + "sstid61d": [ + "Remote Code Execution" + ], + "sstie907": [ + "Remote Code Execution" + ], + "sstifdb4": [ + "Remote Code Execution" + ], + "url_scanning": [ + "Path Traversal" + ], + "url_scanning1f1b": [ + "Path Traversal" + ], + "url_scanning4716": [ + "Path Traversal" + ], + "url_scanning6bf5": [ + "Path Traversal" + ], + "url_scanning6f3d": [ + "Path Traversal" + ], + "url_scanning7ce2": [ + "Path Traversal" + ], + "url_scanning_1": [ + "Path Traversal" + ], + "url_scanning_11": [ + "Path Traversal" + ], + "url_scanning_12": [ + "Path Traversal" + ], + "url_scanning_13": [ + "Path Traversal" + ], + "url_scanning_14": [ + "Path Traversal" + ], + "url_scanning_15": [ + "Path Traversal" + ], + "url_scanning_2": [ + "Path Traversal" + ], + "url_scanning_5": [ + "Path Traversal" + ], + "url_scanning_6": [ + "Path Traversal" + ], + "url_scanning_7": [ + "Path Traversal" + ], + "url_scanning_8": [ + "Path Traversal" + ], + "url_scanning_9": [ + "Path Traversal" + ], + "url_scanning_regex_0": [ + "Path Traversal" + ], + "url_scanningd475": [ + "Path Traversal" + ], + "url_scanningf544": [ + "Path Traversal" + ], + "url_scanningf977": [ + "Path Traversal" + ], + "vuln_os_dirs": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs2c17": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs3b59": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_0": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_1": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_10": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_11": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_13": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_14": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_15": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_16": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_17": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_2": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_3": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_4": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_5": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_6": [ + "Vulnerability Scanning" + ], + "vuln_os_dirs_8": [ + "Vulnerability Scanning" + ], + "vuln_os_files_0": [ + "Vulnerability Scanning" + ], + "vuln_os_files_1": [ + "Vulnerability Scanning" + ], + "vuln_os_files_2": [ + "Vulnerability Scanning" + ], + "vuln_os_files_3": [ + "Vulnerability Scanning" + ], + "vuln_os_files_4": [ + "Vulnerability Scanning" + ], + "vuln_os_files_5": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_102": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_121": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_170": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_185": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_207": [ 
+ "Vulnerability Scanning" + ], + "vuln_web_dirs_280": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_307": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_377": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_380": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_389": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_40": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_428": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_483": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_593": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_600": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_665": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_689": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_71": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_712": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_729": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_74": [ + "Vulnerability Scanning" + ], + "vuln_web_dirs_77": [ + "Vulnerability Scanning" + ], + "vuln_web_files": [ + "Vulnerability Scanning" + ], + "vuln_web_files00a6": [ + "Vulnerability Scanning" + ], + "vuln_web_files1a13": [ + "Vulnerability Scanning" + ], + "vuln_web_files1b74": [ + "Vulnerability Scanning" + ], + "vuln_web_files2919": [ + "Vulnerability Scanning" + ], + "vuln_web_files5e7d": [ + "Vulnerability Scanning" + ], + "vuln_web_files87e1": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1003": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1005": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1007": [ + "Vulnerability Scanning" + ], + "vuln_web_files_101": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1014": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1016": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1018": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1019": [ + "Vulnerability Scanning" + ], + "vuln_web_files_102": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1021": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1023": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1025": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1027": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1028": [ + "Vulnerability Scanning" + ], + "vuln_web_files_103": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1031": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1037": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1038": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1039": [ + "Vulnerability Scanning" + ], + "vuln_web_files_104": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1043": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1045": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1049": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1050": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1052": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1059": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1060": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1062": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1063": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1068": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1070": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1071": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1075": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1076": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1077": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1079": [ + 
"Vulnerability Scanning" + ], + "vuln_web_files_108": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1080": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1081": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1082": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1083": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1085": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1087": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1089": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1090": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1092": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1094": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1095": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1096": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1097": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1098": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1099": [ + "Vulnerability Scanning" + ], + "vuln_web_files_11": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1100": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1101": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1104": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1108": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1110": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1111": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1114": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1118": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1119": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1120": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1122": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1123": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1126": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1127": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1128": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1129": [ + "Vulnerability Scanning" + ], + "vuln_web_files_113": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1130": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1132": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1133": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1135": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1136": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1137": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1138": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1143": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1144": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1146": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1147": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1148": [ + "Vulnerability Scanning" + ], + "vuln_web_files_115": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1150": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1151": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1152": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1155": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1156": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1157": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1159": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1160": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1162": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1163": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1164": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1166": [ + "Vulnerability Scanning" + ], + "vuln_web_files_117": [ + "Vulnerability 
Scanning" + ], + "vuln_web_files_1170": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1171": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1173": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1175": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1177": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1178": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1179": [ + "Vulnerability Scanning" + ], + "vuln_web_files_118": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1180": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1181": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1182": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1183": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1184": [ + "Vulnerability Scanning" + ], + "vuln_web_files_1187": [ + "Vulnerability Scanning" + ], + "vuln_web_files_119": [ + "Vulnerability Scanning" + ], + "vuln_web_files_120": [ + "Vulnerability Scanning" + ], + "vuln_web_files_121": [ + "Vulnerability Scanning" + ], + "vuln_web_files_122": [ + "Vulnerability Scanning" + ], + "vuln_web_files_123": [ + "Vulnerability Scanning" + ], + "vuln_web_files_124": [ + "Vulnerability Scanning" + ], + "vuln_web_files_126": [ + "Vulnerability Scanning" + ], + "vuln_web_files_128": [ + "Vulnerability Scanning" + ], + "vuln_web_files_13": [ + "Vulnerability Scanning" + ], + "vuln_web_files_132": [ + "Vulnerability Scanning" + ], + "vuln_web_files_135": [ + "Vulnerability Scanning" + ], + "vuln_web_files_136": [ + "Vulnerability Scanning" + ], + "vuln_web_files_139": [ + "Vulnerability Scanning" + ], + "vuln_web_files_14": [ + "Vulnerability Scanning" + ], + "vuln_web_files_143": [ + "Vulnerability Scanning" + ], + "vuln_web_files_145": [ + "Vulnerability Scanning" + ], + "vuln_web_files_147": [ + "Vulnerability Scanning" + ], + "vuln_web_files_149": [ + "Vulnerability Scanning" + ], + "vuln_web_files_15": [ + "Vulnerability Scanning" + ], + "vuln_web_files_150": [ + "Vulnerability Scanning" + ], + "vuln_web_files_152": [ + "Vulnerability Scanning" + ], + "vuln_web_files_154": [ + "Vulnerability Scanning" + ], + "vuln_web_files_155": [ + "Vulnerability Scanning" + ], + "vuln_web_files_157": [ + "Vulnerability Scanning" + ], + "vuln_web_files_158": [ + "Vulnerability Scanning" + ], + "vuln_web_files_159": [ + "Vulnerability Scanning" + ], + "vuln_web_files_16": [ + "Vulnerability Scanning" + ], + "vuln_web_files_162": [ + "Vulnerability Scanning" + ], + "vuln_web_files_163": [ + "Vulnerability Scanning" + ], + "vuln_web_files_164": [ + "Vulnerability Scanning" + ], + "vuln_web_files_165": [ + "Vulnerability Scanning" + ], + "vuln_web_files_166": [ + "Vulnerability Scanning" + ], + "vuln_web_files_168": [ + "Vulnerability Scanning" + ], + "vuln_web_files_17": [ + "Vulnerability Scanning" + ], + "vuln_web_files_171": [ + "Vulnerability Scanning" + ], + "vuln_web_files_173": [ + "Vulnerability Scanning" + ], + "vuln_web_files_175": [ + "Vulnerability Scanning" + ], + "vuln_web_files_178": [ + "Vulnerability Scanning" + ], + "vuln_web_files_18": [ + "Vulnerability Scanning" + ], + "vuln_web_files_181": [ + "Vulnerability Scanning" + ], + "vuln_web_files_183": [ + "Vulnerability Scanning" + ], + "vuln_web_files_186": [ + "Vulnerability Scanning" + ], + "vuln_web_files_188": [ + "Vulnerability Scanning" + ], + "vuln_web_files_189": [ + "Vulnerability Scanning" + ], + "vuln_web_files_190": [ + "Vulnerability Scanning" + ], + "vuln_web_files_192": [ + "Vulnerability Scanning" + ], + "vuln_web_files_193": [ + "Vulnerability Scanning" 
+ ], + "vuln_web_files_194": [ + "Vulnerability Scanning" + ], + "vuln_web_files_198": [ + "Vulnerability Scanning" + ], + "vuln_web_files_2": [ + "Vulnerability Scanning" + ], + "vuln_web_files_20": [ + "Vulnerability Scanning" + ], + "vuln_web_files_201": [ + "Vulnerability Scanning" + ], + "vuln_web_files_203": [ + "Vulnerability Scanning" + ], + "vuln_web_files_206": [ + "Vulnerability Scanning" + ], + "vuln_web_files_207": [ + "Vulnerability Scanning" + ], + "vuln_web_files_209": [ + "Vulnerability Scanning" + ], + "vuln_web_files_210": [ + "Vulnerability Scanning" + ], + "vuln_web_files_212": [ + "Vulnerability Scanning" + ], + "vuln_web_files_215": [ + "Vulnerability Scanning" + ], + "vuln_web_files_217": [ + "Vulnerability Scanning" + ], + "vuln_web_files_221": [ + "Vulnerability Scanning" + ], + "vuln_web_files_222": [ + "Vulnerability Scanning" + ], + "vuln_web_files_225": [ + "Vulnerability Scanning" + ], + "vuln_web_files_226": [ + "Vulnerability Scanning" + ], + "vuln_web_files_227": [ + "Vulnerability Scanning" + ], + "vuln_web_files_229": [ + "Vulnerability Scanning" + ], + "vuln_web_files_23": [ + "Vulnerability Scanning" + ], + "vuln_web_files_230": [ + "Vulnerability Scanning" + ], + "vuln_web_files_231": [ + "Vulnerability Scanning" + ], + "vuln_web_files_233": [ + "Vulnerability Scanning" + ], + "vuln_web_files_234": [ + "Vulnerability Scanning" + ], + "vuln_web_files_235": [ + "Vulnerability Scanning" + ], + "vuln_web_files_237": [ + "Vulnerability Scanning" + ], + "vuln_web_files_238": [ + "Vulnerability Scanning" + ], + "vuln_web_files_240": [ + "Vulnerability Scanning" + ], + "vuln_web_files_243": [ + "Vulnerability Scanning" + ], + "vuln_web_files_246": [ + "Vulnerability Scanning" + ], + "vuln_web_files_247": [ + "Vulnerability Scanning" + ], + "vuln_web_files_250": [ + "Vulnerability Scanning" + ], + "vuln_web_files_251": [ + "Vulnerability Scanning" + ], + "vuln_web_files_252": [ + "Vulnerability Scanning" + ], + "vuln_web_files_254": [ + "Vulnerability Scanning" + ], + "vuln_web_files_255": [ + "Vulnerability Scanning" + ], + "vuln_web_files_258": [ + "Vulnerability Scanning" + ], + "vuln_web_files_259": [ + "Vulnerability Scanning" + ], + "vuln_web_files_26": [ + "Vulnerability Scanning" + ], + "vuln_web_files_260": [ + "Vulnerability Scanning" + ], + "vuln_web_files_261": [ + "Vulnerability Scanning" + ], + "vuln_web_files_262": [ + "Vulnerability Scanning" + ], + "vuln_web_files_267": [ + "Vulnerability Scanning" + ], + "vuln_web_files_269": [ + "Vulnerability Scanning" + ], + "vuln_web_files_27": [ + "Vulnerability Scanning" + ], + "vuln_web_files_270": [ + "Vulnerability Scanning" + ], + "vuln_web_files_271": [ + "Vulnerability Scanning" + ], + "vuln_web_files_276": [ + "Vulnerability Scanning" + ], + "vuln_web_files_277": [ + "Vulnerability Scanning" + ], + "vuln_web_files_278": [ + "Vulnerability Scanning" + ], + "vuln_web_files_279": [ + "Vulnerability Scanning" + ], + "vuln_web_files_28": [ + "Vulnerability Scanning" + ], + "vuln_web_files_281": [ + "Vulnerability Scanning" + ], + "vuln_web_files_282": [ + "Vulnerability Scanning" + ], + "vuln_web_files_283": [ + "Vulnerability Scanning" + ], + "vuln_web_files_284": [ + "Vulnerability Scanning" + ], + "vuln_web_files_285": [ + "Vulnerability Scanning" + ], + "vuln_web_files_286": [ + "Vulnerability Scanning" + ], + "vuln_web_files_287": [ + "Vulnerability Scanning" + ], + "vuln_web_files_288": [ + "Vulnerability Scanning" + ], + "vuln_web_files_289": [ + "Vulnerability Scanning" + ], + 
"vuln_web_files_29": [ + "Vulnerability Scanning" + ], + "vuln_web_files_291": [ + "Vulnerability Scanning" + ], + "vuln_web_files_292": [ + "Vulnerability Scanning" + ], + "vuln_web_files_294": [ + "Vulnerability Scanning" + ], + "vuln_web_files_295": [ + "Vulnerability Scanning" + ], + "vuln_web_files_297": [ + "Vulnerability Scanning" + ], + "vuln_web_files_30": [ + "Vulnerability Scanning" + ], + "vuln_web_files_300": [ + "Vulnerability Scanning" + ], + "vuln_web_files_301": [ + "Vulnerability Scanning" + ], + "vuln_web_files_302": [ + "Vulnerability Scanning" + ], + "vuln_web_files_303": [ + "Vulnerability Scanning" + ], + "vuln_web_files_304": [ + "Vulnerability Scanning" + ], + "vuln_web_files_306": [ + "Vulnerability Scanning" + ], + "vuln_web_files_307": [ + "Vulnerability Scanning" + ], + "vuln_web_files_308": [ + "Vulnerability Scanning" + ], + "vuln_web_files_310": [ + "Vulnerability Scanning" + ], + "vuln_web_files_311": [ + "Vulnerability Scanning" + ], + "vuln_web_files_312": [ + "Vulnerability Scanning" + ], + "vuln_web_files_313": [ + "Vulnerability Scanning" + ], + "vuln_web_files_316": [ + "Vulnerability Scanning" + ], + "vuln_web_files_320": [ + "Vulnerability Scanning" + ], + "vuln_web_files_321": [ + "Vulnerability Scanning" + ], + "vuln_web_files_322": [ + "Vulnerability Scanning" + ], + "vuln_web_files_323": [ + "Vulnerability Scanning" + ], + "vuln_web_files_324": [ + "Vulnerability Scanning" + ], + "vuln_web_files_331": [ + "Vulnerability Scanning" + ], + "vuln_web_files_333": [ + "Vulnerability Scanning" + ], + "vuln_web_files_336": [ + "Vulnerability Scanning" + ], + "vuln_web_files_337": [ + "Vulnerability Scanning" + ], + "vuln_web_files_338": [ + "Vulnerability Scanning" + ], + "vuln_web_files_34": [ + "Vulnerability Scanning" + ], + "vuln_web_files_341": [ + "Vulnerability Scanning" + ], + "vuln_web_files_344": [ + "Vulnerability Scanning" + ], + "vuln_web_files_345": [ + "Vulnerability Scanning" + ], + "vuln_web_files_348": [ + "Vulnerability Scanning" + ], + "vuln_web_files_349": [ + "Vulnerability Scanning" + ], + "vuln_web_files_350": [ + "Vulnerability Scanning" + ], + "vuln_web_files_351": [ + "Vulnerability Scanning" + ], + "vuln_web_files_352": [ + "Vulnerability Scanning" + ], + "vuln_web_files_356": [ + "Vulnerability Scanning" + ], + "vuln_web_files_36": [ + "Vulnerability Scanning" + ], + "vuln_web_files_360": [ + "Vulnerability Scanning" + ], + "vuln_web_files_362": [ + "Vulnerability Scanning" + ], + "vuln_web_files_363": [ + "Vulnerability Scanning" + ], + "vuln_web_files_369": [ + "Vulnerability Scanning" + ], + "vuln_web_files_37": [ + "Vulnerability Scanning" + ], + "vuln_web_files_372": [ + "Vulnerability Scanning" + ], + "vuln_web_files_373": [ + "Vulnerability Scanning" + ], + "vuln_web_files_374": [ + "Vulnerability Scanning" + ], + "vuln_web_files_377": [ + "Vulnerability Scanning" + ], + "vuln_web_files_378": [ + "Vulnerability Scanning" + ], + "vuln_web_files_379": [ + "Vulnerability Scanning" + ], + "vuln_web_files_38": [ + "Vulnerability Scanning" + ], + "vuln_web_files_380": [ + "Vulnerability Scanning" + ], + "vuln_web_files_387": [ + "Vulnerability Scanning" + ], + "vuln_web_files_389": [ + "Vulnerability Scanning" + ], + "vuln_web_files_39": [ + "Vulnerability Scanning" + ], + "vuln_web_files_390": [ + "Vulnerability Scanning" + ], + "vuln_web_files_392": [ + "Vulnerability Scanning" + ], + "vuln_web_files_393": [ + "Vulnerability Scanning" + ], + "vuln_web_files_395": [ + "Vulnerability Scanning" + ], + "vuln_web_files_396": [ 
+ "Vulnerability Scanning" + ], + "vuln_web_files_397": [ + "Vulnerability Scanning" + ], + "vuln_web_files_399": [ + "Vulnerability Scanning" + ], + "vuln_web_files_40": [ + "Vulnerability Scanning" + ], + "vuln_web_files_401": [ + "Vulnerability Scanning" + ], + "vuln_web_files_402": [ + "Vulnerability Scanning" + ], + "vuln_web_files_403": [ + "Vulnerability Scanning" + ], + "vuln_web_files_404": [ + "Vulnerability Scanning" + ], + "vuln_web_files_407": [ + "Vulnerability Scanning" + ], + "vuln_web_files_409": [ + "Vulnerability Scanning" + ], + "vuln_web_files_41": [ + "Vulnerability Scanning" + ], + "vuln_web_files_412": [ + "Vulnerability Scanning" + ], + "vuln_web_files_414": [ + "Vulnerability Scanning" + ], + "vuln_web_files_415": [ + "Vulnerability Scanning" + ], + "vuln_web_files_417": [ + "Vulnerability Scanning" + ], + "vuln_web_files_419": [ + "Vulnerability Scanning" + ], + "vuln_web_files_421": [ + "Vulnerability Scanning" + ], + "vuln_web_files_422": [ + "Vulnerability Scanning" + ], + "vuln_web_files_425": [ + "Vulnerability Scanning" + ], + "vuln_web_files_427": [ + "Vulnerability Scanning" + ], + "vuln_web_files_428": [ + "Vulnerability Scanning" + ], + "vuln_web_files_429": [ + "Vulnerability Scanning" + ], + "vuln_web_files_431": [ + "Vulnerability Scanning" + ], + "vuln_web_files_432": [ + "Vulnerability Scanning" + ], + "vuln_web_files_433": [ + "Vulnerability Scanning" + ], + "vuln_web_files_434": [ + "Vulnerability Scanning" + ], + "vuln_web_files_436": [ + "Vulnerability Scanning" + ], + "vuln_web_files_437": [ + "Vulnerability Scanning" + ], + "vuln_web_files_438": [ + "Vulnerability Scanning" + ], + "vuln_web_files_440": [ + "Vulnerability Scanning" + ], + "vuln_web_files_441": [ + "Vulnerability Scanning" + ], + "vuln_web_files_443": [ + "Vulnerability Scanning" + ], + "vuln_web_files_444": [ + "Vulnerability Scanning" + ], + "vuln_web_files_445": [ + "Vulnerability Scanning" + ], + "vuln_web_files_446": [ + "Vulnerability Scanning" + ], + "vuln_web_files_447": [ + "Vulnerability Scanning" + ], + "vuln_web_files_448": [ + "Vulnerability Scanning" + ], + "vuln_web_files_451": [ + "Vulnerability Scanning" + ], + "vuln_web_files_453": [ + "Vulnerability Scanning" + ], + "vuln_web_files_454": [ + "Vulnerability Scanning" + ], + "vuln_web_files_457": [ + "Vulnerability Scanning" + ], + "vuln_web_files_458": [ + "Vulnerability Scanning" + ], + "vuln_web_files_459": [ + "Vulnerability Scanning" + ], + "vuln_web_files_462": [ + "Vulnerability Scanning" + ], + "vuln_web_files_463": [ + "Vulnerability Scanning" + ], + "vuln_web_files_466": [ + "Vulnerability Scanning" + ], + "vuln_web_files_467": [ + "Vulnerability Scanning" + ], + "vuln_web_files_472": [ + "Vulnerability Scanning" + ], + "vuln_web_files_473": [ + "Vulnerability Scanning" + ], + "vuln_web_files_474": [ + "Vulnerability Scanning" + ], + "vuln_web_files_479": [ + "Vulnerability Scanning" + ], + "vuln_web_files_480": [ + "Vulnerability Scanning" + ], + "vuln_web_files_481": [ + "Vulnerability Scanning" + ], + "vuln_web_files_482": [ + "Vulnerability Scanning" + ], + "vuln_web_files_483": [ + "Vulnerability Scanning" + ], + "vuln_web_files_484": [ + "Vulnerability Scanning" + ], + "vuln_web_files_485": [ + "Vulnerability Scanning" + ], + "vuln_web_files_486": [ + "Vulnerability Scanning" + ], + "vuln_web_files_487": [ + "Vulnerability Scanning" + ], + "vuln_web_files_488": [ + "Vulnerability Scanning" + ], + "vuln_web_files_489": [ + "Vulnerability Scanning" + ], + "vuln_web_files_492": [ + "Vulnerability 
Scanning" + ], + "vuln_web_files_493": [ + "Vulnerability Scanning" + ], + "vuln_web_files_494": [ + "Vulnerability Scanning" + ], + "vuln_web_files_495": [ + "Vulnerability Scanning" + ], + "vuln_web_files_496": [ + "Vulnerability Scanning" + ], + "vuln_web_files_497": [ + "Vulnerability Scanning" + ], + "vuln_web_files_50": [ + "Vulnerability Scanning" + ], + "vuln_web_files_502": [ + "Vulnerability Scanning" + ], + "vuln_web_files_503": [ + "Vulnerability Scanning" + ], + "vuln_web_files_505": [ + "Vulnerability Scanning" + ], + "vuln_web_files_508": [ + "Vulnerability Scanning" + ], + "vuln_web_files_51": [ + "Vulnerability Scanning" + ], + "vuln_web_files_510": [ + "Vulnerability Scanning" + ], + "vuln_web_files_511": [ + "Vulnerability Scanning" + ], + "vuln_web_files_514": [ + "Vulnerability Scanning" + ], + "vuln_web_files_515": [ + "Vulnerability Scanning" + ], + "vuln_web_files_518": [ + "Vulnerability Scanning" + ], + "vuln_web_files_519": [ + "Vulnerability Scanning" + ], + "vuln_web_files_52": [ + "Vulnerability Scanning" + ], + "vuln_web_files_520": [ + "Vulnerability Scanning" + ], + "vuln_web_files_521": [ + "Vulnerability Scanning" + ], + "vuln_web_files_522": [ + "Vulnerability Scanning" + ], + "vuln_web_files_524": [ + "Vulnerability Scanning" + ], + "vuln_web_files_53": [ + "Vulnerability Scanning" + ], + "vuln_web_files_530": [ + "Vulnerability Scanning" + ], + "vuln_web_files_533": [ + "Vulnerability Scanning" + ], + "vuln_web_files_535": [ + "Vulnerability Scanning" + ], + "vuln_web_files_536": [ + "Vulnerability Scanning" + ], + "vuln_web_files_538": [ + "Vulnerability Scanning" + ], + "vuln_web_files_54": [ + "Vulnerability Scanning" + ], + "vuln_web_files_540": [ + "Vulnerability Scanning" + ], + "vuln_web_files_541": [ + "Vulnerability Scanning" + ], + "vuln_web_files_542": [ + "Vulnerability Scanning" + ], + "vuln_web_files_543": [ + "Vulnerability Scanning" + ], + "vuln_web_files_546": [ + "Vulnerability Scanning" + ], + "vuln_web_files_547": [ + "Vulnerability Scanning" + ], + "vuln_web_files_548": [ + "Vulnerability Scanning" + ], + "vuln_web_files_549": [ + "Vulnerability Scanning" + ], + "vuln_web_files_55": [ + "Vulnerability Scanning" + ], + "vuln_web_files_550": [ + "Vulnerability Scanning" + ], + "vuln_web_files_551": [ + "Vulnerability Scanning" + ], + "vuln_web_files_557": [ + "Vulnerability Scanning" + ], + "vuln_web_files_559": [ + "Vulnerability Scanning" + ], + "vuln_web_files_56": [ + "Vulnerability Scanning" + ], + "vuln_web_files_560": [ + "Vulnerability Scanning" + ], + "vuln_web_files_563": [ + "Vulnerability Scanning" + ], + "vuln_web_files_564": [ + "Vulnerability Scanning" + ], + "vuln_web_files_566": [ + "Vulnerability Scanning" + ], + "vuln_web_files_57": [ + "Vulnerability Scanning" + ], + "vuln_web_files_570": [ + "Vulnerability Scanning" + ], + "vuln_web_files_572": [ + "Vulnerability Scanning" + ], + "vuln_web_files_573": [ + "Vulnerability Scanning" + ], + "vuln_web_files_574": [ + "Vulnerability Scanning" + ], + "vuln_web_files_576": [ + "Vulnerability Scanning" + ], + "vuln_web_files_58": [ + "Vulnerability Scanning" + ], + "vuln_web_files_581": [ + "Vulnerability Scanning" + ], + "vuln_web_files_582": [ + "Vulnerability Scanning" + ], + "vuln_web_files_586": [ + "Vulnerability Scanning" + ], + "vuln_web_files_587": [ + "Vulnerability Scanning" + ], + "vuln_web_files_588": [ + "Vulnerability Scanning" + ], + "vuln_web_files_589": [ + "Vulnerability Scanning" + ], + "vuln_web_files_59": [ + "Vulnerability Scanning" + ], + 
"vuln_web_files_590": [ + "Vulnerability Scanning" + ], + "vuln_web_files_593": [ + "Vulnerability Scanning" + ], + "vuln_web_files_595": [ + "Vulnerability Scanning" + ], + "vuln_web_files_596": [ + "Vulnerability Scanning" + ], + "vuln_web_files_598": [ + "Vulnerability Scanning" + ], + "vuln_web_files_601": [ + "Vulnerability Scanning" + ], + "vuln_web_files_602": [ + "Vulnerability Scanning" + ], + "vuln_web_files_603": [ + "Vulnerability Scanning" + ], + "vuln_web_files_605": [ + "Vulnerability Scanning" + ], + "vuln_web_files_607": [ + "Vulnerability Scanning" + ], + "vuln_web_files_61": [ + "Vulnerability Scanning" + ], + "vuln_web_files_615": [ + "Vulnerability Scanning" + ], + "vuln_web_files_616": [ + "Vulnerability Scanning" + ], + "vuln_web_files_618": [ + "Vulnerability Scanning" + ], + "vuln_web_files_619": [ + "Vulnerability Scanning" + ], + "vuln_web_files_620": [ + "Vulnerability Scanning" + ], + "vuln_web_files_623": [ + "Vulnerability Scanning" + ], + "vuln_web_files_624": [ + "Vulnerability Scanning" + ], + "vuln_web_files_625": [ + "Vulnerability Scanning" + ], + "vuln_web_files_626": [ + "Vulnerability Scanning" + ], + "vuln_web_files_627": [ + "Vulnerability Scanning" + ], + "vuln_web_files_628": [ + "Vulnerability Scanning" + ], + "vuln_web_files_629": [ + "Vulnerability Scanning" + ], + "vuln_web_files_63": [ + "Vulnerability Scanning" + ], + "vuln_web_files_630": [ + "Vulnerability Scanning" + ], + "vuln_web_files_631": [ + "Vulnerability Scanning" + ], + "vuln_web_files_637": [ + "Vulnerability Scanning" + ], + "vuln_web_files_64": [ + "Vulnerability Scanning" + ], + "vuln_web_files_641": [ + "Vulnerability Scanning" + ], + "vuln_web_files_642": [ + "Vulnerability Scanning" + ], + "vuln_web_files_643": [ + "Vulnerability Scanning" + ], + "vuln_web_files_644": [ + "Vulnerability Scanning" + ], + "vuln_web_files_645": [ + "Vulnerability Scanning" + ], + "vuln_web_files_648": [ + "Vulnerability Scanning" + ], + "vuln_web_files_649": [ + "Vulnerability Scanning" + ], + "vuln_web_files_65": [ + "Vulnerability Scanning" + ], + "vuln_web_files_651": [ + "Vulnerability Scanning" + ], + "vuln_web_files_652": [ + "Vulnerability Scanning" + ], + "vuln_web_files_655": [ + "Vulnerability Scanning" + ], + "vuln_web_files_656": [ + "Vulnerability Scanning" + ], + "vuln_web_files_659": [ + "Vulnerability Scanning" + ], + "vuln_web_files_662": [ + "Vulnerability Scanning" + ], + "vuln_web_files_663": [ + "Vulnerability Scanning" + ], + "vuln_web_files_664": [ + "Vulnerability Scanning" + ], + "vuln_web_files_667": [ + "Vulnerability Scanning" + ], + "vuln_web_files_668": [ + "Vulnerability Scanning" + ], + "vuln_web_files_67": [ + "Vulnerability Scanning" + ], + "vuln_web_files_671": [ + "Vulnerability Scanning" + ], + "vuln_web_files_675": [ + "Vulnerability Scanning" + ], + "vuln_web_files_676": [ + "Vulnerability Scanning" + ], + "vuln_web_files_677": [ + "Vulnerability Scanning" + ], + "vuln_web_files_678": [ + "Vulnerability Scanning" + ], + "vuln_web_files_679": [ + "Vulnerability Scanning" + ], + "vuln_web_files_68": [ + "Vulnerability Scanning" + ], + "vuln_web_files_682": [ + "Vulnerability Scanning" + ], + "vuln_web_files_685": [ + "Vulnerability Scanning" + ], + "vuln_web_files_686": [ + "Vulnerability Scanning" + ], + "vuln_web_files_688": [ + "Vulnerability Scanning" + ], + "vuln_web_files_689": [ + "Vulnerability Scanning" + ], + "vuln_web_files_690": [ + "Vulnerability Scanning" + ], + "vuln_web_files_691": [ + "Vulnerability Scanning" + ], + "vuln_web_files_692": 
[ + "Vulnerability Scanning" + ], + "vuln_web_files_693": [ + "Vulnerability Scanning" + ], + "vuln_web_files_698": [ + "Vulnerability Scanning" + ], + "vuln_web_files_7": [ + "Vulnerability Scanning" + ], + "vuln_web_files_700": [ + "Vulnerability Scanning" + ], + "vuln_web_files_701": [ + "Vulnerability Scanning" + ], + "vuln_web_files_703": [ + "Vulnerability Scanning" + ], + "vuln_web_files_708": [ + "Vulnerability Scanning" + ], + "vuln_web_files_71": [ + "Vulnerability Scanning" + ], + "vuln_web_files_711": [ + "Vulnerability Scanning" + ], + "vuln_web_files_716": [ + "Vulnerability Scanning" + ], + "vuln_web_files_717": [ + "Vulnerability Scanning" + ], + "vuln_web_files_718": [ + "Vulnerability Scanning" + ], + "vuln_web_files_719": [ + "Vulnerability Scanning" + ], + "vuln_web_files_72": [ + "Vulnerability Scanning" + ], + "vuln_web_files_721": [ + "Vulnerability Scanning" + ], + "vuln_web_files_722": [ + "Vulnerability Scanning" + ], + "vuln_web_files_725": [ + "Vulnerability Scanning" + ], + "vuln_web_files_726": [ + "Vulnerability Scanning" + ], + "vuln_web_files_727": [ + "Vulnerability Scanning" + ], + "vuln_web_files_728": [ + "Vulnerability Scanning" + ], + "vuln_web_files_729": [ + "Vulnerability Scanning" + ], + "vuln_web_files_73": [ + "Vulnerability Scanning" + ], + "vuln_web_files_730": [ + "Vulnerability Scanning" + ], + "vuln_web_files_735": [ + "Vulnerability Scanning" + ], + "vuln_web_files_736": [ + "Vulnerability Scanning" + ], + "vuln_web_files_737": [ + "Vulnerability Scanning" + ], + "vuln_web_files_739": [ + "Vulnerability Scanning" + ], + "vuln_web_files_74": [ + "Vulnerability Scanning" + ], + "vuln_web_files_742": [ + "Vulnerability Scanning" + ], + "vuln_web_files_744": [ + "Vulnerability Scanning" + ], + "vuln_web_files_745": [ + "Vulnerability Scanning" + ], + "vuln_web_files_746": [ + "Vulnerability Scanning" + ], + "vuln_web_files_749": [ + "Vulnerability Scanning" + ], + "vuln_web_files_75": [ + "Vulnerability Scanning" + ], + "vuln_web_files_750": [ + "Vulnerability Scanning" + ], + "vuln_web_files_756": [ + "Vulnerability Scanning" + ], + "vuln_web_files_757": [ + "Vulnerability Scanning" + ], + "vuln_web_files_759": [ + "Vulnerability Scanning" + ], + "vuln_web_files_760": [ + "Vulnerability Scanning" + ], + "vuln_web_files_761": [ + "Vulnerability Scanning" + ], + "vuln_web_files_765": [ + "Vulnerability Scanning" + ], + "vuln_web_files_766": [ + "Vulnerability Scanning" + ], + "vuln_web_files_767": [ + "Vulnerability Scanning" + ], + "vuln_web_files_769": [ + "Vulnerability Scanning" + ], + "vuln_web_files_77": [ + "Vulnerability Scanning" + ], + "vuln_web_files_772": [ + "Vulnerability Scanning" + ], + "vuln_web_files_775": [ + "Vulnerability Scanning" + ], + "vuln_web_files_776": [ + "Vulnerability Scanning" + ], + "vuln_web_files_779": [ + "Vulnerability Scanning" + ], + "vuln_web_files_780": [ + "Vulnerability Scanning" + ], + "vuln_web_files_782": [ + "Vulnerability Scanning" + ], + "vuln_web_files_784": [ + "Vulnerability Scanning" + ], + "vuln_web_files_786": [ + "Vulnerability Scanning" + ], + "vuln_web_files_787": [ + "Vulnerability Scanning" + ], + "vuln_web_files_789": [ + "Vulnerability Scanning" + ], + "vuln_web_files_790": [ + "Vulnerability Scanning" + ], + "vuln_web_files_792": [ + "Vulnerability Scanning" + ], + "vuln_web_files_793": [ + "Vulnerability Scanning" + ], + "vuln_web_files_795": [ + "Vulnerability Scanning" + ], + "vuln_web_files_799": [ + "Vulnerability Scanning" + ], + "vuln_web_files_800": [ + "Vulnerability 
Scanning" + ], + "vuln_web_files_803": [ + "Vulnerability Scanning" + ], + "vuln_web_files_804": [ + "Vulnerability Scanning" + ], + "vuln_web_files_805": [ + "Vulnerability Scanning" + ], + "vuln_web_files_806": [ + "Vulnerability Scanning" + ], + "vuln_web_files_807": [ + "Vulnerability Scanning" + ], + "vuln_web_files_809": [ + "Vulnerability Scanning" + ], + "vuln_web_files_810": [ + "Vulnerability Scanning" + ], + "vuln_web_files_811": [ + "Vulnerability Scanning" + ], + "vuln_web_files_813": [ + "Vulnerability Scanning" + ], + "vuln_web_files_814": [ + "Vulnerability Scanning" + ], + "vuln_web_files_815": [ + "Vulnerability Scanning" + ], + "vuln_web_files_817": [ + "Vulnerability Scanning" + ], + "vuln_web_files_818": [ + "Vulnerability Scanning" + ], + "vuln_web_files_819": [ + "Vulnerability Scanning" + ], + "vuln_web_files_821": [ + "Vulnerability Scanning" + ], + "vuln_web_files_822": [ + "Vulnerability Scanning" + ], + "vuln_web_files_824": [ + "Vulnerability Scanning" + ], + "vuln_web_files_825": [ + "Vulnerability Scanning" + ], + "vuln_web_files_826": [ + "Vulnerability Scanning" + ], + "vuln_web_files_827": [ + "Vulnerability Scanning" + ], + "vuln_web_files_829": [ + "Vulnerability Scanning" + ], + "vuln_web_files_831": [ + "Vulnerability Scanning" + ], + "vuln_web_files_832": [ + "Vulnerability Scanning" + ], + "vuln_web_files_833": [ + "Vulnerability Scanning" + ], + "vuln_web_files_834": [ + "Vulnerability Scanning" + ], + "vuln_web_files_836": [ + "Vulnerability Scanning" + ], + "vuln_web_files_838": [ + "Vulnerability Scanning" + ], + "vuln_web_files_839": [ + "Vulnerability Scanning" + ], + "vuln_web_files_84": [ + "Vulnerability Scanning" + ], + "vuln_web_files_840": [ + "Vulnerability Scanning" + ], + "vuln_web_files_842": [ + "Vulnerability Scanning" + ], + "vuln_web_files_843": [ + "Vulnerability Scanning" + ], + "vuln_web_files_844": [ + "Vulnerability Scanning" + ], + "vuln_web_files_846": [ + "Vulnerability Scanning" + ], + "vuln_web_files_847": [ + "Vulnerability Scanning" + ], + "vuln_web_files_848": [ + "Vulnerability Scanning" + ], + "vuln_web_files_850": [ + "Vulnerability Scanning" + ], + "vuln_web_files_851": [ + "Vulnerability Scanning" + ], + "vuln_web_files_852": [ + "Vulnerability Scanning" + ], + "vuln_web_files_853": [ + "Vulnerability Scanning" + ], + "vuln_web_files_854": [ + "Vulnerability Scanning" + ], + "vuln_web_files_855": [ + "Vulnerability Scanning" + ], + "vuln_web_files_856": [ + "Vulnerability Scanning" + ], + "vuln_web_files_857": [ + "Vulnerability Scanning" + ], + "vuln_web_files_858": [ + "Vulnerability Scanning" + ], + "vuln_web_files_859": [ + "Vulnerability Scanning" + ], + "vuln_web_files_862": [ + "Vulnerability Scanning" + ], + "vuln_web_files_863": [ + "Vulnerability Scanning" + ], + "vuln_web_files_864": [ + "Vulnerability Scanning" + ], + "vuln_web_files_866": [ + "Vulnerability Scanning" + ], + "vuln_web_files_868": [ + "Vulnerability Scanning" + ], + "vuln_web_files_869": [ + "Vulnerability Scanning" + ], + "vuln_web_files_87": [ + "Vulnerability Scanning" + ], + "vuln_web_files_871": [ + "Vulnerability Scanning" + ], + "vuln_web_files_873": [ + "Vulnerability Scanning" + ], + "vuln_web_files_878": [ + "Vulnerability Scanning" + ], + "vuln_web_files_879": [ + "Vulnerability Scanning" + ], + "vuln_web_files_881": [ + "Vulnerability Scanning" + ], + "vuln_web_files_882": [ + "Vulnerability Scanning" + ], + "vuln_web_files_885": [ + "Vulnerability Scanning" + ], + "vuln_web_files_886": [ + "Vulnerability Scanning" + ], + 
"vuln_web_files_887": [ + "Vulnerability Scanning" + ], + "vuln_web_files_89": [ + "Vulnerability Scanning" + ], + "vuln_web_files_892": [ + "Vulnerability Scanning" + ], + "vuln_web_files_893": [ + "Vulnerability Scanning" + ], + "vuln_web_files_895": [ + "Vulnerability Scanning" + ], + "vuln_web_files_896": [ + "Vulnerability Scanning" + ], + "vuln_web_files_897": [ + "Vulnerability Scanning" + ], + "vuln_web_files_898": [ + "Vulnerability Scanning" + ], + "vuln_web_files_9": [ + "Vulnerability Scanning" + ], + "vuln_web_files_90": [ + "Vulnerability Scanning" + ], + "vuln_web_files_900": [ + "Vulnerability Scanning" + ], + "vuln_web_files_901": [ + "Vulnerability Scanning" + ], + "vuln_web_files_902": [ + "Vulnerability Scanning" + ], + "vuln_web_files_903": [ + "Vulnerability Scanning" + ], + "vuln_web_files_904": [ + "Vulnerability Scanning" + ], + "vuln_web_files_905": [ + "Vulnerability Scanning" + ], + "vuln_web_files_907": [ + "Vulnerability Scanning" + ], + "vuln_web_files_91": [ + "Vulnerability Scanning" + ], + "vuln_web_files_910": [ + "Vulnerability Scanning" + ], + "vuln_web_files_911": [ + "Vulnerability Scanning" + ], + "vuln_web_files_912": [ + "Vulnerability Scanning" + ], + "vuln_web_files_913": [ + "Vulnerability Scanning" + ], + "vuln_web_files_915": [ + "Vulnerability Scanning" + ], + "vuln_web_files_916": [ + "Vulnerability Scanning" + ], + "vuln_web_files_917": [ + "Vulnerability Scanning" + ], + "vuln_web_files_918": [ + "Vulnerability Scanning" + ], + "vuln_web_files_92": [ + "Vulnerability Scanning" + ], + "vuln_web_files_920": [ + "Vulnerability Scanning" + ], + "vuln_web_files_922": [ + "Vulnerability Scanning" + ], + "vuln_web_files_923": [ + "Vulnerability Scanning" + ], + "vuln_web_files_924": [ + "Vulnerability Scanning" + ], + "vuln_web_files_925": [ + "Vulnerability Scanning" + ], + "vuln_web_files_926": [ + "Vulnerability Scanning" + ], + "vuln_web_files_927": [ + "Vulnerability Scanning" + ], + "vuln_web_files_929": [ + "Vulnerability Scanning" + ], + "vuln_web_files_932": [ + "Vulnerability Scanning" + ], + "vuln_web_files_934": [ + "Vulnerability Scanning" + ], + "vuln_web_files_935": [ + "Vulnerability Scanning" + ], + "vuln_web_files_936": [ + "Vulnerability Scanning" + ], + "vuln_web_files_937": [ + "Vulnerability Scanning" + ], + "vuln_web_files_939": [ + "Vulnerability Scanning" + ], + "vuln_web_files_94": [ + "Vulnerability Scanning" + ], + "vuln_web_files_941": [ + "Vulnerability Scanning" + ], + "vuln_web_files_943": [ + "Vulnerability Scanning" + ], + "vuln_web_files_945": [ + "Vulnerability Scanning" + ], + "vuln_web_files_947": [ + "Vulnerability Scanning" + ], + "vuln_web_files_948": [ + "Vulnerability Scanning" + ], + "vuln_web_files_949": [ + "Vulnerability Scanning" + ], + "vuln_web_files_95": [ + "Vulnerability Scanning" + ], + "vuln_web_files_950": [ + "Vulnerability Scanning" + ], + "vuln_web_files_951": [ + "Vulnerability Scanning" + ], + "vuln_web_files_954": [ + "Vulnerability Scanning" + ], + "vuln_web_files_955": [ + "Vulnerability Scanning" + ], + "vuln_web_files_957": [ + "Vulnerability Scanning" + ], + "vuln_web_files_958": [ + "Vulnerability Scanning" + ], + "vuln_web_files_959": [ + "Vulnerability Scanning" + ], + "vuln_web_files_96": [ + "Vulnerability Scanning" + ], + "vuln_web_files_960": [ + "Vulnerability Scanning" + ], + "vuln_web_files_962": [ + "Vulnerability Scanning" + ], + "vuln_web_files_964": [ + "Vulnerability Scanning" + ], + "vuln_web_files_966": [ + "Vulnerability Scanning" + ], + "vuln_web_files_967": [ + 
"Vulnerability Scanning" + ], + "vuln_web_files_968": [ + "Vulnerability Scanning" + ], + "vuln_web_files_969": [ + "Vulnerability Scanning" + ], + "vuln_web_files_97": [ + "Vulnerability Scanning" + ], + "vuln_web_files_973": [ + "Vulnerability Scanning" + ], + "vuln_web_files_978": [ + "Vulnerability Scanning" + ], + "vuln_web_files_979": [ + "Vulnerability Scanning" + ], + "vuln_web_files_980": [ + "Vulnerability Scanning" + ], + "vuln_web_files_981": [ + "Vulnerability Scanning" + ], + "vuln_web_files_982": [ + "Vulnerability Scanning" + ], + "vuln_web_files_984": [ + "Vulnerability Scanning" + ], + "vuln_web_files_985": [ + "Vulnerability Scanning" + ], + "vuln_web_files_986": [ + "Vulnerability Scanning" + ], + "vuln_web_files_988": [ + "Vulnerability Scanning" + ], + "vuln_web_files_989": [ + "Vulnerability Scanning" + ], + "vuln_web_files_99": [ + "Vulnerability Scanning" + ], + "vuln_web_files_990": [ + "Vulnerability Scanning" + ], + "vuln_web_files_993": [ + "Vulnerability Scanning" + ], + "vuln_web_files_995": [ + "Vulnerability Scanning" + ], + "vuln_web_files_996": [ + "Vulnerability Scanning" + ], + "vuln_web_files_997": [ + "Vulnerability Scanning" + ], + "vuln_web_files_998": [ + "Vulnerability Scanning" + ], + "vuln_web_filesacb6": [ + "Vulnerability Scanning" + ], + "vuln_web_filex_fast_reg_0": [ + "Vulnerability Scanning" + ], + "vuln_web_filex_fast_reg_1": [ + "Vulnerability Scanning" + ], + "xpath_injeciton_regex_0": [ + "Path Traversal" + ], + "xpath_injeciton_regex_1": [ + "Path Traversal" + ], + "xpath_injeciton_regex_2": [ + "Path Traversal" + ], + "xpath_injeciton_regex_3": [ + "Path Traversal" + ], + "xpath_injeciton_regex_4": [ + "Path Traversal" + ], + "xpath_injection671f": [ + "Path Traversal" + ], + "xpath_injection_regex_0": [ + "Path Traversal" + ], + "xpath_injection_regex_1": [ + "Path Traversal" + ], + "xpath_injection_regex_2": [ + "Path Traversal" + ], + "xpath_injection_regex_3": [ + "Path Traversal" + ], + "xss_html_cmds_high_acuracy2d26": [ + "Cross Site Scripting" + ], + "xss_html_cmds_high_acuracy3bfa": [ + "Cross Site Scripting" + ], + "xss_html_special": [ + "Cross Site Scripting" + ], + "xss_html_special0c79": [ + "Cross Site Scripting" + ], + "xss_html_special0e69": [ + "Cross Site Scripting" + ], + "xss_html_special171d": [ + "Cross Site Scripting" + ], + "xss_html_special1720": [ + "Cross Site Scripting" + ], + "xss_html_special27db": [ + "Cross Site Scripting" + ], + "xss_html_special31d4": [ + "Cross Site Scripting" + ], + "xss_html_special3ba9": [ + "Cross Site Scripting" + ], + "xss_html_special47b3": [ + "Cross Site Scripting" + ], + "xss_html_special4a8e": [ + "Cross Site Scripting" + ], + "xss_html_special4f36": [ + "Cross Site Scripting" + ], + "xss_html_special64b9": [ + "Cross Site Scripting" + ], + "xss_html_special6675": [ + "Cross Site Scripting" + ], + "xss_html_special6763": [ + "Cross Site Scripting" + ], + "xss_html_special6dda": [ + "Cross Site Scripting" + ], + "xss_html_special7168": [ + "Cross Site Scripting" + ], + "xss_html_special7b2e": [ + "Cross Site Scripting" + ], + "xss_html_special8338": [ + "Cross Site Scripting" + ], + "xss_html_special8524": [ + "Cross Site Scripting" + ], + "xss_html_special883e": [ + "Cross Site Scripting" + ], + "xss_html_special8893": [ + "Cross Site Scripting" + ], + "xss_html_special8a9f": [ + "Cross Site Scripting" + ], + "xss_html_special91ee": [ + "Cross Site Scripting" + ], + "xss_html_special94dc": [ + "Cross Site Scripting" + ], + "xss_html_special99fb": [ + "Cross Site 
Scripting" + ], + "xss_html_special_0": [ + "Cross Site Scripting" + ], + "xss_html_special_1": [ + "Cross Site Scripting" + ], + "xss_html_special_12": [ + "Cross Site Scripting" + ], + "xss_html_special_13": [ + "Cross Site Scripting" + ], + "xss_html_special_15": [ + "Cross Site Scripting" + ], + "xss_html_special_16": [ + "Cross Site Scripting" + ], + "xss_html_special_17": [ + "Cross Site Scripting" + ], + "xss_html_special_18": [ + "Cross Site Scripting" + ], + "xss_html_special_19": [ + "Cross Site Scripting" + ], + "xss_html_special_2": [ + "Cross Site Scripting" + ], + "xss_html_special_20": [ + "Cross Site Scripting" + ], + "xss_html_special_21": [ + "Cross Site Scripting" + ], + "xss_html_special_22": [ + "Cross Site Scripting" + ], + "xss_html_special_23": [ + "Cross Site Scripting" + ], + "xss_html_special_26": [ + "Cross Site Scripting" + ], + "xss_html_special_28": [ + "Cross Site Scripting" + ], + "xss_html_special_3": [ + "Cross Site Scripting" + ], + "xss_html_special_31": [ + "Cross Site Scripting" + ], + "xss_html_special_33": [ + "Cross Site Scripting" + ], + "xss_html_special_34": [ + "Cross Site Scripting" + ], + "xss_html_special_35": [ + "Cross Site Scripting" + ], + "xss_html_special_37": [ + "Cross Site Scripting" + ], + "xss_html_special_38": [ + "Cross Site Scripting" + ], + "xss_html_special_4": [ + "Cross Site Scripting" + ], + "xss_html_special_40": [ + "Cross Site Scripting" + ], + "xss_html_special_41": [ + "Cross Site Scripting" + ], + "xss_html_special_44": [ + "Cross Site Scripting" + ], + "xss_html_special_45": [ + "Cross Site Scripting" + ], + "xss_html_special_46": [ + "Cross Site Scripting" + ], + "xss_html_special_50": [ + "Cross Site Scripting" + ], + "xss_html_special_51": [ + "Cross Site Scripting" + ], + "xss_html_special_55": [ + "Cross Site Scripting" + ], + "xss_html_special_56": [ + "Cross Site Scripting" + ], + "xss_html_special_59": [ + "Cross Site Scripting" + ], + "xss_html_special_6": [ + "Cross Site Scripting" + ], + "xss_html_special_60": [ + "Cross Site Scripting" + ], + "xss_html_special_61": [ + "Cross Site Scripting" + ], + "xss_html_special_63": [ + "Cross Site Scripting" + ], + "xss_html_special_64": [ + "Cross Site Scripting" + ], + "xss_html_special_9": [ + "Cross Site Scripting" + ], + "xss_html_speciala329": [ + "Cross Site Scripting" + ], + "xss_html_speciala3af": [ + "Cross Site Scripting" + ], + "xss_html_speciala652": [ + "Cross Site Scripting" + ], + "xss_html_speciala9ba": [ + "Cross Site Scripting" + ], + "xss_html_specialb0c1": [ + "Cross Site Scripting" + ], + "xss_html_specialb902": [ + "Cross Site Scripting" + ], + "xss_html_specialba96": [ + "Cross Site Scripting" + ], + "xss_html_specialbe34": [ + "Cross Site Scripting" + ], + "xss_html_specialc12a": [ + "Cross Site Scripting" + ], + "xss_html_specialc3dd": [ + "Cross Site Scripting" + ], + "xss_html_specialc80e": [ + "Cross Site Scripting" + ], + "xss_html_specialcb24": [ + "Cross Site Scripting" + ], + "xss_html_speciald37a": [ + "Cross Site Scripting" + ], + "xss_html_speciald85b": [ + "Cross Site Scripting" + ], + "xss_html_specialdd4a": [ + "Cross Site Scripting" + ], + "xss_html_specialdec0": [ + "Cross Site Scripting" + ], + "xss_html_specialf1c4": [ + "Cross Site Scripting" + ], + "xss_html_specialfca1": [ + "Cross Site Scripting" + ], + "xss_html_specialfcc5": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy0ec1": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy11aa": [ + "Cross Site Scripting" + ], + 
"xss_html_tags_high_acuracy2968": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy2c91": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy2d32": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy32c9": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy356c": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy3c13": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy5422": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy5d04": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy5e70": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy7058": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy7463": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy7f39": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_0": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_1": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_10": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_100": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_101": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_102": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_103": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_104": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_105": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_11": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_12": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_13": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_14": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_15": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_16": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_17": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_18": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_19": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_2": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_20": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_21": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_22": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_23": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_24": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_25": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_26": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_27": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_28": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_29": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_3": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_30": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_31": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_32": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_33": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_34": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_35": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_36": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_37": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_38": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_39": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_4": [ + "Cross Site 
Scripting" + ], + "xss_html_tags_high_acuracy_40": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_41": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_42": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_43": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_44": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_45": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_46": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_47": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_48": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_49": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_5": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_50": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_51": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_52": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_53": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_54": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_55": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_56": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_57": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_58": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_59": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_6": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_60": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_61": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_62": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_63": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_64": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_65": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_66": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_67": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_68": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_69": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_7": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_70": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_71": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_72": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_73": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_74": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_75": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_76": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_77": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_78": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_79": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_8": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_80": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_81": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_82": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_83": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_84": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_85": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_86": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_87": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_88": [ + "Cross Site 
Scripting" + ], + "xss_html_tags_high_acuracy_89": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_9": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_90": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_91": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_92": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_93": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_94": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_95": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_96": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_97": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_98": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracy_99": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracya34b": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracyaadf": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracybe83": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracyd0d4": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracydd7f": [ + "Cross Site Scripting" + ], + "xss_html_tags_high_acuracye36b": [ + "Cross Site Scripting" + ], + "xxe": [ + "XML External Entity" + ], + "xxe_0": [ + "XML External Entity" + ], + "xxe_1": [ + "XML External Entity" + ], + "xxe_2": [ + "XML External Entity" + ], + "xxe_3": [ + "XML External Entity" + ] + }, + "binary_data_kw_filter": "encoded.*|.*ev_fast_reg.*|.*fast_reg_evasion.*|ldap_injection_regex.*|code_execution_fast_reg.*|xpath_injeciton_regex.*|xpath_injection_regex.*|regex_code_execution.*|os_cmd_sep_medium_acuracy.*|high_acuracy_fast_reg_xss.*|regex_high_acuracy_crlf_inj.*|regex_postfix.*|url_scanning.*|ssti_fast_reg.*|os_cmd_high_acuracy_fast_reg.*|regex_xss.*|general_injection_regex.*|regex_sqli.*|vuln_os_dirs.*|sqli_medium_acuracy.*|sqli_fast_reg_0", + "comment_ev_pattern": "\\/\\/.*[\\r\\n]+", + "false_keywords": [ + "=", + "&", + "/www", + "http" + ], + "false_patterns_re_list": [ + "(?P\\w+=\\w+\\&)", + "(?P(^(http://[\\w\\.\\d\\\\/\\=\\-\\?\\&\\%]*)))" + ], + "filter_parameters": { + "cql": [ + "=", + "and", + "probing", + "quotes_ev_fast_reg_2", + "regex_postfix_1", + "regex_sqli_17", + "regex_sqli_22", + ">", + "regex_postfix_0", + "config.", + "function_name_bypass_regex_0", + "\"" + ], + "currentJql": [ + "\"", + "=", + "and", + "regex_postfix_0", + "regex_sqli_17", + "regex_sqli_22", + "regex_postfix_1", + "or", + "os_cmd_high_acuracy_fast_reg_0", + "quotes_ev_fast_reg_2", + "regex_sqli_26" + ], + "description": [ + "#", + "*", + "/card", + "and", + "comment_ev_fast_reg_0", + "create", + "encoded_\\r\\n", + "os_cmd_high_acuracy_fast_reg_3", + "quotes_ev_fast_reg_4", + "repetition", + "ssti_fast_reg_4", + "|", + "&", + "document." 
+ ], + "jql": [ + "=", + "and", + "or", + "regex_postfix_0", + "\"", + "code_execution_fast_reg_0", + "os_cmd_high_acuracy_fast_reg_0", + "regex_postfix_0", + "regex_sqli_17", + "regex_sqli_22", + "regex_sqli_26", + "quotes_ev_fast_reg_2", + "regex_postfix_1" + ], + "nfeedJql": [ + "\"", + "=", + "and", + "regex_postfix_0", + "regex_sqli_17", + "regex_sqli_22", + "regex_postfix_1", + "or", + "os_cmd_high_acuracy_fast_reg_0", + "quotes_ev_fast_reg_2", + "regex_sqli_26" + ], + "rows.dimensions.include": [ + "*", + "].", + "code_execution_fast_reg_0", + "quotes_ev_fast_reg_4", + "ssti_fast_reg_4" + ], + "unrenderedMarkup": [ + "#", + "*", + "/card", + "and", + "comment_ev_fast_reg_0", + "create", + "encoded_\\n", + "os_cmd_high_acuracy_fast_reg_3", + "quotes_ev_fast_reg_4", + "repetition", + "ssti_fast_reg_4", + "|", + "\"", + "&", + "/file", + "/site", + "all", + "quotes_ev_fast_reg_2", + "null", + ".exe", + "null," + ] + }, + "format_magic_binary_re": "^%PDF|^PK\\x03\\x04|^FORM\\x00|^PACK|^RIFF|^GIF8[79]a|^\\xFF\\xD8\\xFF|^\\x89PNG\\x0D\\x0A\\x1A\\x0A", + "format_types_regex_list": { + "administration_config": "((?> |(\\d{1,3}\\.){3}\\d{1,3})(?!\\w).{0,100}?){6}", + "ampersand_delimiter": "^(?>.{1,100}?&){2,}", + "asterisk_delimiter": "^(?>.{1,100}?\\*){2,}", + "comma_delimiter": "^(?>.{1,100}?,){2,}", + "free_text": "((?<=^|\\s)(?>the|be|to|of|an|your|that|have|had|i(?>'m)?|it(?>'s)?|on|he(?>'s)?|you(?>'re)?|we(>?'re)|at|but|his|they|we|she(?>'s)|says?|will|my|one|would|there|was|were|are|what|how|when)(?>\\s|$)[\\w\\s\\.,:'!\\?]*?){2,}", + "html_input": "(<\\/?(?>div|li|ul|tr|th|p|table|body|html|title|h[1-6]|input|img|head|label|button|br|hr|a)\\b.*?>(?>.|[\\r\\n]){0,400}?){2}", + "local_file_path": "^((/?([\\w_\\-\\.]+/)+([\\w_\\-\\.]+\\.[a-zA-Z]{1,5}\\d*))|((([A-Za-z]:\\\\)|\\\\?)([\\w_\\-\\.]+\\\\)+([\\w_\\-\\.]+\\.[a-zA-Z]{1,5}\\d*)))$", + "long_random_text": "[^\\s]{40,}+", + "pipes": "^(?>.{1,100}?\\|){2,}", + "semicolon_delimiter": "^(?>.{1,100}?;){2,}", + "urls": "https?://([\\w\\.\\\\\\-_~\\%=?,]*/){3,}" + }, + "global_ignored": { + "keys": [ + "all", + "and", + "or", + "from", + "null" + ], + "patterns": [] + }, + "good_header_name_re": "^\\s?[\\[\\]\\w\\-]*$", + "good_header_value_re": "(^[\\w\\-\\s\\.s]*$|^(([wW]\\/)?(\\,?\\s?\"[^\"]+\")+|\\*)$|^(,?\\s?[\\w\\-]+\\s*=\\s*[\\w\\-\\.]+)+$|^[\\w\\/\\.\\-\\:]+$)", + "headers_re": { + "accept": "^(?>((?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*)\\/?){1,3}(?>=(?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*))?|(?>[,;]((?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*)\\/?){1,3}(?>=(?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*))?))+$", + "accept-charset": "^(?>((?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*)\\/?){1,3}(?>=(?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*))?|(?>[,;]((?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*)\\/?){1,3}(?>=(?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*))?))+$", + "accept-encoding": "^(?>((?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*)\\/?){1,3}(?>=(?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*))?|(?>[,;]((?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*)\\/?){1,3}(?>=(?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*))?))+$", + "accept-language": 
"^(?>((?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*)\\/?){1,3}(?>=(?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*))?|(?>[,;]((?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*)\\/?){1,3}(?>=(?>\\s*(?>\\*|[A-Za-z\\-_+][\\w\\-_+.]*|\\d+(?>\\.\\d+)?|\\.\\d+)\\s*))?))+$", + "cache-control": "^([\\w\\-\\=]|, ?)+$", + "content-type": "^[\\w\\-]+(/[\\w\\-]+)?(; ?[\\w\\-]+=[\\w\\-_\\.]+)*$", + "forwarded": ".*", + "from": "^[\\w\\(\\)\\.]+$", + "host": "^([\\d\\.:]+|[\\w/\\.\\-]+)*$", + "if-match": "^(([wW]\\/)?(\\,?\\s?\"[^\"]+\")+|\\*)$", + "if-modified-since": "^[\\w\\s,\\-\\:]+$", + "if-none-match": "^(([wW]\\/)?(\\,?\\s?\"[^\"]+\")+|\\*)$", + "if-range": "(^[\\w\\s,\\-\\:]+$|^(([wW]\\/)?(\\,?\\s?\"[^\"]+\")+|\\*)$)", + "if-unmodified-since": "^[\\w\\s,\\-\\:]+$", + "intervention": "^<(https?|[\\w\\-]+app):\\/\\/[\\w\\.\\/\\-]+(:\\d+\\/)?[\\w\\.\\?=&%\\-]*>;\\s*\\w+=[\\\\\\\"\\w]+$", + "last-modified": "^[\\w\\s,\\-\\:]+$", + "origin": "^https?://[\\w\\./]+$", + "range": "^[\\w\\-\\=]+$", + "referer": "^https?://.*", + "sec-ch-ua": "^(\\s*\".+?\"\\s*;\\s*v=\"\\d+(\\.\\d+)?\",?)+\\s*$", + "sec-ch-ua-full-version": "^\\d(\\.\\d)*$", + "singularityheader": "^(?:(?:[\\w\\-_]+=[\\w\\-_\\|\\?/:]+)\\*)+(?:[\\w\\-_]+=[\\w\\-_\\|\\?/:]+)$", + "surrogate-capability": ".*", + "te": "^\\s*(compress|deflate|gzip|trailers)|(\\s*;\\s*q=[0,1](\\.\\d{1,3})*)$", + "unless-modified-since": "^[\\w\\s,\\-\\:]+$", + "user-agent": "^(([\\w\\/\\@\\-\\,\\s\\/;\\+]|(\\.[^\\.])|(\\.$)|(:[\\/\\\\]{2})|(:[^\\/\\\\]));?|\\s|\\((\\*\\s*\\d+|[\\w@;:~\"\\{\\}#\\=\\+\\s\\(\\)\\/\\.\\,\\-?])+\\)|\\[[\\(\\)\\w\\/;\\.,\\&\\-\\.\\#\\s{=}:]+\\])*$", + "via": "^(([\\w\\/\\@\\-\\,\\s\\/;\\+]|(\\.[^\\.])|(\\.$)|(:[\\/\\\\]{2})|(:[^\\/\\\\]));?|\\s|\\((\\*\\s*\\d+|[\\w@;:~\"\\{\\}#\\=\\+\\s\\(\\)\\/\\.\\,\\-?])+\\)|\\[[\\(\\)\\w\\/;\\.,\\&\\-\\.\\#\\s{=}:]+\\])*$", + "x-fb-flow-capture": "^[\\w\\=<>]+$", + "x-forwarded-for": "^(,?\\s?([\\d\\.]+|[\\w:]+))+$", + "x-logsourceip": "^(,?\\s?([\\d\\.]+|[\\w:]+))+$", + "x-operamini-features": "^(\\s*\\w+\\s*,)+(\\s*\\w+\\s*)+$", + "x-operamini-phone": ".*", + "x-operamini-phone-ua": "^(([\\w\\/\\@\\-\\,\\s\\/;\\+]|(\\.[^\\.])|(\\.$)|(:[\\/\\\\]{2})|(:[^\\/\\\\]));?|\\s|\\((\\*\\s*\\d+|[\\w@;:~\"\\{\\}#\\=\\+\\s\\(\\)\\/\\.\\,\\-?])+\\)|\\[[\\(\\)\\w\\/;\\.,\\&\\-\\.\\#\\s{=}:]+\\])*$", + "x-ucbrowser-ua": "^(\\w+\\(([\\s\\w\\*\\+\\-\\/\\.\\,\\'\\\"\\(\\)\\[\\]]*?)\\);)+$" + }, + "ignored_for_headers": { + "keys": [ + ";", + "code_execution_fast_reg_0", + "|", + "all", + "and", + "or", + "from", + "null" + ], + "patterns": [ + "--", + "=", + ";", + "&", + "/", + "|", + "os_cmd_sep_medium_acuracy", + "code_execution_fast_reg_0" + ], + "regex": "^$" + }, + "ignored_for_nospace_long_value": [ + "--", + "=", + ";", + "&", + "/" + ], + "ignored_for_url": { + "keys": [ + "--", + "&", + "/wp", + "/admin", + "/upload", + "/includes/", + "character_encoding", + "/uploads", + "/cgibin", + "/home", + "all", + "and", + "or", + "from", + "null" + ], + "patterns": [ + "regex_sqli_14" + ], + "regex": "^[\\w/_\\-\\&]+((\\.(js|css|woff|woff2|json|html|jpeg|png))|/)$" + }, + "longtext_re": "\\w{16,}|(\\.(jpg|jpeg|png|gif)$)", + "pattern_regex_list": [ + "(?P=\\s*('|\")\\w+)", + "(?P\\w+\\[\\s*['\"\\w]+\\s*\\]\\s*\\()", + "(?P<\\w+\\/)", + "(?P\\(\\s*\\w+\\s*=\\s*\\w+\\s*\\w+\\*?\\s*\\))", + "(?P[,\\)]\\s*null)", + "(?P\\(\\s*(userpassword|surname|name|cn|sn|objectclass|mail|givenname|commonname)(?!\\w))", + "(?Psrc=\\s*\\w)", + 
"(?P(backup|db|dump|htdocs|home|html|data|backup|database|setup|install|localhost|backup|wordpress|blog|admin|eshop|admin|store|m|mariadb|mage|1)[\\w\\-~\\.]*\\.(rar|zip|tar|sql|7z|zip|gz|bak|config|connect|old|cfg))", + "(?P/\\w+\\s*(:|\\())", + "(?Psys\\.[\\w]+\\s*\\()", + "(?P:\\s*\\w+\\s*\\()", + "(?P\\w+\\s*:\\s*(expression|url)\\s*\\()", + "(?P^[^[\\[]+\\]\\s*,.*\\[[^\\]]+$)", + "(?Plike\\s['\"][^'\"]*$)", + "(?P(limit|offset)\\s+\\d+)", + "(?P^[\\d\\s+\\'\\)\";]+or\\s)", + "(?P(?/\\w+\\s*\\[)", + "(?P^[\\w\\s]*('|\"|;|\\|\\&)+\\s+(and|or|having))", + "(?Pcase\\s*when\\s+\\w+)", + "(?Padmin\\s*\\*)", + "(?Punion[\\w\\s]+select[\\w\\s]+from[\\w\\s]+)", + "(?P^[\\w\\s]*('|\"|;|\\|\\&)+\\s)", + "(?Pselect.*(,\\w+).*from.*where)", + "(?P\\[\\s*\\w+[\\(<>=!])", + "(?P<(script|img|iframe)[^>]*[a-z]*(src|on\\w+)\\s*=)", + "(?P@\\s*\\*)", + "(?P\\w*\\s*=\\s*\\w*\\*)", + "(?P\\(\\s*/)", + "(?P(\\)|\\(|\\*)\\s*(\\(|\\||\\%))", + "(?P\\r\\n\\w+\\s*:\\s)", + "(?P/book\\[[\\d\"'])", + "(?P\\s(and|or|union)\\s.*([\\);'\"\\|]{2,}|--|/\\*|#))", + "(?Pinsert\\s+into\\s+\\w)", + "(?P('|\"|\\);)\\s*--')", + "(?Pfunction\\s+[^\\s]+\\s*\\()", + "(?Pcontent-length.*\\r\\n\\r\\n.*<)", + "(?P\\s(waitfor|delay)\\s.*\\d+:\\d+:\\d+)", + "(?P\\\\r\\\\n\\w+\\s*:\\s)", + "(?P\\[\\s*'eval'\\s*\\])", + "(?Pexec\\s*cmd=\\s*('.*'|\".*\"))", + "(?Punion\\s*(all\\s*)?select)", + "(?P(=|or|and|select|union|rlike|like|waitfor|having|null|is|limit|offset|order|by|all|case|when)\\s*//)", + "(?P[)&|;`]+\\s*\\w+\\s*(\\-\\w|http|ftp|file|\\.*\\/))", + "(?P\\[\\s*@)", + "(?Pwaitfor[\\s+]delay[\\s+][\"']\\d+:\\d:\\d+[\"'])", + "(?P^\\w+\\s*,\\s*\"[^\\\\\"]*\"\\s*:[\\w_\"]+$)", + "(?P(and|\\||\\&|\\|\\||\\&\\&)\\s*\\d+\\s*=)", + "(?P\\{[^\"']*\\()", + "(?P^[\\.\\-\\w\\^\\']+\\s*union\\s*select)", + "(?Pcontent-length.*\\\\r\\\\n\\\\r\\\\n.*<)", + "(?Pnull\\s*[,\\)])", + "(?P(\\d{1,3}\\.){3}\\d{1,3})", + "(?P('|\"|\\)|;|\\||\\&)+\\s*(and|\\||\\&|\\|\\||\\&\\&|or|having|order\\s+by)\\s)", + "(?P\\)\\s*=\\s*[\\w'\"])", + "(?P\\/\\s*\\@)", + "(?P\\([^\\)]+\\$[^\\)]+\\)\\s*\\()", + "(?P[\\/\\\\](?:[?p][?s]|[?i][?d]|[?l][?s]|[?l][?l]|[?l][?a]|[?t][?r]|[?s][?h]|[?w][?c])(?=[^\\w?<>:]|$))", + "(?P[\\/\\\\](?:[?w][?m][?l]|[?w][?e][?b]|[?o][?p][?t]|[?b][?i][?n]|[?l][?o][?g]|[?c][?a][?t]|[?p][?w][?d]|[?a][?w][?k]|[?c][?g][?i]|[?e][?t][?c]|[?s][?e][?d]|[?v][?a][?r]|[?t][?m][?p]|[?l][?i][?b]|[?u][?s][?r]|[?d][?i][?r])(?=[^\\w?<>:]|$))", + "(?P[\\/\\\\](?:[?s][?o][?r][?t]|[?h][?t][?t][?p]|[?l][?e][?s][?s]|[?s][?e][?l][?f]|[?c][?u][?r][?l]|[?c][?o][?n][?f]|[?l][?o][?g][?s]|[?d][?a][?s][?h]|[?p][?r][?o][?c]|[?.][?e][?x][?e]|[?p][?a][?t][?h]|[?b][?a][?s][?h]|[?.][?d][?l][?l]|[?p][?i][?n][?g]|[?m][?a][?i][?l]|[?e][?c][?h][?o]|[?w][?g][?e][?t])(?=[^\\w?<>:]|$))", + "(?P[\\/\\\\](?:[?p][?i][?n][?g][?6]|[?l][?o][?c][?a][?l]|[?s][?h][?a][?r][?e]|[?u][?n][?a][?m][?e]|[?w][?i][?n][?n][?t]|[?h][?o][?s][?t][?s]|[?n][?e][?t][?s][?h])(?=[^\\w?<>:]|$))", + "(?P[\\/\\\\](?:[?s][?h][?a][?d][?o][?w]|[?b][?a][?c][?k][?u][?p]|[?w][?e][?b][?a][?c][?c]|[?w][?h][?o][?a][?m][?i]|[?m][?a][?s][?t][?e][?r]|[?e][?x][?p][?o][?r][?t]|[?$][?s][?h][?e][?l][?l]|[?p][?a][?s][?s][?w][?d]|[?s][?y][?s][?t][?e][?m])(?=[^\\w?<>:]|$))", + "(?P[\\/\\\\](?:[?w][?i][?n][?.][?i][?n][?i]|[?t][?r][?a][?c][?e][?r][?t]|[?w][?w][?w][?r][?o][?o][?t]|[?n][?e][?t][?s][?t][?a][?t]|[?w][?i][?n][?d][?o][?w][?s]|[?i][?n][?e][?t][?p][?u][?b]|[?e][?n][?v][?i][?r][?o][?n]|[?o][?p][?e][?n][?s][?s][?l])(?=[^\\w?<>:]|$))", + "(?P[\\/\\\\](?:[?n][?e][?t][? 
][?v][?i][?e][?w]|[?n][?s][?l][?o][?o][?k][?u][?p]|[?i][?p][?c][?o][?n][?f][?i][?g]|[?i][?f][?c][?o][?n][?f][?i][?g]|[?p][?a][?s][?s][?w][?o][?r][?d]|[?h][?o][?s][?t][?n][?a][?m][?e]|[?b][?o][?o][?t][?.][?i][?n][?i]|[?n][?e][?t][? ][?u][?s][?e][?r])(?=[^\\w?<>:]|$))", + "(?P[\\/\\\\](?:[?a][?l][?l][?c][?f][?g][?c][?o][?n][?v]|[?t][?r][?a][?c][?e][?r][?o][?u][?t][?e]|[?l][?o][?c][?a][?l][?g][?r][?o][?u][?p])(?=[^\\w?<>:]|$))", + "(?P[\\/\\\\](?:[?d][?e][?s][?k][?t][?o][?p][?.][?i][?n][?i]|[?t][?r][?a][?c][?e][?r][?o][?u][?t][?e][?6])(?=[^\\w?<>:]|$))", + "(?P[\\/\\\\](?:[?n][?e][?t][?s][?h][? ][?f][?i][?r][?e][?w][?a][?l][?l])(?=[^\\w?<>:]|$))", + "(?P\\\\?['\"](system|echo|exit|bck|eval|benchmark|pgsleep|attr|class|request|subclasses|class|writefile|getpath|getruntime|exec|clearconfig|setcache|loadtemplate|getfilter|ex|config|sum|convert|concat|ascii|randomblob|elt|like|sha|rand|lower|substring|upper|count|gethostaddress|username|isnull|varchar|nvarchar|nchar|sleep|nameconst|unhex|hex|version|basedecode|loadfile|systemuser|hash|extractvalue|cast|avg|case|user|reverse|min|gtidsubset|updatexml|updatexml|hashbytes|database|informationschema|sys|database|user|groupconcat|dbo|md|extractvalue|master|sysmessages|this|conf|exe|ini|function|alert|char|pregreplace|sleep|body|appendchild|createelement|self|document|components|lookupmethod|window|location|catch|hash|constructor|constructor|parent|ownerdocument|alert|execscript|slice|expression|setinterval|top|find|history|forward|url|settimeout|back|prompt|msgbox|javascriptval|phpinfo|eval|popen|execute|vardump|func|mid|len|isnumeric|passthru|system|include|basename|realpath|moveuploadedfile|printr|sprintf|fopen|require|copy|readfile|tempnam|touch|assert|fileputcontents|exec|filegetcontents|fileputcontents|unserialize|requireonce|popen|createfunction|strrev|function|die|basedecode|arraymap|chr|pregreplace|isset|fread|strreplace|calluserfunc|ord|decode|request|getparameter|fileoutputstream|application|getrealpath|response|write|getbytes|setcookie|res|end|readdirsync|tostring|serialize|phpuname|phpversion|array|thread|join|valueof|length|writeln|decodeuri|navigate|reflect|open|set)\\\\?['\"])", + "(?Pprintf\\s+['\"\"])", + "(?P\\w+\\s*\\[\\s*\\$)", + "(?P\\(\\s*exec\\s*\\))", + "(?P\\w+\\s*=\\s*<%)", + "(?P\\)\\s*\\(\\s*\"\\w+\"\\s*\\))", + "(?P^\\s*\\.\\s*return)", + "(?P-?\\d+(\\.\\d+)?\\s+(or|and|\\||\\&|\\|\\||\\&\\&|having)(\\s*not\\s*)?\\s+-?\\d+(\\.\\d+)?\\s*(!|=|<|>|is))", + "(?P(['\"]|\\d)\\s+(and|\\||\\&|\\|\\||\\&\\&|or|having)(\\s*not\\s*)?\\s+[\"']\\w+[\"']\\s*(=|is))", + "(?P^\\s*(and|\\||\\&|\\|\\||\\&\\&|or|having)(\\s*not\\s*)?\\s*\\d+\\s*([^\\w\\s]+|$))", + "(?P^\\s*(and|\\||\\&|\\|\\||\\&\\&|or|having)(\\s*not\\s*)?.*is\\s*?(not\\s*)null)", + "(?P(\\s(and|\\||\\&|\\|\\||\\&\\&|or|having)|\\^)(\\s*not\\s*)?\\s+([\"'\\.\\-_\\(\\),\\w]+)\\s*([=%>&<+\\-\\!\\|]+|(like|rlike))[\\w\\s])", + "(?P(and|\\||\\&|\\|\\||\\&\\&|or|having)(\\s*not\\s*)?\\s+\\w+\\s*\\()", + "(?P('|and|\\||\\&|\\|\\||\\&\\&|or|having)(\\s*not\\s*)?.*select\\s+(.*,){3,}.*(from|[\\);'\"\\|]{2,}|--|/\\*|#))", + "(?P\\s(and|\\||\\&|\\|\\||\\&\\&|or|having|order\\s+by)(\\s*not\\s*)?\\s[\\s\\w\\(\\)]*([\\);'\"\\|]{2,}|--|\\/\\*|#))", + "(?P(and|or)(\\s*not\\s*)?\\s+('|\"))", + "(?Pselect[\\w\\s\\-\\.\\^\\@~]+from)", + "(?P^\\s*\\w+\\s*[<>]+\\s*\\w+\\s*$)", + "(?P'\\s+(and|\\||\\&|\\|\\||\\&\\&)(\\s*not\\s*)?\\s+'[^']+'\\s*([=\\!]+|like|rlike)\\s*')", + "(?Por\\s+(\\s*not\\s*)?\\d+(\\.\\d+)?\\s*[=<>]+\\s*\\d+(\\.\\d+)?)", + "(?Pselect\\s*@)", + "(?Pdeclare\\s*@)", + 
"(?P^\\d(\\^\\w+)*\\s*(and|\\||\\&|\\|\\||\\&\\&|or|having)(\\s*not\\s*)?\\s*\\w$)", + "(?P[^\\w]on(canplaythrough|help|show|layoutcomplete|beforeeditfocus|move|focus|medialoadfailed|ended|toggle|pointerout|afterprint|selectstart|beforepaste|mediaslip|loadend|cut|transitionstart|durationchange|volumechange|rowsinserted|aftercopy|stalled|mouseout|input|focusin|cuechange|seeking|datasetcomplete|stop|keydown|mousemove|resizestart|webkitanimationstart|movestart|offline|beforeupdate|pointerrawupdate|dragenter|seek|drop|hashchange|transitionrun|beforedeactivate|loadedmetadata|pointerenter|beforescriptexecute|loadeddata|activate|animationstart|rowdelete|rowexit|begin|bounce|selectionchange|repeat|resizeend|webkittransitionend|dragover|close|pointermove|losecapture|animationiteration|pagehide|webkitanimationiteration|deactivate|keypress|fullscreenchange|waiting|blur|progress|dragleave|touchend|animationend|resume|undo|pointerdown|pause|trackchange|beforeactivate|change|search|seeksegmenttime|mediacomplete|reverse|canplay|mediaerror|textinput|load|suspend|datasetchanged|emptied|auxclick|beforeprint|dragstart|rowsdelete|message|redo|transitionend|select|timeupdate|seeked|paste|propertychange|submit|seeksegmenttime|start|dragdrop|syncrestored|errorupdate|mouseenter|pageshow|pointerup|outofsync|copy|resize|reset|ratechange|unload|dataavailable|rowsexit|popstate|rowsenter|error|rowenter|animationcancel|moveend|transitioncancel|focusout|mouseup|controlselect|abort|resync|unhandledrejection|urlflip|beforeunload|webkitanimationend|cellchange|end|touchstart|loadstart|keyup|beforecopy|invalid|click|readystatechange|touchmove|afteractivate|online|urlflip|wheel|play|finish|mousewheel|scriptcommand|filterchange|storage|drag|mousedown|timeerror|beforecut|pointerover|mouseleave|rowinserted|contextmenu|afterscriptexecute|scroll|pointerleave|dragend|dblclick|afterupdate|mouseover|playing|mozfullscreenchange)\\s*=)", + "(?Padmin\" #)" + ], + "precondition_keys": [ + "", + "\n", + "\r", + " ", + "!", + "!!", + "!![]", + "\"", + "\"=\"", + "\"username\"", + "#", + "$", + "$*", + "$@", + "$_cookie[", + "$_files", + "$_get", + "$_get[", + "$_post[", + "$_request[", + "$_server[", + "$comment", + "$env{", + "$http_get_vars[", + "$php_md5", + "$query", + "$shell", + "$where", + "${", + "%", + "%25c0%25ae%25c0%25ae/", + "%>", + "%env", + "%u22", + "%uefc8", + "%uf025", + "%uff0e", + "&", + "r", + "<", + "<", + "<", + "t", + "<", + "", + "e", + "<", + "l", + " ", + " ", + "<", + "", + "e", + "'", + "'--", + "'80040e", + "';", + "'='", + "'or", + "'username'", + "'||", + "(", + ")", + ");/", + ");id;", + ");id|", + ")|/", + ")|id", + ")|id;", + "*", + "*/", + "+acj-", + "+ad4apb-", + "+ad7-", + "+adz-", + ",", + ",null", + "-", + "--", + ".", + ".7z", + ".bak", + ".cfg", + ".cobalt", + ".config", + ".connect", + ".gz", + ".inc", + ".old", + ".rar", + ".sql", + ".tar", + ".zip", + "/", + "/%c0%ae", + "/%c0%ae%c0%ae", + "/%c0%ae%c0%ae/", + "/*", + "/**/", + "//filter/", + "/3tvars", + "/4iv9", + "/5vkmc", + "/5ytus", + "/?", + "/_showjavartdetails", + "/_showpooldetails", + "/a_domlog", + "/a_security", + "/acart2_0", + "/access", + "/access-options", + "/account", + "/accounts", + "/active", + "/addalink", + "/addcontent", + "/adovbs", + "/aedating4cms", + "/aedatingcms", + "/aexp2", + "/aexp2b", + "/aexp3", + "/aexp4", + "/aexp4b", + "/agentrunner", + "/ains_main", + "/alog", + "/amprops", + "/analyse", + "/anything", + "/apage", + "/apzufu", + "/architext_query", + "/args", + "/attach", + "/attrib", + "/auth_user_file", + 
"/author", + "/authors", + "/awebvisit", + "/awstats", + "/backup", + "/ban", + "/basilix", + "/bc4j", + "/bdir", + "/bdjra5dcb", + "/beaninfo", + "/bigconf", + "/billing", + "/bin", + "/biztalkhttpreceive", + "/blah-whatever", + "/blah-whatever-badfile", + "/blahb", + "/bookmark", + "/books", + "/botinfs", + "/bots", + "/buddies", + "/buddy", + "/buddylist", + "/buffer", + "/buffer2", + "/buffer4", + "/busytime", + "/calendar", + "/carbo", + "/carello", + "/cartcart", + "/catalog", + "/ccbill", + "/cersvr", + "/certa", + "/certlog", + "/certsrv", + "/cf-sinfo", + "/cfcache", + "/cfcexplorer", + "/cfdocs", + "/cfgwiz", + "/cfmlsyntaxcheck", + "/cgi", + "/cgi-sys/cgiecho", + "/cgi-sys/countedit", + "/cgichkmasterpwd", + "/cgimail", + "/chatlog", + "/chetcpasswd", + "/chkvol", + "/clbusy", + "/cldbdir", + "/clients", + "/clusta4", + "/clusterframe", + "/cnf_gi", + "/code", + "/codebrw", + "/collect4", + "/com", + "/com_minibb", + "/command", + "/convert", + "/copy", + "/countdown", + "/counter", + "/cpa", + "/cphost", + "/cplogfile", + "/cpshost", + "/cschatrbox", + "/csguestbook", + "/cslivesupport", + "/csnews", + "/cte", + "/ctguestb", + "/cthzrcbsobmimq", + "/ctss", + "/customerdata", + "/da", + "/dadentries", + "/dan_o", + "/db", + "/db2000", + "/dba4", + "/dbconnect", + "/dbprod2_prod", + "/dclf", + "/deasappdesign", + "/deaslog", + "/deaslog0", + "/deaslog02", + "/deaslog03", + "/deaslog04", + "/deaslog05", + "/decsadm", + "/decsdoc", + "/decslog", + "/deesadmin", + "/delete", + "/deptodoc", + "/desktop", + "/detail", + "/details", + "/diag_dbtest", + "/displayopenedfile", + "/doladmin", + "/dols_help", + "/domadmin", + "/domcfg", + "/domguide", + "/domlog", + "/dpnecentral", + "/dpnepolicyservice", + "/dsn", + "/dsnform", + "/dspug", + "/duxqcmbiq", + "/dvwssr", + "/dwssap", + "/e7uo7v7d", + "/easylog", + "/ecxotaping", + "/eipc", + "/english", + "/entropybanner", + "/entropysearch", + "/env", + "/environ", + "/etc", + "/etc/passwd", + "/evaluate", + "/event", + "/events", + "/events4", + "/events5", + "/exprcalc", + "/ext", + "/extends", + "/extends2", + "/ezadmin", + "/ezboard", + "/fastjsdata", + "/fck_flash", + "/fck_image", + "/fck_link", + "/fcring", + "/fdir", + "/file-that-is-not-real-2002", + "/fileexists", + "/filetime", + "/form_results", + "/formmail-clone", + "/forms5", + "/forum", + "/fp30reg", + "/fpadmcgi", + "/fpcount", + "/fpremadm", + "/fpsrvadm", + "/ftayrbj", + "/funhist", + "/generalchassisconfig", + "/get_od_toc", + "/getdrvrs", + "/getfile", + "/getrend", + "/getservers", + "/gettempdirectory", + "/getvars", + "/getwhen", + "/glist", + "/gm-2-b2", + "/gozila", + "/group", + "/groups", + "/gwweb", + "/haffzeqgulj", + "/hellouser", + "/help4", + "/help5_admin", + "/help5_client", + "/help5_designer", + "/helpadmin", + "/helpdesk", + "/helplt4", + "/hidden", + "/hits", + "/hnvyuzwaqq", + "/hosts", + "/htpasswd", + "/hywsrkdlyfhyrb", + "/i8ya5llb0qb", + "/icoduserguide", + "/id", + "/iisadmpwd", + "/imadminlogon", + "/import", + "/internet", + "/intersl", + "/ip_password_result", + "/ism", + "/ixmail_netattach", + "/javapg", + "/jdkrqnotify", + "/jluwpkdfrgvwfs", + "/jotter", + "/judy_tech_book", + "/judysort", + "/junk", + "/junk988", + "/junk999", + "/junk_nonexistant", + "/kbccv", + "/kbnv", + "/kbssvv", + "/krkiuwjugrom", + "/kywndbrexogmih", + "/l_domlog", + "/lancard", + "/lancgi", + "/lang", + "/language", + "/lccon", + "/lcgitest", + "/lcon", + "/ldap", + "/ldhpsaoeanyd", + "/learn-msg", + "/leiadm", + "/leilog", + "/leivlt", + "/lib", + "/license", + 
"/link", + "/linkinfo", + "/local", + "/lpt9", + "/lsxlc", + "/lygmasbkidzlu/cgi-bin", + "/mab", + "/main", + "/mainframeset", + "/manage", + "/master", + "/mbox-list", + "/mchat", + "/md", + "/mdefre8m4ml", + "/member_log", + "/metacart", + "/middle_help_intro", + "/midicart", + "/migrate", + "/mime", + "/motd", + "/mountain", + "/mpcsoftweb_guestdata", + "/msdwda", + "/msg", + "/mtatbls", + "/mtstore", + "/musicqueue", + "/mysql", + "/names", + "/nbxovzsmbguzvznzzqpiu", + "/nd00000", + "/nd000000", + "/nd000002", + "/nd000003", + "/nd000004", + "/ndslogin", + "/ndsobj", + "/netbasic", + "/netdetector", + "/netpanzer", + "/newdsn", + "/newpro", + "/news", + "/nikto", + "/nikto-test-7zpdybyd", + "/nikto-test-bgl4esul", + "/nikto-test-ksa8x6xq", + "/nikto-test-ons", + "/nikto-test-ttjw6", + "/nntppost", + "/no-such-file", + "/notes", + "/npn_admn", + "/npn_rn", + "/nsmanager", + "/ntp_primer", + "/ntsync4", + "/ntsync45", + "/nul", + "/null", + "/obwupzwyakykba", + "/oder", + "/oem", + "/ofscan", + "/okjdkewqqdobah/cgi-bin", + "/onrequestend", + "/openfile", + "/openview5", + "/opt", + "/order", + "/order_log", + "/order_log_v", + "/orders", + "/ovlaunch", + "/ovlogin", + "/owa_util", + "/pagedouble", + "/pageimport2", + "/pageiserrorpage", + "/pageutil", + "/parse_xml", + "/passgen", + "/passwd", + "/passwdfile", + "/password", + "/passwords", + "/payload_encoding_call4", + "/payload_encoding_fnstenv", + "/payload_encoding_jmp_call", + "/pbserver", + "/people", + "/perl/printenv", + "/perweb", + "/pfzozrh", + "/pitc_ag", + "/planning_superdome_configs", + "/ppzcljsmvsnzwofqxug", + "/pqbopmnkhoxsisdh", + "/pqjgjw", + "/print", + "/private", + "/probe", + "/proc", + "/pt_config", + "/ptg_upgrade_pkg", + "/pubfld", + "/public", + "/qfullhit", + "/qpadmin", + "/qstart50", + "/qsumrhit", + "/query", + "/queryhit", + "/quikstore", + "/randhtml", + "/rd", + "/rdprocess", + "/reademail", + "/realhelpdesk", + "/realsignup", + "/registry", + "/release-msg", + "/ren", + "/reports", + "/reports/ndrqm", + "/reports/yumjnuzv", + "/repqof", + "/rhhvengt26x6rh", + "/rpc", + "/rtm", + "/rvbshld", + "/rvdhe880gl", + "/sam", + "/sample/site3w4646", + "/schema50", + "/secret", + "/self", + "/send", + "/sendemail", + "/sendmail", + "/service", + "/services", + "/session", + "/session/admnlogin", + "/set", + "/setpasswd", + "/setsecurity", + "/shadow", + "/share", + "/shop", + "/shopper", + "/shopping300", + "/shopping400", + "/shtml", + "/simple", + "/site", + "/site_searcher", + "/sitemap", + "/skbvqsy", + "/slist", + "/smadmin", + "/smbcfg", + "/smconf", + "/smency", + "/smg_smxcfg30", + "/smhelp", + "/smmsg", + "/smquar", + "/smsolar", + "/smtime", + "/smtp", + "/smtpibwq", + "/smtpobwq", + "/smtptbls", + "/smvlog", + "/snmpviewer", + "/snoop", + "/soapconfig", + "/software", + "/source", + "/spywall", + "/sqljdemo", + "/sqlnet", + "/srchadm", + "/srvinst", + "/srvnam", + "/ss", + "/sscd_suncourier", + "/stat", + "/stat_what", + "/statmail", + "/statrep", + "/stats", + "/statusmap", + "/stauths", + "/stautht", + "/stconf", + "/stconfig", + "/stdnaset", + "/stdomino", + "/stlog", + "/streg", + "/stsrc", + "/svacl", + "/svcacl", + "/syxwptffnnuyexftov", + "/tmp", + "/today", + "/toolbar", + "/tovvjrbwrdrpmonx", + "/tqmm8", + "/trace", + "/tradecli", + "/trends", + "/tstisapi", + "/turwwwwhhm", + "/type", + "/url", + "/usebean", + "/usr", + "/uurfnmhfhvtn", + "/uygv2i3lv6ij", + "/var", + "/vfolder", + "/viewexample", + "/volscgi", + "/vpuserinfo", + "/vwchqjxorzitbm", + "/vxvm_ag", + "/vxvm_hwnotes", + 
"/vxvm_ig", + "/vxvm_mig", + "/vxvm_notes", + "/vxvm_tshoot", + "/vxvm_ug", + "/w3proxy", + "/w3tvars", + "/welcome", + "/wfreassign", + "/wg_user-info", + "/whatever", + "/whateverwrzb", + "/whereami", + "/win", + "/windows", + "/winmsdp", + "/wksinst", + "/wml", + "/wp-config", + "/wpconfig", + "/writeto", + "/ws_ftp", + "/wwforum", + "/wwsample", + "/xagogktctnspa", + "/xsqlconfig", + "/yabb", + "/ynnwsuao", + "/yokhehviqwnn", + "/zskwhg8jniabcd", + "00relnotes", + "08a80340-06d3-11ea-9f87-0242ac11000f", + "0x", + "0x800a0bcd", + "1", + "27", + "2f", + "2guest", + "3mduh", + "45a0mzr", + "6", + "7z", + "80040e", + ":", + ";", + ";--", + ";dir", + ";id", + ";id;", + ";id|", + ";|/", + "<", + "[HEAD][TAIL]---> wrap + m_recentIdx++; + + if (m_recentIdx >= m_hitsPerSecond.size()) { + m_recentIdx = 0; + } + + // forget the hits from the oldest second in this interval (deduct them from total count) + m_hitsCount -= m_hitsPerSecond[m_recentIdx]; + m_hitsPerSecond[m_recentIdx] = 0; + + // Update recentHitTime (switch to next second) + m_recentHitTime += std::chrono::seconds(1); + } + + // increment hitcount in the most recent second's slot, and also the total count + m_hitsPerSecond[m_recentIdx]++; + m_hitsCount++; + return m_hitsCount <= m_max_events; +} + +} +} diff --git a/components/security_apps/waap/waap_clib/RateLimiter.h b/components/security_apps/waap/waap_clib/RateLimiter.h new file mode 100755 index 0000000..04c5942 --- /dev/null +++ b/components/security_apps/waap/waap_clib/RateLimiter.h @@ -0,0 +1,42 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include + +namespace Waap { +namespace Util { + +// Simple rate limiter primitive that collects events() and only allows up to X events per Y seconds. +// For each event, call RateLimiter::event() passing real or simulated timestamp (in seconds). +// The returned boolean value will tell the caller whether this event must pass (true) or be blocked (false). + +class RateLimiter { +public: + RateLimiter(unsigned int events, std::chrono::seconds interval); + void clear(const std::chrono::seconds& now); + bool event(const std::chrono::seconds& now); + +private: + + unsigned m_max_events; // max events allowed during the recent interval window + std::chrono::seconds m_interval; // configured interval window + std::vector m_hitsPerSecond; // array of hitcounts per second (remembers up to interval recent seconds) + unsigned m_recentIdx; // index of recent second + std::chrono::seconds m_recentHitTime; // timestamp of recent second + unsigned m_hitsCount; // total events during last interval seconds (rolling update) +}; + +} +} diff --git a/components/security_apps/waap/waap_clib/RateLimiting.cc b/components/security_apps/waap/waap_clib/RateLimiting.cc new file mode 100755 index 0000000..fa2ef15 --- /dev/null +++ b/components/security_apps/waap/waap_clib/RateLimiting.cc @@ -0,0 +1,255 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "RateLimiting.h" +#include "Waf2Engine.h" +#include "agent_core_utilities.h" + +#define RATE_LIMITING_LRU_SIZE 10000 + +namespace Waap { +namespace RateLimiting { + +bool Policy::getRateLimitingEnforcementStatus() +{ + return m_rateLimiting.enable; +} + +bool +EntryKey::operator==(EntryKey const& other) const +{ + return url == other.url && source == other.source; +} + +bool +Policy::RateLimitingEnforcement::operator==(const Policy::RateLimitingEnforcement &other) const +{ + return enable == other.enable; +} + +bool +Policy::operator==(const Policy &other) const { + return rules == other.rules && m_rateLimiting == other.m_rateLimiting; +} + +bool +Policy::Rule::operator==(const Policy::Rule &other) const { + return action == other.action && rate == other.rate && + sourceFilter == other.sourceFilter && uriFilter == other.uriFilter; +} + +bool +Policy::Rule::Action::operator==(const Policy::Rule::Action &other) const { + return quarantineTimeSeconds == other.quarantineTimeSeconds && + type == other.type; +} + +bool +Policy::Rule::Rate::operator==(const Policy::Rule::Rate &other) const { + return events == other.events && interval == other.interval; +} + +bool +Policy::Rule::SourceFilter::operator==(const Policy::Rule::SourceFilter &other) const { + if (!(groupBy == other.groupBy && scope == other.scope)) + { + return false; + } + + if (specific_source_regexes_pattern.size() != other.specific_source_regexes_pattern.size()) + { + return false; + } + + for(size_t i=0; i= quarantinedUntil) { + // Release blocking state + state = TrackEntry::State::MEASURING; + } + } + + // Count this event, the result will be true if rate limiter not saturated (should allow), or false if it + // is (should block). + return eventRateLimiter.event(now); +} + +void +TrackEntry::quarantineUntil(std::chrono::seconds until) +{ + state = TrackEntry::State::QUARANTINED; + quarantinedUntil = until; +} + +bool +TrackEntry::isBlocked() const +{ + return state != TrackEntry::State::MEASURING; +} + +State::State(const std::shared_ptr &policy) +:policy(policy), perRuleTrackingTable() +{ + // For each rule create separate rate limiter states tracking table + for (unsigned ruleId=0; ruleId < policy->rules.size(); ++ruleId) { + perRuleTrackingTable.push_back(std::make_shared(RATE_LIMITING_LRU_SIZE)); + } +} + +static bool +matchOneOfRegexes(const std::string& value, const std::vector> ®exesList) +{ + for (auto ®ex : regexesList) { + if (regex != nullptr && NGEN::Regex::regexMatch(__FILE__, __LINE__, value, *regex)) { + return true; + } + } + + return false; +} + +bool +State::execute(const std::string& sourceIdentifier, const std::string& uriStr, std::chrono::seconds now, bool& log) +{ + bool allow = true; + log = false; + + // Run rules one by one. 
+ for (unsigned ruleId=0; ruleId < policy->rules.size(); ++ruleId) { + const Policy::Rule &rule = policy->rules[ruleId]; + const Policy::Rule::UriFilter &uriFilter = rule.uriFilter; + const Policy::Rule::SourceFilter &sourceFilter = rule.sourceFilter; + const Policy::Rule::Rate &rate = rule.rate; + const Policy::Rule::Action &action = rule.action; + + // Get rate limiter states tracking table specific to current rule + std::shared_ptr table = perRuleTrackingTable[ruleId]; + + // Build a key to look up an entry + EntryKey entryKey; + + // Filter out unmatched Urls + if (uriFilter.scope == Waap::RateLimiting::Policy::Rule::UriFilter::Scope::SPECIFIC + && !matchOneOfRegexes(uriStr, uriFilter.specific_uri_regexes)) + { + continue; + } + + // Filter out unmatched Sources + if (sourceFilter.scope == Waap::RateLimiting::Policy::Rule::SourceFilter::Scope::SPECIFIC + && !matchOneOfRegexes(sourceIdentifier, sourceFilter.specific_source_regexes)) + { + continue; + } + + if (uriFilter.groupBy == Policy::Rule::UriFilter::GroupBy::URL) { + // Include the HTTP source ID in the key + entryKey.url = uriStr; + } + + if (sourceFilter.groupBy == Policy::Rule::SourceFilter::GroupBy::SOURCE) { + // Include the HTTP source ID in the key + entryKey.source = sourceIdentifier; + } + + // Find entry in LRU, or create new + std::shared_ptr trackEntry; + if (!table->get(entryKey, trackEntry)) { + trackEntry = std::make_shared(rate.events, std::chrono::seconds(rate.interval)); + } + + // Insert or update an entry in LRU (this moves entry up if exist, or inserts new, possibly expiring old ones + // to keep the LRU size under control). + table->insert(std::make_pair(entryKey, trackEntry)); + + // Count this event in the entry's rate limiter. Release temporary block if time arrived. + if (trackEntry->event(now) == false) { + // TrackEntry's rate limiter is saturated (too many requests) - act according to rule's Action + switch (action.type) { + case Policy::Rule::Action::Type::DETECT: + // log block action. + log = true; + // Detect + break; + case Policy::Rule::Action::Type::QUARANTINE: + // Mark this entry blocked temorarily, for at least X seconds + trackEntry->quarantineUntil(now + std::chrono::seconds(action.quarantineTimeSeconds)); + break; + case Policy::Rule::Action::Type::RATE_LIMIT: + // log block action. + log = true; + // Block this event only + allow = false; + break; + } + } + + // If at least one of the rules says "block" - block the request + if (trackEntry->isBlocked()) { + // log block action. + log = true; + allow = false; + } + } + + return allow; +} + +} +} diff --git a/components/security_apps/waap/waap_clib/RateLimiting.h b/components/security_apps/waap/waap_clib/RateLimiting.h new file mode 100755 index 0000000..05e53d4 --- /dev/null +++ b/components/security_apps/waap/waap_clib/RateLimiting.h @@ -0,0 +1,337 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
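State::execute above is the per-request entry point for this policy: given a source identifier, the request URI, and the current time, it returns whether the request may pass and sets a flag telling the caller to emit a log. A hypothetical call site, shown only to make that contract concrete — the wrapper function, variable names, and clock source below are assumptions, not code from this patch:

#include <chrono>
#include <string>
#include "RateLimiting.h"

bool checkRateLimit(Waap::RateLimiting::State &state,
                    const std::string &sourceIdentifier,
                    const std::string &uriStr,
                    std::chrono::seconds now)
{
    bool shouldLog = false;
    bool allow = state.execute(sourceIdentifier, uriStr, now, shouldLog);
    if (shouldLog) {
        // emit a rate-limiting security log here
    }
    return allow; // false -> the transaction should be rejected
}

As a worked example under an assumed rule of interval=60, events=100 with a quarantine action of 300 seconds: the 101st matching request inside a rolling minute saturates that entry's limiter, quarantineUntil(now + 300s) flips the entry to QUARANTINED, and every further request for the same URL/source key is blocked and logged until the quarantine timestamp passes; a "detect" action would only log, and a "rate limit" action would block just the excess events.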
+ +#pragma once + +#include "lru_cache_map.h" +#include "RateLimiter.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +class Waf2Transaction; + +namespace Waap { +namespace RateLimiting { + +struct Policy { + struct Rule { + struct UriFilter { + enum class GroupBy { + GLOBAL, + URL + }; + + enum class Scope { + ALL, + SPECIFIC + }; + + // Deserialize the Type enum + Scope strScopeToEnum(std::string const &value) + { + if (boost::iequals(value, "all")) { + return Scope::ALL; + } + else if (boost::iequals(value, "specific")) { + return Scope::SPECIFIC; + } + else { + throw cereal::Exception( + "Invalid value for RateLimiting::Policy::Rule::SourceFilter::GroupBy='" + value + "'"); + } + } + + // Deserialize the Type enum + GroupBy strGroupByToEnum(std::string const &value) + { + if (boost::iequals(value, "all uris")) { + return GroupBy::GLOBAL; + } + else if (boost::iequals(value, "single uri")) { + return GroupBy::URL; + } + else { + throw cereal::Exception( + "Invalid value for RateLimiting::Policy::Rule::SourceFilter::GroupBy='" + value + "'"); + } + } + + template + void serialize(_A &ar) + { + std::string groupByStr; + ar(cereal::make_nvp("groupBy", groupByStr)); + groupBy = strGroupByToEnum(groupByStr); + std::string scopeStr; + ar(cereal::make_nvp("scope", scopeStr)); + scope = strScopeToEnum(scopeStr); + + if(scope == Scope::SPECIFIC) + { + ar(cereal::make_nvp("specific_uris", specific_uri_regexes_pattern)); + specific_uri_regexes.clear(); + + for (auto &specific_uri_pattern : specific_uri_regexes_pattern) + { + specific_uri_regexes.push_back(std::make_shared(specific_uri_pattern)); + } + } + } + + bool operator==(const Policy::Rule::UriFilter &other) const; + + GroupBy groupBy; + std::vector> specific_uri_regexes; + std::vector specific_uri_regexes_pattern; + Scope scope; + }; + + struct SourceFilter { + enum class GroupBy { + GLOBAL, + SOURCE + }; + + enum class Scope { + ALL, + SPECIFIC + }; + + // Deserialize the Type enum + Scope strScopeToEnum(std::string const &value) + { + if (boost::iequals(value, "all")) { + return Scope::ALL; + } + else if (boost::iequals(value, "specific")) { + return Scope::SPECIFIC; + } + else { + throw cereal::Exception( + "Invalid value for RateLimiting::Policy::Rule::SourceFilter::GroupBy='" + value + "'"); + } + } + + // Deserialize the Type enum + GroupBy strToEnum(std::string const &value) + { + if (boost::iequals(value, "all sources")) { + return GroupBy::GLOBAL; + } + else if (boost::iequals(value, "single source")) { + return GroupBy::SOURCE; + } + else { + throw cereal::Exception( + "Invalid value for RateLimiting::Policy::Rule::SourceFilter::GroupBy='" + value + "'"); + } + } + + template + void serialize(_A &ar) { + std::string groupByStr; + ar(cereal::make_nvp("groupBy", groupByStr)); + groupBy = strToEnum(groupByStr); + + std::string scopeStr; + ar(cereal::make_nvp("scope", scopeStr)); + scope = strScopeToEnum(scopeStr); + + if(scope == Scope::SPECIFIC) + { + ar(cereal::make_nvp("specific_sources", specific_source_regexes_pattern)); + specific_source_regexes.clear(); + + for (auto &specific_source_pattern : specific_source_regexes_pattern) { + specific_source_regexes.push_back(std::make_shared(specific_source_pattern)); + } + } + } + + bool operator==(const Policy::Rule::SourceFilter &other) const; + + GroupBy groupBy; + std::vector> specific_source_regexes; + std::vector specific_source_regexes_pattern; + Scope scope; + }; + + struct Rate { + template + void serialize(_A &ar) { + 
ar(cereal::make_nvp("interval", interval)); + ar(cereal::make_nvp("events", events)); + } + + bool operator==(const Policy::Rule::Rate &other) const; + + unsigned interval; // Interval in seconds + unsigned events; // Events allowed during the interval + }; + + struct Action { + enum class Type { + DETECT, + QUARANTINE, + RATE_LIMIT + }; + + // Deserialize the Type enum + Type strToEnum(std::string const &value) + { + if (boost::iequals(value, "detect")) { + return Type::DETECT; + } + else if (boost::iequals(value, "quarantine")) { + return Type::QUARANTINE; + } + else if (boost::iequals(value, "rate limit")) { + return Type::RATE_LIMIT; + } + else { + throw cereal::Exception( + "Invalid value for RateLimiting::Policy::Action::Type='" + value + "'"); + } + } + + template + void serialize(_A &ar) { + std::string typeStr; + ar(cereal::make_nvp("type", typeStr)); + type = strToEnum(typeStr); + quarantineTimeSeconds = 0; + if (type == Type::QUARANTINE) { + ar(cereal::make_nvp("quarantineTimeSeconds", quarantineTimeSeconds)); + } + } + + bool operator==(const Policy::Rule::Action &other) const; + + Type type; + unsigned quarantineTimeSeconds; // time to block (in seconds), relevant only for QUARANTINE action type + }; + + template + void serialize(_A &ar) { + ar(cereal::make_nvp("uriFilter", uriFilter)); + ar(cereal::make_nvp("sourceFilter", sourceFilter)); + ar(cereal::make_nvp("rate", rate)); + ar(cereal::make_nvp("action", action)); + } + + bool operator==(const Rule &other) const; + + UriFilter uriFilter; + SourceFilter sourceFilter; + Rate rate; + Action action; + }; + + class RateLimitingEnforcement + { + public: + RateLimitingEnforcement() + : + enable(false) + { + } + + template + RateLimitingEnforcement(_A &ar) + : + enable(false) + { + std::string level; + ar(cereal::make_nvp("rateLimitingEnforcement", level)); + level = boost::algorithm::to_lower_copy(level); + if (level == "prevent") { + enable = true; + } + } + + bool operator==(const Policy::RateLimitingEnforcement &other) const; + + bool enable; + }; + + std::vector rules; + RateLimitingEnforcement m_rateLimiting; + + Policy() {} + + bool getRateLimitingEnforcementStatus(); + bool operator==(const Policy &other) const; + + template + Policy(_A& ar) : m_rateLimiting(ar) { + ar(cereal::make_nvp("rateLimiting", rules)); + } + +}; + +// Key used to identify specific rate limiting entry +struct EntryKey { + std::string url; + std::string source; + // comparison operator should be implemented to use this struct as a key in an LRU cache. 
+ bool operator==(EntryKey const& other) const; +}; + +// Support efficient hashing for the EntryKey struct so it can participate in unordered (hashed) containers such as LRU +inline std::size_t hash_value(EntryKey const &entryKey) +{ + std::size_t hash = 0; + boost::hash_combine(hash, entryKey.url); + boost::hash_combine(hash, entryKey.source); + return hash; +} + +// Rate limiting tracking entry +struct TrackEntry { + enum State { + MEASURING, + QUARANTINED + }; + Waap::Util::RateLimiter eventRateLimiter; + State state; + std::chrono::seconds quarantinedUntil; + TrackEntry(unsigned int events, std::chrono::seconds interval); + bool event(std::chrono::seconds now); + void quarantineUntil(std::chrono::seconds until); + bool isBlocked() const; +}; + +// Rate limiting state maintained per asset +class State { + public: + typedef LruCacheMap> EntriesLru; + const std::shared_ptr policy; + // For each rule - hold corresponding tracking state (EntriesLru) instance + std::vector> perRuleTrackingTable; + State(const std::shared_ptr &policy); + bool execute( + const std::string& sourceIdentifier, + const std::string& uriStr, + std::chrono::seconds now, + bool& log); +}; + +} +} diff --git a/components/security_apps/waap/waap_clib/RateLimitingDecision.cc b/components/security_apps/waap/waap_clib/RateLimitingDecision.cc new file mode 100755 index 0000000..bc95f59 --- /dev/null +++ b/components/security_apps/waap/waap_clib/RateLimitingDecision.cc @@ -0,0 +1,22 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "RateLimitingDecision.h" + +RateLimitingDecision::RateLimitingDecision(DecisionType type) : SingleDecision(type) +{} + +std::string RateLimitingDecision::getTypeStr() const +{ + return "Rate Limiting"; +} diff --git a/components/security_apps/waap/waap_clib/RateLimitingDecision.h b/components/security_apps/waap/waap_clib/RateLimitingDecision.h new file mode 100755 index 0000000..90242ab --- /dev/null +++ b/components/security_apps/waap/waap_clib/RateLimitingDecision.h @@ -0,0 +1,27 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
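Aside, not part of the patch: the hash_value() helper above is what lets EntryKey act as the key of a hashed container, because boost::hash locates a free hash_value() function through argument-dependent lookup. Below is a minimal, self-contained sketch of the same pattern; DemoKey and the use of std::unordered_map (standing in for the project's LruCacheMap, which is not shown in this hunk) are illustrative assumptions.

// Sketch only: boost::hash_combine-based hashing for a two-string key.
#include <boost/functional/hash.hpp>
#include <unordered_map>
#include <string>
#include <iostream>

struct DemoKey {
    std::string url;
    std::string source;
    bool operator==(const DemoKey &other) const {
        return url == other.url && source == other.source;
    }
};

// Found via argument-dependent lookup by boost::hash<DemoKey>.
inline std::size_t hash_value(const DemoKey &k) {
    std::size_t seed = 0;
    boost::hash_combine(seed, k.url);
    boost::hash_combine(seed, k.source);
    return seed;
}

int main() {
    std::unordered_map<DemoKey, int, boost::hash<DemoKey>> counters;
    ++counters[DemoKey{"/login", "10.0.0.1"}];
    ++counters[DemoKey{"/login", "10.0.0.1"}];
    std::cout << counters[DemoKey{"/login", "10.0.0.1"}] << std::endl; // prints 2
    return 0;
}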
+ +#ifndef __RATE_LIMITING_DECISION_H__ +#define __RATE_LIMITING_DECISION_H__ + +#include "SingleDecision.h" +#include "DecisionType.h" +#include + +class RateLimitingDecision: public SingleDecision +{ +public: + explicit RateLimitingDecision(DecisionType type); + std::string getTypeStr() const override; +}; +#endif diff --git a/components/security_apps/waap/waap_clib/ScanResult.cc b/components/security_apps/waap/waap_clib/ScanResult.cc new file mode 100755 index 0000000..0159c83 --- /dev/null +++ b/components/security_apps/waap/waap_clib/ScanResult.cc @@ -0,0 +1,79 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ScanResult.h" + +Waf2ScanResult::Waf2ScanResult() +: +keyword_matches(), +regex_matches(), +filtered_keywords(), +found_patterns(), +unescaped_line(), +param_name(), +location(), +score(0.0f), +scoreArray(), +keywordCombinations(), +attack_types(), +m_isAttackInParam(false) +{ +} + +void Waf2ScanResult::clear() +{ + keyword_matches.clear(); + regex_matches.clear(); + filtered_keywords.clear(); + found_patterns.clear(); + unescaped_line.clear(); + param_name.clear(); + location.clear(); + score = 0; + scoreArray.clear(); + keywordCombinations.clear(); + attack_types.clear(); +} + +void Waf2ScanResult::mergeFrom(const Waf2ScanResult& other) +{ + location = other.location; + param_name = other.param_name; + + Waap::Util::mergeFromVectorWithoutDuplicates( + other.keyword_matches, + keyword_matches + ); + Waap::Util::mergeFromVectorWithoutDuplicates( + other.regex_matches, + regex_matches + ); + Waap::Util::mergeFromMapOfVectorsWithoutDuplicates( + other.found_patterns, + found_patterns + ); + if (unescaped_line.empty()) + { + unescaped_line = other.unescaped_line; + } + + unescaped_line = other.unescaped_line + "?" + unescaped_line; + + + Waap::Util::mergeFromVectorWithoutDuplicates( + other.scoreArray, + scoreArray + ); + + attack_types.insert(other.attack_types.begin(), other.attack_types.end()); +} diff --git a/components/security_apps/waap/waap_clib/ScanResult.h b/components/security_apps/waap/waap_clib/ScanResult.h new file mode 100755 index 0000000..e923fcb --- /dev/null +++ b/components/security_apps/waap/waap_clib/ScanResult.h @@ -0,0 +1,41 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
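Aside, not part of the patch: Waf2ScanResult::mergeFrom() above relies on Waap::Util helpers such as mergeFromVectorWithoutDuplicates, whose implementation is not included in this hunk. A hedged sketch of what such a helper plausibly does follows; the name mergeWithoutDuplicates and the linear-scan approach are assumptions for illustration only.

// Sketch only: append items from 'from' into 'to', skipping values already present.
#include <algorithm>
#include <string>
#include <vector>
#include <iostream>

template <typename T>
void mergeWithoutDuplicates(const std::vector<T> &from, std::vector<T> &to)
{
    for (const T &item : from) {
        // Linear scan keeps the relative order of 'to'; acceptable for short keyword lists.
        if (std::find(to.begin(), to.end(), item) == to.end()) {
            to.push_back(item);
        }
    }
}

int main()
{
    std::vector<std::string> a = {"select", "union"};
    const std::vector<std::string> b = {"union", "sleep"};
    mergeWithoutDuplicates(b, a);
    for (const auto &kw : a) std::cout << kw << " "; // select union sleep
    std::cout << std::endl;
    return 0;
}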
+ +#ifndef __SCAN_RESULT_H__ +#define __SCAN_RESULT_H__ + +#include "Waf2Util.h" +#include +#include +#include + + +struct Waf2ScanResult { + std::vector keyword_matches; + std::vector regex_matches; + std::vector filtered_keywords; + Waap::Util::map_of_stringlists_t found_patterns; + std::string unescaped_line; + std::string param_name; + std::string location; + double score; + std::vector scoreArray; + std::vector keywordCombinations; + std::set attack_types; + bool m_isAttackInParam; + void clear(); // clear Waf2ScanResult + Waf2ScanResult(); + void mergeFrom(const Waf2ScanResult& other); +}; + +#endif // __SCAN_RESULT_H__ diff --git a/components/security_apps/waap/waap_clib/ScannerDetector.cc b/components/security_apps/waap/waap_clib/ScannerDetector.cc new file mode 100755 index 0000000..1688042 --- /dev/null +++ b/components/security_apps/waap/waap_clib/ScannerDetector.cc @@ -0,0 +1,235 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ScannersDetector.h" +#include "waap.h" +#include "i_messaging.h" +#include + +USE_DEBUG_FLAG(D_WAAP); +#define SYNC_WAIT_TIME std::chrono::seconds(300) // 5 minutes in seconds +#define INTERVAL std::chrono::minutes(120) +#define EQUAL_VALUES_COUNT_THRESHOLD 2 +#define MAX_RETENTION 5 + +ScannerDetector::ScannerDetector(const std::string& localPath, const std::string& remotePath, + const std::string &assetId) : + SerializeToLocalAndRemoteSyncBase(INTERVAL, SYNC_WAIT_TIME, + localPath + "/11.data", + (remotePath == "") ? 
remotePath : remotePath + "/ScannersDetector", + assetId, + "ScannerDetector") +{ + m_sources_monitor.push_front(SourceKeyValsMap()); +} + +bool ScannerDetector::ready() +{ + if (m_lastSync.count() == 0) + { + return false; + } + std::chrono::microseconds currentTime = Singleton::Consume::by()->getWalltime(); + return (currentTime - m_lastSync < m_interval / 2); +} + +std::vector* ScannerDetector::getSourcesToIgnore() +{ + return &m_sources; +} + +void ScannerDetector::log(const std::string& source, const std::string& key, Waap::Keywords::KeywordsSet& keywords) +{ + m_sources_monitor.front()[source][key].insert(keywords.begin(), keywords.end()); +} + +void ScannerDetector::loadParams(std::shared_ptr pParams) +{ + std::string interval = pParams->getParamVal("learnIndicators.intervalDuration", + std::to_string(INTERVAL.count())); + setInterval(std::chrono::minutes(std::stoul(interval))); + std::string remoteSyncStr = pParams->getParamVal("remoteSync", "true"); + setRemoteSyncEnabled(!boost::iequals(remoteSyncStr, "false")); +} + +class SourcesMonitorPost : public RestGetFile +{ +public: + SourcesMonitorPost(ScannerDetector::SourceKeyValsMap& _monitor) + : monitor(_monitor) + { + } + +private: + C2S_PARAM(ScannerDetector::SourceKeyValsMap, monitor) +}; + +class SourcesMonitorGet : public RestGetFile +{ +public: + SourcesMonitorGet() : monitor() + { + } + + Maybe getSourcesMonitor() + { + return monitor.get(); + } + +private: + S2C_PARAM(ScannerDetector::SourceKeyValsMap, monitor) +}; + + +bool ScannerDetector::postData() +{ + m_sources_monitor_backup = m_sources_monitor.front(); + m_sources_monitor.push_front(SourceKeyValsMap()); + std::string url = getPostDataUrl(); + + dbgTrace(D_WAAP) << "Sending the data to: " << url; + + SourcesMonitorPost currentWindow(m_sources_monitor_backup); + return sendNoReplyObjectWithRetry(currentWindow, + I_Messaging::Method::PUT, + url); +} + +void ScannerDetector::pullData(const std::vector& files) +{ + std::string url = getPostDataUrl(); + std::string sentFile = url.erase(0, url.find_first_of('/') + 1); + dbgTrace(D_WAAP) << "pulling files, skipping: " << sentFile; + for (auto file : files) + { + if (file == sentFile) + { + continue; + } + dbgTrace(D_WAAP) << "Pulling the file: " << file; + SourcesMonitorGet getMonitor; + sendObjectWithRetry(getMonitor, + I_Messaging::Method::GET, + getUri() + "/" + file); + + SourceKeyValsMap remoteMonitor = getMonitor.getSourcesMonitor().unpack(); + for (const auto& srcData : remoteMonitor) + { + for (const auto& keyData : srcData.second) + { + m_sources_monitor_backup[srcData.first][keyData.first].insert( + keyData.second.begin(), + keyData.second.end()); + } + } + // update the sources monitor in previous "time window" + auto temp = m_sources_monitor.front(); + m_sources_monitor.pop_front(); + m_sources_monitor.pop_front(); + m_sources_monitor.push_front(m_sources_monitor_backup); + m_sources_monitor.push_front(temp); + } +} + +void ScannerDetector::postProcessedData() +{ + +} + +void ScannerDetector::updateState(const std::vector&) +{ +} + +void ScannerDetector::pullProcessedData(const std::vector& files) +{ + (void)files; +} + +void ScannerDetector::mergeMonitors(SourceKeyValsMap& mergeTo, SourceKeyValsMap& mergeFrom) +{ + for (const auto& srcData : mergeFrom) + { + for (const auto& keyData : srcData.second) + { + dbgTrace(D_WAAP) << "merging src: " << srcData.first << ", key: " << keyData.first << + ", keywords: " << Waap::Util::setToString(keyData.second); + 
mergeTo[srcData.first][keyData.first].insert(keyData.second.begin(), keyData.second.end()); + } + } +} + +void ScannerDetector::processData() +{ + if (m_sources_monitor_backup.empty()) + { + m_sources_monitor_backup = m_sources_monitor.front(); + m_sources_monitor.push_front(SourceKeyValsMap()); + } + + if (m_sources_monitor.size() > 2) + { + auto monitorItr = m_sources_monitor.begin()++; + for (monitorItr++; monitorItr != m_sources_monitor.end(); monitorItr++) + { + mergeMonitors(m_sources_monitor_backup, *monitorItr); + } + } + + m_sources.clear(); + for (auto source : m_sources_monitor_backup) + { + if (source.second.size() <= 2) + { + continue; + } + std::map>& keyVals = source.second; + for (auto key = keyVals.begin(); key != keyVals.end(); key++) + { + auto otherKey = key; + int counter = 0; + for (++otherKey; otherKey != keyVals.end(); otherKey++) + { + if (key->second != otherKey->second) + { + continue; + } + dbgTrace(D_WAAP) << "source monitor: src: " << source.first << ", key_1: " << key->first << ", key_2: " + << otherKey->first << ", vals: " << Waap::Util::setToString(key->second); + counter++; + } + if (counter >= EQUAL_VALUES_COUNT_THRESHOLD) + { + dbgDebug(D_WAAP) << "source: " << source.first << " will be ignored"; + m_sources.push_back(source.first); + break; + } + } + } + + if (m_sources_monitor.size() > MAX_RETENTION) + { + m_sources_monitor.pop_back(); + } + m_sources_monitor_backup.clear(); + m_lastSync = Singleton::Consume::by()->getWalltime(); +} + +void ScannerDetector::serialize(std::ostream& stream) +{ + (void)stream; +} + +void ScannerDetector::deserialize(std::istream& stream) +{ + (void)stream; +} diff --git a/components/security_apps/waap/waap_clib/ScannersDetector.h b/components/security_apps/waap/waap_clib/ScannersDetector.h new file mode 100755 index 0000000..e668e5c --- /dev/null +++ b/components/security_apps/waap/waap_clib/ScannersDetector.h @@ -0,0 +1,55 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
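Aside, not part of the patch: ScannerDetector::processData() above flags a source as a scanner when, for some key, enough other keys of that source carry an identical set of indicators (EQUAL_VALUES_COUNT_THRESHOLD matching pairs). The standalone sketch below reproduces just that pair-counting heuristic; the function name looksLikeScanner and the sample data are illustrative.

// Sketch only: detect a source whose keys repeatedly share the exact same keyword set.
#include <map>
#include <set>
#include <string>
#include <iostream>

bool looksLikeScanner(const std::map<std::string, std::set<std::string>> &keyVals,
                      int equalValuesThreshold)
{
    for (auto key = keyVals.begin(); key != keyVals.end(); ++key) {
        int counter = 0;
        auto otherKey = key;
        for (++otherKey; otherKey != keyVals.end(); ++otherKey) {
            if (key->second == otherKey->second) counter++;
        }
        if (counter >= equalValuesThreshold) return true;
    }
    return false;
}

int main()
{
    std::map<std::string, std::set<std::string>> source = {
        {"param_a", {"select", "union"}},
        {"param_b", {"select", "union"}},
        {"param_c", {"select", "union"}},
    };
    std::cout << std::boolalpha << looksLikeScanner(source, 2) << std::endl; // true
    return 0;
}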
+ +#ifndef __SCANNERS_DETECTOR_H__ +#define __SCANNERS_DETECTOR_H__ + +#include "WaapKeywords.h" +#include "i_serialize.h" +#include "i_ignoreSources.h" +#include "WaapParameters.h" + +class ScannerDetector : public SerializeToLocalAndRemoteSyncBase, public I_IgnoreSources +{ +public: + typedef std::map>> SourceKeyValsMap; + ScannerDetector(const std::string& localPath, const std::string& remotePath = "", const std::string &assetId = ""); + + virtual bool ready(); + virtual std::vector* getSourcesToIgnore(); + void log(const std::string& source, const std::string& key, Waap::Keywords::KeywordsSet& keywords); + + void loadParams(std::shared_ptr pParams); + + virtual bool postData(); + virtual void pullData(const std::vector& files); + virtual void processData(); + virtual void postProcessedData(); + virtual void pullProcessedData(const std::vector& files); + virtual void updateState(const std::vector& files); + + virtual void serialize(std::ostream& stream); + virtual void deserialize(std::istream& stream); + +private: + void mergeMonitors(SourceKeyValsMap& mergeTo, SourceKeyValsMap& mergeFrom); + + std::list m_sources_monitor; // list of map source -> key -> set of indicators + SourceKeyValsMap m_sources_monitor_backup; // stores data of the last window to process + + std::vector m_sources; + std::chrono::microseconds m_lastSync; +}; + + +#endif diff --git a/components/security_apps/waap/waap_clib/ScoreBuilder.cc b/components/security_apps/waap/waap_clib/ScoreBuilder.cc new file mode 100755 index 0000000..92fb58a --- /dev/null +++ b/components/security_apps/waap/waap_clib/ScoreBuilder.cc @@ -0,0 +1,499 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
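Aside, not part of the patch: ScannerDetector keeps a short history of observation windows in m_sources_monitor, pushing a fresh SourceKeyValsMap at each sync and discarding the oldest once more than MAX_RETENTION windows accumulate. The sketch below shows that sliding-window bookkeeping in isolation; the loop, names, and values are illustrative assumptions.

// Sketch only: bounded history of source -> key -> indicator-set maps.
#include <list>
#include <map>
#include <set>
#include <string>
#include <iostream>

using SourceKeyVals = std::map<std::string, std::map<std::string, std::set<std::string>>>;

int main()
{
    const size_t maxRetention = 5;
    std::list<SourceKeyVals> windows;

    for (int sync = 0; sync < 8; ++sync) {
        windows.push_front(SourceKeyVals());          // open a new observation window
        windows.front()["10.0.0.1"]["q"].insert("select");
        if (windows.size() > maxRetention) {
            windows.pop_back();                       // drop the oldest window
        }
    }
    std::cout << windows.size() << std::endl;         // 5
    return 0;
}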
+ +#include "ScoreBuilder.h" +#include "Waf2Regex.h" +#include +#include +#include +#include "WaapAssetState.h" +#include +#include +#include +#include +#include "debug.h" + +USE_DEBUG_FLAG(D_WAAP_SCORE_BUILDER); + +#define GENERATE_FALSE_POSITIVES_LIST_THRESHOLD 100 +#define SCORE_CALCULATION_THRESHOLD 5000 + +using namespace std::chrono; + +ScoreBuilderData::ScoreBuilderData() : + m_sourceIdentifier(), + m_userAgent(), + m_sample(), + m_relativeReputation(0.0), + m_fpClassification(UNKNOWN_TYPE) +{} + +ScoreBuilderData::ScoreBuilderData( + const std::string &sourceIdentifier, + const std::string &userAgent, + const std::string &sample, + double relativeReputation, + PolicyCounterType type, + const std::vector &keywordsMatches, + const std::vector &keywordsCombinations) + : + m_sourceIdentifier(sourceIdentifier), + m_userAgent(userAgent), + m_sample(sample), + m_relativeReputation(relativeReputation), + m_fpClassification(type), + m_keywordsMatches(keywordsMatches), + m_keywordsCombinations(keywordsCombinations) +{} + +KeywordsScorePool::KeywordsScorePool() +: +m_keywordsDataMap(), +m_stats() +{ +} + +void KeywordsScorePool::mergeScores(const KeywordsScorePool& baseScores) +{ + // find all keywords that exist in base but not in this + std::vector removedElements; + std::vector::iterator removedElementsIt; + for (KeywordDataMap::const_iterator it = m_keywordsDataMap.begin(); + it != m_keywordsDataMap.end(); ++it) + { + // key not found in base array + if (baseScores.m_keywordsDataMap.find(it->first) == baseScores.m_keywordsDataMap.end()) + { + removedElements.push_back(it->first); + } + } + + // removing elements that were deleted + for (removedElementsIt = removedElements.begin(); + removedElementsIt != removedElements.end(); + ++removedElementsIt) + { + m_keywordsDataMap.erase(*removedElementsIt); + } + + // learning new scores + for (KeywordDataMap::const_iterator it = baseScores.m_keywordsDataMap.begin(); + it != baseScores.m_keywordsDataMap.end(); ++it) + { + if (m_keywordsDataMap.find(it->first) == m_keywordsDataMap.end()) + { + m_keywordsDataMap[it->first] = it->second; + } + } +} + + +ScoreBuilder::ScoreBuilder(I_WaapAssetState* pWaapAssetState) : + SerializeToFilePeriodically(duration_cast(minutes(10)), pWaapAssetState->getSignaturesScoresFilePath()), + m_scoreTrigger(0), + m_fpStore(), + m_keywordsScorePools(), + m_falsePositivesSetsIntersection(), + m_pWaapAssetState(pWaapAssetState) +{ + restore(); +} + +ScoreBuilder::ScoreBuilder(I_WaapAssetState* pWaapAssetState, ScoreBuilder& baseScores) : + SerializeToFilePeriodically(duration_cast(minutes(10)), pWaapAssetState->getSignaturesScoresFilePath()), + m_scoreTrigger(0), + m_fpStore(), + m_keywordsScorePools(), + m_falsePositivesSetsIntersection(), + m_pWaapAssetState(pWaapAssetState) +{ + restore(); + + // merge + mergeScores(baseScores); +} + +void ScoreBuilder::serialize(std::ostream& stream) { + cereal::JSONOutputArchive archive(stream); + static const size_t version = 1; + archive( + cereal::make_nvp("version", version), + cereal::make_nvp("scorePools", m_keywordsScorePools) + ); +} + +void ScoreBuilder::deserialize(std::istream& stream) { + cereal::JSONInputArchive iarchive(stream); + + size_t version = 0; + try { + iarchive(cereal::make_nvp("version", version)); + } + catch (std::runtime_error & e) { + iarchive.setNextName(nullptr); + version = 0; + dbgDebug(D_WAAP_SCORE_BUILDER) << "ScoreBuilder version absent, using version " << version << + " e.what() is " << e.what(); + } + + dbgDebug(D_WAAP_SCORE_BUILDER) << "Loading 
scores from file version " << version << "..."; + + switch (version) + { + case 1: { + iarchive(cereal::make_nvp("scorePools", m_keywordsScorePools)); + break; + } + case 0: { + m_keywordsScorePools[KEYWORDS_SCORE_POOL_BASE] = KeywordsScorePool(iarchive); + break; + } + default: { + dbgDebug(D_WAAP_SCORE_BUILDER) << "Unknown scores file version: " << version; + } + } +} + +void ScoreBuilder::analyzeFalseTruePositive(ScoreBuilderData& data, const std::string &poolName, bool doBackup) +{ + if (data.m_fpClassification == UNKNOWN_TYPE) + { + dbgTrace(D_WAAP_SCORE_BUILDER) << + "analyzeFalseTruePositive(): Got UNKNOWN_TYPE as false positive classification " + ", will not pump keywords score"; + return; + } + dbgTrace(D_WAAP_SCORE_BUILDER) << "ScoreBuilder::analyzeFalseTruePositive: pumping score pool=" << poolName; + pumpKeywordScore(data, poolName, doBackup); +} + +bool ScoreBuilder::isHtmlContent(std::string sample) +{ + // count closing html elements + unsigned int closingHtmlElem = 0; + std::string::size_type pos = 0; + std::string htmlClosingElementHint = " 3) + { + return true; + } + + unsigned int openingHtmlElem = 0; + bool regexError = false; + std::string reName = "html opening element regex"; + Regex htmlOpenElementRe(" matches; + + if (sample.length() <= 30) + { + return false; + } + + openingHtmlElem = htmlOpenElementRe.findAllMatches(sample, matches); + + if (openingHtmlElem > 5) + { + return true; + } + return false; +} + +void ScoreBuilder::checkBadSourcesForLearning(double reputation, std::string& source, std::string& userAgent) +{ + if (m_fpStore.count == 0) + { + return; + } + m_fpStore.count++; + + if (reputation < 2.0) + { + if (m_fpStore.hasUaItem(userAgent)) + { + m_fpStore.uaItems.erase(userAgent); + } + if (m_fpStore.hasIpItem(source)) + { + m_fpStore.ipItems.erase(source); + } + } + + if (m_fpStore.count >= GENERATE_FALSE_POSITIVES_LIST_THRESHOLD) + { + m_fpStore.appendKeywordsSetsIntersectionToList(m_falsePositivesSetsIntersection); + m_fpStore.clear(); + } +} + +void ScoreBuilder::pumpKeywordScore(ScoreBuilderData& data, const std::string &poolName, bool doBackup) +{ + std::map::iterator poolIt = m_keywordsScorePools.find(poolName); + + if (poolIt == m_keywordsScorePools.end()) { + dbgDebug(D_WAAP_SCORE_BUILDER) << "pumpKeywordScore() is called with unknown poolName='" << poolName << + "'. 
Creating the pool."; + m_keywordsScorePools[poolName] = KeywordsScorePool(); + } + + poolIt = m_keywordsScorePools.find(poolName); + if (poolIt == m_keywordsScorePools.end()) { + dbgWarning(D_WAAP_SCORE_BUILDER) << "pumpKeywordScore() failed to create pool '" << poolName << "'"; + return; + } + + KeywordsScorePool &keywordsScorePool = poolIt->second; + + if (isHtmlContent(data.m_sample)) + { + dbgTrace(D_WAAP_SCORE_BUILDER) << "pumpKeywordScore: isHtmlContent -> do not process"; + return; + } + for (const std::string &keyword : data.m_keywordsMatches) { + pumpKeywordScorePerKeyword(data, keyword, KEYWORD_TYPE_KEYWORD, keywordsScorePool); + } + + for (const std::string &keyword : data.m_keywordsCombinations) { + pumpKeywordScorePerKeyword(data, keyword, KEYWORD_TYPE_COMBINATION, keywordsScorePool); + } + + if (doBackup && m_scoreTrigger >= SCORE_CALCULATION_THRESHOLD) + { + calcScore(poolName); + if (m_pWaapAssetState != NULL) + { + m_pWaapAssetState->updateScores(); + } + backupWorker(); + } +} + +void ScoreBuilder::calcScore(const std::string &poolName) +{ + std::map::iterator poolIt = m_keywordsScorePools.find(poolName); + + if (poolIt == m_keywordsScorePools.end()) { + dbgDebug(D_WAAP_SCORE_BUILDER) << "calcScore() is called with unknown poolName='" << poolName << + "'. Creating the pool."; + m_keywordsScorePools[poolName] = KeywordsScorePool(); + } + + poolIt = m_keywordsScorePools.find(poolName); + if (poolIt == m_keywordsScorePools.end()) { + dbgWarning(D_WAAP_SCORE_BUILDER) << "calcScore() failed to create pool '" << poolName << "'"; + return; + } + + KeywordsScorePool &keywordsScorePool = poolIt->second; + KeywordDataMap &keywordsDataMap = keywordsScorePool.m_keywordsDataMap; + KeywordsStats &keywordsStats = keywordsScorePool.m_stats; + + m_scoreTrigger = 0; + + for (auto fpKeyword : m_falsePositivesSetsIntersection) + { + if (keywordsDataMap.find(fpKeyword) == keywordsScorePool.m_keywordsDataMap.end()) + { + keywordsDataMap[fpKeyword]; + } + + keywordsDataMap[fpKeyword].falsePositiveCtr++; + keywordsStats.falsePositiveCtr++; + } + + m_falsePositivesSetsIntersection.clear(); + + KeywordDataMap newKeywordsDataMap; + + double tpAverageLog = log(keywordsStats.truePositiveCtr / std::max(keywordsDataMap.size(), (size_t)1) + 101); + for (auto keyword : keywordsDataMap) + { + double tpLog = log(keyword.second.truePositiveCtr + 1); + double tpScore = tpLog / (tpLog + tpAverageLog / 4 + 1); // range [0,1) + int fpAvg = 1; + keyword.second.score = 10 * tpScore * (fpAvg + 1) / (fpAvg + (keyword.second.falsePositiveCtr * 5) + 2); + + if (keyword.second.score > 1 || + keyword.second.falsePositiveCtr < 10 || + keyword.second.type == KEYWORD_TYPE_KEYWORD) + { + newKeywordsDataMap[keyword.first] = keyword.second; + } + } + keywordsDataMap = newKeywordsDataMap; +} + +void ScoreBuilder::snap() +{ + // Copy data from all mutable score pools to "snapshot" keyword->scores map + for (const std::pair &pool : m_keywordsScorePools) { + const std::string &poolName = pool.first; + const KeywordsScorePool& keywordScorePool = pool.second; + m_snapshotKwScoreMap[poolName]; + + for (const std::pair &kwData : keywordScorePool.m_keywordsDataMap) + { + const std::string &kwName = kwData.first; + double kwScore = kwData.second.score; + m_snapshotKwScoreMap[poolName][kwName] = kwScore; + } + } +} + +double ScoreBuilder::getSnapshotKeywordScore(const std::string &keyword, double defaultScore, + const std::string &poolName) const +{ + std::map::const_iterator poolIt = m_snapshotKwScoreMap.find(poolName); + if (poolIt 
== m_snapshotKwScoreMap.end()) { + dbgTrace(D_WAAP_SCORE_BUILDER) << "pool " << poolName << " does not exist. Getting score from base pool"; + poolIt = m_snapshotKwScoreMap.find(KEYWORDS_SCORE_POOL_BASE); + } + + if (poolIt == m_snapshotKwScoreMap.end()) { + dbgDebug(D_WAAP_SCORE_BUILDER) << + "base pool does not exist! This is probably a bug. Returning default score " << defaultScore; + return defaultScore; + } + + const KeywordScoreMap &kwScoreMap = poolIt->second; + + KeywordScoreMap::const_iterator kwScoreFound = kwScoreMap.find(keyword); + if (kwScoreFound == kwScoreMap.end()) { + dbgTrace(D_WAAP_SCORE_BUILDER) << "keywordScore:'" << keyword << "': " << defaultScore << + " (default, keyword not found in pool '" << poolName << "')"; + return defaultScore; + } + + dbgTrace(D_WAAP_SCORE_BUILDER) << "keywordScore:'" << keyword << "': " << kwScoreFound->second << " (pool '" << + poolName << "')"; + return kwScoreFound->second; +} + +keywords_set ScoreBuilder::getIpItemKeywordsSet(std::string ip) +{ + return m_fpStore.ipItems[ip]; +} + +keywords_set ScoreBuilder::getUaItemKeywordsSet(std::string userAgent) +{ + return m_fpStore.uaItems[userAgent]; +} + +unsigned int ScoreBuilder::getFpStoreCount() +{ + return m_fpStore.count; +} + +void ScoreBuilder::mergeScores(const ScoreBuilder& baseScores) +{ + for (const std::pair &pool : baseScores.m_keywordsScorePools) { + const std::string &poolName = pool.first; + if (m_keywordsScorePools.find(poolName) == m_keywordsScorePools.end()) { + m_keywordsScorePools[poolName]; + } + const KeywordsScorePool &baseKeywordsScorePool = pool.second; + m_keywordsScorePools[poolName].mergeScores(baseKeywordsScorePool); + } +} + +void ScoreBuilder::pumpKeywordScorePerKeyword(ScoreBuilderData& data, const std::string& keyword, + KeywordType keywordSource, KeywordsScorePool &keywordsScorePool) +{ + m_scoreTrigger++; + if (data.m_fpClassification == UNKNOWN_TYPE) { + dbgTrace(D_WAAP_SCORE_BUILDER) << + "pumpKeywordScorePerKeyword(): Got UNKNOWN_TYPE as false positive classifiaction " + ", will not pump keywords score"; + return; + } + + if (keywordsScorePool.m_keywordsDataMap.find(keyword) == keywordsScorePool.m_keywordsDataMap.end()) + { + keywordsScorePool.m_keywordsDataMap[keyword]; + } + KeywordData& keyData = keywordsScorePool.m_keywordsDataMap[keyword]; + keyData.type = keywordSource; + + if (data.m_fpClassification == TRUE_POSITIVE && keyData.score < 8) + { + dbgTrace(D_WAAP_SCORE_BUILDER) << + "pumpKeywordScorePerKeyword(): fpClassification = TRUE_POSITIVE for keyword: " << keyword; + keyData.truePositiveCtr++; + keywordsScorePool.m_stats.truePositiveCtr++; + } + else if (data.m_fpClassification == FALSE_POSITIVE && (keyData.score > 0.1 || keyData.truePositiveCtr < 10)) + { + dbgTrace(D_WAAP_SCORE_BUILDER) << + "pumpKeywordScorePerKeyword(): fpClassification = FALSE_POSITIVE for keyword: " << keyword; + m_fpStore.putFalsePositive(data.m_sourceIdentifier, data.m_userAgent, keyword); + } + +} + +void FalsePoisitiveStore::putFalsePositive(const std::string& ip, const std::string& userAgent, + const std::string& keyword) +{ + count = 1; + ipItems[ip].insert(keyword); + uaItems[userAgent].insert(keyword); +} + +bool FalsePoisitiveStore::hasIpItem(const std::string& ip) const +{ + return ipItems.find(ip) != ipItems.end(); +} + +bool FalsePoisitiveStore::hasUaItem(const std::string& ua) const +{ + return uaItems.find(ua) != uaItems.end(); +} + +void FalsePoisitiveStore::appendKeywordsSetsIntersectionToList(std::list& keywordsList) +{ + std::list ipKeywords; + 
std::unordered_set uaKeywords; + + for (auto ip : ipItems) { + for (auto keyword : ip.second) + { + ipKeywords.push_back(keyword); + } + } + + for (auto ua : uaItems) { + for (auto keyword : ua.second) + { + uaKeywords.insert(keyword); + } + } + + for (auto keyword : ipKeywords) + { + if (uaKeywords.find(keyword) != uaKeywords.end()) + { + keywordsList.push_back(keyword); + } + } +} + +void FalsePoisitiveStore::clear() +{ + count = 0; + ipItems.clear(); + uaItems.clear(); +} diff --git a/components/security_apps/waap/waap_clib/ScoreBuilder.h b/components/security_apps/waap/waap_clib/ScoreBuilder.h new file mode 100755 index 0000000..c817068 --- /dev/null +++ b/components/security_apps/waap/waap_clib/ScoreBuilder.h @@ -0,0 +1,173 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include +#include "FpMitigation.h" +#include "Waf2Util.h" +#include "picojson.h" +#include "i_serialize.h" +#include +#include +#include + +struct ScoreBuilderData { + std::string m_sourceIdentifier; + std::string m_userAgent; + std::string m_sample; + double m_relativeReputation; + PolicyCounterType m_fpClassification; + std::vector m_keywordsMatches; + std::vector m_keywordsCombinations; + + ScoreBuilderData(); + ScoreBuilderData( + const std::string &sourceIdentifier, + const std::string &userAgent, + const std::string &sample, + double relativeReputation, + PolicyCounterType type, + const std::vector &keywordsMatches, + const std::vector &keywordsCombinations); +}; +enum KeywordType { + KEYWORD_TYPE_UNKNOWN, + KEYWORD_TYPE_KEYWORD, + KEYWORD_TYPE_COMBINATION +}; + +struct KeywordData { + KeywordData() : truePositiveCtr(0), falsePositiveCtr(0), score(0.0), type(KEYWORD_TYPE_UNKNOWN) {} + + unsigned int truePositiveCtr; + unsigned int falsePositiveCtr; + double score; + KeywordType type; + + template + void serialize(Archive& ar) { + ar(cereal::make_nvp("false_positives", falsePositiveCtr), + cereal::make_nvp("true_positives", truePositiveCtr), + cereal::make_nvp("score", score), + cereal::make_nvp("type", type)); + } +}; + +struct KeywordsStats { + KeywordsStats() : truePositiveCtr(0), falsePositiveCtr(0) {} + + template + void serialize(Archive& ar) { + ar(cereal::make_nvp("false_positives", falsePositiveCtr), + cereal::make_nvp("true_positives", truePositiveCtr)); + } + + unsigned int truePositiveCtr; + unsigned int falsePositiveCtr; +}; + +typedef std::unordered_set keywords_set; + +struct FalsePoisitiveStore { + unsigned int count; + std::unordered_map ipItems; + std::unordered_map uaItems; + + FalsePoisitiveStore() : count(0), ipItems(), uaItems() {} + void putFalsePositive(const std::string& ip, const std::string& userAgent, const std::string& keyword); + bool hasIpItem(const std::string& ip) const; + bool hasUaItem(const std::string& ua) const; + void appendKeywordsSetsIntersectionToList(std::list& keywordsList); + void clear(); +}; + +class I_WaapAssetState; + +typedef std::unordered_map KeywordDataMap; + +struct KeywordsScorePool { + 
KeywordDataMap m_keywordsDataMap; + KeywordsStats m_stats; + + KeywordsScorePool(); + + template + KeywordsScorePool(_A &iarchive) + { + KeywordDataMap tmpKeyordsDataMap; + iarchive(cereal::make_nvp("keyword_data", tmpKeyordsDataMap), + cereal::make_nvp("keyword_stats", m_stats)); + + // Decode keys (originally urlencoded in the source file) + for (auto item : tmpKeyordsDataMap) { + std::string key = item.first; + key.erase(unquote_plus(key.begin(), key.end()), key.end()); + m_keywordsDataMap[key] = item.second; + } + } + + template + void serialize(Archive& ar) { + ar( + cereal::make_nvp("keyword_data", m_keywordsDataMap), + cereal::make_nvp("keyword_stats", m_stats) + ); + } + + void mergeScores(const KeywordsScorePool& baseScores); +}; + +class ScoreBuilder : public SerializeToFilePeriodically { +public: + ScoreBuilder(I_WaapAssetState* pWaapAssetState); + ScoreBuilder(I_WaapAssetState* pWaapAssetState, ScoreBuilder& baseScores); + ~ScoreBuilder() {} + + void analyzeFalseTruePositive(ScoreBuilderData& data, const std::string &poolName, bool doBackup=true); + + bool isHtmlContent(std::string sample); + + void checkBadSourcesForLearning(double reputation, std::string& source, std::string& userAgent); + void pumpKeywordScore(ScoreBuilderData& data, const std::string &poolName, bool doBackup=true); + void calcScore(const std::string &poolName); + + void snap(); + double getSnapshotKeywordScore(const std::string &keyword, double defaultScore, const std::string &poolName) const; + + keywords_set getIpItemKeywordsSet(std::string ip); + keywords_set getUaItemKeywordsSet(std::string userAgent); + unsigned int getFpStoreCount(); + + virtual void serialize(std::ostream& stream); + virtual void deserialize(std::istream& stream); + + void mergeScores(const ScoreBuilder& baseScores); +protected: + typedef std::map KeywordScoreMap; + + void pumpKeywordScorePerKeyword(ScoreBuilderData& data, + const std::string& keyword, + KeywordType keywordSource, + KeywordsScorePool &keywordsScorePool); + + unsigned int m_scoreTrigger; + FalsePoisitiveStore m_fpStore; + std::map m_keywordsScorePools; // live data continuously updated during traffic + std::map m_snapshotKwScoreMap; // the snapshot is updated only by a call to snap() + std::list m_falsePositivesSetsIntersection; + I_WaapAssetState* m_pWaapAssetState; +}; diff --git a/components/security_apps/waap/waap_clib/SecurityHeadersPolicy.cc b/components/security_apps/waap/waap_clib/SecurityHeadersPolicy.cc new file mode 100644 index 0000000..860ade0 --- /dev/null +++ b/components/security_apps/waap/waap_clib/SecurityHeadersPolicy.cc @@ -0,0 +1,104 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
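Aside, not part of the patch: the per-keyword score computed in ScoreBuilder::calcScore() above grows with the log of a keyword's true-positive count (damped by the pool-wide average) and is pulled down sharply by false positives. The sketch below lifts that arithmetic into a standalone function so its behavior can be inspected directly; the function name keywordScore and the sample counters are illustrative.

// Sketch only: mirrors the score formula used in calcScore().
#include <cmath>
#include <cstddef>
#include <algorithm>
#include <iostream>

double keywordScore(unsigned truePositives, unsigned falsePositives,
                    unsigned poolTruePositives, std::size_t poolKeywords)
{
    double tpAverageLog = std::log(poolTruePositives / std::max(poolKeywords, (std::size_t)1) + 101.0);
    double tpLog = std::log(truePositives + 1.0);
    double tpScore = tpLog / (tpLog + tpAverageLog / 4 + 1);   // in [0, 1)
    int fpAvg = 1;
    return 10 * tpScore * (fpAvg + 1) / (fpAvg + (falsePositives * 5) + 2);
}

int main()
{
    // More confirmed true positives raise the score; false positives pull it down sharply.
    std::cout << keywordScore(50, 0, 1000, 200) << std::endl;
    std::cout << keywordScore(50, 3, 1000, 200) << std::endl;
    return 0;
}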
+ +#include "SecurityHeadersPolicy.h" +#include "Waf2Util.h" + +namespace Waap { +namespace SecurityHeaders { + +void +Policy::StrictTransportSecurity::buildInjectStr() { + if (preload && includeSubDomains) + { + directivesStr = "max-age=" + maxAge + "; includeSubDomains; preload"; + } + else if (includeSubDomains) + { + directivesStr = "max-age=" + maxAge + "; includeSubDomains"; + } + else if (preload) + { + directivesStr = "max-age=" + maxAge + "; preload"; + } + else + { + directivesStr = "max-age=" + maxAge; + } + headerDetails = std::make_pair(headerName, directivesStr); +} + +void +Policy::XFrameOptions::buildInjectStr() { + headerDetails = std::make_pair(headerName, directivesStr); +} + +void +Policy::XContentTypeOptions::buildInjectStr() { + headerDetails = std::make_pair(headerName, directivesStr); +} + +bool +Policy::SecurityHeadersEnforcement::operator==(const Policy::SecurityHeadersEnforcement &other) const +{ + return enable == other.enable; +} + +bool +Policy::XFrameOptions::operator==(const XFrameOptions &other) const +{ + return sameOrigin == other.sameOrigin && directivesStr == other.directivesStr && + deny == other.deny && headerName == other.headerName && + headerDetails.first == other.headerDetails.first && + headerDetails.second == other.headerDetails.second; +} + +bool +Policy::XContentTypeOptions::operator==(const XContentTypeOptions &other) const +{ + return directivesStr == other.directivesStr && headerName == other.headerName && + headerDetails.first == other.headerDetails.first && headerDetails.second == other.headerDetails.second; +} + +bool +Policy::StrictTransportSecurity::operator==(const StrictTransportSecurity &other) const +{ + return maxAge == other.maxAge && directivesStr == other.directivesStr && + includeSubDomains == other.includeSubDomains && headerName == other.headerName && + preload == other.preload && headerDetails.first == other.headerDetails.first && + headerDetails.second == other.headerDetails.second; +} + +bool +Policy::Headers::operator==(const Headers &other) const +{ + return other.headersInjectStr == headersInjectStr && hsts == other.hsts && + xContentTypeOptions == other.xContentTypeOptions && xFrameOptions == other.xFrameOptions; +} + +bool +Policy::operator==(const Policy &other) const +{ + return headers == other.headers && m_securityHeaders == other.m_securityHeaders; +} + +State::State(const std::shared_ptr &policy) +{ + for(auto headerStr : policy->headers.headersInjectStr) + { + headersInjectStrs.push_back(headerStr); + } +} + +} +} diff --git a/components/security_apps/waap/waap_clib/SecurityHeadersPolicy.h b/components/security_apps/waap/waap_clib/SecurityHeadersPolicy.h new file mode 100644 index 0000000..2782e2b --- /dev/null +++ b/components/security_apps/waap/waap_clib/SecurityHeadersPolicy.h @@ -0,0 +1,225 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once +#include +#include +#include +#include +#include +#include "debug.h" + +USE_DEBUG_FLAG(D_WAAP); + +namespace Waap { +namespace SecurityHeaders { +struct Policy { + struct StrictTransportSecurity { + void setDefaults() + { + maxAge = "31536000"; + includeSubDomains = true; + preload = false; + buildInjectStr(); + } + + template + void serialize(_A &ar) { + ar(cereal::make_nvp("maxAge", maxAge)); + ar(cereal::make_nvp("includeSubDomains", includeSubDomains)); + ar(cereal::make_nvp("preload", preload)); + buildInjectStr(); + } + + void buildInjectStr(); + bool operator==(const StrictTransportSecurity &other) const; + + const std::string headerName = "Strict-Transport-Security"; + std::string maxAge; + bool includeSubDomains; + bool preload; + std::string directivesStr; + // string that define exactly how the header should be inject after collecting all data. + std::pair headerDetails; + }; + + struct XFrameOptions { + + void setDefaults() + { + directivesStr = sameOrigin; + buildInjectStr(); + } + + template + void serialize(_A &ar) { + + std::string value; + ar(cereal::make_nvp("directive", value)); + if(boost::iequals(value, "sameOrigin")) + { + directivesStr = sameOrigin; + } + else if(boost::iequals(value, "deny")) + { + directivesStr = deny; + } + else + { + throw cereal::Exception( + "Invalid value for SecurityHeaders::Policy::XFrameOptions::directive='" + value + "'"); + } + + buildInjectStr(); + } + + void buildInjectStr(); + bool operator==(const XFrameOptions &other) const; + + const std::string sameOrigin = "SAMEORIGIN"; + const std::string deny = "DENY"; + const std::string headerName = "X-Frame-Options"; + std::string directivesStr; + // string that define exactly how the header should be inject after collecting all data. + std::pair headerDetails; + }; + + struct XContentTypeOptions + { + void setDefaults() + { + directivesStr = nosniff; + buildInjectStr(); + } + + template + void serialize(_A &ar) { + + std::string value; + ar(cereal::make_nvp("directive", value)); + if(boost::iequals(value, "nosniff")) + { + directivesStr = nosniff; + } + else + { + throw cereal::Exception( + "Invalid value for SecurityHeaders::Policy::XContentTypeOptions::directive='" + value + "'"); + } + + buildInjectStr(); + } + + void buildInjectStr(); + bool operator==(const XContentTypeOptions &other) const; + const std::string headerName = "X-Content-Type-Options"; + const std::string nosniff = "nosniff"; + std::string directivesStr; + // string that define exactly how the header should be inject after collecting all data. + std::pair headerDetails; + }; + + struct Headers { + + template + void serialize(_A &ar) { + try + { + ar(cereal::make_nvp("strictTransportSecurity", hsts)); + headersInjectStr.push_back( + std::make_pair(hsts.headerDetails.first, hsts.headerDetails.second)); + } + catch (std::runtime_error& e) + { + dbgTrace(D_WAAP) << "Strict-Transport-Security header is not configured. Loading defaults."; + hsts.setDefaults(); + headersInjectStr.push_back( + std::make_pair(hsts.headerDetails.first, hsts.headerDetails.second)); + } + try + { + ar(cereal::make_nvp("xFrameOptions", xFrameOptions)); + headersInjectStr.push_back( + std::make_pair(xFrameOptions.headerDetails.first, xFrameOptions.headerDetails.second)); + } + catch (std::runtime_error& e) + { + dbgTrace(D_WAAP) << "X-Frame-Options header is not configured. 
Loading defaults."; + xFrameOptions.setDefaults(); + headersInjectStr.push_back( + std::make_pair(xFrameOptions.headerDetails.first, xFrameOptions.headerDetails.second)); + } + try + { + ar(cereal::make_nvp("xContentTypeOptions", xContentTypeOptions)); + headersInjectStr.push_back( + std::make_pair(xContentTypeOptions.headerDetails.first, xContentTypeOptions.headerDetails.second)); + } + catch (std::runtime_error& e) + { + dbgTrace(D_WAAP) << "X Content Type Options header is not configured. Loading defaults."; + xContentTypeOptions.setDefaults(); + headersInjectStr.push_back( + std::make_pair(xContentTypeOptions.headerDetails.first, xContentTypeOptions.headerDetails.second)); + } + } + + bool operator==(const Headers &other) const; + // will contain all strings that should be injected as headers. + std::vector> headersInjectStr; + StrictTransportSecurity hsts; + XFrameOptions xFrameOptions; + XContentTypeOptions xContentTypeOptions; + }; + + class SecurityHeadersEnforcement + { + public: + template + SecurityHeadersEnforcement(_A &ar) + : + enable(false) + { + std::string level; + ar(cereal::make_nvp("securityHeadersEnforcement", level)); + level = boost::algorithm::to_lower_copy(level); + if (level == "prevent") { + enable = true; + } + } + + bool operator==(const Policy::SecurityHeadersEnforcement &other) const; + + bool enable; + }; + + Headers headers; + SecurityHeadersEnforcement m_securityHeaders; + + bool operator==(const Policy &other) const; + + template + Policy(_A& ar) : m_securityHeaders(ar) { + ar(cereal::make_nvp("securityHeaders", headers)); + } + +}; +class State { + public: + const std::shared_ptr policy; + State(const std::shared_ptr &policy); + std::vector> headersInjectStrs; +}; + +} +} diff --git a/components/security_apps/waap/waap_clib/Serializator.cc b/components/security_apps/waap/waap_clib/Serializator.cc new file mode 100755 index 0000000..71cf80a --- /dev/null +++ b/components/security_apps/waap/waap_clib/Serializator.cc @@ -0,0 +1,850 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "i_serialize.h" +#include "waap.h" +#include "Waf2Util.h" +#include "WaapAssetState.h" +#include "i_instance_awareness.h" +#include +#include +#include +#include "debug.h" +#include +#include +#include +#include "SyncLearningNotification.h" +#include "report_messaging.h" +#include "compression_utils.h" +#include "config.h" + +USE_DEBUG_FLAG(D_WAAP_CONFIDENCE_CALCULATOR); + +namespace ch = std::chrono; +using namespace std; +typedef ch::duration> days; + +// Define interval between successful sync times +static const ch::minutes assetSyncTimeSliceLength(10); +static const int remoteSyncMaxPollingAttempts = 10; +static const string defaultLearningHost = "appsec-learning-svc"; +static const string defaultSharedStorageHost = "appsec-shared-storage-svc"; + +#define SHARED_STORAGE_HOST_ENV_NAME "SHARED_STORAGE_HOST" +#define LEARNING_HOST_ENV_NAME "LEARNING_HOST" + +static bool +isGZipped(const std::string &stream) +{ + if (stream.size() < 2) return false; + auto unsinged_stream = reinterpret_cast(stream.data()); + return unsinged_stream[0] == 0x1f && unsinged_stream[1] == 0x8b; +} + +bool RestGetFile::loadJson(const std::string& json) +{ + std::string json_str = json; + if (isGZipped(json_str) == 0) + { + return ClientRest::loadJson(json_str); + } + auto compression_stream = initCompressionStream(); + DecompressionResult res = decompressData( + compression_stream, + json_str.size(), + reinterpret_cast(json_str.c_str())); + + if (res.ok){ + json_str = std::string((const char *)res.output, res.num_output_bytes); + if (res.output) free(res.output); + res.output = nullptr; + res.num_output_bytes = 0; + } + + finiCompressionStream(compression_stream); + return ClientRest::loadJson(json_str); +} + +Maybe RestGetFile::genJson() const +{ + Maybe json = ClientRest::genJson(); + if (json.ok()) + { + std::string data = json.unpack(); + auto compression_stream = initCompressionStream(); + CompressionResult res = compressData( + compression_stream, + CompressionType::GZIP, + data.size(), + reinterpret_cast(data.c_str()), + true); + finiCompressionStream(compression_stream); + if (!res.ok) { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to gzip data"; + return genError("Failed to compress data"); + } + data = std::string((const char *)res.output, res.num_output_bytes); + json = data; + + if (res.output) free(res.output); + res.output = nullptr; + res.num_output_bytes = 0; + } + return json; +} +SerializeToFilePeriodically::SerializeToFilePeriodically(std::chrono::seconds pollingIntervals, std::string filePath) : + SerializeToFileBase(filePath), + m_lastSerialization(0), + m_interval(pollingIntervals) +{ + I_TimeGet* timer = Singleton::Consume::by(); + + if (timer != NULL) + { + m_lastSerialization = timer->getMonotonicTime(); + } +} + +SerializeToFilePeriodically::~SerializeToFilePeriodically() +{ + +} + +void SerializeToFilePeriodically::backupWorker() +{ + I_TimeGet* timer = Singleton::Consume::by(); + auto currentTime = timer->getMonotonicTime(); + + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "backup worker: current time: " << currentTime.count(); + + if (currentTime - m_lastSerialization >= m_interval) + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "backup worker: backing up data"; + m_lastSerialization = currentTime; + // save data + saveData(); + + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "backup worker: data is backed up"; + } +} + +void SerializeToFilePeriodically::setInterval(std::chrono::seconds newInterval) +{ + if (m_interval != newInterval) + { + m_interval = newInterval; + 
I_TimeGet* timer = Singleton::Consume::by(); + m_lastSerialization = timer->getMonotonicTime(); + } +} + +SerializeToFileBase::SerializeToFileBase(std::string fileName) : m_filePath(fileName) +{ + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "SerializeToFileBase::SerializeToFileBase() fname='" << m_filePath + << "'"; +} + +SerializeToFileBase::~SerializeToFileBase() +{ + +} + +void SerializeToFileBase::saveData() +{ + std::fstream filestream; + + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "saving to file: " << m_filePath; + filestream.open(m_filePath, std::fstream::out); + + std::stringstream ss; + + if (filestream.is_open() == false) { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "failed to open file: " << m_filePath << " Error: " + << strerror(errno); + return; + } + + serialize(ss); + filestream << ss.str(); + filestream.close(); +} + +void SerializeToFileBase::loadFromFile(std::string filePath) +{ + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "loadFromFile() file: " << filePath; + std::fstream filestream; + + filestream.open(filePath, std::fstream::in); + + if (filestream.is_open() == false) { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "failed to open file: " << filePath << " Error: " << + strerror(errno); + if (!Singleton::exists() || errno != ENOENT) + { + return; + } + // if we fail to open a file because it doesn't exist and instance awareness is present + // try to strip the unique ID from the path and load the file from the parent directory + // that might exist in previous run where instance awareness didn't exits. + I_InstanceAwareness* instanceAwareness = Singleton::Consume::by(); + Maybe id = instanceAwareness->getUniqueID(); + if (!id.ok()) + { + return; + } + std::string idStr = "/" + id.unpack() + "/"; + size_t idPosition = filePath.find(idStr); + if (idPosition != std::string::npos) + { + filePath.erase(idPosition, idStr.length() - 1); + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "retry to load file from : " << filePath; + loadFromFile(filePath); + } + return; + } + + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "loading from file: " << filePath; + + int length; + filestream.seekg(0, std::ios::end); // go to the end + length = filestream.tellg(); // report location (this is the length) + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "file length: " << length; + assert(length >= 0); // length -1 really happens if filePath is a directory (!) 
+ char* buffer = new char[length]; // allocate memory for a buffer of appropriate dimension + filestream.seekg(0, std::ios::beg); // go back to the beginning + if (!filestream.read(buffer, length)) // read the whole file into the buffer + { + filestream.close(); + delete[] buffer; + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to read file, file: " << filePath; + return; + } + filestream.close(); + + std::string dataObfuscated(buffer, length); + + delete[] buffer; + + std::stringstream ss(dataObfuscated); + + try + { + deserialize(ss); + } + catch (std::runtime_error & e) { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "failed to deserialize file: " << m_filePath << ", error: " << + e.what(); + } +} + +void SerializeToFileBase::restore() +{ + loadFromFile(m_filePath); +} + +void SerializeToFileBase::setFilePath(const std::string& new_file_path) +{ + m_filePath = new_file_path; +} + + +RemoteFilesList::RemoteFilesList() : files(), filesPathsList() +{ + +} + +// parses xml instead of json +// extracts a file list in +bool RemoteFilesList::loadJson(const std::string& xml) +{ + xmlDocPtr doc; // the resulting document tree + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "XML input: " << xml; + doc = xmlParseMemory(xml.c_str(), xml.length()); + + if (doc == NULL) { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to parse " << xml; + return false; + } + + xmlNodePtr node = doc->children; + if (node->children == NULL) + { + return false; + } + node = node->children; + + xmlChar *contents_name = xmlCharStrdup("Contents"); + xmlChar *key_name = xmlCharStrdup("Key"); + xmlChar *last_modified_name = xmlCharStrdup("LastModified"); + + // allows to get reference to the internal member and modify it + files.setActive(true); + while (node != NULL) + { + if (xmlStrEqual(contents_name, node->name) == 1) + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Found the Contents element"; + xmlNodePtr contents_node = node->children; + std::string file; + std::string lastModified; + while (contents_node != NULL) + { + if (xmlStrEqual(key_name, contents_node->name) == 1) + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Found the Key element"; + xmlChar* xml_file = xmlNodeGetContent(contents_node); + file = std::string(reinterpret_cast(xml_file)); + xmlFree(xml_file); + } + if (xmlStrEqual(last_modified_name, contents_node->name) == 1) + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Found the LastModified element"; + xmlChar* xml_file = xmlNodeGetContent(contents_node); + lastModified = std::string(reinterpret_cast(xml_file)); + xmlFree(xml_file); + } + if (!file.empty() && !lastModified.empty()) + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Adding the file: " << file << + " last modified: " << lastModified; + break; + } + contents_node = contents_node->next; + } + files.get().push_back(FileMetaData{ file, lastModified }); + filesPathsList.push_back(file); + } + node = node->next; + } + + // free up memory + xmlFree(last_modified_name); + xmlFree(contents_name); + xmlFree(key_name); + xmlFreeDoc(doc); + return true; +} + +const std::vector& RemoteFilesList::getFilesList() const +{ + return filesPathsList; +} + +const std::vector& RemoteFilesList::getFilesMetadataList() const +{ + return files.get(); +} + + +SerializeToLocalAndRemoteSyncBase::SerializeToLocalAndRemoteSyncBase( + std::chrono::minutes interval, + std::chrono::seconds waitForSync, + const std::string& filePath, + const std::string& remotePath, + const std::string& assetId, + const std::string& owner) + : + SerializeToFileBase(filePath), + 
m_remotePath(remotePath), + m_interval(0), + m_owner(owner), + m_pMainLoop(nullptr), + m_waitForSync(waitForSync), + m_workerRoutineId(0), + m_daysCount(0), + m_windowsCount(0), + m_intervalsCounter(0), + m_remoteSyncEnabled(true), + m_assetId(assetId), + m_shared_storage_host(genError("not set")), + m_learning_host(genError("not set")) +{ + dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR) << "Create SerializeToLocalAndRemoteSyncBase. assetId='" << assetId << + "', owner='" << m_owner << "'"; + + if (Singleton::exists() && + Singleton::Consume::by()->getOrchestrationMode() == + OrchestrationMode::HYBRID) { + char* sharedStorageHost = getenv(SHARED_STORAGE_HOST_ENV_NAME); + if (sharedStorageHost != NULL) { + m_shared_storage_host = string(sharedStorageHost); + } else { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << + "shared storage host name(" << + SHARED_STORAGE_HOST_ENV_NAME << + ") is not set"; + } + char* learningHost = getenv(LEARNING_HOST_ENV_NAME); + if (learningHost != NULL) { + m_learning_host = string(learningHost); + } else { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << + "learning host name(" << + SHARED_STORAGE_HOST_ENV_NAME << + ") is not set"; + } + } + if (remotePath != "") { + // remote path is /// + auto parts = split(remotePath, '/'); + if (parts.size() > 2) { + size_t offset = 0; + if (parts[0].empty()) { + offset = 1; + } + std::string type = ""; + for (size_t i = offset + 2; i < parts.size(); i++) + { + type += type.empty() ? parts[i] : "/" + parts[i]; + } + m_type = type; + } + } + m_pMainLoop = Singleton::Consume::by(); + setInterval(interval); +} + +bool SerializeToLocalAndRemoteSyncBase::isBase() +{ + return m_remotePath == ""; +} + +std::string SerializeToLocalAndRemoteSyncBase::getUri() +{ + static const string hybridModeUri = "/api"; + static const string onlineModeUri = "/storage/waap"; + if (Singleton::exists() && + Singleton::Consume::by()->getOrchestrationMode() == + OrchestrationMode::HYBRID) return hybridModeUri; + return onlineModeUri; +} + +size_t SerializeToLocalAndRemoteSyncBase::getIntervalsCount() +{ + return m_intervalsCounter; +} + +SerializeToLocalAndRemoteSyncBase::~SerializeToLocalAndRemoteSyncBase() +{ + +} + +std::string SerializeToLocalAndRemoteSyncBase::getWindowId() +{ + return "window_" + std::to_string(m_daysCount) + "_" + std::to_string(m_windowsCount); +} + +std::string SerializeToLocalAndRemoteSyncBase::getPostDataUrl() +{ + std::string agentId = Singleton::Consume::by()->getAgentId(); + if (Singleton::exists()) + { + I_InstanceAwareness* instance = Singleton::Consume::by(); + Maybe uniqueId = instance->getUniqueID(); + if (uniqueId.ok()) + { + agentId += "/" + uniqueId.unpack(); + } + } + std::string windowId = getWindowId(); + return getUri() + "/" + m_remotePath + "/" + windowId + "/" + agentId + "/data.data"; +} +void SerializeToLocalAndRemoteSyncBase::setRemoteSyncEnabled(bool enabled) +{ + m_remoteSyncEnabled = enabled; +} + +void SerializeToLocalAndRemoteSyncBase::setInterval(std::chrono::seconds newInterval) +{ + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "setInterval: from " << m_interval.count() << " to " << + newInterval.count() << " seconds. 
assetId='" << m_assetId << "', owner='" << m_owner << "'"; + + if (newInterval == m_interval) + { + return; + } + + m_interval = newInterval; + + if (m_workerRoutineId != 0) + { + return; + } + I_MainLoop::Routine syncRoutineOnLoad = [this]() { + I_TimeGet* timer = Singleton::Consume::by(); + ch::microseconds timeBeforeSyncWorker = timer->getWalltime(); + ch::microseconds timeAfterSyncWorker = timeBeforeSyncWorker; + while (true) + { + m_daysCount = ch::duration_cast(timeBeforeSyncWorker).count(); + + ch::microseconds timeSinceMidnight = timeBeforeSyncWorker - ch::duration_cast(timeBeforeSyncWorker); + m_windowsCount = timeSinceMidnight / m_interval; + + // Distribute syncWorker tasks for different assets spread over assetSyncTimeSliceLengthintervals + // It is guaranteed that for the same asset, sync events will start at the same time on all + // http_transaction_host instances. + size_t slicesCount = m_interval / assetSyncTimeSliceLength; + size_t sliceIndex = 0; + if (slicesCount != 0 && m_assetId != "") { + sliceIndex = std::hash{}(m_assetId) % slicesCount; + } + ch::seconds sliceOffset = assetSyncTimeSliceLength * sliceIndex; + + ch::microseconds remainingTime = m_interval - (timeAfterSyncWorker - timeBeforeSyncWorker) - + timeBeforeSyncWorker % m_interval + sliceOffset; + + if (remainingTime > m_interval) { + // on load between trigger and offset remaining time is larger than the interval itself + remainingTime -= m_interval; + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "adjusting remaining time: " << remainingTime.count(); + if (timeBeforeSyncWorker.count() != 0) + { + auto updateTime = timeBeforeSyncWorker - m_interval; + m_daysCount = ch::duration_cast(updateTime).count(); + + ch::microseconds timeSinceMidnight = updateTime - ch::duration_cast(updateTime); + m_windowsCount = timeSinceMidnight / m_interval; + } + } + + if (remainingTime < ch::seconds(0)) { + // syncWorker execution time was so large the remaining time became negative + remainingTime = ch::seconds(0); + dbgError(D_WAAP_CONFIDENCE_CALCULATOR) << "syncWorker execution time (owner='" << m_owner << + "', assetId='" << m_assetId << "') is " << + ch::duration_cast(timeAfterSyncWorker - timeBeforeSyncWorker).count() << + " seconds, too long to cause negative remainingTime. 
Waiting 0 seconds..."; + } + + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "current time: " << timeBeforeSyncWorker.count() << " \u00b5s" << + ": assetId='" << m_assetId << "'" << + ", owner='" << m_owner << "'" << + ", daysCount=" << m_daysCount << + ", windowsCount=" << m_windowsCount << + ", interval=" << m_interval.count() << " seconds" + ", seconds till next window=" << ch::duration_cast(remainingTime - sliceOffset).count() << + ", sliceOffset=" << sliceOffset.count() << " seconds" << + ", hashIndex=" << sliceIndex << + ": next wakeup in " << ch::duration_cast(remainingTime).count() << " seconds"; + m_pMainLoop->yield(remainingTime); + + timeBeforeSyncWorker = timer->getWalltime(); + syncWorker(); + timeAfterSyncWorker = timer->getWalltime(); + } + }; + m_workerRoutineId = m_pMainLoop->addOneTimeRoutine( + I_MainLoop::RoutineType::System, + syncRoutineOnLoad, + "Sync worker learning on load" + ); +} + +bool SerializeToLocalAndRemoteSyncBase::localSyncAndProcess() +{ + RemoteFilesList rawDataFiles; + + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Getting files of all agents"; + + bool isSuccessful = sendObjectWithRetry(rawDataFiles, + I_Messaging::Method::GET, + getUri() + "/?list-type=2&prefix=" + m_remotePath + "/" + getWindowId() + "/"); + + if (!isSuccessful) + { + dbgError(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to get the list of files"; + return false; + } + + pullData(rawDataFiles.getFilesList()); + processData(); + saveData(); + postProcessedData(); + return true; +} + +std::chrono::seconds SerializeToLocalAndRemoteSyncBase::getIntervalDuration() const +{ + return m_interval; +} + +void SerializeToLocalAndRemoteSyncBase::updateStateFromRemoteService() +{ + for (int i = 0; i < remoteSyncMaxPollingAttempts; i++) + { + m_pMainLoop->yield(std::chrono::seconds(60)); + RemoteFilesList remoteFiles = getRemoteProcessedFilesList(); + if (remoteFiles.getFilesMetadataList().empty()) + { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "no files generated by the remote service were found"; + continue; + } + std::string lastModified = remoteFiles.getFilesMetadataList().begin()->modified; + if (lastModified != m_lastProcessedModified) + { + m_lastProcessedModified = lastModified; + updateState(remoteFiles.getFilesList()); + dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR) << "Owner: " << m_owner << + ". updated state generated by remote at " << m_lastProcessedModified; + return; + } + } + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "polling for update state timeout. for assetId='" + << m_assetId << "', owner='" << m_owner; + localSyncAndProcess(); +} + +void SerializeToLocalAndRemoteSyncBase::syncWorker() +{ + dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR) << "Running the sync worker for assetId='" << m_assetId << "', owner='" << + m_owner << "'" << " last modified state: " << m_lastProcessedModified; + m_intervalsCounter++; + OrchestrationMode mode = Singleton::exists() ? + Singleton::Consume::by()->getOrchestrationMode() : OrchestrationMode::ONLINE; + + if (!m_remoteSyncEnabled || isBase() || !postData() || + mode == OrchestrationMode::OFFLINE) + { + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) + << "Did not synchronize the data. 
Remote URL: " + << m_remotePath + << " is enabled: " + << std::to_string(m_remoteSyncEnabled); + processData(); + saveData(); + return; + } + + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Waiting for all agents to post their data"; + m_pMainLoop->yield(m_waitForSync); + // check if learning service is operational + if (m_lastProcessedModified == "") + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "check if remote service is operational"; + RemoteFilesList remoteFiles = getRemoteProcessedFilesList(); + if (!remoteFiles.getFilesMetadataList().empty()) + { + m_lastProcessedModified = remoteFiles.getFilesMetadataList()[0].modified; + dbgInfo(D_WAAP_CONFIDENCE_CALCULATOR) << "First sync by remote service: " << m_lastProcessedModified; + } + } + + // check if learning service is enabled + bool isRemoteServiceEnabled = getProfileAgentSettingWithDefault( + true, + "appsecLearningSettings.remoteServiceEnabled"); + + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "using remote service: " << isRemoteServiceEnabled; + if ((m_lastProcessedModified == "" || !isRemoteServiceEnabled) && !localSyncAndProcess()) + { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "local sync and process failed"; + return; + } + + if (mode == OrchestrationMode::HYBRID) { + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "detected running in standalone mode"; + I_AgentDetails *agentDetails = Singleton::Consume::by(); + I_Messaging *messaging = Singleton::Consume::by(); + + SyncLearningObject syncObj(m_assetId, m_type, getWindowId()); + + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::EXTERNAL); + std::string tenant_header = "X-Tenant-Id: " + agentDetails->getTenantId(); + bool ok = messaging->sendNoReplyObject(syncObj, + I_Messaging::Method::POST, + getLearningHost(), + 80, + conn_flags, + "/api/sync", + tenant_header); + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "sent learning sync notification ok: " << ok; + if (!ok) { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "failed to send learning notification"; + } + } else { + SyncLearningNotificationObject syncNotification(m_assetId, m_type, getWindowId()); + + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "sending sync notification: " << syncNotification; + + ReportMessaging( + "sync notification for '" + m_assetId + "'", + ReportIS::AudienceTeam::WAAP, + syncNotification, + false, + MessageTypeTag::WAAP_LEARNING, + ReportIS::Tags::WAF, + ReportIS::Notification::SYNC_LEARNING + ); + } + + if (m_lastProcessedModified != "" && isRemoteServiceEnabled) + { + updateStateFromRemoteService(); + } +} + +void SerializeToLocalAndRemoteSyncBase::restore() +{ + SerializeToFileBase::restore(); + if (!isBase()) + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "merge state from remote service"; + mergeProcessedFromRemote(); + } +} + +RemoteFilesList SerializeToLocalAndRemoteSyncBase::getRemoteProcessedFilesList() +{ + RemoteFilesList remoteFiles; + bool isRemoteServiceEnabled = getProfileAgentSettingWithDefault( + true, + "appsecLearningSettings.remoteServiceEnabled"); + + if (!isRemoteServiceEnabled) + { + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "remote service is disabled"; + return remoteFiles; + } + + bool isSuccessful = sendObject( + remoteFiles, + I_Messaging::Method::GET, + getUri() + "/?list-type=2&prefix=" + m_remotePath + "/remote"); + + if (!isSuccessful) + { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to get the list of files"; + } + return remoteFiles; +} + + +RemoteFilesList SerializeToLocalAndRemoteSyncBase::getProcessedFilesList() +{ + RemoteFilesList processedFilesList = 
getRemoteProcessedFilesList(); + + if (!processedFilesList.getFilesList().empty()) + { + const std::vector& filesMD = processedFilesList.getFilesMetadataList(); + if (filesMD.size() > 1) { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "got more than 1 expected processed file"; + } + if (!filesMD.empty()) { + m_lastProcessedModified = filesMD[0].modified; + } + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "found " << filesMD.size() << " remote service state files. " + "last modified: " << m_lastProcessedModified; + + return processedFilesList; + } + + + bool isSuccessful = sendObject( + processedFilesList, + I_Messaging::Method::GET, + getUri() + "/?list-type=2&prefix=" + m_remotePath + "/processed"); + + if (!isSuccessful) + { + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to get the list of files"; + } + else if (!processedFilesList.getFilesList().empty()) + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "found state files"; + return processedFilesList; + } + // backward compatibility - try to get backup file with the buggy prefix tenantID/assetID/instanceID/ + std::string bcRemotePath = m_remotePath; + size_t pos = bcRemotePath.find('/'); + pos = bcRemotePath.find('/', pos + 1); + if (!Singleton::exists()) + { + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "missing instance of instance awareness," + " can't check backward compatibility"; + return processedFilesList; + } + I_InstanceAwareness* instanceAwareness = Singleton::Consume::by(); + Maybe id = instanceAwareness->getUniqueID(); + if (!id.ok()) + { + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "failed to get instance id err: " << id.getErr() << + ". can't check backward compatibility"; + return processedFilesList; + } + std::string idStr = id.unpack(); + bcRemotePath.insert(pos + 1, idStr + "/"); + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "List of files is empty - trying to get the file from " << + bcRemotePath; + + isSuccessful = sendObject( + processedFilesList, + I_Messaging::Method::GET, + getUri() + "/?list-type=2&prefix=" + bcRemotePath + "/processed"); + + if (!isSuccessful) + { + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to get the list of files"; + } + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "backwards computability: got " + << processedFilesList.getFilesList().size() << " state files"; + return processedFilesList; +} + +void SerializeToLocalAndRemoteSyncBase::mergeProcessedFromRemote() +{ + dbgDebug(D_WAAP_CONFIDENCE_CALCULATOR) << "Merging processed data from remote. assetId='" << m_assetId << + "', owner='" << m_owner << "'"; + m_pMainLoop->addOneTimeRoutine( + I_MainLoop::RoutineType::Offline, + [&]() + { + RemoteFilesList processedFiles = getProcessedFilesList(); + pullProcessedData(processedFiles.getFilesList()); + }, + "Merge processed data from remote for asset Id: " + m_assetId + ", owner:" + m_owner + ); +} + +string +SerializeToLocalAndRemoteSyncBase::getLearningHost() +{ + if (m_learning_host.ok()) { + return *m_learning_host; + } else { + char* learningHost = getenv(LEARNING_HOST_ENV_NAME); + if (learningHost != NULL) { + m_learning_host = string(learningHost); + return learningHost; + } + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "learning host is not set. 
using default"; + } + return defaultLearningHost; +} + +string +SerializeToLocalAndRemoteSyncBase::getSharedStorageHost() +{ + if (m_shared_storage_host.ok()) { + return *m_shared_storage_host; + } else { + char* sharedStorageHost = getenv(SHARED_STORAGE_HOST_ENV_NAME); + if (sharedStorageHost != NULL) { + m_shared_storage_host = string(sharedStorageHost); + return sharedStorageHost; + } + dbgWarning(D_WAAP_CONFIDENCE_CALCULATOR) << "shared storage host is not set. using default"; + } + return defaultSharedStorageHost; +} diff --git a/components/security_apps/waap/waap_clib/Signatures.cc b/components/security_apps/waap/waap_clib/Signatures.cc new file mode 100755 index 0000000..751efbb --- /dev/null +++ b/components/security_apps/waap/waap_clib/Signatures.cc @@ -0,0 +1,278 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "Signatures.h" +#include "i_encryptor.h" +#include "waap.h" +#include + +USE_DEBUG_FLAG(D_WAAP); + +typedef picojson::value::object JsObj; +typedef picojson::value JsVal; +typedef picojson::value::array JsArr; +typedef std::map> filtered_parameters_t; + + +static std::vector to_strvec(const picojson::value::array& jsV) +{ + std::vector r; + + for (auto it = jsV.begin(); it != jsV.end(); ++it) { + r.push_back(it->get()); + } + + return r; +} + +static std::set to_strset(const picojson::value::array& jsA) +{ + std::set r; + + for (auto it = jsA.begin(); it != jsA.end(); ++it) { + r.insert(it->get()); + } + + return r; +} + +static std::map to_regexmap(const picojson::value::object& jsO, bool& error) +{ + std::map r; + + for (auto it = jsO.begin(); it != jsO.end(); ++it) { + const std::string& n = it->first; + // convert name to lowercase now (so we don't need to do it at runtime every time). + std::string n_lower; + for (std::string::const_iterator pCh = n.begin(); pCh != n.end(); ++pCh) { + n_lower += std::tolower(*pCh); + } + const picojson::value& v = it->second; + + if (error) { + // stop loading regexes if there's previous error... + break; + } + + // Pointers to Regex instances are stored instead of instances themselves to avoid + // the need to make the Regex objects copyable. + // However, these pointers must be freed by the holder of the returned map! + // note: in our case this freeing is happening in the destructor of the WaapAssetState class. + r[n] = new Regex(v.get(), error, n_lower); + } + + return r; +} + +static filtered_parameters_t to_filtermap(const picojson::value::object& JsObj) +{ + filtered_parameters_t result; + for (auto it = JsObj.begin(); it != JsObj.end(); ++it) + { + const std::string parameter = it->first; + const picojson::value::array& arr = it->second.get(); + result[parameter] = to_strvec(arr); + } + return result; +} + +std::string genDelimitedKeyValPattern(const std::string& delim) +{ + std::string pattern = "^([^" + delim + "]+?=[^" + delim + "]+?" 
+ delim + ")+" + "([^" + delim + "]+?=[^" + delim + "]+?)" + delim + "?$"; + return pattern; +} + +Signatures::Signatures(const std::string& filepath) : + sigsSource(loadSource(filepath)), + error(false), + m_regexPreconditions(std::make_shared(sigsSource, error)), + words_regex( + to_strvec(sigsSource["words_regex_list"].get()), + error, + "words_regex_list", + m_regexPreconditions + ), + specific_acuracy_keywords_regex( + to_strvec(sigsSource["specific_acuracy_keywords_regex_list"].get()), + error, + "specific_acuracy_keywords_regex_list", + m_regexPreconditions + ), + pattern_regex( + to_strvec(sigsSource["pattern_regex_list"].get()), + error, + "pattern_regex_list", + m_regexPreconditions + ), + un_escape_pattern(sigsSource["un_escape_pattern"].get(), error, "un_escape_pattern"), + quotes_ev_pattern(sigsSource["quotes_ev_pattern"].get(), error, "quotes_ev_pattern"), + comment_ev_pattern(sigsSource["comment_ev_pattern"].get(), error, "comment_ev_pattern"), + quotes_space_ev_pattern( + sigsSource["quotes_space_ev_fast_reg"].get(), error, + "quotes_space_ev_fast_reg" + ), + allowed_text_re(sigsSource["allowed_text_re"].get(), error, "allowed_text_re"), + pipe_split_re( + "([\\w\\=\\-\\_\\.\\,\\(\\)\\[\\]\\/\\%\\s]+?)\\||([\\w\\=\\-\\_\\.\\,\\(\\)\\[\\]\\/\\%\\s]+)|\\|()", + error, + "pipe_decode"), + semicolon_split_re("([\\w\\=\\-\\_\\.\\,\\(\\)\\%]+?);|([\\w\\=\\-\\_\\.\\,\\(\\)\\%]+)|;()", error, "sem_decode"), + longtext_re(sigsSource["longtext_re"].get(), error, "longtext_re"), + nospaces_long_value_re("^[^\\s]{16,}$", error, "nospaces_long_value_re"), + good_header_name_re(sigsSource["good_header_name_re"].get(), error, "good_header_name"), + good_header_value_re(sigsSource["good_header_value_re"].get(), error, "good_header_value"), + ignored_for_nospace_long_value( + to_strset(sigsSource["ignored_for_nospace_long_value"].get())), + global_ignored_keywords( + to_strset( + sigsSource["global_ignored"].get()["keys"].get() + ) + ), + global_ignored_patterns( + to_strset( + sigsSource["global_ignored"].get()["patterns"].get() + ) + ), + url_ignored_keywords( + to_strset( + sigsSource["ignored_for_url"].get()["keys"].get() + ) + ), + url_ignored_patterns( + to_strset( + sigsSource["ignored_for_url"].get()["patterns"].get() + ) + ), + url_ignored_re( + sigsSource["ignored_for_url"].get()["regex"].get(), + error, + "url_ignored" + ), + header_ignored_keywords( + to_strset( + sigsSource["ignored_for_headers"].get()["keys"].get() + ) + ), + header_ignored_patterns( + to_strset( + sigsSource["ignored_for_headers"].get() + ["patterns"].get() + ) + ), + header_ignored_re( + sigsSource["ignored_for_headers"].get()["regex"].get(), + error, + "header_ignored" + ), + filter_parameters( + to_filtermap( + sigsSource["filter_parameters"].get() + ) + ), + m_attack_types( + to_filtermap( + sigsSource["attack_types_map"].get() + ) + ), + // Removed by Pavel's request. Leaving here in case he'll want to add this back... 
+#if 0 + cookie_ignored_keywords( + to_strset( + sigsSource["ignored_for_cookies"].get()["keys"].get() + ) + ), + cookie_ignored_patterns( + to_strset( + sigsSource["ignored_for_cookies"].get() + ["patterns"].get() + ) + ), + cookie_ignored_re( + sigsSource["ignored_for_cookies"].get()["regex"].get(), + error, + "cookie_ignored" + ), +#endif + php_serialize_identifier("^(N;)|^([ibdsOoCcRra]:\\d+)", error, "php_serialize_identifier"), + html_regex("(<(?>body|head)\\b.*>(?>.|[\\r\\n]){0,400}){2}|.+\\|)+.+}"), + pipes_delimited_key_val_re(genDelimitedKeyValPattern("\\|")), + semicolon_delimited_key_val_re(genDelimitedKeyValPattern(";")), + asterisk_delimited_key_val_re(genDelimitedKeyValPattern("\\*")), + comma_delimited_key_val_re(genDelimitedKeyValPattern(",")), + ampersand_delimited_key_val_re(genDelimitedKeyValPattern("&")), + headers_re(to_regexmap(sigsSource["headers_re"].get(), error)), + format_magic_binary_re(sigsSource["format_magic_binary_re"].get(), error, "format_magic_binary_re"), + params_type_re(to_regexmap(sigsSource["format_types_regex_list"].get(), error)), + resp_hdr_pattern_regex_list(to_strvec(sigsSource["resp_hdr_pattern_regex_list"].get()), + error, "resp_hdr_pattern_regex_list", nullptr), + resp_hdr_words_regex_list(to_strvec(sigsSource["resp_hdr_words_regex_list"].get()), + error, "resp_hdr_words_regex_list", nullptr), + resp_body_pattern_regex_list(to_strvec(sigsSource["resp_body_pattern_regex_list"].get()), + error, "resp_body_pattern_regex_list", nullptr), + resp_body_words_regex_list(to_strvec(sigsSource["resp_body_words_regex_list"].get()), + error, "resp_body_words_regex_list", nullptr), + remove_keywords_always( + to_strset(sigsSource["remove_keywords_always"].get())), + user_agent_prefix_re(sigsSource["user_agent_prefix_re"].get()), + binary_data_kw_filter(sigsSource["binary_data_kw_filter"].get()), + wbxml_data_kw_filter(sigsSource["wbxml_data_kw_filter"].get()) +{ + +} + +Signatures::~Signatures() +{ +} + +bool Signatures::fail() +{ + return error; +} + +picojson::value::object Signatures::loadSource(const std::string& sigsFname) +{ + picojson::value doc; + std::ifstream f(sigsFname.c_str()); + + if (f.fail()) { + dbgError(D_WAAP) << "Failed to open json data file '" << sigsFname << "'!"; + error = true; // flag an error + return picojson::value::object(); + } + + int length; + f.seekg(0, std::ios::end); // go to the end + length = f.tellg(); // report location (this is the length) + char* buffer = new char[length]; // allocate memory for a buffer of appropriate dimension + f.seekg(0, std::ios::beg); // go back to the beginning + f.read(buffer, length); // read the whole file into the buffer + f.close(); + + std::string dataObfuscated(buffer, length); + + delete[] buffer; + std::stringstream ss(dataObfuscated); + ss >> doc; + + if (!picojson::get_last_error().empty()) { + dbgError(D_WAAP) << "WaapAssetState::loadSource('" << sigsFname << "') failed (parse error: '" << + picojson::get_last_error() << "')."; + error = true; // flag an error + return picojson::value::object(); + } + + return doc.get(); +} diff --git a/components/security_apps/waap/waap_clib/Signatures.h b/components/security_apps/waap/waap_clib/Signatures.h new file mode 100755 index 0000000..302e7ce --- /dev/null +++ b/components/security_apps/waap/waap_clib/Signatures.h @@ -0,0 +1,93 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __SIGNATURES_H__ +#define __SIGNATURES_H__ + +#include "Waf2Regex.h" +#include "picojson.h" +#include + +class Signatures { +private: + // json parsed sources (not really needed once data is loaded) + picojson::value::object sigsSource; + bool error; +public: + Signatures(const std::string& filepath); + ~Signatures(); + + bool fail(); + + std::shared_ptr m_regexPreconditions; + + // Regexes loaded from compiled signatures + const Regex words_regex; + const Regex specific_acuracy_keywords_regex; + const Regex pattern_regex; + const Regex un_escape_pattern; + const Regex quotes_ev_pattern; + const Regex comment_ev_pattern; + const Regex quotes_space_ev_pattern; + const Regex allowed_text_re; + const Regex pipe_split_re; + const Regex semicolon_split_re; + const Regex longtext_re; + const Regex nospaces_long_value_re; + const Regex good_header_name_re; + const Regex good_header_value_re; + const std::set ignored_for_nospace_long_value; + const std::set global_ignored_keywords; + const std::set global_ignored_patterns; + const std::set url_ignored_keywords; + const std::set url_ignored_patterns; + const Regex url_ignored_re; + const std::set header_ignored_keywords; + const std::set header_ignored_patterns; + const Regex header_ignored_re; + const std::map> filter_parameters; + const std::map> m_attack_types; + const Regex php_serialize_identifier; + const Regex html_regex; + const Regex uri_parser_regex; + const boost::regex confluence_macro_re; + const boost::regex pipes_delimited_key_val_re; + const boost::regex semicolon_delimited_key_val_re; + const boost::regex asterisk_delimited_key_val_re; + const boost::regex comma_delimited_key_val_re; + const boost::regex ampersand_delimited_key_val_re; +#if 0 // Removed by Pavel's request. Leaving here in case he'll want to add this back... + const std::set cookie_ignored_keywords; + const std::set cookie_ignored_patterns; + const Regex cookie_ignored_re; +#endif + std::map headers_re; + const Regex format_magic_binary_re; + std::map params_type_re; + + // Signatures for responses + const Regex resp_hdr_pattern_regex_list; + const Regex resp_hdr_words_regex_list; + const Regex resp_body_pattern_regex_list; + const Regex resp_body_words_regex_list; + + const std::set remove_keywords_always; + const boost::regex user_agent_prefix_re; + const boost::regex binary_data_kw_filter; + const boost::regex wbxml_data_kw_filter; + +private: + picojson::value::object loadSource(const std::string& sigsFname); +}; + +#endif diff --git a/components/security_apps/waap/waap_clib/SingleDecision.cc b/components/security_apps/waap/waap_clib/SingleDecision.cc new file mode 100755 index 0000000..b7a38d1 --- /dev/null +++ b/components/security_apps/waap/waap_clib/SingleDecision.cc @@ -0,0 +1,53 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "SingleDecision.h" +#include "debug.h" + +USE_DEBUG_FLAG(D_WAAP); + +SingleDecision::SingleDecision(DecisionType type): + m_type(type), + m_log(false), + m_block(false) +{} + +SingleDecision::~SingleDecision() +{} + +DecisionType SingleDecision::getType() const +{ + return m_type; +} + +bool SingleDecision::shouldLog() const +{ + return m_log; +} + +bool SingleDecision::shouldBlock() const +{ + return m_block; +} + +void SingleDecision::setLog(bool log) +{ + dbgTrace(D_WAAP) << "Decision " << getTypeStr() << " changes should log from " << m_log << " to " << log; + m_log = log; +} + +void SingleDecision::setBlock(bool block) +{ + dbgTrace(D_WAAP) << "Decision " << getTypeStr() << " changes should block from " << m_block << " to " << block; + m_block = block; +} diff --git a/components/security_apps/waap/waap_clib/SingleDecision.h b/components/security_apps/waap/waap_clib/SingleDecision.h new file mode 100755 index 0000000..da39f33 --- /dev/null +++ b/components/security_apps/waap/waap_clib/SingleDecision.h @@ -0,0 +1,39 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __SINGLE_DECISION_H__ +#define __SINGLE_DECISION_H__ + +#include "DecisionType.h" +#include + +class SingleDecision +{ +public: + explicit SingleDecision(DecisionType type); + virtual ~SingleDecision(); + + void setLog(bool log); + void setBlock(bool block); + DecisionType getType() const; + bool shouldLog() const; + bool shouldBlock() const; + virtual std::string getTypeStr() const = 0; + +protected: + DecisionType m_type; + bool m_log; + bool m_block; +}; + +#endif diff --git a/components/security_apps/waap/waap_clib/SyncLearningNotification.cc b/components/security_apps/waap/waap_clib/SyncLearningNotification.cc new file mode 100755 index 0000000..6f794f6 --- /dev/null +++ b/components/security_apps/waap/waap_clib/SyncLearningNotification.cc @@ -0,0 +1,58 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
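// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): SingleDecision,
// declared in SingleDecision.h above, is an abstract base -- a concrete
// decision only has to provide getTypeStr(). The subclass name below is an
// assumption for illustration; the real decision classes live elsewhere in
// waap_clib.

#include <string>
#include "SingleDecision.h"

class ExampleDecision : public SingleDecision
{
public:
    // The DecisionType value is supplied by the caller from DecisionType.h.
    explicit ExampleDecision(DecisionType type) : SingleDecision(type) {}
    std::string getTypeStr() const override { return "Example"; }
};

// Typical flow: a detector flips the flags, the response path queries them.
//     ExampleDecision decision(type);
//     decision.setLog(true);
//     decision.setBlock(shouldBlockRequest);
//     if (decision.shouldBlock()) { /* block the transaction */ }
// ---------------------------------------------------------------------------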
+ +#include "SyncLearningNotification.h" + +SyncLearningNotificationObject::SyncLearningNotificationObject(const std::string& asset_id, + const std::string& type, + const std::string& window_id) : + m_asset_id(asset_id), + m_type(type), + m_window_id(window_id) +{ + +} + +SyncLearningNotificationObject::~SyncLearningNotificationObject() +{ + +} + +void SyncLearningNotificationObject::serialize(cereal::JSONOutputArchive& ar) const +{ + ar.setNextName("notificationConsumerData"); + ar.startNode(); + ar.setNextName("syncLearnNotificationConsumers"); + ar.startNode(); + ar(cereal::make_nvp("assetId", m_asset_id)); + ar(cereal::make_nvp("type", m_type)); + ar(cereal::make_nvp("windowId", m_window_id)); + ar.finishNode(); + ar.finishNode(); +} + +std::string SyncLearningNotificationObject::toString() const +{ + std::stringstream ss; + { + cereal::JSONOutputArchive ar(ss); + serialize(ar); + } + + return ss.str(); +} + +std::ostream& operator<<(std::ostream& os, const SyncLearningNotificationObject& obj) +{ + return os << obj.toString(); +} diff --git a/components/security_apps/waap/waap_clib/SyncLearningNotification.h b/components/security_apps/waap/waap_clib/SyncLearningNotification.h new file mode 100755 index 0000000..2dd92b1 --- /dev/null +++ b/components/security_apps/waap/waap_clib/SyncLearningNotification.h @@ -0,0 +1,59 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __SYNC_LEARNING_NOTIFICATION_OBJECT_H__ +#define __SYNC_LEARNING_NOTIFICATION_OBJECT_H__ + +#include +#include +#include "cereal/archives/json.hpp" +#include "report/report.h" +#include "rest.h" + +class SyncLearningNotificationObject +{ +public: + explicit SyncLearningNotificationObject( + const std::string& asset_id, + const std::string& type, + const std::string& window_id + ); + ~SyncLearningNotificationObject(); + void serialize(cereal::JSONOutputArchive& ar) const; + + friend std::ostream& operator<<(std::ostream& os, const SyncLearningNotificationObject& obj); + +private: + std::string toString() const; + + std::string m_asset_id; + std::string m_type; + std::string m_window_id; +}; + +class SyncLearningObject : public ClientRest +{ +public: + SyncLearningObject( + const std::string& _asset_id, + const std::string& _type, + const std::string& _window_id + ) : assetId(_asset_id), type(_type), windowId(_window_id) {} + +private: + C2S_PARAM(std::string, assetId); + C2S_PARAM(std::string, type); + C2S_PARAM(std::string, windowId); +}; + +#endif diff --git a/components/security_apps/waap/waap_clib/Telemetry.cc b/components/security_apps/waap/waap_clib/Telemetry.cc new file mode 100755 index 0000000..2c344d6 --- /dev/null +++ b/components/security_apps/waap/waap_clib/Telemetry.cc @@ -0,0 +1,304 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "telemetry.h" +#include "waap.h" +#include "report/report.h" +#include "log_generator.h" +#include "generic_rulebase/triggers_config.h" +#include "config.h" +#include "maybe_res.h" +#include "LogGenWrapper.h" +#include + +USE_DEBUG_FLAG(D_WAAP); + +#define LOGGING_INTERVAL_IN_MINUTES 10 + +using namespace std; + +void +WaapTelemetrics::initMetrics() +{ + requests.report(0); + sources.report(0); + threat_info.report(0); + threat_low.report(0); + threat_medium.report(0); + threat_high.report(0); + api_blocked.report(0); + bot_blocked.report(0); + waf_blocked.report(0); + force_and_block_exceptions.report(0); +} +void +WaapTelemetrics::updateMetrics(const string &asset_id, const DecisionTelemetryData &data) +{ + initMetrics(); + requests.report(1); + if (sources_seen.find(data.source) == sources_seen.end()) { + if (sources.getCounter() == 0) sources_seen.clear(); + sources_seen.insert(data.source); + sources.report(1); + } + + if (data.blockType == WAF_BLOCK || data.blockType == NOT_BLOCKING) + { + switch (data.threat) + { + case NO_THREAT: { + break; + } + case THREAT_INFO: { + threat_info.report(1); + break; + } + case LOW_THREAT: { + threat_low.report(1); + break; + } + case MEDIUM_THREAT: { + threat_medium.report(1); + break; + } + case HIGH_THREAT: { + threat_high.report(1); + break; + } + default: { + dbgWarning(D_WAAP) << "Unexpected Enum value: " << data.threat; + break; + } + } + } + + switch (data.blockType) + { + case API_BLOCK: { + api_blocked.report(1); + break; + } + case BOT_BLOCK: { + bot_blocked.report(1); + break; + } + case WAF_BLOCK: { + waf_blocked.report(1); + break; + } + case FORCE_BLOCK: + case FORCE_EXCEPTION: { + force_and_block_exceptions.report(1); + break; + } + case NOT_BLOCKING: { + break; + } + default: { + dbgWarning(D_WAAP) << "Unexpected Enum value: " << data.blockType; + break; + } + } +} + +void +WaapAttackTypesMetrics::initMetrics() +{ + sql_inj.report(0); + vulnerability_scan.report(0); + path_traversal.report(0); + ldap_inj.report(0); + evasion_techs.report(0); + remote_code_exec.report(0); + xml_extern_entity.report(0); + cross_site_scripting.report(0); + general.report(0); +} + +void +WaapAttackTypesMetrics::updateMetrics(const string &asset_id, const DecisionTelemetryData &data) +{ + if (data.blockType == FORCE_EXCEPTION) { + dbgInfo(D_WAAP) << "Data block type is FORCE_EXCEPTION, no update needed"; + return; + } + + if (!data.attackTypes.empty()) initMetrics(); + + for (const auto &attackType : data.attackTypes) { + if (attackType == "SQL Injection") sql_inj.report(1); + if (attackType == "Vulnerability Scanning") vulnerability_scan.report(1); + if (attackType == "Path Traversal") path_traversal.report(1); + if (attackType == "LDAP Injection") ldap_inj.report(1); + if (attackType == "Evasion Techniques") evasion_techs.report(1); + if (attackType == "Remote Code Execution") remote_code_exec.report(1); + if (attackType == "XML External Entity") xml_extern_entity.report(1); + if (attackType == "Cross Site Scripting") cross_site_scripting.report(1); + if 
(attackType == "General") general.report(1); + } +} + +void +WaapMetricWrapper::upon(const WaapTelemetryEvent &event) +{ + const string &asset_id = event.getAssetId(); + const DecisionTelemetryData &data = event.getData(); + + dbgTrace(D_WAAP) + << "Log the decision for telemetry. Asset ID: " + << asset_id + << ", Practice ID: " + << data.practiceId + << ", Source: " + << data.source + << ", Block type: " + << data.blockType + << ", Threat level: " + << data.threat; + + if (!telemetries.count(asset_id)) { + telemetries.emplace(asset_id, make_shared()); + telemetries[asset_id]->init( + "WAAP telemetry", + ReportIS::AudienceTeam::WAAP, + ReportIS::IssuingEngine::AGENT_CORE, + chrono::minutes(10), + true, + ReportIS::Audience::SECURITY + ); + + telemetries[asset_id]->registerContext( + "pracitceType", + string("Threat Prevention"), + EnvKeyAttr::LogSection::SOURCE + ); + telemetries[asset_id]->registerContext( + "practiceSubType", + string("Web Application"), + EnvKeyAttr::LogSection::SOURCE + ); + telemetries[asset_id]->registerContext("assetId", asset_id, EnvKeyAttr::LogSection::SOURCE); + telemetries[asset_id]->registerContext("assetName", data.assetName, EnvKeyAttr::LogSection::SOURCE); + telemetries[asset_id]->registerContext("practiceId", data.practiceId, EnvKeyAttr::LogSection::SOURCE); + telemetries[asset_id]->registerContext( + "practiceName", + data.practiceName, + EnvKeyAttr::LogSection::SOURCE + ); + + telemetries[asset_id]->registerListener(); + } + if (!attack_types_telemetries.count(asset_id)) { + attack_types_telemetries.emplace(asset_id, make_shared()); + attack_types_telemetries[asset_id]->init( + "WAAP attack type telemetry", + ReportIS::AudienceTeam::WAAP, + ReportIS::IssuingEngine::AGENT_CORE, + chrono::minutes(10), + true, + ReportIS::Audience::SECURITY + ); + + attack_types_telemetries[asset_id]->registerContext( + "pracitceType", + string("Threat Prevention"), + EnvKeyAttr::LogSection::SOURCE + ); + attack_types_telemetries[asset_id]->registerContext( + "practiceSubType", + string("Web Application"), + EnvKeyAttr::LogSection::SOURCE + ); + attack_types_telemetries[asset_id]->registerContext( + "assetId", + asset_id, + EnvKeyAttr::LogSection::SOURCE + ); + attack_types_telemetries[asset_id]->registerContext( + "assetName", + data.assetName, + EnvKeyAttr::LogSection::SOURCE + ); + attack_types_telemetries[asset_id]->registerContext( + "practiceId", + data.practiceId, + EnvKeyAttr::LogSection::SOURCE + ); + attack_types_telemetries[asset_id]->registerContext( + "practiceName", + data.practiceName, + EnvKeyAttr::LogSection::SOURCE + ); + + attack_types_telemetries[asset_id]->registerListener(); + } + + telemetries[asset_id]->updateMetrics(asset_id, data); + attack_types_telemetries[asset_id]->updateMetrics(asset_id, data); + + auto agent_mode = Singleton::Consume::by()->getOrchestrationMode(); + string tenant_id = Singleton::Consume::by()->getTenantId(); + if (agent_mode == OrchestrationMode::HYBRID || tenant_id.rfind("org_", 0) == 0) { + if (!metrics.count(asset_id)) { + metrics.emplace(asset_id, make_shared()); + metrics[asset_id]->init( + "Waap Metrics", + ReportIS::AudienceTeam::WAAP, + ReportIS::IssuingEngine::AGENT_CORE, + chrono::minutes(10), + true, + ReportIS::Audience::INTERNAL + ); + metrics[asset_id]->registerListener(); + } + if (!attack_types.count(asset_id)) { + attack_types.emplace(asset_id, make_shared()); + attack_types[asset_id]->init( + "WAAP Attack Type Metrics", + ReportIS::AudienceTeam::WAAP, + ReportIS::IssuingEngine::AGENT_CORE, + 
chrono::minutes(10), + true, + ReportIS::Audience::INTERNAL + ); + attack_types[asset_id]->registerListener(); + } + + metrics[asset_id]->updateMetrics(asset_id, data); + attack_types[asset_id]->updateMetrics(asset_id, data); + } +} + +void +AssetsMetric::upon(const AssetCountEvent &event) +{ + int assets_count = event.getAssetCount(); + + switch (event.getAssetType()) { + case AssetType::API: { + api_assets.report(assets_count); + break; + } + case AssetType::WEB: { + web_assets.report(assets_count); + break; + } + case AssetType::ALL: { + all_assets.report(assets_count); + break; + } + default: { + dbgWarning(D_WAAP) << "Invalid Asset Type was reported"; + } + } +} diff --git a/components/security_apps/waap/waap_clib/TrustedSources.cc b/components/security_apps/waap/waap_clib/TrustedSources.cc new file mode 100755 index 0000000..aa11c07 --- /dev/null +++ b/components/security_apps/waap/waap_clib/TrustedSources.cc @@ -0,0 +1,217 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "TrustedSources.h" +#include "Waf2Util.h" +#include "CidrMatch.h" +#include "agent_core_utilities.h" + +using namespace Waap::TrustedSources; + +TrustedSourcesParameter::TrustedSourcesParameter() : m_identifiers() +{ + +} + +bool TrustedSourcesParameter::isSourceTrusted(std::string source, TrustedSourceType srcType) +{ + if (m_identifiers.empty()) + { + return false; + } + + if (source.empty()) + { + return false; + } + switch (srcType) + { + case SOURCE_IP: + case X_FORWARDED_FOR: + return m_identifiers[0].isCidrMatch(source, srcType); + case COOKIE_OAUTH2_PROXY: + return m_identifiers[0].isRegexMatch(source, COOKIE_OAUTH2_PROXY); + case SM_USER: + return m_identifiers[0].isRegexMatch(source, SM_USER); + case UNKNOWN: + break; + default: + break; + } + return false; +} + +size_t TrustedSourcesParameter::getNumOfSources() +{ + if (m_identifiers.empty()) + { + return (size_t)(-1); + } + return m_identifiers[0].getNumOfSources(); +} + +std::set Waap::TrustedSources::TrustedSourcesParameter::getTrustedTypes() +{ + if (m_identifiers.empty()) + { + return std::set(); + } + return m_identifiers[0].getTrustedTypes(); +} + + +bool SourcesIdentifers::isCidrMatch(const std::string &source, const TrustedSourceType &trustedSourceType) const +{ + auto found = m_identifiersMap.find(trustedSourceType); + if (found == m_identifiersMap.end()) + { + return false; + } + const std::vector& cidrs = found->second; + for (auto cidr : cidrs) + { + if (Waap::Util::cidrMatch(source, cidr)) + { + dbgTrace(D_WAAP) << "source: " << source << " is trusted for type: " << trustedSourceType << + ", cidr: " << cidr; + return true; + } + } + return false; +} + +bool SourcesIdentifers::isRegexMatch(const std::string &source, const TrustedSourceType& type) const +{ + auto found = m_identifiersMap.find(type); + if (found == m_identifiersMap.end()) + { + return false; + } + const std::vector& regexes = found->second; + for (auto regex : regexes) + { + boost::regex expr{ regex }; + boost::smatch 
matches; + if (NGEN::Regex::regexSearch(__FILE__, __LINE__, source, matches, expr)) + { + dbgTrace(D_WAAP) << "source: " << source << " is trusted for type: " << type << + ", expr: " << regex; + return true; + } + } + return false; +} + +size_t SourcesIdentifers::getNumOfSources() const +{ + return m_minSources; +} + +const std::set& SourcesIdentifers::getTrustedTypes() +{ + return m_trustedTypes; +} + + +bool SourcesIdentifers::operator!=(const SourcesIdentifers& other) const +{ + if (m_identifiersMap.size() != other.m_identifiersMap.size()) + { + return true; + } + if (m_minSources != other.m_minSources) + { + return true; + } + + for (auto identifier : m_identifiersMap) + { + if (other.m_identifiersMap.find(identifier.first) == other.m_identifiersMap.end()) + { + return true; + } + TrustedSourceType currType = identifier.first; + const std::vector& values = identifier.second; + std::vector otherValues = other.m_identifiersMap.at(currType); + if (values.size() != otherValues.size()) + { + return true; + } + for (size_t i = 0; i < values.size(); i++) + { + if (values[i] != otherValues[i]) + { + return true; + } + } + } + + return false; +} + + +Identifer::Identifer() : identitySource(UNKNOWN), value() +{ +} + +TrustedSourceType Identifer::convertSourceIdentifierToEnum(std::string identifierType) +{ + static const std::string SourceIp = "Source IP"; + static const std::string cookie = "Cookie:_oauth2_proxy"; + static const std::string smUser = "Header:sm_user"; + static const std::string forwrded = "X-Forwarded-For"; + if (memcaseinsensitivecmp(identifierType.c_str(), identifierType.size(), SourceIp.c_str(), SourceIp.size())) + { + return SOURCE_IP; + } + if (memcaseinsensitivecmp(identifierType.c_str(), identifierType.size(), cookie.c_str(), cookie.size())) + { + return COOKIE_OAUTH2_PROXY; + } + if (memcaseinsensitivecmp(identifierType.c_str(), identifierType.size(), forwrded.c_str(), forwrded.size())) + { + return X_FORWARDED_FOR; + } + if (memcaseinsensitivecmp(identifierType.c_str(), identifierType.size(), smUser.c_str(), smUser.size())) + { + return SM_USER; + } + dbgTrace(D_WAAP) << identifierType << " is not a recognized identifier type"; + return UNKNOWN; +} + +bool TrustedSourcesParameter::operator==(const TrustedSourcesParameter &other) const +{ + return !(*this != other); +} + +bool TrustedSourcesParameter::operator!=(const TrustedSourcesParameter& other) const +{ + if (m_identifiers.size() != other.m_identifiers.size()) + { + return true; + } + + for (size_t i = 0; i < m_identifiers.size(); i++) + { + if (m_identifiers[i] != other.m_identifiers[i]) + { + return true; + } + } + + return false; +} diff --git a/components/security_apps/waap/waap_clib/TrustedSources.h b/components/security_apps/waap/waap_clib/TrustedSources.h new file mode 100755 index 0000000..20f4c5e --- /dev/null +++ b/components/security_apps/waap/waap_clib/TrustedSources.h @@ -0,0 +1,111 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
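// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): the
// TrustedSources.cc implementation above matches a request source against the
// policy identifiers -- CIDR matching for "Source IP" / "X-Forwarded-For"
// identifiers, regex matching for the oauth2-proxy cookie and sm_user header
// identifiers. A hedged usage sketch; the address and user values are
// illustrative assumptions:

#include <string>
#include "TrustedSources.h"

static bool isRequestTrusted(Waap::TrustedSources::TrustedSourcesParameter &policy,
                             const std::string &sourceIp,
                             const std::string &smUser)
{
    using namespace Waap::TrustedSources;
    // IP-based identifiers are compared against the configured CIDRs.
    if (policy.isSourceTrusted(sourceIp, SOURCE_IP)) return true;
    // Header-based identifiers are compared against the configured regexes.
    return policy.isSourceTrusted(smUser, SM_USER);
}
// ---------------------------------------------------------------------------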
+ +#pragma once +#include +#include +#include +#include +#include "debug.h" + +USE_DEBUG_FLAG(D_WAAP); + +// used to load trusted sources policy +namespace Waap { + namespace TrustedSources { + + enum TrustedSourceType { + UNKNOWN, + SOURCE_IP, + X_FORWARDED_FOR, + COOKIE_OAUTH2_PROXY, + SM_USER + }; + + class Identifer + { + public: + Identifer(); + + template + void serialize(_A& ar) { + std::string temp; + ar(cereal::make_nvp("sourceIdentifier", temp), + cereal::make_nvp("value", value)); + identitySource = convertSourceIdentifierToEnum(temp); + if (identitySource == UNKNOWN) + { + dbgDebug(D_WAAP) << "loaded " << temp << " from policy is not a recognized source identifier"; + } + } + + static TrustedSourceType convertSourceIdentifierToEnum(std::string identifierType); + + TrustedSourceType identitySource; + std::string value; + }; + + class SourcesIdentifers + { + public: + template + void serialize(_A& ar) { + std::vector identifiers; + ar(cereal::make_nvp("sourcesIdentifiers", identifiers), + cereal::make_nvp("numOfSources", m_minSources)); + for (auto identifier : identifiers) + { + if (identifier.identitySource != UNKNOWN) + { + m_identifiersMap[identifier.identitySource].push_back(identifier.value); + m_trustedTypes.insert(identifier.identitySource); + } + } + } + + bool isCidrMatch(const std::string &source, const TrustedSourceType &type) const; + bool isRegexMatch(const std::string &source, const TrustedSourceType& type) const; + size_t getNumOfSources() const; + const std::set& getTrustedTypes(); + + inline bool operator!=(const SourcesIdentifers& other) const; + private: + std::map> m_identifiersMap; + std::set m_trustedTypes; + size_t m_minSources; + }; + + class TrustedSourcesParameter + { + public: + template + TrustedSourcesParameter(_A& ar) { + ar(cereal::make_nvp("trustedSources", m_identifiers)); + } + + TrustedSourcesParameter(); + + template + void serialize(Archive& ar) { + ar(cereal::make_nvp("trustedSources", m_identifiers)); + } + bool isSourceTrusted(std::string source, TrustedSourceType srcType); + size_t getNumOfSources(); + std::set getTrustedTypes(); + bool operator==(const TrustedSourcesParameter &other) const; + bool operator!=(const TrustedSourcesParameter& other) const; + private: + std::vector m_identifiers; + }; + } +} diff --git a/components/security_apps/waap/waap_clib/TrustedSourcesConfidence.cc b/components/security_apps/waap/waap_clib/TrustedSourcesConfidence.cc new file mode 100755 index 0000000..b59c8a2 --- /dev/null +++ b/components/security_apps/waap/waap_clib/TrustedSourcesConfidence.cc @@ -0,0 +1,269 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
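// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): the header above
// loads the trusted-sources policy with cereal -- each Identifer's
// "sourceIdentifier" string is converted to a TrustedSourceType, and
// SourcesIdentifers groups the values per type together with the
// "numOfSources" threshold. A minimal deserialization sketch, assuming a JSON
// fragment shaped like the field names used in the serialize() methods above:

#include <sstream>
#include <string>
#include "cereal/archives/json.hpp"
#include "TrustedSources.h"

static Waap::TrustedSources::TrustedSourcesParameter
loadTrustedSources(const std::string &policyJson)
{
    std::stringstream ss(policyJson);
    cereal::JSONInputArchive ar(ss);
    // The archive constructor reads the "trustedSources" array from the policy.
    return Waap::TrustedSources::TrustedSourcesParameter(ar);
}
// ---------------------------------------------------------------------------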
+ +#include "TrustedSourcesConfidence.h" +#include "i_messaging.h" +#include "waap.h" +#include "Waf2Util.h" + +USE_DEBUG_FLAG(D_WAAP_CONFIDENCE_CALCULATOR); +#define SYNC_WAIT_TIME std::chrono::seconds(300) // 5 minutes in seconds + +TrustedSourcesConfidenceCalculator::TrustedSourcesConfidenceCalculator( + std::string path, + const std::string& remotePath, + const std::string& assetId) + : + SerializeToLocalAndRemoteSyncBase(std::chrono::minutes(120), + SYNC_WAIT_TIME, + path, + (remotePath == "") ? remotePath : remotePath + "/Trust", + assetId, + "TrustedSourcesConfidenceCalculator") +{ + restore(); +} + +bool TrustedSourcesConfidenceCalculator::is_confident(Key key, Val value, size_t minSources) const +{ + auto sourceCtrItr = m_logger.find(key); + if (sourceCtrItr != m_logger.end()) + { + auto sourceSetItr = sourceCtrItr->second.find(value); + if (sourceSetItr != sourceCtrItr->second.end()) + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "The number of trusted sources for " << key + << " : " << value << " is " << sourceSetItr->second.size(); + return sourceSetItr->second.size() >= minSources; + } + else + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to find the value(" << value << ")"; + } + } + else + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to find the key(" << key << ")"; + } + return false; +} + + +class GetTrustedFile : public RestGetFile +{ +public: + GetTrustedFile() + { + } + + Maybe + getTrustedLogs() const + { + if (!logger.get().empty()) return logger.get(); + return genError("failed to get file"); + } + +private: + S2C_PARAM(TrustedSourcesConfidenceCalculator::KeyValSourceLogger, logger) +}; + +class TrsutedSourcesLogger : public RestGetFile +{ +public: + TrsutedSourcesLogger(const TrustedSourcesConfidenceCalculator::KeyValSourceLogger& _logger) + : logger(_logger) + { + + } +private: + C2S_PARAM(TrustedSourcesConfidenceCalculator::KeyValSourceLogger, logger); +}; + +bool TrustedSourcesConfidenceCalculator::postData() +{ + std::string url = getPostDataUrl(); + + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Sending the data to: " << url; + + TrsutedSourcesLogger logger(m_logger); + return sendNoReplyObjectWithRetry(logger, + I_Messaging::Method::PUT, + url); +} + +void TrustedSourcesConfidenceCalculator::pullData(const std::vector& files) +{ + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Fetching the window data for trusted sources"; + std::string url = getPostDataUrl(); + std::string sentFile = url.erase(0, url.find_first_of('/') + 1); + for (auto file : files) + { + if (file == sentFile) + { + continue; + } + GetTrustedFile getTrustFile; + bool res = sendObjectWithRetry(getTrustFile, + I_Messaging::Method::GET, + getUri() + "/" + file); + if (res && getTrustFile.getTrustedLogs().ok()) + { + mergeFromRemote(getTrustFile.getTrustedLogs().unpack()); + } + } +} + +void TrustedSourcesConfidenceCalculator::processData() +{ + +} + +void TrustedSourcesConfidenceCalculator::updateState(const std::vector& files) +{ + m_logger.clear(); + pullProcessedData(files); +} + +void TrustedSourcesConfidenceCalculator::pullProcessedData(const std::vector& files) +{ + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Fetching the logger object for trusted sources"; + for (auto file : files) + { + GetTrustedFile getTrustFile; + bool res = sendObjectWithRetry(getTrustFile, + I_Messaging::Method::GET, + getUri() + "/" + file); + if (res && getTrustFile.getTrustedLogs().ok()) + { + mergeFromRemote(getTrustFile.getTrustedLogs().unpack()); + } + } +} + +void 
TrustedSourcesConfidenceCalculator::postProcessedData() +{ + std::string url = getUri() + "/" + m_remotePath + "/processed/data.data"; + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Sending the processed data to: " << url; + + TrsutedSourcesLogger logger(m_logger); + sendNoReplyObjectWithRetry(logger, + I_Messaging::Method::PUT, + url); +} + +TrustedSourcesConfidenceCalculator::ValuesSet TrustedSourcesConfidenceCalculator::getConfidenceValues( + const Key& key, + size_t minSources) const +{ + ValuesSet values; + auto sourceCtrItr = m_logger.find(key); + if (sourceCtrItr != m_logger.end()) + { + for (auto sourceSetItr : sourceCtrItr->second) + { + if (sourceSetItr.second.size() >= minSources) + { + values.insert(sourceSetItr.first); + } + } + } + else + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Failed to find the key(" << key << ")"; + } + return values; +} + +void TrustedSourcesConfidenceCalculator::serialize(std::ostream& stream) +{ + cereal::JSONOutputArchive archive(stream); + + archive(cereal::make_nvp("version", 2), cereal::make_nvp("logger", m_logger)); +} + +void TrustedSourcesConfidenceCalculator::deserialize(std::istream& stream) +{ + cereal::JSONInputArchive archive(stream); + size_t version = 0; + + try + { + archive(cereal::make_nvp("version", version)); + } + catch (std::runtime_error & e) { + archive.setNextName(nullptr); + version = 0; + dbgDebug(D_WAAP) << "Can't load file version: " << e.what(); + } + + switch (version) + { + case 2: + { + archive(cereal::make_nvp("logger", m_logger)); + break; + } + case 1: + { + KeyValSourceLogger logger; + archive(cereal::make_nvp("logger", logger)); + for (auto& log : logger) + { + m_logger[normalize_param(log.first)] = log.second; + } + break; + } + case 0: + { + archive(cereal::make_nvp("m_logger", m_logger)); + break; + } + default: + dbgError(D_WAAP) << "unknown file format version: " << version; + break; + } +} + +void TrustedSourcesConfidenceCalculator::mergeFromRemote(const KeyValSourceLogger& logs) +{ + for (auto& srcCounterItr : logs) + { + for (auto& sourcesItr : srcCounterItr.second) + { + for (auto& src : sourcesItr.second) + { + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) << "Registering the source: " << src + << " for the value: " << sourcesItr.first << " and the key: " << srcCounterItr.first; + m_logger[normalize_param(srcCounterItr.first)][sourcesItr.first].insert(src); + } + } + } +} + +void TrustedSourcesConfidenceCalculator::log(Key key, Val value, Source source) +{ + dbgTrace(D_WAAP_CONFIDENCE_CALCULATOR) + << "Logging the value: " + << value + << " for the key: " + << key + << " from the source: " + << source; + m_logger[key][value].insert(source); + saveData(); +} + +void TrustedSourcesConfidenceCalculator::reset() +{ + m_logger.clear(); +} diff --git a/components/security_apps/waap/waap_clib/TrustedSourcesConfidence.h b/components/security_apps/waap/waap_clib/TrustedSourcesConfidence.h new file mode 100755 index 0000000..db31bb8 --- /dev/null +++ b/components/security_apps/waap/waap_clib/TrustedSourcesConfidence.h @@ -0,0 +1,57 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "i_serialize.h" +#include +#include +#include + +USE_DEBUG_FLAG(D_WAAP); + +// this class is responsible for logging trusted sources indicators matches (without validation) +class TrustedSourcesConfidenceCalculator : public SerializeToLocalAndRemoteSyncBase +{ +public: + typedef std::string Key; + typedef std::string Val; + typedef std::string Source; + typedef std::set ValuesSet; + typedef std::unordered_set SourcesSet; + typedef std::unordered_map SourcesCounter; + typedef std::unordered_map KeyValSourceLogger; + + TrustedSourcesConfidenceCalculator(std::string path, const std::string& remotePath, + const std::string& assetId); + bool is_confident(Key key, Val value, size_t minSources) const; + + virtual bool postData(); + virtual void pullData(const std::vector& files); + virtual void processData(); + virtual void postProcessedData(); + virtual void pullProcessedData(const std::vector& files); + virtual void updateState(const std::vector& files); + + ValuesSet getConfidenceValues(const Key& key, size_t minSources) const; + + virtual void serialize(std::ostream& stream); + virtual void deserialize(std::istream& stream); + + void mergeFromRemote(const KeyValSourceLogger& logs); + + void log(Key key, Val value, Source source); + void reset(); + +private: + KeyValSourceLogger m_logger; +}; diff --git a/components/security_apps/waap/waap_clib/TuningDecision.cc b/components/security_apps/waap/waap_clib/TuningDecision.cc new file mode 100755 index 0000000..e5f5d3d --- /dev/null +++ b/components/security_apps/waap/waap_clib/TuningDecision.cc @@ -0,0 +1,158 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
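// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the patch): the calculator
// declared in TrustedSourcesConfidence.h above counts, per key/value pair, how
// many distinct trusted sources reported it; is_confident() returns true once
// that count reaches the caller-supplied minimum. A hedged usage sketch -- the
// key, value, source names and threshold are illustrative, and the calculator
// itself is expected to be constructed where the agent environment (mainloop,
// messaging and time singletons) is already initialized:

#include "TrustedSourcesConfidence.h"

static void logAndCheck(TrustedSourcesConfidenceCalculator &calc)
{
    calc.log("url#param", "keyword", "source-a");
    calc.log("url#param", "keyword", "source-b");
    // Two distinct sources reported the same key/value pair, so a threshold of 2 passes.
    bool confident = calc.is_confident("url#param", "keyword", 2);
    (void)confident;
}
// ---------------------------------------------------------------------------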
+ +#include "TuningDecisions.h" +#include "i_mainloop.h" +#include "i_serialize.h" +#include "waap.h" + +static const std::string BASE_URI = "/storage/waap/"; +USE_DEBUG_FLAG(D_WAAP); + +TuningDecision::TuningDecision(const std::string& remotePath) : + m_remotePath(remotePath + "/tuning") +{ + if (remotePath == "") + { + return; + } + Singleton::Consume::by()->addRecurringRoutine( + I_MainLoop::RoutineType::System, + std::chrono::minutes(10), + [&]() { updateDecisions(); }, + "Get tuning updates" + ); +} + +TuningDecision::~TuningDecision() +{ + +} + +struct TuningEvent +{ + template + void serialize(Archive& ar) + { + ar(cereal::make_nvp("decision", decision)); + ar(cereal::make_nvp("eventType", eventType)); + ar(cereal::make_nvp("eventTitle", eventTitle)); + } + std::string decision; + std::string eventType; + std::string eventTitle; +}; + +class TuningEvents : public RestGetFile +{ +public: + TuningEvents() + { + + } + + Maybe> getTuningEvents() + { + return decisions.get(); + } + +private: + S2C_PARAM(std::vector, decisions); +}; + +TuningDecisionEnum TuningDecision::convertDecision(std::string decisionStr) +{ + if (decisionStr == "benign") + { + return BENIGN; + } + if (decisionStr == "malicious") + { + return MALICIOUS; + } + if (decisionStr == "dismiss") + { + return DISMISS; + } + return NO_DECISION; +} + +TuningDecisionType TuningDecision::convertDecisionType(std::string decisionTypeStr) +{ + if (decisionTypeStr == "source") + { + return TuningDecisionType::SOURCE; + } + if (decisionTypeStr == "url") + { + return TuningDecisionType::URL; + } + if (decisionTypeStr == "parameterName") + { + return TuningDecisionType::PARAM_NAME; + } + if (decisionTypeStr == "parameterValue") + { + return TuningDecisionType::PARAM_VALUE; + } + return TuningDecisionType::UNKNOWN; +} + +void TuningDecision::updateDecisions() +{ + TuningEvents tuningEvents; + RemoteFilesList tuningDecisionFiles; + bool isSuccessful = sendObject(tuningDecisionFiles, + I_Messaging::Method::GET, + BASE_URI + "?list-type=2&prefix=" + m_remotePath); + + if (!isSuccessful || tuningDecisionFiles.getFilesList().empty()) + { + dbgDebug(D_WAAP) << "Failed to get the list of files"; + return; + } + + if (!sendObject(tuningEvents, + I_Messaging::Method::GET, + BASE_URI + tuningDecisionFiles.getFilesList()[0])) + { + return; + } + m_decisions.clear(); + Maybe> events = tuningEvents.getTuningEvents(); + if (!events.ok()) + { + dbgDebug(D_WAAP) << "failed to parse events"; + return; + } + for (const auto& tEvent : events.unpack()) + { + TuningDecisionType type = convertDecisionType(tEvent.eventType); + m_decisions[type][tEvent.eventTitle] = convertDecision(tEvent.decision); + } +} + +TuningDecisionEnum TuningDecision::getDecision(std::string tuningValue, TuningDecisionType tuningType) +{ + const auto& typeDecisionsItr = m_decisions.find(tuningType); + if (typeDecisionsItr == m_decisions.cend()) + { + return NO_DECISION; + } + const auto& decisionItr = typeDecisionsItr->second.find(tuningValue); + if (decisionItr == typeDecisionsItr->second.cend()) + { + return NO_DECISION; + } + return decisionItr->second; +} diff --git a/components/security_apps/waap/waap_clib/TuningDecisions.h b/components/security_apps/waap/waap_clib/TuningDecisions.h new file mode 100755 index 0000000..62f17da --- /dev/null +++ b/components/security_apps/waap/waap_clib/TuningDecisions.h @@ -0,0 +1,89 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __TUNING_DECISIONS_H__ +#define __TUNING_DECISIONS_H__ + +#include +#include +#include "i_messaging.h" +#include "i_agent_details.h" +#include "waap.h" + +enum TuningDecisionEnum +{ + NO_DECISION, + DISMISS = NO_DECISION, + BENIGN, + MALICIOUS +}; + +enum TuningDecisionType +{ + UNKNOWN, + SOURCE, + URL, + PARAM_NAME, + PARAM_VALUE +}; + + +class TuningDecision +{ +public: + TuningDecision(const std::string& remotePath); + ~TuningDecision(); + + TuningDecisionEnum getDecision(std::string tuningValue, TuningDecisionType tuningType); +private: + void updateDecisions(); + TuningDecisionType convertDecisionType(std::string decisionTypeStr); + TuningDecisionEnum convertDecision(std::string decisionStr); + + + template + bool sendObject(T &obj, I_Messaging::Method method, std::string uri) + { + I_Messaging *messaging = Singleton::Consume::by(); + I_AgentDetails *agentDetails = Singleton::Consume::by(); + if (agentDetails->getOrchestrationMode() != OrchestrationMode::ONLINE) { + Flags conn_flags; + conn_flags.setFlag(MessageConnConfig::EXTERNAL); + std::string tenant_header = "X-Tenant-Id: " + agentDetails->getTenantId(); + + return messaging->sendObject( + obj, + method, + "fog-msrv-appsec-shared-files-svc", + 80, + conn_flags, + uri, + tenant_header, + nullptr, + MessageTypeTag::WAAP_LEARNING); + } + return messaging->sendObject( + obj, + method, + uri, + "", + nullptr, + true, + MessageTypeTag::WAAP_LEARNING); + } + + std::string m_remotePath; + std::map> m_decisions; +}; + +#endif diff --git a/components/security_apps/waap/waap_clib/TypeIndicatorsFilter.cc b/components/security_apps/waap/waap_clib/TypeIndicatorsFilter.cc new file mode 100755 index 0000000..f68c3e8 --- /dev/null +++ b/components/security_apps/waap/waap_clib/TypeIndicatorsFilter.cc @@ -0,0 +1,152 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "TypeIndicatorsFilter.h" +#include "waap.h" +#include "debug.h" +#include +#include +#include +#include "FpMitigation.h" +#include "i_transaction.h" +#include "Waf2Util.h" +#include "IndicatorsFiltersManager.h" +#include + +USE_DEBUG_FLAG(D_WAAP); + +#define TYPES_FILTER_PATH(dirPath) dirPath + "/4.data" +#define TYPES_FILTER_TRUST_PATH(dirPath) dirPath + "/9.data" + +TypeIndicatorFilter::TypeIndicatorFilter(I_WaapAssetState* pWaapAssetState, + const std::string& remotePath, + const std::string& assetId, + TuningDecision* tuning, + size_t minSources, + size_t minIntervals, + std::chrono::minutes intervalDuration, + double ratioThreshold) : + IndicatorFilterBase(TYPES_FILTER_PATH(pWaapAssetState->getSignaturesFilterDir()), + TYPES_FILTER_TRUST_PATH(pWaapAssetState->getSignaturesFilterDir()), + (remotePath == "") ? remotePath : remotePath + "/Type", + assetId, + minSources, + minIntervals, + intervalDuration, + ratioThreshold, + "unknown", + tuning), + m_pWaapAssetState(pWaapAssetState) +{ + m_confidence_calc.setOwner("TypeIndicatorFilter"); +} + +TypeIndicatorFilter::~TypeIndicatorFilter() +{ + +} + +bool TypeIndicatorFilter::shouldFilterKeyword(const std::string &key, const std::string &keyword) const +{ + auto keyTypes = getParamTypes(key); + std::string htmlParam = ".html"; + bool isHtmlInput = keyTypes.find("html_input") != keyTypes.end() || + (key.size() > htmlParam.size() && + key.compare(key.size() - htmlParam.size(), htmlParam.size(), htmlParam) == 0); + for (auto keyType : keyTypes) + { + if (keyType == "free_text" && !isHtmlInput) + { + return true; + } + if (m_pWaapAssetState->isKeywordOfType(keyword, Waap::Util::convertTypeStrToEnum(keyType))) + { + return true; + } + } + return false; +} + +void TypeIndicatorFilter::registerKeywords(const std::string& key, Waap::Keywords::KeywordsSet& keywords, + IWaf2Transaction* pTransaction) +{ + (void)keywords; + std::string sample = pTransaction->getLastScanSample(); + registerKeywords(key, sample, pTransaction); +} + +void TypeIndicatorFilter::registerKeywords(const std::string& key, const std::string& sample, + IWaf2Transaction* pTransaction) +{ + std::set types = m_pWaapAssetState->getSampleType(sample); + std::string source = pTransaction->getSourceIdentifier(); + std::string trusted_source = getTrustedSource(pTransaction); + + for (const std::string &type : types) + { + if (type == "local_file_path") + { + std::string location = IndicatorsFiltersManager::getLocationFromKey(key, pTransaction); + if (location == "url" || location == "referer") + { + continue; + } + } + registerKeyword(key, type, source, trusted_source); + if (m_tuning != nullptr && m_tuning->getDecision(pTransaction->getUri(), URL) == BENIGN) + { + source = "TuningDecisionSource_" + source; + registerKeyword(key, type, source, trusted_source); + } + + } +} + +void TypeIndicatorFilter::loadParams(std::shared_ptr pParams) +{ + ConfidenceCalculatorParams params; + + params.minSources = std::stoul( + pParams->getParamVal("typeIndicators.minSources", std::to_string(TYPE_FILTER_CONFIDENCE_MIN_SOURCES))); + params.minIntervals = std::stoul( + pParams->getParamVal("typeIndicators.minIntervals", std::to_string(TYPE_FILTER_CONFIDENCE_MIN_INTERVALS))); + params.intervalDuration = std::chrono::minutes(std::stoul( + pParams->getParamVal("typeIndicators.intervalDuration", + std::to_string(TYPE_FILTER_INTERVAL_DURATION.count())))); + params.ratioThreshold = std::stod(pParams->getParamVal("typeIndicators.ratio", + std::to_string(TYPE_FILTER_CONFIDENCE_THRESHOLD))); + 
std::string learnPermanentlyStr = pParams->getParamVal("typeIndicators.learnPermanently", "true"); + params.learnPermanently = !boost::iequals(learnPermanentlyStr, "false"); + + std::string remoteSyncStr = pParams->getParamVal("remoteSync", "true"); + bool syncEnabled = !boost::iequals(remoteSyncStr, "false"); + + dbgTrace(D_WAAP) << params << " remote sync: " << remoteSyncStr; + + m_confidence_calc.setRemoteSyncEnabled(syncEnabled); + m_trusted_confidence_calc.setRemoteSyncEnabled(syncEnabled); + + m_confidence_calc.reset(params); +} + +std::set TypeIndicatorFilter::getParamTypes(const std::string& canonicParam) const +{ + std::set types = m_confidence_calc.getConfidenceValues(canonicParam); + if (m_policy != nullptr) + { + std::set types_trusted = m_trusted_confidence_calc.getConfidenceValues(canonicParam, + m_policy->getNumOfSources()); + types.insert(types_trusted.begin(), types_trusted.end()); + } + return types; +} diff --git a/components/security_apps/waap/waap_clib/TypeIndicatorsFilter.h b/components/security_apps/waap/waap_clib/TypeIndicatorsFilter.h new file mode 100755 index 0000000..79ff770 --- /dev/null +++ b/components/security_apps/waap/waap_clib/TypeIndicatorsFilter.h @@ -0,0 +1,52 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once +#include "IndicatorsFilterBase.h" +#include "WaapKeywords.h" +#include "WaapEnums.h" +#include "i_waap_asset_state.h" +#include "ConfidenceCalculator.h" +#include "WaapParameters.h" +#include + +#define TYPE_FILTER_CONFIDENCE_MIN_SOURCES 10 +#define TYPE_FILTER_CONFIDENCE_MIN_INTERVALS 5 +#define TYPE_FILTER_CONFIDENCE_THRESHOLD 0.8 +#define TYPE_FILTER_INTERVAL_DURATION std::chrono::minutes(60) + +class TypeIndicatorFilter : public IndicatorFilterBase +{ +public: + TypeIndicatorFilter(I_WaapAssetState* pWaapAssetState, + const std::string& remotePath, + const std::string& assetId, + TuningDecision* tuning = nullptr, + size_t minSources = TYPE_FILTER_CONFIDENCE_MIN_SOURCES, + size_t minIntervals = TYPE_FILTER_CONFIDENCE_MIN_INTERVALS, + std::chrono::minutes intervalDuration = TYPE_FILTER_INTERVAL_DURATION, + double ratioThreshold = TYPE_FILTER_CONFIDENCE_THRESHOLD); + ~TypeIndicatorFilter(); + + virtual void registerKeywords(const std::string& key, Waap::Keywords::KeywordsSet& keyword, + IWaf2Transaction* pTransaction); + + void registerKeywords(const std::string& key, const std::string& sample, IWaf2Transaction* pTransaction); + + void loadParams(std::shared_ptr pParams); + virtual bool shouldFilterKeyword(const std::string &keyword, const std::string &key) const; + std::set getParamTypes(const std::string& canonicParam) const; + +private: + I_WaapAssetState* m_pWaapAssetState; +}; diff --git a/components/security_apps/waap/waap_clib/UserLimitsDecision.cc b/components/security_apps/waap/waap_clib/UserLimitsDecision.cc new file mode 100755 index 0000000..68fd2dc --- /dev/null +++ b/components/security_apps/waap/waap_clib/UserLimitsDecision.cc @@ -0,0 +1,22 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "UserLimitsDecision.h" + +UserLimitsDecision::UserLimitsDecision(DecisionType type): SingleDecision(type) +{} + +std::string UserLimitsDecision::getTypeStr() const +{ + return "User Limits"; +} diff --git a/components/security_apps/waap/waap_clib/UserLimitsDecision.h b/components/security_apps/waap/waap_clib/UserLimitsDecision.h new file mode 100755 index 0000000..f16a599 --- /dev/null +++ b/components/security_apps/waap/waap_clib/UserLimitsDecision.h @@ -0,0 +1,27 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef __USER_LIMITS_DECISION_H__ +#define __USER_LIMITS_DECISION_H__ + +#include "SingleDecision.h" +#include "DecisionType.h" +#include + +class UserLimitsDecision: public SingleDecision +{ +public: + explicit UserLimitsDecision(DecisionType type); + std::string getTypeStr() const override; +}; +#endif diff --git a/components/security_apps/waap/waap_clib/UserLimitsPolicy.cc b/components/security_apps/waap/waap_clib/UserLimitsPolicy.cc new file mode 100644 index 0000000..d6391af --- /dev/null +++ b/components/security_apps/waap/waap_clib/UserLimitsPolicy.cc @@ -0,0 +1,330 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "UserLimitsPolicy.h" +#include +#include + +namespace Waap { +namespace UserLimits { + +typedef unsigned long long ull; + +bool Policy::operator==(const Policy& other) const +{ + return getConfig() == other.getConfig(); +} + +bool Policy::Config::operator==(const Policy::Config& other) const +{ + return urlMaxSize == other.urlMaxSize && + httpHeaderMaxSize == other.httpHeaderMaxSize && + httpBodyMaxSize == other.httpBodyMaxSize && + maxObjectDepth == other.maxObjectDepth && + httpIllegalMethodsAllowed == other.httpIllegalMethodsAllowed; +} + +std::ostream& operator<<(std::ostream& os, const Policy& policy) +{ + auto config = policy.getConfig(); + os << "[Policy] " << "urlMaxSize: " << config.urlMaxSize << " " << + "httpHeaderMaxSize: " << config.httpHeaderMaxSize << " " << + "httpBodyMaxSize: " << config.httpBodyMaxSize << " " << + "maxObjectDepth: " << config.maxObjectDepth << " " << + std::boolalpha << "httpIllegalMethodsAllowed: " << config.httpIllegalMethodsAllowed; + return os; +} + +bool State::addUrlBytes(size_t size) +{ + setCurrStateType(StateType::URL); + if (m_urlSize > std::numeric_limits::max() - size) { + // We are about to overflow + setViolationType(ViolationType::URL_OVERFLOW); + m_urlSize = std::numeric_limits::max(); + dbgWarning(D_WAAP_ULIMITS) << "[USER LIMITS] Url size overflow. Asset id: " << getAssetId(); + return true; + } + + m_urlSize += size; + if (m_urlSize > m_policy.getUrlMaxSize()) { + setViolationType(ViolationType::URL_LIMIT); + dbgWarning(D_WAAP_ULIMITS) << "[USER LIMITS] Url size limit exceeded " << + m_urlSize << "/" << m_policy.getUrlMaxSize() << ". Asset id: " << getAssetId(); + return true; + } + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] Current url bytes " << m_urlSize << "/" << + m_policy.getUrlMaxSize(); + return false; +} + +bool State::addHeaderBytes(const std::string& name, const std::string& value) +{ + setCurrStateType(StateType::HEADER); + size_t chunkSize = name.size() + value.size(); + if (m_httpHeaderSize > std::numeric_limits::max() - chunkSize) { + // We are about to overflow + setViolationType(ViolationType::HEADER_OVERFLOW); + m_httpHeaderSize = std::numeric_limits::max(); + dbgWarning(D_WAAP_ULIMITS) << "[USER LIMITS] Http header size overflow. 
Asset id: " << getAssetId(); + return true; + } + + m_httpHeaderSize += chunkSize; + if (m_httpHeaderSize > m_policy.getHttpHeaderMaxSize()) { + setViolationType(ViolationType::HEADER_LIMIT); + dbgWarning(D_WAAP_ULIMITS) << "[USER LIMITS] Http header size limit exceeded " << + m_httpHeaderSize << "/" << m_policy.getHttpHeaderMaxSize() << ". Asset id: " << getAssetId(); + return true; + } + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] Current header bytes " << m_httpHeaderSize << "/" << + m_policy.getHttpHeaderMaxSize(); + return false; +} + +bool State::addBodyBytes(size_t chunkSize) +{ + setCurrStateType(StateType::BODY); + if (m_httpBodySize > std::numeric_limits::max() - chunkSize) { + // We are about to overflow + setViolationType(ViolationType::BODY_OVERFLOW); + m_httpBodySize = std::numeric_limits::max(); + dbgWarning(D_WAAP_ULIMITS) << "[USER LIMITS] Http body size overflow. Asset id: " << getAssetId(); + return true; + } + + m_httpBodySize += chunkSize; + if (m_httpBodySize > m_policy.getHttpBodyMaxSize()) { + setViolationType(ViolationType::BODY_LIMIT); + dbgWarning(D_WAAP_ULIMITS) << "[USER LIMITS] Http body size limit exceeded " << + m_httpBodySize << "/" << m_policy.getHttpBodyMaxSize() << ". Asset id: " << getAssetId(); + return true; + } + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] Current body bytes " << m_httpBodySize << "/" << + m_policy.getHttpBodyMaxSize(); + return false; +} + +bool State::setObjectDepth(size_t depth) +{ + setCurrStateType(StateType::DEPTH); + m_objectDepth = depth; + if (m_objectDepth > m_policy.getMaxObjectDepth()) { + setViolationType(ViolationType::OBJECT_DEPTH_LIMIT); + dbgWarning(D_WAAP_ULIMITS) << "[USER LIMITS] Http object depth limit exceeded " << + m_objectDepth << "/" << m_policy.getMaxObjectDepth() << ". Asset id: " << getAssetId(); + return true; + } + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] Current object depth " << m_objectDepth << "/" << + m_policy.getMaxObjectDepth(); + return false; +} + +bool State::isValidHttpMethod(const std::string& method) +{ + setCurrStateType(StateType::METHOD); + if (m_policy.isHttpIllegalMethodAllowed()) { + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS][method: " << method << "] Http all methods allowed"; + return true; + } + + if (isLegalHttpMethod(method)) { + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS][method: " << method << "] Http legal method"; + return true; + } + setViolationType(ViolationType::ILLEGAL_METHOD); + dbgWarning(D_WAAP_ULIMITS) << "[USER LIMITS][method: " << method << "] Http illegal method" << + ". 
Asset id: " << getAssetId(); + return false; +} + +bool State::isLegalHttpMethod(const std::string& method) const +{ + if (method == "GET") return true; + if (method == "POST") return true; + if (method == "DELETE") return true; + if (method == "PATCH") return true; + if (method == "PUT") return true; + if (method == "CONNECT") return true; + if (method == "OPTIONS") return true; + if (method == "HEAD") return true; + if (method == "TRACE") return true; + // Below methods are part of WebDAV http protocol extension + if (method == "MKCOL") return true; + if (method == "COPY") return true; + if (method == "MOVE") return true; + if (method == "PROPFIND") return true; + if (method == "PROPPATCH") return true; + if (method == "LOCK") return true; + if (method == "UNLOCK") return true; + if (method == "VERSION-CONTROL") return true; + if (method == "REPORT") return true; + if (method == "INDEX") return true; + if (method == "CHECKOUT") return true; + if (method == "CHECKIN") return true; + if (method == "UNCHECKOUT") return true; + if (method == "MKWORKSPACE") return true; + if (method == "UPDATE") return true; + if (method == "LABEL") return true; + if (method == "MERGE") return true; + if (method == "BASELINE-CONTROL") return true; + if (method == "MKACTIVITY") return true; + if (method == "ORDERPATCH") return true; + if (method == "ACL") return true; + if (method == "PATCH") return true; + if (method == "SEARCH") return true; + if (method == "MKREDIRECTREF") return true; + if (method == "BIND") return true; + if (method == "UNBIND") return true; + return false; +} + +bool State::isLimitReached() const +{ + return m_type != ViolationType::NO_LIMIT; +} + +bool State::isIllegalMethodViolation() const +{ + return m_type == ViolationType::ILLEGAL_METHOD; +} + +void State::setViolationType(ViolationType type) +{ + m_type = type; + setViolatedTypeStr(); + setViolatedPolicyStr(); +} + +void State::setViolatedTypeStr() +{ + std::stringstream ss; + switch (m_type) + { + case ViolationType::ILLEGAL_METHOD: { + ss << "method violation"; + break; + } + case ViolationType::URL_LIMIT: { + ss << "url size exceeded"; + break; + } + case ViolationType::URL_OVERFLOW: { + ss << "url size overflow"; + break; + } + case ViolationType::HEADER_LIMIT: { + ss << "header size exceeded"; + break; + } + case ViolationType::HEADER_OVERFLOW: { + ss << "header size overflow"; + break; + } + case ViolationType::BODY_LIMIT: { + ss << "body size exceeded"; + break; + } + case ViolationType::BODY_OVERFLOW: { + ss << "body size overflow"; + break; + } + case ViolationType::OBJECT_DEPTH_LIMIT: { + ss << "object depth exceeded"; + break; + } + default: + ss << "no violation"; + } + m_strData.type = ss.str(); +} + +void State::setViolatedPolicyStr() +{ + std::stringstream ss; + switch (m_type) + { + case ViolationType::ILLEGAL_METHOD: { + if (m_policy.isHttpIllegalMethodAllowed()) { + ss << "true"; + } + else { + ss << "false"; + } + break; + } + case ViolationType::URL_LIMIT: + case ViolationType::URL_OVERFLOW: { + ss << m_policy.getUrlMaxSize(); + if (m_policy.getUrlMaxSize() == 1) { + ss << " Byte"; + } + else { + ss << " Bytes"; + }; + break; + } + case ViolationType::HEADER_LIMIT: + case ViolationType::HEADER_OVERFLOW: { + ss << m_policy.getHttpHeaderMaxSize(); + if (m_policy.getHttpHeaderMaxSize() == 1) { + ss << " Byte"; + } + else { + ss << " Bytes"; + } + break; + } + case ViolationType::BODY_LIMIT: + case ViolationType::BODY_OVERFLOW: { + ss << m_policy.getHttpBodyMaxSizeKb(); + if (m_policy.getHttpBodyMaxSizeKb() == 1) { 
+ ss << " Kilobyte"; + } + else { + ss << " Kilobytes"; + } + break; + } + case ViolationType::OBJECT_DEPTH_LIMIT: { + ss << m_policy.getMaxObjectDepth(); + break; + } + default: + ss << "unknown"; + } + m_strData.policy = ss.str(); +} + +size_t State::getViolatingSize() const +{ + switch (m_type) + { + case ViolationType::URL_LIMIT: + case ViolationType::URL_OVERFLOW: + return m_urlSize; + case ViolationType::HEADER_LIMIT: + case ViolationType::HEADER_OVERFLOW: + return m_httpHeaderSize; + case ViolationType::BODY_LIMIT: + case ViolationType::BODY_OVERFLOW: + return static_cast(m_httpBodySize / 1024); + case ViolationType::OBJECT_DEPTH_LIMIT: + return m_objectDepth; + default: + return 0; + } +} + +} // namespace UserLimits +} // namespace Waap diff --git a/components/security_apps/waap/waap_clib/UserLimitsPolicy.h b/components/security_apps/waap/waap_clib/UserLimitsPolicy.h new file mode 100644 index 0000000..c1332b7 --- /dev/null +++ b/components/security_apps/waap/waap_clib/UserLimitsPolicy.h @@ -0,0 +1,183 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "debug.h" +#include +#include +#include +#include + +USE_DEBUG_FLAG(D_WAAP_ULIMITS); + +namespace Waap { +namespace UserLimits { + +typedef unsigned long long ull; +#define DEFAULT_URL_MAX_SIZE 32*1024 +#define DEFAULT_HEADER_MAX_SIZE 100*1024 +#define DEFAULT_BODY_MAX_SIZE_KB 1000000 +#define DEFAULT_BODY_MAX_SIZE 1000000*1024 +#define DEFAULT_OBJECT_MAX_DEPTH 40 + +// @file Feature behaviour description: +// Phase 1: +// 1. No enforcement. No logs to mgmt. +// 2. Only logs to automation and dev Kibana. +// 3. Logs should represent the state as if the limits are enforced as described in phase 2. +// Phase 2: +// 1. DISABLE mode: no enforcement and no logs. +// 2. LEARNING mode: requests that violated a limit will be accepted, and won't be scanned any further. +// Illegal methods won't be automatically accepted, and will be further scanned. +// 3. PREVENT mode: requests that violated a limit will be dropped, and won't be scanned any further. 
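The size checks implemented in UserLimitsPolicy.cc all follow the same accumulate-then-compare pattern described above: guard against integer overflow before adding each chunk, clamp on overflow, then compare the running total against the configured limit. A minimal self-contained sketch of that pattern (class and variable names here are illustrative only and are not part of this header):

#include <cstddef>
#include <iostream>
#include <limits>

// Illustrative only: accumulates byte counts and reports a violation when the
// running total would overflow or exceeds the configured limit.
class SizeLimitTracker
{
public:
    explicit SizeLimitTracker(size_t maxSize) : m_max(maxSize), m_total(0) {}

    // Returns true when the limit is exceeded (or the counter would overflow).
    bool addBytes(size_t chunk)
    {
        if (m_total > std::numeric_limits<size_t>::max() - chunk) {
            // About to overflow: clamp and treat as a violation.
            m_total = std::numeric_limits<size_t>::max();
            return true;
        }
        m_total += chunk;
        return m_total > m_max;
    }

    size_t total() const { return m_total; }

private:
    size_t m_max;
    size_t m_total;
};

int main()
{
    SizeLimitTracker body(100 * 1024); // e.g. a hypothetical 100 KB body limit
    const size_t chunks[] = {40 * 1024, 40 * 1024, 40 * 1024};
    bool violated = false;
    for (size_t chunk : chunks) {
        if (body.addBytes(chunk)) {
            violated = true;
            break;
        }
    }
    std::cout << (violated ? "limit exceeded" : "within limit")
              << " after " << body.total() << " bytes\n";
    return 0;
}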
+class Policy { + struct Config { + Config() : + urlMaxSize(DEFAULT_URL_MAX_SIZE), + httpHeaderMaxSize(DEFAULT_HEADER_MAX_SIZE), + httpBodyMaxSizeKb(DEFAULT_BODY_MAX_SIZE_KB), + httpBodyMaxSize(DEFAULT_BODY_MAX_SIZE), + maxObjectDepth(DEFAULT_OBJECT_MAX_DEPTH), + httpIllegalMethodsAllowed(false) {} + ~Config() {} + + template + void serialize(_A& ar) { + ar(cereal::make_nvp("urlMaxSize", urlMaxSize)); + ar(cereal::make_nvp("httpHeaderMaxSize", httpHeaderMaxSize)); + httpBodyMaxSizeKb = 0; + ar(cereal::make_nvp("httpRequestBodyMaxSize", httpBodyMaxSizeKb)); + // Kilobytes to bytes conversion + httpBodyMaxSize = httpBodyMaxSizeKb * 1024; + ar(cereal::make_nvp("jsonMaxObjectDepth", maxObjectDepth)); + int intToBool = 0; + ar(cereal::make_nvp("httpIllegalMethodsAllowed", intToBool)); + httpIllegalMethodsAllowed = (intToBool == 1); + } + + bool operator==(const Policy::Config& other) const; + + size_t urlMaxSize; // URL max size in bytes + size_t httpHeaderMaxSize; // Header Size in Bytes + size_t httpBodyMaxSizeKb; // Body Size in Kilobytes + ull httpBodyMaxSize; // Body Size in Bytes + size_t maxObjectDepth; // Can range from 0 to 1024 + // List of legal methods can be viewed in isLegalHttpMethod function + bool httpIllegalMethodsAllowed; + }; +public: + template + explicit Policy(_A& ar) + { + ar(cereal::make_nvp("practiceAdvancedConfig", m_config)); + } + Policy() : m_config() {} + ~Policy() {} + + bool operator==(const Policy& other) const; + size_t getUrlMaxSize() const { return m_config.urlMaxSize; } + size_t getMaxObjectDepth() const { return m_config.maxObjectDepth; } + size_t getHttpHeaderMaxSize() const { return m_config.httpHeaderMaxSize; } + size_t getHttpBodyMaxSizeKb() const { return m_config.httpBodyMaxSizeKb; } + ull getHttpBodyMaxSize() const { return m_config.httpBodyMaxSize; } + bool isHttpIllegalMethodAllowed() const { return m_config.httpIllegalMethodsAllowed; } + const Config& getConfig() const { return m_config; } + +private: + Config m_config; + + friend std::ostream& operator<<(std::ostream& os, const Policy& policy); +}; + +struct ViolatedStrData +{ + std::string type; + std::string policy; + std::string assetId; +}; + +class State { +public: + enum class StateType + { + NO_STATE, + URL, + METHOD, + HEADER, + BODY, + DEPTH + }; + + enum class ViolationType + { + NO_LIMIT, + ILLEGAL_METHOD, + URL_LIMIT, + URL_OVERFLOW, + HEADER_LIMIT, + HEADER_OVERFLOW, + BODY_LIMIT, + BODY_OVERFLOW, + OBJECT_DEPTH_LIMIT + }; +public: + explicit State(const Policy& policy) : + m_policy(policy), + m_urlSize(0), + m_httpHeaderSize(0), + m_httpBodySize(0), + m_objectDepth(0), + m_currState(StateType::NO_STATE), + m_type(ViolationType::NO_LIMIT), + m_strData() + { + m_strData.type = "no violation"; + } + ~State() {} + + void setAssetId(const std::string& assetId) { m_strData.assetId = assetId; } + // @return true if limit is reached or overflows + bool addUrlBytes(size_t size); + bool addHeaderBytes(const std::string& name, const std::string& value); + bool addBodyBytes(size_t chunkSize); + // @return true if limit is reached + bool setObjectDepth(size_t depth); + bool isValidHttpMethod(const std::string& method); + bool isLimitReached() const; + bool isIllegalMethodViolation() const; + const std::string getViolatedTypeStr() const { return m_strData.type; } + const ViolatedStrData& getViolatedStrData() const { return m_strData; } + size_t getViolatingSize() const; + +private: + bool isLegalHttpMethod(const std::string& method) const; + void setCurrStateType(StateType type) { m_currState = 
type; } + StateType getCurrStateType() { return m_currState; } + void setViolationType(ViolationType type); + void setViolatedTypeStr(); + void setViolatedPolicyStr(); + const std::string& getAssetId() const { return m_strData.assetId; } + +private: + const Policy& m_policy; + size_t m_urlSize; + size_t m_httpHeaderSize; + ull m_httpBodySize; + size_t m_objectDepth; + StateType m_currState; // State that is currently being enforced + ViolationType m_type; // Type of violation reached + ViolatedStrData m_strData; // Holds the string info of the violated data +}; + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapAssetState.cc b/components/security_apps/waap/waap_clib/WaapAssetState.cc new file mode 100755 index 0000000..73dcb9c --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapAssetState.cc @@ -0,0 +1,1965 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// #define WAF2_LOGGING_ENABLE (does performance impact) +#include "WaapAssetState.h" +#include "Waf2Regex.h" +#include "debug.h" +#include "Waf2Util.h" +#include "maybe_res.h" +#include "picojson.h" +#include "agent_core_utilities.h" +#include +#include +#include + +#define MAX_CACHE_VALUE_SIZE 1024 + +USE_DEBUG_FLAG(D_WAAP_ASSET_STATE); +USE_DEBUG_FLAG(D_WAAP_SAMPLE_PREPROCESS); +USE_DEBUG_FLAG(D_WAAP_SAMPLE_SCAN); +USE_DEBUG_FLAG(D_WAAP_EVASIONS); + +typedef picojson::value::object JsObj; +typedef picojson::value JsVal; +typedef picojson::value::array JsArr; +typedef std::map> filtered_parameters_t; + +#ifdef WAF2_LOGGING_ENABLE +static void +print_filtered(std::string title, const std::set& ignored_set, const std::vector& v) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "--------------------------"; +#if 0 // TODO:: may be useful for debug, but in general no need to print this on every scanned value... 
+ dbgTrace(D_WAAP_SAMPLE_SCAN) << "Ignored " << title << " set:"; + for (std::set::const_iterator it = ignored_set.begin(); it != ignored_set.end(); ++it) { + const std::string& word = *it; + dbgTrace(D_WAAP_SAMPLE_SCAN) << "*'" << word << "'"; + } +#endif + dbgTrace(D_WAAP_SAMPLE_SCAN) << title << " collected:"; + for (std::vector::const_iterator it = v.begin(); it != v.end(); ++it) { + const std::string& word = *it; + + if (ignored_set.find(word) == ignored_set.end()) { + // not in ignored_set + dbgTrace(D_WAAP_SAMPLE_SCAN) << "+'" << word << "'"; + } + else { + // in ignored set + dbgTrace(D_WAAP_SAMPLE_SCAN) << "-'" << word << "'"; + } + } + dbgTrace(D_WAAP_SAMPLE_SCAN) << "--------------------------"; +} + +static void print_found_patterns(const Waap::Util::map_of_stringlists_t& m) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "-- found_patterns: ---------"; + for (auto g = m.begin(); g != m.end(); ++g) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "'" << g->first << "'"; + for (auto p = g->second.begin(); p != g->second.end(); ++p) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << " `-> '" << (*p) << "'"; + } + } + dbgTrace(D_WAAP_SAMPLE_SCAN) << "--------------------------"; +} +#endif + +static bool err_hex = false; +static const SingleRegex evasion_hex_regex( + "(0x[0-9a-f][0-9a-f])[\\w.%?*\\/\\\\]|[\\w.%?*\\/\\\\](0x[0-9a-f][0-9a-f])", + err_hex, + "evasion_hex_regex"); +static const boost::regex bad_hex_regex = boost::regex("%[cC]1%[19][cC]"); +static const SingleRegex evasion_bad_hex_regex( + "(%[cC]1%[19][cC])[\\w.%?*\\/\\\\]|[\\w.%?*\\/\\\\](%[cC]1%[19][cC])", + err_hex, + "evasion_bad_hex_regex"); + +WaapAssetState::WaapAssetState(const std::shared_ptr& pWaapAssetState, + const std::string& sigScoresFname, + const std::string& id) : + WaapAssetState(pWaapAssetState->m_Signatures, + sigScoresFname, + pWaapAssetState->m_cleanValuesCache.capacity(), + pWaapAssetState->m_suspiciousValuesCache.capacity(), + pWaapAssetState->m_sampleTypeCache.capacity(), + id) +{ + scoreBuilder.mergeScores(pWaapAssetState->scoreBuilder); + updateScores(); + m_typeValidator = pWaapAssetState->m_typeValidator; + + registerConfigLoadCb( + [this]() + { + clearRateLimitingState(); + clearSecurityHeadersState(); + clearErrorLimitingState(); + } + ); +} + +WaapAssetState::WaapAssetState(std::shared_ptr signatures, + const std::string& sigScoresFname, + size_t cleanValuesCacheCapacity, + size_t suspiciousValuesCacheCapacity, + size_t sampleTypeCacheCapacity, + const std::string& assetId) : + m_Signatures(signatures), + m_SignaturesScoresFilePath(sigScoresFname), + m_assetId(assetId), + scoreBuilder(this), + m_rateLimitingState(nullptr), + m_errorLimitingState(nullptr), + m_securityHeadersState(nullptr), + m_filtersMngr(nullptr), + m_typeValidator(getSignaturesFilterDir() + "/8.data"), + m_cleanValuesCache(cleanValuesCacheCapacity), + m_suspiciousValuesCache(suspiciousValuesCacheCapacity), + m_sampleTypeCache(sampleTypeCacheCapacity) + { + if (assetId != "" && Singleton::exists()) + { + I_AgentDetails* agentDetails = Singleton::Consume::by(); + std::string path = agentDetails->getTenantId() + "/" + assetId; + m_filtersMngr = std::make_shared(path, assetId, this); + } + else + { + m_filtersMngr = std::make_shared("", "", this); + } + // Load keyword scores - copy from ScoreBuilder + updateScores(); + } + + WaapAssetState::~WaapAssetState() { + // TODO:: leaving this uncommented may introduce (not critical) memory leak. + // Should return this code after testing it well. 
+#if 0 + // clean up the headers_re map to avoid memory leak + for (auto it = m_Signatures->headers_re.begin(); it != m_Signatures->headers_re.end(); ++it) { + delete it->second; // delete allocated Regex instances + } +#endif + } + + std::shared_ptr WaapAssetState::getSignatures() const + { + return m_Signatures; + } + + + void WaapAssetState::reset() + { + m_filtersMngr->reset(); + } + + void filterUnicode(std::string & text) { + std::string::iterator it = text.begin(); + std::string::iterator result = it; + uint32_t acc = 0; + int bytes_left = 0; + + for (; it != text.end(); ++it) { + unsigned char ch = (unsigned char)(*it); + + // If character high bits are 10xxxxxx, then it might be UTF-8 character used to evade. + // For example 0xc0, 0xaf may mean '/' in broken utf-8 decoders + // In our implementation we do remove leading byte in UTF8 encoding (such as 0xc0), + // but strip down the following bytes (with high bits 01). + if (ch <= 127) { + *result++ = ch; + bytes_left = 0; // any character <= 127 stops collecting UTF8 code + } + else { + if (bytes_left == 0) { + // collect utf8 code + if ((ch & 0xE0) == 0xC0) { // 110X XXXX two bytes follow + + if ((ch & 0x1E) != 0) { + acc = ch & 31; + } + bytes_left = 1; + } + else if ((ch & 0xF0) == 0xE0) { // 1110 XXXX three bytes follow + acc = ch & 15; + bytes_left = 2; + } + else if ((ch & 0xF8) == 0xF0) { // 1111 0XXX four bytes follow + acc = ch & 7; + bytes_left = 3; + } + else if ((ch & 0xFC) == 0xF8) { // 1111 10XX five bytes follow (by standard -an error) + acc = ch & 3; + bytes_left = 4; + } + else if ((ch & 0xFE) == 0xFC) { // 1111 110X six bytes follow (by standard -an error) + acc = ch & 1; + bytes_left = 5; + } + else { + // error + bytes_left = 0; + } + } + else if (bytes_left > 0) { + // "good" encoder would check that the following bytes contain "10" as their high bits, + // but buggy encoders don't, so are we! + acc = (acc << 6) | (ch & 0x3F); + bytes_left--; + + if (bytes_left == 0) { + // finished collecting the utf8 code + if (acc <= 127) { + *result++ = acc; + } + else if (isSpecialUnicode(acc)) { + *result++ = convertSpecialUnicode(acc); + } + acc = 0; + } + } + } + } + + text.erase(result, text.end()); + } + +#if 0 + //std::replace_if(text.begin(), text.end(), [](char c) { return !(c>=0); }, ' '); + inline void replaceUnicode(std::string & text, const char repl) { + std::string::iterator it = text.begin(); + + for (; it != text.end(); ++it) { + if (*it < 0) { + *it = repl; + } + } + } +#endif + + // Python equivalent: text = re.sub(r'[^\x00-\x7F]+',' ', text) + void replaceUnicodeSequence(std::string & text, const char repl) { + std::string::iterator it = text.begin(); + std::string::iterator result = it; + uint32_t acc = 0; + int bytes_left = 0; + + for (; it != text.end(); ++it) { + unsigned char ch = (unsigned char)(*it); + + // If character high bits are 10xxxxxx, then it might be UTF-8 character used to evade. + // For example 0xc0, 0xaf may mean '/' in broken utf-8 decoders + // In our implementation we do remove leading byte in UTF8 encoding (such as 0xc0), + // but strip down the following bytes (with high bits 01). 
+ if (ch <= 127) { + *result++ = ch; + bytes_left = 0; // any character <= 127 stops collecting UTF8 code + } + else { + if (bytes_left == 0) { + // collect utf8 code + if ((ch & 0xE0) == 0xC0) { // 110X XXXX two bytes follow + if ((ch & 0x1E) != 0) { + acc = ch & 31; + } + bytes_left = 1; + } + else if ((ch & 0xF0) == 0xE0) { // 1110 XXXX three bytes follow + acc = ch & 15; + bytes_left = 2; + } + else if ((ch & 0xF8) == 0xF0) { // 1111 0XXX four bytes follow + acc = ch & 7; + bytes_left = 3; + } + else if ((ch & 0xFC) == 0xF8) { // 1111 10XX five bytes follow (by standard -an error) + acc = ch & 3; + bytes_left = 4; + } + else if ((ch & 0xFE) == 0xFC) { // 1111 110X six bytes follow (by standard -an error) + acc = ch & 1; + bytes_left = 5; + } + else { + // error + bytes_left = 0; + } + } + else if (bytes_left > 0) { + // "good" encoder would check that the following bytes contain "10" as their high bits, + // but buggy encoders don't, so are we! + acc = (acc << 6) | (ch & 0x3F); + bytes_left--; + + if (bytes_left == 0) { + // finished collecting the utf8 code + if (acc <= 127) { + *result++ = acc; + } + else if (isSpecialUnicode(acc)) { + *result++ = convertSpecialUnicode(acc); + } + else { + *result++ = repl; + } + acc = 0; + } + } + } + } + + text.erase(result, text.end()); + } + + void + fixBreakingSpace(std::string &line) + { + for (char &c : line) { + if (c == (char)0xA0) { // "non-breaking space" + c = ' '; // convert to normal space + } + } + } + + std::string unescape(const std::string & s) { + std::string text = s; + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (0) '" << text << "'"; + + fixBreakingSpace(text); + // 1. remove all unicode characters from string. Basically, + // remove all characters whose ASCII code is >=128. + // Python equivalent: text.encode('ascii',errors='ignore') + filterUnicode(text); + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (1) '" << text << "'"; + + text = filterUTF7(text); + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (1) (after filterUTF7) '" << text << "'"; + + // 2. Replace %xx sequences by their single-character equivalents. + // Also replaces '+' symbol by space character. + // Python equivalent: text = urllib.unquote_plus(text) + text.erase(unquote_plus(text.begin(), text.end()), text.end()); + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (2) '" << text << "'"; + + fixBreakingSpace(text); + + // 3. remove all unicode characters from string. Basically, + // remove all characters whose ASCII code is >=128. + // Python equivalent: text.encode('ascii',errors='ignore') + filterUnicode(text); + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (3) '" << text << "'"; + + // 4. oh shi?... should I handle unicode html entities (python's htmlentitydefs module)??? + // Python equivalent: text = HTMLParser.HTMLParser().unescape(text) + text.erase(escape_html(text.begin(), text.end()), text.end()); + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (4) '" << text << "'"; + + // 5. Apply backslash escaping (like in C) + // Python equivalent: text = text.decode('string_escape') + text.erase(escape_backslashes(text.begin(), text.end()), text.end()); + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (5) '" << text << "'"; + + // 6. remove all unicode characters from string. Basically, + // remove all characters whose ASCII code is >=128. + // Python equivalent: text.encode('ascii',errors='ignore') + filterUnicode(text); + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (6) '" << text << "'"; + + // 7. 
Replace %xx sequences by their single-character equivalents. + // Also replaces '+' symbol by space character. + // Python equivalent: text = urllib.unquote_plus(text) + text.erase(unquote_plus(text.begin(), text.end()), text.end()); + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (7) '" << text << "'"; + + unescapeUnicode(text); + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "after unescapeUnicode '" << text << "'"; + + // 8. remove all unicode characters from string. Basically, + // remove all characters whose ASCII code is >=128. + // Python equivalent: text.encode('ascii',errors='ignore') + filterUnicode(text); + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (8) '" << text << "'"; + + // 9. ??? + // + //try: + // text = text.decode('utf-8') + //except: + // pass + + // 10. Replace each sequence of unicode characters with single space + // Python equivalent: text = re.sub(r'[^\x00-\x7F]+',' ', text) + // TODO:: actually, in python Pavel do this: + // text = re.sub(r'[^\x00-\x7F]+',' ', text).encode("ascii","ignore") + replaceUnicodeSequence(text, ' '); + +#if 0 // Removed Aug 25 2018. Reason for removal - breaks input containing ASCII zeros. + // 11. remove all unicode characters from string. + // Basically, remove all characters whose ASCII code is >=128. + // Python equivalent: text.encode('ascii',errors='ignore') + filterUnicode(text); +#endif + + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (11) '" << text << "'"; + + // 12. finally, apply tolower() to all characters of a string + // std::for_each(text.begin(), text.end(), [](char &c) { c = tolower(c); }); + for (std::string::iterator pC = text.begin(); pC != text.end(); ++pC) { + *pC = tolower(*pC); + } + + dbgTrace(D_WAAP_SAMPLE_PREPROCESS) << "unescape: (12) '" << text << "'"; + return text; + } + + inline std::string repr_uniq(const std::string & value) { + std::string result; + char hist[256]; + memset(&hist, 0, sizeof(hist)); + + for (std::string::const_iterator pC = value.begin(); pC != value.end(); ++pC) { + unsigned char ch = (unsigned char)(*pC); + + // Only take ASCII characters that are not alphanumeric, and each character only once + if (ch <= 127 && !isalnum(ch) && hist[ch] == 0) { + // Convert low ASCII characters to their C/C++ printable equivalent + // (used for easier viewing. 
Also, binary data causes issues with ElasticSearch) + switch (ch) { + case 0x07: result += "\\a"; break; + case 0x08: result += "\\b"; break; + case 0x09: result += "\\t"; break; + case 0x0A: result += "\\n"; break; + case 0x0B: result += "\\v"; break; + case 0x0C: result += "\\f"; break; + case 0x0D: result += "\\r"; break; + case 0x5C: result += "\\\\"; break; + case 0x27: result += "\\\'"; break; + case 0x22: result += "\\\""; break; + case 0x3F: result += "\\\?"; break; + default: { + if (ch >= 32) { + result += (char)ch; + } + else { + char buf[16]; + sprintf(buf, "\\" "x%02X", ch); + result += buf; + } + } + } + + hist[ch] = 1; + } + } + + return result; + } + + static bool isShortWord(const std::string &word) + { + return word.size() <= 2; + } + + static bool isShortHtmlTag(const std::string &word) + { + return !word.empty() && word.size() <= 3 && word[0] == '<'; + } + + void + WaapAssetState::checkRegex( + const SampleValue &sample, + const Regex & pattern, + std::vector& keyword_matches, + Waap::Util::map_of_stringlists_t & found_patterns, + bool longTextFound, + bool binaryDataFound) const + { + dbgFlow(D_WAAP_SAMPLE_SCAN) << "checkRegex: line='" << sample.getSampleString() << "' patt='" << + pattern.getName() << "' longTextFound=" << longTextFound << " binaryDataFound=" << binaryDataFound; + + std::vector matches; + sample.findMatches(pattern, matches); + + for (std::vector::const_iterator pMatch = matches.begin(); pMatch != matches.end(); ++pMatch) { + const RegexMatch& match = *pMatch; + + // Get whole match (group[0], which is always present in any match) + std::string word = match.groups.front().value; + + dbgTrace(D_WAAP_SAMPLE_SCAN) << "checkRegex: match='" << word << "':"; + + // Short words matched by regexes wont be detected in some cases like + // if enough binary data is present in the value. + if (binaryDataFound && word.size() <= 2) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "Will not add a short keyword '" << word << + "' because binaryData was found"; + continue; + } + + for (std::vector::const_iterator pGroup = match.groups.begin() + 1; + pGroup != match.groups.end(); + ++pGroup) { + std::string group = pGroup->name; + + if (group == "") { + continue; // skip unnamed group + } + + const std::string& value = pGroup->value; + dbgTrace(D_WAAP_SAMPLE_SCAN) << "checkRegex: group name='" << group << + "' value='" << value << "', word='" << word << "':"; + + // python: + // if 'fast_reg' in group: + // if 'evasion' in group: + // word = repr(str(''.join(set(value)))) + // else: + // word =group + if (group.find("fast_reg") != std::string::npos) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "checkRegex: found '*fast_reg*' in group name"; + if (group.find("evasion") != std::string::npos) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << + "checkRegex: found both 'fast_reg' and 'evasion' in group name."; + + word = "encoded_" + repr_uniq(value); + + // Normally, the word added to the keyword_matches list contain the character sequence. + // However, sometimes (for example in case the sequence contained only unicode characters), + // after running repr_uniq() the word will remain empty string. In this case leave + // something meaningful/readable there. 
+ if (word == "encoded_") { + dbgTrace(D_WAAP_SAMPLE_SCAN) << + "checkRegex: empty word after repr_uniq: resetting word to 'character_encoding'" + " and group to 'evasion'."; + word = "character_encoding"; + } + else if (Waap::Util::str_isalnum(word)) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << + "checkRegex: isalnum word after repr_uniq: resetting group to 'evasion'."; + // If the found match is alphanumeric (we've seen strings like "640x480" match) + // we still should assume evasion but it doesn't need to include "fast_reg", + // which would cause unconditional report to stage2 and hit performance... + // This is why we remove the word "fast_reg" from the group name. + group = "evasion"; + } + + if (longTextFound) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << + "checkRegex: longTextFound so resetting group name to 'longtext'"; + group = "longtext"; + } + } + else { + word = group; + } + } + + // In sequences detected as "longTextFound" or "longBinaryFound", do not add words in the + // "keyword_matches" list that: + // - starts with "encoded_" + // - or startswith("\") + // - or equal to "character_encoding" + if ((longTextFound || binaryDataFound) && + (word == "character_encoding" || word.substr(0, 1) == "\\" || word.substr(0, 8) == "encoded_")) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "Not adding keyword '" << word << "' because longtext was found"; + } + else if (binaryDataFound && (isShortWord(word) || isShortHtmlTag(word) || + NGEN::Regex::regexMatch(__FILE__, __LINE__, group, m_Signatures->binary_data_kw_filter))) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "Not adding group='" << group << "', word='" << word << + "' - due to binary data"; + continue; + } + else if ((std::find( + keyword_matches.begin(), + keyword_matches.end(), + word) == keyword_matches.end())) { + // python: if (word not in current_matches): current_matches.append(word) + keyword_matches.push_back(word); + } + + // python: + // if group not in found_patterns: + // found_patterns[group]=[] + if (found_patterns.find(group) == found_patterns.end()) { + found_patterns[group] = std::vector(); + } + + // python: + // if value not in found_patterns[group]: + // found_patterns[group].append(value) + if (std::find( + found_patterns[group].begin(), + found_patterns[group].end(), + value + ) == found_patterns[group].end()) { + found_patterns[group].push_back(value); + } + } + } + } + + // TODO:: implement onload mechanism. 
+ static bool isOnLoad = 0; + +static void calcRepeatAndWordsCount(const std::string &line, unsigned int &repeat, unsigned int &wordsCount) +{ + repeat = 0; + wordsCount = 0; + int prev = -1; + int prevPrev = -1; + + for (std::string::const_iterator pC = line.begin(); pC != line.end(); ++pC) { + if (*pC == prev || *pC == prevPrev) { + repeat++; + } + + if (Waap::Util::isAlphaAsciiFast(*pC) && !Waap::Util::isAlphaAsciiFast(prev)) { + wordsCount++; + } + + prevPrev = prev; + prev = *pC; + } +} + +static void calcRepetitionAndProbing(Waf2ScanResult &res, const std::set *ignored_keywords, + const std::string &line, bool &detectedRepetition, bool &detectedProbing, unsigned int &wordsCount) +{ + unsigned int repeat; + calcRepeatAndWordsCount(line, repeat, wordsCount); + + if (!detectedRepetition && repeat>100) { // detect potential buffer overflow attacks + dbgTrace(D_WAAP_SAMPLE_SCAN) << "repetition detected: repeat=" << repeat; + detectedRepetition = true; + res.keyword_matches.push_back("repetition"); + } + + // python: + // keywords_num = sum(1 for x in keyword_matches if x not in ignored_keywords) + size_t keywords_num = countNotInSet(res.keyword_matches, *ignored_keywords); + + dbgTrace(D_WAAP_SAMPLE_SCAN) << "wordsCount: " << wordsCount << ", repeat=" << repeat + << ", keyword_matches(num=" << keywords_num << ", size=" << res.keyword_matches.size() << ")"; + + if (!detectedProbing //res.keyword_matches.size() + && keywords_num + 2 > wordsCount + // res.keyword_matches.size() + && keywords_num != 0) + { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "probing detected: keywords_num=" << keywords_num << + ", wordsCount=" << wordsCount; + detectedProbing = true; + res.keyword_matches.push_back("probing"); + } +} + +void +WaapAssetState::filterKeywordsDueToLongText(Waf2ScanResult &res) const +{ + // Test for long value without spaces (these can often cause false alarms) + if (m_Signatures->nospaces_long_value_re.hasMatch(res.unescaped_line)) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "nospaces_long_value matched. may remove some keywords below..."; + // remove some keywords that are often present in such long lines + std::vector &v = res.keyword_matches; + for (std::vector::iterator it = v.begin(); it != v.end();) { + std::string &word = *it; + if (m_Signatures->ignored_for_nospace_long_value.find(word) != + m_Signatures->ignored_for_nospace_long_value.end()) { + dbgTrace(D_WAAP_SAMPLE_SCAN) + << "Removing keyword '" + << word + << "' because nospaces_long_value was found"; + it = v.erase(it); + } + else { + ++it; + } + } + } + +#ifdef WAF2_LOGGING_ENABLE + // Dump interesting statistics and scores + print_filtered("keywords", *ignored_keywords, res.keyword_matches); + print_found_patterns(res.found_patterns); + dbgTrace(D_WAAP_SAMPLE_SCAN) << "keyword_matches.size()=" << res.keyword_matches.size(); +#endif +} + +bool +checkBinaryData(const std::string &line, bool binaryDataFound) +{ + // Test whether count of non-printable characters in the parameter value is too high. + // Note that high-ASCII characters (>=128) are assumed "printable". + // All non-ASCII UTF-8 characters fall into this cathegory. + if (!binaryDataFound && line.size() > 25) { + size_t nonPrintableCharsCount = 0; + + for (size_t i=0; i splitType) const +{ + dbgTrace(D_WAAP_SAMPLE_SCAN) + << "WaapAssetState::apply('" + << line + << "', scanStage=" + << scanStage + << ", splitType='" + << (splitType.ok() ? 
*splitType: "") + << "'"; + + // Handle response scan stages + if (scanStage == "resp_body") { + res.clear(); + SampleValue sample(line, nullptr); + checkRegex(sample, + m_Signatures->resp_body_words_regex_list, + res.keyword_matches, + res.found_patterns, + false, + false); + checkRegex(sample, + m_Signatures->resp_body_pattern_regex_list, + res.keyword_matches, + res.found_patterns, + false, + false); + dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply(): response body " << + (res.keyword_matches.empty() ? "is not" : "is") << " suspicious"; + return !res.keyword_matches.empty(); + } + + if (scanStage == "resp_header") { + res.clear(); + SampleValue sample(line, nullptr); + checkRegex(sample, + m_Signatures->resp_body_words_regex_list, + res.keyword_matches, + res.found_patterns, + false, + false); + checkRegex(sample, + m_Signatures->resp_body_pattern_regex_list, + res.keyword_matches, + res.found_patterns, + false, + false); + dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply(): response header " << + (res.keyword_matches.empty() ? "is not" : "is") << " suspicious"; + return !res.keyword_matches.empty(); + } + + // Only cache values less or equal than MAX_CACHE_VALUE_SIZE + bool shouldCache = (line.size() <= MAX_CACHE_VALUE_SIZE); + + if (shouldCache) { + // Handle cached clean values + CacheKey cache_key(line, scanStage, isBinaryData, splitType.ok() ? *splitType : ""); + if (m_cleanValuesCache.exist(cache_key)) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply('" << line << "'): not suspicious (cache)"; + res.clear(); + return false; + } + + // Handle cached suspicious values (if found - fills out the "res" structure) + if (m_suspiciousValuesCache.get(cache_key, res)) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply('" << line << "'): suspicious (cache)"; + +#ifdef WAF2_LOGGING_ENABLE + // Dump cached result + print_filtered("keywords", std::set(), res.keyword_matches); + print_filtered("patterns", std::set(), res.regex_matches); + print_found_patterns(res.found_patterns); +#endif + return true; + } + } + + dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply('" << line << "'): passed the cache check."; + + const std::set* ignored_keywords = &m_Signatures->global_ignored_keywords; + const std::set* ignored_patterns = &m_Signatures->global_ignored_patterns; + bool isUrlScanStage = false; + bool isHeaderScanStage = false; + + if ((scanStage.size() == 3 && scanStage == "url") || (scanStage.size() == 7 && scanStage == "referer")) { + if (m_Signatures->url_ignored_re.hasMatch(line)) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply('" << line << "'): ignored for URL."; + + if (shouldCache) { + m_cleanValuesCache.insert(CacheKey(line, scanStage, isBinaryData, splitType.ok() ? *splitType : "")); + } + + res.clear(); + return false; + } + + ignored_keywords = &m_Signatures->url_ignored_keywords; + ignored_patterns = &m_Signatures->url_ignored_patterns; + isUrlScanStage = true; + } + else if ((scanStage.size() == 6 && scanStage == "header") || + (scanStage.size() == 6 && scanStage == "cookie")) { + if (m_Signatures->header_ignored_re.hasMatch(line)) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply('" << line << "'): ignored for header."; + + if (shouldCache) { + m_cleanValuesCache.insert(CacheKey(line, scanStage, isBinaryData, splitType.ok() ? 
*splitType : "")); + } + + res.clear(); + return false; + } + + ignored_keywords = &m_Signatures->header_ignored_keywords; + ignored_patterns = &m_Signatures->header_ignored_patterns; + isHeaderScanStage = true; + } + +#if 0 + // Removed by Pavel's request. Leaving here in case he'll want to add this back... + //// Pavel told me he wants to use "global" settings for cookie values, rather than cookie-specific ones here. + //else if (scanStage.size() == 6 && (scanStage == "cookie")) { + // if (cookie_ignored_re.hasMatch(line)) { + // dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply('" << line << "'): ignored for cookie."; + // if (shouldCache) { + // m_cleanValuesCache.insert(CacheKey(line, scanStage)); + // } + // res.clear(); + // return false; + // } + + // ignored_keywords = &cookie_ignored_keywords; + // ignored_patterns = &cookie_ignored_patterns; + //} +#endif + +// Only perform these checks under load + if (isOnLoad) { + // Skip values that are too short + if (line.length() < 3) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply('" << line << + "'): skipping: did not pass the length check."; + + if (shouldCache) { + m_cleanValuesCache.insert(CacheKey(line, scanStage, isBinaryData, splitType.ok() ? *splitType : "")); + } + + res.clear(); + return false; + } + + // Skip values where all characters are alphanumeric + bool allAlNum = true; + + for (std::string::const_iterator pC = line.begin(); pC != line.end(); ++pC) { + if (!isalnum(*pC)) { + allAlNum = false; + break; + } + } + + if (allAlNum) { + if (shouldCache) { + m_cleanValuesCache.insert(CacheKey(line, scanStage, isBinaryData, splitType.ok() ? *splitType : "")); + } + + res.clear(); + return false; + } + + dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply('" << line << "'): passed the stateless checks."; + + // Skip values that are longer than 10 characters, and match allowed_text_re regex + if (line.length() > 10) { + if (m_Signatures->allowed_text_re.hasMatch(line) > 0) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "WaapAssetState::apply('" << line << + "'): matched on allowed_text - ignoring."; + + if (shouldCache) { + m_cleanValuesCache.insert( + CacheKey(line, scanStage, isBinaryData, splitType.ok() ? *splitType : "") + ); + } + + res.clear(); + return false; + } + } + } + + std::string unquote_line = line; + unquote_line.erase(unquote_plus(unquote_line.begin(), unquote_line.end()), unquote_line.end()); + + // If binary data type is detected outside the scanner - enable filtering specific matches/keywords + bool binaryDataFound = + checkBinaryData(unquote_line, isBinaryData) || + checkBinaryData(line, isBinaryData); + + // Complex unescape and then apply lowercase + res.unescaped_line = unescape(line); + + dbgTrace(D_WAAP_SAMPLE_SCAN) << "unescapedLine: '" << res.unescaped_line << "'"; + + // Detect long text spans, and also any-length spans that end with file extensions such as ".jpg" + bool longTextFound = m_Signatures->longtext_re.hasMatch(res.unescaped_line); + + if (longTextFound) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "longtext found"; + } + + dbgTrace(D_WAAP_SAMPLE_SCAN) << "doing first set of checkRegex calls..."; + + // Scan unescaped_line with aho-corasick once, and reuse it in multiple calls to checkRegex below + // This is done to improve performance of regex matching. 
+ SampleValue unescapedLineSample(res.unescaped_line, m_Signatures->m_regexPreconditions); + + checkRegex( + unescapedLineSample, + m_Signatures->specific_acuracy_keywords_regex, + res.keyword_matches, + res.found_patterns, + longTextFound, + binaryDataFound + ); + checkRegex(unescapedLineSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, longTextFound, + binaryDataFound); + + filterKeywordsDueToLongText(res); + + bool detectedRepetition = false; + bool detectedProbing = false; + unsigned int wordsCount = 0; + + // Calculate repetition and/or probing indicators + if (!binaryDataFound) { + calcRepetitionAndProbing(res, ignored_keywords, res.unescaped_line, detectedRepetition, detectedProbing, + wordsCount); + } + + // List of keywords to remove + std::vector keywordsToRemove; + + // Handle semicolon and pipe-split values. + // Specifically exclude split cookie values to avoid high-probability high-impact false positives. + // note: All-digits values triggers fp when prepended with separator, so they are excluded + if (scanStage != "cookie" && splitType.ok() && !Waap::Util::isAllDigits(res.unescaped_line)) { + dbgTrace(D_WAAP_EVASIONS) << "split value detected type='" << *splitType << "' value='" << line << "'"; + + // Split value detected eligible for special handling. Scan it after prepending the appropriate prefix + std::string unescaped; + + std::set keywords_to_filter { + "probing", + "os_cmd_sep_medium_acuracy" + }; + + if (*splitType == "sem") { + keywords_to_filter.insert(";"); + unescaped = ";" + res.unescaped_line; + } else if (*splitType == "pipe") { + keywords_to_filter.insert("|"); + unescaped = "|" + res.unescaped_line; + } + + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + + filterKeywordsDueToLongText(res); + + // If only the filtered keywords were detected (no extras) - filter them. If any extra keyword is detected + // then leave everything + if (countNotInSet(res.keyword_matches, keywords_to_filter) == 0) { + for (const std::string &keyword_to_filter : keywords_to_filter) { + keywordsToRemove.push_back(keyword_to_filter); + } + } + + if (!binaryDataFound) { + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + } + + bool os_cmd_ev = Waap::Util::find_in_map_of_stringlists_keys("os_cmd_ev", res.found_patterns); + + if (os_cmd_ev) { + dbgTrace(D_WAAP_EVASIONS) << "os command evasion found"; + + // Possible os command evasion detected: - clean up and scan with regexes again. 
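+        // The loop below collapses bracket-doubling sequences of the form "[xx]" into a single "x"
+        // (for example, "c[aa]t /etc/passwd" becomes "cat /etc/passwd"); any other '[' is copied
+        // through unchanged, and the cleaned string is only rescanned if it differs from the original.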
+ std::string unescaped; + size_t kwCount = res.keyword_matches.size(); + size_t pos = 0; + size_t found; + + do { + found = res.unescaped_line.find('[', pos); + if (found != std::string::npos) + { + unescaped += res.unescaped_line.substr(pos, found-pos); + if (found < res.unescaped_line.size() - 3 && + res.unescaped_line[found+1] == res.unescaped_line[found+2] && res.unescaped_line[found+3] == ']') + { + unescaped += res.unescaped_line[found+1]; + pos = found+4; // [aa] + } + else + { + unescaped += res.unescaped_line[found]; + pos = found+1; + } + } + } while(found != std::string::npos); + unescaped += res.unescaped_line.substr(pos); // add tail + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + } + + if (kwCount == res.keyword_matches.size()) { + // Remove the evasion keyword if no real evasion found + keywordsToRemove.push_back("os_cmd_ev"); + os_cmd_ev = false; + } + else if (!binaryDataFound) { + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + } + + bool quotes_ev = Waap::Util::find_in_map_of_stringlists_keys("quotes_ev", res.found_patterns); + + if (quotes_ev) { + dbgTrace(D_WAAP_EVASIONS) << "quotes evasion found"; + + // Possible quotes evasion detected: - clean up and scan with regexes again. + + std::string unescaped = m_Signatures->quotes_ev_pattern.sub(res.unescaped_line); + + size_t kwCount = res.keyword_matches.size(); + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + } + + if (kwCount == res.keyword_matches.size()) { + // Remove the evasion keyword if no real evasion found + keywordsToRemove.push_back("quotes_ev"); + quotes_ev = false; + } + else if (!binaryDataFound) { + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + } + + if (Waap::Util::containsInvalidUtf8(line)) { + dbgTrace(D_WAAP_EVASIONS) << "invalid utf-8 evasion found"; + + // Possible quotes evasion detected: - clean up and scan with regexes again. 
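+        // This branch targets invalid UTF-8 byte sequences: they are normalized by
+        // unescapeInvalidUtf8(), passed through the regular unescape() stage, and the result is
+        // rescanned only when it differs from the already-scanned unescaped_line.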
+
+        std::string unescaped = Waap::Util::unescapeInvalidUtf8(line);
+
+        size_t kwCount = res.keyword_matches.size();
+        unescaped = unescape(unescaped);
+
+        if (res.unescaped_line != unescaped) {
+            SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions);
+            checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches,
+                res.found_patterns, longTextFound, binaryDataFound);
+            checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns,
+                longTextFound, binaryDataFound);
+            checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns,
+                longTextFound, binaryDataFound);
+        }
+
+        if (kwCount != res.keyword_matches.size() && !binaryDataFound) {
+            // Recalculate repetition and/or probing indicators
+            unsigned int newWordsCount = 0;
+            calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing,
+                newWordsCount);
+            // Take minimal words count because empirically it means evasion was probably successfully decoded
+            wordsCount = std::min(wordsCount, newWordsCount);
+        }
+    }
+
+    std::string *utf8_broken_line_ptr = nullptr;
+    if (Waap::Util::containsBrokenUtf8(unquote_line)) {
+        utf8_broken_line_ptr = &unquote_line;
+    } else if (Waap::Util::containsBrokenUtf8(line)) {
+        utf8_broken_line_ptr = (std::string*)&line;
+    }
+
+    if (utf8_broken_line_ptr) {
+        dbgTrace(D_WAAP_EVASIONS) << "broken-down utf-8 evasion found";
+        std::string unescaped = Waap::Util::unescapeBrokenUtf8(*utf8_broken_line_ptr);
+        size_t kwCount = res.keyword_matches.size();
+
+        unescaped = unescape(unescaped);
+
+        if (res.unescaped_line != unescaped) {
+            SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions);
+            checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches,
+                res.found_patterns, longTextFound, binaryDataFound);
+            checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns,
+                longTextFound, binaryDataFound);
+            checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns,
+                longTextFound, binaryDataFound);
+        }
+
+        if (kwCount != res.keyword_matches.size() && !binaryDataFound) {
+            // Recalculate repetition and/or probing indicators
+            unsigned int newWordsCount = 0;
+            calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing,
+                newWordsCount);
+            // Take minimal words count because empirically it means evasion was probably successfully decoded
+            wordsCount = std::min(wordsCount, newWordsCount);
+        }
+    }
+
+    bool comment_ev = Waap::Util::find_in_map_of_stringlists_keys("comment_ev", res.found_patterns);
+
+    if (comment_ev) {
+        // Possible comment evasion detected: clean up and scan with regexes again.
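+        // comment_ev_pattern.sub() strips the comment sequences that triggered this indicator, so a
+        // keyword broken up by inline comments (e.g. something like "SEL/**/ECT") would typically be
+        // rejoined before the rescan below; if the rescan finds no new keywords, the "comment_ev"
+        // indicator itself is removed later via keywordsToRemove.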
+ dbgTrace(D_WAAP_EVASIONS) << "comment evasion found"; + + std::string unescaped = m_Signatures->comment_ev_pattern.sub(res.unescaped_line); + size_t kwCount = res.keyword_matches.size(); + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + } + + if (kwCount == res.keyword_matches.size()) { + // Remove the evasion keyword if no real evasion found + keywordsToRemove.push_back("comment_ev"); + comment_ev = false; + } + else if (!binaryDataFound) { + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + } + + bool quoutes_space_evasion = Waap::Util::find_in_map_of_stringlists_keys( + "quotes_space_ev_fast_reg", + res.found_patterns + ); + + if (quoutes_space_evasion) { + // Possible quotes space evasion detected: - clean up and scan with regexes again. + dbgTrace(D_WAAP_EVASIONS) << "quotes space evasion found"; + std::string unescaped = m_Signatures->quotes_space_ev_pattern.sub(res.unescaped_line); + size_t kwCount = res.keyword_matches.size(); + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + } + + if (kwCount == res.keyword_matches.size()) { + // Remove the evasion keyword if no real evasion found + keywordsToRemove.push_back("quotes_space_evasion"); + quoutes_space_evasion = false; + } + else if (!binaryDataFound) { + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + } + + if (Waap::Util::testUrlBareUtf8Evasion(line)) { + // Possible quotes evasion detected: - clean up and scan with regexes again. + dbgTrace(D_WAAP_EVASIONS) << "url_bare_utf8 evasion found"; + + // Revert the encoding and rescan again + // Insert additional '%' character after each sequence of three characters either "%C0" or "%c0". 
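+        // Illustrative example (assumed from the check above): a bare sequence such as "%c0af"
+        // becomes "%c0%af" here, which the second unescape() pass below can then URL-decode and
+        // UTF-8-decode; the exact trigger condition is defined by Waap::Util::testUrlBareUtf8Evasion().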
+ std::string unescaped = line; + replaceAll(unescaped, "%c0", "%c0%"); + replaceAll(unescaped, "%C0", "%C0%"); + + // Run the result through another pass of "unescape" which will now correctly urldecode and utf8-decode it + unescaped = unescape(unescaped); + size_t kwCount = res.keyword_matches.size(); + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + } + + if (kwCount != res.keyword_matches.size() && !binaryDataFound) { + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + } + + if ((res.unescaped_line.find("0x") != std::string::npos) && evasion_hex_regex.hasMatch(res.unescaped_line)) { + dbgTrace(D_WAAP_EVASIONS) << "hex evasion found (in unescaped line)"; + + std::string unescaped = res.unescaped_line; + replaceAll(unescaped, "0x", "\\x"); + unescapeUnicode(unescaped); + dbgTrace(D_WAAP_EVASIONS) << "unescaped =='" << unescaped << "'"; + + size_t kwCount = res.keyword_matches.size(); + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, false, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + false, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + false, binaryDataFound); + } + + if (kwCount != res.keyword_matches.size() && !binaryDataFound) { + for (const auto &kw : res.keyword_matches) { + if (kw.size() < 2 || str_contains(kw, "os_cmd_high_acuracy_fast_reg") || + str_contains(kw, "regex_code_execution") || kw == "character_encoding" || + str_contains(kw, "quotes_ev_fast_reg") || str_contains(kw, "encoded_") || + str_contains(kw, "medium_acuracy") || str_contains(kw, "high_acuracy_fast_reg_xss")) + { + keywordsToRemove.push_back(kw); + } + } + + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + + } + + if ((line.find("0x") != std::string::npos) && evasion_hex_regex.hasMatch(line)) { + dbgTrace(D_WAAP_EVASIONS) << "hex evasion found (in raw line)"; + std::string unescaped = line; + replaceAll(unescaped, "0x", "\\x"); + unescapeUnicode(unescaped); + dbgTrace(D_WAAP_EVASIONS) << "unescape == '" << unescaped << "'"; + + size_t kwCount = res.keyword_matches.size(); + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); 
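+            // Note that both hex-evasion rescans pass false instead of longTextFound, presumably so
+            // that keywords found in the decoded value are not filtered out as long-text noise.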
+ checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, false, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + false, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + false, binaryDataFound); + } + + if (kwCount != res.keyword_matches.size() && !binaryDataFound) { + for (const auto &kw : res.keyword_matches) { + if (kw.size() < 2 || str_contains(kw, "os_cmd_high_acuracy_fast_reg") || + str_contains(kw, "regex_code_execution") || kw == "character_encoding" || + str_contains(kw, "quotes_ev_fast_reg") || str_contains(kw, "encoded_") || + str_contains(kw, "medium_acuracy") || str_contains(kw, "high_acuracy_fast_reg_xss")) + { + keywordsToRemove.push_back(kw); + } + } + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + + } + + if ((res.unescaped_line.find("%") != std::string::npos) && evasion_bad_hex_regex.hasMatch(res.unescaped_line)) { + dbgTrace(D_WAAP_EVASIONS) << "Bad hex evasion found (%c1%1c or %c1%9c in unescaped line)"; + + std::string unescaped = res.unescaped_line; + + unescaped = boost::regex_replace(unescaped, bad_hex_regex, "/"); + unescaped = unescape(unescaped); + dbgTrace(D_WAAP_EVASIONS) << "unescaped =='" << unescaped << "'"; + + size_t kwCount = res.keyword_matches.size(); + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + } + + if (kwCount != res.keyword_matches.size() && !binaryDataFound) { + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + + } + + if ((line.find("%") != std::string::npos) && evasion_bad_hex_regex.hasMatch(line)) { + dbgTrace(D_WAAP_EVASIONS) << "Bad hex evasion found (%c1%1c or %c1%9c in raw line)"; + std::string unescaped = line; + + unescaped = boost::regex_replace(unescaped, bad_hex_regex, "/"); + unescaped = unescape(unescaped); + dbgTrace(D_WAAP_EVASIONS) << "unescaped == '" << unescaped << "'"; + + size_t kwCount = res.keyword_matches.size(); + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + longTextFound, binaryDataFound); + 
checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + } + + if (kwCount != res.keyword_matches.size() && !binaryDataFound) { + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + } + + // python: escape ='hi_acur_fast_reg_evasion' in found_patterns + bool escape = Waap::Util::find_in_map_of_stringlists_keys("evasion", res.found_patterns); + + if (escape) { + // Possible evasion detected: remove unicode \u and \x sequences, + // delete all trash in un_escape_pattern, and scan with regexes again. + dbgTrace(D_WAAP_EVASIONS) << "escape pattern found"; + + std::string unescaped = res.unescaped_line; + + dbgTrace(D_WAAP_EVASIONS) << "unescape'" << unescaped << "'"; + replaceAll(unescaped, "0x", "\\x"); + replaceAll(unescaped, "%u", "\\u"); + std::string zero; + zero.push_back(0); + replaceAll(unescaped, zero, ""); + unescapeUnicode(unescaped); + + // from python: unescaped = un_escape_pattern.sub(r'',line) + ' ' + un_escape_pattern.sub(r' ',line) + // note: "line" in python is called "unescaped" in this code. + unescaped = m_Signatures->un_escape_pattern.sub(unescaped) + " " + + m_Signatures->un_escape_pattern.sub(unescaped, " "); + + size_t kwCount = res.keyword_matches.size(); + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + } + + if (kwCount == res.keyword_matches.size()) { + // Remove the evasion keyword if no real evasion found + keywordsToRemove.push_back("evasion"); + escape = false; + } + else if (!binaryDataFound) { + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + } + + // Detect bash "backslash" evasions + // Note that the search for low binary ASCII codes such as 7 or 8 are done here because + // unescaped_line after unescape() contains post-processed string, where original \b was already converted to + // single character (ASCII 8). + // This should handle cases like /\bin/sh + unsigned char prev_uch = '\0'; + for (char ch : res.unescaped_line) { + unsigned char uch = (unsigned char)ch; + if ((uch >= 0x07 && uch <= 0x0D) || (uch == '\\') || (uch == '/' && prev_uch == '/')) { + escape = true; + break; + } + prev_uch = uch; + } + + if (escape) { + dbgTrace(D_WAAP_EVASIONS) << "try decoding bash evasions"; + + // Possible bash evasion detected: - clean up and scan with regexes again. 
+ dbgTrace(D_WAAP_EVASIONS) << "unescape='" << res.unescaped_line << "'"; + + std::string unescaped; + unescaped.reserve(res.unescaped_line.size()); // preallocate to improve performance of += clauses below + + // Partially revert the effect of the escape_backslashes() function, remove the '\' characters and + // squash string of successive forward slashes to single slash. + // This allows us to decode bash evasions like "/\b\i\n/////s\h" + char prev_ch = '\0'; + for (char ch : res.unescaped_line) { + switch (ch) { + case 7: unescaped += "a"; break; + case 8: unescaped += "b"; break; + case 9: unescaped += "t"; break; + case 10: unescaped += "n"; break; + case 11: unescaped += "v"; break; + case 12: unescaped += "f"; break; + case 13: unescaped += "r"; break; + case '\\': break; // remove backslashes + default: + // squash strings of successive '/' characters into single '/' character + if (prev_ch == '/' && ch == '/') { + break; + } + unescaped += ch; + } + + prev_ch = ch; + } + + size_t kwCount = res.keyword_matches.size(); + + if (res.unescaped_line != unescaped) { + SampleValue unescapedSample(unescaped, m_Signatures->m_regexPreconditions); + checkRegex(unescapedSample, m_Signatures->specific_acuracy_keywords_regex, res.keyword_matches, + res.found_patterns, longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->words_regex, res.keyword_matches, res.found_patterns, + longTextFound, binaryDataFound); + checkRegex(unescapedSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + } + + if (kwCount == res.keyword_matches.size()) { + // Remove the evasion keyword if no real evasion found + keywordsToRemove.push_back("evasion"); + escape = false; + } + else if (!binaryDataFound) { + // Recalculate repetition and/or probing indicators + unsigned int newWordsCount = 0; + calcRepetitionAndProbing(res, ignored_keywords, unescaped, detectedRepetition, detectedProbing, + newWordsCount); + // Take minimal words count because empirically it means evasion was probably succesfully decoded + wordsCount = std::min(wordsCount, newWordsCount); + } + } + + // Remove evasion keywords that should not be reported because there's no real evasion found + if (!keywordsToRemove.empty()) { + dbgTrace(D_WAAP_SAMPLE_SCAN) + << "Removing these keywords (probably due to evasions): " + << Waap::Util::vecToString(keywordsToRemove); + } + + for (const auto &value : keywordsToRemove) { + Waap::Util::remove_startswith(res.keyword_matches, value); + Waap::Util::remove_in_map_of_stringlists_keys(value, res.found_patterns); + } + + + // python: + // if headers: + // keyword_matches = [x for x in keyword_matches if x not in '\(/);$='] + if (isHeaderScanStage) { + removeItemsMatchingSubstringOf(res.keyword_matches, "\\(/);$="); + // For headers, also remove all ignored patterns entirely, not just ignore it from counts + for (const auto &ignored_pattern : *ignored_patterns) { + if (res.found_patterns.erase(ignored_pattern)) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "Removed the found pattern in header: '" << ignored_pattern << "'"; + } + } + } + + // python: + // keywords_num = sum(1 for x in keyword_matches if x not in ignored_keywords) + size_t keywords_num = countNotInSet(res.keyword_matches, *ignored_keywords); + size_t regex_num = countNotInSet(res.regex_matches, *ignored_patterns); + + bool forceReport = isUrlScanStage && Waap::Util::find_in_map_of_stringlists_keys("url", res.found_patterns); + + if (forceReport) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << 
"setting forceReport becacuse we are in url context and " + "'high_acuracy_fast_reg_evation' pattern is found!"; + } + + // python: + // if keywords_num >2 or ('acuracy' in patterns and not headers) or + // special_patten in patterns or 'probing' in keyword_matches or 'repetition' in keyword_matches: + if (keywords_num + regex_num > 2 || + Waap::Util::find_in_map_of_stringlists_keys("acur", res.found_patterns) || + forceReport || + detectedRepetition || + detectedProbing) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "pre-suspicion found."; + // apply regex signatures + checkRegex(unescapedLineSample, m_Signatures->pattern_regex, res.regex_matches, res.found_patterns, + longTextFound, binaryDataFound); + + // python: + // if len(regex_matches) and 'probing' not in keyword_matches: + // if len(keyword_matches+regex_matches)+2>words: + // keyword_matches.append('probing') + if (!binaryDataFound && res.regex_matches.size() > 0 && !detectedProbing) { + // if len(''.join(res.keyword_matches+res.regex_matches))>=alphanumeric_num { + if (res.keyword_matches.size() + res.regex_matches.size() + 2 > wordsCount) { + detectedProbing = true; + res.keyword_matches.push_back("probing"); + } + } + + // python: + // keywords_num = sum(1 for x in keyword_matches if x not in ignored_keywords) + keywords_num = countNotInSet(res.keyword_matches, *ignored_keywords); + regex_num = countNotInSet(res.regex_matches, *ignored_patterns); + + // Regular (medium) acuracy contributes 1 to the score. + // High acuracy contributes 2 to the score. + int acuracy = 0; + + // python: + // if 'acuracy' in patterns and not url: + if (Waap::Util::find_in_map_of_stringlists_keys("acur", res.found_patterns)) + { + acuracy = 1; + // search for "high_acuracy" or "hi_acur" signature names + if (Waap::Util::find_in_map_of_stringlists_keys("high", res.found_patterns) || + Waap::Util::find_in_map_of_stringlists_keys("hi_acur", res.found_patterns)) + { + acuracy = 2; + } + } + + // "Acuracy" contribution alone won't trigger suspicion yet. It needs additional boost + // of finding some keywords and/or matched regexes. + int score = keywords_num + acuracy + (2 * regex_num); + +#ifdef WAF2_LOGGING_ENABLE + // Dump interesting statistics and scores + print_filtered("keywords", *ignored_keywords, res.keyword_matches); + print_filtered("patterns", *ignored_patterns, res.regex_matches); + print_found_patterns(res.found_patterns); + + dbgTrace(D_WAAP_SAMPLE_SCAN) << "before decision: keywords(num=" << keywords_num << ", size=" << + res.keyword_matches.size() << "); regex(num=" << regex_num << ", size=" << res.regex_matches.size() << + "; acuracy=" << acuracy << "; score=" << score << "; forceReport=" << forceReport << "; probing=" << + detectedProbing << "; repetition=" << detectedRepetition << "; 'fast_reg' in found_patterns: " << + Waap::Util::find_in_map_of_stringlists_keys("fast_reg", res.found_patterns); +#endif + + // python: + // if (keywords_num+acuracy+2*regex_num)>2 or special_patten in patterns or + // 'fast_reg' in patterns or 'probing' in keyword_matches or 'repetition' in keyword_matches: + if (score > 2 || + forceReport || + detectedProbing || + detectedRepetition || + Waap::Util::find_in_map_of_stringlists_keys("fast_reg", res.found_patterns)) { + dbgTrace(D_WAAP_SAMPLE_SCAN) << "apply(): suspicion found (score=" << score << ")."; + + if (shouldCache) { + m_suspiciousValuesCache.insert( + {CacheKey(line, scanStage, isBinaryData, splitType.ok() ? 
*splitType : ""), res} + ); + } + + return true; // suspicion found + } + + dbgTrace(D_WAAP_SAMPLE_SCAN) << "apply(): suspicion not found (score=" << score << ")."; + } + + dbgTrace(D_WAAP_SAMPLE_SCAN) << "apply(): not suspicious."; + + if (shouldCache) { + m_cleanValuesCache.insert(CacheKey(line, scanStage, isBinaryData, splitType.ok() ? *splitType : "")); + } + + res.clear(); + return false; +} + +void WaapAssetState::updateScores() +{ + scoreBuilder.snap(); +} + +std::string WaapAssetState::getSignaturesScoresFilePath() const { + return m_SignaturesScoresFilePath; +} + +std::map>& WaapAssetState::getFilterVerbose() +{ + return m_filtered_keywords_verbose; +} + +std::string WaapAssetState::getSignaturesFilterDir() const { + size_t lastSlash = m_SignaturesScoresFilePath.find_last_of('/'); + std::string sigsFilterDir = ((lastSlash == std::string::npos) ? + m_SignaturesScoresFilePath : m_SignaturesScoresFilePath.substr(0, lastSlash)); + dbgTrace(D_WAAP_ASSET_STATE) << " signatures filters directory: " << sigsFilterDir; + return sigsFilterDir; +} + +void WaapAssetState::updateFilterManagerPolicy(IWaapConfig* pConfig) +{ + m_filtersMngr->loadPolicy(pConfig); +} + +bool WaapAssetState::isKeywordOfType(const std::string& keyword, ParamType type) const +{ + return m_typeValidator.isKeywordOfType(keyword, type); +} + +bool WaapAssetState::isBinarySampleType(const std::string & sample) const +{ + // Binary data detection is based on existance of at least two ASCII NUL bytes + size_t nulBytePos = sample.find('\0', 0); + if (nulBytePos != std::string::npos) { + nulBytePos = sample.find('\0', nulBytePos+1); + if (nulBytePos != std::string::npos) { + dbgTrace(D_WAAP_ASSET_STATE) << "binary_input sample type detected (nul bytes)"; + return true; + } + } + + std::vector matches; + m_Signatures->format_magic_binary_re.findAllMatches(sample, matches); + if (!matches.empty()) { + dbgTrace(D_WAAP_ASSET_STATE) << "binary_input sample type detected (signature)"; + return true; + } + + return false; +} + +static Maybe +parse_wbxml_uint8(const std::string & sample, size_t &offset) +{ + if (offset >= sample.size()) { + return genError("not wbxml"); + } + return sample[offset++]; +} + +static Maybe +parse_wbxml_mb_uint32(const std::string & sample, size_t &offset) +{ + uint32_t value = 0; + for (int i=0; i < 5; i++) { + Maybe v = parse_wbxml_uint8(sample, offset); + if (!v.ok()) return genError("not wbxml"); + value = (value << 7) | (*v & 0x7F); + if ((*v & 0x80) == 0) { + return value; + } + } + return genError("not wbxml"); +} + +bool WaapAssetState::isWBXMLSampleType(const std::string & sample) const +{ + size_t offset = 0; + // Parse protocol version + Maybe version = parse_wbxml_uint8(sample, offset); + // Support only wbxml protocol versions 1-3 which can be more or less reliably detected + if (!version.ok() || *version==0 || *version > 0x03) return false; + // Parse public id + Maybe public_id = parse_wbxml_mb_uint32(sample, offset); + if (!public_id.ok()) return false; + // Parse and validate charset (this is optional for v0 but we don't detect v0 anyway) + Maybe charset = parse_wbxml_mb_uint32(sample, offset); + if (!charset.ok()) return false; + // Only subset of charsets are allowed + static const uint32_t allowed_charsets[] = {0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 17, 106, 1000, 1015, 2026}; + if (std::find(std::begin(allowed_charsets), std::end(allowed_charsets), *charset) == + std::end(allowed_charsets)) + { + return false; + } + Maybe strtbl_len = parse_wbxml_mb_uint32(sample, offset); + return 
strtbl_len.ok() && *strtbl_len <= sample.size() - offset; +} + +std::set WaapAssetState::getSampleType(const std::string & sample) const +{ + std::set types; + bool shouldCache = (sample.size() <= MAX_CACHE_VALUE_SIZE); + + // Handle cached clean values + if (shouldCache && m_sampleTypeCache.exist(sample)) { + dbgTrace(D_WAAP_ASSET_STATE) << "WaapAssetState::getSampleType() sample: '" << sample << + "' type is unknown (cache)"; + types.insert("unknown"); + return types; + } + + for (auto& type_re : m_Signatures->params_type_re) + { + dbgTrace(D_WAAP_ASSET_STATE) << "WaapAssetState::getSampleType checking: " << sample << + " against " << type_re.first; + std::vector matches; + type_re.second->findAllMatches(sample, matches); + + dbgTrace(D_WAAP_ASSET_STATE) << "number of matched keywords: " << matches.size(); + if (matches.empty()) + { + continue; + } + + types.insert(type_re.first); + } + + // Binary data detection is based on existance of at least two ASCII NUL bytes + if (isBinarySampleType(sample)) { + dbgTrace(D_WAAP_ASSET_STATE) << "reporting binary_input sample type"; + types.insert("binary_input"); + } + + if (types.empty()) + { + types.insert("unknown"); + m_sampleTypeCache.insert(sample); + } + + return types; +} + +void WaapAssetState::logIndicatorsInFilters(const std::string ¶m, Waap::Keywords::KeywordsSet& keywords, + IWaf2Transaction* pTransaction) +{ + m_filtersMngr->registerKeywords(param, keywords, pTransaction); +} + +void WaapAssetState::logParamHit(Waf2ScanResult& res, IWaf2Transaction* pTransaction) +{ + Waap::Keywords::KeywordsSet emptySet; + std::string key = IndicatorsFiltersManager::generateKey(res.location, res.param_name, pTransaction); + m_filtersMngr->registerKeywords(key, emptySet, pTransaction); +} + +void WaapAssetState::filterKeywords( + const std::string ¶m, + Waap::Keywords::KeywordsSet& keywords, + std::vector& filteredKeywords) +{ + dbgTrace(D_WAAP_ASSET_STATE) << "filter keywords"; + m_filtersMngr->filterKeywords(param, keywords, filteredKeywords); +} + +void WaapAssetState::clearFilterVerbose() +{ + m_filtered_keywords_verbose.clear(); +} + +void WaapAssetState::filterVerbose(const std::string ¶m, + std::vector& filteredKeywords) +{ + m_filtersMngr->filterVerbose(param, filteredKeywords, m_filtered_keywords_verbose); +} + +void WaapAssetState::filterKeywordsByParameters( + const std::string ¶meter_name, Waap::Keywords::KeywordsSet &keywords_set) +{ + dbgTrace(D_WAAP_ASSET_STATE) << "filter keywords based on parameter name: " << parameter_name; + auto filter_parameters_itr = m_Signatures->filter_parameters.find(parameter_name); + if (filter_parameters_itr != m_Signatures->filter_parameters.end()) + { + dbgTrace(D_WAAP_ASSET_STATE) << "Found keywords to filter based on parameter name"; + const auto &vec = filter_parameters_itr->second; + for (auto keyword_to_filter : vec) + { + auto keywords_set_itr = keywords_set.find(keyword_to_filter); + if (keywords_set_itr != keywords_set.end()) + { + dbgTrace(D_WAAP_ASSET_STATE) << "Filtering keyword: " << keyword_to_filter; + keywords_set.erase(keyword_to_filter); + } + } + } + else + { + dbgTrace(D_WAAP_ASSET_STATE) << "No keywords need to be filter for this parameter"; + } +} + +void WaapAssetState::removeKeywords(Waap::Keywords::KeywordsSet &keywords_set) +{ + for (auto &keyword_to_remove : m_Signatures->remove_keywords_always) + { + auto keyword_set_itr = keywords_set.find(keyword_to_remove); + if (keyword_set_itr != keywords_set.end()) + { + dbgTrace(D_WAAP_ASSET_STATE) << "Removing keyword: " << 
keyword_to_remove << " from keyword set"; + keywords_set.erase(keyword_set_itr); + } + } +} + +void WaapAssetState::removeWBXMLKeywords(Waap::Keywords::KeywordsSet &keywords_set, + std::vector &filtered_keywords) +{ + for (auto it = keywords_set.begin(); it != keywords_set.end();) { + if (NGEN::Regex::regexMatch(__FILE__, __LINE__, *it, m_Signatures->wbxml_data_kw_filter)) { + dbgTrace(D_WAAP_ASSET_STATE) << "Filtering keyword due to wbxml: '" << *it << "'"; + filtered_keywords.push_back(*it); + it = keywords_set.erase(it); + } + else { + ++it; + } + } +} + +void WaapAssetState::createRateLimitingState(const std::shared_ptr &rateLimitingPolicy) +{ + m_rateLimitingState = std::make_shared(rateLimitingPolicy); +} + +void WaapAssetState::createErrorLimitingState(const std::shared_ptr &errorLimitingPolicy) +{ + m_errorLimitingState = std::make_shared(errorLimitingPolicy); +} + +void WaapAssetState::createSecurityHeadersState( + const std::shared_ptr &securityHeadersPolicy) +{ + m_securityHeadersState = std::make_shared(securityHeadersPolicy); +} + +std::shared_ptr& WaapAssetState::getRateLimitingState() +{ + return m_rateLimitingState; +} + +std::shared_ptr& WaapAssetState::getErrorLimitingState() +{ + return m_errorLimitingState; +} + +std::shared_ptr& WaapAssetState::getSecurityHeadersState() +{ + return m_securityHeadersState; +} + +void WaapAssetState::clearRateLimitingState() +{ + m_rateLimitingState.reset(); +} + +void WaapAssetState::clearErrorLimitingState() +{ + m_errorLimitingState.reset(); +} + +void WaapAssetState::clearSecurityHeadersState() +{ + m_securityHeadersState.reset(); +} + diff --git a/components/security_apps/waap/waap_clib/WaapAssetState.h b/components/security_apps/waap/waap_clib/WaapAssetState.h new file mode 100755 index 0000000..362dc54 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapAssetState.h @@ -0,0 +1,171 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
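+//
+// WaapAssetState.h: per-asset scanning state for the WAAP engine - the compiled signature set,
+// keyword/indicator filtering helpers, rate-limiting, error-limiting and security-headers state,
+// and the LRU caches of recently seen clean and suspicious values used by apply().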
+ +#ifndef __WAF2_SIGS_H__02a5bdaa +#define __WAF2_SIGS_H__02a5bdaa + +#include "Waf2Regex.h" +#include "Signatures.h" +#include "picojson.h" +#include "lru_cache_set.h" +#include "lru_cache_map.h" +#include +#include +#include +#include +#include "ScoreBuilder.h" +#include "i_encryptor.h" +#include "i_waap_asset_state.h" +#include "RateLimiting.h" +#include "SecurityHeadersPolicy.h" +#include "WaapDefines.h" +#include "IndicatorsFiltersManager.h" +#include "WaapKeywords.h" +#include "KeywordTypeValidator.h" +#include "ScanResult.h" +#include "WaapSampleValue.h" + +class IWaf2Transaction; + +class WaapAssetState : public boost::noncopyable, public I_WaapAssetState +{ +private: //ugly but needed for build + std::shared_ptr m_Signatures; + std::string m_SignaturesScoresFilePath; + std::map> m_filtered_keywords_verbose; + + void checkRegex(const SampleValue &sample, const Regex & pattern, std::vector& keyword_matches, + Waap::Util::map_of_stringlists_t & found_patterns, bool longTextFound, bool binaryDataFound) const; + + void filterKeywordsDueToLongText(Waf2ScanResult &res) const; + +public: + // Load and compile signatures from file + explicit WaapAssetState(std::shared_ptr signatures, const std::string& sigScoresFname, + size_t cleanCacheCapacity = SIGS_APPLY_CLEAN_CACHE_CAPACITY, + size_t suspiciousCacheCapacity = SIGS_APPLY_SUSPICIOUS_CACHE_CAPACITY, + size_t sampleTypeCacheCapacity = SIGS_SAMPLE_TYPE_CACHE_CAPACITY, + const std::string& assetId = ""); + explicit WaapAssetState(const std::shared_ptr& pWaapAssetState, const std::string& sigScoresFname, + const std::string& assetId); + virtual ~WaapAssetState(); + + std::shared_ptr getSignatures() const; + void reset(); + + const std::string m_assetId; + + ScoreBuilder scoreBuilder; + std::shared_ptr m_rateLimitingState; + std::shared_ptr m_errorLimitingState; + std::shared_ptr m_securityHeadersState; + std::shared_ptr m_filtersMngr; + KeywordTypeValidator m_typeValidator; + + bool apply(const std::string &v, Waf2ScanResult &res, const std::string &scanStage, bool isBinaryData=false, + const Maybe splitType=genError("not splitted")) const; + + virtual void updateScores(); + virtual std::string getSignaturesScoresFilePath() const; + virtual std::string getSignaturesFilterDir() const; + std::map>& getFilterVerbose(); + + void updateFilterManagerPolicy(IWaapConfig* pConfig); + virtual bool isKeywordOfType(const std::string& keyword, ParamType type) const; + virtual bool isBinarySampleType(const std::string& sample) const; + virtual bool isWBXMLSampleType(const std::string &sample) const; + virtual std::set getSampleType(const std::string& sample) const; + void logIndicatorsInFilters(const std::string ¶m, Waap::Keywords::KeywordsSet& keywords, + IWaf2Transaction* pTransaction); + void logParamHit(Waf2ScanResult& res, IWaf2Transaction* pTransaction); + void filterKeywords(const std::string ¶m, Waap::Keywords::KeywordsSet& keywords, + std::vector& filteredKeywords); + void clearFilterVerbose(); + void filterVerbose(const std::string ¶m, + std::vector& filteredKeywords); + void filterKeywordsByParameters(const std::string ¶meter_name, Waap::Keywords::KeywordsSet &keywords_set); + void removeKeywords(Waap::Keywords::KeywordsSet &keywords_set); + void removeWBXMLKeywords(Waap::Keywords::KeywordsSet &keywords_set, std::vector &filtered_keywords); + + void createRateLimitingState(const std::shared_ptr &rateLimitingPolicy); + void createErrorLimitingState(const std::shared_ptr &errorLimitingPolicy); + void createSecurityHeadersState(const 
std::shared_ptr &securityHeadersPolicy); + + void clearRateLimitingState(); + void clearErrorLimitingState(); + void clearSecurityHeadersState(); + + std::shared_ptr& getRateLimitingState(); + std::shared_ptr& getErrorLimitingState(); + std::shared_ptr& getSecurityHeadersState(); + + // Key for the caches includes input values passed to the WaapAssetState::apply() + struct CacheKey { + std::string line; + std::string scanStage; + bool isBinaryData; + std::string splitType; + CacheKey( + const std::string &line, + const std::string &scanStage, + bool isBinaryData, + const std::string &splitType) + : + line(line), + scanStage(scanStage), + isBinaryData(isBinaryData), + splitType(splitType) + { + } + + // comparison operator should be implemented to use this struct as a key in an LRU cache. + bool operator==(CacheKey const& other) const + { + return + line == other.line && + scanStage == other.scanStage && + isBinaryData == other.isBinaryData && + splitType == other.splitType; + } + }; + + // LRU caches are used to increase performance of apply() method for most frequent values + mutable LruCacheSet m_cleanValuesCache; + mutable LruCacheMap m_suspiciousValuesCache; + mutable LruCacheSet m_sampleTypeCache; +}; + +// Support efficient hashing for the CacheKey struct so it can participate in unordered (hashed) containers +inline std::size_t hash_value(WaapAssetState::CacheKey const &cacheKey) +{ + std::size_t hash = 0; + boost::hash_combine(hash, cacheKey.line); + boost::hash_combine(hash, cacheKey.scanStage); + return hash; +} + +void filterUnicode(std::string & text); +void replaceUnicodeSequence(std::string & text, const char repl); +std::string unescape(const std::string & s); + +// This if function is exposed to be tested by unit tests +void +checkRegex( + std::string line, + const Regex &pattern, + std::vector& keyword_matches, + std::vector& keyword_matches_raw, + Waap::Util::map_of_stringlists_t &found_patterns, + bool longTextFound); + +#endif // __WAF2_SIGS_H__02a5bdaa diff --git a/components/security_apps/waap/waap_clib/WaapAssetStatesManager.cc b/components/security_apps/waap/waap_clib/WaapAssetStatesManager.cc new file mode 100755 index 0000000..8819f1e --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapAssetStatesManager.cc @@ -0,0 +1,187 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
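+//
+// WaapAssetStatesManager builds and caches WaapAssetState instances: a global state loaded from
+// the basic signature files, plus per-asset states keyed by assetId (and instance id when
+// instance awareness is available), each with its own scores file under the configured base folder.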
+ +#include "WaapAssetStatesManager.h" +#include "WaapDefines.h" +#include "WaapAssetState.h" +#include "i_waapConfig.h" +#include "config.h" +#include "agent_core_utilities.h" + +USE_DEBUG_FLAG(D_WAAP); + +WaapAssetStatesManager::WaapAssetStatesManager() : pimpl(std::make_unique()) +{ +} + +WaapAssetStatesManager::~WaapAssetStatesManager() +{ +} + +void WaapAssetStatesManager::preload() +{ + registerExpectedConfiguration("waap data", "base folder"); +} + +bool WaapAssetStatesManager::initBasicWaapSigs(const std::string& sigsFname, const std::string& sigScoresFname) +{ + return pimpl->initBasicWaapSigs(sigsFname, sigScoresFname); +} + +std::shared_ptr WaapAssetStatesManager::getWaapAssetStateGlobal() +{ + return pimpl->getWaapAssetStateGlobal(); +} + +std::shared_ptr WaapAssetStatesManager::getWaapAssetStateById(const std::string& assetId) +{ + return pimpl->getWaapAssetStateById(assetId); +} + +void WaapAssetStatesManager::setAssetDirectoryPath(const std::string &assetDirectoryPath) +{ + return pimpl->setAssetDirectoryPath(assetDirectoryPath); +} + +WaapAssetStatesManager::Impl::Impl() : + m_signatures(nullptr), + m_basicWaapSigs(nullptr), + m_AssetBasedWaapSigs(), + m_assetDirectoryPath(BACKUP_DIRECTORY_PATH) +{ +} + +WaapAssetStatesManager::Impl::~Impl() +{ +} + +bool WaapAssetStatesManager::Impl::initBasicWaapSigs(const std::string& sigsFname, const std::string& sigScoresFname) +{ + if (m_signatures && !m_signatures->fail() && m_basicWaapSigs) + { + // already initialized successfully. + return true; + } + try { + m_signatures = std::make_shared(sigsFname); + m_basicWaapSigs = std::make_shared( + m_signatures, + sigScoresFname, + SIGS_APPLY_CLEAN_CACHE_CAPACITY, + SIGS_APPLY_SUSPICIOUS_CACHE_CAPACITY); + } + catch (std::runtime_error & e) { + // TODO:: properly handle component initialization failure + dbgTrace(D_WAAP) << + "WaapAssetStatesManager::initBasicWaapSigs(): " << e.what() << ". 
Failed to read signature files" + " "<< sigsFname << " and " << sigScoresFname << "."; + m_basicWaapSigs.reset(); + return false; + } + + return m_signatures && !m_signatures->fail() && m_basicWaapSigs; +} + +std::shared_ptr WaapAssetStatesManager::Impl::getWaapAssetStateGlobal() +{ + return m_basicWaapSigs; +} + +std::shared_ptr WaapAssetStatesManager::Impl::getWaapAssetStateById(const std::string& assetId) +{ + if (assetId.size() > 0) + { + std::string sigsKey = assetId; + std::string instanceId = ""; + if (Singleton::exists()) + { + I_InstanceAwareness* instance = Singleton::Consume::by(); + Maybe uniqueId = instance->getUniqueID(); + if (uniqueId.ok()) + { + instanceId = uniqueId.unpack(); + sigsKey += "/" + instanceId; + } + } + std::unordered_map>::iterator it; + it = m_AssetBasedWaapSigs.find(sigsKey); + + if (it != m_AssetBasedWaapSigs.end()) + { + return it->second; + } + + if (m_basicWaapSigs == NULL) { + dbgWarning(D_WAAP) << + "WaapAssetStatesManager::Impl::getWaapAssetStateById(): ERROR: m_basicWaapSigs == NULL!"; + return std::shared_ptr(nullptr); + } + + std::shared_ptr newWaapSigs = CreateWaapSigsForAsset(m_basicWaapSigs, assetId, instanceId); + + if (newWaapSigs) + { + m_AssetBasedWaapSigs[sigsKey] = newWaapSigs; + } + + return newWaapSigs; + } + + return std::shared_ptr(nullptr); +} + +void WaapAssetStatesManager::Impl::setAssetDirectoryPath(const std::string &assetDirectoryPath) +{ + m_assetDirectoryPath = assetDirectoryPath; +} + +std::shared_ptr +WaapAssetStatesManager::Impl::CreateWaapSigsForAsset(const std::shared_ptr& pWaapAssetState, + const std::string& assetId, + const std::string& instanceId) +{ + std::string assetPath = + getConfigurationWithDefault(m_assetDirectoryPath, "waap data", "base folder") + + assetId; + if (instanceId != "") + { + assetPath += "/" + instanceId; + } + if (!NGEN::Filesystem::exists(assetPath)) + { + if (!NGEN::Filesystem::makeDirRecursive(assetPath)) + { + dbgWarning(D_WAAP) + << "WaapAssetStatesManager::CreateWaapSigsForAsset() can't create asset folder. " + << "Directory: " + << assetPath; + return std::shared_ptr(nullptr); + } + } + + dbgTrace(D_WAAP) << "WaapAssetStatesManager::CreateWaapSigsForAsset() assetPath is: " << assetPath; + + if (pWaapAssetState == NULL) { + dbgWarning(D_WAAP) << + "WaapAssetStatesManager::CreateWaapSigsForAsset(): failed to create a WaapAssetState object"; + return std::shared_ptr(nullptr); + + } + + std::string basePath = pWaapAssetState->getSignaturesScoresFilePath(); + size_t lastSlash = basePath.find_last_of('/'); + std::string assetScoresPath = assetPath + + ((lastSlash == std::string::npos) ? basePath : basePath.substr(lastSlash)); + dbgTrace(D_WAAP) << "WaapAssetStatesManager::CreateWaapSigsForAsset() assetScoresPath is: " << assetScoresPath; + return std::make_shared(pWaapAssetState, assetScoresPath, assetId); +} diff --git a/components/security_apps/waap/waap_clib/WaapAssetStatesManager.h b/components/security_apps/waap/waap_clib/WaapAssetStatesManager.h new file mode 100755 index 0000000..b900018 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapAssetStatesManager.h @@ -0,0 +1,71 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "singleton.h" +#include "Signatures.h" +#include +#include +#include + +//forward decleration. +class WaapAssetState; + +class I_WaapAssetStatesManager { +public: + virtual bool initBasicWaapSigs(const std::string& sigsFname, const std::string& sigScoresFname) = 0; + virtual std::shared_ptr getWaapAssetStateGlobal() = 0; + virtual std::shared_ptr getWaapAssetStateById(const std::string& assetId) = 0; + virtual void setAssetDirectoryPath(const std::string &assetDirectoryPath) = 0; +}; + +class WaapAssetStatesManager : Singleton::Provide { +public: + WaapAssetStatesManager(); + virtual ~WaapAssetStatesManager(); + + void preload(); + virtual bool initBasicWaapSigs(const std::string& sigsFname, const std::string& sigScoresFname); + virtual std::shared_ptr getWaapAssetStateGlobal(); + virtual std::shared_ptr getWaapAssetStateById(const std::string& assetId); + + virtual void setAssetDirectoryPath(const std::string &assetDirectoryPath); + + class Impl; +protected: + std::unique_ptr pimpl; +}; + +class WaapAssetStatesManager::Impl : Singleton::Provide::From +{ +public: + Impl(); + virtual ~Impl(); + + virtual bool initBasicWaapSigs(const std::string& sigsFname, const std::string& sigScoresFname); + virtual std::shared_ptr getWaapAssetStateGlobal(); + virtual std::shared_ptr getWaapAssetStateById(const std::string& assetId); + virtual void setAssetDirectoryPath(const std::string &assetDirectoryPath); + +private: + std::shared_ptr + CreateWaapSigsForAsset(const std::shared_ptr& pWaapAssetState, + const std::string& assetId, + const std::string& instanceId); + + std::shared_ptr m_signatures; + std::shared_ptr m_basicWaapSigs; + std::unordered_map> m_AssetBasedWaapSigs; + std::string m_assetDirectoryPath; +}; diff --git a/components/security_apps/waap/waap_clib/WaapConfigApi.cc b/components/security_apps/waap/waap_clib/WaapConfigApi.cc new file mode 100755 index 0000000..c9a1c89 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapConfigApi.cc @@ -0,0 +1,114 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "WaapConfigApi.h" +#include "Waf2Util.h" + +#include "telemetry.h" + +using namespace std; + +USE_DEBUG_FLAG(D_WAAP); + +const string WaapConfigAPI::s_PracticeSubType = "Web API"; +set WaapConfigAPI::assets_ids{}; +set WaapConfigAPI::assets_ids_aggregation{}; + +bool +WaapConfigAPI::getWaapAPIConfig(WaapConfigAPI& ngenAPIConfig) { + auto &maybe_ngen_config = getConfiguration( + "WAAP", + "WebAPISecurity" + ); + + if (!maybe_ngen_config.ok()) { + dbgDebug(D_WAAP) << "Unable to get WAAP WebAPISecurity from configuration" << maybe_ngen_config.getErr(); + return false; + } + + ngenAPIConfig = maybe_ngen_config.unpack(); + return true; +} + +WaapConfigAPI::WaapConfigAPI() : WaapConfigBase() +{} + +void +WaapConfigAPI::notifyAssetsCount() +{ + WaapConfigAPI::assets_ids = WaapConfigAPI::assets_ids_aggregation; + AssetCountEvent(AssetType::API, WaapConfigAPI::assets_ids.size()).notify(); +} + +void +WaapConfigAPI::clearAssetsCount() +{ + WaapConfigAPI::assets_ids_aggregation.clear(); +} + +#if 0 // maybe will be used in the future +WaapConfigAPI::WaapConfigAPI( + bool autonomousSecurity, + string autonomousSecurityLevel, + string assetId, + string assetName, + string practiceId, + string practiceName, + string ruleId, + string ruleName, + bool schemaValidation) : + WaapConfigBase( + autonomousSecurity, + autonomousSecurityLevel, + assetId, + assetName, + practiceId, + practiceName, + ruleId, + ruleName), + m_schemaValidation(schemaValidation) +{ +} +#endif + +void WaapConfigAPI::load(cereal::JSONInputArchive& ar) +{ + // order has affect - we need to call base last because of triggers and overrides + readJSONByCereal(ar); + + WaapConfigBase::load(ar); + assets_ids_aggregation.insert(m_assetId); +} + +void WaapConfigAPI::readJSONByCereal(cereal::JSONInputArchive &ar) +{ +} + +bool WaapConfigAPI::operator==(const WaapConfigAPI& other) const +{ + const WaapConfigBase* configBase = this; + const WaapConfigBase& configBaseOther = other; + + return *configBase == configBaseOther; +} + +void WaapConfigAPI::printMe(ostream& os) const +{ + WaapConfigBase::printMe(os); +} + +const string& WaapConfigAPI::get_PracticeSubType() const +{ + return s_PracticeSubType; +} + diff --git a/components/security_apps/waap/waap_clib/WaapConfigApi.h b/components/security_apps/waap/waap_clib/WaapConfigApi.h new file mode 100755 index 0000000..ef3b4d8 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapConfigApi.h @@ -0,0 +1,60 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
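+
+// Asset-count bookkeeping sketch (illustrative; the exact reporting cycle is assumed and not
+// shown in this file): each successful load() inserts the asset ID into
+// assets_ids_aggregation; notifyAssetsCount() snapshots the aggregation into assets_ids and
+// emits an AssetCountEvent, and clearAssetsCount() resets the aggregation for the next
+// policy-load round.
+//
+//     WaapConfigAPI::clearAssetsCount();     // start of a new load cycle
+//     // ... cereal-driven load() calls insert asset IDs ...
+//     WaapConfigAPI::notifyAssetsCount();    // publish AssetCountEvent(AssetType::API, n)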
+ +#pragma once +#ifndef __WAAP_CONFIG_API_H__ +#define __WAAP_CONFIG_API_H__ + +#include + +#include "WaapConfigBase.h" +#include "log_generator.h" +#include "debug.h" + +class WaapConfigAPI : public WaapConfigBase +{ +public: + WaapConfigAPI(); +#if 0 // maybe will be used in the future + WaapConfigAPI( + bool autonomousSecurity, + std::string autonomousSecurityLevel, + std::string assetId, + std::string assetName, + std::string practiceId, + std::string practiceName, + std::string ruleId, + std::string ruleName, + bool schemaValidation); +#endif + + void load(cereal::JSONInputArchive& ar); + bool operator==(const WaapConfigAPI& other) const; + void printMe(std::ostream& os) const; + + virtual const std::string& get_PracticeSubType() const; + static bool getWaapAPIConfig(WaapConfigAPI &ngenAPIConfig); + static void notifyAssetsCount(); + static void clearAssetsCount(); + +private: + void readJSONByCereal(cereal::JSONInputArchive&ar); + + std::string m_schemaValidationPoicyStatusMessage; + + static const std::string s_PracticeSubType; + static std::set assets_ids; + static std::set assets_ids_aggregation; +}; + +#endif // __WAAP_CONFIG_API_H__ diff --git a/components/security_apps/waap/waap_clib/WaapConfigApplication.cc b/components/security_apps/waap/waap_clib/WaapConfigApplication.cc new file mode 100755 index 0000000..3f499b4 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapConfigApplication.cc @@ -0,0 +1,87 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
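+
+// Usage sketch (illustrative; the surrounding control flow is assumed): the "Web Application"
+// practice configuration is resolved the same way as the API practice, via the static helper
+// that wraps the "WAAP" / "WebApplicationSecurity" configuration lookup.
+//
+//     WaapConfigApplication siteConfig;
+//     if (WaapConfigApplication::getWaapSiteConfig(siteConfig)) {
+//         BlockingLevel level = siteConfig.get_BlockingLevel();
+//         (void)level;
+//     }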
+ +#include "WaapConfigApplication.h" +#include "telemetry.h" + +using namespace std; + +USE_DEBUG_FLAG(D_WAAP); + +const string WaapConfigApplication::s_PracticeSubType = "Web Application"; +set WaapConfigApplication::assets_ids{}; +set WaapConfigApplication::assets_ids_aggregation{}; + +bool WaapConfigApplication::getWaapSiteConfig(WaapConfigApplication& ngenSiteConfig) { + auto &maybe_ngen_config = getConfiguration( + "WAAP", + "WebApplicationSecurity" + ); + + if (!maybe_ngen_config.ok()) + { + dbgDebug(D_WAAP) << maybe_ngen_config.getErr(); + return false; + } + + ngenSiteConfig = maybe_ngen_config.unpack(); + return true; +} + +WaapConfigApplication::WaapConfigApplication() : + WaapConfigBase() +{ +} + +void +WaapConfigApplication::notifyAssetsCount() +{ + WaapConfigApplication::assets_ids = WaapConfigApplication::assets_ids_aggregation; + AssetCountEvent(AssetType::WEB, WaapConfigApplication::assets_ids.size()).notify(); +} + +void +WaapConfigApplication::clearAssetsCount() +{ + WaapConfigApplication::assets_ids_aggregation.clear(); +} + +const string& WaapConfigApplication::get_PracticeSubType() const +{ + return s_PracticeSubType; +} + +void WaapConfigApplication::load(cereal::JSONInputArchive& ar) +{ + WaapConfigBase::load(ar); + loadOpenRedirectPolicy(ar); + loadErrorDisclosurePolicy(ar); + loadCsrfPolicy(ar); + loadSecurityHeadersPolicy(ar); + + assets_ids_aggregation.insert(m_assetId); +} + + +bool WaapConfigApplication::operator==(const WaapConfigApplication& other) const +{ + const WaapConfigBase* configBase = this; + const WaapConfigBase& configBaseOther = other; + + return *configBase==configBaseOther; +} + +void WaapConfigApplication::printMe(ostream& os) const +{ + WaapConfigBase::printMe(os); +} diff --git a/components/security_apps/waap/waap_clib/WaapConfigApplication.h b/components/security_apps/waap/waap_clib/WaapConfigApplication.h new file mode 100755 index 0000000..f6149d1 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapConfigApplication.h @@ -0,0 +1,57 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
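+
+// Load-order note (derived from the accompanying .cc file): WaapConfigApplication::load()
+// first runs WaapConfigBase::load() (overrides, triggers, trusted sources, parameters,
+// user limits, rate and error limiting) and then layers the application-only policies on
+// top: open redirect, error disclosure, CSRF and security headers. WaapConfigAPI, by
+// contrast, loads only the base set.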
+ +#pragma once +#ifndef __WAAP_CONFIG_APPLICATION_H__ +#define __WAAP_CONFIG_APPLICATION_H__ + +#include + +#include "WaapConfigBase.h" +#include "log_generator.h" +#include "debug.h" + +class WaapConfigApplication : public WaapConfigBase +{ +public: + WaapConfigApplication(); +#if 0 // maybe will be used in the future + WaapConfigApplication( + bool autonomousSecurity, + std::string autonomousSecurityLevel, + std::string assetId, + std::string assetName, + std::string practiceId, + std::string practiceName, + std::string ruleId, + std::string ruleName, + bool botProtection); +#endif + + bool operator==(const WaapConfigApplication& other) const; + + virtual const std::string& get_PracticeSubType() const; + + void load(cereal::JSONInputArchive& ar); + void printMe(std::ostream& os) const; + static bool getWaapSiteConfig(WaapConfigApplication& ngenSiteConfig); + static void notifyAssetsCount(); + static void clearAssetsCount(); + +private: + static const std::string s_PracticeSubType; + static std::set assets_ids; + static std::set assets_ids_aggregation; +}; + +#endif // __WAAP_CONFIG_APPLICATION_H__ diff --git a/components/security_apps/waap/waap_clib/WaapConfigBase.cc b/components/security_apps/waap/waap_clib/WaapConfigBase.cc new file mode 100755 index 0000000..63d454f --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapConfigBase.cc @@ -0,0 +1,450 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
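+
+// Sensitivity-to-blocking-level mapping implemented by blockingLevelBySensitivityStr()
+// further down in this file (comparison is case-insensitive):
+//
+//     "transparent" -> BlockingLevel::NO_BLOCKING
+//     "low"         -> BlockingLevel::LOW_BLOCKING_LEVEL
+//     "balanced"    -> BlockingLevel::MEDIUM_BLOCKING_LEVEL
+//     "high"        -> BlockingLevel::HIGH_BLOCKING_LEVEL
+//     anything else -> BlockingLevel::NO_BLOCKING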
+ +#include "WaapConfigBase.h" +#include +#include "WaapConfigApplication.h" +#include "WaapOverride.h" +#include "WaapTrigger.h" +#include "WaapOpenRedirectPolicy.h" +#include "CsrfPolicy.h" +#include "WaapErrorDisclosurePolicy.h" +#include "TrustedSources.h" +#include "Waf2Util.h" + +USE_DEBUG_FLAG(D_WAAP_ULIMITS); +using boost::algorithm::to_lower_copy; + +WaapConfigBase::WaapConfigBase() + : + m_assetId(""), + m_autonomousSecurityLevel(""), + m_autonomousSecurity(false), + m_assetName(""), + m_practiceId(""), + m_practiceName(""), + m_ruleId(""), + m_ruleName(""), + m_overridePolicy(nullptr), + m_triggerPolicy(nullptr), + m_trustedSourcesPolicy(nullptr), + m_waapParameters(nullptr), + m_openRedirectPolicy(nullptr), + m_errorDisclosurePolicy(nullptr), + m_csrfPolicy(nullptr), + m_rateLimitingPolicy(nullptr), + m_errorLimitingPolicy(nullptr), + m_errorLimiting(nullptr), + m_userLimitsPolicy(nullptr), + m_securityHeadersPolicy(nullptr) +{ + m_blockingLevel = BlockingLevel::NO_BLOCKING; +} + +void WaapConfigBase::load(cereal::JSONInputArchive& ar) +{ + readJSONByCereal(ar); + loadTriggersPolicy(ar); + loadOverridePolicy(ar); + loadTrustedSourcesPolicy(ar); + loadWaapParametersPolicy(ar); + loadUserLimitsPolicy(ar); + loadRateLimitingPolicy(ar); + loadErrorLimitingPolicy(ar); +} + +void WaapConfigBase::readJSONByCereal(cereal::JSONInputArchive& ar) +{ + ar( + cereal::make_nvp("webAttackMitigation", m_autonomousSecurity), + cereal::make_nvp("webAttackMitigationAction", m_autonomousSecurityLevel), + cereal::make_nvp("practiceId", m_practiceId), + cereal::make_nvp("practiceName", m_practiceName), + cereal::make_nvp("assetId", m_assetId), + cereal::make_nvp("assetName", m_assetName), + cereal::make_nvp("ruleId", m_ruleId), + cereal::make_nvp("ruleName", m_ruleName) + ); + + m_blockingLevel = blockingLevelBySensitivityStr(m_autonomousSecurityLevel); +} + +void WaapConfigBase::loadCsrfPolicy(cereal::JSONInputArchive& ar) +{ + std::string failMessage = "Failed to load the CSRF policy of the current rule: " + + m_ruleName + ": "; + + try { + m_csrfPolicy = std::make_shared(ar); + } + catch (std::runtime_error& e) { + ar.setNextName(nullptr); + dbgWarning(D_WAAP) << failMessage << e.what(); + m_csrfPolicy = std::make_shared(); + } +} + +void WaapConfigBase::loadSecurityHeadersPolicy(cereal::JSONInputArchive& ar) +{ + std::string failMessage = "Failed to load the Security Headers policy of the current rule: " + + m_ruleName + ": "; + + try { + m_securityHeadersPolicy = std::make_shared(ar); + } + catch (std::runtime_error& e) { + ar.setNextName(nullptr); + // Feature is currently not supported by the UI, thus changing debug level to debug. 
+ dbgDebug(D_WAAP) << failMessage << e.what(); + m_securityHeadersPolicy = nullptr; + } +} + +void WaapConfigBase::loadOverridePolicy(cereal::JSONInputArchive& ar) +{ + std::string failMessage = "Failed to load the WAAP Overrides of the current rule: " + + m_ruleName + ": "; + + try { + m_overridePolicy = std::make_shared(ar); + } + catch (std::runtime_error& e) { + ar.setNextName(nullptr); + dbgWarning(D_WAAP) << failMessage << e.what(); + m_overridePolicy = nullptr; + } +} + +void WaapConfigBase::loadTriggersPolicy(cereal::JSONInputArchive& ar) +{ + std::string failMessage = "Failed to load the WAAP Triggers of the current rule: " + + m_ruleName + ": "; + try { + m_triggerPolicy = std::make_shared(ar); + } + catch (std::runtime_error& e) { + ar.setNextName(nullptr); + dbgWarning(D_WAAP) << failMessage << e.what(); + m_triggerPolicy = nullptr; + } +} + +void WaapConfigBase::loadTrustedSourcesPolicy(cereal::JSONInputArchive& ar) +{ + std::string failMessage = "Failed to load the WAAP Trusted sources of the current rule: " + + m_ruleName + ": "; + try { + m_trustedSourcesPolicy = std::make_shared(ar); + } + catch (std::runtime_error & e) { + ar.setNextName(nullptr); + dbgWarning(D_WAAP) << failMessage << e.what(); + m_trustedSourcesPolicy = nullptr; + } +} + +void WaapConfigBase::loadWaapParametersPolicy(cereal::JSONInputArchive& ar) +{ + std::string failMessage = "Failed to load the WAAP Parameters of the current rule: " + + m_ruleName + ": "; + try { + m_waapParameters = std::make_shared(ar); + } + catch (std::runtime_error & e) { + ar.setNextName(nullptr); + dbgWarning(D_WAAP) << failMessage << e.what(); + m_waapParameters = nullptr; + } +} + +void WaapConfigBase::loadRateLimitingPolicy(cereal::JSONInputArchive& ar) +{ + std::string failMessage = "Failed to load the WAAP Rate Limiting of the current rule: " + + m_ruleName + ": "; + try { + m_rateLimitingPolicy = std::make_shared(ar); + } + catch (std::runtime_error & e) { + ar.setNextName(nullptr); + // Feature is currently not supported by the UI, thus changing debug level to debug. 
+ dbgDebug(D_WAAP) << failMessage << e.what(); + m_rateLimitingPolicy = nullptr; + } +} + +void WaapConfigBase::loadErrorLimitingPolicy(cereal::JSONInputArchive& ar) +{ + std::string failMessage = "Failed to load the WAAP Error Limiting of the current rule: " + + m_ruleName + ": "; + + try { + + m_errorLimiting = std::make_shared(ar); + std::shared_ptr policy; + policy = std::make_shared(); + policy->rules.push_back(Waap::RateLimiting::Policy::Rule()); + policy->rules[0].rate.interval = m_errorLimiting->m_errorLimiterPolicy.interval; + policy->rules[0].rate.events = m_errorLimiting->m_errorLimiterPolicy.events; + policy->rules[0].uriFilter.groupBy = Waap::RateLimiting::Policy::Rule::UriFilter::GroupBy::GLOBAL; + policy->rules[0].sourceFilter.groupBy = Waap::RateLimiting::Policy::Rule::SourceFilter::GroupBy::GLOBAL; + policy->rules[0].uriFilter.scope = Waap::RateLimiting::Policy::Rule::UriFilter::Scope::ALL; + policy->rules[0].sourceFilter.scope = Waap::RateLimiting::Policy::Rule::SourceFilter::Scope::ALL; + policy->m_rateLimiting.enable = m_errorLimiting->getErrorLimitingEnforcementStatus(); + + if (m_errorLimiting->m_errorLimiterPolicy.type == "quarantine") { + policy->rules[0].action.type = Waap::RateLimiting::Policy::Rule::Action::Type::QUARANTINE; + policy->rules[0].action.quarantineTimeSeconds = m_errorLimiting->m_errorLimiterPolicy.blockingTime; + } + else if (m_errorLimiting->m_errorLimiterPolicy.type == "rate limit") { + policy->rules[0].action.type = Waap::RateLimiting::Policy::Rule::Action::Type::RATE_LIMIT; + } + else if (m_errorLimiting->m_errorLimiterPolicy.type == "detect") { + policy->rules[0].action.type = Waap::RateLimiting::Policy::Rule::Action::Type::DETECT; + } + + m_errorLimitingPolicy = policy; + } + catch (std::runtime_error & e) { + ar.setNextName(nullptr); + // Feature is currently not supported by the UI, thus changing debug level to debug. 
+ dbgDebug(D_WAAP) << failMessage << e.what(); + m_errorLimiting = nullptr; + m_errorLimitingPolicy = nullptr; + } + +} + +void WaapConfigBase::loadOpenRedirectPolicy(cereal::JSONInputArchive& ar) +{ + std::string failMessage = "Failed to load the WAAP OpenRedirect policy"; + try { + m_openRedirectPolicy = std::make_shared(ar); + } + catch (std::runtime_error & e) { + ar.setNextName(nullptr); + dbgWarning(D_WAAP) << failMessage << e.what(); + // TODO:: change the default back to nullptr when implemeted in hook + // m_openRedirectPolicy = nullptr; + // Now (until hook is implemented) the default is enabled+enforced + m_openRedirectPolicy = std::make_shared(); + } +} + +void WaapConfigBase::loadErrorDisclosurePolicy(cereal::JSONInputArchive& ar) +{ + std::string failMessage = "Failed to load the WAAP Information Disclosure policy"; + try { + m_errorDisclosurePolicy = std::make_shared(ar); + } + catch (std::runtime_error & e) { + ar.setNextName(nullptr); + dbgWarning(D_WAAP) << failMessage << e.what(); + m_errorDisclosurePolicy = nullptr; + } +} + +void WaapConfigBase::loadUserLimitsPolicy(cereal::JSONInputArchive& ar) +{ + try { + m_userLimitsPolicy = std::make_shared(ar); + dbgInfo(D_WAAP_ULIMITS) << "[USER LIMITS] policy loaded:\n" << *m_userLimitsPolicy; + } + catch (std::runtime_error & e) { + ar.setNextName(nullptr); + m_userLimitsPolicy = std::make_shared(); + dbgInfo(D_WAAP_ULIMITS) << "[USER LIMITS] default policy loaded:\n" << *m_userLimitsPolicy; + } +} + +bool WaapConfigBase::operator==(const WaapConfigBase& other) const +{ + return + m_autonomousSecurity == other.m_autonomousSecurity && + m_autonomousSecurityLevel == other.m_autonomousSecurityLevel && + m_practiceId == other.m_practiceId && + m_practiceName == other.m_practiceName && + m_ruleId == other.m_ruleId && + m_ruleName == other.m_ruleName && + m_assetId == other.m_assetId && + m_assetName == other.m_assetName && + Waap::Util::compareObjects(m_triggerPolicy, other.m_triggerPolicy) && + Waap::Util::compareObjects(m_overridePolicy, other.m_overridePolicy) && + Waap::Util::compareObjects(m_trustedSourcesPolicy, other.m_trustedSourcesPolicy) && + Waap::Util::compareObjects(m_waapParameters, other.m_waapParameters) && + Waap::Util::compareObjects(m_openRedirectPolicy, other.m_openRedirectPolicy) && + Waap::Util::compareObjects(m_errorDisclosurePolicy, other.m_errorDisclosurePolicy) && + Waap::Util::compareObjects(m_rateLimitingPolicy, other.m_rateLimitingPolicy) && + Waap::Util::compareObjects(m_errorLimitingPolicy, other.m_errorLimitingPolicy) && + Waap::Util::compareObjects(m_csrfPolicy, other.m_csrfPolicy) && + Waap::Util::compareObjects(m_userLimitsPolicy, other.m_userLimitsPolicy) && + Waap::Util::compareObjects(m_securityHeadersPolicy, other.m_securityHeadersPolicy); +} + +void WaapConfigBase::printMe(std::ostream& os) const +{ + os << m_autonomousSecurity << ", " << m_autonomousSecurityLevel; + os << ", " << m_ruleId << ", " << m_ruleName; + os << ", " << m_practiceId << ", " << m_practiceName << ", " << m_assetId << ", " << m_assetName; +} + +const std::string& WaapConfigBase::get_AssetId() const +{ + return m_assetId; +} + +const std::string& WaapConfigBase::get_AssetName() const +{ + return m_assetName; +} + +const std::string& WaapConfigBase::get_PracticeId() const +{ + return m_practiceId; +} + +const std::string& WaapConfigBase::get_PracticeName() const +{ + return m_practiceName; +} + +const std::string& WaapConfigBase::get_RuleId() const +{ + return m_ruleId; +} + +const std::string& 
WaapConfigBase::get_RuleName() const +{ + return m_ruleName; +} + +const bool& WaapConfigBase::get_WebAttackMitigation() const +{ + return m_autonomousSecurity; +} + +const std::string& WaapConfigBase::get_WebAttackMitigationAction() const +{ + return m_autonomousSecurityLevel; +} + +AttackMitigationMode +WaapConfigBase::get_WebAttackMitigationMode(const IWaapConfig& siteConfig) +{ + AttackMitigationMode attackMitigationMode = AttackMitigationMode::UNKNOWN; + if (siteConfig.get_WebAttackMitigation()) { + attackMitigationMode = (siteConfig.get_BlockingLevel() == BlockingLevel::NO_BLOCKING) ? + AttackMitigationMode::LEARNING : AttackMitigationMode::PREVENT; + } + else { + attackMitigationMode = AttackMitigationMode::DISABLED; + } + return attackMitigationMode; +} + +const char* +WaapConfigBase::get_WebAttackMitigationModeStr(const IWaapConfig& siteConfig) +{ + switch(get_WebAttackMitigationMode(siteConfig)) { + case AttackMitigationMode::DISABLED: + return "DISABLED"; + case AttackMitigationMode::LEARNING: + return "LEARNING"; + case AttackMitigationMode::PREVENT: + return "PREVENT"; + default: + return "UNKNOWN"; + } +} + +const BlockingLevel& WaapConfigBase::get_BlockingLevel() const +{ + return m_blockingLevel; +} + +const std::shared_ptr& WaapConfigBase::get_OverridePolicy() const +{ + return m_overridePolicy; +} + +const std::shared_ptr& WaapConfigBase::get_TriggerPolicy() const +{ + return m_triggerPolicy; +} + +const std::shared_ptr& WaapConfigBase::get_TrustedSourcesPolicy() const +{ + return m_trustedSourcesPolicy; +} + +const std::shared_ptr& WaapConfigBase::get_CsrfPolicy() const +{ + return m_csrfPolicy; +} + +const std::shared_ptr& WaapConfigBase::get_WaapParametersPolicy() const +{ + return m_waapParameters; +} + +const std::shared_ptr& WaapConfigBase::get_RateLimitingPolicy() const +{ + return m_rateLimitingPolicy; +} + +const std::shared_ptr& WaapConfigBase::get_ErrorLimitingPolicy() const +{ + return m_errorLimitingPolicy; +} + +const std::shared_ptr& WaapConfigBase::get_OpenRedirectPolicy() const +{ + return m_openRedirectPolicy; +} + +const std::shared_ptr& WaapConfigBase::get_ErrorDisclosurePolicy() const +{ + return m_errorDisclosurePolicy; +} + +const std::shared_ptr& WaapConfigBase::get_SecurityHeadersPolicy() const +{ + return m_securityHeadersPolicy; +} + +const std::shared_ptr& WaapConfigBase::get_UserLimitsPolicy() const +{ + return m_userLimitsPolicy; +} + +BlockingLevel WaapConfigBase::blockingLevelBySensitivityStr(const std::string& sensitivity) const +{ + std::string sensitivityLower = to_lower_copy(sensitivity); + + if (sensitivityLower == "transparent") + { + return BlockingLevel::NO_BLOCKING; + } + else if (sensitivityLower == "low") + { + return BlockingLevel::LOW_BLOCKING_LEVEL; + } + else if (sensitivityLower == "balanced") + { + return BlockingLevel::MEDIUM_BLOCKING_LEVEL; + } + else if (sensitivityLower == "high") + { + return BlockingLevel::HIGH_BLOCKING_LEVEL; + } + return BlockingLevel::NO_BLOCKING; +} diff --git a/components/security_apps/waap/waap_clib/WaapConfigBase.h b/components/security_apps/waap/waap_clib/WaapConfigBase.h new file mode 100755 index 0000000..cc15ebf --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapConfigBase.h @@ -0,0 +1,107 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#ifndef __WAAP_CONFIG_BASE_H__ +#define __WAAP_CONFIG_BASE_H__ + +#include "i_waapConfig.h" +#include "WaapOverride.h" +#include "WaapTrigger.h" +#include "WaapOpenRedirectPolicy.h" +#include "WaapErrorDisclosurePolicy.h" +#include "ErrorLimiting.h" +#include "CsrfPolicy.h" +#include "SecurityHeadersPolicy.h" +#include "UserLimitsPolicy.h" +#include "TrustedSources.h" +#include "Waf2Util.h" +#include "debug.h" + +class WaapConfigBase : public IWaapConfig +{ +public: + static AttackMitigationMode get_WebAttackMitigationMode(const IWaapConfig& siteConfig); + static const char* get_WebAttackMitigationModeStr(const IWaapConfig& siteConfig); + + bool operator==(const WaapConfigBase& other) const; + + virtual const std::string& get_AssetId() const; + virtual const std::string& get_AssetName() const; + virtual const BlockingLevel& get_BlockingLevel() const; + virtual const std::string& get_PracticeId() const; + virtual const std::string& get_PracticeName() const; + virtual const std::string& get_RuleId() const; + virtual const std::string& get_RuleName() const; + virtual const bool& get_WebAttackMitigation() const; + virtual const std::string& get_WebAttackMitigationAction() const; + + virtual const std::shared_ptr& get_OverridePolicy() const; + virtual const std::shared_ptr& get_TriggerPolicy() const; + virtual const std::shared_ptr& get_TrustedSourcesPolicy() const; + virtual const std::shared_ptr& get_WaapParametersPolicy() const; + virtual const std::shared_ptr& get_OpenRedirectPolicy() const; + virtual const std::shared_ptr& get_ErrorDisclosurePolicy() const; + virtual const std::shared_ptr& get_CsrfPolicy() const; + virtual const std::shared_ptr& get_RateLimitingPolicy() const; + virtual const std::shared_ptr& get_SecurityHeadersPolicy() const; + virtual const std::shared_ptr& get_ErrorLimitingPolicy() const; + virtual const std::shared_ptr& get_UserLimitsPolicy() const; + + virtual void printMe(std::ostream& os) const; + +protected: + WaapConfigBase(); + void load(cereal::JSONInputArchive& ar); + void loadOpenRedirectPolicy(cereal::JSONInputArchive& ar); + void loadErrorDisclosurePolicy(cereal::JSONInputArchive& ar); + void loadCsrfPolicy(cereal::JSONInputArchive& ar); + void loadRateLimitingPolicy(cereal::JSONInputArchive& ar); + void loadSecurityHeadersPolicy(cereal::JSONInputArchive& ar); + void loadErrorLimitingPolicy(cereal::JSONInputArchive& ar); + + std::string m_assetId; +private: + void loadOverridePolicy(cereal::JSONInputArchive& ar); + void loadTriggersPolicy(cereal::JSONInputArchive& ar); + void loadTrustedSourcesPolicy(cereal::JSONInputArchive& ar); + void loadWaapParametersPolicy(cereal::JSONInputArchive& ar); + void loadUserLimitsPolicy(cereal::JSONInputArchive& ar); + + void readJSONByCereal(cereal::JSONInputArchive& ar); + BlockingLevel blockingLevelBySensitivityStr(const std::string& sensitivity) const; + + std::string m_autonomousSecurityLevel; + bool m_autonomousSecurity; + std::string m_assetName; + BlockingLevel m_blockingLevel; + std::string m_practiceId; + 
std::string m_practiceName; + std::string m_ruleId; + std::string m_ruleName; + + std::shared_ptr m_overridePolicy; + std::shared_ptr m_triggerPolicy; + std::shared_ptr m_trustedSourcesPolicy; + std::shared_ptr m_waapParameters; + std::shared_ptr m_openRedirectPolicy; + std::shared_ptr m_errorDisclosurePolicy; + std::shared_ptr m_csrfPolicy; + std::shared_ptr m_rateLimitingPolicy; + std::shared_ptr m_errorLimitingPolicy; + std::shared_ptr m_errorLimiting; + std::shared_ptr m_userLimitsPolicy; + std::shared_ptr m_securityHeadersPolicy; +}; + +#endif // __WAAP_CONFIG_BASE_H__ diff --git a/components/security_apps/waap/waap_clib/WaapConversions.cc b/components/security_apps/waap/waap_clib/WaapConversions.cc new file mode 100644 index 0000000..cd5fd01 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapConversions.cc @@ -0,0 +1,73 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapConversions.h" +#include "debug.h" + +USE_DEBUG_FLAG(D_WAAP); + +namespace Waap { +namespace Conversions { + ThreatLevel convertFinalScoreToThreatLevel(double finalScore) + { + if (finalScore == NO_THREAT_FINAL_SCORE) + { + return NO_THREAT; + } + if (finalScore < INFO_THREAT_THRESHOLD) + { + return THREAT_INFO; + } + if (finalScore < LOW_THREAT_THRESHOLD) + { + return LOW_THREAT; + } + if (finalScore < MED_THREAT_THRESHOLD) + { + return MEDIUM_THREAT; + } + return HIGH_THREAT; + } + + bool shouldDoWafBlocking(const IWaapConfig* pWaapConfig, ThreatLevel threatLevel) + { + if (pWaapConfig == NULL) + { + return false; + } + + if (threatLevel <= THREAT_INFO) + { + return false; + } + + BlockingLevel blockLevel = pWaapConfig->get_BlockingLevel(); + + switch (blockLevel) + { + case BlockingLevel::LOW_BLOCKING_LEVEL: + return threatLevel >= HIGH_THREAT; + case BlockingLevel::MEDIUM_BLOCKING_LEVEL: + return threatLevel >= MEDIUM_THREAT; + case BlockingLevel::HIGH_BLOCKING_LEVEL: + return true; + case BlockingLevel::NO_BLOCKING: + return false; + default: + dbgDebug(D_WAAP) << "Invalid blocking level in WAAP Config: " << static_cast::type>(blockLevel); + } + return false; + } +} +} diff --git a/components/security_apps/waap/waap_clib/WaapConversions.h b/components/security_apps/waap/waap_clib/WaapConversions.h new file mode 100644 index 0000000..ec99dda --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapConversions.h @@ -0,0 +1,27 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
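+
+// Decision matrix implemented by shouldDoWafBlocking() in the accompanying .cc file
+// (threat levels at or below THREAT_INFO never block):
+//
+//     BlockingLevel::LOW_BLOCKING_LEVEL    -> block only HIGH_THREAT
+//     BlockingLevel::MEDIUM_BLOCKING_LEVEL -> block MEDIUM_THREAT and above
+//     BlockingLevel::HIGH_BLOCKING_LEVEL   -> block anything above THREAT_INFO
+//     BlockingLevel::NO_BLOCKING           -> never block
+//
+// convertFinalScoreToThreatLevel() maps the numeric score onto these levels using the
+// NO_THREAT_FINAL_SCORE and *_THREAT_THRESHOLD constants declared outside this file.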
+ +#ifndef __WAAP_CONVERSIONS_H__ +#define __WAAP_CONVERSIONS_H__ + +#include "WaapEnums.h" +#include "i_waapConfig.h" + +namespace Waap { +namespace Conversions { + ThreatLevel convertFinalScoreToThreatLevel(double finalScore); + bool shouldDoWafBlocking(const IWaapConfig* pSitePolicy, ThreatLevel threatLevel); +} +} + +#endif // __WAAP_CONVERSIONS_H__ diff --git a/components/security_apps/waap/waap_clib/WaapDecision.cc b/components/security_apps/waap/waap_clib/WaapDecision.cc new file mode 100755 index 0000000..a03e4eb --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapDecision.cc @@ -0,0 +1,208 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapDecision.h" +#include "OpenRedirectDecision.h" +#include "debug.h" +#include +#include + +USE_DEBUG_FLAG(D_WAAP); + +WaapDecision::WaapDecision() : + m_json(""), + m_decisionFactory() +{ +} + +std::shared_ptr +WaapDecision::getDecision(DecisionType type) const +{ + return m_decisionFactory.getDecision(type); +} + +void +WaapDecision::orderDecisions() +{ + const DecisionsArr& decisions = m_decisionFactory.getDecisions(); + dbgTrace(D_WAAP) << "Original: " << decisions; + std::copy_if(decisions.begin(), + decisions.end(), + std::back_inserter(m_ordered_decisions), + [](const std::shared_ptr& decision) { + return decision && (decision->shouldBlock() || decision->shouldLog()); + }); + if (!m_ordered_decisions.empty()) { + dbgTrace(D_WAAP) << "Reduced: " << m_ordered_decisions; + m_ordered_decisions.sort(sortDecisions); + dbgTrace(D_WAAP) << "Sorted: " << m_ordered_decisions; + } + + setIteratorToFirstDecisionToLog(); +} + +void WaapDecision::setIteratorToFirstDecisionToLog() +{ + m_first_decision_to_log = + std::find_if( + m_ordered_decisions.begin(), + m_ordered_decisions.end(), + [](const std::shared_ptr& decision) + { + return decision && decision->shouldLog(); + }); +} + +bool +WaapDecision::sortDecisions(const std::shared_ptr& lhs, const std::shared_ptr& rhs) +{ + if (lhs->shouldBlock() && !rhs->shouldBlock()) { + return true; + } + else if (!lhs->shouldBlock() && rhs->shouldBlock()) { + return false; + } + else if (lhs->shouldLog() && !rhs->shouldLog()) { + return true; + } + else if (!lhs->shouldLog() && rhs->shouldLog()) { + return false; + } + else if (lhs->getType() < rhs->getType()) { + return true; + } + return false; +} + +std::ostream& operator<<(std::ostream& os, const DecisionsArr& decisions) +{ + os << "Decision(block, log): "; + for (auto decision : decisions) + { + os << decision->getTypeStr() << "(" << decision->shouldBlock() << ", " << + decision->shouldLog() << ") "; + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const std::list>& decisions) +{ + os << "Decision(block, log): "; + for (auto decision : decisions) + { + os << decision->getTypeStr() << "(" << decision->shouldBlock() << ", " << + decision->shouldLog() << ") "; + } + return os; +} + +bool WaapDecision::getShouldBlockFromHighestPriorityDecision() const +{ + if (!m_ordered_decisions.empty()) + { + 
return m_ordered_decisions.front()->shouldBlock(); + } + return false; +} + +bool WaapDecision::anyDecisionsToLogOrBlock() const +{ + return !m_ordered_decisions.empty(); +} + +DecisionType WaapDecision::getHighestPriorityDecisionToLog() const +{ + if (m_first_decision_to_log == m_ordered_decisions.end()) + { + return DecisionType::NO_WAAP_DECISION; + } + return (*m_first_decision_to_log)->getType(); +} + +void WaapDecision::getIncidentLogFields( + const std::string& responseStatus, + std::string& incidentDetails, + std::string& incidentType +) const +{ + incidentDetails.clear(); + incidentType.clear(); + + for (decision_list::const_iterator iter = m_first_decision_to_log; iter != m_ordered_decisions.end(); ++iter) + { + const std::shared_ptr& nextDecision = *iter; + std::string tempIncidentDetails; + std::string tempIncidentType; + + if (!nextDecision->shouldLog()) + { + continue; + } + + bool isRelevant = true; + switch (nextDecision->getType()) + { + case OPEN_REDIRECT_DECISION: + { + tempIncidentDetails = "OpenRedirect attack detected (" + + std::dynamic_pointer_cast(nextDecision)->getLink() + ")"; + tempIncidentType = "Cross Site Redirect"; + break; + } + + case ERROR_LIMITING_DECISION: + { + tempIncidentDetails = "Application scanning detected"; + tempIncidentType = "Error Limit"; + break; + } + + case RATE_LIMITING_DECISION: + { + tempIncidentDetails = "High request rate detected"; + tempIncidentType = "Request Rate Limit"; + break; + } + case ERROR_DISCLOSURE_DECISION: + tempIncidentDetails = "Information disclosure in server response detected"; + tempIncidentDetails += ", response status code: " + responseStatus; + tempIncidentType = "Error Disclosure"; + break; + default: + isRelevant = false; + break; + } + if (isRelevant) { + if (!incidentDetails.empty()) + { + incidentDetails += " and "; + } + if (!incidentType.empty()) + { + incidentType += ", "; + } + incidentDetails += tempIncidentDetails; + incidentType += tempIncidentType; + } + } +} + +void WaapDecision::setJson(const std::string& json) +{ + m_json = json; +} + +std::string WaapDecision::getJson() const +{ + return m_json; +} diff --git a/components/security_apps/waap/waap_clib/WaapDecision.h b/components/security_apps/waap/waap_clib/WaapDecision.h new file mode 100755 index 0000000..1b2bcc4 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapDecision.h @@ -0,0 +1,61 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
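+
+// Consumption sketch (illustrative; how the per-feature decisions get populated is assumed
+// and happens elsewhere): once all decisions obtained via getDecision(type) are filled in,
+// the owner calls orderDecisions() once. That drops decisions that neither block nor log,
+// sorts blockers first (then loggers, then by DecisionType priority), and caches the first
+// decision that should be logged.
+//
+//     waapDecision.orderDecisions();
+//     if (waapDecision.getShouldBlockFromHighestPriorityDecision()) { /* block request */ }
+//     if (waapDecision.anyDecisionsToLogOrBlock()) {
+//         DecisionType top = waapDecision.getHighestPriorityDecisionToLog();
+//         (void)top;
+//     }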
+ +#ifndef __WAAP_DECISION_H__ +#define __WAAP_DECISION_H__ + +#include +#include +#include +#include +#include "WaapEnums.h" +#include "SingleDecision.h" +#include "DecisionFactory.h" +#include "AutonomousSecurityDecision.h" +#include + +std::ostream& operator<<(std::ostream& os, const std::list>& decisions); +std::ostream& operator<<(std::ostream& os, const DecisionsArr& decisions); +typedef std::list> decision_list; + +class WaapDecision { +public: + WaapDecision(); + std::shared_ptr getDecision(DecisionType type) const; + void orderDecisions(); + static bool + sortDecisions(const std::shared_ptr& lhs, const std::shared_ptr& rhs); + bool getShouldBlockFromHighestPriorityDecision() const; + bool anyDecisionsToLogOrBlock() const; + DecisionType getHighestPriorityDecisionToLog() const; + void getIncidentLogFields( + const std::string& response_status, + std::string& incidentDetails, + std::string& incidentType + ) const; + void setJson(const std::string& json); + std::string getJson() const; + +private: + friend std::ostream& operator<<(std::ostream& os, const DecisionsArr& decisions); + friend std::ostream& operator<<(std::ostream& os, + const std::list>& decisions); + + void setIteratorToFirstDecisionToLog(); + + std::string m_json; + DecisionFactory m_decisionFactory; + decision_list m_ordered_decisions; + decision_list::const_iterator m_first_decision_to_log; +}; +#endif diff --git a/components/security_apps/waap/waap_clib/WaapErrorDisclosurePolicy.cc b/components/security_apps/waap/waap_clib/WaapErrorDisclosurePolicy.cc new file mode 100755 index 0000000..e47c149 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapErrorDisclosurePolicy.cc @@ -0,0 +1,27 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapErrorDisclosurePolicy.h" + +namespace Waap { +namespace ErrorDisclosure { + +bool +Policy::operator==(const Policy &other) const +{ + return enable == other.enable && + enforce == other.enforce; +} + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapErrorDisclosurePolicy.h b/components/security_apps/waap/waap_clib/WaapErrorDisclosurePolicy.h new file mode 100755 index 0000000..2248757 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapErrorDisclosurePolicy.h @@ -0,0 +1,50 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
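+
+// Policy parsing sketch: the cereal constructor below reads a single case-insensitive
+// "errorDisclosure" string from the practice JSON. For example, a fragment such as
+// {"errorDisclosure": "Prevent"} yields enable=true and enforce=true; "Detect" yields
+// enable=true and enforce=false; any other value leaves both flags false.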
+ +#pragma once +#include +#include +#include +#include +#include "debug.h" + +namespace Waap { +namespace ErrorDisclosure { + +struct Policy { + template + Policy(_A &ar) + : + enable(false), + enforce(false) + { + std::string level; + ar(cereal::make_nvp("errorDisclosure", level)); + level = boost::algorithm::to_lower_copy(level); + if (level == "detect") { + enable = true; + } + else if (level == "prevent") { + enable = true; + enforce = true; + } + } + + bool operator==(const Policy &other) const; + + bool enable; + bool enforce; +}; + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapKeywords.cc b/components/security_apps/waap/waap_clib/WaapKeywords.cc new file mode 100644 index 0000000..6a1b8e4 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapKeywords.cc @@ -0,0 +1,47 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapKeywords.h" + +namespace Waap { +namespace Keywords { + +void +computeKeywordsSet(KeywordsSet &keywordsSet, const std::vector &keyword_matches, + const Waap::Util::map_of_stringlists_t &found_patterns) +{ + // Add all detected keyword_matches + keywordsSet.insert(keyword_matches.begin(), keyword_matches.end()); + + for (auto it = found_patterns.begin(); it != found_patterns.end(); ++it) { + const std::string& key = it->first; + const std::vector& keywordsList = it->second; + bool foundPatternNotInMatches = false; + + for (auto pKeyword = keywordsList.begin(); pKeyword != keywordsList.end(); ++pKeyword) { + if (std::find(keyword_matches.begin(), keyword_matches.end(), *pKeyword) != keyword_matches.end()) { + foundPatternNotInMatches = true; + } + } + + // Only add keys from found_patterns for which there are no values in keyword_matches + // The reason is to avoid adding both value and its related key to the same mix, which would + // unjustfully pump up the score for the keywordsSet. + if (!foundPatternNotInMatches) { + keywordsSet.insert(key); + } + } +} + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapKeywords.h b/components/security_apps/waap/waap_clib/WaapKeywords.h new file mode 100644 index 0000000..fdc2ca6 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapKeywords.h @@ -0,0 +1,31 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
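+
+// Worked example for computeKeywordsSet() (hypothetical inputs, for illustration only):
+// with keyword_matches = {"select", "union"} and
+// found_patterns = { "sql_keyword": ["select"], "os_cmd": ["wget"] },
+// the resulting set is {"select", "union", "os_cmd"}; "sql_keyword" is skipped because one
+// of its values ("select") already appears in keyword_matches, so adding the key as well
+// would double-count the same evidence when the set is scored.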
+ +#pragma once + +#include "Waf2Util.h" +#include +#include +#include + +namespace Waap { +namespace Keywords { + +typedef std::unordered_set KeywordsSet; +typedef std::vector KeywordsVec; + +void computeKeywordsSet(KeywordsSet &keywordsSet, const std::vector &keyword_matches, + const Waap::Util::map_of_stringlists_t &found_patterns); + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapOpenRedirect.cc b/components/security_apps/waap/waap_clib/WaapOpenRedirect.cc new file mode 100755 index 0000000..2fc4938 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapOpenRedirect.cc @@ -0,0 +1,90 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapOpenRedirect.h" +#include "WaapOpenRedirectPolicy.h" +#include "Waf2Util.h" +#include +#include +#include +#include + +USE_DEBUG_FLAG(D_WAAP); + +// Max number of openredirect URLs extracted from URL parameters, that are stored +#define MAX_OPENREDIRECT_URLS 25 + +namespace Waap { +namespace OpenRedirect { + +void State::collect(const char* v, size_t v_len, const std::string &hostStr) { + std::string urlDomain; + + if (v_len>8 && memcaseinsensitivecmp(v, 8, "https://", 8)) { + // Detect https URL and extract domain name + urlDomain = std::string(v+8, v_len-8); + } + else if (v_len>7 && memcaseinsensitivecmp(v, 7, "http://", 7)) { + // Detect http URL and extract domain name + urlDomain = std::string(v+7, v_len-7); + } + + // urlDomain starts with domain name (without the schema), which can is terminated by the '/' character + urlDomain = urlDomain.substr(0, urlDomain.find('/', 0)); + + // For comparison, consider domain names from the Host: header and from the value URL, without port numbers + std::string urlDomainNoPort = urlDomain.substr(0, urlDomain.find(":", 0)); + std::string hostStrNoPort = hostStr.substr(0, hostStr.find(":", 0)); + + // Avoid adding URLs whose "domain" part is equal to the site's hostname (take from the request's Host header) + // Also, limit number of openredirect URLs we remember + if (!urlDomainNoPort.empty() && urlDomainNoPort != hostStrNoPort && + m_openRedirectUrls.size() < MAX_OPENREDIRECT_URLS) + { + m_openRedirectUrls.insert(boost::algorithm::to_lower_copy(std::string(v, v_len))); + dbgTrace(D_WAAP) << "Waf2Transaction::collectUrlsForOpenRedirect(): adding url '" << + std::string(v, v_len) << "'"; + } +} + +bool +State::testRedirect(const std::string &redirectUrl) const +{ + if (redirectUrl.empty()) { + return false; + } + + std::string redirectUrlLower = boost::algorithm::to_lower_copy(redirectUrl); + + if (!redirectUrlLower.empty()) + { + for (const auto &collectedUrl : m_openRedirectUrls) { + // Detect whether redirect URL (from the Location response header) starts with one of the collected urls + // Note that the collected URLs are already stored lowercase + if (boost::algorithm::starts_with(redirectUrlLower, collectedUrl)) { + return true; + } + } + } + + return false; +} + +bool +State::empty() const +{ + return m_openRedirectUrls.empty(); +} + +} +} diff 
--git a/components/security_apps/waap/waap_clib/WaapOpenRedirect.h b/components/security_apps/waap/waap_clib/WaapOpenRedirect.h new file mode 100755 index 0000000..3d13619 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapOpenRedirect.h @@ -0,0 +1,34 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "WaapOpenRedirectPolicy.h" +#include +#include +#include +#include "debug.h" + +namespace Waap { +namespace OpenRedirect { + +class State { +public: + void collect(const char* v, size_t v_len, const std::string &hostStr); + bool testRedirect(const std::string &redirectUrl) const; + bool empty() const; +private: + std::set m_openRedirectUrls; +}; + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapOpenRedirectPolicy.cc b/components/security_apps/waap/waap_clib/WaapOpenRedirectPolicy.cc new file mode 100755 index 0000000..66c39bc --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapOpenRedirectPolicy.cc @@ -0,0 +1,36 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapOpenRedirectPolicy.h" +#include "Waf2Util.h" +#include + +namespace Waap { +namespace OpenRedirect { + +Policy::Policy() +: +enable(false), +enforce(false) +{ +} + +bool +Policy::operator==(const Policy &other) const +{ + return enable == other.enable && + enforce == other.enforce; +} + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapOpenRedirectPolicy.h b/components/security_apps/waap/waap_clib/WaapOpenRedirectPolicy.h new file mode 100755 index 0000000..824e06a --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapOpenRedirectPolicy.h @@ -0,0 +1,52 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
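+
+// Policy parsing sketch: like the error-disclosure policy, the cereal constructor below reads
+// a single case-insensitive "openRedirect" string: "detect" yields enable=true only,
+// "prevent" yields enable=true and enforce=true, and any other value leaves the feature off.
+// The default constructor keeps both flags false.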
+ +#pragma once +#include +#include +#include +#include +#include "debug.h" + +namespace Waap { +namespace OpenRedirect { + +struct Policy { + Policy(); + + template + Policy(_A &ar) + : + enable(false), + enforce(false) + { + std::string level; + ar(cereal::make_nvp("openRedirect", level)); + level = boost::algorithm::to_lower_copy(level); + if (level == "detect") { + enable = true; + } + else if (level == "prevent") { + enable = true; + enforce = true; + } + } + + bool operator==(const Policy &other) const; + + bool enable; + bool enforce; +}; + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapOverride.cc b/components/security_apps/waap/waap_clib/WaapOverride.cc new file mode 100755 index 0000000..87af2ae --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapOverride.cc @@ -0,0 +1,81 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapOverride.h" +#include "Waf2Util.h" + +USE_DEBUG_FLAG(D_WAAP); + +namespace Waap { +namespace Override { + +bool Match::operator==(const Match &other) const +{ + return (m_op == other.m_op) && + (m_operand1 == other.m_operand1) && + (m_operand2 == other.m_operand2) && + (m_tag == other.m_tag) && + Waap::Util::compareObjects(m_valueRegex, other.m_valueRegex) && + m_cidr == other.m_cidr && + m_isCidr == other.m_isCidr; +} + +Behavior::Behavior() +: m_action(""), m_log(""), m_sourceIdentifier("") +{ +} + +bool Behavior::operator==(const Behavior &other) const +{ + return (m_action == other.m_action) && (m_log == other.m_log) && (m_sourceIdentifier == other.m_sourceIdentifier); +} + +const std::string & Behavior::getAction() const +{ + return m_action; +} + +const std::string& Behavior::getLog() const +{ + return m_log; +} + +const std::string& Behavior::getSourceIdentifier() const +{ + return m_sourceIdentifier; +} + +bool Rule::operator==(const Rule &other) const +{ + return (m_match == other.m_match) && + (m_isChangingRequestData == other.m_isChangingRequestData) && + (m_behaviors == other.m_behaviors); +} + +bool Policy::operator==(const Policy &other) const +{ + return m_RequestOverrides == other.m_RequestOverrides && + m_ResponseOverrides == other.m_ResponseOverrides; +} + +State::State() : + bForceBlock(false), + bForceException(false), + bIgnoreLog(false), + bSourceIdentifierOverride(false), + sSourceIdentifierMatch("") +{ +} + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapOverride.h b/components/security_apps/waap/waap_clib/WaapOverride.h new file mode 100755 index 0000000..592a57c --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapOverride.h @@ -0,0 +1,345 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include "debug.h" +#include "CidrMatch.h" + +USE_DEBUG_FLAG(D_WAAP_OVERRIDE); + +namespace Waap { +namespace Override { +using boost::algorithm::to_lower_copy; + +class Match { +public: + bool operator==(const Match &other) const; + + template + void serialize(_A &ar) { + // Read the value of "op" + ar(cereal::make_nvp("operator", m_op)); + m_op = to_lower_copy(m_op); + m_isCidr = false; + m_value = ""; + + if (m_op == "basic") { + // If op == "BASIC" - read numeric value + ar(cereal::make_nvp("tag", m_tag)); + m_tag = to_lower_copy(m_tag); + + // The name "value" here is misleading. The real meaning is "regex pattern string" + ar(cereal::make_nvp("value", m_value)); + + if (m_tag == "sourceip" || m_tag == "sourceidentifier") { + m_isCidr = Waap::Util::isCIDR(m_value, m_cidr); + } + + if (!m_isCidr) { + // regex build may throw boost::regex_error + m_valueRegex = nullptr; + try { + m_valueRegex = std::make_shared(m_value); + } + catch (const boost::regex_error &err) { + dbgDebug(D_WAAP_OVERRIDE) << "Waap::Override::Match(): Failed to compile regex pattern '" << + m_value << "' on position " << err.position() << ". Reason: '" << err.what() << "'"; + } + } + } + else { + // If op is "AND" or "OR" - get two operands + if (m_op == "and" || m_op == "or") { + m_operand1 = std::make_shared(); + ar(cereal::make_nvp("operand1", *m_operand1)); + m_operand2 = std::make_shared(); + ar(cereal::make_nvp("operand2", *m_operand2)); + } + else if (m_op == "not") { + // If op is "NOT" get one operand + m_operand1 = std::make_shared(); + ar(cereal::make_nvp("operand1", *m_operand1)); + } + } + } + + template + bool match(TestFunctor testFunctor) const { + if (m_op == "basic" && m_isCidr) { + bool result = testFunctor(m_tag, m_cidr); + dbgTrace(D_WAAP_OVERRIDE) << "Override matching CIDR: " << m_value << " result: " << result; + return result; + } + else if (m_op == "basic" && m_valueRegex) { + bool result = testFunctor(m_tag, *m_valueRegex); + dbgTrace(D_WAAP_OVERRIDE) << "Override matching regex: " << m_value << " result: " << result; + return result; + } + if (m_op == "and") { + bool result = m_operand1->match(testFunctor) && m_operand2->match(testFunctor); + dbgTrace(D_WAAP_OVERRIDE) << "Override matching logical AND result: " << result; + return result; + } + if (m_op == "or") { + bool result = m_operand1->match(testFunctor) || m_operand2->match(testFunctor); + dbgTrace(D_WAAP_OVERRIDE) << "Override matching logical OR result: " << result; + return result; + } + if (m_op == "not") { + bool result = !m_operand1->match(testFunctor); + dbgTrace(D_WAAP_OVERRIDE) << "Override matching logical NOT result: " << result; + return result; + } + + // unknown operator. 
this should not occur + dbgDebug(D_WAAP_OVERRIDE) << "Invalid override operator " << m_op; + return false; + } + +private: + std::string m_op; + std::shared_ptr m_operand1; + std::shared_ptr m_operand2; + std::string m_tag; + std::string m_value; + std::shared_ptr m_valueRegex; + Waap::Util::CIDRData m_cidr; + bool m_isCidr; +}; + +class Behavior +{ +public: + Behavior(); + bool operator==(const Behavior &other) const; + + template + void serialize(_A &ar) { + try + { + ar(cereal::make_nvp("action", m_action)); + m_action = to_lower_copy(m_action); + } + catch (std::runtime_error& e) { + ar.setNextName(nullptr); + m_action = ""; + } + + try + { + ar(cereal::make_nvp("log", m_log)); + m_log = to_lower_copy(m_log); + } + catch (const std::runtime_error& e) { + ar.setNextName(nullptr); + m_log = ""; + } + + try + { + ar(cereal::make_nvp("httpSourceId", m_sourceIdentifier)); + } + catch (const std::runtime_error & e) { + ar.setNextName(nullptr); + m_sourceIdentifier = ""; + } + + if (!m_log.size() && !m_action.size() && !m_sourceIdentifier.size()) + { + dbgDebug(D_WAAP_OVERRIDE) << "Override does not contain any relevant action"; + } + } + + const std::string &getAction() const; + const std::string &getLog() const; + const std::string &getSourceIdentifier() const; +private: + std::string m_action; + std::string m_log; + std::string m_sourceIdentifier; +}; + +class Rule { +public: + bool operator==(const Rule &other) const; + + template + void serialize(_A &ar) { + try { + ar(cereal::make_nvp("id", m_id)); + } + catch (const cereal::Exception &e) + { + dbgTrace(D_WAAP_OVERRIDE) << "An override rule has no id."; + m_id.clear(); + } + + ar(cereal::make_nvp("parsedMatch", m_match)); + ar(cereal::make_nvp("parsedBehavior", m_behaviors)); + + m_isChangingRequestData = false; + + for (std::vector::const_iterator it = m_behaviors.begin(); + it != m_behaviors.end(); + ++it) + { + const Behavior& behavior = *it; + if (!behavior.getSourceIdentifier().empty()) // this rule changes data in request itself + { + m_isChangingRequestData = true; + break; + } + } + } + + template + void match(TestFunctor testFunctor, std::vector &matchedBehaviors, + std::set &matchedOverrideIds) const + { + if (m_match.match(testFunctor)) { + // extend matchedBehaviors list with all behaviors on this rule + dbgTrace(D_WAAP_OVERRIDE) << "Override rule matched. 
Adding " << m_behaviors.size() << " new behaviors:"; + std::string overrideId = getId(); + if (!overrideId.empty()) { + matchedOverrideIds.insert(overrideId); + } + for (const Behavior &behavior : m_behaviors) { + dbgTrace(D_WAAP_OVERRIDE) << "Behavior: action='" << behavior.getAction() << "', log='" << + behavior.getLog() << "', sourceIdentifier='" << behavior.getSourceIdentifier() << "'"; + matchedBehaviors.push_back(behavior); + } + return; + } + dbgTrace(D_WAAP_OVERRIDE) << "Rule not matched"; + } + + bool isChangingRequestData() const { + return m_isChangingRequestData; + } + + const std::string &getId() const { + return m_id; + } + +private: + Match m_match; + bool m_isChangingRequestData; + std::vector m_behaviors; + std::string m_id; +}; + +class Policy { +public: + template + Policy(_A &ar) { + std::vector rules; + ar(cereal::make_nvp("overrides", rules)); + + for (std::vector::const_iterator it = rules.begin(); it != rules.end(); ++it) { + const Waap::Override::Rule& rule = *it; + if (rule.isChangingRequestData()) + { + m_RequestOverrides.push_back(rule); + } + else + { + m_ResponseOverrides.push_back(rule); + } + } + } + + bool operator==(const Policy &other) const; + + template + void match(TestFunctor &testFunctor, std::vector &matchedBehaviors, bool requestOverrides, + std::set &matchedOverrideIds) const + { + // Run all rules and collect matched behaviors + + const std::vector& rules = requestOverrides ? m_RequestOverrides : m_ResponseOverrides; + dbgTrace(D_WAAP_OVERRIDE) << "Start matching override rules ..."; + for (const Waap::Override::Rule &rule : rules) { + dbgTrace(D_WAAP_OVERRIDE) << "Matching override rule ..."; + rule.match(testFunctor, matchedBehaviors, matchedOverrideIds); + } + dbgTrace(D_WAAP_OVERRIDE) << "Finished matching override rules."; + } + +private: + std::vector m_RequestOverrides; //overrides that change request data + std::vector m_ResponseOverrides; //overrides that change response/log data +}; + +struct State { + // whether to force block regardless of stage2 response (and even if bSendRequest and/or bSendResponse are false) + bool bForceBlock; + // exception (allow) was matched, so this request won't be blocked. 
+ bool bForceException; + // overrides decision in case log should be ignored + bool bIgnoreLog; + // user identfier override to be applied + bool bSourceIdentifierOverride; + std::string sSourceIdentifierMatch; + + State(); + + // Compute overrides from override policy + template + void applyOverride(const Waap::Override::Policy &policy, Functor functor, + std::set &matchedOverrideIds, bool requestOverrides) + { + // Collect all behaviors from matched rules + std::vector matchedBehaviors; + policy.match(functor, matchedBehaviors, requestOverrides, matchedOverrideIds); + + dbgTrace(D_WAAP_OVERRIDE) << "applyOverride(): " << matchedBehaviors.size() << " detected override actions"; + + // Apply all detected behaviors + for (auto &matchedBehavior : matchedBehaviors) { + dbgTrace(D_WAAP_OVERRIDE) << "applyOverride(): found override action: " << matchedBehavior.getAction(); + if (matchedBehavior.getAction() == "accept") { + dbgTrace(D_WAAP_OVERRIDE) << "applyOverride(): setting bForceException due to override behavior."; + bForceException = true; + } + else if (matchedBehavior.getAction() == "reject") { + dbgTrace(D_WAAP_OVERRIDE) << "applyOverride(): setting bForceBlock due to override behavior."; + bForceBlock = true; + } + + if (matchedBehavior.getLog() == "ignore") + { + dbgTrace(D_WAAP_OVERRIDE) << "applyOverride(): setting bIgnoreLog due to override behavior."; + bIgnoreLog = true; + } + + sSourceIdentifierMatch = matchedBehavior.getSourceIdentifier(); + if (sSourceIdentifierMatch.size()) + { + dbgTrace(D_WAAP_OVERRIDE) << "applyOverride(): setting bSourceIdentifier -" + << "Override due to override behavior: " + << sSourceIdentifierMatch.c_str(); + bSourceIdentifierOverride = true; + } + } + } +}; + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapOverrideFunctor.cc b/components/security_apps/waap/waap_clib/WaapOverrideFunctor.cc new file mode 100755 index 0000000..94bb7bd --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapOverrideFunctor.cc @@ -0,0 +1,108 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
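// NOTE (illustrative sketch, not taken from the repository): the Waap::Override classes defined
// above are deserialized with cereal from JSON. Judging only by the make_nvp() field names in
// their serialize() methods, a single override rule could look roughly like this:
//
//   {
//     "overrides": [
//       {
//         "id": "example-rule-id",
//         "parsedMatch": {
//           "operator": "and",
//           "operand1": { "operator": "basic", "tag": "sourceip", "value": "10.0.0.0/8" },
//           "operand2": { "operator": "basic", "tag": "url",      "value": "^/health" }
//         },
//         "parsedBehavior": [ { "action": "accept", "log": "ignore" } ]
//       }
//     ]
//   }
//
// A "basic" match whose tag is sourceip or sourceidentifier is first tried as a CIDR; any other
// value is compiled as a boost regex. Rules whose behaviors carry "httpSourceId" are placed in
// m_RequestOverrides, all remaining rules in m_ResponseOverrides. The tags themselves are
// resolved against the transaction by WaapOverrideFunctor, implemented in the file below.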
+ +#include +#include +#include "WaapOverrideFunctor.h" +#include "Waf2Engine.h" +#include "CidrMatch.h" +#include "agent_core_utilities.h" +#include "debug.h" + +USE_DEBUG_FLAG(D_WAAP_OVERRIDE); + +WaapOverrideFunctor::WaapOverrideFunctor(Waf2Transaction& waf2Transaction) :waf2Transaction(waf2Transaction) +{ +} + +bool WaapOverrideFunctor::operator()(const std::string& tag, const Waap::Util::CIDRData& value) { + if (tag == "sourceip") { + dbgDebug(D_WAAP_OVERRIDE) << "Remote IP Address : " << waf2Transaction.getRemoteAddr() << " CIDR: " << + value.cidrString; + std::string sourceIp = waf2Transaction.getRemoteAddr(); + // match sourceIp against the cidr + return Waap::Util::cidrMatch(sourceIp, value); + } + else if (tag == "sourceidentifier") { + dbgDebug(D_WAAP_OVERRIDE) << "Remote IP Address : " << waf2Transaction.getRemoteAddr() << " CIDR: " << + value.cidrString; + std::string sourceIp = waf2Transaction.getSourceIdentifier(); + // match source against the cidr + return Waap::Util::cidrMatch(sourceIp, value); + } + + return false; +} + +bool WaapOverrideFunctor::operator()(const std::string& tag, const boost::regex& rx) +{ + boost::cmatch what; + try { + if (tag == "url") { + return NGEN::Regex::regexMatch(__FILE__, __LINE__, waf2Transaction.getUriStr().c_str(), what, rx); + } + else if (tag == "hostname") { + return NGEN::Regex::regexMatch(__FILE__, __LINE__, waf2Transaction.getHost().c_str(), what, rx); + } + else if (tag == "sourceidentifier") { + return NGEN::Regex::regexMatch( + __FILE__, + __LINE__, + waf2Transaction.getSourceIdentifier().c_str(), + what, + rx + ); + } + else if (tag == "keyword") { + for (const std::string& keywordStr : waf2Transaction.getKeywordMatches()) { + if (NGEN::Regex::regexMatch(__FILE__, __LINE__, keywordStr.c_str(), what, rx)) { + return true; + } + } + return false; + } + else if (tag == "paramname" || tag == "paramName") { + for (const DeepParser::KeywordInfo& keywordInfo : waf2Transaction.getKeywordInfo()) { + if (NGEN::Regex::regexMatch(__FILE__, __LINE__, keywordInfo.getName().c_str(), what, rx)) { + return true; + } + } + if (NGEN::Regex::regexMatch(__FILE__, __LINE__, waf2Transaction.getParamKey().c_str(), what, rx)) { + return true; + } + if (NGEN::Regex::regexMatch(__FILE__, __LINE__, waf2Transaction.getParam().c_str(), what, rx)) { + return true; + } + return false; + } + else if (tag == "paramvalue" || tag == "paramValue") { + for (const DeepParser::KeywordInfo& keywordInfo : waf2Transaction.getKeywordInfo()) { + if (NGEN::Regex::regexMatch(__FILE__, __LINE__, keywordInfo.getValue().c_str(), what, rx)) { + return true; + } + } + if (NGEN::Regex::regexMatch(__FILE__, __LINE__, waf2Transaction.getSample().c_str(), what, rx)) { + return true; + } + return false; + } + } + catch (std::runtime_error & e) { + dbgDebug(D_WAAP_OVERRIDE) << "RegEx match for tag " << tag << " failed due to: " << e.what(); + return false; + } + + // Unknown tag: should not occur + dbgWarning(D_WAAP) << "Invalid override tag:" << tag; + return false; +} diff --git a/components/security_apps/waap/waap_clib/WaapOverrideFunctor.h b/components/security_apps/waap/waap_clib/WaapOverrideFunctor.h new file mode 100755 index 0000000..13d76ef --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapOverrideFunctor.h @@ -0,0 +1,35 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +namespace Waap { + namespace Util { + struct CIDRData; // forward decleration + } +} + +class Waf2Transaction; + +// Functor used to match Override rules against request data +class WaapOverrideFunctor { +public: + WaapOverrideFunctor(Waf2Transaction& waf2Transaction); + + bool operator()(const std::string& tag, const Waap::Util::CIDRData& value); + + bool operator()(const std::string& tag, const boost::regex& rx); + +private: + Waf2Transaction& waf2Transaction; +}; diff --git a/components/security_apps/waap/waap_clib/WaapParameters.cc b/components/security_apps/waap/waap_clib/WaapParameters.cc new file mode 100755 index 0000000..c8e70da --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapParameters.cc @@ -0,0 +1,35 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapParameters.h" + +using namespace Waap::Parameters; + +bool WaapParameters::operator==(const WaapParameters &other) const +{ + return m_paramMap == other.m_paramMap; +} + +ParamMap WaapParameters::getParamsMap() const +{ + return m_paramMap; +} + +Value WaapParameters::getParamVal(Parameter key, Value defaultVal) +{ + if (m_paramMap.find(key) == m_paramMap.end()) + { + return defaultVal; + } + return m_paramMap[key]; +} diff --git a/components/security_apps/waap/waap_clib/WaapParameters.h b/components/security_apps/waap/waap_clib/WaapParameters.h new file mode 100755 index 0000000..c7c5c12 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapParameters.h @@ -0,0 +1,45 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
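// NOTE (illustrative usage, hypothetical call site): the WaapParameters class declared just below
// is filled from the "waapParameters" map of the policy JSON and queried with a fallback default.
// A minimal sketch, assuming `ar` is a cereal JSON input archive over that policy:
//
//   Waap::Parameters::WaapParameters params(ar);
//   Waap::Parameters::Value verbose = params.getParamVal("filtersVerbose", "false");
//   if (verbose == "true") {
//       // enable verbose indicator-filter logging, as the scanner code further down does
//   }
//
// Any key that is absent from the map simply yields the supplied default value.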
+ +#pragma once +#include +#include +#include +#include + + +namespace Waap { + namespace Parameters { + typedef std::string Parameter; + typedef std::string Value; + typedef std::unordered_map ParamMap; + + class WaapParameters + { + public: + template + WaapParameters(_A& ar) + { + ar(cereal::make_nvp("waapParameters", m_paramMap)); + } + + bool operator==(const WaapParameters &other) const; + + ParamMap getParamsMap() const; + Value getParamVal(Parameter key, Value defaultVal); + private: + ParamMap m_paramMap; + }; + + } +} diff --git a/components/security_apps/waap/waap_clib/WaapRegexPreconditions.cc b/components/security_apps/waap/waap_clib/WaapRegexPreconditions.cc new file mode 100755 index 0000000..f4bca8e --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapRegexPreconditions.cc @@ -0,0 +1,341 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapRegexPreconditions.h" +#include "Waf2Util.h" +#include "debug.h" +#include + +USE_DEBUG_FLAG(D_WAAP_REGEX); + +namespace Waap { + const RegexPreconditions::WordIndex RegexPreconditions::emptyWordIndex = 0; + + RegexPreconditions::RegexPreconditions(const picojson::value::object &jsObj, bool &error) + { + // Register empty string work under known index + registerWord(""); + + // The key should always be there unless data file is corrupted (but there's a unit test that tests exactly + // that!) + if (jsObj.find("preconditions") == jsObj.end()) { + dbgError(D_WAAP_REGEX) << "Error loading regex preconditions (signatures data file corrupt?)..."; + error = true; + return; + } + + if (jsObj.find("precondition_keys") == jsObj.end()) { + dbgError(D_WAAP_REGEX) << "Error loading regex precondition sets (signatures data file corrupt?)..."; + error = true; + return; + } + + auto preconditions = jsObj.at("preconditions").get(); + + // Build full list of words to load into aho-corasick pattern matcher + dbgTrace(D_WAAP_REGEX) << "Loading regex precondition_keys into Aho-Corasick pattern matcher..."; + + auto preconditionKeys = jsObj.at("precondition_keys").get(); + std::set pmPatterns; + + for (const auto &preconditionKey : preconditionKeys) { + std::string wordStr(preconditionKey.get()); + + // Do not load the "empty" word into Aho-Corasick. It's meaningless and Aho prepare() call would fail. 
+ if (wordStr.empty()) { + continue; + } + + WordIndex wordIndex = registerWord(wordStr); + pmPatterns.insert(PMPattern(wordStr, false, false, wordIndex)); + } + + // Initialize the aho-corasick pattern matcher with the patterns + Maybe pmHookStatus = m_pmHook.prepare(pmPatterns); + + if (!pmHookStatus.ok()) { + dbgError(D_WAAP_REGEX) << "Aho-Corasick engine failed to load!"; + error = true; + return; + } + + dbgTrace(D_WAAP_REGEX) << "Aho-Corasick engine loaded."; + + // Loop over pre-conditions (rules) and load them + dbgTrace(D_WAAP_REGEX) << "Loading regex preconditions..."; + + for (const auto &precondition : preconditions) + { + // Each precondition consists of an aho-corasick pattern matcher word as a key and list of actions + // (for that word) - as a value. + const std::string wordStr = precondition.first; + + // Information from the "empty string"" word is not required by the engine to operate + if (wordStr.empty()) { + continue; + } + + WordIndex wordIndex = registerWord(wordStr); + + if (boost::algorithm::ends_with(wordStr, "_napost_napre")) { + WordIndex baseWordIndex = registerWord(wordStr.substr(0, wordStr.size() - strlen("_napost_napre"))); + m_pmWordInfo[baseWordIndex].napostNapreWordIndex = wordIndex; + m_pmWordInfo[wordIndex].baseWordIndex = baseWordIndex; + } + else if (boost::algorithm::ends_with(wordStr, "_napost")) { + WordIndex baseWordIndex = registerWord(wordStr.substr(0, wordStr.size() - strlen("_napost"))); + m_pmWordInfo[baseWordIndex].napostWordIndex = wordIndex; + m_pmWordInfo[wordIndex].baseWordIndex = baseWordIndex; + } + else if (boost::algorithm::ends_with(wordStr, "_napre")) { + WordIndex baseWordIndex = registerWord(wordStr.substr(0, wordStr.size() - strlen("_napre"))); + m_pmWordInfo[baseWordIndex].napreWordIndex = wordIndex; + m_pmWordInfo[wordIndex].baseWordIndex = baseWordIndex; + } + + // Load actions + const auto &jsActionsList = precondition.second.get(); + + for (const auto &jsAction : jsActionsList) { + const auto &action = jsAction.get(); + + if (action.empty()) { + continue; + } + + // The first item in the Action json object (it's a tuple of 1 or more items) is an action type string. + const std::string actionType = action[0].get(); + + // There are currently three action types: + // 1. "regex" - allow specific regex to be scanned when the Aho-Corasick word is detected + // 2. "set" - specify another "prefix" (string) to be enabled when the Aho-Corasick word is detected. + // if at least one prefix is enabled - it will trigger one or more other regexes. + // 3. "and_condition" - specify (comma-separated) sorted list of "prefixes" (in one string). + // all of these prefixes should come together in order to complete a set to match a + // condition and enable one or more other regexes. + if (actionType == "regex" && action.size() >= 3) { + const std::string regexPattern = action[1].get(); + if (m_regexToWordMap.find(regexPattern) != m_regexToWordMap.end() && + m_regexToWordMap[regexPattern] != wordIndex) + { + dbgError(D_WAAP_REGEX) << "ERROR: trying to overwrite m_regexToWordMap. pattern='" << + regexPattern << "'. 
Old wordIndex='" << m_regexToWordMap[regexPattern] << "' new word='" + << wordStr << "' (wordIndex=" << wordIndex << ")"; + error = true; + return; + } + + std::string flags = action[2].get(); + + if (flags == "_noregex") { + // Add regex pattern to set of "noRegex" patterns + m_noRegexPatterns.insert(regexPattern); + } + + m_regexToWordMap[regexPattern] = wordIndex; + } + else if (actionType == "set" && action.size() >= 2) { + const std::string setValueStr = action[1].get(); + WordIndex setValueIndex = registerWord(setValueStr); + std::vector &prefixSet = m_wordToPrefixSet[wordIndex]; + if (std::find(prefixSet.begin(), prefixSet.end(), + setValueIndex) == prefixSet.end()) { + prefixSet.push_back(setValueIndex); + } + } + else if (actionType == "and_condition" && action.size() >= 2) { + const std::string groupValueStr = action[1].get(); + WordIndex groupValueIndex = registerWord(groupValueStr); + size_t expectedCount = static_cast(std::stoi(groupValueStr)); + auto value(std::make_pair(groupValueIndex, expectedCount)); + std::vector> &prefixGroup = m_wordToPrefixGroup[wordIndex]; + if (std::find(prefixGroup.begin(), prefixGroup.end(), + value) == prefixGroup.end()) { + prefixGroup.push_back(value); + } + } + } + } + + dbgTrace(D_WAAP_REGEX) << "Aho-corasick pattern matching engine initialized!"; + } + + bool Waap::RegexPreconditions::isNoRegexPattern(const std::string &pattern) const + { + return m_noRegexPatterns.find(pattern) != m_noRegexPatterns.end(); + } + + const std::string &Waap::RegexPreconditions::getWordStrByWordIndex(WordIndex wordIndex) const + { + WordIndex baseWordIndex = m_pmWordInfo[wordIndex].baseWordIndex; + + if (baseWordIndex != Waap::RegexPreconditions::emptyWordIndex) { + return m_pmWordInfo[baseWordIndex].wordStr; + } + + return m_pmWordInfo[wordIndex].wordStr; + } + + // Check that the regex pattern (string) is known to be related to an Aho-Corasick word/prefix + // Returns empty string if not found, or the Aho-Corasick/prefix string otherwise. + // This function is called during each Regex object creation and helps to pre-compute data required for a fast + // lookup later during traffic processing. 
+ Waap::RegexPreconditions::WordIndex RegexPreconditions::getWordByRegex(const std::string ®exPattern) const + { + const auto &found = m_regexToWordMap.find(regexPattern); + + if (found != m_regexToWordMap.end()) { + return found->second; + } + + return Waap::RegexPreconditions::emptyWordIndex; + } + + void RegexPreconditions::processWord(RegexPreconditions::PmWordSet &wordsSet, WordIndex wordIndex) const + { + const auto &found = m_wordToPrefixSet.find(wordIndex); + + if (found != m_wordToPrefixSet.end()) { + for (const auto &prefixIndex : found->second) { + // One of the items in the "OR" condition - add the OR prefix to the wordsSet + wordsSet.insert(prefixIndex); + } + } + + // Add words from the Aho Corasick scanner + wordsSet.insert(wordIndex); + } + + inline bool isRegexWordChar(u_char c) { + return Waap::Util::isAlphaAsciiFast(c) || isdigit(c) || '_' == c; + } + + void RegexPreconditions::pass1(RegexPreconditions::PmWordSet &wordsSet, Buffer &&buffer) const + { + dbgTrace(D_WAAP_REGEX) << "Rules pass #1: collect OR sets"; + + m_pmHook.scanBufWithOffsetLambda(buffer, [this, &wordsSet, &buffer] + (u_int endMatchOffset, const PMPattern &pmPattern) + { + uint offset = endMatchOffset + 1 - pmPattern.size(); // reported offset points to last character of a match + + // Extract the word index from the PMPattern object (we do not need the string part of it) + WordIndex wordIndex = pmPattern.getIndex(); + + bool regexWordBefore = (offset != 0) && + (isRegexWordChar(buffer.data()[offset - 1])); + bool regexWordAfter = (offset + pmPattern.size() < buffer.size()) && + (isRegexWordChar(buffer.data()[offset + pmPattern.size()])); + + processWord(wordsSet, wordIndex); + + // Compute additional constraints ([!\w] before, [!\w] after, [!\w] aroung the match ...) 
+ WordIndex napreWordIndex = m_pmWordInfo[wordIndex].napreWordIndex; + WordIndex napostWordIndex = m_pmWordInfo[wordIndex].napostWordIndex; + WordIndex napostNapreWordIndex = m_pmWordInfo[wordIndex].napostNapreWordIndex; + + if (!regexWordBefore && regexWordAfter) { + if (napreWordIndex != emptyWordIndex) { + processWord(wordsSet, napreWordIndex); + } + } + else if (regexWordBefore && !regexWordAfter) { + if (napostWordIndex != emptyWordIndex) { + processWord(wordsSet, napostWordIndex); + } + } + else if (!regexWordBefore && !regexWordAfter) { + if (napreWordIndex != emptyWordIndex) { + processWord(wordsSet, napreWordIndex); + } + + if (napostWordIndex != emptyWordIndex) { + processWord(wordsSet, napostWordIndex); + } + + if (napostNapreWordIndex != emptyWordIndex) { + processWord(wordsSet, napostNapreWordIndex); + } + } + }); + } + + void RegexPreconditions::pass2(RegexPreconditions::PmWordSet &wordsSet) const + { + dbgTrace(D_WAAP_REGEX) << "Rules pass #2: collect AND groups"; + + std::unordered_map> allGroups; + std::vector prefixes; + + for (WordIndex wordIndex : wordsSet) { + // find in wordToPrefixGroup map + const auto &found = m_wordToPrefixGroup.find(wordIndex); + + if (found != m_wordToPrefixGroup.end()) { + for (const auto &prefixCountPair : found->second) { + WordIndex prefixIndex = prefixCountPair.first; + size_t expectedCount = prefixCountPair.second; + + auto found = allGroups.find(prefixIndex); + size_t actualWordCount = 1; + + if (found == allGroups.end()) { + allGroups.emplace(prefixIndex, std::set{wordIndex}); + } + else { + found->second.insert(wordIndex); + actualWordCount = found->second.size(); + } + + if (actualWordCount == expectedCount) { + // Full "AND" condition collected succesfully - add the AND prefixCountPair to the wordsSet + prefixes.push_back(prefixIndex); + } + } + } + } + + for (const auto &prefixIndex : prefixes) { + wordsSet.insert(prefixIndex); + } + } + + // This function scans the buffer with Aho-Corasick scanner and adds all the words found into wordsSet + // It then continues and runs two pass algorithm to compute OR and AND conditions over a prefixes data. + // The prefix strings are also added to the wordsSet and are looked up in the same database. + void RegexPreconditions::pmScan(Buffer &&buffer, RegexPreconditions::PmWordSet &wordsSet) const + { + wordsSet.clear(); + pass1(wordsSet, std::move(buffer)); + pass2(wordsSet); + // The empty string key contains all regexes that should always be scanned + wordsSet.insert(Waap::RegexPreconditions::emptyWordIndex); + } + + // Get known wordIndex by wordStr, or allocate a new wordIndex for words yet unknown + Waap::RegexPreconditions::WordIndex RegexPreconditions::registerWord(const std::string &wordStr) + { + const auto &found = m_wordStrToIndex.find(wordStr); + if (found != m_wordStrToIndex.end()) { + return found->second; + } + else { + WordIndex wordIndex = m_pmWordInfo.size(); + m_wordStrToIndex[wordStr] = wordIndex; // index of the new element that will be added below... + WordInfo wordInfo; + wordInfo.wordStr = wordStr; + m_pmWordInfo.push_back(wordInfo); + return wordIndex; + } + } +} diff --git a/components/security_apps/waap/waap_clib/WaapRegexPreconditions.h b/components/security_apps/waap/waap_clib/WaapRegexPreconditions.h new file mode 100755 index 0000000..f3c2c69 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapRegexPreconditions.h @@ -0,0 +1,89 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __WAAP_REGEX_PRECONDITIONS_H__ +#define __WAAP_REGEX_PRECONDITIONS_H__ + +#include "picojson.h" +#include "pm_hook.h" +#include "i_pm_scan.h" +#include +#include +#include +#include + +namespace Waap { + class RegexPreconditions + { + public: + typedef size_t WordIndex; + static const WordIndex emptyWordIndex; // special word index used to index the "impossible" empty word + private: + // Maps regex pattern string to Aho-Coraick pattern matcher word + typedef std::unordered_map RegexToWordMap; + // Maps Aho-Corasick pattern word to list of "prefixes" (special tags used to implement OR and AND semantics) + typedef std::unordered_map> WordToPrefixSet; + typedef std::unordered_map>> WordToPrefixGroup; + public: + typedef std::unordered_set PmWordSet; + + // The constructor builds internal data from Json object. Once built - the object becomes read-only. + RegexPreconditions(const picojson::value::object &jsObj, bool &error); + bool isNoRegexPattern(const std::string &pattern) const; + const std::string &getWordStrByWordIndex(WordIndex wordIndex) const; + Waap::RegexPreconditions::WordIndex getWordByRegex(const std::string &pattern) const; + // Run aho-corasick scan on a sample followed by "set" and "and_condition" rules. Returns set of words + // that can be used to speed up following calls to Regex::findAllMatches() on the same sample. 
+ void pmScan(Buffer &&buffer, RegexPreconditions::PmWordSet &allSets) const; + + private: + void processWord(RegexPreconditions::PmWordSet &wordsSet, WordIndex wordIndex) const; + void pass1(RegexPreconditions::PmWordSet &wordsSet, Buffer &&buffer) const; + void pass2(RegexPreconditions::PmWordSet &wordsSet) const; + + RegexToWordMap m_regexToWordMap; + // For each aho-corasick word - hold a list of "prefixes" which are in OR relationship between them (at least + // one must match in order to trigger a condition on a prefix) + WordToPrefixSet m_wordToPrefixSet; + // For each aho-corasick word - hold a list of "prefixes" which are in AND relationship between them (all must + // be detected in order to trigger a condition on a prefix) + WordToPrefixGroup m_wordToPrefixGroup; + // Aho-Corasick pattern matcher object + PMHook m_pmHook; + + struct WordInfo { + WordIndex napostNapreWordIndex; + WordIndex napostWordIndex; + WordIndex napreWordIndex; + WordIndex baseWordIndex; + std::string wordStr; + + WordInfo() + : + napostNapreWordIndex(emptyWordIndex), + napostWordIndex(emptyWordIndex), + napreWordIndex(emptyWordIndex), + baseWordIndex(0), + wordStr() + { + } + }; + + WordIndex registerWord(const std::string &wordStr); + std::vector m_pmWordInfo; + std::map m_wordStrToIndex; // TODO:: remove this into throwaway object, no need to keep + std::set m_noRegexPatterns; // patterns that require no regex matching (Aho Corasick is enough) + }; +} + +#endif // __WAAP_REGEX_PRECONDITIONS_H__ diff --git a/components/security_apps/waap/waap_clib/WaapResponseInjectReasons.cc b/components/security_apps/waap/waap_clib/WaapResponseInjectReasons.cc new file mode 100644 index 0000000..c88d233 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapResponseInjectReasons.cc @@ -0,0 +1,91 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
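// NOTE (illustrative sketch, not a real signatures file): RegexPreconditions above is built from
// the "precondition_keys" and "preconditions" sections of the signatures data file. Reading the
// loader in WaapRegexPreconditions.cc, each precondition key is an Aho-Corasick word mapped to a
// list of action tuples, so a hand-written fragment could look roughly like:
//
//   {
//     "precondition_keys": [ "select", "union" ],
//     "preconditions": {
//       "select": [
//         [ "regex", "select_from_pattern", "" ],      // enable this regex when "select" is found
//         [ "set",   "sql_prefix" ]                    // OR-style prefix shared by several words
//       ],
//       "union": [
//         [ "and_condition", "2_sql_group" ]           // AND-style group; the leading digits give
//       ]                                              // the number of distinct words required
//     }
//   }
//
// Keys ending in _napre / _napost / _napost_napre encode the "no alphanumeric character before /
// after / around the match" variants applied in pass1(), and a "regex" action whose flags string
// is "_noregex" marks patterns for which the Aho-Corasick hit alone is decisive (isNoRegexPattern()).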
+ +#include "WaapResponseInjectReasons.h" +#include "debug.h" +#include + +USE_DEBUG_FLAG(D_WAAP); + +namespace Waap { + +ResponseInjectReasons::ResponseInjectReasons() +: +csrf(false), +antibot(false), +securityHeaders(false) +{ +} + +void +ResponseInjectReasons::clear() +{ + dbgTrace(D_WAAP) << "ResponseInjectReasons::clear()"; + setCsrf(false); + setAntibot(false); + setSecurityHeaders(false); +} + +bool +ResponseInjectReasons::shouldInject() const +{ + dbgTrace(D_WAAP) << "ResponseInjectReasons::shouldInject():" << + " AntiBot= " << antibot << + " CSRF= " << csrf << + " SecurityHeaders= " << securityHeaders; + return csrf || antibot || securityHeaders; +} + +void +ResponseInjectReasons::setAntibot(bool flag) +{ + dbgTrace(D_WAAP) << "Change ResponseInjectReasons(Antibot) " << antibot << " to " << flag; + antibot = flag; +} + +void +ResponseInjectReasons::setCsrf(bool flag) +{ + dbgTrace(D_WAAP) << "Change ResponseInjectReasons(CSRF) " << csrf << " to " << flag; + csrf = flag; +} + +void +ResponseInjectReasons::setSecurityHeaders(bool flag) +{ + dbgTrace(D_WAAP) << "Change ResponseInjectReasons(Security Headers) " << securityHeaders << " to " << flag; + securityHeaders = flag; +} + +bool +ResponseInjectReasons::shouldInjectAntibot() const +{ + dbgTrace(D_WAAP) << "shouldInjectAntibot():: " << antibot; + return antibot; +} + +bool +ResponseInjectReasons::shouldInjectCsrf() const +{ + dbgTrace(D_WAAP) << "shouldInjectCsrf():: " << csrf; + return csrf; +} + +bool +ResponseInjectReasons::shouldInjectSecurityHeaders() const +{ + dbgTrace(D_WAAP) << "shouldInjectSecurityHeaders():: " << securityHeaders; + return securityHeaders; +} + +} diff --git a/components/security_apps/waap/waap_clib/WaapResponseInjectReasons.h b/components/security_apps/waap/waap_clib/WaapResponseInjectReasons.h new file mode 100644 index 0000000..af4e7ee --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapResponseInjectReasons.h @@ -0,0 +1,35 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +namespace Waap { + +class ResponseInjectReasons { +public: + ResponseInjectReasons(); + void clear(); + bool shouldInject() const; + void setAntibot(bool flag); + void setCsrf(bool flag); + void setSecurityHeaders(bool flag); + bool shouldInjectAntibot() const; + bool shouldInjectCsrf() const; + bool shouldInjectSecurityHeaders() const; +private: + bool csrf; + bool antibot; + bool securityHeaders; +}; + +} diff --git a/components/security_apps/waap/waap_clib/WaapResponseInspectReasons.cc b/components/security_apps/waap/waap_clib/WaapResponseInspectReasons.cc new file mode 100644 index 0000000..d626057 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapResponseInspectReasons.cc @@ -0,0 +1,79 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapResponseInspectReasons.h" +#include "debug.h" + +USE_DEBUG_FLAG(D_WAAP); + +namespace Waap { + +ResponseInspectReasons::ResponseInspectReasons() +: +openRedirect(false), +errorDisclosure(false), +errorLimiter(false), +rateLimiting(false), +collectResponseForLog(false) +{ +} + +bool +ResponseInspectReasons::shouldInspect() const +{ + dbgTrace(D_WAAP) << "ResponseInspectReasons::shouldInspect():" << + " OpenRedirect=" << openRedirect << + " ErrorDisclosure=" << errorDisclosure << + " RateLimiting=" << rateLimiting << + " ErrorLimiter=" << errorLimiter << + " collectResponseForLog=" << collectResponseForLog; + return openRedirect || errorDisclosure || rateLimiting || errorLimiter || collectResponseForLog; +} + +void +ResponseInspectReasons::setOpenRedirect(bool flag) +{ + dbgTrace(D_WAAP) << "Change ResponseInspectReasons(OpenRedirect) " << openRedirect << " to " << flag; + openRedirect = flag; +} + +void +ResponseInspectReasons::setErrorDisclosure(bool flag) +{ + dbgTrace(D_WAAP) << "Change ResponseInspectReasons(ErrorDisclosure) " << errorDisclosure << " to " << flag; + errorDisclosure = flag; +} + +void +ResponseInspectReasons::setRateLimiting(bool flag) +{ + dbgTrace(D_WAAP) << "Change ResponseInspectReasons(RateLimiting) " << rateLimiting << " to " << flag; + rateLimiting = flag; +} + +void +ResponseInspectReasons::setErrorLimiter(bool flag) +{ + dbgTrace(D_WAAP) << "Change ResponseInspectReasons(ErrorLimiter) " << errorLimiter << " to " << flag; + errorLimiter = flag; +} + +void +ResponseInspectReasons::setCollectResponseForLog(bool flag) +{ + dbgTrace(D_WAAP) << "Change ResponseInspectReasons(collectResponseForLog) " << collectResponseForLog << " to " << + flag; + collectResponseForLog = flag; +} + +} diff --git a/components/security_apps/waap/waap_clib/WaapResponseInspectReasons.h b/components/security_apps/waap/waap_clib/WaapResponseInspectReasons.h new file mode 100644 index 0000000..eb13c6c --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapResponseInspectReasons.h @@ -0,0 +1,35 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
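// NOTE (illustrative usage, hypothetical call site): ResponseInspectReasons, declared just below
// and implemented above, only aggregates per-feature flags. The expected pattern is that features
// request response inspection as they detect the need for it, and the transaction consults the
// aggregate before paying for response buffering, roughly:
//
//   Waap::ResponseInspectReasons reasons;
//   reasons.setErrorDisclosure(true);
//   if (reasons.shouldInspect()) {
//       // keep and scan the HTTP response body
//   }
//
// ResponseInjectReasons earlier in this patch plays the mirror role on the injection side, with
// shouldInject() gating whether anything needs to be added to the response for CSRF, anti-bot,
// or security headers.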
+ +#pragma once + +namespace Waap { + +class ResponseInspectReasons { +public: + ResponseInspectReasons(); + bool shouldInspect() const; + void setOpenRedirect(bool flag); + void setErrorDisclosure(bool flag); + void setRateLimiting(bool flag); + void setErrorLimiter(bool flag); + void setCollectResponseForLog(bool flag); +private: + bool openRedirect; + bool errorDisclosure; + bool errorLimiter; + bool rateLimiting; + bool collectResponseForLog; +}; + +} diff --git a/components/security_apps/waap/waap_clib/WaapResultJson.cc b/components/security_apps/waap/waap_clib/WaapResultJson.cc new file mode 100644 index 0000000..e7aab48 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapResultJson.cc @@ -0,0 +1,255 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapResultJson.h" +#include "Waf2Engine.h" +#include "WaapAssetState.h" + +std::string buildWaapResultJson(Waf2ScanResult *m_scanResult, const Waf2Transaction &t, bool bSendResponse, + const std::string &normalizedUri, const std::string &uri, bool bForceBlock, + bool bForceException) +{ + auto hdr_pairs = t.getHdrPairs(); + auto notes = t.getNotes(); + auto scanResultKeywordCombinations = t.getKeywordsCombinations(); + auto keywordInfo = t.getKeywordInfo(); + auto kvPairs = t.getKvPairs(); + auto scoreArray = t.getScoreArray(); + + if (m_scanResult) { + Waap::Util::Yajl y; + { + Waap::Util::Yajl::Map root(y); + root.gen_key("data"); + { + Waap::Util::Yajl::Map data(y); + + data.gen_key("transaction"); + { + Waap::Util::Yajl::Map transaction(y); + transaction.gen_str("time", t.getLogTime()); + transaction.gen_integer("remote_port", t.getRemotePort()); + transaction.gen_str("remote_address", t.getRemoteAddr()); + std::string support_id = t.getTransactionIdStr(); + transaction.gen_str("support_id", support_id); + } + data.gen_key("request"); + { + Waap::Util::Yajl::Map request(y); + request.gen_str("method", t.getMethod()); + request.gen_str("uri", normalizedUri); + request.gen_str("orig_uri", uri); + request.gen_str("ct", t.getContentTypeStr()); + request.gen_key("headers"); + { + Waap::Util::Yajl::Map headers(y); + for (std::vector >::iterator it = hdr_pairs.begin(); + it != hdr_pairs.end(); + ++it) { + headers.gen_str(it->first, it->second); + } + } + } + data.gen_str("ct", t.getContentTypeStr()); + } + root.gen_key("res"); + { + Waap::Util::Yajl::Map res(y); + res.gen_str("param_location", m_scanResult->location); + res.gen_str("param_name", m_scanResult->param_name); + res.gen_str("line", m_scanResult->unescaped_line); + res.gen_key("keyword_matches"); + { + Waap::Util::Yajl::Array keyword_matches(y); + for (std::vector::iterator pM = m_scanResult->keyword_matches.begin(); + pM != m_scanResult->keyword_matches.end(); + ++pM) { + std::string& m = *pM; + keyword_matches.gen_str(m); + } + } + res.gen_key("ntags"); + { + Waap::Util::Yajl::Map ntags(y); + for (Waap::Util::map_of_stringlists_t::iterator pKv = m_scanResult->found_patterns.begin(); + pKv != m_scanResult->found_patterns.end(); + 
++pKv) { + ntags.gen_key(pKv->first); + { + Waap::Util::Yajl::Array ntags_val(y); + for (std::vector::iterator pV = pKv->second.begin(); + pV != pKv->second.end(); + ++pV) { + ntags_val.gen_str(*pV); + } + } + } + } + res.gen_double("score", t.getScore()); + res.gen_key("scores_array"); + { + Waap::Util::Yajl::Array scores_array(y); + for (std::vector::iterator pScore = scoreArray.begin(); + pScore != scoreArray.end(); + ++pScore) { + scores_array.gen_double(*pScore); + } + } + res.gen_key("keyword_combinations"); + { + Waap::Util::Yajl::Array keyword_combinations_array(y); + for (std::vector::iterator pCombination = scanResultKeywordCombinations.begin(); + pCombination != scanResultKeywordCombinations.end(); + ++pCombination) { + keyword_combinations_array.gen_str(*pCombination); + } + } + } + + root.gen_bool("stage1_force_block", bForceBlock); + + if (bForceException) { + root.gen_bool("stage1_force_exception", bForceException); + } + + // TODO:: the output of these should be throttled to up to X per minute (or hour). + // Maybe throttling should be done elsewhere and flag should be present whether to + // output the data or not (or just assume i m_keywordInfo.size()==0 - don't output). + root.gen_key("k_api"); + { + Waap::Util::Yajl::Array k_api(y); + for (std::vector::const_iterator it = keywordInfo.begin(); + it != keywordInfo.end(); + ++it) { + const DeepParser::KeywordInfo& keywordInfo = *it; + Waap::Util::Yajl::Map k_api_kw(y); + k_api_kw.gen_str("type", keywordInfo.getType()); + k_api_kw.gen_str("name", keywordInfo.getName()); + k_api_kw.gen_str("value", keywordInfo.getValue()); + k_api_kw.gen_integer("len", keywordInfo.getValue().length()); + } + } + root.gen_key("x_kvs"); + { + Waap::Util::Yajl::Map x_kvs(y); + for (std::vector >::iterator it = kvPairs.begin(); + it != kvPairs.end(); + ++it) { + std::string& k = it->first; + std::string& v = it->second; + x_kvs.gen_str(k, v); + } + } + + root.gen_str("x_body", t.getRequestBody()); + if (!notes.empty()) { + root.gen_key("notes"); + Waap::Util::Yajl::Array jsNotes(y); + for (std::vector::const_iterator it = notes.begin(); it != notes.end(); ++it) { + jsNotes.gen_str(*it); + } + } + + root.gen_bool("send_response", bSendResponse); + root.gen_bool("login_url", false); + } + + return (bSendResponse ? "1" : "0") + y.get_json_str(); + } + else { + Waap::Util::Yajl y; + { + Waap::Util::Yajl::Map root(y); + root.gen_key("data"); + { + Waap::Util::Yajl::Map data(y); + data.gen_key("transaction"); + { + Waap::Util::Yajl::Map transaction(y); + transaction.gen_str("time", t.getLogTime()); + transaction.gen_integer("remote_port", t.getRemotePort()); + transaction.gen_str("remote_address", t.getRemoteAddr()); + std::string support_id = t.getTransactionIdStr(); + transaction.gen_str("support_id", support_id); + } + data.gen_key("request"); + { + Waap::Util::Yajl::Map request(y); + request.gen_str("method", t.getMethod()); + request.gen_str("uri", normalizedUri); + request.gen_str("orig_uri", uri); + request.gen_str("ct", t.getContentTypeStr()); + request.gen_key("headers"); + { + Waap::Util::Yajl::Map headers(y); + for (std::vector >::iterator it = hdr_pairs.begin(); + it != hdr_pairs.end(); + ++it) { + headers.gen_str(it->first, it->second); + } + } + } + data.gen_str("ct", t.getContentTypeStr()); + } + + root.gen_bool("stage1_force_block", bForceBlock); + + if (bForceException) { + root.gen_bool("stage1_force_exception", bForceException); + } + + // TODO:: the output of these should be throttled to up to X per minute (or hour). 
+ // Maybe throttling should be done elsewhere and flag should be present whether to + // output the data or not (or just assume i m_keywordInfo.size()==0 - don't output). + root.gen_key("k_api"); + { + Waap::Util::Yajl::Array k_api(y); + for (std::vector::const_iterator it = keywordInfo.begin(); + it != keywordInfo.end(); + ++it) { + const DeepParser::KeywordInfo& keywordInfo = *it; + Waap::Util::Yajl::Map k_api_kw(y); + k_api_kw.gen_str("type", keywordInfo.getType()); + k_api_kw.gen_str("name", keywordInfo.getName()); + k_api_kw.gen_str("value", keywordInfo.getValue()); + k_api_kw.gen_integer("len", keywordInfo.getValue().length()); + } + } + root.gen_key("x_kvs"); + { + Waap::Util::Yajl::Map x_kvs(y); + for (std::vector >::iterator it = kvPairs.begin(); + it != kvPairs.end(); + ++it) { + std::string& k = it->first; + std::string& v = it->second; + x_kvs.gen_str(k, v); + } + } + + root.gen_str("x_body", t.getRequestBody()); + if (!notes.empty()) { + root.gen_key("notes"); + Waap::Util::Yajl::Array jsNotes(y); + for (std::vector::const_iterator it = notes.begin(); it != notes.end(); ++it) { + jsNotes.gen_str(*it); + } + } + + root.gen_bool("send_response", bSendResponse); + root.gen_bool("login_url", false); + } + + return (bSendResponse ? "1" : "0") + y.get_json_str(); + } +} diff --git a/components/security_apps/waap/waap_clib/WaapResultJson.h b/components/security_apps/waap/waap_clib/WaapResultJson.h new file mode 100644 index 0000000..1eb5be1 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapResultJson.h @@ -0,0 +1,21 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "Waf2Engine.h" +#include "WaapAssetState.h" +#include + +std::string buildWaapResultJson(Waf2ScanResult *m_scanResult, const Waf2Transaction &t, bool bSendResponse, + const std::string &normalizedUri, const std::string &uri, bool bForceBlock, + bool bForceException); diff --git a/components/security_apps/waap/waap_clib/WaapSampleValue.cc b/components/security_apps/waap/waap_clib/WaapSampleValue.cc new file mode 100644 index 0000000..c776800 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapSampleValue.cc @@ -0,0 +1,41 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
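// NOTE (illustrative sketch, values are invented placeholders): buildWaapResultJson() above
// returns a single "1" or "0" character (the bSendResponse flag) immediately followed by a JSON
// document. Following the yajl generator calls, the overall shape is roughly:
//
//   1{"data":{"transaction":{"time":"...","remote_port":80,"remote_address":"192.0.2.1",
//                            "support_id":"..."},
//             "request":{"method":"GET","uri":"/a","orig_uri":"/a","ct":"...","headers":{}},
//             "ct":"..."},
//     "res":{"param_location":"url_param","param_name":"q","line":"...","keyword_matches":[],
//            "ntags":{},"score":3.5,"scores_array":[],"keyword_combinations":[]},
//     "stage1_force_block":false,
//     "k_api":[],"x_kvs":{},"x_body":"...",
//     "send_response":true,"login_url":false}
//
// The "res" object is emitted only when a scan result exists, "stage1_force_exception" only when
// it is true, and "notes" only when the notes list is non-empty; everything else is produced by
// both branches of the function.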
+ +#include "WaapSampleValue.h" + +SampleValue::SampleValue(const std::string &sample, + const std::shared_ptr ®exPreconditions) + : + m_sample(sample), + m_regexPreconditions(regexPreconditions), + m_pmWordSet() +{ + if (m_regexPreconditions) { + // Run aho-corasick and related rules once the sample value is known. + // The result pmWordSet is reused later for multiple calls to findMatches on the same sample. + regexPreconditions->pmScan( + Buffer(m_sample.data(), m_sample.size(), Buffer::MemoryType::STATIC), m_pmWordSet); + } +} + +const std::string & +SampleValue::getSampleString() const +{ + return m_sample; +} + +void +SampleValue::findMatches(const Regex &pattern, std::vector &matches) const +{ + pattern.findAllMatches(m_sample, matches, m_regexPreconditions ? &m_pmWordSet : nullptr); +} diff --git a/components/security_apps/waap/waap_clib/WaapSampleValue.h b/components/security_apps/waap/waap_clib/WaapSampleValue.h new file mode 100644 index 0000000..022f533 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapSampleValue.h @@ -0,0 +1,37 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __WAAP_SAMPLE_VALUE_H__ +#define __WAAP_SAMPLE_VALUE_H__ + +#include +#include +#include +#include "Waf2Regex.h" +#include "WaapRegexPreconditions.h" +#include "buffer.h" + +class SampleValue +{ +public: + SampleValue(const std::string &sample, const std::shared_ptr ®exPreconditions); + const std::string &getSampleString() const; + void findMatches(const Regex &pattern, std::vector &matches) const; + +private: + std::string m_sample; + const std::shared_ptr m_regexPreconditions; + Waap::RegexPreconditions::PmWordSet m_pmWordSet; +}; + +#endif // __WAAP_SAMPLE_VALUE_H__ diff --git a/components/security_apps/waap/waap_clib/WaapScanner.cc b/components/security_apps/waap/waap_clib/WaapScanner.cc new file mode 100755 index 0000000..275cf26 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapScanner.cc @@ -0,0 +1,307 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
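// NOTE (illustrative usage, hypothetical variable names): SampleValue above runs the Aho-Corasick
// precondition scan exactly once per sample and then reuses the cached word set for every regex
// applied to that sample, which is the point of the preconditions machinery:
//
//   SampleValue sample(unescapedValue, regexPreconditions);   // pmScan() happens here, once
//   std::vector<RegexMatch> matches;                          // assumed match type from Waf2Regex.h
//   sample.findMatches(compiledRegex, matches);               // reuses the cached PmWordSet
//
// Passing a null preconditions pointer simply disables the optimization: findMatches() then hands
// nullptr to Regex::findAllMatches() and the pattern is evaluated unconditionally.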
+ +#include "WaapScanner.h" +#include "WaapScores.h" +#include "i_transaction.h" +#include +#include "debug.h" +#include "reputation_features_events.h" + +USE_DEBUG_FLAG(D_WAAP_SCANNER); + +double Waap::Scanner::getScoreData(Waf2ScanResult& res, const std::string &poolName) +{ + std::string source = m_transaction->getSourceIdentifier(); + + // Extract set of keyword_matches from keyword_matches, then from ngtags + Waap::Keywords::KeywordsSet keywordsSet; + Waap::Keywords::computeKeywordsSet(keywordsSet, res.keyword_matches, res.found_patterns); + + std::string param_name = IndicatorsFiltersManager::generateKey(res.location, res.param_name, m_transaction); + dbgTrace(D_WAAP_SCANNER) << "filter processing for parameter: " << param_name; + m_transaction->getAssetState()->logIndicatorsInFilters(param_name, keywordsSet, m_transaction); + m_transaction->getAssetState()->filterKeywords(param_name, keywordsSet, res.filtered_keywords); + if (m_transaction->getSiteConfig() != nullptr) + { + auto waapParams = m_transaction->getSiteConfig()->get_WaapParametersPolicy(); + if (waapParams != nullptr && waapParams->getParamVal("filtersVerbose", "false") == "true") { + m_transaction->getAssetState()->filterVerbose(param_name, res.filtered_keywords); + } + } + m_transaction->getAssetState()->filterKeywordsByParameters(res.param_name, keywordsSet); + + // The keywords are only removed in production, they are still used while building scores + if (!m_transaction->get_ignoreScore()) { + m_transaction->getAssetState()->removeKeywords(keywordsSet); + } + + // Filter keywords due to wbxml data format + DeepParser &dp = m_transaction->getDeepParser(); + bool isBrokenWBXML = (m_transaction->getContentType() == Waap::Util::CONTENT_TYPE_WBXML) && (dp.depth() == 0) && + (dp.m_key.first().size() == 4 && dp.m_key.first() == "body" && !dp.isWBXmlData()); + + // If wbxml data detected heuristically, or if not detected but declared by content-type in header + if (dp.isWBXmlData() || isBrokenWBXML) { + dbgTrace(D_WAAP_SCANNER) << "Filtering out wbxml keywords. isWbXmlData: " << dp.isWBXmlData() << + ", isBrokenWBXml:" << isBrokenWBXML; + m_transaction->getAssetState()->removeWBXMLKeywords(keywordsSet, res.filtered_keywords); + } + + // update keywords_matches + res.keyword_matches.clear(); + for (auto keyword : keywordsSet) { + res.keyword_matches.push_back(keyword); + } + std::sort(res.keyword_matches.begin(), res.keyword_matches.end()); + + std::string keywords_string; + for (auto pKeyword = keywordsSet.begin(); pKeyword != keywordsSet.end(); ++pKeyword) { + // Add spaces between the items, but not before the first one + if (pKeyword != keywordsSet.begin()) { + keywords_string += " "; + } + + std::string k = *pKeyword; + stripSpaces(k); + keywords_string += k; + } + + std::vector newKeywords; + for (auto pKeyword = keywordsSet.begin(); pKeyword != keywordsSet.end(); ++pKeyword) { + std::string k = *pKeyword; + stripSpaces(k); + // if keyword_string.count(key) < 2: new_keywords.append(key) + if (countSubstrings(keywords_string, k) < 2) { + newKeywords.push_back(k); + } + } + + std::sort(newKeywords.begin(), newKeywords.end()); + + res.scoreArray.clear(); + res.keywordCombinations.clear(); + + if (!newKeywords.empty()) { + // Collect scores of individual keywords + Waap::Scores::calcIndividualKeywords(m_transaction->getAssetState()->scoreBuilder, poolName, newKeywords, + res.scoreArray); + // Collect keyword combinations and their scores. 
Append scores to scoresArray, + // and also populate m_scanResultKeywordCombinations list + Waap::Scores::calcCombinations(m_transaction->getAssetState()->scoreBuilder, poolName, newKeywords, + res.scoreArray, res.keywordCombinations); + } + + return Waap::Scores::calcArrayScore(res.scoreArray); +} + +bool Waap::Scanner::suspiciousHit(Waf2ScanResult& res, const std::string& location, const std::string& param_name) { + dbgTrace(D_WAAP_SCANNER) << "suspiciousHit processing for parameter: " << param_name << " at " << location << + " num of keywords " << res.keyword_matches.size(); + + res.location = location; + res.param_name = param_name; // remember the param name (analyzer needs it for reporting) + + // Select scores pool by location + std::string poolName = Waap::Scores::getScorePoolNameByLocation(location); + + double score = getScoreData(res, poolName); + + dbgTrace(D_WAAP_SCANNER) << "score: " << score; + // Add record about scores to the notes[] log (also reported in logs) + if (score > 1.0f) { + DetectionEvent(location, res.keyword_matches).notify(); + char buf[128]; + sprintf(buf, "%.3f", score); + const std::string& res_location = m_transaction->getDeepParser().m_key.first(); + const std::string& res_param_name = m_transaction->getDeepParser().m_key.str(); + m_transaction->addNote( + "sc:" + res_location + (res_param_name.empty() ? "" : "/" + res_param_name) + ":" + std::string(buf) + ); + } + + if (m_transaction->shouldIgnoreOverride(res)) { + dbgTrace(D_WAAP_SCANNER) << "Ignoring parameter key/value " << res.param_name << + " due to ignore action in override"; + m_bIgnoreOverride = true; + return false; + } + + res.score = score; + return m_transaction->reportScanResult(res); +} + +int Waap::Scanner::onKv(const char* k, size_t k_len, const char* v, size_t v_len, int flags) { + Waf2ScanResult& res = m_lastScanResult; + DeepParser &dp = m_transaction->getDeepParser(); + std::string key = std::string(k, k_len); + std::string value = std::string(v, v_len); + res.clear(); + dbgTrace(D_WAAP_SCANNER) << "Waap::Scanner::onKv: k='" << key << + "' v='" << value << "'"; + bool isCookiePayload = dp.m_key.first().size() == 6 && dp.m_key.first() == "cookie"; + bool isUrlParamPayload = dp.m_key.first().size() == 9 && dp.m_key.first() == "url_param"; + bool isSplitUrl = dp.m_key.first().size() == 3 && + dp.m_key.first() == "url" && + dp.m_key.str() != ""; + bool isHeaderPayload = dp.m_key.first().size() == 6 && dp.m_key.first() == "header"; + bool isRefererParamPayload = + (dp.m_key.first().size() == 13 && dp.m_key.first() == "referer_param"); + bool isBodyPayload = dp.m_key.first().size() == 4 && dp.m_key.first() == "body"; + dbgTrace(D_WAAP_SCANNER) << "Waap::Scanner::onKv: depth=" << + dp.depth() << "; first='" << dp.m_key.first().c_str() << "'; key='" << + dp.m_key.str().c_str() << "'"; + + // Collect URLs from values for openRedirect feature. + m_transaction->getOpenRedirectState().collect(v, v_len, m_transaction->getHost()); + + // Do not scan our own anti-bot cookie (match by name), it often false alarms. + const std::string& fullKeyStr = dp.m_key.str(); + dbgTrace(D_WAAP_SCANNER) << "Waap::Scanner::onKv: fullKeyStr: '" << fullKeyStr << "'"; + //Get Anti bot cookie + if(isCookiePayload && fullKeyStr == "__fn1522082288") { + m_antibotCookie = value; + dbgTrace(D_WAAP_SCANNER) << "Waap::Scanner::onKv: found Antibot Cookie: '" << m_antibotCookie << "'"; + } + + // Do not scan our own anti-bot cookie (match by name), it often false alarms. 
+ if (isCookiePayload && + (fullKeyStr.find("fnserr") != std::string::npos || + fullKeyStr.find("__fn1522082288") != std::string::npos || + fullKeyStr.find("_fn_nsess") != std::string::npos)) { + dbgTrace(D_WAAP_SCANNER) << "Waap::Scanner::onKv: skip scanning our own anti-bot cookie, by name"; + return 0; + } + // scan for csrf token. + if (isCookiePayload && fullKeyStr == "x-chkp-csrf-token") { + m_transaction->getCsrfState().set_CsrfToken(v, v_len); + } + if (isHeaderPayload && fullKeyStr == "x-chkp-csrf-token") { + m_transaction->getCsrfState().set_CsrfHeaderToken(v, v_len); + } + if (isBodyPayload && fullKeyStr == "x-chkp-csrf-token") { + m_transaction->getCsrfState().set_CsrfFormToken(v, v_len); + } + + if (dp.depth() == 0 && + isCookiePayload && + (v_len >= 2) && + ((v[0] == '"' && v[v_len - 1] == '"') || (v[0] == '\'' && v[v_len - 1] == '\'')) + ) { + dbgTrace(D_WAAP_SCANNER) << "Waap::Scanner::onKv: removing quotes around cookie value: '" << + value << "'"; + // remove the quotes around the value + v++; + v_len -= 2; + value = std::string(v, v_len); + } + res.location = dp.m_key.first(); + res.param_name = dp.m_key.str(); + res.unescaped_line = unescape(value); + + m_transaction->getAssetState()->logParamHit(res, m_transaction); + + std::set paramTypes = m_transaction->getAssetState()->m_filtersMngr->getParameterTypes( + IndicatorsFiltersManager::generateKey(res.location, res.param_name, m_transaction)); + + if (paramTypes.size() == 1 && paramTypes.find("local_file_path") != paramTypes.end()) + { + dbgTrace(D_WAAP_SCANNER) << "found parameter as local path, val : " << value; + if ((value.find("http://") == 0 || value.find("https://") == 0) && !m_transaction->shouldIgnoreOverride(res)) + { + res.score = 10.0; + res.unescaped_line = value; + res.keyword_matches.push_back("url_instead_of_file"); + m_transaction->addNote("sv: found url in " + res.location + "#" + res.param_name); + m_transaction->reportScanResult(res); + return 0; + } + } + // Special value only matched when XML atribute is found. 
+    // Special value only matched when an XML attribute is found.
+    if (v_len == 36) {
+        if (value == "08a80340-06d3-11ea-9f87-0242ac11000f" && !m_transaction->shouldIgnoreOverride(res)) {
+            // Always return max score when this value is detected
+            res.score = 10.0;
+            m_transaction->addNote("sv: found xml_entity in " + res.location + "#" + res.param_name);
+            m_transaction->reportScanResult(res);
+            return 0;
+        }
+    }
+
+    // Scan parameter name
+    bool badUrlEncoding =
+        dp.m_key.depth() == 2 &&
+        isUrlParamPayload && key != unescape(key) &&
+        (!checkUrlEncoded(k, k_len) || !checkUrlEncoded(v, v_len));
+    bool scanNameDueToSplitUrl = dp.m_key.depth() == 2 && isSplitUrl && key != "url.id";
+    bool suspiciousName = dp.depth() == 0 &&
+        (isCookiePayload || isRefererParamPayload || isUrlParamPayload || isBodyPayload) &&
+        (!m_transaction->getAssetState()->getSignatures()->good_header_name_re.hasMatch(key));
+
+    dbgTrace(D_WAAP_SCANNER)
+        << "badUrlEncoding="
+        << badUrlEncoding
+        << ", scanNameDueToSplitUrl="
+        << scanNameDueToSplitUrl
+        << ", suspiciousName="
+        << suspiciousName;
+
+    if (badUrlEncoding || scanNameDueToSplitUrl || suspiciousName) {
+        dbgTrace(D_WAAP_SCANNER) << "Waap::Scanner::onKv: candidate to scan parameter names";
+
+        // Deep-scan parameter names
+        if (m_transaction->getAssetState()->apply(key, res, dp.m_key.first())) {
+            if (suspiciousHit(res, dp.m_key.first(), dp.m_key.str())) {
+                // Scanner found enough evidence to report this res
+                dbgTrace(D_WAAP_SCANNER) << "Waap::Scanner::onKv: SUSPICIOUS PARAM NAME: k='" <<
+                    key << "' v='" << value << "'";
+#ifdef ENABLE_WAAP_ATTACK_IN_PARAM
+                res.param_name = ATTACK_IN_PARAM;
+                if (m_transaction->getScanResultPtr()) {
+                    m_transaction->getScanResultPtr()->m_isAttackInParam = true;
+                    m_transaction->getScanResultPtr()->param_name = ATTACK_IN_PARAM;
+                }
+                else {
+                    dbgWarning(D_WAAP_SCANNER) << "Uninitialized m_scanResult during scanning parameter name (!!!)";
+                }
+#endif
+                m_transaction->addNote("sn:" + res.location + (res.param_name.empty() ? "" : "/" + res.param_name));
+            }
+        }
+    }
+    Waf2ScanResult param_name_res = res;
+    res.clear();
+
+    // Scan parameter value
+    if (m_transaction->getAssetState()->apply(value, res, dp.m_key.first(), dp.isBinaryData(),
+        dp.getSplitType()))
+    {
+        if (!param_name_res.keyword_matches.empty() && !res.keyword_matches.empty() &&
+            param_name_res.location == "url_param")
+        {
+            dbgTrace(D_WAAP_SCANNER) << "Found suspicious content in param name and value. Merging scans";
+            res.mergeFrom(param_name_res);
+        }
+
+        if (suspiciousHit(res, dp.m_key.first(), dp.m_key.str())) {
+            // Scanner found enough evidence to report this res
+            dbgTrace(D_WAAP_SCANNER) << "Waap::Scanner::onKv: SUSPICIOUS VALUE: k='" << key <<
+                "' v='" << value << "'";
+            m_transaction->addNote("sv:" + res.location + (res.param_name.empty() ? "" : "/" + res.param_name));
+        }
+    }
+
+    return 0;
+}
diff --git a/components/security_apps/waap/waap_clib/WaapScanner.h b/components/security_apps/waap/waap_clib/WaapScanner.h
new file mode 100644
index 0000000..610473a
--- /dev/null
+++ b/components/security_apps/waap/waap_clib/WaapScanner.h
@@ -0,0 +1,52 @@
+// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __WAAP_SCANNER_H__ +#define __WAAP_SCANNER_H__ + +#include "ParserBase.h" +#include "ScanResult.h" +#include "i_transaction.h" +#include "WaapAssetState.h" +#include + +namespace Waap { + class Scanner : public IParserReceiver + { + public: + Scanner(IWaf2Transaction *transaction) + : + m_lastScanResult(), + m_transaction(transaction), + m_antibotCookie(), + m_bIgnoreOverride(false) + { + } + bool suspiciousHit(Waf2ScanResult &res, const std::string &location, const std::string ¶m_name); + int onKv(const char* k, size_t k_len, const char* v, size_t v_len, int flags) override; + + const std::string &getAntibotCookie() const { return m_antibotCookie; } + bool getIgnoreOverride() { return m_bIgnoreOverride; }; + const Waf2ScanResult &getLastScanResult() const { return m_lastScanResult; } + private: + double getScoreData(Waf2ScanResult& res, const std::string &poolName); + bool shouldIgnoreOverride(const Waf2ScanResult &res); + + Waf2ScanResult m_lastScanResult; + IWaf2Transaction *m_transaction; + std::string m_antibotCookie; + bool m_bIgnoreOverride; + }; +} + +#endif // __WAAP_SCANNER_H__ diff --git a/components/security_apps/waap/waap_clib/WaapScores.cc b/components/security_apps/waap/waap_clib/WaapScores.cc new file mode 100644 index 0000000..51b7a62 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapScores.cc @@ -0,0 +1,113 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapScores.h" +#include +#include +#include "ScoreBuilder.h" +#include "WaapDefines.h" +#include "debug.h" + +USE_DEBUG_FLAG(D_WAAP_SCORE_BUILDER); + +namespace Waap { +namespace Scores { + +std::string getScorePoolNameByLocation(const std::string &location) { + std::string poolName = KEYWORDS_SCORE_POOL_BASE; + if (location == "header") { + poolName = KEYWORDS_SCORE_POOL_HEADERS; + } + return poolName; +} + +void +addKeywordScore( + const ScoreBuilder& scoreBuilder, + const std::string &poolName, + std::string keyword, + double defaultScore, + std::vector& scoresArray) +{ + scoresArray.push_back(scoreBuilder.getSnapshotKeywordScore(keyword, defaultScore, poolName)); +} + +// Calculate score of individual keywords +void +calcIndividualKeywords( + const ScoreBuilder& scoreBuilder, + const std::string &poolName, + const std::vector& keyword_matches, + std::vector& scoresArray) +{ + std::vector keywords = keyword_matches; // deep copy!! (PERFORMANCE WARNING!) 
+ std::sort(keywords.begin(), keywords.end()); + + for (auto pKeyword = keywords.begin(); pKeyword != keywords.end(); ++pKeyword) { + addKeywordScore(scoreBuilder, poolName, *pKeyword, 2.0f, scoresArray); + } +} + +// Calculate keyword combinations and their scores +void +calcCombinations( + const ScoreBuilder& scoreBuilder, + const std::string &poolName, + const std::vector& keyword_matches, + std::vector& scoresArray, + std::vector& keyword_combinations) +{ + keyword_combinations.clear(); + + for (size_t i = 0; i < keyword_matches.size(); ++i) { + std::vector combinations; + for (size_t j = i; j < std::min(i + 2, keyword_matches.size()); ++j) { + combinations.push_back(keyword_matches[j]); + } + if (combinations.size() > 1) { + // Must be sorted to build a string that exactly matches the keys (strings) + // from signature_scores database. + std::sort(combinations.begin(), combinations.end()); + std::string combination; + // note that std::set<> container output sorted data when iterated. + for (auto it = combinations.begin(); it != combinations.end(); it++) { + // add space between all items, except the first one + if (it != combinations.begin()) { + combination += " "; + } + combination += *it; + } + addKeywordScore(scoreBuilder, poolName, combination, 1.0f, scoresArray); + keyword_combinations.push_back(combination); + } + } +} + +double +calcArrayScore(std::vector& scoreArray) +{ + // Calculate cumulative score from array of individual scores + double score = 1.0f; + for (auto pScore = scoreArray.begin(); pScore != scoreArray.end(); ++pScore) { + dbgTrace(D_WAAP_SCORE_BUILDER) << "scoreArr[]=" << *pScore; + double left = 10.0f - score; + double divisor = (*pScore / 3.0f + 10.0f); // note: divisor can't be empty because + // *pScore is always positive and there's a +10 offset + score = 10.0f - left * 10.0f / divisor; + } + dbgTrace(D_WAAP_SCORE_BUILDER) << "calculated score: " << score; + return score; +} + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapScores.h b/components/security_apps/waap/waap_clib/WaapScores.h new file mode 100644 index 0000000..71dcc81 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapScores.h @@ -0,0 +1,53 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
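As a reference for the scoring code above, the standalone sketch below restates its two pieces of math: adjacent keyword pairs are sorted and space-joined so they line up with the signature_scores keys, and per-keyword scores are folded into a single value that grows toward 10 as more evidence accumulates. Only those two formulas come from the code above; the function names and sample keywords are illustrative.

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    // Build sorted, space-joined strings from each pair of adjacent keywords,
    // matching how combination keys are looked up in the score pools.
    static std::vector<std::string> adjacentPairs(const std::vector<std::string> &keywords)
    {
        std::vector<std::string> out;
        for (size_t i = 0; i + 1 < keywords.size(); ++i) {
            std::vector<std::string> pair = {keywords[i], keywords[i + 1]};
            std::sort(pair.begin(), pair.end());
            out.push_back(pair[0] + " " + pair[1]);
        }
        return out;
    }

    // Fold individual scores into one cumulative value using the same recurrence as above.
    static double foldScores(const std::vector<double> &scores)
    {
        double score = 1.0;
        for (double s : scores) {
            double left = 10.0 - score;
            double divisor = s / 3.0 + 10.0; // always >= 10, so the division is safe
            score = 10.0 - left * 10.0 / divisor;
        }
        return score;
    }

    int main()
    {
        for (const auto &c : adjacentPairs({"select", "union", "from"}))
            std::cout << c << "\n";                 // "select union", "from union"
        // Two keyword scores of 2.0 fold to roughly 2.09; each extra score moves the total closer to 10.
        std::cout << foldScores({2.0, 2.0}) << "\n";
    }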
+ +#pragma once + +#include +#include +#include "ScoreBuilder.h" + +namespace Waap { +namespace Scores { + +std::string getScorePoolNameByLocation(const std::string &location); + +void +addKeywordScore( + const ScoreBuilder& scoreBuilder, + const std::string &poolName, + std::string keyword, + double defaultScore, + std::vector& scoresArray); + +// Calculate score of individual keywords +void +calcIndividualKeywords( + const ScoreBuilder& scoreBuilder, + const std::string &poolName, + const std::vector& keyword_matches, + std::vector& scoresArray); + +// Calculate keyword combinations and their scores +void +calcCombinations( + const ScoreBuilder& scoreBuilder, + const std::string &poolName, + const std::vector& keyword_matches, + std::vector& scoresArray, + std::vector& keyword_combinations); + +double calcArrayScore(std::vector& scoreArray); + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapTrigger.cc b/components/security_apps/waap/waap_clib/WaapTrigger.cc new file mode 100755 index 0000000..131fee9 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapTrigger.cc @@ -0,0 +1,79 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapTrigger.h" +#include "Waf2Util.h" + +namespace Waap { +namespace Trigger { + +Log::Log() +: + verbosity("standard"), + complianceWarnings(true), + complianceViolations(true), + acAllow(true), + acDrop(true), + tpDetect(true), + tpPrevent(true), + webRequests(true), + webUrlPath(true), + webUrlQuery(true), + webHeaders(false), + webBody(true), + logToCloud(true), + logToAgent(true), + extendLogging(false), + responseCode(false), + responseBody(false), + extendLoggingMinSeverity("") +{ +} + +bool +Log::operator==(const Log &other) const +{ + return (verbosity == other.verbosity) && + (complianceWarnings == other.complianceWarnings) && + (complianceViolations == other.complianceViolations) && + (acAllow == other.acAllow) && + (acDrop == other.acDrop) && + (tpDetect == other.tpDetect) && + (tpPrevent == other.tpPrevent) && + (webRequests == other.webRequests) && + (webUrlPath == other.webUrlPath) && + (webHeaders == other.webHeaders) && + (webUrlQuery == other.webUrlQuery) && + (webBody == other.webBody) && + (logToCloud == other.logToCloud) && + (logToAgent == other.logToAgent); +} + +Trigger::Trigger():triggerType("log"), log(std::make_shared()) +{ +} + +bool +Trigger::operator==(const Trigger &other) const +{ + return (triggerType == other.triggerType) && + (Waap::Util::compareObjects(log, other.log)); +} + +bool Policy::operator==(const Policy &other) const +{ + return triggers == other.triggers; +} + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapTrigger.h b/components/security_apps/waap/waap_clib/WaapTrigger.h new file mode 100755 index 0000000..e1493fc --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapTrigger.h @@ -0,0 +1,156 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include +#include +#include +#include +#include "debug.h" + +USE_DEBUG_FLAG(D_WAAP); + +namespace Waap { +namespace Trigger { +using boost::algorithm::to_lower_copy; + +struct Log { + template + void serialize(_A &ar) { + ar(cereal::make_nvp("verbosity", verbosity)); + verbosity = to_lower_copy(verbosity); + ar(cereal::make_nvp("complianceWarnings", complianceWarnings)); + ar(cereal::make_nvp("complianceViolations", complianceViolations)); + ar(cereal::make_nvp("acAllow", acAllow)); + ar(cereal::make_nvp("acDrop", acDrop)); + ar(cereal::make_nvp("tpDetect", tpDetect)); + ar(cereal::make_nvp("tpPrevent", tpPrevent)); + ar(cereal::make_nvp("webRequests", webRequests)); + ar(cereal::make_nvp("webUrlPath", webUrlPath)); + ar(cereal::make_nvp("webUrlQuery", webUrlQuery)); + ar(cereal::make_nvp("webBody", webBody)); + ar(cereal::make_nvp("logToCloud", logToCloud)); + ar(cereal::make_nvp("logToAgent", logToAgent)); + + try + { + ar(cereal::make_nvp("webHeaders", webHeaders)); + } + catch (const cereal::Exception &e) + { + ar.setNextName(nullptr); + dbgDebug(D_WAAP) << "failed to load webHeaders field. Error: " << e.what(); + } + + try + { + ar(cereal::make_nvp("extendLogging", extendLogging)); + } + catch(const cereal::Exception &e) + { + ar.setNextName(nullptr); + dbgDebug(D_WAAP) << "Failed to load extendedLogging field. Error: " << e.what(); + } + + + if (extendLogging) + { + try + { + ar(cereal::make_nvp("extendLoggingMinSeverity", extendLoggingMinSeverity)); + } + catch(const cereal::Exception &e) + { + ar.setNextName(nullptr); + dbgDebug(D_WAAP) << "Failed to load extendLoggingMinSeverity field. Error: " << e.what(); + } + + try + { + ar(cereal::make_nvp("responseCode", responseCode)); + } + catch(const cereal::Exception &e) + { + ar.setNextName(nullptr); + dbgDebug(D_WAAP) << "Failed to load responseCode field. Error: " << e.what(); + } + + try + { + ar(cereal::make_nvp("responseBody", responseBody)); + } + catch(const cereal::Exception &e) + { + ar.setNextName(nullptr); + dbgDebug(D_WAAP) << "Failed to load responseBody field. Error: " << e.what(); + } + } + } + + Log(); + bool operator==(const Log &other) const; + + std::string verbosity; + bool complianceWarnings; + bool complianceViolations; + bool acAllow; + bool acDrop; + bool tpDetect; + bool tpPrevent; + bool webRequests; + bool webUrlPath; + bool webUrlQuery; + bool webHeaders; + bool webBody; + bool logToCloud; + bool logToAgent; + bool extendLogging; + bool responseCode; + bool responseBody; + std::string extendLoggingMinSeverity; +}; + +struct Trigger { + template + void serialize(_A &ar) { + ar(cereal::make_nvp("$triggerType", triggerType)); + triggerType = to_lower_copy(triggerType); + + // Currently, only load triggers of type "log". 
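The try/catch blocks above use the common cereal pattern for optional JSON fields: a missing name-value pair throws cereal::Exception, and calling setNextName(nullptr) resets the archive cursor so later fields still load. A minimal standalone sketch of that pattern, assuming cereal's JSON archive headers are available; the struct, field names, and input document here are hypothetical:

    #include <cereal/archives/json.hpp>
    #include <cereal/types/string.hpp>
    #include <iostream>
    #include <sstream>
    #include <string>

    struct ExampleLog {
        std::string verbosity = "standard";
        bool webHeaders = false;

        template <typename Archive>
        void serialize(Archive &ar) {
            ar(cereal::make_nvp("verbosity", verbosity));
            try {
                ar(cereal::make_nvp("webHeaders", webHeaders));
            } catch (const cereal::Exception &) {
                // Field absent in older configurations: reset the cursor and keep the default.
                ar.setNextName(nullptr);
            }
        }
    };

    int main()
    {
        std::istringstream in(R"({"log": {"verbosity": "high"}})");
        cereal::JSONInputArchive ar(in);
        ExampleLog log;
        ar(cereal::make_nvp("log", log));
        std::cout << log.verbosity << " " << log.webHeaders << "\n"; // high 0
    }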
+ if (triggerType == "log") { + ar(cereal::make_nvp("log", *log)); + } + } + + Trigger(); + bool operator==(const Trigger &other) const; + + std::string triggerType; + std::shared_ptr log; +}; + +struct Policy { + template + Policy(_A &ar) { + ar(cereal::make_nvp("triggers", triggers)); + } + + bool operator==(const Policy &other) const; + + std::vector triggers; +}; + +} +} diff --git a/components/security_apps/waap/waap_clib/WaapValueStatsAnalyzer.cc b/components/security_apps/waap/waap_clib/WaapValueStatsAnalyzer.cc new file mode 100755 index 0000000..b1bb373 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapValueStatsAnalyzer.cc @@ -0,0 +1,264 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "WaapValueStatsAnalyzer.h" +#include +#include +#include "debug.h" +#include "Waf2Util.h" + +USE_DEBUG_FLAG(D_WAAP); + +bool checkUrlEncoded(const char *buf, size_t len) +{ + dbgFlow(D_WAAP); + size_t i = 0; + int hex_characters_to_follow = 0; + + for (; i < len; i++) { + char ch = buf[i]; + if (ch == '%' && hex_characters_to_follow == 2) { + continue; + } + + if (hex_characters_to_follow > 0) { + hex_characters_to_follow--; + if (isHexDigit(ch)) { + continue; + } + return false; + } else if (ch == '%') { + hex_characters_to_follow = 2; + continue; + } + + if (Waap::Util::isAlphaAsciiFast(static_cast(ch)) || isdigit(ch)) { + continue; + } + + switch (ch) { + case '.': + case '-': + case '_': + case '~': + case '!': + case '*': + case '\'': + case '(': + case ')': + case ';': + case ':': + case '@': + case '&': + case '=': + case '+': + case '$': + case ',': + case '/': + case '?': + case '#': + case '[': + case ']': + continue; + default: + return false; + } + } + + return true; +} + +ValueStatsAnalyzer::ValueStatsAnalyzer(const std::string &cur_val) + : + hasCharSlash(false), + hasCharColon(false), + hasCharAmpersand(false), + hasCharEqual(false), + hasTwoCharsEqual(false), + hasCharSemicolon(false), + hasCharPipe(false), + longestZerosSeq{0}, + isUTF16(false), + canSplitSemicolon(true), + canSplitPipe(true), + hasSpace(false), + isUrlEncoded(false) +{ + unsigned int zerosSeq[2] = {0}; + bool lastNul = false; // whether last processed character was ASCII NUL + size_t curValLength = cur_val.length(); + + if (curValLength == 0) { + canSplitSemicolon = false; + canSplitPipe = false; + return; + } + + // Decide the input is candidate for UTF16 if all the following rules apply: + // 1. Input buffer length is longer than 2 bytes + // 2. 
Input buffer length is divisible by 2 + isUTF16 = (curValLength > 2) && (curValLength % 2 == 0); + + for (size_t i = 0; i < curValLength; ++i) + { + unsigned char ch = (unsigned char)cur_val[i]; + + switch(ch) { + case '/': + hasCharSlash = true; + break; + case ':': + hasCharColon = true; + break; + case '&': + hasCharAmpersand = true; + break; + case '=': + if (!hasTwoCharsEqual) { + if (hasCharEqual) { + hasTwoCharsEqual = true; + } + hasCharEqual = true; + } + break; + case ';': + hasCharSemicolon = true; + break; + case '|': + hasCharPipe = true; + break; + } + + // The index will be 0 for even, and 1 for odd offsets + int index = i % 2; + + // Compute longest sequence of ASCII NUL bytes over even and odd offsets in cur_val + if (ch == 0) + { + if (lastNul) + { + // UTF-16 consists of subsequent pairs of bytes. Cancel UTF16 detection if there is a NUL bytes pair. + // (but allow such a pair at the end of the input buffer: UTF16 could be "NUL terminated" this way) + if (isUTF16 && (index == 1) && (i + 1 < curValLength)) { + isUTF16 = false; + } + + // Anytime two ASCII NULs are encountered in a row - terminate counting the NUL-sequence length. + zerosSeq[0] = 0; + zerosSeq[1] = 0; + } + else + { + zerosSeq[index]++; + longestZerosSeq[index] = std::max(zerosSeq[index], longestZerosSeq[index]); + } + + lastNul = true; + } + else + { + zerosSeq[index] = 0; + lastNul = false; + } + + bool isAlphaNumeric = Waap::Util::isAlphaAsciiFast(ch) || isdigit(ch); + + if (canSplitSemicolon && !isAlphaNumeric) { + switch (ch) { + case '.': + case '-': + case '_': + case '=': + case ',': + case '(': + case ')': + case ';': + break; + default: + // Only alphanumeric characters and characters listed above are allowed, anything else disables + canSplitSemicolon = false; + } + } + + if (canSplitPipe && !isAlphaNumeric) { + switch (ch) { + case ':': + case '?': + case '.': + case '-': + case '_': + case '=': + case ',': + case '[': + case ']': + case '/' : + case ' ': + case '\f': + case '\v': + case '\t': + case '\r': + case '\n': + case '(': + case ')': + case '|': + break; + default: + // Only alphanumeric characters and characters listed above are allowed, anything else disables + canSplitPipe = false; + } + } + } + + // Only decode UTF16 if at least one longest zero bytes sequence (computed over odd + // or over even input bytes) is longer than 2. + // If both sequences are too short - do not decode UTF16 on such input. 
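The heuristic described above can be illustrated with a simplified standalone check: a buffer is treated as likely UTF-16 only when it is longer than two bytes, has even length, and NUL bytes recur at a fixed parity, as they do for ASCII text encoded as UTF-16. The production code also cancels detection on NUL-NUL pairs; that refinement is omitted from this sketch, and the function name and samples are illustrative.

    #include <algorithm>
    #include <iostream>
    #include <string>

    static bool looksLikeUtf16(const std::string &buf)
    {
        if (buf.size() <= 2 || buf.size() % 2 != 0) return false;
        unsigned run[2] = {0, 0}, longest[2] = {0, 0};
        for (size_t i = 0; i < buf.size(); ++i) {
            int parity = i % 2;
            if (buf[i] == '\0') {
                run[parity]++;
                longest[parity] = std::max(longest[parity], run[parity]);
            } else {
                run[parity] = 0;
            }
        }
        // Require a reasonably long NUL run on at least one parity, mirroring the "> 2" rule above.
        return longest[0] > 2 || longest[1] > 2;
    }

    int main()
    {
        std::string utf16le("a\0b\0c\0d\0", 8); // ASCII "abcd" encoded as UTF-16LE
        std::string plain("abcd");
        std::cout << looksLikeUtf16(utf16le) << " " << looksLikeUtf16(plain) << "\n"; // 1 0
    }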
+ if (longestZerosSeq[0] <= 2 && longestZerosSeq[1] <= 2) { + isUTF16 = false; + } + + // Detect URLEncode value + size_t ofs = 0; + for (size_t i = 0 ; i < cur_val.size(); ++i) { + char ch = cur_val[i]; + + if (isspace(ch)) { + hasSpace = true; + isUrlEncoded = false; + break; + } + + if (ofs == 0) { + if (ch == '%') { + ofs++; + } + } + else if (ofs <= 2) { + if (!isHexDigit(ch)) { + isUrlEncoded = false; + break; // at least one broken URLEncode sequence detected + } + if (ofs == 2) { + isUrlEncoded = true; // complete '%hh' sequence + ofs = 0; // search for next '%' character + } + else { + ofs++; + } + } + } + + // Cancel url decoding if partial match after '%' is found, or if potential specific utf8 evasion is suspected + if (ofs != 0) { + isUrlEncoded = false; + } +} diff --git a/components/security_apps/waap/waap_clib/WaapValueStatsAnalyzer.h b/components/security_apps/waap/waap_clib/WaapValueStatsAnalyzer.h new file mode 100755 index 0000000..fcd2e30 --- /dev/null +++ b/components/security_apps/waap/waap_clib/WaapValueStatsAnalyzer.h @@ -0,0 +1,39 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include + +bool checkUrlEncoded(const char *buf, size_t len); + +// Process value (buffer) and calculate some statistics/insights over it, for use in later processing. +// The insights are computed in te same for loop for performance reasons. +struct ValueStatsAnalyzer +{ + ValueStatsAnalyzer(const std::string &cur_val); + bool hasCharSlash; + bool hasCharColon; + bool hasCharAmpersand; + bool hasCharEqual; + bool hasTwoCharsEqual; + bool hasCharSemicolon; + bool hasCharPipe; + unsigned int longestZerosSeq[2]; // longest zeros sequence. counted over even (index 0) and odd (index 1) offsets + bool isUTF16; + bool canSplitSemicolon; + bool canSplitPipe; + bool hasSpace; + bool isUrlEncoded; +}; + + diff --git a/components/security_apps/waap/waap_clib/Waf2Engine.cc b/components/security_apps/waap/waap_clib/Waf2Engine.cc new file mode 100755 index 0000000..c86432f --- /dev/null +++ b/components/security_apps/waap/waap_clib/Waf2Engine.cc @@ -0,0 +1,2271 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
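The URL-encoding detection above follows a small state machine: every '%' must be followed by exactly two hex digits, raw whitespace disqualifies the value, and a dangling partial '%hh' sequence at the end cancels the decision. A simplified standalone restatement, with an illustrative function name and sample inputs:

    #include <cctype>
    #include <iostream>
    #include <string>

    static bool looksUrlEncoded(const std::string &s)
    {
        bool sawComplete = false;
        size_t pending = 0; // hex digits still expected after a '%'
        for (char ch : s) {
            if (pending > 0) {
                if (!std::isxdigit(static_cast<unsigned char>(ch))) return false;
                if (--pending == 0) sawComplete = true;
            } else if (ch == '%') {
                pending = 2;
            } else if (std::isspace(static_cast<unsigned char>(ch))) {
                return false; // raw whitespace is not expected inside URL-encoded data
            }
        }
        return pending == 0 && sawComplete;
    }

    int main()
    {
        std::cout << looksUrlEncoded("name%3Dvalue") << "\n"; // 1
        std::cout << looksUrlEncoded("100%")          << "\n"; // 0 (dangling '%')
        std::cout << looksUrlEncoded("a%2Gb")         << "\n"; // 0 (non-hex digit)
    }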
+ +#include "Waf2Engine.h" +#include "waap.h" +#include "WaapAssetState.h" +#include "CidrMatch.h" +#include "ParserRaw.h" +#include "ParserUrlEncode.h" +#include "ParserMultipartForm.h" +#include "ParserXML.h" +#include "ParserJson.h" +#include "ContentTypeParser.h" +#include "Waf2Util.h" +#include "debug.h" +#include "DeepAnalyzer.h" +#include "WaapConfigApplication.h" +#include "WaapConfigApi.h" +#include "WaapDefines.h" +#include "WaapTrigger.h" +#include "WaapScores.h" +#include "WaapDecision.h" +#include "WaapConversions.h" +#include "WaapResultJson.h" +#include "WaapAssetStatesManager.h" +#include "log_generator.h" +#include "config.h" +#include "WaapOverrideFunctor.h" +#include "WaapOpenRedirect.h" +#include "WaapOpenRedirectPolicy.h" +#include "WaapErrorDisclosurePolicy.h" +#include +#include "generic_rulebase/parameters_config.h" +#include +#include "ParserDelimiter.h" +#include "OpenRedirectDecision.h" +#include "DecisionType.h" +#include "generic_rulebase/triggers_config.h" +#include "config.h" +#include "LogGenWrapper.h" +#include "reputation_features_events.h" +#include "telemetry.h" +#include "agent_core_utilities.h" + +USE_DEBUG_FLAG(D_WAAP); +USE_DEBUG_FLAG(D_WAAP_ULIMITS); +USE_DEBUG_FLAG(D_WAAP_BOT_PROTECTION); +using namespace ReportIS; + +#define MAX_REQUEST_BODY_SIZE (2*1024) +#define MAX_RESPONSE_BODY_SIZE (2*1024) +#define MAX_RESPONSE_BODY_SIZE_ERR_DISCLOSURE (2*1024) +#define OVERRIDE_ACCEPT "Accept" +#define OVERRIDE_DROP "Drop" +#define OVERRIDE_IGNORE "Ignore" + +// Score threshold below which the match won't be considered +#define SCORE_THRESHOLD (1.4f) + +static const ParameterBehavior action_ignore(BehaviorKey::ACTION, BehaviorValue::IGNORE); + +void Waf2Transaction::learnScore(ScoreBuilderData& data, const std::string &poolName) +{ + m_pWaapAssetState->scoreBuilder.analyzeFalseTruePositive(data, poolName, !m_ignoreScore); + + if (m_ignoreScore) // check if we are in building scores state + { + // Set the relative reputation to max to ensure learning fp in score builder + data.m_relativeReputation = MAX_RELATIVE_REPUTATION; + } + m_pWaapAssetState->scoreBuilder.checkBadSourcesForLearning( + data.m_relativeReputation, + data.m_sourceIdentifier, + data.m_userAgent); +} + +void Waf2Transaction::start_response(int response_status, int http_version) +{ + dbgTrace(D_WAAP) << "[transaction:" << this << "] start_response(response_status=" << response_status + << "," << " http_version=" << http_version << ")"; + m_responseStatus = response_status; + + if(m_responseStatus == 404) + { + // Create error limiting policy (lazy, on first request) + if(m_siteConfig != NULL) { + const std::shared_ptr errorLimitingPolicy = + m_siteConfig->get_ErrorLimitingPolicy(); + + if (errorLimitingPolicy && errorLimitingPolicy->getRateLimitingEnforcementStatus()) { + if (m_pWaapAssetState->getErrorLimitingState() == nullptr) { + m_pWaapAssetState->createErrorLimitingState(errorLimitingPolicy); + dbgTrace(D_WAAP) << "Waf2Transaction::start_response: Create Error Limiting State"; + } + + bool errorLimitingLog = false; + bool blockDueToErrorLimiting = Waap::ErrorLimiting::enforce + (m_source_identifier, m_uriPath, m_pWaapAssetState, errorLimitingLog); + + dbgTrace(D_WAAP) << + "Waf2Transaction::start_response: response code: 404 :: Error Limiting Block : " << + blockDueToErrorLimiting; + + auto decision = m_waapDecision.getDecision(ERROR_LIMITING_DECISION); + decision->setLog(errorLimitingLog); + decision->setBlock(blockDueToErrorLimiting); + } + } + } +} + +void 
Waf2Transaction::start_response_hdrs() +{ + dbgTrace(D_WAAP) << "[transaction:" << this << "] start_response_hdrs"; +} + +void Waf2Transaction::add_response_hdr(const char* name, int name_len, const char* value, int value_len) +{ + dbgTrace(D_WAAP) << "[transaction:" << this << "] add_response_hdr(name='" << std::string(name, name_len) << + "', value='" << std::string(value, value_len) << "')"; + + // Detect location header and remember its value + static const char location[] = "location"; + + auto openRedirectPolicy = m_siteConfig ? m_siteConfig->get_OpenRedirectPolicy() : NULL; + if (openRedirectPolicy && openRedirectPolicy->enable && + memcaseinsensitivecmp(name, name_len, location, sizeof(location) - 1)) { + std::string redirectUrl = std::string(value, value_len); + dbgTrace(D_WAAP) << "Detected the redirect 'Location' header: '" << redirectUrl << "'"; + + if (m_responseStatus >= 300 && m_responseStatus < 400 && m_openRedirectState.testRedirect(redirectUrl)) { + dbgTrace(D_WAAP) << "Waf2Transaction::decideResponse: openRedirect detected (enforce=" << + openRedirectPolicy->enforce << ")"; + auto decision = std::dynamic_pointer_cast( + m_waapDecision.getDecision(OPEN_REDIRECT_DECISION)); + decision->setLog(true); + decision->setBlock(openRedirectPolicy->enforce); + decision->setLink(redirectUrl); + } + } + + if (m_responseStatus >= 400 && m_responseStatus <= 599) { + auto errorDisclosurePolicy = m_siteConfig ? m_siteConfig->get_ErrorDisclosurePolicy() : NULL; + if (errorDisclosurePolicy && errorDisclosurePolicy->enable) { + // Scan response header values + Waf2ScanResult res; + if (m_pWaapAssetState->apply(std::string(value, value_len), res, "resp_header")) { + // Found some signatures in response! + delete m_scanResult; + m_scanResult = new Waf2ScanResult(res); + dbgTrace(D_WAAP) << "found indicators in response header"; + auto decision = m_waapDecision.getDecision(ERROR_DISCLOSURE_DECISION); + decision->setLog(true); + decision->setBlock(errorDisclosurePolicy->enforce); + } + } + } +} + +void Waf2Transaction::end_response_hdrs() +{ + dbgTrace(D_WAAP) << "[transaction:" << this << "] end_response_hdrs"; + + // Enable response body processing only if response scanning is enabled in policy + auto errorDisclosurePolicy = m_siteConfig ? m_siteConfig->get_ErrorDisclosurePolicy() : NULL; + m_responseInspectReasons.setErrorDisclosure(errorDisclosurePolicy && errorDisclosurePolicy->enable); + + // OpenRedirect is only interested to see response headers, the body + m_responseInspectReasons.setOpenRedirect(false); +} + +void Waf2Transaction::start_response_body() +{ + dbgTrace(D_WAAP) << "[transaction:" << this << "] start_response_body"; + m_response_body_bytes_received = 0; + m_response_body.clear(); +} + +void Waf2Transaction::add_response_body_chunk(const char* data, int data_len) +{ + dbgTrace(D_WAAP) << "[transaction:" << this << "] add_response_body_chunk (" << data_len << " bytes)"; + m_response_body_bytes_received += data_len; + + auto errorDisclosurePolicy = m_siteConfig ? 
m_siteConfig->get_ErrorDisclosurePolicy() : NULL; + if (errorDisclosurePolicy && errorDisclosurePolicy->enable && + (m_responseStatus >= 400 && m_responseStatus <= 599)) { + // Collect up to MAX_RESPONSE_BODY_SIZE_ERR_DISCLOSURE of input data for each response + if (m_response_body_err_disclosure.length() + data_len <= MAX_RESPONSE_BODY_SIZE_ERR_DISCLOSURE) { + m_response_body_err_disclosure.append(data, (size_t)data_len); + } + else if (m_response_body_err_disclosure.length() < MAX_RESPONSE_BODY_SIZE_ERR_DISCLOSURE) { + size_t piece = MAX_RESPONSE_BODY_SIZE_ERR_DISCLOSURE - m_response_body_err_disclosure.length(); + // Note: piece is guaranteed to be > data_len, so the write below is safe. + m_response_body_err_disclosure.append(data, piece); + } + else { + m_responseInspectReasons.setErrorDisclosure(false); + } + } + + if (m_response_body_err_disclosure.length() <= MAX_RESPONSE_BODY_SIZE_ERR_DISCLOSURE) { + // Scan now, buffer is filled up. + scanErrDisclosureBuffer(); + } + + // Collect up to MAX_RESPONSE_BODY_SIZE of input data for each response + if (m_response_body.length() + data_len <= MAX_RESPONSE_BODY_SIZE) { + m_response_body.append(data, (size_t)data_len); + } + else if (m_response_body.length() < MAX_RESPONSE_BODY_SIZE) { + size_t piece = MAX_RESPONSE_BODY_SIZE - m_response_body.length(); + // Note: piece is guaranteed to be > data_len, so the write below is safe. + m_response_body.append(data, piece); + } + else { + // No more need to collect response body for log (got enough data - up to MAX_RESPONSE_BODY_SIZE collected) + m_responseInspectReasons.setCollectResponseForLog(false); + } +} + +void Waf2Transaction::end_response_body() +{ + dbgTrace(D_WAAP) << "[transaction:" << this << "] end_response_body"; +} + +void Waf2Transaction::scanErrDisclosureBuffer() +{ + if (m_responseStatus >= 400 && m_responseStatus <= 599) { + auto errorDisclosurePolicy = m_siteConfig ? m_siteConfig->get_ErrorDisclosurePolicy() : NULL; + if (errorDisclosurePolicy && errorDisclosurePolicy->enable) { + // Scan response body chunks. + Waf2ScanResult res; + if (m_pWaapAssetState->apply(std::string(m_response_body_err_disclosure.data(), + m_response_body_err_disclosure.size()), res, "resp_body")) { + // Found some signatures in response! + delete m_scanResult; + m_scanResult = new Waf2ScanResult(res); + dbgTrace(D_WAAP) << "found indicators in response body"; + auto decision = m_waapDecision.getDecision(ERROR_DISCLOSURE_DECISION); + decision->setLog(true); + decision->setBlock(errorDisclosurePolicy->enforce); + } + } + } + m_responseInspectReasons.setErrorDisclosure(false); +} + +void Waf2Transaction::end_response() +{ + dbgTrace(D_WAAP) << "[transaction:" << this << "] end_response"; +} + +void Waf2Transaction::setCurrentAssetState(IWaapConfig* sitePolicy) +{ + I_WaapAssetStatesManager* pWaapAssetStatesManager = + Singleton::Consume::by(); + std::shared_ptr pCurrentWaapAssetState = + pWaapAssetStatesManager->getWaapAssetStateById(sitePolicy->get_AssetId()); + + if (!pCurrentWaapAssetState || pCurrentWaapAssetState->getSignatures()->fail()) + { + dbgWarning(D_WAAP) << "[transaction:" << this << "] " + "couldn't set waapAssetState for asset... 
using original waapAssetState"; + return; + } + + m_pWaapAssetState = pCurrentWaapAssetState; +} + +void Waf2Transaction::clearRequestParserState() { + if (m_requestBodyParser != NULL) { + delete m_requestBodyParser; + m_requestBodyParser = NULL; + } +} + +Waf2Transaction::Waf2Transaction() : + TableOpaqueSerialize(this), + m_pWaapAssetState(NULL), + m_ignoreScore(false), + m_remote_port(0), + m_local_port(0), + m_csrfState(), + m_userLimitsState(nullptr), + m_siteConfig(NULL), + m_contentType(Waap::Util::CONTENT_TYPE_UNKNOWN), + m_requestBodyParser(NULL), + m_tagHist{0}, + m_tagHistPos(0), + m_isUrlValid(false), + m_scanner(this), + m_deepParser(m_pWaapAssetState, m_scanner, this), + m_deepParserReceiver(m_deepParser), + m_scanResult(NULL), + m_request_body_bytes_received(0), + m_response_body_bytes_received(0), + m_processedUri(false), + m_processedHeaders(false), + m_isScanningRequired(false), + m_responseStatus(0), + m_responseInspectReasons(), + m_responseInjectReasons(), + m_index(-1), + m_triggerLog(), + m_waf2TransactionFlags() +{ + is_hybrid_mode = + Singleton::exists() ? + Singleton::Consume::by()->getOrchestrationMode() == OrchestrationMode::HYBRID + : false; + if (is_hybrid_mode) { + max_grace_logs = getProfileAgentSettingWithDefault( + 10, + "rulebase.initialForcedSecurityLogsToLocalStorage.count" + ); + } +} + +Waf2Transaction::Waf2Transaction(std::shared_ptr pWaapAssetState) : + TableOpaqueSerialize(this), + m_pWaapAssetState(pWaapAssetState), + m_ignoreScore(false), + m_remote_port(0), + m_local_port(0), + m_csrfState(), + m_userLimitsState(nullptr), + m_siteConfig(NULL), + m_contentType(Waap::Util::CONTENT_TYPE_UNKNOWN), + m_requestBodyParser(NULL), + m_tagHist{0}, + m_tagHistPos(0), + m_isUrlValid(false), + m_scanner(this), + m_deepParser(m_pWaapAssetState, m_scanner, this), + m_deepParserReceiver(m_deepParser), + m_scanResult(NULL), + m_request_body_bytes_received(0), + m_response_body_bytes_received(0), + m_processedUri(false), + m_processedHeaders(false), + m_isScanningRequired(false), + m_responseStatus(0), + m_responseInspectReasons(), + m_responseInjectReasons(), + m_index(-1), + m_triggerLog(), + m_waf2TransactionFlags() +{ + is_hybrid_mode = + Singleton::exists() ? 
+ Singleton::Consume::by()->getOrchestrationMode() == OrchestrationMode::HYBRID + : false; + if (is_hybrid_mode) { + max_grace_logs = getProfileAgentSettingWithDefault( + 10, + "rulebase.initialForcedSecurityLogsToLocalStorage.count" + ); + } +} + +Waf2Transaction::~Waf2Transaction() { + dbgTrace(D_WAAP) << "Waf2Transaction::~Waf2Transaction: deleting m_requestBodyParser"; + delete m_requestBodyParser; + dbgTrace(D_WAAP) << "Waf2Transaction::~Waf2Transaction: deleting m_scanResult"; + delete m_scanResult; +} + +HeaderType Waf2Transaction::detectHeaderType(const char* name, int name_len) { + // Detect host header + static const char host[] = "host"; + static const char user_agent[] = "user-agent"; + static const char content_type[] = "content-Type"; + static const char cookie[] = "cookie"; + static const char referer[] = "referer"; + + if (memcaseinsensitivecmp(name, name_len, host, sizeof(host) - 1)) { + return HeaderType::HOST_HEADER; + } + if (memcaseinsensitivecmp(name, name_len, user_agent, sizeof(user_agent) - 1)) { + return HeaderType::USER_AGENT_HEADER; + } + if (memcaseinsensitivecmp(name, name_len, content_type, sizeof(content_type) - 1)) { + return HeaderType::CONTENT_TYPE_HEADER; + } + if (memcaseinsensitivecmp(name, name_len, cookie, sizeof(cookie) - 1)) { + return HeaderType::COOKIE_HEADER; + } + if (memcaseinsensitivecmp(name, name_len, referer, sizeof(referer) - 1)) { + return HeaderType::REFERER_HEADER; + } + return UNKNOWN_HEADER; +} + +HeaderType Waf2Transaction::checkCleanHeader(const char* name, int name_len, const char* value, int value_len) const +{ + if (m_pWaapAssetState != nullptr) { + for (auto it = m_pWaapAssetState->getSignatures()->headers_re.begin(); + it != m_pWaapAssetState->getSignatures()->headers_re.end(); + ++it) { + const std::string& reHeaderName = it->first; + Regex* pRegex = it->second; + if (memcaseinsensitivecmp(name, name_len, reHeaderName.data(), reHeaderName.size())) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] special header '" << std::string(name, name_len) << + "' - scan with regex '" << pRegex->getName().c_str() << "' to determine cleanliness ..."; + if(pRegex->hasMatch(std::string(value, value_len))) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] special header '" << + std::string(name, name_len) << " is clean"; + return CLEAN_HEADER; + } + return OTHER_KNOWN_HEADERS; + } + } + + static const std::string x_newrelic_id("x-newrelic-id"); + static const std::string x_newrelic_transaction("x-newrelic-transaction"); + if (memcaseinsensitivecmp(name, name_len, x_newrelic_id.data(), x_newrelic_id.size()) || + memcaseinsensitivecmp(name, name_len, x_newrelic_transaction.data(), x_newrelic_transaction.size())) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] special header '" << std::string(name, name_len) << + "' - detect base64 to determine cleanliness ..."; + + std::string result; + int decodedCount = 0; + int deletedCount = 0; + + // Detect potential base64 matches + Waap::Util::b64Decode(std::string(value, value_len), b64DecodeChunk, decodedCount, deletedCount, result); + + if (result.empty() && (decodedCount + deletedCount == 1)) { + // Decoded 1 base64 chunk and nothing left behind it + dbgTrace(D_WAAP) << "[transaction:" << this << "] special header '" << + std::string(name, name_len) << " is clean"; + return CLEAN_HEADER; + } + } + + static const std::string authorization("authorization"); + if (memcaseinsensitivecmp(name, name_len, authorization.data(), authorization.size())) { + dbgTrace(D_WAAP) << "[transaction:" << 
this << "] special header '" << std::string(name, name_len) << + "' - detect base64 to determine cleanliness ..."; + + std::string result; + int decodedCount = 0; + int deletedCount = 0; + + std::string v(value, value_len); + boost::algorithm::to_lower(v); + const std::string negotiate("negotiate "); + + if (boost::algorithm::starts_with(v, negotiate)) { + v = v.substr(negotiate.size(), v.size() - negotiate.size()); + + // Detect potential base64 match after the "Negotiate " prefix + Waap::Util::b64Decode(v, b64DecodeChunk, decodedCount, deletedCount, result); + if (result.empty() && (deletedCount + decodedCount == 1)) { + // Decoded 1 base64 chunk and nothing left behind it + dbgTrace(D_WAAP) << "[transaction:" << this << "] special header '" << + std::string(name, name_len) << " is clean"; + return CLEAN_HEADER; + } + } + } + + } + return UNKNOWN_HEADER; +} + +// Methods below are callbacks that are called during HTTP transaction processing by the front-end server/proxy +void Waf2Transaction::start() { + dbgTrace(D_WAAP) << "[Waf2Transaction::start():" << this << "] start"; + // TODO:: maybe guard against double call of this function by buggy client. + m_contentType = Waap::Util::CONTENT_TYPE_UNKNOWN; + m_remote_addr.clear(); + m_remote_port = 0; + m_local_addr.clear(); + m_local_port = 0; + m_request_body_bytes_received = 0; + m_response_body_bytes_received = 0; + m_requestBodyParser = NULL; + m_methodStr.clear(); + m_uriStr.clear(); + m_uriPath.clear(); + m_uriReferer.clear(); + m_uriQuery.clear(); + m_contentTypeStr.clear(); + m_hostStr.clear(); + m_userAgentStr.clear(); + m_cookieStr.clear(); + m_notes.clear(); + m_source_identifier.clear(); + // TODO:: remove this! refactor extraction of kv_pairs! + m_deepParser.clear(); + hdrs_map.clear(); + m_request_body.clear(); + m_response_body.clear(); +} + +void Waf2Transaction::set_transaction_time(const char* log_time) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] set_transaction_time(log_time='" << log_time << "')"; + m_log_time = log_time; +} + +void Waf2Transaction::set_transaction_remote(const char* remote_addr, int remote_port) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] set_transaction_remote('" << remote_addr << ":" << remote_port << + "')"; + m_remote_addr = remote_addr; + m_remote_port = remote_port; + m_source_identifier = remote_addr; +} + +void Waf2Transaction::set_transaction_local(const char* local_addr, int local_port) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] set_transaction_local('" << local_addr << ":" << local_port << + "')"; + m_local_addr = local_addr; + m_local_port = local_port; +} + +void Waf2Transaction::set_method(const char* method) { + + dbgTrace(D_WAAP) << "[transaction:" << this << "] set_method('" << method << "')"; + m_methodStr = method; +} + +bool Waf2Transaction::checkIsScanningRequired() +{ + bool result = false; + if (WaapConfigAPI::getWaapAPIConfig(m_ngenAPIConfig)) { + m_siteConfig = &m_ngenAPIConfig; + auto rateLimitingPolicy = m_siteConfig ? m_siteConfig->get_RateLimitingPolicy() : NULL; + result |= m_siteConfig->get_WebAttackMitigation(); + if(rateLimitingPolicy) { + result |= m_siteConfig->get_RateLimitingPolicy()->getRateLimitingEnforcementStatus(); + } + + auto userLimitsPolicy = m_siteConfig ? m_siteConfig->get_UserLimitsPolicy() : nullptr; + if (userLimitsPolicy) { + result = true; + } + } + + if (WaapConfigApplication::getWaapSiteConfig(m_ngenSiteConfig)) { + m_siteConfig = &m_ngenSiteConfig; + auto rateLimitingPolicy = m_siteConfig ? 
m_siteConfig->get_RateLimitingPolicy() : NULL; + auto errorLimitingPolicy = m_siteConfig ? m_siteConfig->get_ErrorLimitingPolicy() : NULL; + auto csrfPolicy = m_siteConfig ? m_siteConfig->get_CsrfPolicy() : NULL; + auto userLimitsPolicy = m_siteConfig ? m_siteConfig->get_UserLimitsPolicy() : nullptr; + result |= m_siteConfig->get_WebAttackMitigation(); + + if (rateLimitingPolicy) { + result |= m_siteConfig->get_RateLimitingPolicy()->getRateLimitingEnforcementStatus(); + } + if (errorLimitingPolicy) { + result |= m_siteConfig->get_ErrorLimitingPolicy()->getRateLimitingEnforcementStatus(); + } + if (csrfPolicy) { + result |= m_siteConfig->get_CsrfPolicy()->enable; + } + if (userLimitsPolicy) { + result = true; + } + } + return result; +} + +bool Waf2Transaction::setCurrentAssetContext() +{ + // the return value tells me if I need to scan traffic + bool result = false; + m_siteConfig = NULL; + + result |= checkIsScanningRequired(); + + if (!m_siteConfig) { + dbgWarning(D_WAAP) << "[transaction:" << this << "] " + "Failed to set sitePolicy for asset... using the original signatures"; + return result; + } + + setCurrentAssetState(m_siteConfig); + m_deepParser.setWaapAssetState(m_pWaapAssetState); + m_pWaapAssetState->updateFilterManagerPolicy(m_siteConfig); + m_pWaapAssetState->clearFilterVerbose(); + + return result; +} + +void Waf2Transaction::processUri(const char* uri, const std::string& scanStage) { + m_processedUri = true; + const char* p = uri; + std::string baseUri; + + // TODO:: refactor out this block to method, and the next block (parsing url parameters), too. + { + bool pushed = false; + bool firstPush = true; + + // Parse URL + ParserRaw urlParser(m_deepParserReceiver, scanStage); + + // Scan the uri until '?' character found (or until end of the uri string). + do { + const char* q = strchr(p, '?'); + + if (q == NULL) { + // Handle special case found in customer traffic where instead of '?' there was ';' character. + q = strchr(p, ';'); + if (q) { + // Check that after ';' the parameter name is valid and terminated with '='. This would normally be + // the case in legit traffic, but not in attacks. This covers a case of "sap login". + const char *qq; + for (qq = q + 1; isalpha(*qq) || isdigit(*qq) || *qq=='-' || *qq=='_' || *qq=='*'; ++qq); + if (*qq != '=') { + // Assume it might be attack and cancel the separation by the ';' character (scan whole URL) + q = NULL; + } + } + } + + if (q == NULL) { + baseUri = std::string(p); + if (scanStage == "url") { + m_uriPath = baseUri; + } + if (firstPush) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] scanning the " << scanStage.c_str(); + firstPush = false; + } + + // Push the last piece to URL scanner + pushed = true; + std::string url(p, strlen(p)); + + Waap::Util::decodePercentEncoding(url); + urlParser.push(url.data(), url.size()); + + // We found no '?' character so set p to NULL to prevent parameters scan below. + p = NULL; + break; + } + + baseUri = std::string(p, q - p); + if (scanStage == "url") { + m_uriPath = baseUri; + } + + // Push data between last point (p) and the character we found ('?'), not includig the character. + if (q != p) { + // Just so we print this trace message only once + if (firstPush) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] scanning the " << scanStage.c_str(); + firstPush = false; + } + + pushed = true; + std::string url(p, q-p); + Waap::Util::decodePercentEncoding(url); + urlParser.push(url.data(), url.size()); + } + + // If we hit the '?' 
character, finish parsing the URL and continue parsing URL + // parameters from the character next to '?' + p = q + 1; + break; + } while (1); + + if (pushed) { + urlParser.finish(); + m_notes.push_back(scanStage + "_scanned"); + } + } + // in case we found any indication in one of the URI segments and there is not one that starts with / + // scan the whole URI + if (m_scanResult && m_scanResult->score != 0 && (m_scanResult->location == scanStage) && + std::find_if(m_scanResult->keyword_matches.begin(), + m_scanResult->keyword_matches.end(), [](std::string keyword) { return keyword[0] == '/'; }) == + m_scanResult->keyword_matches.end()) + { + auto scanResultBackup = m_scanResult; + m_scanResult = nullptr; + bool ignoreScore = m_ignoreScore; + m_ignoreScore = true; + m_deepParser.m_key.push(scanStage.c_str(), scanStage.size()); + ParserDelimiter uriSegmentsParser(m_deepParserReceiver, '/', scanStage); + std::string baseUriUnescaped(baseUri); + Waap::Util::decodePercentEncoding(baseUriUnescaped); + uriSegmentsParser.push(baseUriUnescaped.c_str(), baseUriUnescaped.length()); + uriSegmentsParser.finish(); + m_deepParser.m_key.pop(scanStage.c_str()); + m_ignoreScore = ignoreScore; + if (uriSegmentsParser.error()) + { + // handle special case where there is no / in the URI - can happen in attackes + m_deepParserReceiver.clear(); + delete m_scanResult; + m_scanResult = scanResultBackup; + } + else { + if (m_scanResult) + { + // keep original scan of the whole URL + delete m_scanResult; + m_scanResult = scanResultBackup; + } + else + { + // scan result is empty when we parsing each segments + // i.e. scan result from using (acceptable) irregular format in the URI - discarding the original scan + delete scanResultBackup; + } + } + } + // at this point, p can either be NULL (if there are no URL parameters), + // or point to the parameters string (right after the '?' 
character) + + if (p && *p) { + // Decode URLEncoded data and send decoded key/value pairs to deep inspection + dbgTrace(D_WAAP) << "[transaction:" << this << "] scanning the " << scanStage.c_str() << " parameters"; + + if (scanStage == "url") { + m_uriQuery = std::string(p); + } + + std::string tag = scanStage + "_param"; + m_deepParser.m_key.push(tag.data(), tag.size()); + size_t buff_len = strlen(p); + ParserUrlEncode up(m_deepParserReceiver, '&', checkUrlEncoded(p, buff_len)); + up.push(p, buff_len); + up.finish(); + m_deepParser.m_key.pop(tag.c_str()); + m_notes.push_back(scanStage + "_params_scanned"); + } +} + +void Waf2Transaction::parseContentType(const char* value, int value_len) +{ + // content type header parser + ContentTypeParser ctp; + + ctp.push(value, value_len); + ctp.finish(); + + dbgTrace(D_WAAP) << "[transaction:" << this << "] ctp detected content type: '" << + ctp.contentTypeDetected.c_str() << "'"; + // The above fills m_contentTypeDetected + m_contentType = Waap::Util::detectContentType(ctp.contentTypeDetected.c_str()); + + // extract boundary string required for parsing multipart-form-data stream + if (m_contentType == Waap::Util::CONTENT_TYPE_MULTIPART_FORM) { + dbgTrace(D_WAAP) << "content_type detected: " << Waap::Util::getContentTypeStr(m_contentType) << + "; boundary='" << ctp.boundaryFound.c_str() << "'"; + m_deepParser.setMultipartBoundary(ctp.boundaryFound); + } + else { + dbgTrace(D_WAAP) << "content_type detected: " << Waap::Util::getContentTypeStr(m_contentType); + } + + std::string contentTypeFull(value, value_len); + // Use content-type trimmed by the first ';' character + m_contentTypeStr = contentTypeFull.substr(0, contentTypeFull.find(";")); +} + +void Waf2Transaction::parseCookie(const char* value, int value_len) +{ + m_cookieStr = std::string(value, value_len); + +#ifdef NO_HEADERS_SCAN + return; +#endif + + if (value_len > 0) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] scanning the cookie value"; + m_deepParser.m_key.push("cookie", 6); + ParserUrlEncode cookieValueParser(m_deepParserReceiver, ';'); + cookieValueParser.push(value, value_len); + cookieValueParser.finish(); + m_deepParser.m_key.pop("cookie"); + m_notes.push_back("cookie_scanned"); + } +} + +void Waf2Transaction::parseReferer(const char* value, int value_len) +{ +#ifdef NO_HEADERS_SCAN + return; +#endif + dbgTrace(D_WAAP) << "Parsed Referer. Referer URI: " << m_uriReferer; + + std::string referer(value, value_len); + std::vector regex_matches; + size_t uriParsedElements = + m_pWaapAssetState->getSignatures()->uri_parser_regex.findAllMatches(referer, regex_matches); + if(uriParsedElements > 0) + { + RegexMatch::MatchGroup& uriPathGroup = regex_matches[0].groups[3]; + m_uriReferer = uriPathGroup.value; + m_uriReferer = normalize_uri(m_uriReferer); + } + // Parse referer value as if it was a URL + if (value_len > 0) + { + processUri(std::string(value, value_len).c_str(), "referer"); + } +} + +void Waf2Transaction::parseUnknownHeaderName(const char* name, int name_len) +{ +#ifdef NO_HEADERS_SCAN + return; +#endif + // Apply signatures on all other, header names, unless they are considered "good" ones to skip scanning them. 
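A hypothetical illustration of the "good header name" filter mentioned above: names made only of benign characters are skipped, anything else is handed to the deep scanner. The actual good_header_name_re pattern ships with the signature set; the regex below is only a stand-in.

    #include <iostream>
    #include <regex>
    #include <string>

    int main()
    {
        // Stand-in pattern: alphanumerics, underscore and dash count as a "good" header name.
        const std::regex good_header_name("^[A-Za-z0-9_\\-]+$");
        for (const std::string name : {"X-Custom-Id", "weird<script>name"}) {
            bool scan = !std::regex_match(name, good_header_name);
            std::cout << name << (scan ? ": scan name" : ": skip") << "\n";
        }
    }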
+ if (name_len && + !m_pWaapAssetState->getSignatures()->good_header_name_re.hasMatch(std::string(name, name_len))) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] scanning the header name"; + m_deepParser.m_key.push("header", 6); + ParserRaw headerNameParser(m_deepParserReceiver, std::string(name, name_len)); + headerNameParser.push(name, name_len); + headerNameParser.finish(); + m_deepParser.m_key.pop("header name"); + m_notes.push_back("hn:" + std::string(name, name_len)); + } +} + +void Waf2Transaction::parseGenericHeaderValue(const std::string &headerName, const char* value, int value_len) +{ +#ifdef NO_HEADERS_SCAN + return; +#endif + if (value_len == 0) { + return; + } + + dbgTrace(D_WAAP) << "[transaction:" << this << "] scanning the header value"; + m_deepParser.m_key.push("header", 6); + ParserRaw headerValueParser(m_deepParserReceiver, headerName); + headerValueParser.push(value, value_len); + headerValueParser.finish(); + m_deepParser.m_key.pop("header value"); + m_notes.push_back("hv:" + headerName); +}; + +// Scan relevant headers to detect attacks inside them +void Waf2Transaction::scanSpecificHeder(const char* name, int name_len, const char* value, int value_len) +{ + HeaderType header_t = detectHeaderType(name, name_len); + std::string headerName = std::string(name, name_len); + + switch (header_t) + { + case HeaderType::COOKIE_HEADER: + parseCookie(value, value_len); + break; + case HeaderType::REFERER_HEADER: + parseReferer(value, value_len); + break; + case HeaderType::UNKNOWN_HEADER: { + HeaderType headerType = checkCleanHeader(name, name_len, value, value_len); + if(headerType == HeaderType::CLEAN_HEADER) { + break; + } + // Scan names of all unknown headers + parseUnknownHeaderName(name, name_len); + // Scan unknown headers whose values do not match "clean generic header" pattern. + // Note that we do want to process special header named x-chkp-csrf-token header - it is treated specially. + if (!m_pWaapAssetState->getSignatures()->good_header_value_re.hasMatch(std::string(value, value_len)) || + headerName == "x-chkp-csrf-token" || headerType == HeaderType::OTHER_KNOWN_HEADERS) { + parseGenericHeaderValue(headerName, value, value_len); + } + break; + } + case HeaderType::USER_AGENT_HEADER: { + HeaderType headerType = checkCleanHeader(name, name_len, value, value_len); + if(headerType == HeaderType::CLEAN_HEADER) { + break; + } + // In case the user agent header contains a known regex match, remove the match before scanning + std::string hdrValue(value, value_len); + hdrValue = NGEN::Regex::regexReplace( + __FILE__, + __LINE__, + hdrValue, + m_pWaapAssetState->getSignatures()->user_agent_prefix_re, + "" + ); + parseGenericHeaderValue(headerName, hdrValue.data(), hdrValue.size()); + break; + } + case HeaderType::CONTENT_TYPE_HEADER: { + HeaderType headerType = checkCleanHeader(name, name_len, value, value_len); + if(headerType == HeaderType::CLEAN_HEADER) { + break; + } + // Parsing of a known header will only take place if its value does not match strict rules and is therefore + // suspected to contain an attack + parseGenericHeaderValue(headerName, value, value_len); + break; + } + default: + break; + } +}; + +// Read headers to extract information from them (like hostname from the Host: header). Do not scan them for attacks. 
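The header routing above relies on memcaseinsensitivecmp(), which, judging by its call sites, returns true when the two buffers have equal length and match case-insensitively. A stand-in with that assumed behavior:

    #include <cctype>
    #include <cstring>
    #include <iostream>

    // Assumed behavior of memcaseinsensitivecmp(): equal length and case-insensitive match.
    static bool equalsIgnoreCase(const char *a, size_t a_len, const char *b, size_t b_len)
    {
        if (a_len != b_len) return false;
        for (size_t i = 0; i < a_len; ++i) {
            if (std::tolower(static_cast<unsigned char>(a[i])) !=
                std::tolower(static_cast<unsigned char>(b[i]))) {
                return false;
            }
        }
        return true;
    }

    int main()
    {
        const char name[] = "Content-Type";
        std::cout << equalsIgnoreCase(name, std::strlen(name), "content-type", 12) << "\n"; // 1
        std::cout << equalsIgnoreCase(name, std::strlen(name), "cookie", 6) << "\n";        // 0
    }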
+void Waf2Transaction::detectSpecificHeader(const char* name, int name_len, const char* value, int value_len) +{ + HeaderType header_t = detectHeaderType(name, name_len); + + switch (header_t) + { + case HeaderType::CONTENT_TYPE_HEADER: + parseContentType(value, value_len); + break; + case HeaderType::HOST_HEADER: + m_hostStr = std::string(value, value_len); + break; + case HeaderType::USER_AGENT_HEADER: + m_userAgentStr = std::string(value, value_len); + break; + default: + break; + } +} + +void Waf2Transaction::detectHeaders() +{ + if (isUrlLimitReached(m_uriStr.size())) { + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] Url limit exceeded"; + return; + } + else if (!isPreventModeValidMethod(getMethod())) { + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] Invalid http method: " << getMethod(); + return; + } + + for (auto it = hdrs_map.begin(); it != hdrs_map.end(); ++it) + { + if (isHttpHeaderLimitReached(it->first, it->second)) { + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] Http header limit exceeded"; + return; + } + detectSpecificHeader(it->first.c_str(), it->first.size(), + it->second.c_str(), it->second.size()); + } +} + +void Waf2Transaction::scanHeaders() +{ + m_processedHeaders = true; + + // Scan relevant headers for attacks + for (auto it = hdrs_map.begin(); it != hdrs_map.end(); ++it) + { + scanSpecificHeder(it->first.c_str(), it->first.size(), + it->second.c_str(), it->second.size()); + } +} + +void Waf2Transaction::set_uri(const char* uri) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] set_uri('" << uri << "')"; + m_uriStr = uri; +} + +void Waf2Transaction::set_host(const char* host) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] set_host('" << host << "')"; + m_hostStr = host; +} + +void Waf2Transaction::start_request_hdrs() { + dbgTrace(D_WAAP) << "[transaction:" << this << "] start_request_hdrs"; + // Clear all things that will be filled by the incoming request headers that will follow + m_contentType = Waap::Util::CONTENT_TYPE_UNKNOWN; + m_requestBodyParser = NULL; +} + +void Waf2Transaction::add_request_hdr(const char* name, int name_len, const char* value, int value_len) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] add_request_hdr(name='" << std::string(name, name_len) << + "', value='" << std::string(value, value_len) << "')"; + std::string header_name(name, name_len); + boost::algorithm::to_lower(header_name); + hdrs_map[header_name] = std::string(value, value_len); +} + +void Waf2Transaction::end_request_hdrs() { + + dbgFlow(D_WAAP) << "[transaction:" << this << "] end_request_hdrs"; + m_isScanningRequired = setCurrentAssetContext(); + if (m_siteConfig != NULL) + { + // getOverrideState also extracts the source identifier and populates m_source_identifier + // but the State itself is not needed now + Waap::Override::State overrideState = getOverrideState(m_siteConfig); + } + IdentifiersEvent ids(m_source_identifier, m_pWaapAssetState->m_assetId); + ids.notify(); + // Read relevant headers and extract meta information such as host name + // Do this before scanning the URL because scanning URL might require this information. 
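To make the header handling above easier to follow: add_request_hdr() lowercases each header name before storing it in hdrs_map, so the later lookups ("host", "cookie", and so on) are effectively case-insensitive. A minimal sketch of that normalization; the size guard is hypothetical, as the real limits come from the configured UserLimitsPolicy:

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <map>
    #include <string>

    int main()
    {
        std::map<std::string, std::string> hdrs_map;
        const size_t kMaxHeaderBytes = 8192; // hypothetical per-header limit
        auto addHeader = [&](std::string name, const std::string &value) {
            std::transform(name.begin(), name.end(), name.begin(),
                           [](unsigned char c) { return std::tolower(c); });
            if (name.size() + value.size() > kMaxHeaderBytes) return false;
            hdrs_map[name] = value;
            return true;
        };
        addHeader("Host", "example.com");
        addHeader("User-Agent", "curl/8.0");
        std::cout << hdrs_map["host"] << "\n"; // example.com
    }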
+ if (m_isScanningRequired) { + createUserLimitsState(); + detectHeaders(); + if (isUserLimitReached()) { + return; + } + } + // Scan URL and url query + if (m_isScanningRequired && !m_processedUri) { + processUri(m_uriStr.c_str(), "url"); + } + // Scan relevant headers for attacks + if (m_isScanningRequired && !m_processedHeaders) { + scanHeaders(); + } + + if(m_siteConfig != NULL) { + // Create rate limiting policy (lazy, on first request) + const std::shared_ptr rateLimitingPolicy = m_siteConfig->get_RateLimitingPolicy(); + if(rateLimitingPolicy && rateLimitingPolicy->getRateLimitingEnforcementStatus()) + { + if (m_pWaapAssetState->getRateLimitingState() == nullptr) + { + m_pWaapAssetState->createRateLimitingState(rateLimitingPolicy); + } + dbgTrace(D_WAAP) << "(Waf2Engine::end_request_hdrs): RateLimiting check starts."; + + // Get current clock time + I_TimeGet* timer = Singleton::Consume::by(); + + // The rate limiting state tracks rate limiting information for all sources + std::shared_ptr rateLimitingState = m_pWaapAssetState->getRateLimitingState(); + + std::chrono::seconds now = std::chrono::duration_cast(timer->getMonotonicTime()); + + bool logRateLimiting = false; + if (rateLimitingState && (rateLimitingState->execute + (m_source_identifier, m_uriPath, now, logRateLimiting) == false)) + { + dbgTrace(D_WAAP) << "(Waf2Engine::end_request_hdrs): RateLimiting decision: Block."; + // block request due to rate limiting + auto decision = m_waapDecision.getDecision(RATE_LIMITING_DECISION); + decision->setBlock(true); + decision->setLog(logRateLimiting); + } + } + else { + dbgTrace(D_WAAP) << "(Waf2Engine::end_request_hdrs): No rate limiting policy."; + } + } +} + +void Waf2Transaction::start_request_body() { + dbgTrace(D_WAAP) << "[transaction:" << this << "] start_request_body: m_contentType=" << m_contentType; + + clearRequestParserState(); + + m_requestBodyParser = new ParserRaw(m_deepParserReceiver, "body"); + + m_request_body_bytes_received = 0; + m_request_body.clear(); +} + +void Waf2Transaction::add_request_body_chunk(const char* data, int data_len) { + dbgTrace(D_WAAP) << "[transaction:" << this << "] add_request_body_chunk (" << data_len << " bytes): parser='" << + (m_requestBodyParser ? m_requestBodyParser->name() : "none") << "': '" << std::string(data, data_len) << "'"; + + if (isHttpBodyLimitReached(data_len)) { + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] Http body limit exceeded"; + return; + } + m_request_body_bytes_received += data_len; + size_t maxSizeToScan = m_request_body_bytes_received; + + if (m_siteConfig != NULL) + { + auto waapParams = m_siteConfig->get_WaapParametersPolicy(); + if (waapParams != nullptr) + { + std::string maxSizeToScanStr = waapParams->getParamVal("max_body_size", ""); + if (maxSizeToScanStr != "") + { + maxSizeToScan = std::stoul(maxSizeToScanStr.c_str()); + } + } + } + + if (m_isScanningRequired && m_request_body_bytes_received <= maxSizeToScan) + { + if (m_requestBodyParser != NULL) { + m_requestBodyParser->push(data, data_len); + if (isObjectDepthLimitReached(m_deepParser.getLocalMaxObjectDepth())) { + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] Object depth limit exceeded"; + return; + } + } + else { + dbgWarning(D_WAAP) << "[transaction:" << this << "] add_request_body_chunk (" << data_len << + " bytes): parser='NONE'. This is most probably a bug. 
" + "Some parser MUST be installed for this transaction!"; + } + } + + // Collect up to MAX_REQUEST_BODY_SIZE of input data for each request + if (m_request_body.length() + data_len <= MAX_REQUEST_BODY_SIZE) { + m_request_body.append(data, (size_t)data_len); + } + else if (m_request_body.length() < MAX_REQUEST_BODY_SIZE) { + size_t piece = MAX_REQUEST_BODY_SIZE - m_request_body.length(); + // Note: piece is guaranteed to be > data_len, so the write below is safe. + m_request_body.append(data, piece); + } +} + +void Waf2Transaction::end_request_body() { + dbgTrace(D_WAAP) << "[transaction:" << this << "] end_request_body"; + + if (m_requestBodyParser != NULL) { + m_requestBodyParser->finish(); + if (isObjectDepthLimitReached(m_deepParser.getLocalMaxObjectDepth())) { + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] Object depth limit exceeded"; + } + + if (m_contentType != Waap::Util::CONTENT_TYPE_UNKNOWN && m_request_body.length() > 0) { + m_deepParser.m_key.pop("body"); + } + } + + // Check and output [ERROR] message if keyStack is not empty (it should be empty here). + if (!m_deepParser.m_key.empty()) { + dbgWarning(D_WAAP) << "[transaction:" << this << "] end_request_body: parser='" << + (m_requestBodyParser ? m_requestBodyParser->name() : "") << + "'. ERROR: m_key is not empty. full key='" << m_deepParser.m_key.c_str() << "'"; + } + + clearRequestParserState(); +} + +void Waf2Transaction::end_request() { + dbgTrace(D_WAAP) << "[transaction:" << this << "] end_request"; + clearRequestParserState(); + + // Enable response headers processing only if values parsed from request contained at least one URL + auto openRedirectPolicy = m_siteConfig ? m_siteConfig->get_OpenRedirectPolicy() : NULL; + if (openRedirectPolicy && openRedirectPolicy->enable && !m_openRedirectState.empty()) { + m_responseInspectReasons.setOpenRedirect(true); + } + + auto errorLimitingPolicy = m_siteConfig ? m_siteConfig->get_ErrorLimitingPolicy() : NULL; + if (errorLimitingPolicy && errorLimitingPolicy->getRateLimitingEnforcementStatus()) { + m_responseInspectReasons.setErrorLimiter(true); + } + + auto rateLimitingPolicy = m_siteConfig ? m_siteConfig->get_RateLimitingPolicy() : NULL; + if (rateLimitingPolicy && rateLimitingPolicy->getRateLimitingEnforcementStatus()) { + m_responseInspectReasons.setRateLimiting(true); + } + + auto securityHeadersPolicy = m_siteConfig ? m_siteConfig->get_SecurityHeadersPolicy() : NULL; + if (securityHeadersPolicy && securityHeadersPolicy->m_securityHeaders.enable) { + m_responseInjectReasons.setSecurityHeaders(true); + if (m_pWaapAssetState->getSecurityHeadersState() == nullptr) + { + m_pWaapAssetState->createSecurityHeadersState(securityHeadersPolicy); + } + dbgTrace(D_WAAP) << "(Waf2Engine::end_request): Security Headers State was created"; + } + + // Enable response headers processing if response scanning is enabled in policy + auto errorDisclosurePolicy = m_siteConfig ? 
m_siteConfig->get_ErrorDisclosurePolicy() : NULL; + m_responseInspectReasons.setErrorDisclosure(errorDisclosurePolicy && errorDisclosurePolicy->enable); +} + +void Waf2Transaction::extractEnvSourceIdentifier() +{ + auto env = Singleton::Consume::by(); + auto env_source_identifiers = env->get("sourceIdentifiers"); + if (!env_source_identifiers.ok() || env_source_identifiers.unpack().empty()) { + dbgInfo(D_WAAP) << "Could not extract source identifier from the environment"; + return; + } + + // Take the first source identifier in set provided by the environment + dbgTrace(D_WAAP) << "Set source identifier from the Environment"; + m_source_identifier = *(env_source_identifiers); +} + +void Waf2Transaction::finish() { + dbgTrace(D_WAAP) << "[transaction:" << this << "] finish"; + clearRequestParserState(); +} + +void Waf2Transaction::set_ignoreScore(bool ignoreScore) { + m_ignoreScore = ignoreScore; +} + +void +Waf2Transaction::decide( + bool& bForceBlock, + bool& bForceException, + int mode) +{ + dbgTrace(D_WAAP) << "[transaction:" << this << "] decide (m_scanResult=" << m_scanResult << ")..."; + + int bSendResponse = false; + + // If WAF stage1 found suspicious request - send it to stage2 and wait for decision. + if (m_scanResult) { + bSendResponse = true; + } + + // If mode == 2 - don't send all traffic to stage2 (it won't be logged) + if (mode == 2) { + bSendResponse = false; + } + + // Normalize URL + std::string normalizedUri = normalize_uri(m_uriStr); + + std::string json = buildWaapResultJson( + m_scanResult, + *this, + bSendResponse, + normalizedUri, + m_uriStr, + bForceBlock, + bForceException + ); + m_waapDecision.setJson(json); +} + +bool +Waf2Transaction::isHtmlType(const char* data, int data_len){ + if(m_uriPath.find(".js") != std::string::npos || m_uriPath.find(".css") != std::string::npos) + { + dbgTrace(D_WAAP) << "Waf2Transaction::isHtmlType: false"; + return false; + } + std::string body(data); + if(!m_pWaapAssetState->getSignatures()->html_regex.hasMatch(body)) + { + dbgTrace(D_WAAP) << "Waf2Transaction::isHtmlType: false"; + return false; + } + dbgTrace(D_WAAP) << "Waf2Transaction::isHtmlType: true"; + return true; +} + +// Search for html tag - return true if found and update the injection correct position. +bool +Waf2Transaction::findHtmlTagToInject(const char* data, int data_len, int& pos) +{ + bool headFound = false; + static const char tag[] = ""; + static size_t tagSize = sizeof(tag) - 1; + + // Searching tag by iterating over data and always check last 6 bytes against the required tag. 
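The search loop that follows keeps a small rolling history of the last bytes seen, so the target tag is found even when it straddles two response-body chunks; the tag literal itself does not survive above, but the 6-byte history buffer suggests the engine looks for the opening <head> tag. A hedged, self-contained sketch of the rolling-match technique (not the engine's exact code):

#include <cctype>
#include <cstddef>
#include <iostream>
#include <string>

// Rolling-history matcher: keeps the last tag.size() bytes seen (across chunks) in a
// circular buffer and reports the position right after a case-insensitive match.
// The tag passed in main() is an assumption for illustration.
class TagFinder
{
public:
    explicit TagFinder(std::string tag) : tag_(std::move(tag)), hist_(tag_.size(), '\0') {}

    // Returns true if the tag ends at some byte of this chunk; pos is then the offset
    // of the byte right after the match inside the current chunk.
    bool push(const char *data, int data_len, int &pos)
    {
        for (pos = 0; pos < data_len; ++pos) {
            hist_[histPos_] = static_cast<char>(std::tolower(static_cast<unsigned char>(data[pos])));
            histPos_ = (histPos_ + 1) % hist_.size();
            bool match = true;
            for (size_t i = 0; i < tag_.size(); ++i) {
                if (tag_[i] != hist_[(histPos_ + i) % hist_.size()]) { match = false; break; }
            }
            if (match) { ++pos; return true; }
        }
        return false;
    }

private:
    std::string tag_;
    std::string hist_;
    size_t histPos_ = 0;
};

int main()
{
    TagFinder finder("<head>");
    int pos = 0;
    // Feed the body in two chunks to show that the match survives a chunk boundary.
    finder.push("<html><he", 9, pos);
    if (finder.push("ad><body>", 9, pos)) std::cout << "inject at chunk offset " << pos << "\n";
}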
+ for (pos = 0; pos= tagSize) { + m_tagHistPos = 0; + } + // check + bool tagMatches = true; + size_t tagHistPosCheck = m_tagHistPos; + for (size_t i=0; i < tagSize; ++i) { + if (tag[i] != ::tolower(m_tagHist[tagHistPosCheck])) { + tagMatches = false; + break; + } + tagHistPosCheck++; + if (tagHistPosCheck >= tagSize) { + tagHistPosCheck = 0; + } + } + if (tagMatches) { + headFound = true; + } + } + + if(!headFound) + { + return false; + } + + return true; +} + +void +Waf2Transaction::completeInjectionResponseBody(std::string& strInjection) +{ + if (m_responseInjectReasons.shouldInjectAntibot()) { + dbgTrace(D_WAAP_BOT_PROTECTION) << + "Waf2Transaction::completeInjectionResponseBody(): Injecting data (antiBot)"; + strInjection += ""; + // No need to inject more than once + m_responseInjectReasons.setAntibot(false); + } + + if (m_responseInjectReasons.shouldInjectCsrf()) { + dbgTrace(D_WAAP) << "Waf2Transaction::completeInjectionResponseBody(): Injecting data (csrf)"; + strInjection += ""; + // No need to inject more than once + m_responseInjectReasons.setCsrf(false); + } +} + +void +Waf2Transaction::handleSecurityHeadersInjection(std::vector>& injectHeaderStrs){ + auto securityHeadersPolicy = m_siteConfig ? m_siteConfig->get_SecurityHeadersPolicy() : NULL; + if (securityHeadersPolicy) { + if (!securityHeadersPolicy->m_securityHeaders.enable) { + dbgTrace(D_WAAP) << + "(Waf2Engine::handleSecurityHeadersInjection): Security Headers Disabled"; + } + else if (m_pWaapAssetState->getSecurityHeadersState() == nullptr) { + dbgDebug(D_WAAP) << + "(Waf2Engine::handleSecurityHeadersInjection): Security Headers State was not created as expected"; + } + else { + injectHeaderStrs = m_pWaapAssetState->getSecurityHeadersState()->headersInjectStrs; + } + } +} + +bool Waf2Transaction::shouldInjectCSRF() +{ + return m_responseInjectReasons.shouldInjectCsrf(); +} + +void Waf2Transaction::disableShouldInjectSecurityHeaders() { + m_responseInjectReasons.setSecurityHeaders(false); +} + +bool Waf2Transaction::shouldInjectSecurityHeaders() +{ + return m_responseInjectReasons.shouldInjectSecurityHeaders(); +} + +void +Waf2Transaction::checkShouldInject() +{ + dbgTrace(D_WAAP) << "Waf2Transaction::checkShouldInject(): starts"; + std::string uri = m_uriPath; + std::string low_method = m_methodStr; + std::transform(low_method.begin(), low_method.end(), low_method.begin(), ::tolower); + + auto csrfPolicy = m_siteConfig ? m_siteConfig->get_CsrfPolicy() : NULL; + bool csrf = false; + dbgTrace(D_WAAP) << "Waf2Transaction::checkShouldInject(): received the relevant Application configuration " + "from the I/S"; + if (csrfPolicy && csrfPolicy->enable) { + csrf = true; + } + else + { + dbgTrace(D_WAAP) << "Waf2Transaction::checkShouldInject(): Should not inject CSRF scripts."; + } + + if(csrf) { + dbgTrace(D_WAAP) << "Waf2Transaction::checkShouldInject(): Should inject CSRF script"; + m_responseInjectReasons.setCsrf(true); + } + return; +} + +bool +Waf2Transaction::decideAfterHeaders() +{ + dbgFlow(D_WAAP) << "Waf2Transaction::decideAfterHeaders()"; + + WaapConfigAPI ngenAPIConfig; + WaapConfigApplication ngenSiteConfig; + IWaapConfig *sitePolicy = NULL; // will be NULL or point to either API or SITE config. 
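To make the injection bookkeeping in completeInjectionResponseBody() above concrete: each inject reason (antibot, CSRF, security headers) is a flag that is set by policy and cleared as soon as its snippet has been emitted, so repeated response-body chunks never inject twice. A minimal model of that pattern; the snippet strings here are placeholders rather than the engine's actual script tags.

#include <iostream>
#include <string>

// Minimal model of the "inject once" bookkeeping: a flag per reason, cleared on use.
struct InjectReasons
{
    bool antibot = false;
    bool csrf = false;
};

void completeInjection(InjectReasons &reasons, std::string &out)
{
    if (reasons.antibot) {
        out += "<!-- antibot snippet placeholder -->";
        reasons.antibot = false;   // no need to inject more than once
    }
    if (reasons.csrf) {
        out += "<!-- csrf snippet placeholder -->";
        reasons.csrf = false;      // same for the CSRF script
    }
}

int main()
{
    InjectReasons reasons;
    reasons.csrf = true;                  // e.g. set when the CSRF policy is enabled
    std::string chunk1, chunk2;
    completeInjection(reasons, chunk1);   // injects the snippet
    completeInjection(reasons, chunk2);   // no-op: the flag was already cleared
    std::cout << !chunk1.empty() << " " << chunk2.empty() << "\n";   // prints "1 1"
}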
+ + if (WaapConfigAPI::getWaapAPIConfig(ngenAPIConfig)) { + dbgTrace(D_WAAP) << "Waf2Transaction::decideAfterHeaders(): got relevant API configuration from the I/S"; + sitePolicy = &ngenAPIConfig; + } + else if (WaapConfigApplication::getWaapSiteConfig(ngenSiteConfig)) { + dbgTrace(D_WAAP) << + "Waf2Transaction::decideAfterHeaders(): got relevant Application configuration from the I/S"; + sitePolicy = &ngenSiteConfig; + } + + if (!sitePolicy) { + dbgTrace(D_WAAP) << "Waf2Transaaction::decideAfterHeaders(): no policy - do not block"; + return false; + } + + m_overrideState = getOverrideState(sitePolicy); + + // Select scores pool by location (but use forced pool when forced) + std::string realPoolName = + (m_scanResult) ? + Waap::Scores::getScorePoolNameByLocation(m_scanResult->location) : + KEYWORDS_SCORE_POOL_BASE; + + // Autonomus Security + AnalysisResult analysisResult; + bool shouldBlock = decideAutonomousSecurity( + *sitePolicy, + 1, + true, + analysisResult, + realPoolName, + UNKNOWN_TYPE + ); + + return finalizeDecision(sitePolicy, shouldBlock); +} + +// Note: the only user of the transactionResult structure filled by this method is waap_automation. +// TODO: Consider removing this parameter (and provide access to this information by other means) +int +Waf2Transaction::decideFinal( + int mode, + AnalysisResult &transactionResult, + const std::string &poolName, + PolicyCounterType fpClassification) +{ + dbgFlow(D_WAAP) << "Waf2Transaction::decideFinal(): starts"; + + // Select scores pool by location (but use forced pool when forced) + std::string realPoolName = + (poolName == KEYWORDS_SCORE_POOL_BASE && m_scanResult) ? + Waap::Scores::getScorePoolNameByLocation(m_scanResult->location) : + poolName; + + // decision of (either) API or Application module + bool shouldBlock = false; + + // TODO:: base class for both, with common inteface + WaapConfigAPI ngenAPIConfig; + WaapConfigApplication ngenSiteConfig; + IWaapConfig *sitePolicy = NULL; // will be NULL or point to either API or SITE config. + + // API config is more specific, hence if it exists it overrides anything from WaapConfigApplication + if (WaapConfigAPI::getWaapAPIConfig(ngenAPIConfig)) { + dbgTrace(D_WAAP) << "Waf2Transaction::decideFinal(): got relevant API configuration from the I/S"; + sitePolicy = &ngenAPIConfig; + m_overrideState = getOverrideState(sitePolicy); + + // User limits + shouldBlock = (getUserLimitVerdict() == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP); + } + else if (WaapConfigApplication::getWaapSiteConfig(ngenSiteConfig)) { + dbgTrace(D_WAAP) << "Waf2Transaction::decideFinal(): got relevant Application configuration from the I/S"; + sitePolicy = &ngenSiteConfig; + m_overrideState = getOverrideState(sitePolicy); + + // Autonomus Security + shouldBlock = decideAutonomousSecurity( + *sitePolicy, + mode, + false, + transactionResult, + realPoolName, + fpClassification + ); + // CSRF Protection + auto csrfPolicy = m_siteConfig ? 
m_siteConfig->get_CsrfPolicy() : nullptr; + if(csrfPolicy && csrfPolicy->enable) { + shouldBlock |= m_csrfState.decide(m_methodStr, m_waapDecision, csrfPolicy); + } + // User limits + shouldBlock |= (getUserLimitVerdict() == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP); + } + + if (mode == 2) { + decide( + m_overrideState.bForceBlock, + m_overrideState.bForceException, + mode + ); + shouldBlock = isSuspicious(); + } + + return finalizeDecision(sitePolicy, shouldBlock); +} + +int +Waf2Transaction::finalizeDecision(IWaapConfig *sitePolicy, bool shouldBlock) +{ + auto decision = std::dynamic_pointer_cast( + m_waapDecision.getDecision(AUTONOMOUS_SECURITY_DECISION)); + // Send log + if (sitePolicy) + { + // auto reject should have default threat level info and above + if (m_overrideState.bForceBlock && decision->getThreatLevel() == ThreatLevel::NO_THREAT) + { + decision->setThreatLevel(ThreatLevel::THREAT_INFO); + } + } + + if (m_overrideState.bForceBlock) { + dbgTrace(D_WAAP) << "Waf2Transaction::finalizeDecision(): setting shouldBlock to true due to override"; + shouldBlock = true; // BLOCK + } + else if (m_overrideState.bForceException) { + dbgTrace(D_WAAP) << "Waf2Transaction::finalizeDecision(): setting shouldBlock to false due to override"; + shouldBlock = false; // PASS + } + + if (m_siteConfig) { + const std::shared_ptr triggerPolicy = m_siteConfig->get_TriggerPolicy(); + if (triggerPolicy) { + const std::shared_ptr triggerLog = getTriggerLog(triggerPolicy); + if (triggerLog && shouldSendExtendedLog(triggerLog)) + { + m_responseInspectReasons.setCollectResponseForLog(true); + } + } + } + + dbgTrace(D_WAAP) << "Waf2Transaction::finalizeDecision(): returning shouldBlock: " << shouldBlock; + return shouldBlock; +} + +void Waf2Transaction::appendCommonLogFields(LogGen& waapLog, + const std::shared_ptr &triggerLog, + bool shouldBlock, + const std::string& logOverride, + const std::string& incidentType) const +{ + auto env = Singleton::Consume::by(); + auto proxy_ip = env->get(HttpTransactionData::proxy_ip_ctx); + if (proxy_ip.ok() && m_remote_addr != proxy_ip.unpack()) + { + waapLog << LogField("proxyIP", static_cast(proxy_ip.unpack())); + } + waapLog << LogField("sourceIP", m_remote_addr); + waapLog << LogField("httpSourceId", m_source_identifier); + waapLog << LogField("sourcePort", m_remote_port); + waapLog << LogField("httpHostName", m_hostStr); + waapLog << LogField("httpMethod", m_methodStr); + const auto& autonomousSecurityDecision = std::dynamic_pointer_cast( + m_waapDecision.getDecision(AUTONOMOUS_SECURITY_DECISION)); + bool send_extended_log = shouldSendExtendedLog(triggerLog); + if (send_extended_log || triggerLog->webUrlPath || autonomousSecurityDecision->getOverridesLog()) { + std::string httpUriPath = m_uriPath; + + if (httpUriPath.length() > MAX_LOG_FIELD_SIZE) + { + httpUriPath.resize(MAX_LOG_FIELD_SIZE); + } + + waapLog << LogField("httpUriPath", httpUriPath, LogFieldOption::XORANDB64); + } + if (send_extended_log || triggerLog->webUrlQuery || autonomousSecurityDecision->getOverridesLog()) { + std::string uriQuery = m_uriQuery; + if (uriQuery.length() > MAX_LOG_FIELD_SIZE) + { + uriQuery.resize(MAX_LOG_FIELD_SIZE); + } + waapLog << LogField("httpUriQuery", uriQuery, LogFieldOption::XORANDB64); + } + if (send_extended_log || triggerLog->webHeaders || autonomousSecurityDecision->getOverridesLog()) { + waapLog << LogField("httpRequestHeaders", logHeadersStr(), LogFieldOption::XORANDB64); + } + // Log http response code if it is known + if (m_responseStatus != 0 && 
send_extended_log && triggerLog->responseCode) { + waapLog << LogField("httpResponseCode", std::to_string(m_responseStatus)); + } + + // Count of bytes available to send to the log + std::string requestBodyToLog = (send_extended_log || triggerLog->webBody) ? + m_request_body : std::string(); + std::string responseBodyToLog = m_response_body; + if (!shouldBlock && responseBodyToLog.empty()) + { + responseBodyToLog = ""; + } + + if (!requestBodyToLog.empty()) { + size_t requestBodyMaxSize = MAX_LOG_FIELD_SIZE - std::min(MIN_RESP_BODY_LOG_FIELD_SIZE, + responseBodyToLog.size()); + // Limit request body log field size + if (requestBodyToLog.length() > requestBodyMaxSize) + { + requestBodyToLog.resize(requestBodyMaxSize); + } + } + + if (!m_response_body.empty()) { + size_t responseBodyMaxSize = MAX_LOG_FIELD_SIZE - requestBodyToLog.size(); + // Limit response body log field size + if (responseBodyToLog.length() > responseBodyMaxSize) + { + responseBodyToLog.resize(responseBodyMaxSize); + } + } + + if (!requestBodyToLog.empty()) + { + waapLog << LogField("httpRequestBody", requestBodyToLog, LogFieldOption::XORANDB64); + } + + if (!responseBodyToLog.empty() && send_extended_log && triggerLog->responseBody) + { + waapLog << LogField("httpResponseBody", responseBodyToLog, LogFieldOption::XORANDB64); + } + + waapLog << LogField("ruleId", m_siteConfig->get_RuleId()); + waapLog << LogField("securityAction", shouldBlock ? "Prevent" : "Detect"); + waapLog << LogField("waapOverride", logOverride); + waapLog << LogField("practiceType", "Threat Prevention"); + waapLog << LogField("practiceSubType", m_siteConfig->get_PracticeSubType()); + waapLog << LogField("ruleName", m_siteConfig->get_RuleName()); + waapLog << LogField("practiceId", m_siteConfig->get_PracticeId()); + waapLog << LogField("practiceName", m_siteConfig->get_PracticeName()); + waapLog << LogField("waapIncidentType", incidentType); + + // Registering this value would append the list of matched override IDs to the unified log + if (!m_matchedOverrideIds.empty()) { + // Convert set to vector and send to log as a list + std::vector vOverrideIds(m_matchedOverrideIds.size()); + std::copy(m_matchedOverrideIds.begin(), m_matchedOverrideIds.end(), vOverrideIds.begin()); + waapLog.addToOrigin(LogField("exceptionIdList", vOverrideIds)); + } +} + +void +Waf2Transaction::sendLog() +{ + dbgFlow(D_WAAP); + m_waapDecision.orderDecisions(); + if (m_siteConfig == NULL) { + dbgWarning(D_WAAP) << + "Waf2Transaction::sendLog: no site policy associated with transaction - not sending a log"; + return; + } + std::string attackTypes = buildAttackTypes(); + std::string logOverride = "None"; + DecisionTelemetryData telemetryData; + std::string assetId = m_siteConfig->get_AssetId(); + const auto& autonomousSecurityDecision = std::dynamic_pointer_cast( + m_waapDecision.getDecision(AUTONOMOUS_SECURITY_DECISION)); + + telemetryData.source = getSourceIdentifier(); + telemetryData.assetName = m_siteConfig->get_AssetName(); + telemetryData.practiceId = m_siteConfig->get_PracticeId(); + telemetryData.practiceName = m_siteConfig->get_PracticeName(); + if (m_scanResult) { + telemetryData.attackTypes = m_scanResult->attack_types; + } + telemetryData.threat = autonomousSecurityDecision->getThreatLevel(); + if (m_overrideState.bForceBlock) { + telemetryData.blockType = FORCE_BLOCK; + } + else if (m_overrideState.bForceException) { + telemetryData.blockType = FORCE_EXCEPTION; + } + else if (m_waapDecision.getDecision(USER_LIMITS_DECISION)->shouldBlock()) { + telemetryData.blockType 
= LIMIT_BLOCK; + } + else if (autonomousSecurityDecision->shouldBlock()) { + telemetryData.blockType = WAF_BLOCK; + } + else if (m_waapDecision.getDecision(CSRF_DECISION)->shouldBlock()) { + telemetryData.blockType = CSRF_BLOCK; + } + else { + telemetryData.blockType = NOT_BLOCKING; + } + + WaapTelemetryEvent(assetId, telemetryData).notify(); + + if (m_overrideState.bIgnoreLog) { + dbgTrace(D_WAAP) << "Waf2Transaction::sendLog: override is to ignore log - not sending a log"; + return; + } + + bool shouldBlock = false; + if (m_overrideState.bForceBlock) { + // If override forces "reject" decision, mention it in the "override" log field. + logOverride = OVERRIDE_DROP; + shouldBlock = true; + } + else if (m_overrideState.bForceException) { + // If override forces "allow" decision, mention it in the "override" log field. + logOverride = OVERRIDE_ACCEPT; + } else if (m_scanner.getIgnoreOverride()) { + logOverride = OVERRIDE_IGNORE; + } + + // Get triggers + const std::shared_ptr triggerPolicy = m_siteConfig->get_TriggerPolicy(); + + if (!triggerPolicy || triggerPolicy->triggers.empty()) { + dbgTrace(D_WAAP) << "Waf2Transaction::sendLog: found no triggers (or triggers are absent) - not sending a log"; + return; + } + + const std::shared_ptr triggerLog = getTriggerLog(triggerPolicy); + + // If there were no triggers of type Log - do not send log + if (!triggerLog) { + dbgTrace(D_WAAP) << "Waf2Transaction::sendLog: found no triggers of type 'Log' - not sending a log"; + return; + } + + static int cur_grace_logs = 0; + bool grace_period = is_hybrid_mode && cur_grace_logs < max_grace_logs; + bool send_extended_log = grace_period || shouldSendExtendedLog(triggerLog); + if (grace_period) { + dbgTrace(D_WAAP) + << "Waf2Transaction::sendLog: current grace log index: " + << cur_grace_logs + 1 + << " out of " + << max_grace_logs; + } + + shouldBlock |= m_waapDecision.getShouldBlockFromHighestPriorityDecision(); + // Do not send Detect log if trigger disallows it + if (!send_extended_log && shouldBlock == false && !triggerLog->tpDetect && + !autonomousSecurityDecision->getOverridesLog()) + { + dbgTrace(D_WAAP) << "Waf2Transaction::sendLog: not sending Detect log (triggers)"; + return; + } + + // Do not send Prevent log if trigger disallows it + if (!send_extended_log && shouldBlock == true && !triggerLog->tpPrevent && + !autonomousSecurityDecision->getOverridesLog()) + { + dbgTrace(D_WAAP) << "Waf2Transaction::sendLog: not sending Prevent log (triggers)"; + return; + } + + // In case no decision to block or log - send log if extend log or override + if (!m_waapDecision.anyDecisionsToLogOrBlock()) + { + if (send_extended_log || autonomousSecurityDecision->getOverridesLog()) + { + sendAutonomousSecurityLog(triggerLog, shouldBlock, logOverride, attackTypes); + dbgTrace(D_WAAP) << "Waf2Transaction::sendLog()::" << + "sending autonomous security log due to either extended log or an override"; + } + else + { + dbgTrace(D_WAAP) << "Waf2Transaction::sendLog: no decision to log"; + } + return; + } + + DecisionType decision_type = m_waapDecision.getHighestPriorityDecisionToLog(); + if (decision_type == DecisionType::NO_WAAP_DECISION) { + if (send_extended_log || autonomousSecurityDecision->getOverridesLog()) { + sendAutonomousSecurityLog(triggerLog, shouldBlock, logOverride, attackTypes); + if (grace_period) { + dbgTrace(D_WAAP) + << "Waf2Transaction::sendLog: Sending log in grace period. 
Log " + << ++cur_grace_logs + << "out of " + << max_grace_logs; + } + } + dbgTrace(D_WAAP) << "Waf2Transaction::sendLog: decisions marked for block only"; + return; + } + + auto maybeLogTriggerConf = getConfiguration("rulebase", "log"); + switch (decision_type) + { + case USER_LIMITS_DECISION: { + std::string incidentDetails; + std::string incidentType; + if (isIllegalMethodViolation()) { + incidentDetails += "Http method received: "; + incidentDetails += getMethod(); + incidentType += "Illegal http method violation"; + } + else { + auto strData = getViolatedUserLimitStrData(); + incidentDetails += "Http request "; + incidentDetails += strData.type; + incidentDetails += " ("; + incidentDetails += strData.policy; + incidentDetails += ")"; + incidentType += "Http limit violation"; + } + + LogGenWrapper logGenWrapper( + maybeLogTriggerConf, + "Web Request", + ReportIS::Audience::SECURITY, + LogTriggerConf::SecurityType::ThreatPrevention, + Severity::HIGH, + Priority::HIGH, + shouldBlock); + + LogGen& waap_log = logGenWrapper.getLogGen(); + appendCommonLogFields(waap_log, triggerLog, shouldBlock, logOverride, incidentType); + waap_log << LogField("waapIncidentDetails", incidentDetails); + waap_log << LogField("eventConfidence", "High"); + if (grace_period) { + dbgTrace(D_WAAP) + << "Waf2Transaction::sendLog: Sending log in grace period. Log " + << ++cur_grace_logs + << "out of " + << max_grace_logs; + } + break; + } + case OPEN_REDIRECT_DECISION: + case ERROR_LIMITING_DECISION: + case RATE_LIMITING_DECISION: + case ERROR_DISCLOSURE_DECISION: { + LogGenWrapper logGenWrapper( + maybeLogTriggerConf, + "API Request", + ReportIS::Audience::SECURITY, + LogTriggerConf::SecurityType::ThreatPrevention, + Severity::CRITICAL, + Priority::HIGH, + shouldBlock); + + LogGen& waap_log = logGenWrapper.getLogGen(); + waap_log << LogField("eventConfidence", "Very High"); + + std::string incidentDetails; + std::string incidentType; + m_waapDecision.getIncidentLogFields( + std::to_string(m_responseStatus), + incidentDetails, + incidentType + ); + + if (decision_type == ERROR_DISCLOSURE_DECISION) { + waap_log << LogField("waapFoundIndicators", getKeywordMatchesStr(), LogFieldOption::XORANDB64); + } + + appendCommonLogFields(waap_log, triggerLog, shouldBlock, logOverride, incidentType); + + waap_log << LogField("waapIncidentDetails", incidentDetails); + if (grace_period) { + dbgTrace(D_WAAP) + << "Waf2Transaction::sendLog: Sending log in grace period. Log " + << ++cur_grace_logs + << "out of " + << max_grace_logs; + } + break; + } + case CSRF_DECISION: { + LogGenWrapper logGenWrapper( + maybeLogTriggerConf, + "CSRF Protection", + ReportIS::Audience::SECURITY, + LogTriggerConf::SecurityType::ThreatPrevention, + Severity::CRITICAL, + Priority::HIGH, + shouldBlock); + + LogGen& waap_log = logGenWrapper.getLogGen(); + appendCommonLogFields(waap_log, triggerLog, shouldBlock, logOverride, "Cross Site Request Forgery"); + waap_log << LogField("waapIncidentDetails", "CSRF Attack discovered."); + if (grace_period) { + dbgTrace(D_WAAP) + << "Waf2Transaction::sendLog: Sending log in grace period. 
Log " + << ++cur_grace_logs + << "out of " + << max_grace_logs; + } + break; + } + case AUTONOMOUS_SECURITY_DECISION: { + if (triggerLog->webRequests || + send_extended_log || + autonomousSecurityDecision->getThreatLevel() != ThreatLevel::NO_THREAT || + autonomousSecurityDecision->getOverridesLog()) { + sendAutonomousSecurityLog(triggerLog, shouldBlock, logOverride, attackTypes); + if (grace_period) { + dbgTrace(D_WAAP) + << "Waf2Transaction::sendLog: Sending log in grace period. Log " + << ++cur_grace_logs + << "out of " + << max_grace_logs; + } + } + break; + } + default: + static_assert(true, "Illegal DecisionType enum value"); + break; + } // end switch +} + +bool +Waf2Transaction::decideAutonomousSecurity( + const IWaapConfig &sitePolicy, + int mode, + bool afterHeaders, + AnalysisResult &transactionResult, + const std::string &poolName, + PolicyCounterType fpClassification) +{ + dbgFlow(D_WAAP) << + "Waf2Transaction::decideAutonomousSecurity(): " << + "mode=" << mode << + ", afterHeaders=" << afterHeaders << + ", poolName='" << poolName << "'"; + + if (mode == 2) + { + return isSuspicious(); + } + + if (!sitePolicy.get_WebAttackMitigation()) { + // Web security not enabled + dbgTrace(D_WAAP) << "Autonomous security is not enabled in policy."; + return false; + } + + std::shared_ptr decision = std::dynamic_pointer_cast( + m_waapDecision.getDecision(AUTONOMOUS_SECURITY_DECISION)); + + // Do not call stage2 so it doesn't learn from exceptions. + // Also do not call stage2 for attacks found in parameter name + if (!m_overrideState.bForceException && !(m_scanResult && m_scanResult->m_isAttackInParam)) { + if (!m_processedUri) { + dbgWarning(D_WAAP) << "decideAutonomousSecurity(): processing URI although is was supposed " + "to be processed earlier ..."; + processUri(m_uriStr.c_str(), "url"); + } + + if (!m_processedHeaders) { + dbgWarning(D_WAAP) << "decideAutonomousSecurity(): processing Headers although is was supposed " + "to be processed earlier ..."; + scanHeaders(); + } + + dbgTrace(D_WAAP) << "decideAutonomousSecurity(): processing stage2 for final decision ..."; + + // Call stage2 + transactionResult = + Singleton::Consume::by()->analyzeData(this, &sitePolicy); + + decision->setThreatLevel(transactionResult.threatLevel); + + decision->setBlock(transactionResult.shouldBlock); + + // Once these are known - fill the values to be included in the log + decision->setRelativeReputation(transactionResult.d2Analysis.relativeReputation); + decision->setFpMitigationScore(transactionResult.d2Analysis.fpMitigationScore); + decision->setFinalScore(transactionResult.d2Analysis.finalScore); + decision->setRelativeReputationMean(transactionResult.d2Analysis.reputationMean); + decision->setVariance(transactionResult.d2Analysis.variance); + + dbgTrace(D_WAAP) << "decideAutonomousSecurity(): stage2 decision is: " << + decision->shouldBlock() << "; threatLevel: " << decision->getThreatLevel() << + "; blockingLevel: " << static_cast::type>( + sitePolicy.get_BlockingLevel()); + + if (!afterHeaders || decision->shouldBlock()) { + ScoreBuilderData sbData; + + sbData.m_fpClassification = transactionResult.d2Analysis.fpClassification; + sbData.m_sourceIdentifier = getSourceIdentifier(); + sbData.m_keywordsCombinations = getKeywordsCombinations(); + sbData.m_keywordsMatches = getKeywordMatches(); + sbData.m_userAgent = getUserAgent(); + sbData.m_sample = getSample(); + sbData.m_relativeReputation = transactionResult.d2Analysis.relativeReputation; + + if (fpClassification != UNKNOWN_TYPE) { + 
sbData.m_fpClassification = fpClassification; + } + + learnScore(sbData, poolName); + } + } + + // Fill attack details for attacks found in parameter names + if (!m_overrideState.bForceException && m_scanResult && m_scanResult->m_isAttackInParam) { + // Since stage2 learning doesn't run in this case, assume stage1 score is the final score + float finalScore = m_scanResult->score; + ThreatLevel threat = Waap::Conversions::convertFinalScoreToThreatLevel(finalScore); + bool shouldBlock = Waap::Conversions::shouldDoWafBlocking(&sitePolicy, threat); + + dbgTrace(D_WAAP) << "attack_in_param without stage2 analysis: final score: " << finalScore << + ", threat level: " << threat << "\nWAF2 decision to block: " << + (shouldBlock ? "block" : "pass"); + + decision->setFinalScore(finalScore); + decision->setThreatLevel(threat); + decision->setBlock(shouldBlock); + + // Fill transactionResult + transactionResult.d2Analysis.finalScore = finalScore; + transactionResult.shouldBlock = shouldBlock; + transactionResult.threatLevel = threat; + } + + // Apply overrides + if (m_overrideState.bForceBlock) { + dbgTrace(D_WAAP) << "decideAutonomousSecurity(): decision was " << decision->shouldBlock() << + " and override forces REJECT ..."; + decision->setBlock(true); + if (!m_overrideState.bIgnoreLog) + { + decision->setOverridesLog(true); + } + } + else if (m_overrideState.bForceException) { + dbgTrace(D_WAAP) << "decideAutonomousSecurity(): decision was " << decision->shouldBlock() << + " and override forces ALLOW ..."; + decision->setBlock(false); + if (!m_overrideState.bIgnoreLog) + { + decision->setOverridesLog(true); + } + } + + if(decision->getThreatLevel() <= ThreatLevel::THREAT_INFO) { + decision->setLog(false); + } else { + decision->setLog(true); + } + + return decision->shouldBlock(); +} + +void Waf2Transaction::handleCsrfHeaderInjection(std::string& injectStr) +{ + m_csrfState.injectCookieHeader(injectStr); +} + +// Disables response injection (masking any pending injection reasons such as from antibot or csrf) +void +Waf2Transaction::clearAllInjectionReasons() { + m_responseInjectReasons.clear(); +} + +// Returns true if WAAP engine is interested in receiving more information about response for this transaction +bool Waf2Transaction::shouldInspectResponse() +{ + return m_responseInspectReasons.shouldInspect() || m_responseInjectReasons.shouldInject(); +} +bool Waf2Transaction::shouldInjectResponse() +{ + return m_responseInjectReasons.shouldInject(); +} + +bool Waf2Transaction::decideResponse() +{ + dbgTrace(D_WAAP) << "Waf2Transaction::decideResponse()"; + + if(m_waapDecision.getDecision(ERROR_LIMITING_DECISION)->shouldBlock()) { + return false; // block + } + if(m_waapDecision.getDecision(RATE_LIMITING_DECISION)->shouldBlock()) { + return false; // block + } + + bool openRedirectBlock = m_waapDecision.getDecision(OPEN_REDIRECT_DECISION)->shouldBlock(); + bool errorDisclosureBlock = m_waapDecision.getDecision(ERROR_DISCLOSURE_DECISION)->shouldBlock(); + if (openRedirectBlock || errorDisclosureBlock) { + dbgTrace(D_WAAP) << "Waf2Transaction::decideResponse(): blocking due to" << + " OpenRedirect:" << openRedirectBlock << + " ErrorDisclosure:" << errorDisclosureBlock; + return false; // block + } + + if (m_siteConfig) { + const std::shared_ptr triggerPolicy = m_siteConfig->get_TriggerPolicy(); + if (!triggerPolicy) { + dbgTrace(D_WAAP) << "Trigger policy was not found. 
Returning true (accept)"; + return true; // accept + } + + const std::shared_ptr triggerLog = getTriggerLog(triggerPolicy); + if (!triggerLog) { + dbgTrace(D_WAAP) << "Log trigger configuration was not found. Returning true (accept)"; + return true; // accept + } + + auto env = Singleton::Consume::by(); + auto http_chunk_type = env->get("HTTP Chunk type"); + bool should_send_extended_log = shouldSendExtendedLog(triggerLog) && http_chunk_type.ok(); + if (should_send_extended_log && + *http_chunk_type == ngx_http_chunk_type_e::RESPONSE_CODE && + !triggerLog->responseBody + ) { + should_send_extended_log = false; + } else if (should_send_extended_log && + *http_chunk_type == ngx_http_chunk_type_e::REQUEST_END && + !triggerLog->responseCode && + !triggerLog->responseBody + ) { + should_send_extended_log = false; + } + + dbgTrace(D_WAAP) + << "Setting flag for collection of respond content logging to: " + << (should_send_extended_log ? "True": "False"); + m_responseInspectReasons.setCollectResponseForLog(should_send_extended_log); + + } + + dbgTrace(D_WAAP) << "Waf2Transaction::decideResponse: returns true (accept)"; + return true; // accept +} + +bool +Waf2Transaction::reportScanResult(const Waf2ScanResult &res) { + if (get_ignoreScore() || (res.score >= SCORE_THRESHOLD && + (m_scanResult == nullptr || res.score > m_scanResult->score))) + { + // Forget any previous scan result and replace with new + delete m_scanResult; + m_scanResult = new Waf2ScanResult(res); + return true; + } + + return false; +} + +bool +Waf2Transaction::shouldIgnoreOverride(const Waf2ScanResult &res) { + dbgTrace(D_WAAP) << "reading exceptions"; + + auto exceptions = getConfiguration("rulebase", "exception"); + if (!exceptions.ok()) return false; + + dbgTrace(D_WAAP) << "matching exceptions"; + + std::unordered_map> exceptions_dict; + + if (res.location != "referer") { + // collect param name + exceptions_dict["paramName"].insert(res.param_name); + exceptions_dict["paramName"].insert(IndicatorsFiltersManager::generateKey(res.location, res.param_name, this)); + + std::set param_name_set; + param_name_set.insert(res.param_name); + param_name_set.insert(IndicatorsFiltersManager::generateKey(res.location, res.param_name, this)); + + // collect param value + exceptions_dict["paramValue"].insert(res.unescaped_line); + + ScopedContext ctx; + ctx.registerValue("paramValue", res.unescaped_line); + ctx.registerValue>("paramName", param_name_set); + + // collect sourceip, sourceIdentifier, url + exceptions_dict["sourceIP"].insert(m_remote_addr); + exceptions_dict["sourceIdentifier"].insert(m_source_identifier); + exceptions_dict["url"].insert(getUriStr()); + exceptions_dict["hostName"].insert(m_hostStr); + + // calling behavior and check if there is a behavior that match to this specific param name. 
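The exception lookup below matches the dictionary built above (paramName, paramValue, sourceIP, sourceIdentifier, url, hostName) against configured behaviors such as action_ignore. A toy illustration of that dictionary-based matching; the Rule type and ruleMatches() helper are invented for this example and are not part of the engine's API.

#include <iostream>
#include <set>
#include <string>
#include <unordered_map>

// Each request dimension maps to the set of values observed for this transaction;
// a rule matches when its configured value appears in the corresponding set.
using ExceptionsDict = std::unordered_map<std::string, std::set<std::string>>;

struct Rule
{
    std::string key;      // e.g. "sourceIP" or "paramName"
    std::string value;    // the value the exception was configured for
    std::string action;   // e.g. "ignore"
};

bool ruleMatches(const Rule &rule, const ExceptionsDict &dict)
{
    auto it = dict.find(rule.key);
    return it != dict.end() && it->second.count(rule.value) > 0;
}

int main()
{
    ExceptionsDict dict;
    dict["paramName"].insert("q");
    dict["sourceIP"].insert("203.0.113.7");
    dict["url"].insert("/search");

    Rule ignoreRule{"sourceIP", "203.0.113.7", "ignore"};
    if (ruleMatches(ignoreRule, dict)) {
        std::cout << "matched exception, action=" << ignoreRule.action << "\n";
    }
}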
+ auto behaviors = exceptions.unpack().getBehavior(exceptions_dict); + for (auto const &behavior : behaviors) { + if (behavior == action_ignore) { + dbgTrace(D_WAAP) << "matched exceptions for " << res.param_name << " should ignore."; + std::string overrideId = behavior.getId(); + if (!overrideId.empty()) { + m_matchedOverrideIds.insert(overrideId); + } + return true; + } + } + } + + return false; +} + +const std::string Waf2Transaction::buildAttackTypes() const +{ + typedef std::map>::const_iterator attack_types_iter; + if (m_scanResult) + { + for (const std::string ®ex_name : m_found_patterns) + { + attack_types_iter attack_types_for_regex = + m_pWaapAssetState->getSignatures()->m_attack_types.find(regex_name); + if (attack_types_for_regex != m_pWaapAssetState->getSignatures()->m_attack_types.end()) + { + for (const std::string &attack_type : attack_types_for_regex->second) + { + m_scanResult->attack_types.insert(attack_type); + } + } + else {m_scanResult->attack_types.insert("General");} + } + + if (Waap::Util::vectorStringContain(m_scanResult->keyword_matches, "xml_entity")) { + m_scanResult->attack_types.insert("XML External Entity"); + } + + if (Waap::Util::vectorStringContain(m_scanResult->keyword_matches, "url_instead_of_file")) { + m_scanResult->attack_types.insert("URL instead of file"); + } + + auto csrfDecision = m_waapDecision.getDecision(CSRF_DECISION); + if(csrfDecision && csrfDecision->shouldBlock()) { + m_scanResult->attack_types.insert("Cross Site Request Forgery"); + } + auto openRedirectDecision = m_waapDecision.getDecision(OPEN_REDIRECT_DECISION); + if (openRedirectDecision && openRedirectDecision->shouldBlock()) { + m_scanResult->attack_types.insert("Open Redirect"); + } + + if (m_scanResult->attack_types.find("General") != m_scanResult->attack_types.end() + && m_scanResult->attack_types.size() > 1) { + m_scanResult->attack_types.erase("General"); + } + return Waap::Util::setToString(m_scanResult->attack_types, false); + } + + return ""; +} + +void Waf2Transaction::collectFoundPatterns() +{ + if (m_scanResult) + { + for (const std::pair> &found_pattern : m_scanResult->found_patterns) + { + const std::string ®ex_name = found_pattern.first; // the regex name (key) + m_found_patterns.insert(regex_name); + } + } +} + +bool Waf2Transaction::shouldSendExtendedLog(const std::shared_ptr &trigger_log) const +{ + if (!trigger_log->extendLogging) + { + dbgTrace(D_WAAP) << "Should not send extended log. Extended log is disabled."; + return false; + } + + auto autonomousSecurityDecision = std::dynamic_pointer_cast( + m_waapDecision.getDecision(AUTONOMOUS_SECURITY_DECISION)); + ReportIS::Severity severity = Waap::Util::computeSeverityFromThreatLevel( + autonomousSecurityDecision->getThreatLevel()); + + if (trigger_log->extendLoggingMinSeverity == "Critical") + { + if (severity == ReportIS::Severity::CRITICAL) + { + dbgTrace(D_WAAP) << "Should send extended logging. Min Severity Critical. Severity: " << (int) severity; + return true; + } + dbgTrace(D_WAAP) << "Should not send extended logging. Min Severity Critical. Severity: " << (int) severity; + return false; + } + else if (trigger_log->extendLoggingMinSeverity == "High") + { + if (severity == ReportIS::Severity::CRITICAL || severity == ReportIS::Severity::HIGH) + { + dbgTrace(D_WAAP) << "Should send extended logging. Min Severity High. Severity: " << (int) severity; + return true; + } + dbgTrace(D_WAAP) << "Should not send extended logging. Min Severity High. 
Severity: " << (int) severity; + return false; + } + + dbgTrace(D_WAAP) << "Should not send extended logging. Min Severity: " << trigger_log->extendLoggingMinSeverity; + return false; +} diff --git a/components/security_apps/waap/waap_clib/Waf2Engine.h b/components/security_apps/waap/waap_clib/Waf2Engine.h new file mode 100755 index 0000000..cc3e16a --- /dev/null +++ b/components/security_apps/waap/waap_clib/Waf2Engine.h @@ -0,0 +1,352 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __WAF2_TRANSACTION_H__99e4201a +#define __WAF2_TRANSACTION_H__99e4201a + +#include "Csrf.h" +#include "UserLimitsPolicy.h" +#include "ParserBase.h" +#include "DeepParser.h" +#include "WaapAssetState.h" +#include "PatternMatcher.h" +#include "Waf2Util.h" +#include "WaapConfigApplication.h" +#include "WaapConfigApi.h" +#include "WaapDecision.h" +#include "DeepAnalyzer.h" +#include +#include +#include +#include +#include +#include +#include // uuid class +#include // uuid generators +#include +#include +#include "i_transaction.h" +#include "i_waap_telemetry.h" +#include "i_deepAnalyzer.h" +#include "table_opaque.h" +#include "WaapResponseInspectReasons.h" +#include "WaapResponseInjectReasons.h" +#include "WaapOpenRedirect.h" +#include "WaapOpenRedirectPolicy.h" +#include "WaapScanner.h" +#include "singleton.h" + +struct DecisionTelemetryData; +class Waf2Transaction; + +// Callback that is called upon completion of next sub transaction +typedef void(*subtransaction_cb_t)(Waf2Transaction* subTransaction, void *ctx); +#define OVERRIDE_ACCEPT "Accept" +#define OVERRIDE_DROP "Drop" + +class Waf2Transaction : + public IWaf2Transaction, + public TableOpaqueSerialize, + public Singleton::Consume, + private boost::noncopyable, + Singleton::Consume, + Singleton::Consume +{ +public: + Waf2Transaction(std::shared_ptr pWaapAssetState); + Waf2Transaction(); + ~Waf2Transaction(); + + // setters + void set_transaction_time(const char *log_time); + void set_transaction_remote(const char *remote_addr, int remote_port); + void set_transaction_local(const char *local_addr, int local_port); + void set_method(const char *method); + void set_uri(const char *uri); + void set_host(const char *host); + + // getters + const std::string& getRemoteAddr() const; + virtual const std::string getUri() const; + const std::string getUriStr() const; + const std::string& getSourceIdentifier() const; + virtual const std::string getUserAgent() const; + const std::string getParam() const; + const std::string getParamKey() const; + const std::vector getKeywordMatches() const; + const std::vector getFilteredKeywords() const; + const std::map> getFilteredVerbose() const; + virtual const std::vector getKeywordsCombinations() const; + const std::vector& getKeywordInfo() const; + const std::vector >& getKvPairs() const; + const std::string getKeywordMatchesStr() const; + const std::string getFilteredKeywordsStr() const; + const std::string getSample() const; + const std::string getLastScanSample() const; + virtual const 
std::string& getLastScanParamName() const; + double getScore() const; + const std::vector getScoreArray() const; + Waap::CSRF::State& getCsrfState(); + const std::set getFoundPatterns() const; + const std::string getContentTypeStr() const; + Waap::Util::ContentType getContentType() const; + int getRemotePort() const; + const std::string getLocalAddress() const; + int getLocalPort() const; + const std::string getLogTime() const; + ParserBase* getRequestBodyParser(); + const std::string getMethod() const; + const std::string getHost() const; + const std::string getCookie() const; + const std::vector getNotes() const; + DeepParser& getDeepParser(); + std::vector > getHdrPairs() const; + virtual const std::string getHdrContent(std::string hdrName) const; + const std::string getRequestBody() const; + const std::string getTransactionIdStr() const; + const WaapDecision &getWaapDecision() const; + virtual std::shared_ptr getAssetState(); + virtual const std::string getLocation() const; + + ngx_http_cp_verdict_e getUserLimitVerdict(); + const std::string getUserLimitVerdictStr() const; + const std::string getViolatedUserLimitTypeStr() const; + + virtual HeaderType detectHeaderType(const char* name, int name_len); + HeaderType checkCleanHeader(const char* name, int name_len, const char* value, int value_len) const; + + // flow control + void start(); + + void start_request_hdrs(); + void add_request_hdr(const char *name, int name_len, const char *value, int value_len); + void end_request_hdrs(); + void start_request_body(); + void add_request_body_chunk(const char *data, int data_len); + void end_request_body(); + void end_request(); + + void start_response(int response_status, int http_version); + void start_response_hdrs(); + void add_response_hdr(const char* name, int name_len, const char* value, int value_len); + void end_response_hdrs(); + void start_response_body(); + void add_response_body_chunk(const char* data, int data_len); + void end_response_body(); + void end_response(); + void extractEnvSourceIdentifier(); + void finish(); + Waf2TransactionFlags &getTransactionFlags(); + + // inject function + void checkShouldInject(); + void completeInjectionResponseBody(std::string& strInjection); + bool findHtmlTagToInject(const char* data, int data_len, int& pos); + bool isHtmlType(const char* data, int data_len); + + // decision functions + void set_ignoreScore(bool ignoreScore); + bool get_ignoreScore() const { return m_ignoreScore; } + void decide( + bool& bForceBlock, + bool& bForceException, + int mode); + bool decideAfterHeaders(); + int decideFinal( + int mode, + AnalysisResult &transactionResult, + const std::string &poolName = KEYWORDS_SCORE_POOL_BASE, + PolicyCounterType fpClassification = UNKNOWN_TYPE); + bool decideAutonomousSecurity( + const IWaapConfig& config, + int mode, + bool afterHeaders, + AnalysisResult &transactionResult, + const std::string &poolName, + PolicyCounterType fpClassification = UNKNOWN_TYPE); + bool decideResponse(); + void clearAllInjectionReasons(); + bool shouldInspectResponse(); + bool shouldInjectResponse(); + bool shouldInjectCSRF(); + bool shouldInjectSecurityHeaders(); + void handleCsrfHeaderInjection(std::string& injectStr); + void handleSecurityHeadersInjection(std::vector>& injectHeaderStrs); + void disableShouldInjectSecurityHeaders(); + + bool shouldSendExtendedLog(const std::shared_ptr &trigger_log) const; + + // query + virtual bool isSuspicious() const; + virtual uint64_t getIndex() const; + virtual void setIndex(uint64_t index); + + //misc + void 
sendLog(); + const std::string logHeadersStr() const; + void learnScore(ScoreBuilderData& data, const std::string &poolName); + const std::string buildAttackTypes() const; + void collectFoundPatterns(); + ReportIS::Severity computeEventSeverityFromDecision() const; + + // LCOV_EXCL_START - sync functions, can only be tested once the sync module exists + + static std::string name() { return "Waf2Transaction"; }; + static std::unique_ptr prototype() { return std::make_unique(); }; + static uint currVer() { return 0; } + static uint minVer() { return 0; } + + template + void serialize(T& ar, uint) { + ar(0); + } + + // LCOV_EXCL_STOP + + bool reportScanResult(const Waf2ScanResult &res); + bool shouldIgnoreOverride(const Waf2ScanResult &res); + Waap::OpenRedirect::State &getOpenRedirectState() { return m_openRedirectState; } + IWaapConfig* getSiteConfig() { return m_siteConfig; } + void addNote(const std::string ¬e) { m_notes.push_back(note); } + +private: + int finalizeDecision(IWaapConfig *sitePolicy, bool shouldBlock); + const std::shared_ptr getTriggerLog(const std::shared_ptr& + triggerPolicy) const; + void sendAutonomousSecurityLog( + const std::shared_ptr& triggerLog, + bool shouldBlock, + const std::string& logOverride, + const std::string& attackTypes) const; + void appendCommonLogFields(LogGen& waapLog, + const std::shared_ptr &triggerLog, + bool shouldBlock, + const std::string& logOverride, + const std::string& incidentType) const; + std::string getUserReputationStr(double relativeReputation) const; + bool isTrustedSource() const; + + void setCurrentAssetState(IWaapConfig* sitePolicy); + bool setCurrentAssetContext(); + bool checkIsScanningRequired(); + Waap::Override::State getOverrideState(IWaapConfig* sitePolicy); + + // User limits functions + void createUserLimitsState(); + bool isUrlLimitReached(size_t size); + bool isHttpHeaderLimitReached(const std::string& name, const std::string& value); + bool isHttpBodyLimitReached(size_t chunkSize); + bool isObjectDepthLimitReached(size_t depth); + bool isPreventModeValidMethod(const std::string& method); + bool isUserLimitReached() const; + bool isIllegalMethodViolation() const; + const Waap::UserLimits::ViolatedStrData& getViolatedUserLimitStrData() const; + size_t getViolatingUserLimitSize() const; + + // Internal + void processUri(const char *uri, const std::string &scanStage); + void parseContentType(const char* value, int value_len); + void parseCookie(const char* value, int value_len); + void parseReferer(const char* value, int value_len); + void parseUnknownHeaderName(const char* name, int name_len); + void parseGenericHeaderValue(const std::string &headerName, const char* value, int value_len); + void scanSpecificHeder(const char* name, int name_len, const char* value, int value_len); + void detectSpecificHeader(const char* name, int name_len, const char* value, int value_len); + void detectHeaders(); + void scanHeaders(); + void clearRequestParserState(); + void scanErrDisclosureBuffer(); + + std::shared_ptr m_pWaapAssetState; + bool m_ignoreScore; // override the scoring filter and (effectively) take the last suspicious parameter, + // instead of the one with highest score that is > SCORE_THRESHOLD + boost::uuids::uuid m_transaction_id; + std::string m_log_time; + std::string m_remote_addr; + std::string m_source_identifier; + int m_remote_port; + std::string m_local_addr; + int m_local_port; + + // Matched override IDs + std::set m_matchedOverrideIds; + + //csrf state + Waap::CSRF::State m_csrfState; + // UserLimits state + 
std::shared_ptr m_userLimitsState; + + WaapConfigAPI m_ngenAPIConfig; + WaapConfigApplication m_ngenSiteConfig; + IWaapConfig* m_siteConfig; + + // Current content type and (for multiplart), MIME boundary identifier + Waap::Util::ContentType m_contentType; + + // Request body parser, type is derived from headers/ContentType. + // May be NULL if request payload is of unknown type! + ParserBase *m_requestBodyParser; + + // find html tag + char m_tagHist[6]; // strlen("") + size_t m_tagHistPos; + bool m_isUrlValid; + + Waap::Scanner m_scanner; // Receives the param+value pairs from DeepParser and scans them + DeepParser m_deepParser; // recursive (deep) parser that can parse deep content encodings + // hierarchies like XML in JSON in URLEncode in ... + BufferedReceiver m_deepParserReceiver; // buffered receiver forwarding to m_deepParser + Waf2ScanResult *m_scanResult; + + std::string m_methodStr; + std::string m_uriStr; + std::string m_uriPath; + std::string m_uriReferer; + std::string m_uriQuery; + std::string m_contentTypeStr; + std::string m_hostStr; + std::string m_userAgentStr; + std::string m_cookieStr; + std::vector m_notes; + std::set m_found_patterns; + + Waap::OpenRedirect::State m_openRedirectState; + std::map hdrs_map; + std::string m_request_body; + std::string m_response_body; + std::string m_response_body_err_disclosure; + size_t m_request_body_bytes_received; + size_t m_response_body_bytes_received; + + bool m_processedUri; + bool m_processedHeaders; + bool m_isScanningRequired; + int m_responseStatus; + Waap::ResponseInspectReasons m_responseInspectReasons; + Waap::ResponseInjectReasons m_responseInjectReasons; + WaapDecision m_waapDecision; + Waap::Override::State m_overrideState; + + uint64_t m_index; + + // Cached pointer to const triggerLog (hence mutable) + mutable std::shared_ptr m_triggerLog; + + Waf2TransactionFlags m_waf2TransactionFlags; + + // Grace period for logging + int max_grace_logs; + bool is_hybrid_mode = false; +}; + +#endif // __WAF2_TRANSACTION_H__99e4201a diff --git a/components/security_apps/waap/waap_clib/Waf2EngineGetters.cc b/components/security_apps/waap/waap_clib/Waf2EngineGetters.cc new file mode 100755 index 0000000..b13400a --- /dev/null +++ b/components/security_apps/waap/waap_clib/Waf2EngineGetters.cc @@ -0,0 +1,623 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "Waf2Engine.h" +#include "WaapOverrideFunctor.h" +#include // uuid class +#include // uuid generators +#include +#include +#include "generic_rulebase/triggers_config.h" +#include "config.h" +#include "LogGenWrapper.h" +#include + +USE_DEBUG_FLAG(D_WAAP_ULIMITS); + +#define LOW_REPUTATION_THRESHOLD 4 +#define NORMAL_REPUTATION_THRESHOLD 6 +#define LOG_HEADER_MAX_LENGTH 200 + +bool Waf2Transaction::isTrustedSource() const +{ + auto policy = m_ngenSiteConfig.get_TrustedSourcesPolicy(); + if (policy == nullptr) + { + dbgTrace(D_WAAP) << "Policy for trusted sources is not set"; + return false; + } + auto trustedTypes = policy->getTrustedTypes(); + std::string cookieVal; + auto env = Singleton::Consume::by(); + auto proxy_ip = env->get(HttpTransactionData::proxy_ip_ctx); + for (auto& trustedType : trustedTypes) + { + switch (trustedType) + { + case Waap::TrustedSources::TrustedSourceType::SOURCE_IP: + dbgTrace(D_WAAP) << "check source: " << getRemoteAddr(); + return policy->isSourceTrusted(getRemoteAddr(), trustedType); + case Waap::TrustedSources::TrustedSourceType::X_FORWARDED_FOR: + if (proxy_ip.ok()) + { + return policy->isSourceTrusted(proxy_ip.unpack(), trustedType); + } else { + return false; + } + case Waap::TrustedSources::TrustedSourceType::COOKIE_OAUTH2_PROXY: + if (cookieVal.empty()) + { + cookieVal = getHdrContent("Cookie"); + } + return policy->isSourceTrusted(Waap::Util::extractKeyValueFromCookie(cookieVal, "_oauth2_proxy"), + trustedType); + default: + dbgWarning(D_WAAP) << "unrecognized trusted source identifier type: " << trustedType; + break; + } + } + return false; +} + +std::string Waf2Transaction::getUserReputationStr(double relativeReputation) const +{ + if (isTrustedSource()) { + return "Trusted"; + } + if (relativeReputation < LOW_REPUTATION_THRESHOLD) + { + return "Low"; + } + if (relativeReputation < NORMAL_REPUTATION_THRESHOLD) + { + return "Normal"; + } + return "High"; +} + +const std::string Waf2Transaction::logHeadersStr() const +{ + std::vector hdrsLog; + + for (auto hdr : hdrs_map) + { + std::string hdrName = hdr.first; + std::string hdrValue = hdr.second.substr(0, LOG_HEADER_MAX_LENGTH); + hdrsLog.push_back(hdrName + ": " + hdrValue); + } + + return Waap::Util::vecToString(hdrsLog, ';').substr(0, MAX_LOG_FIELD_SIZE); +} + + +const WaapDecision& +Waf2Transaction::getWaapDecision() const +{ + return m_waapDecision; +} +std::shared_ptr Waf2Transaction::getAssetState() +{ + return m_pWaapAssetState; +} +const std::string& Waf2Transaction::getRemoteAddr() const +{ + return m_remote_addr; +} +const std::string& Waf2Transaction::getSourceIdentifier() const +{ + return m_source_identifier; +} +const std::string Waf2Transaction::getUri() const +{ + return m_uriPath; +} +const std::string Waf2Transaction::getUriStr() const +{ + return normalize_uri(m_uriStr); +} +bool Waf2Transaction::isSuspicious() const +{ + return !!m_scanResult; +} +uint64_t Waf2Transaction::getIndex() const +{ + return m_index; +} +void Waf2Transaction::setIndex(uint64_t index) +{ + m_index = index; +} +const std::string Waf2Transaction::getUserAgent() const +{ + return m_userAgentStr; +} +const std::string Waf2Transaction::getParam() const +{ + if (m_scanResult == NULL) + { + return ""; + } + return m_scanResult->param_name; +} +const std::string Waf2Transaction::getParamKey() const +{ + if (m_scanResult == NULL) + { + return ""; + } + return IndicatorsFiltersManager::generateKey(m_scanResult->location, m_scanResult->param_name, this); +} +const std::vector 
Waf2Transaction::getKeywordMatches() const +{ + if (m_scanResult == NULL) + { + return std::vector(); + } + return m_scanResult->keyword_matches; +} +const std::vector Waf2Transaction::getFilteredKeywords() const +{ + if (m_scanResult == NULL) + { + return std::vector(); + } + return m_scanResult->filtered_keywords; +} +const std::map> Waf2Transaction::getFilteredVerbose() const +{ + if (m_pWaapAssetState == NULL) + { + return std::map>(); + } + return m_pWaapAssetState->getFilterVerbose(); +} +const std::vector Waf2Transaction::getKeywordsCombinations() const +{ + if (m_scanResult) + { + return m_scanResult->keywordCombinations; + } + return std::vector(); +} +const std::vector& Waf2Transaction::getKeywordInfo() const +{ + return m_deepParser.m_keywordInfo; +} +const std::vector >& Waf2Transaction::getKvPairs() const +{ + return m_deepParser.kv_pairs; +} +const std::string Waf2Transaction::getSample() const +{ + if (m_scanResult) + { + return m_scanResult->unescaped_line; + } + return std::string(); +} +const std::string Waf2Transaction::getLastScanSample() const +{ + return m_scanner.getLastScanResult().unescaped_line; +} +const std::string& Waf2Transaction::getLastScanParamName() const +{ + return m_scanner.getLastScanResult().param_name; +} +const std::string Waf2Transaction::getKeywordMatchesStr() const +{ + std::vector vec = getKeywordMatches(); + return Waap::Util::vecToString(vec); +} +const std::string Waf2Transaction::getFilteredKeywordsStr() const +{ + std::vector vec = getFilteredKeywords(); + return Waap::Util::vecToString(vec); +} +double Waf2Transaction::getScore() const +{ + if (m_scanResult) { + return m_scanResult->score; + } + return 0; +} +const std::vector Waf2Transaction::getScoreArray() const +{ + if (m_scanResult) { + return m_scanResult->scoreArray; + } + return std::vector(); +} +const std::string Waf2Transaction::getContentTypeStr() const +{ + return m_contentTypeStr; +} +Waap::Util::ContentType Waf2Transaction::getContentType() const +{ + return m_contentType; +} +int Waf2Transaction::getRemotePort() const +{ + return m_remote_port; +} +const std::string Waf2Transaction::getLocalAddress() const +{ + return m_local_addr; +} +int Waf2Transaction::getLocalPort() const +{ + return m_local_port; +} +const std::string Waf2Transaction::getLogTime() const +{ + return m_log_time; +} +ParserBase* Waf2Transaction::getRequestBodyParser() +{ + return m_requestBodyParser; +} +const std::string Waf2Transaction::getMethod() const +{ + return m_methodStr; +} +const std::string Waf2Transaction::getHost() const +{ + return m_hostStr; +} +const std::string Waf2Transaction::getCookie() const +{ + return m_cookieStr; +} +const std::vector Waf2Transaction::getNotes() const +{ + return m_notes; +} +DeepParser& Waf2Transaction::getDeepParser() +{ + return m_deepParser; +} +std::vector > Waf2Transaction::getHdrPairs() const +{ + std::vector > res; + for (auto hdr_pair : hdrs_map) { + res.push_back(std::pair(hdr_pair.first, hdr_pair.second)); + } + return res; +} +const std::string Waf2Transaction::getHdrContent(std::string hdrName) const +{ + boost::algorithm::to_lower(hdrName); + auto hdr_it = hdrs_map.find(hdrName); + if (hdr_it != hdrs_map.end()) { + return hdr_it->second; + } + return ""; +} +const std::string Waf2Transaction::getRequestBody() const +{ + return m_request_body; +} +const std::string Waf2Transaction::getTransactionIdStr() const +{ + return boost::uuids::to_string(m_transaction_id); +} +const std::string Waf2Transaction::getLocation() const +{ + if (m_scanResult) { + 
return m_scanResult->location; + } + return std::string(); +} +Waap::CSRF::State& Waf2Transaction::getCsrfState() +{ + return m_csrfState; +} + +void Waf2Transaction::sendAutonomousSecurityLog( + const std::shared_ptr& triggerLog, + bool shouldBlock, + const std::string& logOverride, + const std::string& attackTypes) const +{ + auto autonomousSecurityDecision = std::dynamic_pointer_cast( + m_waapDecision.getDecision(AUTONOMOUS_SECURITY_DECISION)); + ReportIS::Severity severity = Waap::Util::computeSeverityFromThreatLevel( + autonomousSecurityDecision->getThreatLevel()); + if (autonomousSecurityDecision->getOverridesLog() && logOverride == OVERRIDE_DROP) + { + severity = ReportIS::Severity::MEDIUM; + } + else if (autonomousSecurityDecision->getOverridesLog() && logOverride == OVERRIDE_ACCEPT) + { + severity = ReportIS::Severity::INFO; + } + + const ReportIS::Priority priority = + Waap::Util::computePriorityFromThreatLevel(autonomousSecurityDecision->getThreatLevel()); + + auto maybeLogTriggerConf = getConfiguration("rulebase", "log"); + LogGenWrapper logGenWrapper( + maybeLogTriggerConf, + "Web Request", + ReportIS::Audience::SECURITY, + LogTriggerConf::SecurityType::ThreatPrevention, + severity, + priority, + shouldBlock); + + LogGen& waap_log = logGenWrapper.getLogGen(); + ThreatLevel threat_level = autonomousSecurityDecision->getThreatLevel(); + if (threat_level != ThreatLevel::NO_THREAT) { + std::string confidence = Waap::Util::computeConfidenceFromThreatLevel(threat_level); + waap_log << LogField("eventConfidence", confidence); + } + + appendCommonLogFields(waap_log, triggerLog, shouldBlock, logOverride, attackTypes); + + std::string sampleString = getSample(); + if (sampleString.length() > MAX_LOG_FIELD_SIZE) { + sampleString.resize(MAX_LOG_FIELD_SIZE); + } + waap_log << LogField("matchedSample", sampleString, LogFieldOption::XORANDB64); + std::string location = getLocation(); + if (location == "url_param") + { + location = "url parameter"; + } + else if (location == "referer_param") + { + location = "referer parameter"; + } + waap_log << LogField("matchedLocation", location); + waap_log << LogField("matchedParameter", getParam()); + + // Patch for reporting of log4j under different name (currently only in logs) + std::vector keywordMatches = getKeywordMatches(); + std::replace(keywordMatches.begin(), keywordMatches.end(), std::string("jndi:"), std::string("java_1")); + std::string keywordMatchesStr = Waap::Util::vecToString(keywordMatches); + + waap_log << LogField("waapFoundIndicators", keywordMatchesStr, LogFieldOption::XORANDB64); + waap_log << LogField("matchedIndicators", keywordMatchesStr, LogFieldOption::XORANDB64); + waap_log << LogField("learnedIndicators", getFilteredKeywordsStr(), LogFieldOption::XORANDB64); + waap_log << LogField("waapUserReputationScore", (int)( + autonomousSecurityDecision->getRelativeReputation() * 100)); + waap_log << LogField("waapUserReputation", getUserReputationStr( + autonomousSecurityDecision->getRelativeReputation())); + waap_log << LogField("waapUriFalsePositiveScore", (int)( + autonomousSecurityDecision->getFpMitigationScore() * 100)); + waap_log << LogField("waapKeywordsScore", (int)(getScore() * 100)); + waap_log << LogField("waapFinalScore", (int)(autonomousSecurityDecision->getFinalScore() * 100)); + waap_log << LogField("waapCalculatedThreatLevel", autonomousSecurityDecision->getThreatLevel()); +} + +void Waf2Transaction::createUserLimitsState() +{ + if (!m_siteConfig || m_userLimitsState || + 
(WaapConfigBase::get_WebAttackMitigationMode(*m_siteConfig) == AttackMitigationMode::DISABLED)) { + return; + } + + auto userLimitsPolicy = m_siteConfig->get_UserLimitsPolicy(); + if (userLimitsPolicy) { + m_userLimitsState = std::make_shared(*userLimitsPolicy); + m_userLimitsState->setAssetId(m_siteConfig->get_AssetId()); + m_deepParser.setGlobalMaxObjectDepth(userLimitsPolicy->getMaxObjectDepth()); + if (m_uriPath.empty()) { + // Initialize uriPath so it will be available in the sent log, + // in case a limit is reached early in the flow + m_uriPath = m_uriStr.substr(0, LOG_HEADER_MAX_LENGTH); + } + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] state created with '" << + WaapConfigBase::get_WebAttackMitigationModeStr(*m_siteConfig) << "' mode\n" << + *userLimitsPolicy; + } + else { + dbgTrace(D_WAAP_ULIMITS) << "[USER LIMITS] couldn't load policy"; + } +} + +ngx_http_cp_verdict_e +Waf2Transaction::getUserLimitVerdict() +{ + if (!isUserLimitReached()) { + // Either limit not reached or attack mitigation mode is DISABLED + return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT; + } + + std::string msg; + msg = "[USER LIMITS][" + + std::string(WaapConfigBase::get_WebAttackMitigationModeStr(*m_siteConfig)) + + " mode] " + "Verdict is "; + std::string reason; + reason = " reason: " + getViolatedUserLimitTypeStr(); + + ngx_http_cp_verdict_e verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT; + const AttackMitigationMode mode = WaapConfigBase::get_WebAttackMitigationMode(*m_siteConfig); + auto decision = m_waapDecision.getDecision(USER_LIMITS_DECISION); + if (mode == AttackMitigationMode::LEARNING) { + decision->setLog(true); + decision->setBlock(false); + if (isIllegalMethodViolation()) { + dbgInfo(D_WAAP_ULIMITS) << msg << "INSPECT" << reason << " in detect mode"; + verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT; + } + else { + dbgInfo(D_WAAP_ULIMITS) << msg << "PASS" << reason; + verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT; + } + } + else if (mode == AttackMitigationMode::PREVENT) { + decision->setLog(true); + decision->setBlock(true); + dbgInfo(D_WAAP_ULIMITS) << msg << "BLOCK" << reason; + verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP; + } + + return verdict; +} + +const std::string Waf2Transaction::getUserLimitVerdictStr() const +{ + std::stringstream verdict; + if (!isUserLimitReached()) { + verdict << getViolatedUserLimitTypeStr(); + } + else if (isIllegalMethodViolation()) { + verdict << getViolatedUserLimitTypeStr() << " (" << getMethod() << ")"; + } + else { + auto strData = getViolatedUserLimitStrData(); + verdict << strData.type << " (" << getViolatingUserLimitSize() << + "/" << strData.policy << ")"; + } + return verdict.str(); +} + +bool Waf2Transaction::isUrlLimitReached(size_t size) +{ + if (!m_userLimitsState) { + return false; + } + return m_userLimitsState->addUrlBytes(size); +} +bool Waf2Transaction::isHttpHeaderLimitReached(const std::string& name, const std::string& value) +{ + if (!m_userLimitsState) { + return false; + } + return m_userLimitsState->addHeaderBytes(name, value); +} +bool Waf2Transaction::isHttpBodyLimitReached(size_t chunkSize) +{ + if (!m_userLimitsState) { + return false; + } + return m_userLimitsState->addBodyBytes(chunkSize); +} +bool Waf2Transaction::isObjectDepthLimitReached(size_t depth) +{ + if (!m_userLimitsState) { + return false; + } + return m_userLimitsState->setObjectDepth(depth); +} +bool Waf2Transaction::isPreventModeValidMethod(const std::string& method) +{ + if (!m_userLimitsState) { + return true; + } + + 
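+    // In LEARNING (detect) mode any method is allowed through; in PREVENT mode only methods
+    // the user-limits policy recognizes as valid are accepted (see the check below).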
if (m_userLimitsState->isValidHttpMethod(method) || + (WaapConfigBase::get_WebAttackMitigationMode(*m_siteConfig) == AttackMitigationMode::LEARNING)) { + return true; + } + return false; +} +bool Waf2Transaction::isUserLimitReached() const +{ + return m_userLimitsState ? m_userLimitsState->isLimitReached() : false; +} +bool Waf2Transaction::isIllegalMethodViolation() const +{ + return m_userLimitsState ? m_userLimitsState->isIllegalMethodViolation() : false; +} +const std::string Waf2Transaction::getViolatedUserLimitTypeStr() const +{ + return m_userLimitsState ? m_userLimitsState->getViolatedTypeStr() : "no enforcement"; +} +const Waap::UserLimits::ViolatedStrData& +Waf2Transaction::getViolatedUserLimitStrData() const +{ + return m_userLimitsState->getViolatedStrData(); +} +size_t Waf2Transaction::getViolatingUserLimitSize() const +{ + return m_userLimitsState ? m_userLimitsState->getViolatingSize() : 0; +} + +const std::set Waf2Transaction::getFoundPatterns() const +{ + return m_found_patterns; +} + +Waap::Override::State Waf2Transaction::getOverrideState(IWaapConfig* sitePolicy) +{ + Waap::Override::State overrideState; + std::shared_ptr overridePolicy = sitePolicy->get_OverridePolicy(); + if (overridePolicy) { // at first we will run request overrides (in order to set the source) + overrideState.applyOverride(*overridePolicy, WaapOverrideFunctor(*this), m_matchedOverrideIds, true); + } + + extractEnvSourceIdentifier(); + + Waap::Override::State overrideStateResponse; + if (overridePolicy) { // later we will run response overrides + overrideStateResponse.applyOverride(*overridePolicy, WaapOverrideFunctor(*this), m_matchedOverrideIds, false); + } + return overrideStateResponse; +} + +Waf2TransactionFlags &Waf2Transaction::getTransactionFlags() +{ + return m_waf2TransactionFlags; +} + +const std::shared_ptr Waf2Transaction::getTriggerLog(const std::shared_ptr< + Waap::Trigger::Policy> &triggerPolicy) const +{ + // Trigger log already known (no need to extract it second time) + if (m_triggerLog) { + return m_triggerLog; + } + + // Walk over trigger logs and choose the last one of type Log + for (const Waap::Trigger::Trigger &trigger : triggerPolicy->triggers) { + if (trigger.triggerType == "log") { + m_triggerLog = trigger.log; + } + } + + return m_triggerLog; +} + +ReportIS::Severity Waf2Transaction::computeEventSeverityFromDecision() const +{ + DecisionType type = m_waapDecision.getHighestPriorityDecisionToLog(); + switch (type) + { + case DecisionType::USER_LIMITS_DECISION: + { + return ReportIS::Severity::HIGH; + break; + } + case DecisionType::OPEN_REDIRECT_DECISION: + case DecisionType::ERROR_LIMITING_DECISION: + case DecisionType::RATE_LIMITING_DECISION: + case DecisionType::CSRF_DECISION: + case DecisionType::ERROR_DISCLOSURE_DECISION: + { + return ReportIS::Severity::CRITICAL; + break; + } + case DecisionType::AUTONOMOUS_SECURITY_DECISION: + { + auto autonomousSecurityDecision = std::dynamic_pointer_cast( + m_waapDecision.getDecision(DecisionType::AUTONOMOUS_SECURITY_DECISION)); + return Waap::Util::computeSeverityFromThreatLevel(autonomousSecurityDecision->getThreatLevel()); + } + default: + static_assert(true, "Illegal DecisionType enum value"); + break; + } + + return ReportIS::Severity::INFO; +} diff --git a/components/security_apps/waap/waap_clib/Waf2Regex.cc b/components/security_apps/waap/waap_clib/Waf2Regex.cc new file mode 100755 index 0000000..b6dfe02 --- /dev/null +++ b/components/security_apps/waap/waap_clib/Waf2Regex.cc @@ -0,0 +1,653 @@ +// Copyright (C) 2022 Check 
Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// #define WAF2_LOGGING_ENABLE + +#include "Waf2Regex.h" +#include "debug.h" +#include +#include + +USE_DEBUG_FLAG(D_WAAP_REGEX); + +// SingleRegex + +SingleRegex::SingleRegex( + const std::string& pattern, + bool& error, + const std::string& regexName, + bool bNoRegex, + const std::string ®exMatchName, + const std::string ®exMatchValue) + : + m_re(NULL), + m_matchData(NULL), + m_regexName(regexName), + m_noRegex(bNoRegex), + m_regexMatchName(regexMatchName), + m_regexMatchValue(regexMatchValue) + { + dbgTrace(D_WAAP_REGEX) << "Create SingleRegex '" << m_regexName << "' PATTERN: '" << + std::string(pattern.data(), pattern.size()) << "'"; + + if (error) { + // Skip initialization if already in error condition + dbgError(D_WAAP_REGEX) << "Skip compiling regex: " << m_regexName << " (single) due to previous error"; + return; + } + + int errorCode; + size_t errorOffset; + m_re = pcre2_compile( + reinterpret_cast(pattern.data()), + pattern.size(), + 0, + &errorCode, + &errorOffset, + NULL + ); + + if (pcre2_jit_compile(m_re, PCRE2_JIT_COMPLETE) < 0) { + dbgError(D_WAAP_REGEX) << "pcre2_jit_compile failed for regex: " << m_regexName << " (single)"; + error = true; + } + + if (m_re == NULL) { + PCRE2_UCHAR errMessage[4096]; + pcre2_get_error_message(errorCode, errMessage, sizeof(errMessage)); + dbgError(D_WAAP_REGEX) << "pcre2_compile failed: error (" << errorCode << "), " << errMessage << + ", at offset " << errorOffset << " in pattern (single) of regex " << m_regexName << "."; + dbgError(D_WAAP_REGEX) << "pattern: '" << pattern.c_str() << "'"; + error = true; + return; + } + + // Create matchData object that is ready to receive any possible match from m_re + m_matchData = pcre2_match_data_create_from_pattern(m_re, NULL); + + if (m_matchData == NULL) { + dbgError(D_WAAP_REGEX) << "pcre2_compile failed to allocate matchData. pattern: '" << + std::string(pattern.data(), pattern.size()) << "'"; + pcre2_code_free(m_re); + m_re = NULL; + return; + } + + // Get info about compiled pattern + pcre2_pattern_info(m_re, PCRE2_INFO_CAPTURECOUNT, &m_captureGroupsCount); + PCRE2_SPTR nameTable; + uint32_t nameCount; + uint32_t nameEntrySize; + pcre2_pattern_info(m_re, PCRE2_INFO_NAMECOUNT, &nameCount); + pcre2_pattern_info(m_re, PCRE2_INFO_NAMEENTRYSIZE, &nameEntrySize); + pcre2_pattern_info(m_re, PCRE2_INFO_NAMETABLE, &nameTable); + + // Allocate enough items for group names to be indexed by capture group index + // Note that number capture groups are numbered starting from 1. Group "0" is for the "whole match" + m_captureNames.resize(m_captureGroupsCount + 1); + + for (uint32_t i = 0; i < nameCount; i++) { + PCRE2_SPTR nameTableEntry = nameTable + i * nameEntrySize; + // According to pcre2 docs, each entry struct starts with 16-bit capture index (big-endian). Consume it. + uint16_t captureIndex = (nameTableEntry[0] << 8) + nameTableEntry[1]; + // Note that capture group indices are numbered starting from 1. 
Group "0" is for the "whole match" + nameTableEntry += sizeof(uint16_t); + // After the index comes zero-terminated capture name. Consume it too. + m_captureNames[captureIndex] = (char*)nameTableEntry; + } +} + +SingleRegex::~SingleRegex() { + if (m_matchData) { + pcre2_match_data_free(m_matchData); + } + + if (m_re) { + pcre2_code_free(m_re); + } +} + +bool SingleRegex::hasMatch(const std::string& s) const { + int rc = pcre2_match( + m_re, // code + reinterpret_cast(s.data()), s.size(), // subject/subject length + 0, // start offset + 0, // options + m_matchData, + NULL // match_context + ); + + if (rc <= 0) { + if (rc != PCRE2_ERROR_NOMATCH) { + PCRE2_UCHAR errmsg[4096]; + pcre2_get_error_message(rc, errmsg, sizeof(errmsg) - 1); + dbgDebug(D_WAAP_REGEX) << "SingleRegex['" << m_regexName << "']::hasMatch " << + "failed with error code: " << rc << " ('" << errmsg << "')"; + } + return false; + } + + return true; +} + +size_t SingleRegex::findAllMatches(const std::string& s, std::vector& matches) const { + size_t matchesCount = 0; + + // Optimized regex that always immediately reports a "simulated" match without spending time to do a scan + if (m_noRegex) { + RegexMatch match; + // Group 0 is "whole match" must always be present and have no name + match.groups.push_back( + RegexMatch::MatchGroup( + 1, + "", + m_regexMatchValue + ) + ); + // Group 1 is "specific match" must be present and have a name + match.groups.push_back( + RegexMatch::MatchGroup( + 2, + m_regexMatchName, + m_regexMatchValue + ) + ); + matches.push_back(match); + matchesCount++; + return matchesCount; + } + + PCRE2_SIZE startOffset = 0; + + do { + int rc = pcre2_match( + m_re, // code + reinterpret_cast(s.data()), s.size(), // subject/subject length + startOffset, // start offset + 0, // options + m_matchData, + NULL // match_context + ); + + if (rc <= 0) { + if (rc != PCRE2_ERROR_NOMATCH) { + PCRE2_UCHAR errmsg[4096]; + pcre2_get_error_message(rc, errmsg, sizeof(errmsg) - 1); + dbgDebug(D_WAAP_REGEX) << "SingleRegex['" << m_regexName << "']::findAllMatches " << + "failed with error code: " << rc << " ('" << errmsg << "')"; + } + break; + } + + int highestMatchedGroupIndex = rc; + + // Get pointer to array of offsets into s, and its size + uint32_t ovCount = pcre2_get_ovector_count(m_matchData); + PCRE2_SIZE* ov = pcre2_get_ovector_pointer(m_matchData); + + RegexMatch match; + match.groups.reserve(ovCount); + + dbgTrace(D_WAAP_REGEX) << "regex '" << m_regexName << "', captureGroupsCount = " << + m_captureGroupsCount << ". ovCount = " << ovCount << "; highestMatchedGroupIndex = " << + highestMatchedGroupIndex; + + // ov is vector of ovCount pairs of PCRE2_SIZE values. + // First entry in pair is offset of start of the match (in s), + // second entry is offset of character one after end of the match. + // Walk over all matches and fill them here (-1 because first one isn't included in ovCount). 
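+        // Illustrative example (not from a real scan): matching "(a)(b)?(c)" against "ac" sets
+        // ov[0..1] to the full match "ac", ov[2..3] to group 1 "a", leaves ov[4..5] PCRE2_UNSET
+        // for the unmatched optional group 2, and sets ov[6..7] to group 3 "c". Unset groups
+        // are skipped by the loop below.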
+ for (int groupIndex = 1; groupIndex < highestMatchedGroupIndex; ++groupIndex) { + PCRE2_SIZE rangeStart = ov[groupIndex * 2]; + PCRE2_SIZE rangeEnd = ov[groupIndex * 2 + 1]; + + // Skip matches that are not set + if (rangeStart == PCRE2_UNSET || rangeEnd == PCRE2_UNSET) { + continue; + } + + dbgTrace(D_WAAP_REGEX) << "groupIndex=" << groupIndex << " ['" << m_captureNames[groupIndex] << + "']: range " << rangeStart << " -> " << rangeEnd; + match.groups.push_back( + RegexMatch::MatchGroup( + groupIndex, + m_captureNames[groupIndex], + s.substr(rangeStart, rangeEnd - rangeStart) + ) + ); + } + + matches.push_back(match); + + // Count matches found in this SingleRegex + matchesCount++; + + // continue searching for next match starting from end of this match + // (first two entries in ov[] are start and end offsets of current full match) + startOffset = ov[1]; + } while (true); + + return matchesCount; +} + +const std::string &SingleRegex::getName() const +{ + return m_regexName; +} + +size_t SingleRegex::findMatchRanges(const std::string& s, std::vector& matchRanges) const { + PCRE2_SIZE startOffset = 0; + + do { + int rc = pcre2_match( + m_re, // code + reinterpret_cast(s.data()), s.size(), // subject/subject length + startOffset, // start offset + 0, // options + m_matchData, + NULL // match_context + ); + + // Note: PCRE2_ERROR_NOMATCH is the normal situation here, but there could be other errors. + // However, whichever error occurred, the loop is stopped. + if (rc <= 0) { + if (rc != PCRE2_ERROR_NOMATCH) { + PCRE2_UCHAR errmsg[4096]; + pcre2_get_error_message(rc, errmsg, sizeof(errmsg) - 1); + dbgDebug(D_WAAP_REGEX) << "SingleRegex['" << m_regexName << "']::findMatchRanges " << + "failed with error code: " << rc << " ('" << errmsg << "')"; + } + break; + } + + // Get pointer to array of offsets into s + PCRE2_SIZE* ov = pcre2_get_ovector_pointer(m_matchData); + + // start searching for next match starting from end of this match + // (first two entries in ov[] are start and end offsets of current full match) + startOffset = ov[1]; + + matchRanges.push_back(RegexMatchRange(ov[0], ov[1])); + } while (true); + + return matchRanges.size(); +} + +// Regex + +Regex::Regex(const std::string& pattern, bool &error, const std::string& regexName) +: +m_regexName(regexName), +m_regexPreconditions(nullptr) // no need for preconditions for single regex mode +{ + if (error) { + // Skip initialization if already in error condition + dbgError(D_WAAP_REGEX) << "Skip compiling regex: " << m_regexName << " (single) due to previous error"; + return; + } + + m_sre.push_back(new SingleRegex(pattern, error, m_regexName)); +} + +// Divide regexp patterns longer than the limit (imposed by pcre2 library!) into multiple regexes. 
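+// With the limit defined as 0 below, every pattern is compiled into its own SingleRegex, so the
+// pattern-accumulation ("acc") branches in the constructor are effectively dead code (hence the
+// assert(false) calls marking them).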
+#define REGEX_PATT_MAX_SIZE 0 + +Regex::Regex( + const std::vector & patterns, + bool &error, + const std::string & regexName, + std::shared_ptr regexPreconditions) +: +m_regexName(regexName), +m_regexPreconditions(regexPreconditions) +{ + if (error) { + // Skip initialization if already in error condition + dbgError(D_WAAP_REGEX) << "Skip compiling regex: " << m_regexName << " due to previous error"; + return; + } + + // This regex helps to parse out group names from regex patterns + SingleRegex patternParseRegex("^\\(\\?P<(.*?)>(.*?)\\)$", error, "patternParseRegex"); + + std::string acc; + + for (std::vector::const_iterator pPattern = patterns.begin(); + pPattern != patterns.end(); + ++pPattern) { + const std::string& pattern = *pPattern; + if ((acc.size() + pattern.size()) > REGEX_PATT_MAX_SIZE) { + if (!acc.empty()) { + assert(false); // this should never happen + m_sre.push_back(new SingleRegex(acc + ")", error, m_regexName)); + acc = "(" + pattern; + } + else + { + bool bNoRegex = false; + std::string regexMatchName; + std::string regexMatchValue; + + // This is the only place where patterns are loaded (one-by-one) + if (m_regexPreconditions) { + // If preconditions are enabled on this Regex instance - build list of indices of SingleRegex + // that should be triggered (executed) for each related word found by aho-corasick pattern scan. + Waap::RegexPreconditions::WordIndex wordIndex = + m_regexPreconditions->getWordByRegex(pattern); + + // Extract group name from the regex pattern string + if (m_regexPreconditions->isNoRegexPattern(pattern)) { + // This word should not be scanned with regex. Instead, it should directly return a match + std::vector parsedMatches; + patternParseRegex.findAllMatches(pattern, parsedMatches); + bNoRegex = true; + regexMatchName = parsedMatches[0].groups[0].value; + regexMatchValue = m_regexPreconditions->getWordStrByWordIndex(wordIndex); + } + + // For each word - build list of SingleRegex indices to be scanned if that word is detected + // Note that if aho-corasick word for this regex is not yet defined it will enter the [""] entry + // and will always be executed. This is less efficient but ensures correct attack detection. + m_wordToRegexIndices[wordIndex].push_back(m_sre.size()); + } + else { + // If preconditions are not enabled on this Regex instance - all SingleRegexes in it will always + // be executed. + m_wordToRegexIndices[Waap::RegexPreconditions::emptyWordIndex].push_back(m_sre.size()); + } + + m_sre.push_back(new SingleRegex("(" + pattern+ ")", error, m_regexName + "/" + pattern, bNoRegex, + regexMatchName, regexMatchValue)); + } + } + else { + assert(false); // this should never happen anymore. + // Add | character between individual patterns, but not before the very first one! + if (acc.empty()) { + // first group + acc = "(" + pattern; + } + else { + // non-first group + acc += "|" + pattern; + } + } + } + + if (acc.size() > 0) { + assert(false); // this should never happen anymore. 
+ m_sre.push_back(new SingleRegex(acc + ")", error, m_regexName)); + } +} + +Regex::~Regex() { + for (std::vector::iterator ppSingleRegex = m_sre.begin(); + ppSingleRegex != m_sre.end(); + ++ppSingleRegex) { + SingleRegex* pSingleRegex = *ppSingleRegex; + + if (pSingleRegex) { + delete pSingleRegex; + } + } +} + +bool Regex::hasMatch(const std::string& s) const { + for (std::vector::const_iterator ppSingleRegex = m_sre.begin(); + ppSingleRegex != m_sre.end(); + ++ppSingleRegex) { + SingleRegex* pSingleRegex = *ppSingleRegex; + + if (pSingleRegex->hasMatch(s)) { + dbgTrace(D_WAAP_REGEX) << "Regex['" << m_regexName << "']['" << pSingleRegex->getName() << + "']::hasMatch() found!"; + return true; + } + } + + return false; +} + +size_t Regex::findAllMatches(const std::string& s, std::vector& matches, + const Waap::RegexPreconditions::PmWordSet *pmWordSet) const { + matches.clear(); + + if (m_regexPreconditions && pmWordSet) { + // If preconditions are enabled on this regex - execute them to make scanning more efficient + std::unordered_set dupIndices; + + for (Waap::RegexPreconditions::WordIndex wordIndex : *pmWordSet) { + const auto &found = m_wordToRegexIndices.find(wordIndex); + + // Check that the wordIndex is related to this instance of Regex object + if (found == m_wordToRegexIndices.end()) { + continue; + } + + const std::vector ®exIndicesList = found->second; + + for (size_t regexIndex : regexIndicesList) { + if (dupIndices.find(regexIndex) != dupIndices.end()) { + // Avoid scanning the same regex index twice (in case it is registered for more than one wordIndex) + continue; + } + + // Scan only regexes that are enabled by aho-corasick scan + m_sre[regexIndex]->findAllMatches(s, matches); + dbgTrace(D_WAAP_REGEX) << "Regex['" << m_sre[regexIndex]->getName() << + "',index=" << regexIndex << "]::findAllMatches(): " << matches.size() << " matches found (so far)"; + + dupIndices.insert(regexIndex); + } + } + } + else { + // When optimization is disabled - scan all regexes + for (SingleRegex* pSingleRegex : m_sre) { + pSingleRegex->findAllMatches(s, matches); + dbgTrace(D_WAAP_REGEX) << "Regex['" << m_regexName << "']['" << pSingleRegex->getName() << + "']::findAllMatches(): " << matches.size() << " matches found (so far)"; + } + } + + dbgTrace(D_WAAP_REGEX) << "Regex['" << m_regexName << "']::findAllMatches(): total " << + matches.size() << " matches found."; + return matches.size(); +} + +inline bool consolidateMatchRangesSortFunc(const RegexMatchRange& a, const RegexMatchRange& b) { + return a.start > b.start; +} + +// Consolidate ranges in-place (algorithm adapted from this solution: +// http://www.geeksforgeeks.org/merging-intervals) +static void consolidateMatchRanges(std::vector& matchRanges) { + // Sort ranges in decreasing order of their start offsets (O(logN) time) + std::sort(matchRanges.begin(), matchRanges.end(), consolidateMatchRangesSortFunc); + int lastIndex = 0; // index of last range in matchRanges vector (up to this range everything is merged) + + // Traverse all ranges and merge where necessary + for (size_t i = 0; i < matchRanges.size(); ++i) { + // If this is not first range and it overlaps with the previous range + if (lastIndex != 0 && matchRanges[lastIndex - 1].start < matchRanges[i].end) { + while (lastIndex != 0 && matchRanges[lastIndex - 1].start < matchRanges[i].end) { + // merge previous and current ranges + matchRanges[lastIndex - 1].end = std::max(matchRanges[lastIndex - 1].end, matchRanges[i].end); + matchRanges[lastIndex - 1].start = 
std::min(matchRanges[lastIndex - 1].start, matchRanges[i].start); + lastIndex--; + } + } + else { + // Doesn't overlap with previous (or no previous because this is first range), + // add the range as-is + matchRanges[lastIndex] = matchRanges[i]; + } + + lastIndex++; + } + + // Keep only merged ranges. Erase extra ranges that are not used anymore + matchRanges.resize(lastIndex); +} + +std::string Regex::sub(const std::string& s, const std::string& repl) const { + std::vector matchRanges; + + // Find all ranges of all matches + for (std::vector::const_iterator ppSingleRegex = m_sre.begin(); + ppSingleRegex != m_sre.end(); + ++ppSingleRegex) { + SingleRegex* pSingleRegex = *ppSingleRegex; + pSingleRegex->findMatchRanges(s, matchRanges); +#ifdef WAF2_LOGGING_ENABLE + dbgTrace(D_WAAP_REGEX) << "Regex['" << m_regexName << "']['" << pSingleRegex->getName() << + "']::sub(): " << matchRanges.size() << " match ranges found (so far):"; + for (size_t i = 0; i < matchRanges.size(); ++i) { + dbgTrace(D_WAAP_REGEX) << "Range [" << i << "]: " << matchRanges[i].start << " -> " << matchRanges[i].end; + } +#endif + } + + // No matches - nothing to replace. + if (matchRanges.empty()) { + return s; + } + + // Match ranges collected from multiple single regexps could overlap and be out of order + // This function sorts the ranges in place (in decreasing order) and also consolidates overlapping + // ranges so they do not overlap. + consolidateMatchRanges(matchRanges); + +#ifdef WAF2_LOGGING_ENABLE + dbgTrace(D_WAAP_REGEX) << "Regex['" << m_regexName << "']::sub(): " << + matchRanges.size() << " match ranges (after consolidation):"; + for (size_t i = 0; i < matchRanges.size(); ++i) { + dbgTrace(D_WAAP_REGEX) << "Range [" << i << "]: " << matchRanges[i].start << " -> " << matchRanges[i].end; + } +#endif + + // Now walk over (consolidated) ranges (that are now guaranteed not to overlap), and copy everything around them + // Note that ranges are still sorted in decreasing order, so we traverse the list backwards to see them in + // increasing order + PCRE2_SIZE startOffset = 0; + std::string outStr; + + for (std::vector::const_reverse_iterator pMatchRange = matchRanges.rbegin(); + pMatchRange != matchRanges.rend(); + ++pMatchRange) { + // Add everything since startOffset until start of current range + outStr += s.substr(startOffset, pMatchRange->start - startOffset); + + // Add replacement + if (!repl.empty()) { + outStr += repl; + } + // Keep copying only after end of current range + startOffset = pMatchRange->end; + } + + // Add remainder of string after last range + outStr += s.substr(startOffset); + return outStr; +} + +// TODO:: refactor out with C++ functor instead of C-style pointer-callback! 
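+// Illustrative note: as invoked below, the callback receives the whole subject string plus
+// begin/end iterators delimiting one match and may fill 'repl' with replacement text.
+// Returning true with a non-empty 'repl' counts as a decode, true with an empty 'repl' counts
+// as a deletion, and returning false leaves the matched chunk untouched in the output.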
+void +Regex::sub( + const std::string& s, + Waap::Util::RegexSubCallback_f cb, + int& decodedCount, + int& deletedCount, + std::string& outStr) const +{ + decodedCount = 0; + deletedCount = 0; + + // Clear outStr, it will be filled with output string (with changes, if applicable) + outStr.clear(); + + std::vector matchRanges; + + // Find all ranges of all matches + for (std::vector::const_iterator ppSingleRegex = m_sre.begin(); + ppSingleRegex != m_sre.end(); + ++ppSingleRegex) { + SingleRegex* pSingleRegex = *ppSingleRegex; + pSingleRegex->findMatchRanges(s, matchRanges); +#ifdef WAF2_LOGGING_ENABLE + dbgTrace(D_WAAP_REGEX) << "Regex['" << m_regexName << "']['" << pSingleRegex->getName() + << "']::sub(): " << matchRanges.size() << " match ranges found (so far):"; + for (size_t i = 0; i < matchRanges.size(); ++i) { + dbgTrace(D_WAAP_REGEX) << "Range [" << i << "]: " << matchRanges[i].start << " -> " << matchRanges[i].end; + } +#endif + } + + // No matches - nothing to replace. + if (matchRanges.empty()) { + outStr = s; + return; + } + + // Match ranges collected from multiple single regexps could overlap and be out of order + // This function sorts the ranges in place (in decreasing order) and also consolidates + // overlapping ranges so they do not overlap. + consolidateMatchRanges(matchRanges); + +#ifdef WAF2_LOGGING_ENABLE + dbgTrace(D_WAAP_REGEX) << "Regex['" << m_regexName << "']::sub(): " << + matchRanges.size() << " match ranges (after consolidation):"; + for (size_t i = 0; i < matchRanges.size(); ++i) { + dbgTrace(D_WAAP_REGEX) << "Range [" << i << "]: " << matchRanges[i].start << " -> " << matchRanges[i].end; + } +#endif + + // Now walk over (consolidated) ranges (that are now guaranteed not to overlap), and copy everything around them + // Note that ranges are still sorted in decreasing order, so we traverse the list backwards to see them in + // increasing order + PCRE2_SIZE startOffset = 0; + + for (std::vector::const_reverse_iterator pMatchRange = matchRanges.rbegin(); + pMatchRange != matchRanges.rend(); + ++pMatchRange) { + // Add everything since startOffset until start of current range + outStr += s.substr(startOffset, pMatchRange->start - startOffset); + + // Compute replacement + std::string repl; + if (cb(s, s.begin() + pMatchRange->start, s.begin() + pMatchRange->end, repl)) { + if (!repl.empty()) { + outStr += repl; + decodedCount++; + } + else { + deletedCount++; + } + } + else { + // if callback told us the chunk was not processed - put original text inside + outStr += s.substr(pMatchRange->start, pMatchRange->end - pMatchRange->start); + } + + // Keep copying only after end of current range + startOffset = pMatchRange->end; + } + + // Add remainder of string after last range + outStr += s.substr(startOffset); + return; +} + +const std::string &Regex::getName() const +{ + return m_regexName; +} diff --git a/components/security_apps/waap/waap_clib/Waf2Regex.h b/components/security_apps/waap/waap_clib/Waf2Regex.h new file mode 100755 index 0000000..8b80292 --- /dev/null +++ b/components/security_apps/waap/waap_clib/Waf2Regex.h @@ -0,0 +1,100 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __WAF2_REGEX_H__c31bc34a +#define __WAF2_REGEX_H__c31bc34a + +// Note: good usage reference found here: http://codegists.com/snippet/c/pcre2_matchcpp_neurobin_c +// and also here https://svn.apache.org/repos/asf/httpd/httpd/trunk/server/util_pcre.c + +#define PCRE2_CODE_UNIT_WIDTH 8 +#include "Waf2Util.h" +#include "WaapRegexPreconditions.h" +#include +#include +#include +#include + +struct RegexMatch { + struct MatchGroup { + uint16_t index; + std::string name; + std::string value; + + MatchGroup(uint16_t index, const std::string &name, const std::string &value) + :index(index), name(name), value(value) { + } + }; + + std::vector groups; +}; + +struct RegexMatchRange { + PCRE2_SIZE start; + PCRE2_SIZE end; +// LCOV_EXCL_START Reason: coverage upgrade + RegexMatchRange() {} +// LCOV_EXCL_STOP + RegexMatchRange(PCRE2_SIZE start, PCRE2_SIZE end):start(start), end(end) {} +}; + +class SingleRegex : public boost::noncopyable { +friend class Regex; +public: + SingleRegex(const std::string &pattern, bool &error, const std::string ®exName, bool bNoRegex=false, + const std::string ®exMatchName="", const std::string ®exMatchValue=""); + ~SingleRegex(); + bool hasMatch(const std::string &s) const; + size_t findAllMatches(const std::string &s, std::vector &matches) const; + size_t findMatchRanges(const std::string &s, std::vector &matchRanges) const; + const std::string &getName() const; +private: + pcre2_code *m_re; + pcre2_match_data *m_matchData; + uint32_t m_captureGroupsCount; + std::vector m_captureNames; // capture index => name translation (unnamed items are empty strings) + std::string m_regexName; + bool m_noRegex; + std::string m_regexMatchName; + std::string m_regexMatchValue; +}; + +class Regex : public boost::noncopyable { +public: + Regex(const std::string &pattern, bool &error, const std::string ®exName); + Regex(const std::vector &patterns, bool &error, const std::string ®exName, + std::shared_ptr regexPreconditions); + ~Regex(); + bool hasMatch(const std::string &s) const; + size_t findAllMatches(const std::string &v, std::vector &maches, + const Waap::RegexPreconditions::PmWordSet *pmWordSet=nullptr) const; + std::string sub(const std::string &s, const std::string &repl="") const; + // Run regex search, and for each found match - run callback. + // The callback can cancel replacement of the match (leave source match "as-is"), provide a replacement string, + // or delete the match (replace with empty string). + // The "decodedCount" counts match replacement events and the "deletedCount" counts match deletion events. 
+ void sub( + const std::string &s, + Waap::Util::RegexSubCallback_f cb, + int &decodedCount, + int &deletedCount, + std::string &outStr) const; + const std::string &getName() const; +private: + std::vector m_sre; + std::string m_regexName; + std::shared_ptr m_regexPreconditions; + std::unordered_map> m_wordToRegexIndices; +}; + +#endif // __WAF2_REGEX_H__c31bc34a diff --git a/components/security_apps/waap/waap_clib/Waf2Util.cc b/components/security_apps/waap/waap_clib/Waf2Util.cc new file mode 100755 index 0000000..79cba75 --- /dev/null +++ b/components/security_apps/waap/waap_clib/Waf2Util.cc @@ -0,0 +1,1921 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "Waf2Util.h" + +#include "debug.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "CidrMatch.h" +#include "debug.h" +#include "config.h" +#include "generic_rulebase/rulebase_config.h" +#include "user_identifiers_config.h" +#include "Waf2Regex.h" + +using boost::algorithm::to_lower_copy; +using namespace std; + +USE_DEBUG_FLAG(D_WAAP); +USE_DEBUG_FLAG(D_WAAP_EVASIONS); + +#define MIN_HEX_LENGTH 6 +#define charToDigit(c) (c - '0') + +// See https://dev.w3.org/html5/html-author/charref +const struct HtmlEntity g_htmlEntities[] = +{ + {"Tab;", 0x0009}, + {"NewLine;", 0x000A}, + {"nbsp;", 0x00A0}, + {"NonBreakingSpace;", 0x00A0}, + {"excl;", 0x0021}, + {"num;", 0x0023}, + {"dollar;", 0x0024}, + {"percnt;", 0x0025}, + {"lpar;", 0x0028}, + {"rpar;", 0x0029}, + {"ast;", 0x002A}, + {"midast;", 0x002A}, + {"plus;", 0x002B}, + {"comma;", 0x002C}, + {"period;", 0x002E}, + {"sol;", 0x002F}, + {"colon;", 0x003A}, + {"semi;", 0x003B}, + {"iexcl;", 0x00A1}, + {"cent;", 0x00A2}, + {"pound;", 0x00A3}, + {"curren;", 0x00A4}, + {"yen;", 0x00A5}, + {"brvbar;", 0x00A6}, + {"sect;", 0x00A7}, + {"uml;", 0x00A8}, + {"copy;", 0x00A9}, + {"ordf;", 0x00AA}, + {"laquo;", 0x00AB}, + {"not;", 0x00AC}, + {"shy;", 0x00AD}, + {"reg;", 0x00AE}, + {"macr;", 0x00AF}, + {"deg;", 0x00B0}, + {"plusmn;", 0x00B1}, + {"sup2;", 0x00B2}, + {"sup3;", 0x00B3}, + {"acute;", 0x00B4}, + {"micro;", 0x00B5}, + {"para;", 0x00B6}, + {"middot;", 0x00B7}, + {"cedil;", 0x00B8}, + {"sup1;", 0x00B9}, + {"ordm;", 0x00BA}, + {"raquo;", 0x00BB}, + {"frac14;", 0x00BC}, + {"frac12;", 0x00BD}, + {"frac34;", 0x00BE}, + {"iquest;", 0x00BF}, + {"Agrave;", 0x00C0}, + {"Aacute;", 0x00C1}, + {"Acirc;", 0x00C2}, + {"Atilde;", 0x00C3}, + {"Auml;", 0x00C4}, + {"Aring;", 0x00C5}, + {"AElig;", 0x00C6}, + {"Ccedil;", 0x00C7}, + {"Egrave;", 0x00C8}, + {"Eacute;", 0x00C9}, + {"Ecirc;", 0x00CA}, + {"Euml;", 0x00CB}, + {"Igrave;", 0x00CC}, + {"Iacute;", 0x00CD}, + {"Icirc;", 0x00CE}, + {"Iuml;", 0x00CF}, + {"ETH;", 0x00D0}, + {"Ntilde;", 0x00D1}, + {"Ograve;", 0x00D2}, + {"Oacute;", 0x00D3}, + {"Ocirc;", 0x00D4}, + {"Otilde;", 0x00D5}, + {"Ouml;", 0x00D6}, + {"times;", 0x00D7}, + {"Oslash;", 0x00D8}, + {"Ugrave;", 0x00D9}, + {"Uacute;", 0x00DA}, + {"Ucirc;", 0x00DB}, + {"Uuml;", 0x00DC}, 
+ {"Yacute;", 0x00DD}, + {"THORN;", 0x00DE}, + {"szlig;", 0x00DF}, + {"agrave;", 0x00E0}, + {"aacute;", 0x00E1}, + {"acirc;", 0x00E2}, + {"atilde;", 0x00E3}, + {"auml;", 0x00E4}, + {"aring;", 0x00E5}, + {"aelig;", 0x00E6}, + {"ccedil;", 0x00E7}, + {"egrave;", 0x00E8}, + {"eacute;", 0x00E9}, + {"ecirc;", 0x00EA}, + {"euml;", 0x00EB}, + {"igrave;", 0x00EC}, + {"iacute;", 0x00ED}, + {"icirc;", 0x00EE}, + {"iuml;", 0x00EF}, + {"eth;", 0x00F0}, + {"ntilde;", 0x00F1}, + {"ograve;", 0x00F2}, + {"oacute;", 0x00F3}, + {"ocirc;", 0x00F4}, + {"otilde;", 0x00F5}, + {"ouml;", 0x00F6}, + {"divide;", 0x00F7}, + {"oslash;", 0x00F8}, + {"ugrave;", 0x00F9}, + {"uacute;", 0x00FA}, + {"ucirc;", 0x00FB}, + {"uuml;", 0x00FC}, + {"yacute;", 0x00FD}, + {"thorn;", 0x00FE}, + {"yuml;", 0x00FF}, + {"quot;", 0x0022}, + {"amp;", 0x0026}, + {"lt;", 0x003C}, + {"LT;", 0x003C}, + {"equals;", 0x003D}, + {"gt;", 0x003E}, + {"GT;", 0x003E}, + {"quest;", 0x003F}, + {"commat;", 0x0040}, + {"lsqb;", 0x005B}, + {"lback;", 0x005B}, + {"bsol;", 0x005C}, + {"rsqb;", 0x005D}, + {"rbrack;", 0x005D}, + {"Hat;", 0x005E}, + {"lowbar;", 0x005F}, + {"grave;", 0x0060}, + {"DiacriticalGrave;", 0x0060}, + {"lcub;", 0x007B}, + {"lbrace;", 0x007B}, + {"verbar;", 0x007C}, + {"vert;", 0x007C}, + {"VerticalLine;", 0x007C}, + {"rcub;", 0x007D}, + {"rbrace;", 0x007D}, + {"apos;", 0x0027}, + {"OElig;", 0x0152}, + {"oelig;", 0x0153}, + {"Scaron;", 0x0160}, + {"scaron;", 0x0161}, + {"Yuml;", 0x0178}, + {"circ;", 0x02C6}, + {"tilde;", 0x02DC}, + {"ensp;", 0x2002}, + {"emsp;", 0x2003}, + {"emsp13;", 0x2004}, + {"emsp14;", 0x2005}, + {"numsp;", 0x2007}, + {"puncsp;", 0x2008}, + {"thinsp;", 0x2009}, + {"ThinSpace;", 0x2009}, + {"hairsp;", 0x200A}, + {"VeryThinSpace;", 0x200A}, + {"ZeroWidthSpace;", 0x200B}, + {"NegativeVeryThinSpace;", 0x200B}, + {"NegativeThinSpace;", 0x200B}, + {"NegativeMediumSpace;", 0x200B}, + {"NegativeThickSpace;", 0x200B}, + {"zwnj;", 0x200C}, + {"zwj;", 0x200D}, + {"lrm;", 0x200E}, + {"rlm;", 0x200F}, + {"hyphen;", 0x2010}, + {"dash;", 0x2010}, + {"ndash;", 0x2013}, + {"mdash;", 0x2014}, + {"horbar;", 0x2015}, + {"Verbar;", 0x2016}, + {"Vert;", 0x2016}, + {"lsquo;", 0x2018}, + {"OpenCurlyQuote;", 0x2018}, + {"rsquo;", 0x2019}, + {"rsquor;", 0x2019}, + {"CloseCurlyQuote;", 0x2019}, + {"lsquor;", 0x201A}, + {"sbquo;", 0x201A}, + {"ldquo;", 0x201C}, + {"OpenCurlyDoubleQuote;", 0x201C}, + {"rdquo;", 0x201D}, + {"rdquor;", 0x201D}, + {"CloseCurlyDoubleQuote;", 0x201D}, + {"ldquor;", 0x201E}, + {"bdquo;", 0x201E}, + {"dagger;", 0x2020}, + {"Dagger;", 0x2021}, + {"permil;", 0x2030}, + {"lsaquo;", 0x2039}, + {"rsaquo;", 0x203A}, + {"euro;", 0x20AC}, + {"fnof;", 0x0192}, + {"Alpha;", 0x0391}, + {"Beta;", 0x0392}, + {"Gamma;", 0x0393}, + {"Delta;", 0x0394}, + {"Epsilon;", 0x0395}, + {"Zeta;", 0x0396}, + {"Eta;", 0x0397}, + {"Theta;", 0x0398}, + {"Iota;", 0x0399}, + {"Kappa;", 0x039A}, + {"Lambda;", 0x039B}, + {"Mu;", 0x039C}, + {"Nu;", 0x039D}, + {"Xi;", 0x039E}, + {"Omicron;", 0x039F}, + {"Pi;", 0x03A0}, + {"Rho;", 0x03A1}, + {"Sigma;", 0x03A3}, + {"Tau;", 0x03A4}, + {"Upsilon;", 0x03A5}, + {"Phi;", 0x03A6}, + {"Chi;", 0x03A7}, + {"Psi;", 0x03A8}, + {"Omega;", 0x03A9}, + {"alpha;", 0x03B1}, + {"beta;", 0x03B2}, + {"gamma;", 0x03B3}, + {"delta;", 0x03B4}, + {"epsilon;", 0x03B5}, + {"zeta;", 0x03B6}, + {"eta;", 0x03B7}, + {"theta;", 0x03B8}, + {"iota;", 0x03B9}, + {"kappa;", 0x03BA}, + {"lambda;", 0x03BB}, + {"mu;", 0x03BC}, + {"nu;", 0x03BD}, + {"xi;", 0x03BE}, + {"omicron;", 0x03BF}, + {"pi;", 0x03C0}, + {"rho;", 0x03C1}, + {"sigmaf;", 
0x03C2}, + {"sigma;", 0x03C3}, + {"tau;", 0x03C4}, + {"upsilon;", 0x03C5}, + {"phi;", 0x03C6}, + {"chi;", 0x03C7}, + {"psi;", 0x03C8}, + {"omega;", 0x03C9}, + {"thetasym;", 0x03D1}, + {"upsih;", 0x03D2}, + {"piv;", 0x03D6}, + {"bull;", 0x2022}, + {"hellip;", 0x2026}, + {"prime;", 0x2032}, + {"Prime;", 0x2033}, + {"oline;", 0x203E}, + {"frasl;", 0x2044}, + {"MediumSpace;", 0x205F}, + {"NoBreak;", 0x2060}, + {"ApplyFunction;", 2061}, + {"af;", 2061}, + {"it;", 0x2062}, + {"InvisibleTimes;", 0x2062}, + {"ic;", 0x2063}, + {"InvisibleComma;", 0x2063}, + {"weierp;", 0x2118}, + {"image;", 0x2111}, + {"real;", 0x211C}, + {"trade;", 0x2122}, + {"alefsym;", 0x2135}, + {"larr;", 0x2190}, + {"uarr;", 0x2191}, + {"rarr;", 0x2192}, + {"darr;", 0x2193}, + {"harr;", 0x2194}, + {"crarr;", 0x21B5}, + {"lArr;", 0x21D0}, + {"uArr;", 0x21D1}, + {"rArr;", 0x21D2}, + {"dArr;", 0x21D3}, + {"hArr;", 0x21D4}, + {"forall;", 0x2200}, + {"part;", 0x2202}, + {"exist;", 0x2203}, + {"empty;", 0x2205}, + {"nabla;", 0x2207}, + {"isin;", 0x2208}, + {"notin;", 0x2209}, + {"ni;", 0x220B}, + {"prod;", 0x220F}, + {"sum;", 0x2211}, + {"minus;", 0x2212}, + {"lowast;", 0x2217}, + {"radic;", 0x221A}, + {"prop;", 0x221D}, + {"infin;", 0x221E}, + {"ang;", 0x2220}, + {"and;", 0x2227}, + {"or;", 0x2228}, + {"cap;", 0x2229}, + {"cup;", 0x222A}, + {"int;", 0x222B}, + {"there4;", 0x2234}, + {"sim;", 0x223C}, + {"cong;", 0x2245}, + {"asymp;", 0x2248}, + {"ne;", 0x2260}, + {"equiv;", 0x2261}, + {"le;", 0x2264}, + {"ge;", 0x2265}, + {"sub;", 0x2282}, + {"sup;", 0x2283}, + {"nsub;", 0x2284}, + {"sube;", 0x2286}, + {"supe;", 0x2287}, + {"oplus;", 0x2295}, + {"otimes;", 0x2297}, + {"perp;", 0x22A5}, + {"sdot;", 0x22C5}, + {"lceil;", 0x2308}, + {"rceil;", 0x2309}, + {"lfloor;", 0x230A}, + {"rfloor;", 0x230B}, + {"lang;", 0x2329}, + {"rang;", 0x232A}, + {"loz;", 0x25CA}, + {"spades;", 0x2660}, + {"clubs;", 0x2663}, + {"hearts;", 0x2665}, + {"diams;", 0x2666} +}; + +const size_t g_htmlEntitiesCount = sizeof(g_htmlEntities) / sizeof(g_htmlEntities[0]); + +const char* g_htmlTags[] = { + "a", + "abbr", + "acronym", + "address", + "applet", + "embed", + "object", + "area", + "article", + "aside", + "audio", + "b", + "base", + "basefont", + "bdi", + "bdo", + "big", + "blockquote", + "body", + "br", + "button", + "canvas", + "caption", + "center", + "cite", + "code", + "col", + "colgroup", + "datalist", + "dd", + "del", + "details", + "dfn", + "dialog", + "dir", + "ul", + "div", + "dl", + "dt", + "em", + "fieldset", + "figcaption", + "figure", + "font", + "footer", + "form", + "frame", + "frameset", + "h1", + "h6", + "head", + "header", + "hr", + "html", + "i", + "iframe", + "img", + "input", + "ins", + "kbd", + "keygen", + "label", + "legend", + "li", + "link", + "main", + "map", + "mark", + "menu", + "menuitem", + "meta", + "meter", + "nav", + "noframes", + "noscript", + "ol", + "optgroup", + "option", + "output", + "p", + "param", + "pre", + "progress", + "q", + "rp", + "rt", + "ruby", + "s", + "samp", + "script", + "section", + "select", + "small", + "source", + "video", + "span", + "strike", + "strong", + "style", + "sub", + "summary", + "sup", + "table", + "tbody", + "td", + "textarea", + "tfoot", + "th", + "thead", + "time", + "title", + "tr", + "track", + "tt", + "u", + "var", + "wbr", + "event-source", + "math", + "svg", + "h1", + "h2", + "h3", + "h4", + "h5", + "h6" +}; + +const size_t g_htmlTagsCount = sizeof(g_htmlTags) / sizeof(g_htmlTags[0]); + +bool startsWithHtmlTagName(const char* text) { + for (size_t index = 0; index < 
g_htmlTagsCount; ++index) { + // Return true if text starts with one of html tags + if (my_stristarts_with(text, g_htmlTags[index])) { + // starts with html tag, followed by space/tab/crlf character (see man isspace(), + // or ends with '>' character. + char termChar = text[strlen(g_htmlTags[index])]; + if (isspace(termChar) || termChar == '>' || termChar == '/') { + return true; + } + } + } + + return false; +} + +string normalize_uri(const string& uri) { + string result; + string::const_iterator mark = uri.begin(); + bool isNumeric = false; + + string::const_iterator it = uri.begin(); + for (; it != uri.end() && *it != '?'; ++it) { + if (*it == '/') { + if (mark != it) { + if (isNumeric) { + result += "_num"; + } + else { + result.append(mark, it); + } + } + + result += "/"; + mark = it + 1; + isNumeric = true; + continue; + } + + // reset isNumeric flag on first non-digit character in the path element string + if (!isdigit(*it)) { + isNumeric = false; + } + } + + // At this point, "it" points to where scanning stopped (can be end of uri string or the '?' character) + // Append the rest of the string (or "_num" if last uri part was all numeric) - to the output. + if (mark != it) { + if (isNumeric) { + result += "_num"; + } + else { + result.append(mark, it); + } + } + + return result; +} + +string +normalize_param(const string& param) +{ + string result; + string::const_iterator mark = param.begin(); + bool isNumeric = true; + bool isHex = true; + + string::const_iterator it = param.begin(); + for (; it != param.end(); ++it) { + if (!isalnum(*it)) { + if (mark != it) { + if (isNumeric || (isHex && it - mark >= MIN_HEX_LENGTH)) { + result += "_num"; + } + else { + result.append(mark, it); + } + } + + result += *it; + mark = it + 1; + isNumeric = true; + isHex = true; + continue; + } + + // reset isNumeric flag on first non-digit character in the path element string + if (isHex && !isdigit(*it)) { + if (!isHexDigit(*it)) { + isHex = false; + } + isNumeric = false; + } + } + + // At this point, "it" points to where scanning stopped (can be end of uri string or the '?' character) + // Append the rest of the string (or "_num" if last uri part was all numeric) - to the output. + if (mark != it) { + if (isNumeric || (isHex && it - mark >= MIN_HEX_LENGTH)) { + result += "_num"; + } + else { + result.append(mark, it); + } + } + + return result; +} + +void unescapeUnicode(string& text) { + string::iterator it = text.begin(); + string::iterator result = it; + char acc[16]; // accumulates characters we are parsing and do not want to copy directly. + // max len really possible is "\u00000000" + 1 char = 11 chars + char* pAcc = NULL; // when non-NULL, points where to put next character inside acc buffer + int digitsAnticipated = 0; // in state STATE_ESCAPE, how many hex digits we anticipate to be parsed + uint32_t code = 0; // The Unicode codepoint value can't be larger than 32 bits + char* p; + // in state STATE_ESCAPE_X, how many non-zerohex digits discovered - to eliminate leading zeroes like \x000012 + int nonZeroHexCounter = 0; + enum { + STATE_COPY, + STATE_FLUSH, + STATE_ESCAPE, + STATE_ESCAPE_U, + STATE_ESCAPE_X + } state = STATE_COPY; + + for (; it != text.end(); ++it) { + const char ch = *it; + + switch (state) { + case STATE_FLUSH: { + // flush any accumulated left-overs into output buffer + if (pAcc) { + for (p = acc; p < pAcc; p++) { + *result++ = *p; + } + pAcc = NULL; // clear the acc buffer after we flushed it + } + state = STATE_COPY; + // fall-through + //RB: why no break? 
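+            // (The missing break appears intentional: after flushing, the current character
+            // still has to go through the STATE_COPY handling below, e.g. it may itself start
+            // a new '\' escape sequence.)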
+ } + // fallthrough + case STATE_COPY: { + + if (ch == '\\') { + // start accumulating characters instead of copying them + pAcc = acc; + state = STATE_ESCAPE; + break; + } + break; + } + case STATE_ESCAPE: { + // decide which kind of escape + if (ch == 'u') { + digitsAnticipated = 4; // parse/skip 4 hex digits + code = 0; + state = STATE_ESCAPE_U; + } + else if (ch == 'U') { + digitsAnticipated = 8; // parse/skip 8 hex digits + code = 0; + state = STATE_ESCAPE_U; + } + else if (ch == 'x') { +#if 1 + digitsAnticipated = 1; // anticipate at least one HEX digit after \x + code = 0; + nonZeroHexCounter = 0; + state = STATE_ESCAPE_X; +#else + digitsAnticipated = 2; // parse/skip 2 hex digits + code = 0; + state = STATE_ESCAPE_U; +#endif + } + else { + // this is invalid escape sequence: rollback and copy this character too + state = STATE_FLUSH; + } + break; + } + case STATE_ESCAPE_U: { + if (isHexDigit(ch)) { + // accumulate code value + code = (code << 4) + (isdigit(ch) ? ch - '0' : tolower(ch) - 'a' + 10); + digitsAnticipated--; + + if (digitsAnticipated == 0) { + // only output ASCII codes <= 127. "swallow" all unicode. + if (code <= 127) { + *result++ = (char)code; + } + else if (isSpecialUnicode(code)) { + *result++ = convertSpecialUnicode(code); + } + + if (pAcc) { + pAcc = NULL; // throw away the accumulated source (escaped) sequencec. + } + + // not STATE_COPY to avoid outputting current ch verbatim. + // FLUSH will output nothing because there's no ACC + state = STATE_FLUSH; + break; + } + + } + else { + // invalid (non-hex) digit enountered + state = STATE_FLUSH; + } + + break; + } + case STATE_ESCAPE_X: { + if (isHexDigit(ch)) { + if ((nonZeroHexCounter) > 1) { + *result++ = (char)code; + if (pAcc) { + pAcc = NULL; // throw away the accumulated source (escaped) sequence. + } + state = STATE_COPY; + } else { + code = (code << 4) + (isdigit(ch) ? ch - '0' : tolower(ch) - 'a' + 10); + code &= 0xFF; // clamp the code value to two last digits + // Once at least one valid hex digit is here the sequence is considered + // valid and there's no need to accumulate it anymore. + if (pAcc) { + pAcc = NULL; + } + + if (digitsAnticipated > 0) { + digitsAnticipated--; + } + if (code) { + nonZeroHexCounter++; + } + } + } else { + // According to C standard, '\x' sequence must be followed by at least 1 valid hex digit + if (digitsAnticipated > 0) { + // This is first character right after the '\x' sequence, + // and it is not a valid hex. This is bad sequence. + state = STATE_FLUSH; + } else { + // We found non-hex character that terminates our \xhhhhh... sequence + *result++ = (char)code; + if (pAcc) { + pAcc = NULL; // throw away the accumulated source (escaped) sequence. + } + + if (ch == '\\') { + // start accumulating characters instead of copying them + pAcc = acc; + state = STATE_ESCAPE; + break; + } + + // STATE_COPY will cause current character (sequence terminator) + // to be output verbatim. + state = STATE_COPY; + } + } + + if (digitsAnticipated > 0) { + digitsAnticipated--; + } + break; + } + } + + // Copy to output + if (state == STATE_COPY) { + *result++ = ch; + } + + // Accumulate + if (pAcc) { + // Ensure we don't have buffer overflow + assert(size_t(pAcc - acc) < sizeof(acc)); + *pAcc++ = ch; + } + } + + dbgTrace(D_WAAP) << " - LOOP FINISHED with state=" << state << "; digitsAnticipated=" << + digitsAnticipated << ", acc='" << string(acc, pAcc ? 
(int)(pAcc - acc) : 0) << "'"; + + // Output code if we just finished decoding an escape sequence succesully and reached end of string + if (state == STATE_ESCAPE_U && digitsAnticipated == 0) { + // only output ASCII codes <= 127. "swallow" all unicode. + if (code <= 127) { + *result++ = (char)code; + } + else if (isSpecialUnicode(code)) { + *result++ = convertSpecialUnicode(code); + } + + if (pAcc) { + pAcc = NULL; // throw away the accumulated source (escaped) sequencec. + } + } + else if (state == STATE_ESCAPE_X) { + if (isSpecialUnicode(code)) { + *result++ = convertSpecialUnicode(code); + } + else + { + *result++ = (char)code; + } + } + + // flush any accumulated left-overs into output buffer + if (pAcc) { + for (p = acc; p < pAcc; p++) { + *result++ = *p; + } + } + + text.erase(result, text.end()); +} + +// Attempts to validate and decode utf7-encoded chunk. +// Returns "next" iterator to position where to continue parsing for next chunks, and +// fills the "decoded" string with decoded data. +// If failed, the "next" will be equal to passed "it", and empty string put in "decoded". +inline const string::const_iterator +decodeUTF7Chunk(string::const_iterator it, string::const_iterator end, string& decoded) { + decoded.clear(); + unsigned char val = 0; + uint32_t acc = 0; + int acc_bits = 0; // how many bits are filled in acc + string::const_iterator next = it; + + while (it != end) { + unsigned char c = *it; + + if (c >= 'A' && c <= 'Z') { + val = c - 'A'; + } + else if (c >= 'a' && c <= 'z') { + val = c - 'a' + 26; + } + else if (c >= '0' && c <= '9') { + val = c - '0' + 52; + } + else if (c == '+') { + val = 62; + } + else if (c == '/') { + val = 63; + } + else if (c == '-') { + // end of encoded sequence (the '-' itself must not be output) + if (!decoded.empty()) { + next = it; + return next; // successfully decoded. Returns decoded data in "decoded" parameter + } + + decoded.clear(); // discard partial data + return next; + } + else { + decoded.clear(); // discard partial data + return next; + } + + acc = acc << 6 | val; + acc_bits += 6; + + if (acc_bits >= 16) { // we got 16 bits or more in the accumulator + int code = (acc >> (acc_bits - 16)) & 0xFFFF; + + // Take into account still-printable Unicode characters, convert them back to ASCII + if (isSpecialUnicode(code)) { + code = convertSpecialUnicode(code); + } + + // Stop and return empty if we hit at least one non-printable character + if (!isprint(code) && code != 0) { + decoded.clear(); // discard partial data + return next; + } + + decoded += (char)code; + acc_bits -= 16; + acc &= (1 - (1 << acc_bits)); // leave only acc_bits low bits in the acc, clear the rest. 
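+            // Illustrative walk-through: for the chunk "AGEAYgBj" ("abc" encoded as UTF-16BE
+            // base64), the third input character brings acc_bits to 18, so the top 16 bits
+            // (0x0061, i.e. 'a') are extracted above, leaving 2 bits in the accumulator.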
+ } + + it++; + } + + decoded.clear(); // discard partial data + return next; +} + +string filterUTF7(const string& text) { + string result; + string decoded; + decoded.reserve(8); + result.reserve(text.length()); + + for (string::const_iterator it = text.begin(); it != text.end(); ++it) { + if (*it == '+') { + if (it + 1 == text.end()) { // "+" at end of string + result += *it; + } + else if (*(it + 1) == '-') { + // '+-' combination is converted to single '+' + result += '+'; + it++; // this skips the "-" + if (it == text.end()) { + break; + } + } + else { + // attempt to decode chunk + it = decodeUTF7Chunk(it + 1, text.end(), decoded); + if (decoded.empty()) { // decoding failed + result += '+'; + result += *it; + } + else { // decoding succeeded + result += decoded; + } + } + } + else { + result += *it; + } + } + + return result; +} + +// Attempts to validate and decode base64-encoded chunk. +// Value is the full value inside which potential base64-encoded chunk was found, +// it and end point to start and end of that chunk. +// Returns true if succesful (and fills the "decoded" string with decoded data). +// Success criterias: +// 0. encoded sequence covers the whole value (doesn't have any characters around it) +// 1. encoded sequence consist of base64 alphabet (may end with zero, one or two '=' characters') +// 2. length of encoded sequence is exactly divisible by 4. +// 3. length of decoded is minimum 5 characters. +// 4. percent of non-printable characters (!isprint()) +// in decoded data is less than 10% (statistical garbage detection). +// Returns false above checks fail. +bool +b64DecodeChunk( + const string& value, + string::const_iterator it, + string::const_iterator end, + string& decoded) +{ + decoded.clear(); + uint32_t acc = 0; + int acc_bits = 0; // how many bits are filled in acc + int terminatorCharsSeen = 0; // whether '=' character was seen, and how many of them. + uint32_t nonPrintableCharsCount = 0; + + dbgTrace(D_WAAP) << "b64DecodeChunk: value='" << value << "' match='" << string(it, end) << "'"; + + // skip "base64," prefix if the line is starting with it. + if (end - it >= 7 && + *it == 'b' && + *(it + 1) == 'a' && + *(it + 2) == 's' && + string(it, it + 7) == "base64,") { + it += 7; // skip the prefix + } + else { + // If the base64 candidate match within value is surrounded by other dat + // (doesn't cover the value fully) - ignore the match. + // This will result in the match being scanned raw. + // Note that this purposedly doesn't include matches starting with "base64," + // prefix: we do want those prefixed matches to be decoded! + if (it != value.begin() || end != value.end()) { + dbgTrace(D_WAAP) << "b64DecodeChunk: (leave as-is) because match is surrounded by other data."; + return false; + } + } + + // The encoded data length (without the "base64," prefix) should be exactly divisible by 4 + // to be considered valid base64. + if ((end - it) % 4 != 0) { + dbgTrace(D_WAAP) << + "b64DecodeChunk: (leave as-is) because encoded data length should be exactly divisible by 4."; + return false; + } + + while (it != end) { + unsigned char c = *it; + + if (terminatorCharsSeen) { + // terminator characters must all be '=', until end of match. 
+ if (c != '=') { + dbgTrace(D_WAAP) << + "b64DecodeChunk: (leave as-is) because terminator characters must all be '=', until end of match."; + return false; + } + + // We should see 0, 1 or 2 (no more) terminator characters + terminatorCharsSeen++; + + if (terminatorCharsSeen > 2) { + dbgTrace(D_WAAP) << "b64DecodeChunk: (leave as-is) because terminatorCharsSeen > 2"; + return false; + } + + // allow for more terminator characters + it++; + continue; + } + + unsigned char val = 0; + + if (c >= 'A' && c <= 'Z') { + val = c - 'A'; + } + else if (c >= 'a' && c <= 'z') { + val = c - 'a' + 26; + } + else if (isdigit(c)) { + val = c - '0' + 52; + } + else if (c == '+') { + val = 62; + } + else if (c == '/') { + val = 63; + } + else if (c == '=') { + // Start tracking terminator characters + terminatorCharsSeen++; + it++; + continue; + } + else { + dbgTrace(D_WAAP) << "b64DecodeChunk: (leave as-is) because of non-base64 character ('" << c << + "', ASCII " << (unsigned int)c << ")"; + return false; // non-base64 character + } + + acc = (acc << 6) | val; + acc_bits += 6; + + if (acc_bits >= 8) { + int code = (acc >> (acc_bits - 8)) & 0xFF; + // only leave low "acc_bits-8" bits, clear all higher bits + uint32_t mask = ~(1 << (acc_bits - 8)); + acc &= mask; + acc_bits -= 8; + + // Count non-printable characters seen + if (!isprint(code)) { + nonPrintableCharsCount++; + } + + decoded += (char)code; + } + + it++; + } + + // end of encoded sequence decoded. + + dbgTrace(D_WAAP) << "b64DecodeChunk: decoded.size=" << decoded.size() << ", nonPrintableCharsCount=" << + nonPrintableCharsCount << "; decoded='" << decoded << "'"; + + // Return success only if decoded.size>=5 and there are less than 10% of non-printable + // characters in output. + if (decoded.size() >= 5) { + if (nonPrintableCharsCount * 10 < decoded.size()) { + dbgTrace(D_WAAP) << "b64DecodeChunk: (decode/replace) decoded.size=" << decoded.size() << + ", nonPrintableCharsCount=" << nonPrintableCharsCount << ": replacing with decoded data"; + } + else { + dbgTrace(D_WAAP) << "b64DecodeChunk: (delete) because decoded.size=" << decoded.size() << + ", nonPrintableCharsCount=" << nonPrintableCharsCount; + // If percentage of non-printable characters in decoded is high, filter them out to prevent false alarms. + decoded.clear(); + } + return true; // successfully decoded. Returns decoded data in "decoded" parameter + } + + // If decoded size is too small - leave the encoded value (return false) + decoded.clear(); // discard partial data + dbgTrace(D_WAAP) << "b64DecodeChunk: (leave as-is) because decoded too small. 
decoded.size=" << decoded.size() << + ", nonPrintableCharsCount=" << nonPrintableCharsCount; + return false; +} + +vector split(const string& s, char delim) { + vector elems; + stringstream ss(s); + string value; + while (getline(ss, value, delim)) { + elems.push_back(Waap::Util::trim(value)); + } + return elems; +} + +namespace Waap { +namespace Util { + +#define B64_TRAILERCHAR '=' +static const string b64_prefix("base64,"); +static bool err = false; + +static const SingleRegex invalid_hex_evasion_re( + "%([g-zG-Z][0-9a-zA-Z]|[0-9a-zA-Z][g-zG-Z])", + err, + "invalid_hex_evasion" +); +static const SingleRegex broken_utf_evasion_re( + "(?:^|[^%])(%[0-9a-f]%[0-9a-f])", + err, + "broken_utf_evasion" +); + +static void b64TestChunk(const string &s, + string::const_iterator chunkStart, + string::const_iterator chunkEnd, + RegexSubCallback_f cb, + int &decodedCount, + int &deletedCount, + string &outStr) +{ + size_t chunkLen = (chunkEnd - chunkStart); + + if ((chunkEnd - chunkStart) > static_cast(b64_prefix.size()) && + chunkStart[0] == 'b' && chunkStart[1] == 'a' && chunkStart[2] == 's' && chunkStart[3] == 'e' && + chunkStart[4] == '6' && chunkStart[5] == '4' && chunkStart[6] == ',') { + chunkLen -= b64_prefix.size(); + } + + size_t chunkRem = chunkLen % 4; + + // Only match chunk whose length is divisible by 4 + string repl; + if (chunkRem == 0 && cb(s, chunkStart, chunkEnd, repl)) { + // Succesfully matched b64 chunk + if (!repl.empty()) { + outStr += repl; + decodedCount++; + } + else { + deletedCount++; + } + } + else { + // Chunk was not processed - put original text + size_t from = chunkStart - s.begin(); + size_t len = chunkEnd - chunkStart; + outStr += s.substr(from, len); + } +} + +void b64Decode( + const string &s, + RegexSubCallback_f cb, + int &decodedCount, + int &deletedCount, + string &outStr) +{ + decodedCount = 0; + deletedCount = 0; + outStr = ""; + int offsetFix = 0; + + string::const_iterator it = s.begin(); + + // Minimal length + if (s.end() - it < 8) { + return; + } + + // Search for substrings that match these criterias: + // 1. substring length is divisible by 4 + // 2. 
substring contains only letters a-z, 0-9, '/' or '+' except last 1 or two characters that can be '=' + + string::const_iterator chunkStart = s.end(); + for (; it != s.end(); ++it) { + bool isB64AlphaChar = Waap::Util::isAlphaAsciiFast(*it) || isdigit(*it) || *it=='/' || *it=='+'; + if (chunkStart == s.end()) { + if (isB64AlphaChar) { + // start tracking potential b64 chunk + chunkStart = it; + } + else { + // Add anything before the potential match + outStr += string(1, *it); + } + } + else { + // tracking b64 chunk + if (!isB64AlphaChar) { + if (*it == ',') { + // Check back and skip the "base64," prefix + if (chunkStart + b64_prefix.size() - 1 == it) { + string cand(chunkStart, it + 1); + if (cand == b64_prefix) { + offsetFix = b64_prefix.size(); + continue; + } + } + } + + size_t chunkLen = (it - chunkStart) - offsetFix; + size_t chunkRem = chunkLen % 4; + + // Allow only one or two '=' characters at the end of the match + if ((*it == B64_TRAILERCHAR) && (chunkRem == 2 || chunkRem == 3)) { + continue; + } + + // Decode and add chunk + b64TestChunk(s, chunkStart, it, cb, decodedCount, deletedCount, outStr); + + // stop tracking b64 chunk + outStr += string(1, *it); // put the character that terminated the chunk + chunkStart = s.end(); + offsetFix = 0; + } + } + } + + if (chunkStart != s.end()) { + b64TestChunk(s, chunkStart, it, cb, decodedCount, deletedCount, outStr); + } +} + +// Base64 functions stolen from orchestration_tools.cc +static const string base64_base_str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +string +base64Encode(const string &input) +{ + string out; + int val = 0, val_base = -6; + for (unsigned char c : input) { + val = (val << 8) + c; + val_base += 8; + while (val_base >= 0) { + out.push_back(base64_base_str[(val >> val_base) & 0x3F]); + val_base -= 6; + } + } + // -6 indicates the number of bits to take from each character + // (6 bits is enough to present a range of 0 to 63) + if (val_base > -6) out.push_back(base64_base_str[((val << 8) >> (val_base + 8)) & 0x3F]); + while (out.size() % 4) out.push_back('='); + return out; +} + +bool find_in_map_of_stringlists_keys(const string &what, const map_of_stringlists_t &where) +{ + for (map_of_stringlists_t::const_iterator it = where.begin(); it != where.end(); ++it) { + if (it->first.find(what) != string::npos) { + return true; + } + } + + return false; +} + +void remove_in_map_of_stringlists_keys(const string &what, map_of_stringlists_t &where) +{ + map_of_stringlists_t::iterator it = where.begin(); + + while (it != where.end()) { + if (it->first.find(what) != string::npos) { + it = where.erase(it); + } + else { + it++; + } + } +} + +void remove_startswith(vector &vec, const string &prefix) +{ + vec.erase( + remove_if(vec.begin(), vec.end(), + [&prefix](const string &kw) + { + return boost::starts_with(kw, prefix); + } + ), + vec.end() + ); +} + +string AES128Decrypt( + string& key, + string& iv, + string& message) +{ + unsigned char* outdata = new unsigned char[message.length()]; + + // data structure that contains the key itself + AES_KEY aes_key; + + // set the encryption key + AES_set_decrypt_key((const unsigned char*)key.c_str(), 128, &aes_key); + + AES_cbc_encrypt( + (unsigned char*)message.c_str(), + outdata, message.length(), + &aes_key, (unsigned char*)iv.c_str(), + AES_DECRYPT + ); + + // get value without padding + size_t padding = outdata[message.length() - 1]; // last byte contain padding size + string result = string((const char*)outdata, message.length() - padding); + + 
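+    // Note (illustrative, assuming PKCS#7-style padding as implied by the last-byte convention):
+    // a 7-byte plaintext encrypted under AES-128-CBC is padded to a 16-byte block with nine 0x09
+    // bytes, so the last decrypted byte (9) is exactly the amount stripped above.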
delete[] outdata; + return result; +} + +string +base64Decode(const string &input) +{ + string out; + vector T(256, -1); + for (int i = 0; i < 64; i++) { + T[base64_base_str[i]] = i; + } + int val = 0, valb = -8; + for (unsigned char c : input) { + if (T[c] == -1) break; + val = (val << 6) + T[c]; + valb += 6; + if (valb >= 0) { + out.push_back(char((val >> valb) & 0xFF)); + valb -= 8; + } + } + return out; +} + +bool +containsInvalidUtf8(const string &payload) +{ + return invalid_hex_evasion_re.hasMatch(payload); +} + +string +unescapeInvalidUtf8(const string &payload) +{ + dbgFlow(D_WAAP_EVASIONS); + vector regex_matches; + invalid_hex_evasion_re.findMatchRanges(payload, regex_matches); + + string unescaped_text = payload; + for (const auto &match : regex_matches) { + static const int evasion_pattern_length = 3; + + int num = 0; + size_t pos = match.start + 1; + for (; pos < match.end; pos++) { + const char &byte = unescaped_text[pos]; + if (isdigit(byte)) { + num = (num << 4) + charToDigit(byte); + } else { + num = (num << 4) + ((tolower(byte) - 'a') + 10); + } + } + + char buf[evasion_pattern_length]; + snprintf(buf, evasion_pattern_length, "%02x", (num & 0xff)); + unescaped_text.replace(match.start + 1, evasion_pattern_length - 1, buf); + + dbgTrace(D_WAAP_EVASIONS) << "Value after conversion: decimal = " << num << ", hex = " << buf; + } + + dbgTrace(D_WAAP_EVASIONS) << "unescaped_text: " << unescaped_text; + + return unescaped_text; +} + +bool +containsBrokenUtf8(const string &payload) +{ + return broken_utf_evasion_re.hasMatch(payload); +} + +string +unescapeBrokenUtf8(const string &payload) +{ + string unescaped_text; + unescaped_text.reserve(payload.length()); + + int prev_esc_pos = -1; + for (size_t pos = 0; pos < payload.length(); ++pos) { + char c = payload[pos]; + if (c == '%') { + // skip copying current '%' when encountered with the 2nd '%' + // that follows and followed by only one hex digit + if (prev_esc_pos >= 0 && pos-prev_esc_pos == 2 && isxdigit(payload[pos-1]) && + pos+1 < payload.length() && isxdigit(payload[pos+1]) ) { + prev_esc_pos = -1; + continue; + } + // mark current '%' only when not following another '%' + if (prev_esc_pos < 0 || pos-prev_esc_pos > 1) { + prev_esc_pos = pos; + } + } + unescaped_text += c; + } + + dbgTrace(D_WAAP_EVASIONS) << "unescaped_text: " << unescaped_text; + return unescaped_text; +} + +string +charToString(const char* s, int slen) +{ + if (!s || slen == 0) return ""; + + return string(s, slen); +} + +string +vecToString(const vector& vec, char delim) { + ostringstream vts; + + string delimStr; + delimStr.push_back(delim); + if (delim != '\n') + { + delimStr.push_back(' '); + } + + vts << "["; + if (!vec.empty()) + { + // Convert all but the last element to avoid a trailing "," + copy(vec.begin(), vec.end() - 1, + ostream_iterator(vts, delimStr.c_str())); + + // Now add the last element with no delimiter + vts << vec.back(); + } + else + { + return string(); + } + vts << "]"; + + + return vts.str(); +} + + +string +obfuscateXor(const string& toEncrypt) { + char key[] = "CHECKPOINT"; //Any chars will work + string output = toEncrypt; + + for (size_t i = 0; i < toEncrypt.size(); i++) { + output[i] = toEncrypt[i] ^ key[i % ((sizeof(key)-1) / sizeof(char))]; + } + + return output; +} + +string +obfuscateXorBase64(const string& toEncrypt) { + return base64Encode(obfuscateXor(toEncrypt)); +} + +string injectSpacesToString(const string& str) { + string retStr = ""; + if (str.length() == 0) + { + return retStr; + } + 
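+    // Illustrative example: injectSpacesToString("abc") yields "a b c" - each input character is
+    // copied to an even index of a double-length buffer of spaces and the trailing space is popped.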
retStr.resize(str.length() * 2, ' '); + for (size_t i = 0; i < str.length(); i++) + { + retStr[i * 2] = str[i]; + } + retStr.pop_back(); + return retStr; +} + +ReportIS::Severity computeSeverityFromThreatLevel(ThreatLevel threatLevel) { + if (threatLevel == NO_THREAT) { + return ReportIS::Severity::INFO; + } + else if (threatLevel == THREAT_INFO) { + return ReportIS::Severity::LOW; + } + else if (threatLevel == LOW_THREAT) { + return ReportIS::Severity::MEDIUM; + } + else if (threatLevel == MEDIUM_THREAT) { + return ReportIS::Severity::HIGH; + } + + return ReportIS::Severity::CRITICAL; +} + +ReportIS::Priority computePriorityFromThreatLevel(ThreatLevel threatLevel) { + if (threatLevel == NO_THREAT) { + return ReportIS::Priority::LOW; + } + else if (threatLevel == THREAT_INFO) { + return ReportIS::Priority::MEDIUM; + } + else if (threatLevel == LOW_THREAT) { + return ReportIS::Priority::MEDIUM; + } + else if (threatLevel == MEDIUM_THREAT) { + return ReportIS::Priority::HIGH; + } + + return ReportIS::Priority::HIGH; +} + +string computeConfidenceFromThreatLevel(ThreatLevel threatLevel) +{ + switch(threatLevel) { + case NO_THREAT: return "Low"; + case THREAT_INFO: return "Low"; + case LOW_THREAT: return "Medium"; + case MEDIUM_THREAT: return "High"; + case HIGH_THREAT: return "Very High"; + } + dbgWarning(D_WAAP) << "Reached impossible threat level value of: " << static_cast(threatLevel); + return "Low"; +} + +void decodePercentEncoding(string &text, bool decodePlus) +{ + // Replace %xx sequences by their single-character equivalents. + // Do not replace the '+' symbol by space character because this would corrupt some base64 source strings + // (base64 alphabet includes the '+' character). + text.erase( + unquote_plus(text.begin(), text.end(), checkUrlEncoded(text.data(), text.size()), decodePlus), text.end() + ); + dbgTrace(D_WAAP) << "decodePercentEncoding: (after unquote_plus) '" << text << "'"; +} + +// Try to detect/decode UTF16 (detecting either BE and LE variant). +// The function uses statistics to try to guess whether UTF-16 is present and its exact variant (Big/Little endianess) +// If UTF-16 value is detected, value in cur_val is converted to utf8 in-place for use in later processing. +void decodeUtf16Value(const ValueStatsAnalyzer &valueStats, string &cur_val) +{ + // Do not change cur_val if UTF16 is not detected + if (!valueStats.isUTF16) { + return; + } + + dbgTrace(D_WAAP) << "decoding UTF-16 into UTF-8 in-place"; + + bool isBigEndian = false; + size_t pos = 0; + + // First, detect BOM as a hint of UTF16-BE vs. LE variant. 
See https://unicode.org/faq/utf_bom.html#utf8-4 + if (cur_val[0] == (char)0xFE && cur_val[1] == (char)0xFF) { + // UTF16-BE hint + isBigEndian = true; + // Skip the BOM + pos++; + } + else if (cur_val[0] == (char)0xFF && cur_val[1] == (char)0xFE) { + // UTF16-LE hint + isBigEndian = false; + // Skip the BOM + pos++; + } + else { + isBigEndian = (valueStats.longestZerosSeq[0] > valueStats.longestZerosSeq[1]); + } + + // Decode utf16 into utf8 + string utf8Out; + for (; pos> 6) | 0xC0; + utf8Out += (code & 0x3F) | 0x80; + } + else { // (code <= 0xFFFF : always true because code type is unsigned short which is 16-bit + utf8Out += (code >> 12) | 0xE0; + utf8Out += ((code >> 6) & 0x3F) | 0x80; + utf8Out += (code & 0x3F) | 0x80; + } + } + + // Return the value converted from UTF-16 to UTF-8 + cur_val = utf8Out; +} + +bool testUrlBareUtf8Evasion(const string &line) { + size_t percentPos = 0; + + while (percentPos < line.size()) { + percentPos = line.find("%", percentPos); + + if (percentPos == string::npos) { + return false; + } + + if (percentPos + 2 < line.size() && tolower(line[percentPos + 1]) == 'c' && line[percentPos + 2] == '0') { + // found "%c0" + return true; + } + + // Continue searching from next character after '%' + percentPos++; + } + + return false; +} + +bool testUrlBadUtf8Evasion(const string &line) { + size_t percentPos = 0; + + while (percentPos < line.size()) { + percentPos = line.find("%", percentPos); + + if (percentPos == string::npos) { + return false; + } + + if (percentPos + 2 < line.size() && tolower(line[percentPos + 1]) == 'c' && line[percentPos + 2] == '1') { + // found "%c1" + return true; + } + + // Continue searching from next character after '%' + percentPos++; + } + + return false; +} + +string urlDecode(string src) { + src.erase(unquote_plus(src.begin(), src.end(), true, false), src.end()); + return src; +} + +// LCOV_EXCL_START Reason: The function will be deleted on another task +string +stripOptionalPort(const string::const_iterator &first, const string::const_iterator &last) +{ + // Microsoft XFF+IPv6+Port yikes - see also here https://github.com/eclipse/jetty.project/issues/3630 + if (*first == '[') { + // Possible bracketed IPv6 address such as "[2001:db8::1]" + optional numeric ":" + auto close_bracket = find(first + 1, last, ']'); + if (close_bracket == last) return string(first, last); + return string(first+1, close_bracket); + } + + auto first_colon = find(first, last, ':'); + if (first_colon == last) return string(first, last); + + // If there is more than one colon it means its probably IPv6 address without brackets + auto second_colon = find(first_colon + 1, last, ':'); + if (second_colon != last) return string(first, last); + + // If there's only one colon it can't be IPv6 and can only be IPv4 with port + return string(first, first_colon); +} + +bool +isIpTrusted(const string &ip, const vector &trusted_ips) +{ + Waap::Util::CIDRData cidr_data; + for (const auto &trusted_ip : trusted_ips) { + if ( + ip == trusted_ip || + (Waap::Util::isCIDR(trusted_ip, cidr_data) && Waap::Util::cidrMatch(ip, cidr_data)) + ) { + return true; + } + } + return false; +} + +string extractForwardedIp(const string &x_forwarded_hdr_val) +{ + vector xff_splitted = split(x_forwarded_hdr_val, ','); + vector trusted_ips; + string forward_ip; + + auto identify_config = getConfiguration( + "rulebase", + "usersIdentifiers" + ); + + if (!identify_config.ok()) { + dbgDebug(D_WAAP) << "did not find xff definition in policy"; + } else { + trusted_ips = 
(*identify_config).getHeaderValuesFromConfig("x-forwarded-for"); + } + + if (xff_splitted.size() > 0) + { + for (size_t k = 0; k < xff_splitted.size(); ++k) + { + string optional_result = trim(xff_splitted[k]); + optional_result = stripOptionalPort(optional_result.cbegin(), optional_result.cend()); + if (isIpAddress(optional_result)) + { + if (!isIpTrusted(optional_result, trusted_ips) && !trusted_ips.empty()) { + return ""; + } else if (forward_ip.empty()) { + forward_ip = optional_result; + } + } + } + } + return forward_ip; +} + +bool isIpAddress(const string &ip_address) +{ + struct in_addr source_inaddr; + struct in6_addr source_inaddr6; + + // check from which type the target ip and check if ip belongs to is mask ip + //convert sourceip to ip v4 or v6. + bool isIpV4 = inet_pton(AF_INET, ip_address.c_str(), &source_inaddr) == 1; + bool isIpV6 = inet_pton(AF_INET6, ip_address.c_str(), &source_inaddr6) == 1; + + return isIpV4 || isIpV6; +} + +// LCOV_EXCL_STOP + +string extractKeyValueFromCookie(const string &cookie, const string &key) +{ + string source = ""; + vector cookie_splitted = split(cookie, ';'); + for (size_t l = 0; l < cookie_splitted.size(); ++l) + { + vector cookie_key_splitted = split(cookie_splitted[l], '='); + if (cookie_key_splitted.empty()) + { + dbgWarning(D_WAAP) << "Failed to split the key-value from: " << cookie_splitted[l]; + continue; + } + if (cookie_key_splitted[0] == key) + { + source = cookie_key_splitted[1]; + + if (key == "_oauth2_proxy") + { + source = Waap::Util::base64Decode(source); + + vector currentUserIdentifier_splitted = split(source, '|'); + + if (currentUserIdentifier_splitted.size() > 0) + { + source = currentUserIdentifier_splitted[0]; + } + } + break; + } + } + dbgTrace(D_WAAP) << "extracted source from Cookie:" << key << " : " << source; + return source; +} + +bool vectorStringContain(const vector& vec, const string& str) +{ + for(auto ¶m : vec) { + if(param.compare(str) == 0) + { + return true; + } + } + return false; +} + +ContentType detectContentType(const char* hdr_value) { + const char* plus_p = ::strrchr(hdr_value, '+'); + // Detect XML content type if Content-Type header value ends with "+xml". 
+ // For example: "application/xhtml+xml", or "image/svg+xml" + // For reference: see first answer here: + // https://stackoverflow.com/questions/2965587/valid-content-type-for-xml-html-and-xhtml-documents + if (plus_p && my_stricmp(plus_p + 1, "xml")) { + return CONTENT_TYPE_XML; + } + + const char* slash_p = ::strrchr(hdr_value, '/'); + + if (slash_p) { + // Detect XML content type if Content-Type header value ends with "/xml" + if (my_stricmp(slash_p + 1, "xml")) { + return CONTENT_TYPE_XML; + } + + // Detect JSON content type if Content-Type header value is application/json or ends with "/json" + if (my_stricmp(slash_p + 1, "json") || my_stristarts_with(hdr_value, "application/json")) { + return CONTENT_TYPE_JSON; + } + + // Detect HTML content type + if (my_stristarts_with(hdr_value, "text/html")) { + return CONTENT_TYPE_HTML; + } + + // Detect Multiplart Form Data content type + if (my_stristarts_with(hdr_value, "multipart/form-data")) { + return CONTENT_TYPE_MULTIPART_FORM; + } + + // Detect URL Encoded content type + if (my_stristarts_with(hdr_value, "application/x-www-form-urlencoded")) { + return CONTENT_TYPE_URLENCODED; + } + + // Detect binary xml file type + if (my_stristarts_with(hdr_value, "application/vnd.ms-sync.wbxml")) { + return CONTENT_TYPE_WBXML; + } + } + + return CONTENT_TYPE_UNKNOWN; +} + +string convertParamTypeToStr(ParamType type) +{ + switch (type) + { + case UNKNOWN_PARAM_TYPE: + return "unknown"; + case HTML_PARAM_TYPE: + return "html_input"; + case URL_PARAM_TYPE: + return "urls"; + case FREE_TEXT_PARAM_TYPE: + return "free_text"; + case PIPE_PARAM_TYPE: + return "pipes"; + case LONG_RANDOM_TEXT_PARAM_TYPE: + return "long_random_text"; + case BASE64_PARAM_TYPE: + return "base64"; + case ADMINISTRATOR_CONFIG_PARAM_TYPE: + return "administration_config"; + case FILE_PATH_PARAM_TYPE: + return "local_file_path"; + case SEMICOLON_DELIMITED_PARAM_TYPE: + return "semicolon_delimiter"; + case ASTERISK_DELIMITED_PARAM_TYPE: + return "asterisk_delimiter"; + case COMMA_DELIMITED_PARAM_TYPE: + return "comma_delimiter"; + case AMPERSAND_DELIMITED_PARAM_TYPE: + return "ampersand_delimiter"; + case BINARY_PARAM_TYPE: + return "binary_input"; + default: + dbgWarning(D_WAAP) << "unrecognized type " << to_string(type); + return "unrecognized type"; + } +} + +ParamType convertTypeStrToEnum(const string& typeStr) +{ + static unordered_map sNameTypeMap = { + {"unknown", ParamType::UNKNOWN_PARAM_TYPE}, + {"administration_config", ParamType::ADMINISTRATOR_CONFIG_PARAM_TYPE}, + {"base64", ParamType::BASE64_PARAM_TYPE }, + {"free_text", ParamType::FREE_TEXT_PARAM_TYPE}, + {"html_input", ParamType::HTML_PARAM_TYPE}, + {"long_random_text", ParamType::LONG_RANDOM_TEXT_PARAM_TYPE}, + {"pipes", ParamType::PIPE_PARAM_TYPE}, + {"urls", ParamType::URL_PARAM_TYPE}, + {"local_file_path", ParamType::FILE_PATH_PARAM_TYPE}, + {"semicolon_delimiter", ParamType::SEMICOLON_DELIMITED_PARAM_TYPE}, + {"asterisk_delimiter", ParamType::ASTERISK_DELIMITED_PARAM_TYPE}, + {"comma_delimiter", ParamType::COMMA_DELIMITED_PARAM_TYPE}, + {"ampersand_delimiter", ParamType::AMPERSAND_DELIMITED_PARAM_TYPE}, + {"binary_input", ParamType::BINARY_PARAM_TYPE} + }; + + auto mapItr = sNameTypeMap.find(typeStr); + if (mapItr != sNameTypeMap.end()) + { + return mapItr->second; + } + dbgWarning(D_WAAP) << "unrecognized parameter type name: " << typeStr; + return UNKNOWN_PARAM_TYPE; + +} + + +} +} diff --git a/components/security_apps/waap/waap_clib/Waf2Util.h b/components/security_apps/waap/waap_clib/Waf2Util.h new file 
mode 100755 index 0000000..6b47d22 --- /dev/null +++ b/components/security_apps/waap/waap_clib/Waf2Util.h @@ -0,0 +1,1154 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __WAF2_UTIL_H__148aa7e4 +#define __WAF2_UTIL_H__148aa7e4 + +#include "WaapValueStatsAnalyzer.h" +#include "log_generator.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "WaapEnums.h" +#include "yajl/yajl_gen.h" + +#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) + +// This is portable version of stricmp(), which is non-standard function (not even in C). +// Contrary to stricmp(), for a slight optimization, s2 is ASSUMED to be already in lowercase. +// s1 can be in mixed case and is convetred using tolower() before comparing to s2. +// The function returns true if s1 (with all charactes lowered case) matches s2, false if not. +inline bool my_stricmp(const char *s1, const char *s2) { + assert(s1 != NULL); + assert(s2 != NULL); + + // Compare header name, case insensitively, to "content-type" + while (*s1 && *s2 && tolower(*s1)==*s2) { + s1++; + s2++; + } + + // returns true if s1 (after applying tolower()) eactly matches s2 + return (*s1=='\0' && *s2=='\0'); +} + +// same as my_stricmp(), but assumes s1 has known size, and does not assume s1 string is null-terminated. +inline bool my_strincmp(const char *s1, const char *s2, size_t s1_size) { + assert(s1 != NULL); + assert(s2 != NULL); + + // Compare header name, case insensitively, to "content-type" + while (s1_size > 0 && *s2 && tolower(*s1)==*s2) { + s1++; + s2++; + s1_size--; // reduce s1_size until we exhaust at most s1_size characters of the s1 string. 
+ } + + // returns true if s1 (after applying tolower()) eactly matches s2 + return (s1_size==0 && *s2=='\0'); +} + +inline bool my_stristarts_with(const char *s1, const char *s2) { + assert(s1 != NULL); + assert(s2 != NULL); + + // Compare case insensitively + while (*s1 && *s2 && tolower(*s1)==*s2) { + s1++; + s2++; + } + + // returns true if s1 (after applying tolower()) starts with s2 + // (this happens when we finished to examine all s2 and it compared correctly to start of s1) + return (*s2=='\0'); +} + +inline unsigned char from_hex(unsigned char ch, bool &valid) { + valid = true; + + if (ch <= '9' && ch >= '0') + ch -= '0'; + else if (ch <= 'f' && ch >= 'a') + ch -= 'a' - 10; + else if (ch <= 'F' && ch >= 'A') + ch -= 'A' - 10; + else { + valid = false; + ch = 0; + } + + return ch; +} + +template +_IT unquote_plus(_IT first, _IT last, bool decodeUrl=true, bool decodePlus=true) { + _IT result = first; + enum { STATE_COPY, STATE_FIRST_DIGIT, STATE_SECOND_DIGIT } state = STATE_COPY; + unsigned char accVal = 0; // accumulated character (from hex digits) + char lastCh = 0; + + for (; first != last; ++first) { + switch (state) { + case STATE_COPY: + if (*first == '+' && decodePlus) { + *result++ = ' '; + } + else if (decodeUrl && *first == '%') { + state = STATE_FIRST_DIGIT; + } + else { + *result++ = *first; + } + + break; + case STATE_FIRST_DIGIT: { + bool valid; + lastCh = *first; // remember it solely for special case where 2nd character is invalid hex + accVal = from_hex(*first, valid); + + if (valid) { + state = STATE_SECOND_DIGIT; + } + else { + *result++ = '%'; // put the percent symbol to the output stream + if (*first == '%') { + // we found the '%%' sequence. Put back the first '%' character and continue + // in the same state (as if we've just seen the first '%') + // this supports the case of %%xx, which would otherwise fail to parse. + } + else { + // put the "invalid" symbol to the output stream + *result++ = *first; + // continue copying + state = STATE_COPY; + } + } + + break; + } + case STATE_SECOND_DIGIT: { + bool valid; + accVal = (accVal << 4) | from_hex(*first, valid); + + if (valid) { + // After second hex digit decoded succesfully - put computed character to output and + // continue to "copying" state + *result++ = accVal; + } + else { + if (*first == '%') { + // put the percent symbol to the output + *result++ = '%'; + // put the first (among two) character (that was valid hex char), back to the output stream. + *result++ = lastCh; + state = STATE_FIRST_DIGIT; + break; + } + // If second character is invalid - return original '%', the first character, + // and the second character to the output. + + // put the percent symbol to the output + *result++ = '%'; + // put the first (among two) character (that was valid hex char), back to the output stream. + *result++ = lastCh; + // put the second (among two) "invalid" character to the output stream. + *result++ = *first; + } + + state = STATE_COPY; + break; + } + } + } + + if (state == STATE_FIRST_DIGIT) { + // put the percent symbol to the output stream + *result++ = '%'; + } + else if (state == STATE_SECOND_DIGIT) { + // put the percent symbol to the output + *result++ = '%'; + // put the first (among two) character (that was valid hex char), back to the output stream. 
+ *result++ = lastCh; + } + + return result; +} + +inline bool isHexDigit(const char ch) { + return isdigit(ch) || (ch >= 'a' && ch <= 'f') || (ch >= 'A' && ch <= 'F'); +} + +template +_IT escape_backslashes(_IT first, _IT last) { + _IT result = first; + enum { STATE_COPY, STATE_ESCAPE, STATE_OCTAL, STATE_HEX } state = STATE_COPY; + unsigned char accVal = 0; + unsigned char digitsCount = 0; + _IT mark = first; + + for (; first != last; ++first) { + switch (state) { + case STATE_COPY: + if (*first == '\\') { + mark = first; + state = STATE_ESCAPE; + } + else { + *result++ = *first; + } + break; + case STATE_ESCAPE: { + if (*first >= '0' && *first <= '7') { + accVal = *first - '0'; + digitsCount = 1; + state = STATE_OCTAL; + break; + } else if (*first == 'x') { + accVal = 0; + digitsCount = 0; + state = STATE_HEX; + break; + } + else { + switch (*first) { + case 'a': *result++ = 7; break; // BELL + case 'b': *result++ = 8; break; // BACKSPACE + case 't': *result++ = 9; break; // HORIZONTAL TAB + case 'n': *result++ = 10; break; // LINEFEED + case 'v': *result++ = 11; break; // VERTICAL TAB + case 'f': *result++ = 12; break; // FORMFEED + case 'r': *result++ = 13; break; // CARRIAGE RETURN + case '\\': *result++ = '\\'; break; // upon seeing double backslash - output only one + case '\"': *result++ = '"'; break; // backslash followed by '"' - output only '"' + default: + // invalid escape sequence - do not replace it (return original characters) + // Copy from back-track, not including current character, and continue + while (mark < first) { + *result++ = *mark++; + } + + // Copy current (terminator) character which is not "escape" and return to copy state + // If current character is escape - stay is "escape" state + if (*first != '\\') { + *result++ = *mark++; + state = STATE_COPY; + } + } + + state = STATE_COPY; + } + + break; + } + case STATE_OCTAL: { + if (*first >='0' && *first<='7') { + accVal = (accVal << 3) | (*first - '0'); + digitsCount++; + + // Up to 3 octal digits imposed by C standard, so after 3 digits accumulation stops. + if (digitsCount == 3) { + *result++ = accVal; // output character corresponding to collected accumulated value + digitsCount = 0; + state = STATE_COPY; + } + } + else { + // invalid octal digit stops the accumulation + *result++ = accVal; // output character corresponding to collected accumulated value + digitsCount = 0; + if (*first != '\\') { + // If terminating character is not backslash output the terminating character + *result++ = *first; + state = STATE_COPY; + } + else { + // If terminating character is backslash start next escape sequence + state = STATE_ESCAPE; + } + } + + break; + } + case STATE_HEX: { + if (!isHexDigit(*first)) { + // Copy from back-track, not including current character (which is absent), and continue + while (mark < first) { + *result++ = *mark++; + } + if (*first != '\\') { + // If terminating character is not backslash output the terminating character + *result++ = *first; + state = STATE_COPY; + } + else { + // If terminating character is backslash start next escape sequence + state = STATE_ESCAPE; + } + } + else { + accVal = accVal << 4; + if (isdigit(*first)) { + accVal += *first - '0'; + } + else if (*first >= 'a' && *first <= 'f') { + accVal += *first - 'a' + 10; + } + else if (*first >= 'A' && *first <= 'F') { + accVal += *first - 'A' + 10; + } + digitsCount++; + // exactly 2 hex digits are anticipated, so after 2 digits accumulation stops. 
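+                // Illustrative examples: the hex escape "\x41" and the octal escape "\101" both
+                // accumulate the value 65 and are emitted as the single character 'A'.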
+ if (digitsCount == 2) { + *result++ = accVal; // output character corresponding to collected accumulated value + digitsCount = 0; + state = STATE_COPY; + } + } + break; + } + } + } + + // Handle state at end of input + bool copyBackTrack = true; + switch (state) { + case STATE_HEX: + // this can only happen on this sequence '\xH' where H is a single hex digit. + // in this case the sequence is considered invalid and should be copied verbatim (copyBackTrack=true) + break; + case STATE_OCTAL: + // this can only happen when less than 3 octal digits are found at the value end, like '\1' or '\12' + *result++ = accVal; // output character corresponding to collected accumulated value + copyBackTrack = false; + break; + case STATE_COPY: + copyBackTrack = false; + break; + case STATE_ESCAPE: + break; + } + + if (copyBackTrack) { + // invalid escape sequence - do not replace it (return original characters) + // Copy from back-track + while (mark < first) { + *result++ = *mark++; + } + } + + return result; +} + +inline bool str_contains(const std::string &haystack, const std::string &needle) +{ + return haystack.find(needle) != std::string::npos; +} + +struct HtmlEntity { + const char *name; + unsigned short value; +}; + +extern const struct HtmlEntity g_htmlEntities[]; +extern const size_t g_htmlEntitiesCount; + +template +_IT escape_html(_IT first, _IT last) { + _IT result = first; + enum { + STATE_COPY, + STATE_ESCAPE, + STATE_NAMED_CHARACTER_REFERENCE, + STATE_NUMERIC_START, + STATE_NUMERIC, STATE_HEX + } state = STATE_COPY; + unsigned short accVal = 0; // should be unsigned short to hold unicode character code (16-bits) + bool digitsSeen = false; + std::list potentialMatchIndices; + size_t matchLength = 0; + size_t lastKnownMatchIndex = -1; + _IT mark = first; + + for (; first != last; ++first) { + switch (state) { + case STATE_COPY: + if (*first == '&') { + mark = first; + state = STATE_ESCAPE; + } + else { + *result++ = *first; + } + break; + case STATE_ESCAPE: + if (isalpha(*first)) { + // initialize potential matches list + potentialMatchIndices.clear(); + + for (size_t index = 0; index < g_htmlEntitiesCount; ++index) { + if (*first == g_htmlEntities[index].name[0]) { + potentialMatchIndices.push_back(index); + lastKnownMatchIndex = index; + } + } + + // No potential matches - send ampersand and current character to output + if (potentialMatchIndices.size() == 0) { + *result++ = '&'; + *result++ = *first; + state = STATE_COPY; + break; + } + + // 1st character already matched, so matchLen already starts from 1 + matchLength = 1; + state = STATE_NAMED_CHARACTER_REFERENCE; + } + else if (*first == '#') { + digitsSeen = 0; + accVal = 0; + state = STATE_NUMERIC_START; + } + else { + // not isalpha and not '#' - this is invalid character reference - do not replace it + // (return original characters) + *result++ = '&'; + *result++ = *first; + state = STATE_COPY; + } + break; + + case STATE_NAMED_CHARACTER_REFERENCE: + // Find and remove all potential matches that do not match anymore + { + int increaseMatchLength = 0; + + for ( + std::list::iterator pPotentialMatchIndex = potentialMatchIndices.begin(); + pPotentialMatchIndex != potentialMatchIndices.end(); + ) { + lastKnownMatchIndex = *pPotentialMatchIndex; + const char *matchName = g_htmlEntities[lastKnownMatchIndex].name; + + // If there are no more characters in the potntial match name, + // or the next tested character doesn't match - kill the match + if ((matchName[matchLength] == '\0') || (matchName[matchLength] != *first)) { + // remove 
current element from the list of potential matches + pPotentialMatchIndex = potentialMatchIndices.erase(pPotentialMatchIndex); + } + else { + increaseMatchLength = 1; + ++pPotentialMatchIndex; + } + } + + matchLength += increaseMatchLength; + } + + // No more potential matches: unsuccesful match -> flush all consumed characters back to output stream + if (potentialMatchIndices.size() == 0) { + // Send consumed ampersand to the output + *result++ = '&'; + + // Send those matched characters (these are the same that we consumed) - to the output + for (size_t i = 0; i < matchLength; i++) { + *result++ = g_htmlEntities[lastKnownMatchIndex].name[i]; + } + + // Send the character that terminated our search for possible matches + *result++ = *first; + + // Continue copying text verbatim + state = STATE_COPY; + break; // note: this breaks out of the for() loop, not out of the switch + } + + // There are still potential matches and ';' is hit + if (*first == ';') { + // longest match found for the named character reference. + // translate it into output character(s) and we're done. + unsigned short value = g_htmlEntities[lastKnownMatchIndex].value; + + // Encode UTF code point as UTF-8 bytes + if (value < 0x80) { + *result++ = value; + } + else if (value < 0x800 ) { + *result++ = (value >> 6) | 0xC0; + *result++ = (value & 0x3F) | 0x80; + } + else { // (value <= 0xFFFF : always true because value type is unsigned short which is 16-bit + *result++ = (value >> 12) | 0xE0; + *result++ = ((value >> 6) & 0x3F) | 0x80; + *result++ = (value & 0x3F) | 0x80; + } + + // Continue copying text verbatim + state = STATE_COPY; + break; // note: this breaks out of the for() loop, not out of the switch + } + break; + case STATE_NUMERIC_START: + digitsSeen = false; + accVal = 0; + if (*first == 'x' || *first == 'X') { + state = STATE_HEX; + } + else if (isdigit(*first)) { + digitsSeen = true; + accVal = *first - '0'; + state = STATE_NUMERIC; + } + else { + // Sequence started with these two characters: '&#', and here is the third, non-digit character + + // Copy from back-track, not including current character, and continue + while (mark < first) { + *result++ = *mark++; + } + + if (*first == '&') { + // Terminator is also start of next escape sequence + mark = first; + state = STATE_ESCAPE; + break; + } + else { + // Copy the terminating character too + *result++ = *first; + } + state = STATE_COPY; + } + break; + case STATE_NUMERIC: + if (!isdigit(*first)) { + if (digitsSeen) { + // Encode UTF code point as UTF-8 bytes + if (accVal < 0x80) { + *result++ = accVal; + } + else if (accVal < 0x800 ) { + *result++ = (accVal >> 6) | 0xC0; + *result++ = (accVal & 0x3F) | 0x80; + } + else { // (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit + *result++ = (accVal >> 12) | 0xE0; + *result++ = ((accVal >> 6) & 0x3F) | 0x80; + *result++ = (accVal & 0x3F) | 0x80; + } + } + else { + // Copy from back-track, not including current character (which is absent), and continue + while (mark < first) { + *result++ = *mark++; + } + } + + if (*first == '&') { + // Terminator is also start of next escape sequence + mark = first; + state = STATE_ESCAPE; + break; + } + else if (!digitsSeen || *first != ';') { + // Do not copy the ';' but do copy any other terminator + // Note: the ';' should remain in the output if there were no digits seen. + *result++ = *first; + } + state = STATE_COPY; + } + else { + digitsSeen = true; + accVal = accVal * 10 + *first - '0'; // TODO:: beware of integer overflow? 
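+                    // Illustrative examples: the numeric references "&#65;" and "&#x41;" both decode
+                    // to 'A', while values above 0x7F are re-encoded as multi-byte UTF-8 sequences.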
+ } + break; + case STATE_HEX: + if (!isHexDigit(*first)) { + if (digitsSeen) { + // Encode UTF code point as UTF-8 bytes + if (accVal < 0x80) { + *result++ = accVal; + } + else if (accVal < 0x800 ) { + *result++ = (accVal >> 6) | 0xC0; + *result++ = (accVal & 0x3F) | 0x80; + } + else { // (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit + *result++ = (accVal >> 12) | 0xE0; + *result++ = ((accVal >> 6) & 0x3F) | 0x80; + *result++ = (accVal & 0x3F) | 0x80; + } + } + else { + // Copy from back-track, not including current character (which is absent), and continue + while (mark < first) { + *result++ = *mark++; + } + } + + if (*first == '&') { + // Terminator is also start of next escape sequence + mark = first; + state = STATE_ESCAPE; + break; + } + else if (!digitsSeen || *first != ';') { + // Do not copy the ';' but do copy any other terminator + // Note: the ';' should remain in the output if there were no digits seen. + *result++ = *first; + } + state = STATE_COPY; + } + else { + digitsSeen = true; + accVal = accVal << 4; + if (isdigit(*first)) { + accVal += *first - '0'; + } + else if (*first >= 'a' && *first <= 'f') { + accVal += *first - 'a' + 10; + } + else if (*first >= 'A' && *first <= 'F') { + accVal += *first - 'A' + 10; + } + } + break; + } + } + + if (state == STATE_ESCAPE) { + *result++ = '&'; + } + else if (state == STATE_NAMED_CHARACTER_REFERENCE && potentialMatchIndices.size() > 0) { + // Send consumed ampersand to the output + *result++ = '&'; + + // Send those matched characters (these are the same that we consumed) - to the output + for (size_t i = 0; i < matchLength; i++) { + // Even if there are multiple potential matches, all of them start with the same + // matchLength characters that we consumed! + *result++ = g_htmlEntities[lastKnownMatchIndex].name[i]; + } + } + if (state == STATE_HEX && !digitsSeen) { // Special case of "&#x" + // Copy from back-track, not including current character (which is absent), and continue + while (mark < first) { + *result++ = *mark++; + } + state = STATE_COPY; + } + else if (state == STATE_HEX || state == STATE_NUMERIC || state == STATE_NUMERIC_START) { + if (digitsSeen) { + // Encode UTF code point as UTF-8 bytes + if (accVal < 0x80) { + *result++ = accVal; + } + else if (accVal < 0x800 ) { + *result++ = (accVal >> 6) | 0xC0; + *result++ = (accVal & 0x3F) | 0x80; + } + else { // (accVal <= 0xFFFF : always true because accVal type is unsigned short which is 16-bit + *result++ = (accVal >> 12) | 0xE0; + *result++ = ((accVal >> 6) & 0x3F) | 0x80; + *result++ = (accVal & 0x3F) | 0x80; + } + } + else { + // Copy from back-track, not including current character (which is absent), and continue + while (mark < first) { + *result++ = *mark++; + } + state = STATE_COPY; + } + } + + return result; +} + +// Compare two buffers, case insensitive. 
Return true if they are equal (case-insensitive) +inline bool memcaseinsensitivecmp(const char *buf1, size_t buf1_len, const char *buf2, size_t buf2_len) { + if (buf1_len != buf2_len) { + return false; + } + + for (; buf1_len > 0; --buf1_len) { + if (tolower(*buf1++) != tolower(*buf2++)) { + return false; // different + } + } + + return true; // equal +} + +inline void replaceAll(std::string& str, const std::string& from, const std::string& to) { + if(from.empty()) { + return; + } + + size_t start_pos = 0; + + while((start_pos = str.find(from, start_pos)) != std::string::npos) { + str.replace(start_pos, from.length(), to); + start_pos += to.length(); // In case 'to' contains 'from', like replacing 'x' with 'yx' + } +} + +// Count items in v that are not in ignored_set +inline size_t countNotInSet(const std::vector &v, const std::set &ignored_set) { + size_t count = 0; + + for (const std::string &word : v) { + if (ignored_set.find(word) == ignored_set.end()) { + // not in ignored_set + count++; + } + } + + return count; +} + +// note: this algorithm may probably be rewritten with std::remove_if() and probably lambda, +// but this better done when we can finally use c++11 +inline void removeItemsMatchingSubstringOf(std::vector &v, const std::string& match) { + for (std::vector::iterator it=v.begin(); it != v.end();) { + // Remove items that are contained (substr) within the (longer or equal-length) match string. + if (match.find(*it) != std::string::npos) { + it = v.erase(it); + } + else { + ++it; + } + } +} + +// Detect whether unicode code is in the "Halfwidth and Fullwidth Forms" set convertable to ASCII. +inline bool isUnicodeHalfAndFullWidthRange(uint32_t code) { + return (code >= 0xFF01 && code <=0xFF5E); +} + +// Convert unicode code from the "Halfwidth and Fullwidth Forms" set to ASCII. +inline char convertFromUnicodeHalfAndFullWidthRange(uint32_t code) { + assert(isUnicodeHalfAndFullWidthRange(code)); + // Support set of unicode characters from the "Halfwidth and Fullwidth Forms" that are converted to ASCII + static const char *xlat = + "!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"; + return xlat[code - 0xFF01]; +} + +inline bool isSpecialUnicode(uint32_t code) { + return isUnicodeHalfAndFullWidthRange(code) + || 0x2028 == code || 0x2029 == code + || 0x2216 == code || 0xEFC8 == code || 0xF025 == code; +} + +inline char convertSpecialUnicode(uint32_t code) { + if (isUnicodeHalfAndFullWidthRange(code)) { + return convertFromUnicodeHalfAndFullWidthRange(code); + } + else if (0x2216 == code || 0xEFC8 == code || 0xF025 == code) + { + return '\\'; + } + // assuming 0x2028 == code || 0x2029 == code + else + { + return '\n'; + } +} + +inline void stripSpaces(std::string &text) { + std::string::iterator it = text.begin(); + std::string::iterator result = it; + + for (; it != text.end(); ++it) { + unsigned char ch = (unsigned char)(*it); + + // Include only non-space characters + if (!isspace(ch)) { + *result++ = ch; + } + } + + text.erase(result, text.end()); +} + +inline size_t countSubstrings(const std::string &str, const std::string &subStr) { + if (subStr.empty()) { + return str.size() + 1; // to conform to python's "str.count(subStr)" behavior when substr is empty string... 
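+        // Illustrative example: countSubstrings("aaaa", "aa") returns 2 - occurrences are counted
+        // without overlap because the search resumes only after the end of each match.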
+ } + + size_t count = 0; + size_t pos = str.find(subStr); + + while( pos != std::string::npos) { + count++; + pos = str.find(subStr, pos + subStr.size()); + } + + return count; +} + + +// Test whether text starts one of the known HTML tag names +bool startsWithHtmlTagName(const char *text); + +// Normalizing URL means replacing any pure-numeric URL parts with the word "_num" +// The parameters part of the given uri is also stripped (the '?' character and anything after it). +std::string normalize_uri(const std::string &uri); + +std::string normalize_param(const std::string& param); + +// Analogous to python's text.decode('unicode_escape'), with the distinction that +// this function simply throws out the \uXXXX sequences instead of converting them to binary unicode sequences. +// This function performs in-place decoding, updating text string in progress. +void unescapeUnicode(std::string &text); + +// Try to find and decode UTF7 chunks +std::string filterUTF7(const std::string &text); + +bool +b64DecodeChunk( + const std::string &value, + std::string::const_iterator it, + std::string::const_iterator end, + std::string &decoded); + +std::vector +split(const std::string& s, char delim); + +namespace Waap { +namespace Util { + typedef bool (*RegexSubCallback_f)( + const std::string &value, + std::string::const_iterator b, + std::string::const_iterator e, + std::string &repl); + + void b64Decode( + const std::string &s, + RegexSubCallback_f cb, + int &decodedCount, + int &deletedCount, + std::string &outStr); + + // The original stdlib implementation of isalpha() supports locale settings which we do not really need. + // It is also proven to contribute to slow performance in some of the algorithms using it. + // This function has reduced functionality compared to stdlib isalpha(), but is much faster. + inline bool isAlphaAsciiFast(unsigned char ch) { + return ((unsigned int)ch | 32) - 'a' < 26; + } + + // Compare two objects referenced by pointer - comparison is done by value (comparing objects themselves) + // This is different from comparing object pointers. 
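+    // Illustrative example: two distinct std::shared_ptr<std::string> instances that both point to
+    // "abc" compare equal here, whereas comparing the raw pointers themselves would return false.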
+ template + bool compareObjects(_T &first, _T &second) + { + // If both are the same object (or both are equal to nullptr - then they are equivalent) + if (first == second) { + return true; + } + + // If pointers are different and at least one of them is nullptr, then the other is not nullptr - so they are + // not equivalent + if (first == nullptr || second == nullptr) { + return false; + } + + // At this point, both pointers are for sure not nullptr, so we can dereference and compare objects pointed by + return *first == *second; + } + + inline bool str_isalnum(const std::string & value) { + for (std::string::const_iterator pC = value.begin(); pC != value.end(); ++pC) { + if (!std::isalnum(*pC)) { + return false; // at least one non alphanumeric character detected + } + } + + return true; + } + + inline bool isAllDigits(const std::string & value) { + for (char ch : value) { + if (!isdigit(ch)) { + return false; // at least one non digit character detected + } + } + + return true; + } + + typedef std::map > map_of_stringlists_t; + + // Yajl generator (C++ RAII edition :) + struct Yajl { + yajl_gen g; + Yajl() :g(yajl_gen_alloc(NULL)) {} + ~Yajl() + { + yajl_gen_free(g); + } + + struct Map { + yajl_gen& g; + explicit Map(Yajl& y) : g(y.g) + { + yajl_gen_map_open(g); + } + ~Map() + { + yajl_gen_map_close(g); + } + void gen_null(const std::string& k) + { + yajl_gen_string(g, (unsigned char*)k.data(), k.size()); yajl_gen_null(g); + } + void gen_str(const std::string& k, const std::string& v) + { + yajl_gen_string(g, (unsigned char*)k.data(), k.size()); + yajl_gen_string(g, (unsigned char*)v.data(), v.size()); + } + void gen_bool(const std::string& k, bool v) + { + yajl_gen_string(g, (unsigned char*)k.data(), k.size()); yajl_gen_bool(g, v); + } + void gen_integer(const std::string& k, long long int v) + { + yajl_gen_string(g, (unsigned char*)k.data(), k.size()); yajl_gen_integer(g, v); + } + void gen_double(const std::string& k, double v) + { + yajl_gen_string(g, (unsigned char*)k.data(), k.size()); yajl_gen_double(g, v); + } + void gen_key(const std::string& k) + { + yajl_gen_string(g, (unsigned char*)k.data(), k.size()); + } + }; + + struct Array { + yajl_gen& g; + explicit Array(Yajl& y) :g(y.g) { yajl_gen_array_open(g); } + ~Array() { yajl_gen_array_close(g); } + void gen_null() { yajl_gen_null(g); } + void gen_str(const std::string& v) { yajl_gen_string(g, (unsigned char*)v.data(), v.size()); } + void gen_bool(bool v) { yajl_gen_bool(g, v); } + void gen_integer(long long int v) { yajl_gen_integer(g, v); } + void gen_double(double v) { yajl_gen_double(g, v); } + }; + + std::string get_json_str() const { + const unsigned char* buf; + size_t len; + yajl_gen_get_buf(g, &buf, &len); + return std::string((char*)buf, len); + } + }; + + enum ContentType { + CONTENT_TYPE_UNKNOWN, + CONTENT_TYPE_XML, + CONTENT_TYPE_JSON, + CONTENT_TYPE_HTML, + CONTENT_TYPE_MULTIPART_FORM, + CONTENT_TYPE_URLENCODED, + CONTENT_TYPE_WBXML, + CONTENT_TYPES_COUNT + }; + +// LCOV_EXCL_START Reason: coverage upgrade + inline const char* getContentTypeStr(enum ContentType contentType) { + static const char* contentTypeStr[] = { + "UNKNOWN", + "XML", + "JSON", + "HTML", + "MULTIPART_FORM", + "URLENCODED", + "WBXML" + }; + + if (contentType >= CONTENT_TYPES_COUNT) { + contentType = CONTENT_TYPE_UNKNOWN; + } + + return contentTypeStr[contentType]; + }; +// LCOV_EXCL_STOP + + static const std::string s_EncryptionKey = "KSO+hOFs1q5SkEnx8bvp67Om2zyHDD6ZJF4NHAa3R94=";; + static const std::string s_EncryptionIV = 
"sxJNyEO7i6YfA1p9CTglHw=="; + + // trim from start + static inline std::string <rim(std::string &s) { + s.erase(s.begin(), std::find_if(s.begin(), s.end(), + std::not1(std::ptr_fun(std::isspace)))); + return s; + } + + // trim from end + static inline std::string &rtrim(std::string &s) { + s.erase(std::find_if(s.rbegin(), s.rend(), + std::not1(std::ptr_fun(std::isspace))).base(), s.end()); + return s; + } + + // trim from both ends + static inline std::string &trim(std::string &s) { + return ltrim(rtrim(s)); + } + + // Find whether some word (what) exists wihin keys of the map. + // The search done by *searching* for "what" string within each key string, + // not by *comparing* "what" with each key string. + bool find_in_map_of_stringlists_keys(const std::string & what, const map_of_stringlists_t & where); + + void remove_in_map_of_stringlists_keys(const std::string & what, map_of_stringlists_t & where); + + void remove_startswith(std::vector &vec, const std::string &prefix); + + std::string AES128Decrypt(std::string& key, std::string& iv, std::string& message); + std::string base64Encode(const std::string &input); + std::string base64Decode(const std::string &input); + std::string obfuscateXor(const std::string& toEncrypt); + std::string obfuscateXorBase64(const std::string& toEncrypt); + + bool containsInvalidUtf8(const std::string &payload); + + // based on invalid utf-8 evasion from here: https://www.cgisecurity.com/lib/URLEmbeddedAttacks.html + std::string unescapeInvalidUtf8(const std::string &text); + + bool containsBrokenUtf8(const std::string &payload); + std::string unescapeBrokenUtf8(const std::string &text); + + bool testUrlBareUtf8Evasion(const std::string &line); + bool testUrlBadUtf8Evasion(const std::string &line); + + std::string urlDecode(std::string src); + + std::string injectSpacesToString(const std::string& std); + + std::string charToString(const char* s, int slen); + + std::string vecToString(const std::vector& vec, char delim = ','); + template + std::string + setToString(const std::set& set, bool addParenthesis=true) { + std::ostringstream vts; + + if (addParenthesis) + { + vts << "["; + } + + if (!set.empty()) + { + for (auto itr = set.begin(); itr != set.end(); itr++) + { + vts << *itr << ", "; + } + } + else + { + return std::string(); + } + std::string res = vts.str(); + res.pop_back(); + res.pop_back(); + if (addParenthesis) + { + res += "]"; + } + + + return res; + } + + template + void mergeFromVectorWithoutDuplicates( + const std::vector& first_vector, + std::vector& second_vector) + { + for (const V& element : first_vector) + { + if(find(second_vector.begin(), second_vector.end(), element) == second_vector.end()) + { + second_vector.push_back(element); + } + } + } + + template + void mergeFromMapOfVectorsWithoutDuplicates( + const std::map>& first_map, + std::map>& second_map) + { + for (auto itr = first_map.begin(); itr != first_map.end(); itr++) + { + if (second_map.find(itr->first) != second_map.end()) + { + const std::vector& first_vector = first_map.at(itr->first); + mergeFromVectorWithoutDuplicates(first_vector, second_map[itr->first]); + } + else + { + const std::vector& first_vector = itr->second; + second_map[itr->first] = first_vector; + } + } + } + + template + void mergeSets(const std::set& first_set, const std::set& second_set, std::set& merged_set) + { + std::set_union( + first_set.begin(), + first_set.end(), + second_set.begin(), + second_set.end(), + std::inserter(merged_set, merged_set.begin()) + ); + } + + + ReportIS::Severity 
computeSeverityFromThreatLevel(ThreatLevel threatLevel); + ReportIS::Priority computePriorityFromThreatLevel(ThreatLevel threatLevel); + std::string computeConfidenceFromThreatLevel(ThreatLevel threatLevel); + + void decodePercentEncoding(std::string &text, bool decodePlus=false); + void decodeUtf16Value(const ValueStatsAnalyzer &valueStats, std::string &cur_val); + + std::string stripOptionalPort(const std::string::const_iterator &first, const std::string::const_iterator &last); + std::string extractKeyValueFromCookie(const std::string &cookie, const std::string &key); + bool isIpAddress(const std::string &ip_address); + bool vectorStringContain(const std::vector& vec, const std::string& str); + bool isIpTrusted(const std::string &ip, const std::vector &trusted_ips); + + + ContentType detectContentType(const char* hdr_value); + std::string convertParamTypeToStr(ParamType type); + ParamType convertTypeStrToEnum(const std::string& typeStr); + +} +} + +#endif // __WAF2_UTIL_H__148aa7e4 diff --git a/components/security_apps/waap/waap_clib/lru_cache_map.h b/components/security_apps/waap/waap_clib/lru_cache_map.h new file mode 100755 index 0000000..6a3a139 --- /dev/null +++ b/components/security_apps/waap/waap_clib/lru_cache_map.h @@ -0,0 +1,113 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
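// ---------------------------------------------------------------------------
// Illustrative sketch (editorial addition, not part of the patch): minimal use
// of the RAII Yajl JSON generator declared in waf2_util.h above. The example
// assumes the wrapper lives in the Waap::Util namespace (suggested by the two
// closing namespace braces at the end of that header) and that the yajl
// headers and library are available; the function name and values are made up.
#include <string>

std::string buildExampleJson()
{
    Waap::Util::Yajl yajl;                       // allocates the yajl_gen handle, freed by the destructor
    {
        Waap::Util::Yajl::Map root(yajl);        // RAII: opens the top-level map, closes it on destruction
        root.gen_str("name", "waap");
        root.gen_bool("enabled", true);
        root.gen_integer("score", 42);
        root.gen_key("tags");                    // key whose value is the nested array below
        {
            Waap::Util::Yajl::Array tags(yajl);  // RAII: opens the array, closes it on destruction
            tags.gen_str("xss");
            tags.gen_str("sqli");
        }
    }
    // Once both scopes are closed the generator holds a complete document,
    // e.g. {"name":"waap","enabled":true,"score":42,"tags":["xss","sqli"]}
    return yajl.get_json_str();
}
// ------------------------- end of illustrative sketch -----------------------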
+ +#pragma once + +#include +#include +#include +#include + +template +class LruCacheMap { +public: + // Type that should be passed to the insert() method + typedef std::pair value_type; +private: + struct TagQueue {}; + struct TagHash {}; + + // Multi-Index container implementing both queue and hashmap + typedef boost::multi_index::multi_index_container< + value_type, + boost::multi_index::indexed_by< + // Interface #0 (default) - sequenced (std::list) + boost::multi_index::sequenced< + boost::multi_index::tag + >, + // Interface #1 - hashed (std::unordered_map) + boost::multi_index::hashed_unique< + boost::multi_index::tag, + boost::multi_index::member< + value_type, + KeyType, + &value_type::first // hash by the key + > + > + > + > container_type; + + typedef typename container_type::template index::type container_queue_index_type; + typedef typename container_type::template index::type container_hash_index_type; +public: + // Allow iteration + typedef typename container_type::template index::type::iterator iterator; + typedef typename container_type::template index::type::const_iterator const_iterator; + iterator begin() { return m_queueIndex.begin(); } + iterator end() { return m_queueIndex.end(); } + const_iterator cbegin() const { return m_queueIndex.cbegin(); } + const_iterator cend() const { return m_queueIndex.cend(); } + + // Container constructor + LruCacheMap(int capacity) + :m_capacity(capacity), + m_queueIndex(m_container.template get()), + m_hashIndex(m_container.template get()) + {} + + // Get capacity + std::size_t capacity() const { return m_capacity; } + // Get count of entries stored + std::size_t size() const { return m_queueIndex.size(); } + // Return true if cache is empty + bool empty() const { return m_queueIndex.empty(); } + // Clear the cache + void clear() { m_queueIndex.clear(); } + + // Check if key exists by quickly looking in a hashmap + bool exist(const KeyType &key) const { + return m_hashIndex.find(key) != m_hashIndex.end(); + } + + bool get(const KeyType &key, ValueType &value) const { + // get the std::unordered_map index + const auto &found = m_hashIndex.find(key); + if (found == m_hashIndex.end()) { + // Value not found. Do not touch the value and return false. + return false; + } + // Value found - fill out the value and return true + value = found->second; + return true; + } + + // Insert entry into an LRU cache + void insert(const value_type &item) { + // Try to push a new entry to the front (may be rejected due to the hashed_unique index) + std::pair p = m_queueIndex.push_front(item); + if (!p.second) { + // not inserted - entry already existed - relocate the entry to the queue front + m_queueIndex.relocate(m_queueIndex.begin(), p.first); + } + else if (m_queueIndex.size() > m_capacity) { + // remove old unused entries at queue back to keep entries count under capacity + m_queueIndex.pop_back(); + } + } + +private: + std::size_t m_capacity; + container_type m_container; + container_queue_index_type &m_queueIndex; + container_hash_index_type &m_hashIndex; +}; diff --git a/components/security_apps/waap/waap_clib/lru_cache_set.h b/components/security_apps/waap/waap_clib/lru_cache_set.h new file mode 100755 index 0000000..04c0234 --- /dev/null +++ b/components/security_apps/waap/waap_clib/lru_cache_set.h @@ -0,0 +1,98 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
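// ---------------------------------------------------------------------------
// Illustrative sketch (editorial addition, not part of the patch): basic use
// of the LruCacheMap container defined above. The template arguments are
// written as <KeyType, ValueType> to match the names used in its member
// declarations; the function name and values are made up. LruCacheSet, defined
// next, behaves the same way but stores bare keys instead of key/value pairs.
#include <cassert>
#include <string>
#include <utility>

void lruCacheMapSketch()
{
    LruCacheMap<std::string, int> cache(2);             // capacity of two entries

    cache.insert(std::make_pair(std::string("a"), 1));
    cache.insert(std::make_pair(std::string("b"), 2));
    cache.insert(std::make_pair(std::string("c"), 3));  // exceeds capacity: "a", the least recently used entry, is evicted

    int value = 0;
    assert(!cache.exist("a"));                           // "a" was popped from the back of the queue
    assert(cache.get("b", value) && value == 2);         // "b" is still cached
    assert(cache.size() == 2 && cache.capacity() == 2);
}
// ------------------------- end of illustrative sketch -----------------------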
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include + +template +class LruCacheSet { +public: + // Type that should be passed to the insert() method + typedef KeyType value_type; +private: + struct TagQueue {}; + struct TagHash {}; + + // Multi-Index container implementing both queue and hashmap + typedef boost::multi_index::multi_index_container< + value_type, + boost::multi_index::indexed_by< + // Interface #0 (default) - sequenced (std::list) + boost::multi_index::sequenced< + boost::multi_index::tag + >, + // Interface #1 - hashed (std::unordered_set) + boost::multi_index::hashed_unique< + boost::multi_index::tag, + boost::multi_index::identity + > + > + > container_type; + + typedef typename container_type::template index::type container_queue_index_type; + typedef typename container_type::template index::type container_hash_index_type; +public: + // Allow iteration + typedef typename container_type::template index::type::iterator iterator; + typedef typename container_type::template index::type::const_iterator const_iterator; + iterator begin() { return m_queueIndex.begin(); } + iterator end() { return m_queueIndex.end(); } + const_iterator cbegin() const { return m_queueIndex.cbegin(); } + const_iterator cend() const { return m_queueIndex.cend(); } + + // Container constructor + LruCacheSet(int capacity) + :m_capacity(capacity), + m_queueIndex(m_container.template get()), + m_hashIndex(m_container.template get()) + {} + + // Get capacity + std::size_t capacity() const { return m_capacity; } + // Get count of entries stored + std::size_t size() const { return m_queueIndex.size(); } + // Return true if cache is empty + bool empty() const { return m_queueIndex.empty(); } + // Clear the cache + void clear() { m_queueIndex.clear(); } + + // Check if key exists by quickly looking in a hashmap + bool exist(const KeyType &key) const { + //m_queueIndex.hash_function(); <-- TODO:: remove -- test that this is indeed hash interface! + return m_hashIndex.find(key) != m_hashIndex.end(); + } + + // Insert entry into an LRU cache + void insert(const value_type &item) { + // Try to push a new entry to the front (may be rejected due to the hashed_unique index) + std::pair p = m_queueIndex.push_front(item); + if (!p.second) { + // not inserted - entry already existed - relocate the entry to the queue front + m_queueIndex.relocate(m_queueIndex.begin(), p.first); + } + else if (m_queueIndex.size() > m_capacity) { + // remove old unused entries at queue back to keep entries count under capacity + m_queueIndex.pop_back(); + } + } + +private: + std::size_t m_capacity; + container_type m_container; + container_queue_index_type &m_queueIndex; + container_hash_index_type &m_hashIndex; +}; diff --git a/components/security_apps/waap/waap_clib/waf2_reporting.h b/components/security_apps/waap/waap_clib/waf2_reporting.h new file mode 100755 index 0000000..1a77d6b --- /dev/null +++ b/components/security_apps/waap/waap_clib/waf2_reporting.h @@ -0,0 +1,198 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. 
All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __WAF2_REPORTING__001de2f8 +#define __WAF2_REPORTING__001de2f8 + +// Generates data in JSON structure similar to what mod_security generates for its audit log +#include +#include +#include "yajl/yajl_gen.h" +#include "yajl/yajl_version.h" + +#define yajl_string(s) yajl_gen_string(g, (const unsigned char *)(s), strlen(s)) +#define yajl_string_len(s, l) yajl_gen_string(g, (const unsigned char *)(s), l) + +#define yajl_kv_null(k) yajl_string(k); yajl_gen_null(g) +#define yajl_kv_int(k, v) yajl_string(k); yajl_gen_integer(g, v) +#define yajl_kv_bool(k, v) yajl_string(k); yajl_gen_bool(g, v) +#define yajl_kv_string(k, v) yajl_string(k); yajl_string(v) +#define yajl_kv_string_len(k, v, vlen) yajl_string(k); yajl_string_len(v, vlen) + +typedef yajl_gen reporting_ctx_t; + +inline reporting_ctx_t +reporting_ctx_create() +{ + return yajl_gen_alloc(NULL); +} + +inline static void +reporting_ctx_free(reporting_ctx_t g) +{ + yajl_gen_free(g); +} + +inline void +reporting_start_report(reporting_ctx_t g) +{ + yajl_gen_map_open(g); +} + +inline void +reporting_emit_transaction_info( + reporting_ctx_t g, const char *log_time, + const char *transaction_id, + const char *remote_addr, + int remote_port, + const char *local_addr, + int local_port) +{ + yajl_string("transaction"); + yajl_gen_map_open(g); + yajl_kv_string("time", log_time); + yajl_kv_string("transaction_id", transaction_id); + yajl_kv_string("remote_address", remote_addr); + yajl_kv_int("remote_port", remote_port); + yajl_kv_string("local_address", local_addr); + yajl_kv_int("local_port", local_port); + yajl_gen_map_close(g); +} + +// Request +inline void +reporting_start_request(reporting_ctx_t g, const char *uri) +{ + yajl_string("request"); + yajl_gen_map_open(g); + yajl_kv_string("uri", uri); +} + +inline void +reporting_start_request_hdrs(reporting_ctx_t g) +{ + yajl_string("headers"); + yajl_gen_map_open(g); +} + +inline void +reporting_add_request_hdr(reporting_ctx_t g, const char *name, int name_len, const char *value, int value_len) +{ + yajl_string_len(name, name_len); + yajl_string_len(value, value_len); +} + +inline void +reporting_end_request_hdrs(reporting_ctx_t g) +{ + yajl_gen_map_close(g); +} + +inline void +reporting_start_request_body(reporting_ctx_t g) +{ + yajl_string("body"); + yajl_gen_array_open(g); +} + +inline void +reporting_add_request_body_chunk(reporting_ctx_t g, const char *data, int data_len) +{ + yajl_string_len(data, data_len); +} + +inline void +reporting_end_request_body(reporting_ctx_t g) +{ + yajl_gen_array_close(g); +} + +inline void +reporting_end_request(reporting_ctx_t g) +{ + yajl_gen_map_close(g); +} + +// Response +inline void +reporting_start_response(reporting_ctx_t g, int response_status, int http_version) +{ + yajl_string("response"); + yajl_gen_map_open(g); + yajl_kv_string("protocol", (http_version==1) ? 
"HTTP/1.1" : "HTTP/1.0"); + // as an integer, response status is easier to parse than status_line + yajl_kv_int("status", response_status); +} + +inline void +reporting_start_response_hdrs(reporting_ctx_t g) +{ + yajl_string("headers"); + yajl_gen_map_open(g); +} + +inline void +reporting_add_response_hdr(reporting_ctx_t g, const char *name, int name_len, const char *value, int value_len) +{ + yajl_string_len(name, name_len); + yajl_string_len(value, value_len); +} + +inline void +reporting_end_response_hdrs(reporting_ctx_t g) +{ + yajl_gen_map_close(g); +} + +inline void +reporting_start_response_body(reporting_ctx_t g) +{ + yajl_string("body"); + yajl_gen_array_open(g); +} + +inline void +reporting_add_response_body_chunk(reporting_ctx_t g, const char *data, int data_len) +{ + yajl_string_len(data, data_len); +} + +inline void +reporting_end_response_body(reporting_ctx_t g) +{ + yajl_gen_array_close(g); +} + +inline void +reporting_end_response(reporting_ctx_t g) +{ + yajl_gen_map_close(g); +} + +inline void +reporting_end_report(reporting_ctx_t g) +{ + yajl_gen_map_close(g); +} + +inline void +reporting_dump_report(reporting_ctx_t g, FILE *f) +{ + const unsigned char *final_buf; + size_t len; + yajl_gen_get_buf(g, &final_buf, &len); + fwrite(final_buf, 1, len, f); + yajl_gen_clear(g); +} + +#endif // __WAF2_REPORTING__001de2f8 diff --git a/components/security_apps/waap/waap_component.cc b/components/security_apps/waap/waap_component.cc new file mode 100755 index 0000000..8a4ad8c --- /dev/null +++ b/components/security_apps/waap/waap_component.cc @@ -0,0 +1,73 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include "waap.h" +#include "telemetry.h" +#include "waap_clib/DeepAnalyzer.h" +#include "waap_component_impl.h" +#include "debug.h" +#include "waap_clib/WaapConfigApplication.h" +#include "waap_clib/WaapConfigApi.h" + +USE_DEBUG_FLAG(D_WAAP); +USE_DEBUG_FLAG(D_WAAP_API); + +WaapComponent::WaapComponent() : Component("WaapComponent"), pimpl(std::make_unique()) +{ + dbgTrace(D_WAAP) << "WaapComponent::WaapComponent()"; +} + +WaapComponent::~WaapComponent() +{ + dbgTrace(D_WAAP) << "WaapComponent::~WaapComponent()"; +} + +void +WaapComponent::init() +{ + pimpl->init(); +} + +void +WaapComponent::fini() +{ + pimpl->fini(); +} + +void +WaapComponent::preload() +{ + // TODO:: call stuff like registerExpectedCofiguration here.. 
+ registerExpectedConfiguration("WAAP", "WebApplicationSecurity"); + registerExpectedConfiguration("WAAP", "WebAPISecurity"); + registerExpectedConfiguration("WAAP", "Sigs score file path"); + registerExpectedConfiguration("WAAP", "Sigs file path"); + registerExpectedConfigFile("waap", Config::ConfigFileType::Policy); + registerConfigLoadCb( + [this]() + { + WaapConfigApplication::notifyAssetsCount(); + WaapConfigAPI::notifyAssetsCount(); + } + ); + registerConfigPrepareCb( + [this]() + { + WaapConfigApplication::clearAssetsCount(); + WaapConfigAPI::clearAssetsCount(); + } + ); + dbgTrace(D_WAAP) << "WaapComponent::preload() exit"; +} diff --git a/components/security_apps/waap/waap_component_impl.cc b/components/security_apps/waap/waap_component_impl.cc new file mode 100755 index 0000000..bf13ffb --- /dev/null +++ b/components/security_apps/waap/waap_component_impl.cc @@ -0,0 +1,750 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#define WAF2_LOGGING_ENABLE + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "debug.h" +#include "waap_clib/WaapAssetStatesManager.h" +#include "waap_clib/Waf2Engine.h" +#include "waap_clib/WaapConfigApi.h" +#include "waap_clib/WaapConfigApplication.h" +#include "waap_clib/WaapDecision.h" +#include "telemetry.h" +#include "waap_clib/DeepAnalyzer.h" +#include "waap_component_impl.h" +#include "i_waapConfig.h" +#include "generic_rulebase/rulebase_config.h" +#include "report_messaging.h" +#include "first_request_object.h" + +using namespace std; + +USE_DEBUG_FLAG(D_WAAP); +USE_DEBUG_FLAG(D_WAAP_ULIMITS); + +WaapComponent::Impl::Impl() : + pending_response(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT), + accept_response(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT), + drop_response(ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP), + waapStateTable(NULL), + transactionsCount(0), + deepAnalyzer() +{ +} + +WaapComponent::Impl::~Impl() +{ +} + +// Called when component is initialized +void +WaapComponent::Impl::init() +{ + std::string sigs_file_path = getConfigurationWithDefault( + "/etc/cp/conf/waap/1.data", + "WAAP", + "Sigs file path" + ); + + std::string sigs_score_file_path = getConfigurationWithDefault( + "/etc/cp/conf/waap/2.data", + "WAAP", + "Sigs score file path" + ); + + assets_metric.init( + "Assets Count", + ReportIS::AudienceTeam::AGENT_CORE, + ReportIS::IssuingEngine::AGENT_CORE, + std::chrono::minutes(10), + true, + ReportIS::Audience::INTERNAL + ); + assets_metric.registerListener(); + registerListener(); + waap_metric.registerListener(); + + init(sigs_file_path, sigs_score_file_path); +} + +void +WaapComponent::Impl::init(const std::string &sigs_file_path, const std::string &sigs_scores_file_path) +{ + //waf2_set_log_target(WAF2_LOGTARGET_STDERR); + dbgTrace(D_WAAP) << "WaapComponent::Impl::init() ..."; + + reputationAggregator.init(); + + bool success = waf2_proc_start(sigs_file_path, sigs_scores_file_path); + if (!success) { + dbgWarning(D_WAAP) << "WAF2 engine FAILED to 
initialize (probably failed to load signatures). Aborting!"; + waf2_proc_exit(); + return; + } + + dbgTrace(D_WAAP) << "WaapComponent::Impl::init() signatures loaded succesfully."; + + I_StaticResourcesHandler *static_resources = Singleton::Consume::by(); + static_resources->registerStaticResource("cp-ab.js", "/etc/cp/conf/waap/cp-ab.js"); + static_resources->registerStaticResource("cp-csrf.js", "/etc/cp/conf/waap/cp-csrf.js"); + + waapStateTable = Singleton::Consume::by(); +} + +// Called when component is shut down +void +WaapComponent::Impl::fini() +{ + dbgTrace(D_WAAP) << "WaapComponent::impl::fini(). Shutting down waap engine before exiting..."; + unregisterListener(); + waf2_proc_exit(); +} + +std::string +WaapComponent::Impl::getListenerName() const +{ + return "waap application"; +} + +// Start request (called before headers arrive). However, the method and URL path is known at this stage. +// Should return pending_response to hold the data (not send to upstream) +EventVerdict +WaapComponent::Impl::respond(const NewHttpTransactionEvent &event) +{ + dbgTrace(D_WAAP) << " * \e[32mNGEN_EVENT: NewTransactionEvent\e[0m"; + + if (waapStateTable->hasState()) { + dbgWarning(D_WAAP) << " * \e[31 -- NewTransactionEvent called twice on same entry \e[0m"; + return drop_response; + } + + I_WaapAssetStatesManager* pWaapAssetStatesManager = + Singleton::Consume::by(); + std::shared_ptr pCurrentWaapAssetState = pWaapAssetStatesManager->getWaapAssetStateGlobal(); + + if (!pCurrentWaapAssetState || pCurrentWaapAssetState->getSignatures()->fail()) + { + dbgTrace(D_WAAP) << "WaapComponent::Impl::UponEvent(NewTransactionEvent): couldn't get WaapAssetState ..."; + return drop_response; + } + + dbgTrace(D_WAAP) << "WaapComponent::Impl::UponEvent(NewTransactionEvent): creating state..."; + if(!waapStateTable->createState(pCurrentWaapAssetState)) { + dbgWarning(D_WAAP) << " * \e[31 -- NewTransactionEvent failed to create new state in table\e[0m"; + return drop_response; + } + + if (!waapStateTable->hasState()) { + dbgWarning(D_WAAP) << " * \e[31 -- NewTransactionEvent state was created but still missing \e[0m"; + return drop_response; + } + + IWaf2Transaction& waf2Transaction = waapStateTable->getState(); + + // Assign unique numeric index to this transaction + waf2Transaction.setIndex(transactionsCount++); + + std::string uri = event.getURI(); + std::string httpMethodStr = event.getHttpMethod(); + + dbgTrace(D_WAAP) << "start Transaction: " << httpMethodStr << " " << uri << " (REQUEST)"; + + // See below.. 
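+    // The flags below record which streaming calls were already issued to the
+    // waf2 engine for this transaction: whether the request body push started,
+    // whether end_response_hdrs() was called, and whether the response body
+    // push started. The request/response body and end-of-transaction handlers
+    // further down in this file check and update them.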
+ Waf2TransactionFlags &waf2TransactionFlags = waf2Transaction.getTransactionFlags(); + waf2TransactionFlags.requestDataPushStarted = false; + waf2TransactionFlags.endResponseHeadersCalled = false; + waf2TransactionFlags.responseDataPushStarted = false; + + waf2Transaction.start(); + + char sourceIpStr[INET_ADDRSTRLEN]; + inet_ntop(AF_INET, &(event.getSourceIP()), sourceIpStr, INET_ADDRSTRLEN); + + char listeningIpStr[INET_ADDRSTRLEN]; + inet_ntop(AF_INET, &(event.getListeningIP()), listeningIpStr, INET_ADDRSTRLEN); + + // Set envelope data + waf2Transaction.set_transaction_remote(sourceIpStr, event.getSourcePort()); + waf2Transaction.set_transaction_local(listeningIpStr, event.getListeningPort()); + + waf2Transaction.set_method(httpMethodStr.c_str()); + waf2Transaction.set_uri(uri.c_str()); + + // Tell waf2 API that request headers started + waf2Transaction.start_request_hdrs(); + + return pending_response; +} + +// Request headers coming +// Should return pending_response to hold the data (not send to upstream) +EventVerdict +WaapComponent::Impl::respond(const HttpRequestHeaderEvent &event) +{ + auto &header_name = event.getKey(); + auto &header_value = event.getValue(); + + dbgTrace(D_WAAP) + << " * \e[32mNGEN_EVENT: HttpHeaderRequest event: " + << string(header_name) + << ": " + << string(header_value) + << "\e[0m"; + + if (!waapStateTable->hasState()) { + dbgWarning(D_WAAP) + << " * \e[31mNGEN_EVENT: http_header - " + << "failed to get waf2 transaction, state not exist\e[0m"; + return drop_response; + } + IWaf2Transaction& waf2Transaction = waapStateTable->getState(); + + // Tell waf2 API that another request header arrived + waf2Transaction.add_request_hdr( + reinterpret_cast(header_name.data()), //const char * name // + header_name.size(), //int name_len // + reinterpret_cast(header_value.data()), //const char * value // + header_value.size() //int value_len // + ); + + EventVerdict verdict = pending_response; + + // Last header handled + if (event.isLastHeader()) { + waf2Transaction.end_request_hdrs(); + + verdict = waf2Transaction.getUserLimitVerdict(); + + if (verdict.getVerdict() == pending_response.getVerdict()) { + // waapDecision returns one of these verdicts: accept, drop, pending + // PENDING verdict (also called INSPECT by ngen core) will be returned if the waap engine wants to also + // inspect response. + verdict = waapDecisionAfterHeaders(waf2Transaction); + } + + } + + // Delete state before returning any verdict which is not pending + if ((verdict.getVerdict() != pending_response.getVerdict()) && waapStateTable->hasState()) { + finishTransaction(waf2Transaction); + } + + return verdict; +} + +// Request body pieces coming. 
+// Should return pending_response to hold the data (not send to upstream) +EventVerdict +WaapComponent::Impl::respond(const HttpRequestBodyEvent &event) +{ + dbgTrace(D_WAAP) << " * \e[32mNGEN_EVENT: HttpBodyRequest data buffer event\e[0m"; + + if (!waapStateTable->hasState()) { + dbgWarning(D_WAAP) << + " * \e[31mNGEN_EVENT: data buffer - failed to get waf2 transaction, state not exist\e[0m"; + return drop_response; + } + + IWaf2Transaction& waf2Transaction = waapStateTable->getState(); + Waf2TransactionFlags &waf2TransactionFlags = waf2Transaction.getTransactionFlags(); + + // Do this only once (on first request body data packet) + if (!waf2TransactionFlags.requestDataPushStarted) { + dbgTrace(D_WAAP) << "first request body packet"; + waf2Transaction.start_request_body(); + waf2TransactionFlags.requestDataPushStarted = true; + } + + // Push the request data chunk to the waf2 engine + const char *dataBuf = (const char*)event.getData().data(); + size_t dataBufLen = event.getData().size(); + + waf2Transaction.add_request_body_chunk(dataBuf, dataBufLen); + + ngx_http_cp_verdict_e verdict = waf2Transaction.getUserLimitVerdict(); + if (verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT) { + finishTransaction(waf2Transaction); + } + + return EventVerdict(verdict); +} + +// Called when request ends and response starts. +// For WAAP its time to decide and return either "accept_response" or "drop_response" +EventVerdict +WaapComponent::Impl::respond(const EndRequestEvent &) +{ + dbgTrace(D_WAAP) << " * \e[32mNGEN_EVENT: endRequest event\e[0m"; + + if (!waapStateTable->hasState()) { + dbgWarning(D_WAAP) + << "* \e[31mNGEN_EVENT: endRequest - failed to get waf2 transaction, state does not exist\e[0m"; + return drop_response; + } + + IWaf2Transaction& waf2Transaction = waapStateTable->getState(); + Waf2TransactionFlags &waf2TransactionFlags = waf2Transaction.getTransactionFlags(); + + // Do not forget to tell waf2 engine that data ended (if we started request_body above...) + if (waf2TransactionFlags.requestDataPushStarted) { + waf2Transaction.end_request_body(); + waf2TransactionFlags.requestDataPushStarted = false; + } + + // Tell waf2 engine that request stage is finished + waf2Transaction.end_request(); + + // waapDecision returns one of these verdicts: accept, drop, pending + // PENDING verdict (also called INSPECT by ngen core) will be returned if the waap engine wants to also inspect + // response. + EventVerdict verdict = waapDecision(waf2Transaction); + + // Delete state before returning any verdict which is not pending + if (verdict.getVerdict() != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT && + waapStateTable->hasState() + ) { + finishTransaction(waf2Transaction); + } + + return verdict; +} + +EventVerdict +WaapComponent::Impl::respond(const ResponseCodeEvent &event) +{ + dbgTrace(D_WAAP) + << " * \e[32mNGEN_EVENT: ResponseCodeTransactionEvent event code = " + << event.getResponseCode() + << "\e[0m"; + + if (!waapStateTable->hasState()) { + dbgWarning(D_WAAP) + << " * \e[31mNGEN_EVENT: ResponseCodeTransactionEvent - failed to get waf2 transaction, " + << "state does not exist\e[0m"; + return drop_response; + } + + IWaf2Transaction& waf2Transaction = waapStateTable->getState(); + + // TODO:: extract HTTP version from attachment? 
+ static const int http_version = 0x11; + + // Tell waf2 API that response starts + waf2Transaction.start_response(event.getResponseCode(), http_version); + + EventVerdict verdict = pending_response; + + // Set drop verdict if waap engine decides to drop response. + if (!waf2Transaction.decideResponse()) { + dbgTrace(D_WAAP) << " * \e[32m ResponseCodeTransactionEvent: decideResponse: DROP\e[0m"; + verdict = drop_response; + } else if (!waf2Transaction.shouldInspectResponse()) { + // Set accept verdict if waap engine no more interested in response + dbgTrace(D_WAAP) << " * \e[32m ResponseCodeTransactionEvent: shouldInspectResponse==false: ACCEPT\e[0m"; + verdict = accept_response; + } else { + // Tell waf2 API that response headers start + waf2Transaction.start_response_hdrs(); + } + + // Delete state before returning any verdict which is not pending + if (verdict.getVerdict() != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT && + verdict.getVerdict() != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT && + waapStateTable->hasState() + ) { + finishTransaction(waf2Transaction); + } + + return verdict; +} + +EventVerdict +WaapComponent::Impl::respond(const HttpResponseHeaderEvent &event) +{ + auto &header_name = event.getKey(); + auto &header_value = event.getValue(); + + dbgTrace(D_WAAP) + << " * \e[32mNGEN_EVENT: HttpHeaderResponse event: " + << string(header_name) + << ": " + << string(header_value) + << "\e[0m"; + + if (!waapStateTable->hasState()) { + dbgWarning(D_WAAP) + << " * \e[31mNGEN_EVENT: HttpHeaderResponse - " + << "failed to get waf2 transaction, state does not exist\e[0m"; + return drop_response; + } + + IWaf2Transaction& waf2Transaction = waapStateTable->getState(); + + // Send response header to the engine + waf2Transaction.add_response_hdr( + reinterpret_cast(header_name.data()), + header_name.size(), + reinterpret_cast(header_value.data()), + header_value.size() + ); + + ngx_http_cp_verdict_e verdict = pending_response.getVerdict(); + HttpHeaderModification modifications; + bool isSecurityHeadersInjected = false; + + if (waf2Transaction.shouldInjectSecurityHeaders()) { + dbgTrace(D_WAAP) << " * \e[32m HttpHeaderResponse: Trying to inject Security Headers\e[0m"; + if (event.isLastHeader()) { + dbgTrace(D_WAAP) << " * \e[32m HttpHeaderResponse: Injecting Security Headers\e[0m"; + std::vector> injectHeaderStr; + waf2Transaction.handleSecurityHeadersInjection(injectHeaderStr); + for (auto header : injectHeaderStr) { + dbgTrace(D_WAAP) << " * \e[32m HttpHeaderResponse: Injecting Security Header. Header name: \e[0m" << + header.first << " Header value: " << header.second; + Buffer headerValue(header.second); + HeaderKey headerName(header.first); + Maybe result = modifications.appendHeader(std::move(headerName), std::move(headerValue)); + if (!result.ok()) { + dbgWarning(D_WAAP) + << "Failed to inject (Security header) buffer in requested position. Buffer: " + << header.second + << ", position: " + << 0 + << ". Error: " + << result.getErr(); + } + } + isSecurityHeadersInjected = true; + verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT; + } + } + + if (waf2Transaction.shouldInjectCSRF()) { + if (event.isLastHeader()) { + std::string injectStr; + waf2Transaction.handleCsrfHeaderInjection(injectStr); + Buffer injected_buffer(injectStr); + HeaderKey setCookie("Set-Cookie"); + Maybe result = modifications.appendHeader(std::move(setCookie), std::move(injected_buffer)); + if (!result.ok()) { + dbgWarning(D_WAAP) + << "Failed to inject (CSRF header) buffer in requested position. 
Buffer: " + << injectStr + << ", position: " + << 0 + << ". Error: " + << result.getErr(); + } + verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT; + } + } + + // Set drop verdict if waap engine decides to drop response. + if (!waf2Transaction.decideResponse()) { + dbgTrace(D_WAAP) << " * \e[32m HttpHeaderResponse: decideResponse: DROP\e[0m"; + verdict = drop_response.getVerdict(); + } else if (!waf2Transaction.shouldInspectResponse()) { + // Set accept verdict if waap engine no more interested in response + dbgTrace(D_WAAP) << " * \e[32m HttpHeaderResponse: shouldInspectResponse==false: ACCEPT\e[0m"; + verdict = accept_response.getVerdict(); + } + + if (waf2Transaction.shouldInjectSecurityHeaders() && isSecurityHeadersInjected && + verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT + ) { + // disable should inject security headers after injection to avoid response body scanning when it's unnecessary + waf2Transaction.disableShouldInjectSecurityHeaders(); + } + + // Delete state before returning any verdict which is not pending + if (verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT && + verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT && + waapStateTable->hasState() + ) { + finishTransaction(waf2Transaction); + } + + return EventVerdict(move(modifications.getModificationList()), verdict); +} + +EventVerdict +WaapComponent::Impl::respond(const HttpResponseBodyEvent &event) +{ + dbgTrace(D_WAAP) << " * \e[32mNGEN_EVENT: HttpBodyResponse data buffer event\e[0m"; + + if (!waapStateTable->hasState()) { + dbgWarning(D_WAAP) << + " * \e[31mNGEN_EVENT: HttpBodyResponse - failed to get waf2 transaction, state does not exist\e[0m"; + return drop_response; + } + + IWaf2Transaction& waf2Transaction = waapStateTable->getState(); + Waf2TransactionFlags &waf2TransactionFlags = waf2Transaction.getTransactionFlags(); + + // Do this only once (on first response body data packet) + if (!waf2TransactionFlags.responseDataPushStarted) { + dbgTrace(D_WAAP) << "first response body packet"; + + // Tell waf2 transaction that all response headers are finished + if (!waf2TransactionFlags.endResponseHeadersCalled) { + // At this point, all response headers were received + waf2Transaction.end_response_hdrs(); + waf2TransactionFlags.endResponseHeadersCalled = true; + } + + waf2Transaction.start_response_body(); + waf2TransactionFlags.responseDataPushStarted = true; + } + + dbgTrace(D_WAAP) << "HttpBodyResponse"; + + + // Push the response data chunk to the waf2 engine + const char *dataBuf = (const char*)event.getData().data(); + size_t dataBufLen = event.getData().size(); + + waf2Transaction.add_response_body_chunk(dataBuf, dataBufLen); + + ngx_http_cp_verdict_e verdict = pending_response.getVerdict(); + HttpBodyModification modifications; + + // Set drop verdict if waap engine decides to drop response. + if (!waf2Transaction.decideResponse()) { + dbgTrace(D_WAAP) << " * \e[32m HttpBodyResponse: decideResponse: DROP\e[0m"; + verdict = drop_response.getVerdict(); + } + + if (verdict == pending_response.getVerdict() && + waf2Transaction.shouldInjectResponse() && + !event.isLastChunk() + ) { + // Inject if needed. Note that this is only reasonable to do if there was no DROP decision above + + std::string injectionStr; + int pos = 0; + + if(waf2Transaction.isHtmlType(dataBuf, dataBufLen)) { + bool htmlTagFound = waf2Transaction.findHtmlTagToInject( + dataBuf, + dataBufLen, + pos + ); + + pos = htmlTagFound ? 
pos + 1 : 0; + + waf2Transaction.completeInjectionResponseBody(injectionStr); + dbgTrace(D_WAAP) << "HttpBodyResponse(): injectionStr: " << injectionStr << " pos: " << pos + << " URI: " << waf2Transaction.getUriStr(); + Maybe result = modifications.inject(pos, Buffer(injectionStr)); + if(!result.ok()) { + dbgWarning(D_WAAP) << "HttpBodyResponse(): Scripts injection failed!"; + } + verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT; + } else { + // This response body is not considered "HTML" - disable injection + dbgTrace(D_WAAP) << "HttpBodyResponse(): the response body is not HTML - disabling injection"; + + // Note that this operation might affect the shouldInspectResponse() state if injection was the only reason + // to inspect the response body. + waf2Transaction.clearAllInjectionReasons(); + } + } + + if (verdict == pending_response.getVerdict() && !waf2Transaction.shouldInspectResponse()) { + // Set accept verdict if waap engine no more interested in response + dbgTrace(D_WAAP) << " * \e[32m HttpBodyResponse: shouldInspectResponse==false: ACCEPT\e[0m"; + verdict = accept_response.getVerdict(); + } + + // Delete state before returning any verdict which is not pending or inject + if (verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT && + verdict != ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT && + waapStateTable->hasState() + ) { + finishTransaction(waf2Transaction); + } + + return EventVerdict(modifications.getModificationList(), verdict); +} + +EventVerdict +WaapComponent::Impl::respond(const EndTransactionEvent &) +{ + if (!waapStateTable->hasState()) { + dbgWarning(D_WAAP) << + " * \e[31mNGEN_EVENT: endTransaction - failed to get waf2 transaction, state does not exist\e[0m"; + return EventVerdict(drop_response); + } + + IWaf2Transaction& waf2Transaction = waapStateTable->getState(); + Waf2TransactionFlags &waf2TransactionFlags = waf2Transaction.getTransactionFlags(); + + // Do not forget to tell waf2 engine that response headers ended. + if (!waf2TransactionFlags.endResponseHeadersCalled) { + waf2Transaction.end_response_hdrs(); + waf2TransactionFlags.endResponseHeadersCalled = true; + } else if (waf2TransactionFlags.responseDataPushStarted) { + // Do not forget to tell waf2 engine that data ended (if we started response_body above...) + waf2Transaction.end_response_body(); + waf2TransactionFlags.responseDataPushStarted = false; + } + + waf2Transaction.end_response(); + + EventVerdict verdict = accept_response; + + // Set drop verdict if waap engine decides to drop response. + if (!waf2Transaction.decideResponse()) { + dbgTrace(D_WAAP) << " * \e[32m endTransaction: decideResponse: DROP\e[0m"; + verdict = drop_response; + } else if (!waf2Transaction.shouldInspectResponse()) { + // Set accept verdict if waap engine no more interested in response + dbgTrace(D_WAAP) << " * \e[32m endTransaction: shouldInspectResponse==false: ACCEPT\e[0m"; + } + + // This is our last chance to delete the state. The verdict must not be "PENDING" at this point. 
+ finishTransaction(waf2Transaction); + return verdict; +} + +EventVerdict +WaapComponent::Impl::waapDecisionAfterHeaders(IWaf2Transaction& waf2Transaction) +{ + dbgTrace(D_WAAP) << "waapDecisionAfterHeaders() started"; + if (waf2Transaction.decideAfterHeaders()) { + dbgTrace(D_WAAP) << "WaapComponent::Impl::waapDecisionAfterHeaders(): returning DROP response."; + return drop_response; + } + return pending_response; +} + +EventVerdict +WaapComponent::Impl::waapDecision(IWaf2Transaction& waf2Transaction) +{ + dbgTrace(D_WAAP) << "waapDecision() started"; + + static const int mode = 1; + AnalysisResult result; + int verdictCode = waf2Transaction.decideFinal(mode, result); + + EventVerdict verdict = accept_response; + + // Note: verdict is 0 if nothing suspicious, 1 if should block, or negative if error occurred + // (in the latter case - decision to drop/pass should be governed by failopen setting) + if (verdictCode == 0) { + waf2Transaction.checkShouldInject(); + + if (waf2Transaction.shouldInspectResponse()) { + verdict = pending_response; + } else { + dbgTrace(D_WAAP) << "WAF VERDICT: " << verdictCode << " (\e[32mPASS\e[0m)"; + verdict = accept_response; + } + } else { + std::string message = (verdictCode == 1) ? " (\e[31mBLOCK\e[0m)" : " (\e[31mERROR!\e[0m)"; + dbgTrace(D_WAAP) << "WAF VERDICT: " << verdictCode << message; + verdict = drop_response; + } + + dbgTrace(D_WAAP) << "waapDecision() finished"; + return verdict; +} + +void +WaapComponent::Impl::finishTransaction(IWaf2Transaction& waf2Transaction) +{ + waf2Transaction.collectFoundPatterns(); + waf2Transaction.sendLog(); + ReportIS::Severity severity = waf2Transaction.computeEventSeverityFromDecision(); + validateFirstRequestForAsset(severity); + waapStateTable->deleteState(); +} + +void WaapComponent::Impl::validateFirstRequestForAsset(const ReportIS::Severity severity) +{ + static BasicRuleConfig empty_rule; + const BasicRuleConfig& rule_by_ctx = getConfigurationWithDefault( + empty_rule, + "rulebase", + "rulesConfig"); + if (rule_by_ctx.getAssetId().empty()) { + dbgWarning(D_WAAP) << "Failed to get rule base from context. Skipping sending notification."; + return; + } + + if (m_seen_assets_id.find(rule_by_ctx.getAssetId()) == m_seen_assets_id.end()) { + dbgTrace(D_WAAP) << "First request for asset id: '" << rule_by_ctx.getAssetId() + << "'. 
Sending notification"; + m_seen_assets_id.insert(rule_by_ctx.getAssetId()); + sendNotificationForFirstRequest( + rule_by_ctx.getAssetId(), + rule_by_ctx.getAssetName(), + severity + ); + } +} + +void WaapComponent::Impl::sendNotificationForFirstRequest( + const std::string& asset_id, + const std::string& asset_name, + const ReportIS::Severity severity +) +{ + dbgTrace(D_WAAP) << "Got first request for asset: '" << asset_name<< "' sending a notification"; + FirstRequestNotificationObject obj(asset_id, asset_name, severity); + I_MainLoop* mainloop = Singleton::Consume::by(); + mainloop->addOneTimeRoutine( + I_MainLoop::RoutineType::System, + [asset_name, obj]() + { + ReportMessaging( + "First request for asset '" + asset_name + "'", + ReportIS::AudienceTeam::WAAP, + obj, + ReportIS::Tags::WAF, + ReportIS::Notification::FIRST_REQUEST_FOR_ASSET + ); + }, + "Report WAAP asset first request inspection" + ); +} + +bool +WaapComponent::Impl::waf2_proc_start(const std::string& sigsFname, const std::string& scoresFname) +{ + // WAAP uses libxml library, which requires process-level initialization when process starts +#if 0 // TODO:: silence the error messages printed by libxml2 + xmlSetGenericErrorFunc(NULL, (xmlGenericErrorFunc)my_libxml2_err); + xmlSetStructuredErrorFunc(NULL, my_libxml_structured_err); +#endif + ::xmlInitParser(); + + return + Singleton::Consume::by()->initBasicWaapSigs(sigsFname, scoresFname); +} + +void +WaapComponent::Impl::waf2_proc_exit() +{ + ::xmlCleanupParser(); +} diff --git a/components/security_apps/waap/waap_component_impl.h b/components/security_apps/waap/waap_component_impl.h new file mode 100755 index 0000000..85ca964 --- /dev/null +++ b/components/security_apps/waap/waap_component_impl.h @@ -0,0 +1,88 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
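// ---------------------------------------------------------------------------
// Illustrative sketch (editorial addition, not part of the patch): a distilled
// version of the response-header injection pattern used by the
// HttpResponseHeaderEvent handler in waap_component_impl.cc above. The header
// name and value are made-up placeholders; the calls mirror the ones used there.
#include <utility>

EventVerdict injectResponseHeaderSketch()
{
    HttpHeaderModification modifications;

    // Queue a header to be appended to the response by the attachment
    auto result = modifications.appendHeader(HeaderKey("X-Example"), Buffer("1"));
    if (!result.ok()) {
        // The handler above only logs a warning when appendHeader() fails
    }

    // Returning INJECT together with the modification list asks the core to
    // apply the queued changes; the handler above keeps the transaction state
    // alive for this verdict so the response can still be inspected.
    return EventVerdict(
        std::move(modifications.getModificationList()),
        ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT
    );
}
// ------------------------- end of illustrative sketch -----------------------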
+ +#ifndef __WAAP_COMPONENT_IMPL_H__ +#define __WAAP_COMPONENT_IMPL_H__ + +#include "waap.h" +#include "config.h" +#include "table_opaque.h" +#include "i_transaction.h" +#include "waap_clib/DeepAnalyzer.h" +#include "waap_clib/WaapAssetState.h" +#include "waap_clib/WaapAssetStatesManager.h" +#include "reputation_features_agg.h" + +// WaapComponent implementation +class WaapComponent::Impl + : + public Listener, + public Listener, + public Listener, + public Listener, + public Listener, + public Listener, + public Listener, + public Listener +{ +public: + explicit Impl(); + virtual ~Impl(); + + void init(); + void fini(); + + std::string getListenerName() const override; + + EventVerdict respond(const NewHttpTransactionEvent &event) override; + EventVerdict respond(const HttpRequestHeaderEvent &event) override; + EventVerdict respond(const HttpRequestBodyEvent &event) override; + EventVerdict respond(const EndRequestEvent &) override; + + EventVerdict respond(const ResponseCodeEvent &event) override; + EventVerdict respond(const HttpResponseHeaderEvent &event) override; + EventVerdict respond(const HttpResponseBodyEvent &event) override; + EventVerdict respond(const EndTransactionEvent &) override; + +private: + void init(const std::string &sigs_file_path, const std::string &sigs_scores_file_path); + + EventVerdict waapDecisionAfterHeaders(IWaf2Transaction& waf2Transaction); + EventVerdict waapDecision(IWaf2Transaction& waf2Transaction); + void finishTransaction(IWaf2Transaction& waf2Transaction); + + bool waf2_proc_start(const std::string& sigsFname, const std::string& scoresFname); + void waf2_proc_exit(); + void validateFirstRequestForAsset(const ReportIS::Severity severity); + void sendNotificationForFirstRequest( + const std::string& asset_id, + const std::string& asset_name, + const ReportIS::Severity severity + ); + + EventVerdict pending_response; + EventVerdict accept_response; + EventVerdict drop_response; + WaapMetricWrapper waap_metric; + AssetsMetric assets_metric; + I_Table* waapStateTable; + // Count of transactions processed by this WaapComponent instance + uint64_t transactionsCount; + // instance of singleton classes + DeepAnalyzer deepAnalyzer; + WaapAssetStatesManager waapAssetStatesManager; + ReputationFeaturesAgg reputationAggregator; + std::unordered_set m_seen_assets_id; +}; + +#endif // __WAAP_COMPONENT_IMPL_H__ diff --git a/components/signal_handler/CMakeLists.txt b/components/signal_handler/CMakeLists.txt new file mode 100755 index 0000000..fe4a687 --- /dev/null +++ b/components/signal_handler/CMakeLists.txt @@ -0,0 +1,2 @@ +add_library(signal_handler signal_handler.cc) +target_compile_definitions(signal_handler PUBLIC) diff --git a/components/signal_handler/signal_handler.cc b/components/signal_handler/signal_handler.cc new file mode 100755 index 0000000..ab1f2b3 --- /dev/null +++ b/components/signal_handler/signal_handler.cc @@ -0,0 +1,479 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "signal_handler.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(use_unwind) +#if defined(alpine) || defined(PLATFORM_x86) +#define UNW_LOCAL_ONLY +#include +#include +#endif // defined(alpine) || defined(PLATFORM_x86) +#endif // defined(use_unwind) + +#if !defined(alpine) && !defined(arm32_musl) +#include +#endif // not alpine && not arm32_musl + +#include "debug.h" +#include "common.h" +#include "config.h" +#include "mainloop.h" +#include "report/log_rest.h" +#include "report/report.h" +#include "agent_core_utilities.h" + +#define stack_trace_max_len 64 + +using namespace std; +using namespace ReportIS; + +USE_DEBUG_FLAG(D_SIGNAL_HANDLER); + +class SignalHandler::Impl : Singleton::Provide::From +{ +public: + void + fini() + { + if (out_trace_file_fd != -1) close(out_trace_file_fd); + out_trace_file_fd = -1; + } + + void + dumpErrorReport(const string &error) override + { + ofstream export_error_file(trace_file_path); + export_error_file << error << endl; + } + + void + init() + { + addSignalHandlerRoutine(); + addReloadConfigurationRoutine(); + } + + Maybe> + getBacktrace() override + { + vector symbols; +#if defined(_UCLIBC_) || defined(arm32_musl) || !defined(use_unwind) + return genError("Could not print any backtrace entries using uclibc (backtrace_symbols not supported)"); +#else // not (_UCLIBC_ || arm32_musl) +#if defined(alpine) || defined(PLATFORM_x86) + unw_cursor_t cursor; + unw_context_t context; + + // Initialize cursor to current frame for local unwinding. + unw_getcontext(&context); + unw_init_local(&cursor, &context); + + char buf[1024]; + // Unwind frames one by one, going up the frame stack. + while (unw_step(&cursor) > 0) { + unw_word_t offset, pc; + unw_get_reg(&cursor, UNW_REG_IP, &pc); + if (pc == 0) break; + + char sym[256]; + if (unw_get_proc_name(&cursor, sym, sizeof(sym), &offset) == 0) { + char *nameptr = sym; + int status; + char *demangled = abi::__cxa_demangle(sym, nullptr, nullptr, &status); + if (status == 0) { + nameptr = demangled; + } + snprintf(buf, sizeof(buf), "(%s+0x%lx) [0x%lx]", nameptr, offset, pc); + free(demangled); + } else { + snprintf(buf, sizeof(buf), "-- error: unable to obtain symbol name for this frame"); + } + symbols.push_back(buf); + } +#else // not (alpine || PLATFORM_x86) + auto stack_trace_list = vector(stack_trace_max_len); + + uint trace_len = backtrace(stack_trace_list.data(), stack_trace_list.size()); + if (trace_len == 0 ) return genError("Could not find any backtrace entries in the current process"); + + char **trace_prints = backtrace_symbols(stack_trace_list.data(), trace_len); + if (trace_prints == nullptr) return genError("Could not convert backtrace entries to symbol strings"); + + symbols.reserve(trace_len); + for (uint i = 0; i < trace_len; ++i) { + symbols.emplace_back(trace_prints[i]); + } + free(trace_prints); + +#endif // alpine || PLATFORM_x86 +#endif // _UCLIBC_ + return symbols; + } + +private: + void + addSignalHandlerRoutine() + { + Singleton::Consume::by()->addOneTimeRoutine( + I_MainLoop::RoutineType::Offline, + [this] () + { + string service_name = "Unnamed Nano Service"; + if (Singleton::exists()) { + auto name = Singleton::Consume::by()->get( + "Service Name" + ); + if (name.ok()) service_name = *name; + } + string service_underscore_name = service_name; + replace(service_underscore_name.begin(), service_underscore_name.end(), ' ', '_'); + + trace_file_path = 
getConfigurationWithDefault( + "/var/log/nano_agent/trace_export_files/" + service_underscore_name + "_trace_file.dbg", + "SignalHandler", + "outputFilePath" + ); + ifstream in_trace_file(trace_file_path); + + if (in_trace_file.peek() != ifstream::traits_type::eof()) { + stringstream buffer; + buffer << in_trace_file.rdbuf(); + if (buffer.str() != " " && buffer.str() != "\n") { + const boost::regex reg("(\\+0x[A-z0-9]*)|( [0x[A-z0-9]*])"); + const string fixed_trace_str = NGEN::Regex::regexReplace( + __FILE__, + __LINE__, + buffer.str(), + reg, + "" + ); + generateLog(fixed_trace_str); + dbgInfo(D_SIGNAL_HANDLER) + << "Service started after crash ERROR: " + << endl + << fixed_trace_str; + } + } + + in_trace_file.close(); + + ofstream out_trace_file(trace_file_path, ofstream::out | ofstream::trunc); + out_trace_file.close(); + + setSignalHanlders(); + }, + "Send crash trace report" + ); + } + + void + generateLog(const string &trace_file_data) + { + auto i_time = Singleton::Consume::by(); + chrono::microseconds curr_time = i_time!=nullptr ? i_time->getWalltime() : chrono::microseconds(0); + + if (!Singleton::exists()) return; + + AudienceTeam audience_team = AudienceTeam::NONE; + if (Singleton::exists()) { + auto team = Singleton::Consume::by()->get("Audience Team"); + if (team.ok()) audience_team = *team; + } + + set tags; + Report message_to_fog( + "Nano service startup after crash", + curr_time, + Type::EVENT, + Level::LOG, + LogLevel::ERROR, + Audience::INTERNAL, + audience_team, + Severity::HIGH, + Priority::HIGH, + chrono::seconds(0), + LogField("agentId", Singleton::Consume::by()->getAgentId()), + tags, + Tags::INFORMATIONAL + ); + + message_to_fog << LogField("eventMessage", trace_file_data); + + string fog_signalHandler_uri = getConfigurationWithDefault( + "/api/v1/agents/events", + "SignalHandler", + "fogSignalHandlerURI" + ); + + LogRest signalHandler_client_rest(message_to_fog); + + Singleton::Consume::by()->sendObjectWithPersistence( + signalHandler_client_rest, + I_Messaging::Method::POST, + fog_signalHandler_uri, + "", + true, + MessageTypeTag::REPORT + ); + + dbgInfo(D_SIGNAL_HANDLER) << "Sent crash log to fog" << endl; + } + + void + setSignalHanlders() + { + out_trace_file_fd = open(trace_file_path.c_str(), O_CREAT | O_RDWR, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); + int errno_copy = errno; + if (out_trace_file_fd < 0) { + dbgError(D_SIGNAL_HANDLER) + << "Failed to open signal handler backtrace file. 
Path: " + << trace_file_path + << ", Errno: " + << to_string(errno_copy) + << ", Error: " + << strerror(errno_copy); + } + + static const vector signals = { + SIGABRT, + SIGKILL, + SIGQUIT, + SIGINT, + SIGTERM, + SIGSEGV, + SIGBUS, + SIGILL, + SIGFPE, + SIGPIPE, + SIGUSR2 + }; + + for (int sig : signals) { + signal(sig, signalHandlerCB); + } + } + +// LCOV_EXCL_START Reason: Cannot crash unitest or send signal during execution + static bool + writeData(const char *data, uint32_t len) + { + uint32_t bytes_sent = 0; + while (bytes_sent < len) { + int res = write(out_trace_file_fd, data + bytes_sent, len - bytes_sent); + if (res <= 0) return false; + + bytes_sent += res; + } + + return true; + } + + static void + signalHandlerCB(int _signal) + { + const char *signal_name = ""; + char signal_num[3]; + switch(_signal) { + case SIGABRT: { + signal_name = "SIGABRT"; + fini_signal_flag = true; + return; + } + case SIGKILL: { + signal_name = "SIGKILL"; + fini_signal_flag = true; + return; + } + case SIGQUIT: { + signal_name = "SIGQUIT"; + fini_signal_flag = true; + return; + } + case SIGINT: { + signal_name = "SIGINT"; + fini_signal_flag = true; + return; + } + case SIGTERM: { + signal_name = "SIGTERM"; + fini_signal_flag = true; + return; + } + case SIGSEGV: { + signal_name = "SIGSEGV"; + break; + } + case SIGBUS: { + signal_name = "SIGBUS"; + break; + } + case SIGILL: { + signal_name = "SIGILL"; + break; + } + case SIGFPE: { + signal_name = "SIGFPE"; + break; + } + case SIGPIPE: { + signal_name = "SIGPIPE"; + return; + } + case SIGUSR2: { + reload_settings_flag = true; + return; + } + } + + if (out_trace_file_fd == -1) exit(_signal); + + for (uint i = 0; i < sizeof(signal_num); ++i) { + uint placement = sizeof(signal_num) - 1 - i; + signal_num[placement] = _signal%10 + '0'; + _signal /= 10; + } + const char *signal_error_prefix = "Caught signal "; + writeData(signal_error_prefix, strlen(signal_error_prefix)); + writeData(signal_num, sizeof(signal_num)); + if (strlen(signal_name)) { + const char *open_braces = "("; + writeData(open_braces, strlen(open_braces)); + writeData(signal_name, strlen(signal_name)); + const char *close_braces = ")\n"; + writeData(close_braces, strlen(close_braces)); + } + + printStackTrace(); + + close(out_trace_file_fd); + out_trace_file_fd = -1; + + exit(_signal); + } + + static void + printStackTrace() + { + if (out_trace_file_fd == -1) return; + + const char *stack_trace_title = "Stack trace:\n"; + writeData(stack_trace_title, strlen(stack_trace_title)); + +#if defined(_UCLIBC_) || defined(arm32_musl) || !defined(use_unwind) + const char *uclibc_error = + "Could not print any backtrace entries using uclibc (backtrace_symbols not supported)\n"; + writeData(uclibc_error, strlen(uclibc_error)); + return; +#else // not (_UCLIBC_ || arm32_musl) +#ifdef alpine + unw_cursor_t cursor; + unw_context_t uc; + unw_getcontext(&uc); + if (unw_init_local(&cursor, &uc) < 0) { + const char *unw_init_local_error = "unw_init_local failed!\n"; + writeData(unw_init_local_error, strlen(unw_init_local_error)); + return; + } + + char name[256]; + unw_word_t ip, sp, off; + for (uint i = 0 ; i < stack_trace_max_len ; i++) { + unw_get_reg(&cursor, UNW_REG_IP, &ip); + unw_get_reg(&cursor, UNW_REG_SP, &sp); + + if (unw_get_proc_name(&cursor, name, sizeof(name), &off) == 0) { + const char *open_braces = "<"; + writeData(open_braces, strlen(open_braces)); + writeData(name, strlen(name)); + const char *close_braces = ">\n"; + writeData(close_braces, strlen(close_braces)); + } + + + if 
(unw_step(&cursor) <= 0) return; + } +#else // not alpine + void *stack_trace_list[stack_trace_max_len]; + + uint actual_trace_len = backtrace(stack_trace_list, stack_trace_max_len); + if (actual_trace_len == 0 ) { + const char *no_bt_found_error = "Could not find any backtrace entries in the current process\n"; + writeData(no_bt_found_error, strlen(no_bt_found_error)); + return; + } + + backtrace_symbols_fd(stack_trace_list, actual_trace_len, out_trace_file_fd); +#endif // alpine +#endif // _UCLIBC_ || arm32_musl + } +// LCOV_EXCL_STOP + + void + addReloadConfigurationRoutine() + { + Singleton::Consume::by()->addOneTimeRoutine( + I_MainLoop::RoutineType::System, + [&] () + { + while (true) { + if (reload_settings_flag == true) { + reload_settings_flag = false; + if (reloadConfiguration("")) { + dbgInfo(D_SIGNAL_HANDLER) << "Reloaded configuration"; + } else { + dbgWarning(D_SIGNAL_HANDLER) << "Failed to reload configuration"; + } + } + Singleton::Consume::by()->yield(chrono::seconds(1)); + } + }, + "Reload configuration signal handler" + ); + } + + static string trace_file_path; + static bool reload_settings_flag; + static int out_trace_file_fd; +}; + +string SignalHandler::Impl::trace_file_path; +bool SignalHandler::Impl::reload_settings_flag = false; +int SignalHandler::Impl::out_trace_file_fd = -1; + +SignalHandler::SignalHandler() : Component("SignalHandler"), pimpl(make_unique()) {} +SignalHandler::~SignalHandler() {} + +void SignalHandler::init() { pimpl->init(); } + +void +SignalHandler::preload() +{ + registerExpectedConfiguration("SignalHandler", "outputFilePath"); + registerExpectedConfiguration("SignalHandler", "fogSignalHandlerURI"); +} diff --git a/components/utils/CMakeLists.txt b/components/utils/CMakeLists.txt new file mode 100644 index 0000000..3863672 --- /dev/null +++ b/components/utils/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(ip_utilities) +add_subdirectory(pm) diff --git a/components/utils/ip_utilities/CMakeLists.txt b/components/utils/ip_utilities/CMakeLists.txt new file mode 100755 index 0000000..a6dd7e2 --- /dev/null +++ b/components/utils/ip_utilities/CMakeLists.txt @@ -0,0 +1 @@ +add_library(ip_utilities ip_utilities.cc) diff --git a/components/utils/ip_utilities/ip_utilities.cc b/components/utils/ip_utilities/ip_utilities.cc new file mode 100644 index 0000000..c467516 --- /dev/null +++ b/components/utils/ip_utilities/ip_utilities.cc @@ -0,0 +1,347 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
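// ---------------------------------------------------------------------------
// Illustrative sketch (editorial addition, not part of the patch): the mask
// arithmetic that createRangeFromCidrV4() below is built around, shown on a
// hard-coded example. The function and variable names here are made up; the
// shift assumes 1 <= mask_size <= 32.
#include <cstdint>

void cidrMaskSketch()
{
    // "192.168.4.0/22" packed into a 32-bit integer
    const uint32_t address = (192u << 24) | (168u << 16) | (4u << 8) | 0u;
    const int mask_size = 22;

    const uint32_t mask = 0xffffffffu << (32 - mask_size);  // 0xfffffc00

    const uint32_t range_start = address & mask;   // 192.168.4.0
    const uint32_t range_end   = address | ~mask;  // 192.168.7.255
    (void)range_start;
    (void)range_end;
}
// ------------------------- end of illustrative sketch -----------------------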
+ +#include "ip_utilities.h" + +#include "connkey.h" + +using namespace std; + +// LCOV_EXCL_START Reason: temporary until we add relevant UT until 07/10 +bool +operator<(const IpAddress &this_ip_addr, const IpAddress &other_ip_addr) +{ + if (this_ip_addr.ip_type < other_ip_addr.ip_type) return true; + if (this_ip_addr.ip_type == IP_VERSION_4) return this_ip_addr.addr4_t.s_addr < other_ip_addr.addr4_t.s_addr; + return memcmp(&this_ip_addr.addr6_t, &other_ip_addr.addr6_t, sizeof(struct in6_addr)) < 0; +} + +bool +operator==(const IpAddress &this_ip_addr, const IpAddress &other_ip_addr) +{ + if (this_ip_addr.ip_type != other_ip_addr.ip_type) return false; + if (this_ip_addr.ip_type == IP_VERSION_4) return this_ip_addr.addr4_t.s_addr == other_ip_addr.addr4_t.s_addr; + return memcmp(&this_ip_addr.addr6_t, &other_ip_addr.addr6_t, sizeof(struct in6_addr)) == 0; +} +// LCOV_EXCL_STOP + +Maybe> +extractAddressAndMaskSize(const string &cidr) +{ + size_t delimiter_pos = cidr.find("/"); + if (delimiter_pos == string::npos) return genError("provided value is not in CIDR notation: " + cidr); + string address = cidr.substr(0, delimiter_pos); + string mask_size = cidr.substr(delimiter_pos + 1, cidr.size() - delimiter_pos - 1); + try { + return make_pair(address, stoi(mask_size)); + } catch(...) { + return genError("failed to cast provided value to integer: " + mask_size); + } + return genError("failed to parse provided string as a CIDR: " + cidr); +} + +template +pair +applyMaskOnAddress(const vector &oct, Integer mask) +{ + Integer start = (oct[0] | oct[1] | oct[2] | oct[3]) & mask; + Integer end = (oct[0] | oct[1] | oct[2] | oct[3]) | (~mask); + return make_pair(start, end); +} + +Maybe> +createRangeFromCidrV4(const pair &cidr_values) +{ + string address = cidr_values.first; + int mask_size = cidr_values.second; + vector oct; + for (int i=3; i>=0; i--) { + size_t delimiter_pos = address.find("."); + string oct_str = address.substr(0, delimiter_pos); + try { + oct.push_back(static_cast(stoul(oct_str)) << (i * 8)); + } catch (...) { + return genError("failed to cast provided value to integer: " + oct_str); + } + if ((i == 0) != (delimiter_pos == string::npos)) { + return genError("provided value is not in a correct ipv4 structure: " + makeSeparatedStr(oct, ".")); + } + address.erase(0, delimiter_pos + 1); + } + + unsigned int mask = 0xffffffff; + mask <<= (32 - mask_size); + + unsigned int start, end; + tie(start, end) = applyMaskOnAddress(oct, mask); + + auto construct_address = [](unsigned int value) + { + stringstream address_stream; + for (int i = 3; i >= 0; i--) { + address_stream << ((value >> (i * 8)) & 0xff) << (i > 0 ? "." 
: ""); + } + return address_stream.str(); + }; + + return make_pair(construct_address(start), construct_address(end)); +} + +// LCOV_EXCL_START Reason: it is tested, but for some reason coverage doesn't catch it +Maybe> +createRangeFromCidrV6(const pair &cidr_values) +{ + string address = cidr_values.first; + int mask_size = cidr_values.second; + // fill compressed zeros + struct in6_addr v6; + if (inet_pton(AF_INET6, address.c_str(), &v6) == -1) { + return genError("faild to convert provided value to ipv6: " + address); + }; + struct in6_addr *addr = &v6; + vector oct_from_str; + for (int i=0; i<15; i+=2){ + char hex[8]; + unsigned int num; + sprintf(hex, "%02x%02x", static_cast(addr->s6_addr[i]), static_cast(addr->s6_addr[i+1])); + sscanf(hex, "%x", &num); + oct_from_str.push_back(num); + } + + uint64_t mask = 0xffffffffffffffff; + function construct_address; + int oct_offset; + + if (mask_size > 64) { + oct_offset = 7; + mask <<= (128 - mask_size); + construct_address = [oct_from_str](uint64_t value, bool is_start) + { + (void)is_start; + stringstream address_stream; + for (int i = 0; i < 4; i++) { + address_stream << hex << oct_from_str[i] << ":"; + } + for (int i = 3; i >= 0; i--) { + address_stream << hex << (unsigned int)((value >> (i * 16)) & 0xffff) << (i > 0 ? ":" : ""); + } + return address_stream.str(); + }; + } else { + oct_offset = 3; + mask <<= (64 - mask_size); + construct_address = [](uint64_t value, bool is_start) + { + stringstream address_stream; + for (int i = 3; i >= 0; i--) { + address_stream << hex << (unsigned int)((value >> (i * 16)) & 0xffff) << ":"; + } + address_stream << (is_start ? "0:0:0:0" : "ffff:ffff:ffff:ffff"); + return address_stream.str(); + }; + } + + uint64_t start, end; + vector oct; + for (int i = 3; i >= 0; i--) { + oct.push_back(static_cast(oct_from_str[oct_offset - i]) << (i * 16)); + } + tie(start, end) = applyMaskOnAddress(oct, mask); + return make_pair( + construct_address(start, true), + construct_address(end, false) + ); +} +// LCOV_EXCL_STOP + +namespace IPUtilities { +Maybe> +getInterfaceIPs() +{ + struct ifaddrs *if_addr_list = nullptr; + if (getifaddrs(&if_addr_list) == -1) { + return genError(string("Failed to get interface IP's. 
Error: ") + strerror(errno)); + } + + map interface_ips; + for (struct ifaddrs *if_addr = if_addr_list; if_addr != nullptr; if_addr = if_addr->ifa_next) { + if (if_addr->ifa_addr == nullptr) continue; + if (if_addr->ifa_addr->sa_family != AF_INET && if_addr->ifa_addr->sa_family != AF_INET6) continue; + + char address_buffer[INET6_ADDRSTRLEN] = { '\0' }; + if (if_addr->ifa_addr->sa_family == AF_INET) { + struct in_addr addr = reinterpret_cast(if_addr->ifa_addr)->sin_addr; + inet_ntop(AF_INET, &addr, address_buffer, INET_ADDRSTRLEN); + string address_string(address_buffer); + if (address_string.find("127.0.0.1") != string::npos) continue; + + IpAddress ip_addr; + ip_addr.ip_type = IP_VERSION_4; + memcpy(&ip_addr.ip.ipv4, &addr, sizeof(ip_addr.ip.ipv4)); + interface_ips.emplace(ip_addr, address_string); + } else { + struct in6_addr addr = reinterpret_cast(if_addr->ifa_addr)->sin6_addr; + inet_ntop(AF_INET6, &addr, address_buffer, INET6_ADDRSTRLEN); + string address_string(address_buffer); + if (address_string.find("::1") != string::npos) continue; + + IpAddress ip_addr; + ip_addr.ip_type = IP_VERSION_6; + memcpy(&ip_addr.ip.ipv6, &addr, sizeof(ip_addr.ip.ipv6)); + interface_ips.emplace(ip_addr, address_string); + } + } + + if (if_addr_list != nullptr) freeifaddrs(if_addr_list); + + return interface_ips; +} + +Maybe> +createRangeFromCidr(const string &cidr) +{ + auto cidr_values = extractAddressAndMaskSize(cidr); + if (!cidr_values.ok()) return genError("Failed to create range from Cidr: " + cidr_values.getErr()); + return cidr.find(".") != string::npos + ? createRangeFromCidrV4(cidr_values.unpack()) + : createRangeFromCidrV6(cidr_values.unpack()); +} + +bool +isIpAddrInRange(const IPRange &rule_ip_range, const IpAddress &ip_addr) +{ + IpAddress min_ip = rule_ip_range.start; + IpAddress max_ip = rule_ip_range.end; + + if (ip_addr.ip_type == IP_VERSION_4) { + if (max_ip.ip_type != IP_VERSION_4) return 0; + return + memcmp(&ip_addr.ip.ipv4, &min_ip.ip.ipv4, sizeof(struct in_addr)) >= 0 && + memcmp(&ip_addr.ip.ipv4, &max_ip.ip.ipv4, sizeof(struct in_addr)) <= 0; + } + if (ip_addr.ip_type == IP_VERSION_6) { + if (max_ip.ip_type != IP_VERSION_6) return 0; + return + memcmp(&ip_addr.ip.ipv6, &min_ip.ip.ipv6, sizeof(struct in6_addr)) >= 0 && + memcmp(&ip_addr.ip.ipv6, &max_ip.ip.ipv6, sizeof(struct in6_addr)) <= 0; + } + return 0; +} + +string +IpAddrToString(const IpAddress &address) +{ + if (address.ip_type == IP_VERSION_6) { + char ip_str[INET6_ADDRSTRLEN]; + struct sockaddr_in6 sa6; + + sa6.sin6_family = AF_INET6; + sa6.sin6_addr = address.ip.ipv6; + + inet_ntop(AF_INET6, &(sa6.sin6_addr), ip_str, INET6_ADDRSTRLEN); + return move(string(ip_str)); + } + + char ip_str[INET_ADDRSTRLEN]; + struct sockaddr_in sa; + + sa.sin_family = AF_INET; + sa.sin_addr = address.ip.ipv4; + + inet_ntop(AF_INET, &(sa.sin_addr), ip_str, INET_ADDRSTRLEN); + return move(string(ip_str)); +} + +IpAddress +createIpFromString(const string &ip_string) +{ + IpAddress res_address = {0, IP_VERSION_ANY}; + if (ip_string == "any") return res_address; + auto maybe_ip_addr = IPAddr::createIPAddr(ip_string); + if (!maybe_ip_addr.ok()) { + return res_address; + } + IPAddr ip_addr = maybe_ip_addr.unpack(); + res_address.ip_type = static_cast(ip_addr.getType()); + if (ip_addr.getType() == IPType::V4) { + res_address.addr4_t = ip_addr.getIPv4(); + } else { + res_address.addr6_t = ip_addr.getIPv6(); + } + return res_address; +} + +IpAddress +ConvertToIpAddress(const IPAddr &addr) { + IpAddress address; + switch (addr.getType()) { + case 
IPType::UNINITIALIZED: { + address.addr4_t = {0}; + address.ip_type = IP_VERSION_ANY; + break; + } + case IPType::V4: { + address.addr4_t = addr.getIPv4(); // reference to a local variable ? + address.ip_type = IP_VERSION_4; + break; + } + case IPType::V6: { + address.addr6_t = addr.getIPv6(); + address.ip_type = IP_VERSION_6; + break; + } + default: + dbgAssert(false) << "Unsupported IP type"; + } + return address; +} + +IpAttrFromString::operator Maybe() +{ + auto ip_addr = IPAddr::createIPAddr(data); + if (!ip_addr.ok()) return genError("Could not create IP address. Error: " + ip_addr.getErr()); + return ConvertToIpAddress(ip_addr.unpackMove()); +} + +IpAttrFromString::operator Maybe() +{ + int value; + try { + value = stoi(data); + } catch (...) { + return genError("provided value is not a legal number. Value: " + data); + } + + if (value > static_cast(UINT8_MAX) || value < 0) { + return genError("provided value is not a legal ip protocol number. Value: " + data); + } + + return static_cast(value); +} + +IpAttrFromString::operator Maybe() +{ + int value; + try { + value = stoi(data); + } catch (...) { + return genError("provided value is not a legal number. Value: " + data); + } + + if (value > static_cast(UINT16_MAX) || value < 0) { + return genError("provided value is not a legal port number. Value: " + data); + } + + return static_cast(value); +} +} diff --git a/components/utils/pm/CMakeLists.txt b/components/utils/pm/CMakeLists.txt new file mode 100644 index 0000000..a953975 --- /dev/null +++ b/components/utils/pm/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(pm general_adaptor.cc kiss_hash.cc kiss_patterns.cc kiss_pm_stats.cc kiss_thin_nfa.cc kiss_thin_nfa_analyze.cc kiss_thin_nfa_build.cc kiss_thin_nfa_compile.cc pm_adaptor.cc pm_hook.cc debugpm.cc) + +add_subdirectory(pm_ut) diff --git a/components/utils/pm/debugpm.cc b/components/utils/pm/debugpm.cc new file mode 100755 index 0000000..8e2fbdb --- /dev/null +++ b/components/utils/pm/debugpm.cc @@ -0,0 +1,63 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "debug.h" +#include +#include +#include +#include +#include +#include +#include "sasal.h" + +using namespace std; + +USE_DEBUG_FLAG(D_PM); + +SASAL_START // Multiple Pattern Matcher +// Helper class for printing C format string +class CFmtPrinter +{ +public: + char buf[500]; // Length limit. + explicit CFmtPrinter(const char *fmt, va_list va) + { + vsnprintf(buf, sizeof(buf), fmt, va); + buf[sizeof(buf)-1] = '\0'; + } +}; + +static ostream & +operator<<(ostream &os, const CFmtPrinter &p) +{ + return os << p.buf; +} + +void +panicCFmt(const string &func, uint line, const char *fmt, ...) +{ + va_list va; + va_start(va, fmt); + Debug("PM", func, line).getStreamAggr() << CFmtPrinter(fmt, va); + va_end(va); +} + +void +debugPrtCFmt(const char *func, uint line, const char *fmt, ...) 
+{ + va_list va; + va_start(va, fmt); + Debug("PM", func, line, Debug::DebugLevel::TRACE, D_PM).getStreamAggr() << CFmtPrinter(fmt, va); + va_end(va); +} +SASAL_END diff --git a/components/utils/pm/debugpm.h b/components/utils/pm/debugpm.h new file mode 100755 index 0000000..9d4ce48 --- /dev/null +++ b/components/utils/pm/debugpm.h @@ -0,0 +1,39 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __DEBUGPM_H__ +#define __DEBUGPM_H__ + +#include +#include + +#include "debug.h" + +// Assertions + +// C-style BC functions (e.g. for PM). +void debugPrtCFmt(const std::string &func, uint line, const char *fmt, ...) __attribute__((format (printf, 3, 4))); +#define debugCFmt(flag, fmt, ...) \ + if (!Debug::isDebugSet(flag)) \ + { \ + } else \ + debugPrtCFmt(__FUNCTION__, __LINE__, fmt, ##__VA_ARGS__) + +void panicCFmt(const std::string &func, uint line, const char *fmt, ...) __attribute__((format (printf, 3, 4))); +#define assertCondCFmt(cond, fmt, ...) \ + if (CP_LIKELY(cond)) \ + { \ + } else \ + panicCFmt(__FUNCTION__, __LINE__, fmt, ##__VA_ARGS__) + +#endif // __DEBUGPM_H__ diff --git a/components/utils/pm/general_adaptor.cc b/components/utils/pm/general_adaptor.cc new file mode 100644 index 0000000..33939b4 --- /dev/null +++ b/components/utils/pm/general_adaptor.cc @@ -0,0 +1,65 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
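The debugpm glue above bridges the pattern matcher's C-style printf formatting into the agent's stream-based Debug facility: CFmtPrinter renders the format into a fixed buffer with vsnprintf, and an operator<< feeds that buffer to the output stream. Before the general_adaptor shims below, here is a minimal standalone sketch of the same idea; the class name FmtBuf and the buffer size are illustrative, not the component's own.

#include <cstdarg>
#include <cstdio>
#include <iostream>

// Renders a printf-style format into a bounded buffer so it can be streamed.
class FmtBuf
{
public:
    char buf[256];
    FmtBuf(const char *fmt, ...)
    {
        va_list va;
        va_start(va, fmt);
        vsnprintf(buf, sizeof(buf), fmt, va); // truncates long messages and NUL-terminates
        va_end(va);
    }
};

static std::ostream &
operator<<(std::ostream &os, const FmtBuf &p)
{
    return os << p.buf;
}

int
main()
{
    // Legacy C callers keep their format strings; the sink is a C++ stream.
    std::cerr << FmtBuf("pattern %d matched at offset %u", 42, 17u) << '\n';
    return 0;
}

This is why debugCFmt can stay printf-flavored for the ported kernel-style pattern-matcher code while still honoring the debug flags checked through Debug::isDebugSet.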
+ +#include "general_adaptor.h" +#include +#include "sasal.h" + +SASAL_START // Multiple Pattern Matcher +void fw_kfree(void *addr, CP_MAYBE_UNUSED size_t size, CP_MAYBE_UNUSED const char *caller) +{ + free(addr); + return; +} + +void *fw_kmalloc(size_t size, CP_MAYBE_UNUSED const char *caller) +{ + return malloc(size); +} + +void *fw_kmalloc_ex(size_t size, CP_MAYBE_UNUSED const char *caller, CP_MAYBE_UNUSED int flags) +{ + return malloc(size); +} + +void *fw_kmalloc_sleep(size_t size, CP_MAYBE_UNUSED const char *caller) +{ + return malloc(size); +} + +void *kiss_pmglob_memory_kmalloc_ex_( + u_int size, + CP_MAYBE_UNUSED const char *caller, + CP_MAYBE_UNUSED int flags, + CP_MAYBE_UNUSED const char *file, + CP_MAYBE_UNUSED int line) +{ + return malloc(size); +} + +void *kiss_pmglob_memory_kmalloc_ex(u_int size, CP_MAYBE_UNUSED const char *caller, CP_MAYBE_UNUSED int flags) +{ + return malloc(size); +} + +void *kiss_pmglob_memory_kmalloc(u_int size, CP_MAYBE_UNUSED const char *caller) +{ + return malloc(size); +} + +void kiss_pmglob_memory_kfree(void *addr, CP_MAYBE_UNUSED size_t size, CP_MAYBE_UNUSED const char *caller) +{ + free(addr); + return; +} +SASAL_END diff --git a/components/utils/pm/general_adaptor.h b/components/utils/pm/general_adaptor.h new file mode 100644 index 0000000..f489782 --- /dev/null +++ b/components/utils/pm/general_adaptor.h @@ -0,0 +1,80 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __general_adaptor_h__ +#define __general_adaptor_h__ + +#include "stdint.h" +#include +#include +#include +#include "common.h" +#include "debug.h" +#include "debugpm.h" + +typedef unsigned int u_int; +typedef unsigned char u_char; +typedef unsigned short u_short; +typedef bool boolean_cpt; +typedef bool BOOL; +typedef uint64_t u_int64; + +#define TRUE true +#define FALSE false + +#define CP_INLINE inline +#define CP_CACHELINE_SIZE 64 +#define CP_CACHELINE_ALIGNED __attribute__((__aligned__(CP_CACHELINE_SIZE))) +#define CP_MAYBE_UNUSED CP_UNUSED + +#define KISS_OFFSETOF(str_name, field_name) offsetof(str_name, field_name) + +#define KISS_ASSERT_COMPILE_TIME(cond) extern int __kiss_assert_dummy[(cond)?1:-1] + +#define KISS_ASSERT_PERF(...) +#define ASSERT_LOCKED +#define kiss_multik_this_instance_num (0) + +typedef enum { + KISS_ERROR = -1, + KISS_OK = 0 +} kiss_ret_val; + +#define KISS_ASSERT assertCondCFmt +#define KISS_ASSERT_CRASH assertCondCFmt + +#define FW_KMEM_SLEEP 0 + +#define herror(a, b, ...) 
+ +#define kdprintf printf +#define kdprintf_no_prefix printf + + +void fw_kfree(void *addr, size_t size, const char *caller); +void *fw_kmalloc(size_t size, const char *caller); +void *fw_kmalloc_ex(size_t size, const char *caller, int flags); +void *fw_kmalloc_sleep(size_t size, const char *caller); +void *kiss_pmglob_memory_kmalloc_ex_(u_int size, const char *caller, int flags, const char *file, int line); +void *kiss_pmglob_memory_kmalloc_ex(u_int size, const char *caller, int flags); +void *kiss_pmglob_memory_kmalloc(u_int size, const char *caller); +void kiss_pmglob_memory_kfree(void *addr, size_t size, const char *caller); + +#define ENUM_SET_FLAG(e, flag) e = static_cast(((u_int)e | (u_int)flag)) +#define ENUM_UNSET_FLAG(e, flag) e = static_cast(((u_int)e & (~(u_int)flag))) + +#define MAX(x, y) (((x)>(y))?(x):(y)) +#define MIN(x, y) (((x)<(y))?(x):(y)) + + +#endif // __general_adaptor_h__ diff --git a/components/utils/pm/kiss_hash.cc b/components/utils/pm/kiss_hash.cc new file mode 100644 index 0000000..13755ff --- /dev/null +++ b/components/utils/pm/kiss_hash.cc @@ -0,0 +1,1783 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "general_adaptor.h" +#include "sasal.h" + +SASAL_START // Multiple Pattern Matcher +#ifndef KERNEL + +#if defined(VXWORKS) || defined(freebsd) || defined (solaris2) +#include +#elif defined (ARMON) +#include +// #include +#elif defined(SYS_PSOS) +#include +#else +#include +#endif + +#endif // KERNEL + +#include "kiss_hash.h" + +// we provide hash_craete, hash_create_with_destr function implementations +#undef kiss_hash_create +#undef kiss_hash_create_with_destr + +#ifndef NULL +#define NULL 0 +#endif +#ifndef HASH_DEFAULT_SIZE +#define HASH_DEFAULT_SIZE 1024 +#endif + +static void KissHashResizeMode_reset_parameters(KissHashResizeMode *resize_mode); +static void KissHashResizeMode_set_default_parameters(KissHashResizeMode *resize_mode); +static int KissHashResizeMode_verify_method(const KissHashResizeMode *resize_mode); +static int KissHashResizeMode_verify_value(const KissHashResizeMode *resize_mode); +static int KissHashResizeMode_verify_trigger_ratio(const KissHashResizeMode *resize_mode); +static int KissHashResizeMode_verify_direction(const KissHashResizeMode *resize_mode); +static int kiss_hash_do_resize(kiss_hash_t hp, const KissHashResizeMode *resize_mode); +static boolean_cpt kiss_hash_resize_check_for_resize(kiss_hash_t hp, KissHashResizeDirection direction); + +struct _KissHashResizeMode { + u_int max_size; + KissHashResizeMethod method; + KissHashResizeDirection direction; + u_int value; + u_int trigger_ratio; + HashResizeCb_t cb; +}; + +struct kiss_hash { + char *file; // source file name where hash was created + int line; // line number where hash was created + int hash_index; + struct kiss_hashent **h_tab; + int h_nelements; + int h_sz; + int h_orig_size; + KissHashResizeMode h_resize_mode; + int h_dodestr; + uintptr_t (*h_keyfunc)(const void *key, void *info); + int (*h_keycmp)(const void *key1, const void 
*key2, void *info); + void (*h_val_destr)(void *val); + void (*h_key_destr)(void *key); + void *h_info; +}; + +struct kiss_hash_iter { + kiss_hash_t hash; + int slot; + struct kiss_hashent *pntr; +}; + +// pointers to created hash tables +#ifdef HASH_DEBUG +#define MAX_HASHES 1024 +static kiss_hash_t kiss_hashes[MAX_HASHES]; +static int kiss_curr_hash = 0; +static int do_kiss_hash_debug = 0; +static int kiss_checked_env = 0; + +static void dbg_register_hash(kiss_hash_t hash, int line, const char *file) { + + hash->line = line; + hash->file = (char *) file; + + if (kiss_checked_env && !do_kiss_hash_debug) + return; + + if (!kiss_checked_env) { + if (getenv("CP_HASH_DEBUG")) + do_kiss_hash_debug = 1; + kiss_checked_env = 1; + } + + MtBeginCS(); + if (kiss_curr_hash != MAX_HASHES) { + kiss_hashes[kiss_curr_hash] = hash; + hash->hash_index = kiss_curr_hash++; + } + else + hash->hash_index = -1; + MtEndCS(); + +} + +static void dbg_deregister_hash(kiss_hash_t hash) { + + if ((kiss_checked_env && !do_kiss_hash_debug) || !kiss_checked_env) + return; + + if (hash->hash_index == -1) + return; + + MtBeginCS(); + if (kiss_curr_hash > 0) { + kiss_curr_hash--; + kiss_hashes[hash->hash_index] = kiss_hashes[kiss_curr_hash]; + } + MtEndCS(); + +} + + +// @name Hash functions +// +// +// +// Debug single hash. + +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// This function calculates and prints the following statistics: +// \begin{itemize} +// \item hash pointer +// \item file name and line number where \Ref{hash_create} or \Ref{hash_create_with_destr} was called +// \item number of elements in hash +// \item number of slots in hash - hash size +// \item size in bytes of memory occupied by hash maintenance structures +// \item slot utilzation - percentage of hash slots used to store elements +// \item average number of lookups - average length of lists of elements +// \end{itemize} +// +// @param hash pointer to hash +// @return size in bytes of memory occupied by hash maintenance structures. +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp, +// kiss_hash_debug_all +int kiss_hash_debug(kiss_hash_t hash) { + + int slot, used_slots=0; + double slot_utilization, avg_lookup; + int kiss_hash_size = hash->h_sz; + int mem_size = + sizeof(struct kiss_hash) + + kiss_hash_size * sizeof(struct kiss_hashent*) + + hash->h_nelements*sizeof(struct kiss_hashent); + + // check slot utilization + for (slot=0; sloth_tab[slot]) used_slots++; + } + + slot_utilization = (double) used_slots/kiss_hash_size; + avg_lookup = (used_slots) ? (double) hash->h_nelements/used_slots : 0; + + error( + 0, + 0, + "hash 0x%x created in %s:%d : nelements=%d kiss_hash_size=%d " + "mem_size=%d slot_utilzation %f (%d of %d) avg lookup %f", + hash, + hash->file, + hash->line, + hash->h_nelements, + kiss_hash_size, + mem_size, + slot_utilization, + used_slots, + kiss_hash_size, + avg_lookup + ); + + return mem_size; +} + + +// Debug single hash. +// +// \begin{description} +// \item[ MT-Level: ] Safe +// \end{description} +// +// Iterates a list of all hash tables craeted in the current process and +// for each hash calls function \Ref{kiss_hash_debug}. 
In addition the total +// memory usage of hash maintenance structures is printed. +// +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp, kiss_hash_debug +void kiss_hash_debug_all() { + int i, total_mem_size=0; + + if ((kiss_checked_env && !do_kiss_hash_debug) || !kiss_checked_env) return; + + MtBeginCS(); + error(0, 0, "[%s] Hash Debug", ltime(0)); + for (i=0; ih_dodestr) { + H_DESTR(hp->h_val_destr, he->val); + H_DESTR(hp->h_key_destr, he->key); + } +} + + +// Number of hash elements. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hash hash table +// @return number of elements +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +int +kiss_hash_nelements(kiss_hash_t hash) +{ + return hash->h_nelements; +} + + +// Hash size. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hash hash table +// @return Size of hash +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +int +kiss_hash_get_size(kiss_hash_t hash) +{ + return hash->h_sz + 1; // In hash create we decrease by 1 the application size +} + + +// Hash orignal size. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hash hash table +// @return Original size of hash (for hash tables with dynamic size). 
+// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +int kiss_hash_orig_size(kiss_hash_t hash) +{ + return hash->h_orig_size + 1; // In hash create we decrease by 1 the application size +} + +static kiss_hash_t +kiss_hash_create_do(size_t hsize, + hkeyfunc_t keyfunc, + hcmpfunc_t keycmp, + void *info, + boolean_cpt do_kernel_sleep) +{ + extern int roundtwo(int n); + + kiss_hash_t hp; + + if (hsize == 0) hsize = HASH_DEFAULT_SIZE; + + hsize = roundtwo(hsize); + + if(do_kernel_sleep) { + hp = (kiss_hash_t)kiss_pmglob_memory_kmalloc_ex_( + sizeof(struct kiss_hash), + "kiss_hash_create", + FW_KMEM_SLEEP, + __FILE__, + __LINE__ + ); + } else { + hp = (kiss_hash_t)kiss_pmglob_memory_kmalloc((sizeof(struct kiss_hash)), "kiss_hash_create"); + } + + if (hp == NULL) return NULL; + memset(hp, 0, sizeof(struct kiss_hash)); + + if(do_kernel_sleep) { + hp->h_tab = (struct kiss_hashent **)kiss_pmglob_memory_kmalloc_ex_( + (sizeof(struct kiss_hashent *)) * hsize, + "kiss_hash_create", + FW_KMEM_SLEEP, + __FILE__, + __LINE__ + ); + } else { + hp->h_tab = (struct kiss_hashent **)kiss_pmglob_memory_kmalloc( + (sizeof(struct kiss_hashent *)) * hsize, + "kiss_hash_create" + ); + } + + if (!hp->h_tab) { + kiss_pmglob_memory_kfree(hp, sizeof(struct kiss_hash), "kiss_hash_create"); + return NULL; + } + + memset(hp->h_tab, 0, (sizeof(struct kiss_hashent *) * hsize)); + + hp->h_sz = hsize - 1; + hp->h_orig_size = hp->h_sz; + hp->hash_index = -1; + hp->h_keyfunc = keyfunc == (hkeyfunc_t)kiss_hash_intvalue ? 0 : keyfunc; + hp->h_keycmp = keycmp == (hcmpfunc_t)kiss_hash_intcmp ? 0 : keycmp; + hp->h_val_destr = hp->h_key_destr = NULL; + hp->h_info = info; + hp->h_nelements = 0; + hp->h_dodestr = 0; + KissHashResizeMode_reset_parameters(&(hp->h_resize_mode)); + + return hp; +} + + +// Create Hash Table. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hsize hash size +// @param keyfunc key hashing function +// @param keycmp key comparison function +// @param info// opaque for use of {\tt keyfunc} and {\tt keycmp} functions. +// @return hash pointer or NULL upon failure. +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +// note: to create a large hash in kernel mode using kmalloc_sleep call function _kiss_hash_create_with_ksleep or +// Macro _kiss_hash_create_with_ksleep. 
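One sizing detail of kiss_hash_create_do above is worth spelling out before the wrapper definitions that follow: the requested size is rounded up to a power of two by roundtwo(), and h_sz stores size - 1 so that slot selection in kiss_hash_find_hashent is a bit mask ("& h_sz") rather than a modulo. The snippet below is a small self-contained sketch of that arithmetic under those assumptions; roundUpPow2 and the sample key are illustrative stand-ins, not the library's own helpers.

#include <cstdint>
#include <iostream>

// Stand-in for the library's roundtwo(): round n up to the next power of two.
static size_t
roundUpPow2(size_t n)
{
    size_t p = 1;
    while (p < n) p <<= 1;
    return p;
}

int
main()
{
    size_t requested = 1000;
    size_t size = roundUpPow2(requested); // 1024
    size_t mask = size - 1;               // plays the role of h_sz
    uintptr_t key = 0x12345678;           // arbitrary pointer-like key
    // Default key mixing used when no key function is supplied: key + (key >> 16), then mask.
    size_t slot = (key + (key >> 16)) & mask;
    std::cout << "table size " << size << ", slot " << slot << "\n";
    return 0;
}

The flip side, noted in the resize code further down, is that resized tables are rounded the same way, so a computed target size can land back on the current size and the resize is then skipped.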
+kiss_hash_t +kiss_hash_create(size_t hsize, + hkeyfunc_t keyfunc, + hcmpfunc_t keycmp, + void *info) +{ + return kiss_hash_create_do(hsize, keyfunc, keycmp, info, FALSE); +} + +kiss_hash_t +_kiss_hash_create(size_t hsize, + hkeyfunc_t keyfunc, + hcmpfunc_t keycmp, + void *info, CP_MAYBE_UNUSED const char *file, CP_MAYBE_UNUSED int line) +{ + kiss_hash_t hash; + hash = kiss_hash_create_do(hsize, keyfunc, keycmp, info, FALSE); + +#ifdef HASH_DEBUG + if (hash) dbg_register_hash(hash, line, file); +#endif + + return hash; +} + +kiss_hash_t +_kiss_hash_create_with_ksleep(size_t hsize, + hkeyfunc_t keyfunc, + hcmpfunc_t keycmp, + void *info, CP_MAYBE_UNUSED const char *file, CP_MAYBE_UNUSED int line) +{ + kiss_hash_t hash; + hash = kiss_hash_create_do(hsize, keyfunc, keycmp, info, TRUE); + +#ifdef HASH_DEBUG + if (hash) dbg_register_hash(hash, line, file); +#endif + + return hash; +} + + +// Set destructor for hash elements. +// +// Keys and values destructors are called for every hash key-value pair +// when the hash is destroyed. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash +// @param val_destr destructor for the values of the hash +// @param key_destr destructor for the keys of the hash +// @return hash pointer +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +kiss_hash_t +kiss_hash_set_destr(kiss_hash_t hp, freefunc_t val_destr, freefunc_t key_destr) +{ + if (!hp) return NULL; + + hp->h_val_destr = val_destr; + hp->h_key_destr = key_destr; + + return hp; +} + + +// This tells the hash to automatically call destructors when an entry gets +// deleted from the hash. Usually this is not the case! +// +// Enable hash element destruction. +// +// Hash is created with destruction of elements disabled by default. +// This function enables destruction upon a call to \ref{kiss_hash_destroy}. +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +void +kiss_hash_dodestr(kiss_hash_t hp) +{ + hp->h_dodestr=1; +} + + +// What's done must (have a way to) be undone. +// +// +// Disable hash element destruction. +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +void +kiss_hash_undo_destr(kiss_hash_t hp) +{ + hp->h_dodestr = 0; +} + + +// Create Hash Table with Destructor. 
+// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hsize hash size +// @param keyfunc key hashing function +// @param keycmp key comparison function +// @param val_destr destructor for the values of the hash +// @param key_destr destructor for the keys of the hash +// @param info// opaque for use of {\tt keyfunc} and {\tt keycmp} functions. +// @return hash pointer or NULL upon failure. +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +kiss_hash_t +kiss_hash_create_with_destr( + size_t hsize, + hkeyfunc_t keyfunc, + hcmpfunc_t keycmp, + freefunc_t val_destr, + freefunc_t key_destr, + void *info +) +{ + kiss_hash_t hp; + + if ((hp = kiss_hash_create(hsize, keyfunc, keycmp, info)) == NULL) return NULL; + + return kiss_hash_set_destr(hp, val_destr, key_destr); +} + +kiss_hash_t +_kiss_hash_create_with_destr( + size_t hsize, + hkeyfunc_t keyfunc, + hcmpfunc_t keycmp, + freefunc_t val_destr, + freefunc_t key_destr, + void *info, CP_MAYBE_UNUSED const char *file, + CP_MAYBE_UNUSED int line) +{ + kiss_hash_t hash; + hash = kiss_hash_create_with_destr(hsize, keyfunc, keycmp, val_destr, key_destr, info); + +#ifdef HASH_DEBUG + if (hash) dbg_register_hash(hash, line, file); +#endif + + return hash; +} + + +// Find hash entry. +// +// The next routine is used as an efficient but somewhat ugly interface for +// find/insert operation. What it does is to return an adrress of a pointer +// to a hashent structure containing the key/val pair if found. If not it +// returns the address of the pointer in which we can append the new val/pair +// thus avoiding an unnceccessary repeated search. We can check if key was +// found by checking whether the pointer is zero or not. This function is usually +// used with \Ref{kiss_hash_insert_at}. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash pointer +// @param key hash key +// @return hash entry +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +// +// @args (kiss_hash_t hp, const void *key) +// @type struct hashent ** +// @name kiss_hash_find_hashent. +struct kiss_hashent ** +kiss_hash_find_hashent(kiss_hash_t hp, const void *key) +{ + intptr_t slot = ((hp->h_keyfunc ? (*hp->h_keyfunc)(key, (hp)->h_info) : + ((intptr_t)key + ((intptr_t)key >> 16))) & (hp)->h_sz); + + struct kiss_hashent **pnt = hp->h_tab + slot; + struct kiss_hashent *he; + + if (hp->h_keycmp) { + for (he = *pnt; he != NULL; pnt = &(he->next), he = *pnt) { + if ((*hp->h_keycmp)(he->key, key, hp->h_info) == 0) return pnt; + } + } else { + for (he = *pnt; he != NULL; pnt = &(he->next), he = *pnt) { + if (he->key == key) return pnt; + } + } + + return pnt; +} + + +// Return address of the pointer to the value in the hash table. 
+// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash pointer +// @param key hash key +// @return hash entry +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +void ** +kiss_hash_findaddr(kiss_hash_t hp, const void *key) +{ + struct kiss_hashent **he = kiss_hash_find_hashent(hp, key); + + if (!*he) return NULL; + + return &((*he)->val); +} + + +// Insert hash element at specified position. +// This function should be used together with \Ref{kiss_hash_find_hashent} to insert +// the value in case it was not found at the hash. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash pointer +// @param key hash key +// @param key hash val +// @return 0 - upon failure or number of hash elements after insertion in case of success. +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +int +kiss_hash_insert_at(kiss_hash_t hp, void *key, void *val, struct kiss_hashent **hloc) +{ + struct kiss_hashent *he; + + he = (struct kiss_hashent *)kiss_pmglob_memory_kmalloc(sizeof(struct kiss_hashent), "kiss_hash_insert_at"); + + if (he == NULL) return 0; + + memset(he, 0, sizeof(struct kiss_hashent)); + + he->key = key; + he->val = val; + he->next = 0; + + *hloc = he; + hp->h_nelements++; + + if (kiss_hash_resize_check_for_resize(hp, KISS_HASH_SIZE_INCREASE) == TRUE) { + kiss_hash_do_resize(hp, &(hp->h_resize_mode)); + } + + return hp->h_nelements; +} + + +// Insert hash element. +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash pointer +// @param key hash key +// @param key hash val +// @return 0 - upon failure, positive number on success. +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +int +kiss_hash_insert(kiss_hash_t hp, void *key, void *val) +{ + struct kiss_hashent **hloc = kiss_hash_find_hashent(hp, key); + + if (*hloc) { + hent_destroy(hp, *hloc, 0); + (*hloc)->val = val; + (*hloc)->key = key; + return 1; + } + + return kiss_hash_insert_at(hp, key, val, hloc); +} + + +// Lookup hash value. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash pointer +// @param key hash key +// @return hash value or NULL upon failure. 
+// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +void * +kiss_hash_lookup(kiss_hash_t hp, const void *key) +{ + struct kiss_hashent **he = kiss_hash_find_hashent(hp, key); + + if (*he) return (*he)->val; + + return NULL; +} + + +// Lookup hash key. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash pointer +// @param key hash key that has a value equal to that of the key stored in the hash. +// @return hash key or NULL upon failure. +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +void * +kiss_hash_lookkey(kiss_hash_t hp, const void *key) +{ + struct kiss_hashent **he = kiss_hash_find_hashent(hp, key); + + if (*he) return (*he)->key; + + return NULL; +} + + +// Delete hash element. +// +// Delete hash element and return a value for the key. +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash pointer +// @param key hash key +// @return hash val or NULL upon failure. +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +void * +kiss_hash_delete(kiss_hash_t hp, const void *key) +{ + struct kiss_hashent **hloc = kiss_hash_find_hashent(hp, key); + struct kiss_hashent *he = *hloc; + + if (he) { + void *val = he->val; + *hloc = he->next; + hp->h_nelements--; + hent_destroy(hp, he, 0); + + kiss_pmglob_memory_kfree(he, sizeof(struct kiss_hashent), "kiss_hash_delete"); + + if (kiss_hash_resize_check_for_resize(hp, KISS_HASH_SIZE_DECREASE) == TRUE) + kiss_hash_do_resize(hp, &(hp->h_resize_mode)); + + return val; + } + + return NULL; +} + + +// Destroy hash. +// +// If destructor functions were defined in the call to \Ref{kiss_hash_create_with_destr} or \Ref{kiss_hash_set_destr}, +// function \Ref{kiss_hash_dodestr} must be called to enable element destruction. 
+// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash pointer +// @see kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp +void +kiss_hash_destroy(kiss_hash_t hp) +{ + int i; + struct kiss_hashent *he, *np; + + for (i = 0; i <= hp->h_sz; i++) { + for (he = hp->h_tab[i]; he != NULL; he = np) { + np = he->next; + hent_destroy(hp, he, 1); + kiss_pmglob_memory_kfree(he, sizeof(struct kiss_hashent), "kiss_hash_destory"); + } + } + + if (hp->h_tab) { + kiss_pmglob_memory_kfree(hp->h_tab, (sizeof(struct kiss_hashent *) * (hp->h_sz+1)), "kiss_hash_destroy"); + } + +#ifdef HASH_DEBUG + dbg_deregister_hash(hp); +#endif + + kiss_pmglob_memory_kfree(hp, sizeof(struct kiss_hash), "kiss_hash_destroy"); + return; +} + + +// @name Hash iteration +// +// Create hash iterator. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hp hash +// @return iterator object, or NULL upon failure. +// @see kiss_hash_iterator_create, kiss_hash_iterator_next, kiss_hash_iterator_next_key, kiss_hash_iterator_destroy +kiss_hash_iterator +kiss_hash_iterator_create(kiss_hash_t hp) +{ + kiss_hash_iterator hit = (kiss_hash_iterator)kiss_pmglob_memory_kmalloc( + sizeof (struct kiss_hash_iter), + "kiss_hash_iterator_create" + ); + + if (hit == NULL) return NULL; + + memset(hit, 0, sizeof (struct kiss_hash_iter)); + + hit->hash = hp; + hit->slot = 0; + hit->pntr = hit->hash->h_tab[0]; + + if (!hit->pntr) kiss_hash_iterator_next_ent(hit); + + return hit; +} + + +// Return next hash value. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hit hash iterator +// @return next hash value, or NULL upon failure. +// @see kiss_hash_iterator_create, kiss_hash_iterator_next, kiss_hash_iterator_next_key, kiss_hash_iterator_destroy +void* +kiss_hash_iterator_next(kiss_hash_iterator hit) +{ + struct kiss_hashent *hent; + void *output; + + if (!(hent = hit->pntr)) { + int slot = hit->slot + 1; + struct kiss_hashent ** htab = hit->hash->h_tab; + int sz = hit->hash->h_sz; + + while (slot <= sz && ! (hent = htab[slot])) { + slot++; + } + + hit->slot = slot; + if (slot > sz) return NULL; + } + + output = hent->val; + hit->pntr = hent->next; + + return output; +} + + +// Return next hash key. +// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hit hash iterator +// @return next hash key, or NULL upon failure. +// @see kiss_hash_iterator_create, kiss_hash_iterator_next, kiss_hash_iterator_next_key, kiss_hash_iterator_destroy +void* +kiss_hash_iterator_next_key(kiss_hash_iterator hit) +{ + struct kiss_hashent *hent; + void *output; + + if (!(hent = hit->pntr)) { + int slot = hit->slot + 1; + struct kiss_hashent ** htab = hit->hash->h_tab; + int sz = hit->hash->h_sz; + + while (slot <= sz && ! (hent=htab[slot])) + slot++; + + hit->slot = slot; + if (slot > sz) return NULL; + } + + output = hent->key; + hit->pntr = hent->next; + + return output; +} + + +// Destroy hash iterator. 
+// +// \begin{description} +// \item[ MT-Level: ] Reentrant +// \end{description} +// +// @param hit hash iterator +// @see kiss_hash_iterator_create, kiss_hash_iterator_next, kiss_hash_iterator_next_key, kiss_hash_iterator_destroy +void +kiss_hash_iterator_destroy (kiss_hash_iterator hit) +{ + kiss_pmglob_memory_kfree(hit, sizeof(struct kiss_hash_iter), "kiss_hash_iterator_destroy"); +} + + +int +kiss_hash_iterator_end(kiss_hash_iterator hit) +{ + return hit->slot == -1; +} + + +int +kiss_hash_iterator_next_ent(kiss_hash_iterator hit) +{ + struct kiss_hashent *hent; + + if (kiss_hash_iterator_end(hit)) return 0; + + if (! hit->pntr || ! hit->pntr->next) { + int slot = hit->slot + 1; + struct kiss_hashent ** htab = hit->hash->h_tab; + int sz = hit->hash->h_sz; + + while (slot <= sz && ! (hent=htab[slot])) { + slot++; + } + + if (slot > sz) { + kiss_hash_iterator_set_end(hit); + return 0; + } + else { + hit->slot = slot; + hit->pntr = hent; + } + } + else { + hit->pntr = hit->pntr->next; + } + + return 1; +} + + +void * +kiss_hash_iterator_get_key(kiss_hash_iterator hit) +{ + return hit->pntr ? hit->pntr->key : NULL; +} + + +void * +kiss_hash_iterator_get_val(kiss_hash_iterator hit) +{ + return hit->pntr ? hit->pntr->val : NULL; +} + + +struct kiss_hashent * +kiss_hash_iterator_get_hashent(kiss_hash_iterator hit) +{ + return hit->pntr; +} + +int +kiss_hash_iterator_equal(kiss_hash_iterator hit1, kiss_hash_iterator hit2) +{ + if (hit1->pntr || hit2->pntr) { + return hit1->pntr == hit2->pntr; + } + return hit1->slot == hit2->slot && hit1->hash == hit2->hash; +} + + +kiss_hash_iterator +kiss_hash_iterator_copy(kiss_hash_iterator hit) +{ + kiss_hash_iterator new_hit = (kiss_hash_iterator)kiss_pmglob_memory_kmalloc( + sizeof(struct kiss_hash_iter), + "kiss_hash_iterator_copy" + ); + if (hit == NULL || new_hit == NULL) return NULL; + + memset(new_hit, 0, sizeof (struct kiss_hash_iter)); + + new_hit->hash = hit->hash; + new_hit->slot = hit->slot; + new_hit->pntr = hit->pntr; + + return new_hit; +} + + +void +kiss_hash_iterator_free(kiss_hash_iterator hit) +{ + if (hit) kiss_pmglob_memory_kfree(hit, sizeof(struct kiss_hash_iter), "kiss_hash_iterator_free"); +} + +void +kiss_hash_iterator_set_begin(kiss_hash_iterator hit) +{ + hit->slot = 0; + hit->pntr = hit->hash->h_tab[0]; + + if (!hit->pntr) kiss_hash_iterator_next_ent(hit); +} + +void +kiss_hash_iterator_set_end(kiss_hash_iterator hit) +{ + hit->slot = -1; + hit->pntr = 0; +} + + +kiss_hash_iterator +kiss_hash_find_hashent_new(kiss_hash_t hp, const void *key) +{ + int slot = ((hp->h_keyfunc ? 
(*hp->h_keyfunc)(key, (hp)->h_info) : + ((intptr_t)key + ((intptr_t)key >> 16))) & (hp)->h_sz); + + struct kiss_hashent *pnt = hp->h_tab[slot]; + + kiss_hash_iterator iter; + + iter = kiss_hash_iterator_create(hp); + + if (hp->h_keycmp) { + for (; pnt != NULL; pnt = pnt->next) { + if ((*hp->h_keycmp)(pnt->key, key, hp->h_info) == 0) break; + } + } else { + for (; pnt != NULL; pnt = pnt->next) { + if (pnt->key == key) break; + } + } + + if (pnt == NULL) { + kiss_hash_iterator_set_end(iter); + } else { + iter->slot = slot; + iter->pntr = pnt; + } + + return iter; +} + + +void +kiss_hash_delete_by_iter(kiss_hash_iterator hit) +{ + if (hit == NULL || + kiss_hash_iterator_end(hit) || + kiss_hash_iterator_get_hashent(hit) == NULL) + return; + + kiss_hash_delete(hit->hash, kiss_hash_iterator_get_key(hit)); + + return; +} + +//= == === ==== ===== ====== ======= ======== +//= == === ==== ===== ====== ======= +// H a s h r e s i z e m e c h a n i s m +//= == === ==== ===== ====== ======= +//= == === ==== ===== ====== ======= ======== + + +// ----------------------------- +// KissHashResizeMode access API +// ----------------------------- +#ifdef KERNEL +#define herror // this is done due to compilation errors after the merge from Trini to Dal +#endif + +int +KissHashResizeMode_create(KissHashResizeMode **resize_mode) +{ + KissHashResizeMode *_resize_mode = NULL; + + if (!resize_mode) { + herror(0, 0, "KissKissHashResizeMode_create: NULL resize-mode pointer"); + return -1; + } + _resize_mode = (KissHashResizeMode *)kiss_pmglob_memory_kmalloc( + sizeof(KissHashResizeMode), + "KissHashResizeMode_create" + ); + + if (!_resize_mode) { + herror(0, 0, "KissHashResizeMode_create: Unable to allocate space for KissHashResizeMode object"); + return -1; + } + + memset(_resize_mode, 0, sizeof(KissHashResizeMode)); + + // Set default resize parameters + KissHashResizeMode_set_default_parameters(_resize_mode); + + *resize_mode = _resize_mode; + + return 0; +} + +void +KissHashResizeMode_destroy(KissHashResizeMode *resize_mode) +{ + if (!resize_mode) { + herror(0, 0, "KissHashResizeMode_destroy: NULL resize-mode pointer"); + return; + } + kiss_pmglob_memory_kfree(resize_mode, sizeof(KissHashResizeMode), "KissHashResizeMode_destroy"); + + return; +} + +int +KissHashResizeMode_set_method( + KissHashResizeMode *resize_mode, + KissHashResizeMethod method, + u_int value, + u_int trigger_ratio +) +{ + KissHashResizeMode _resize_mode; + int rc = 0; + + if (!resize_mode) { + herror(0, 0, "KissHashResizeMode_set_method: NULL resize-mode pointer"); + return -1; + } + + // set method + _resize_mode.method = method; + rc = KissHashResizeMode_verify_method(&_resize_mode); + if (rc < 0) return -1; + + // set value + _resize_mode.value = value; + if (KissHashResizeMode_verify_value(&_resize_mode) < 0) return -1; + + // set trigger ratio + _resize_mode.trigger_ratio = trigger_ratio; + if (KissHashResizeMode_verify_trigger_ratio(&_resize_mode) < 0) return -1; + + resize_mode->method = method; + resize_mode->value = value; + resize_mode->trigger_ratio = trigger_ratio; + + return 0; +} + +int +KissHashResizeMode_get_method( + const KissHashResizeMode *resize_mode, + KissHashResizeMethod *method, + u_int *value, + u_int *trigger_ratio +) +{ + if (!resize_mode || !method || !value || !trigger_ratio) { + herror( + 0, + 0, + "KissHashResizeMode_get_method: NULL parameter (mode=%p, method=%p, value=%p, trig=%p)", + resize_mode, + method, + value, + trigger_ratio + ); + return -1; + } + *method = resize_mode->method; + *value = 
resize_mode->value; + *trigger_ratio = resize_mode->trigger_ratio; + + return 0; +} + +int +KissHashResizeMode_set_direction(KissHashResizeMode *resize_mode, KissHashResizeDirection direction) +{ + if (!resize_mode) { + herror(0, 0, "KissHashResizeMode_set_direction: NULL resize-mode pointer"); + return -1; + } + resize_mode->direction = direction; + + if (KissHashResizeMode_verify_direction(resize_mode) < 0) { + resize_mode->direction = KISS_HASH_SIZE_INC_DEC; + return -1; + } + + return 0; +} + +int +KissHashResizeMode_get_direction(const KissHashResizeMode *resize_mode, KissHashResizeDirection *direction) +{ + if (!resize_mode || !direction) { + herror( + 0, + 0, + "KissHashResizeMode_get_direction: NULL parameter (mode=%p; direction=%p)", + resize_mode, + direction + ); + return -1; + } + *direction = resize_mode->direction; + + return 0; +} + +int +KissHashResizeMode_set_max_size(KissHashResizeMode *resize_mode, u_int max_size) +{ + if (!resize_mode) { + herror(0, 0, "KissHashResizeMode_set_max_size: NULL resize-mode pointer"); + return -1; + } + resize_mode->max_size = max_size; + + return 0; +} + +int +KissHashResizeMode_get_max_size(const KissHashResizeMode *resize_mode, u_int *max_size) +{ + if (!resize_mode || !max_size) { + herror(0, 0, "KissHashResizeMode_get_max_size: NULL parameter (mode=%p; max_size=%p)", resize_mode, max_size); + return -1; + } + *max_size = resize_mode->max_size; + + return 0; +} + +int +kiss_hash_set_resize_cb(kiss_hash_t hp, HashResizeCb_t resize_callback) +{ + if (!hp) { + herror(0, 0, "kiss_hash_set_resize_cb: NULL hash pointer"); + return -1; + } + hp->h_resize_mode.cb = resize_callback; + + return 0; +} + +static void +KissHashResizeMode_reset_parameters(KissHashResizeMode *resize_mode) +{ + resize_mode->max_size = DEFAULT_KISS_HASH_SIZE; + resize_mode->method = KISS_HASH_RESIZE_METHOD_UNKNOWN; + resize_mode->direction = KISS_HASH_SIZE_STATIC; + resize_mode->value = 0; + resize_mode->trigger_ratio = 0; + + return; +} + +static void +KissHashResizeMode_set_default_parameters(KissHashResizeMode *resize_mode) +{ + resize_mode->max_size = DEFAULT_KISS_HASH_SIZE; + resize_mode->method = KISS_HASH_RESIZE_BY_FACTOR; + resize_mode->direction = KISS_HASH_SIZE_INC_DEC; + resize_mode->value = DEFAULT_KISS_HASH_RESIZE_FACTOR_VALUE; + resize_mode->trigger_ratio = DEFAULT_KISS_HASH_RESIZE_FACTOR_TRIG_RATIO; + + return; +} + +// ------------------------------------------------------------------------ +// KissHashResizeMode parameters verification & default values function +// ------------------------------------------------------------------------ +// Min & max values for a single hash resize +#define HASH_RESIZE_MIN_FACTOR_VALUE 2 +#define HASH_RESIZE_MAX_FACTOR_VALUE 8 +#define HASH_RESIZE_MIN_TRIG_FACTOR 2 +#define HASH_RESIZE_MAX_TRIG_FACTOR 8 + + +static int +KissHashResizeMode_verify_method(const KissHashResizeMode *resize_mode) +{ + if (resize_mode->method != KISS_HASH_RESIZE_BY_FACTOR) { + herror(0, 0, "KissHashResizeMode_verify_method: Illegal resize method (%d)", resize_mode->method); + return -1; + } + + return 0; +} + +static int +KissHashResizeMode_verify_value(const KissHashResizeMode *resize_mode) +{ + if (resize_mode->value == 0) + return -1; + + if (resize_mode->method == KISS_HASH_RESIZE_BY_FACTOR) { + if ( (resize_mode->value < HASH_RESIZE_MIN_FACTOR_VALUE) || + (resize_mode->value > HASH_RESIZE_MAX_FACTOR_VALUE) ) { + herror( + 0, + 0, + "KissHashResizeMode_verify_value: Illegal factor value (%d) - should be %d..%d", + resize_mode->value, + 
HASH_RESIZE_MIN_FACTOR_VALUE, + HASH_RESIZE_MAX_FACTOR_VALUE + ); + return -1; + } + } else { + return -1; + } + + return 0; +} + +static int +KissHashResizeMode_verify_trigger_ratio(const KissHashResizeMode *resize_mode) +{ + if (resize_mode->method == KISS_HASH_RESIZE_BY_FACTOR) { + if ((resize_mode->trigger_ratio < HASH_RESIZE_MIN_TRIG_FACTOR) || + (resize_mode->trigger_ratio > HASH_RESIZE_MAX_TRIG_FACTOR)) { + herror( + 0, + 0, + "KissHashResizeMode_verify_trigger_ratio: Illegal trigger value (%d) - should be %d..%d", + resize_mode->trigger_ratio, + HASH_RESIZE_MIN_TRIG_FACTOR, + HASH_RESIZE_MAX_TRIG_FACTOR + ); + return -1; + } + } else { + return -1; + } + + return 0; +} + +static int +KissHashResizeMode_verify_direction(const KissHashResizeMode *resize_mode) +{ + if ((resize_mode->direction != KISS_HASH_SIZE_STATIC) && + (resize_mode->direction != KISS_HASH_SIZE_INCREASE) && + (resize_mode->direction != KISS_HASH_SIZE_DECREASE) && + (resize_mode->direction != KISS_HASH_SIZE_INC_DEC) ) { + herror(0, 0, "KissHashResizeMode_verify_direction: Illegal resize direction (%d)", resize_mode->direction); + return -1; + } + return 0; +} + +static int +KissHashResizeMode_verify_max_size(const kiss_hash_t hp, const KissHashResizeMode *resize_mode) +{ + if (kiss_hash_get_size(hp) > (int)resize_mode->max_size) { + herror( + 0, + 0, + "KissHashResizeMode_verify_max_size: Max size (%d) is lower than current hash size (%d)", + resize_mode->max_size, + kiss_hash_get_size(hp) + ); + return -1; + } + return 0; +} + +int +KissHashResizeMode_verify_params(const kiss_hash_t hp, const KissHashResizeMode *resize_mode) +{ + int rc = 0; + + if (!resize_mode) { + herror(0, 0, "KissHashResizeMode_verify_params: NULL resize-mode pointer"); + return -1; + } + + rc = KissHashResizeMode_verify_method(resize_mode); + if (rc==0) rc = KissHashResizeMode_verify_value(resize_mode); + if (rc==0) rc = KissHashResizeMode_verify_trigger_ratio(resize_mode); + if (rc==0) rc = KissHashResizeMode_verify_direction(resize_mode); + if (rc==0) rc = KissHashResizeMode_verify_max_size(hp, resize_mode); + + return rc; +} + +// ----------------------------------- +// Set hash to have dynamic size +// ----------------------------------- +int +kiss_hash_set_dynamic_size(kiss_hash_t hp, const KissHashResizeMode *resize_mode) +{ + if (!hp || !resize_mode) { + herror(0, 0, "kiss_hash_set_dynamic_size: NULL parameter (hp=%p; mode=%p)", hp, resize_mode); + return -1; + } + + if (KissHashResizeMode_verify_params(hp, resize_mode) < 0) { + herror(0, 0, "kiss_hash_set_dynamic_size: Illegal resize parameters"); + return -1; + } + + hp->h_resize_mode.max_size = resize_mode->max_size; + hp->h_resize_mode.method = resize_mode->method; + hp->h_resize_mode.direction = resize_mode->direction; + hp->h_resize_mode.value = resize_mode->value; + hp->h_resize_mode.trigger_ratio = resize_mode->trigger_ratio; + hp->h_resize_mode.cb = resize_mode->cb; + + return 0; +} + +int +kiss_hash_get_dynamic_size(kiss_hash_t hp, const KissHashResizeMode **resize_mode) +{ + if (!hp || !resize_mode) { + herror(0, 0, "kiss_hash_get_dynamic_size: NULL parameter (hp=%p; mode=%p)", hp, resize_mode); + return -1; + } + *resize_mode = &(hp->h_resize_mode); + + return 0; +} + +// -------------------------- +// "Manual" hash resizing +// -------------------------- +// +// This API will cause an immediate resizing of hash +// table, according to the parameters, given in the +// input KissHashResizeMode object. 
+// Note that the KissHashResizeMode object parameters are +// not kept on the hash handle for future resize oprations. +int +kiss_hash_trigger_resize(kiss_hash_t hp, const KissHashResizeMode *resize_mode) +{ + const KissHashResizeMode *mode = resize_mode ? resize_mode : &(hp->h_resize_mode); + + if (mode->direction == KISS_HASH_SIZE_STATIC) { + herror(0, 0, "kiss_hash_trigger_resize: Static resize mode"); + return -1; + } + + herror(0, 0, "kiss_hash_trigger_resize: Triggering hash resize"); + return kiss_hash_do_resize(hp, mode); +} + +// ----------------------- +// Resize hash table +// ----------------------- +// +// Check if resize should be triggered +static +boolean_cpt kiss_hash_resize_check_for_resize(kiss_hash_t hp, KissHashResizeDirection direction) +{ + if (!hp) return FALSE; + + // Static hash size remains fixed + if (hp->h_resize_mode.direction == KISS_HASH_SIZE_STATIC) return FALSE; + + // + // Size cannot change before number of elements + // is larger than original hash size. + if ((kiss_hash_get_size(hp) == kiss_hash_orig_size(hp)) && (kiss_hash_nelements(hp) < kiss_hash_orig_size(hp))) { + return FALSE; + } + + + // Do not expand hash with less elements than hash size. + // Do not shrink hash with more elements than hash size. + if (kiss_hash_nelements(hp) < kiss_hash_get_size(hp)) { + if ((hp->h_resize_mode.direction == KISS_HASH_SIZE_INCREASE) || (direction == KISS_HASH_SIZE_INCREASE)) { + return FALSE; + } + } + + if (kiss_hash_nelements(hp) > kiss_hash_get_size(hp)) { + if ((hp->h_resize_mode.direction == KISS_HASH_SIZE_DECREASE) || (direction == KISS_HASH_SIZE_DECREASE)) { + return FALSE; + } + } + + + if (hp->h_resize_mode.method == KISS_HASH_RESIZE_BY_FACTOR) { + if (kiss_hash_nelements(hp) >= (kiss_hash_get_size(hp) * (int)hp->h_resize_mode.trigger_ratio)) + return TRUE; + + if (kiss_hash_nelements(hp) <= (kiss_hash_get_size(hp) / (int)hp->h_resize_mode.value)) + return TRUE; + } + + return FALSE; +} + + +// Calculate a new hash size for hash resizing operation. +// +// Please note that new size is calculated differently upon +// increase & decrease operations (refer to design doc for +// more details). +static int +kiss_hash_resize_calc_new_size(const kiss_hash_t hp, const KissHashResizeMode *resize_mode) +{ + KissHashResizeDirection direction; + int h_new_size = -1; + + // Determine whether to increase or decrease hash size + if ((resize_mode->direction == KISS_HASH_SIZE_INCREASE) || (resize_mode->direction == KISS_HASH_SIZE_DECREASE)) { + direction = resize_mode->direction; + } else { + if (resize_mode->direction == KISS_HASH_SIZE_INC_DEC) { + if (kiss_hash_nelements(hp) >= kiss_hash_get_size(hp)) { + direction = KISS_HASH_SIZE_INCREASE; + } else { + direction = KISS_HASH_SIZE_DECREASE; + } + } else { + return -1; + } + } + + // Set new hash size + if (resize_mode->method == KISS_HASH_RESIZE_BY_FACTOR) { + if (direction == KISS_HASH_SIZE_INCREASE) { + h_new_size = kiss_hash_get_size(hp) * resize_mode->value; + } else { + h_new_size = kiss_hash_get_size(hp) / resize_mode->trigger_ratio; + } + } + else{ + return -1; + } + + // Hash sizes are rounded to the nearest power of 2. 
Same as in hash create + h_new_size = roundtwo(h_new_size); + + // Check that the new size does not break the allowed size limits + if (h_new_size > (int)resize_mode->max_size) { + herror( + 0, + 0, + "kiss_hash_resize_calc_new_size: New size (%d) exceeds the size limit (%d)", + h_new_size, + resize_mode->max_size + ); + return -1; + } + + // Hash size cannot decrease below its original value + if (h_new_size < kiss_hash_orig_size(hp)) { + herror( + 0, + 0, + "kiss_hash_resize_calc_new_size: New size (%d) is lower than the original size (%d)", + h_new_size, + kiss_hash_orig_size(hp) + ); + return -1; + } + + return h_new_size; +} + + +// Hash resize function. +// This function does the actual resize operation: +// 1. A temporary hash is created, with the new size +// 2. All elements from the original hash are inserted into the temp hash +// 3. Hash elements & size are switched between the orig & temp hash tables. +// 4. Temporary hash is destroyed. +// returns a negative value upon failure or new hash size on success. +#define EXIT_RESIZE(msg, rc) \ + if (temp_hash) { kiss_hash_destroy(temp_hash);} \ + if (orig_kiss_hash_iter) {kiss_hash_iterator_free(orig_kiss_hash_iter);} \ + if (msg != nullptr) {herror(0, 0, "kiss_hash_do_resize: %s", msg);} \ + return rc; + +static int +kiss_hash_do_resize(kiss_hash_t hp, const KissHashResizeMode *resize_mode) +{ + int orig_h_sz = 0, h_new_size = 0, rc = 0; + kiss_hash_t temp_hash = NULL; + struct kiss_hashent **orig_h_tab = NULL; + kiss_hash_iterator orig_kiss_hash_iter = NULL; + void *kiss_hash_key = NULL, *kiss_hash_val = NULL; + + if (!hp || !resize_mode) { + EXIT_RESIZE("NULL parameter", -1); + } + else + + if (KissHashResizeMode_verify_params(hp, resize_mode) < 0) { + EXIT_RESIZE("Illegal resize parameters", -1); + } + + // Calculate new hash size + h_new_size = kiss_hash_resize_calc_new_size(hp, resize_mode); + if (h_new_size <= 0) { + EXIT_RESIZE("Unable to set new hash size or hash cannot resize", -1); + } + + // Check that new & original hash tables do not have the same size + // (might happen due to the hash sizes being rounded to the nearest + // power of two, higher than the calculated size) + if (h_new_size == kiss_hash_get_size(hp)) { + EXIT_RESIZE("Original & new hash have the same size. 
No resize will be done.", -1); + } + + herror( + 0, + 0, + "kiss_hash_do_resize: Resizing hash from %d to %d (n_elements=%d)", + kiss_hash_get_size(hp), + h_new_size, kiss_hash_nelements(hp) + ); + + // Create a temporary hash table + temp_hash = kiss_hash_create(h_new_size, hp->h_keyfunc, hp->h_keycmp, hp->h_info); + if (!temp_hash) { + EXIT_RESIZE("Unable to allocate temporary hash", -1); + } + + // Move elements from original hash to temporary hash + orig_kiss_hash_iter = kiss_hash_iterator_create(hp); + if (!orig_kiss_hash_iter) { + EXIT_RESIZE("Failed to create hash iterator", -1); + } + + do { + if (!(kiss_hash_iterator_get_hashent(orig_kiss_hash_iter))) continue; + + kiss_hash_key = kiss_hash_iterator_get_key(orig_kiss_hash_iter); + kiss_hash_val = kiss_hash_iterator_get_val(orig_kiss_hash_iter); + rc = kiss_hash_insert(temp_hash, kiss_hash_key, kiss_hash_val); + if (!rc) { + herror(0, 0, "kiss_hash_do_resize: Failed to add to hash (key=%x; val=%x)", kiss_hash_key, kiss_hash_val); + EXIT_RESIZE("", -1); + } + } while(kiss_hash_iterator_next_ent(orig_kiss_hash_iter)); + + kiss_hash_iterator_free(orig_kiss_hash_iter); + orig_kiss_hash_iter = NULL; + + + // Replace original and temporary table-pointers and sizes + orig_h_tab = hp->h_tab; + orig_h_sz = hp->h_sz; + + hp->h_tab = temp_hash->h_tab; + hp->h_sz = temp_hash->h_sz; + + temp_hash->h_tab = orig_h_tab; + temp_hash->h_sz = orig_h_sz; + + // Destroy temporary hash. + // No application data is deleted since the temporary hash + // has no value or key destructors, and the h_dodestr flag + // is not set. + kiss_hash_destroy(temp_hash); + + // Notify application on hash resize + if (resize_mode->cb) resize_mode->cb(hp, hp->h_info); + + return kiss_hash_get_size(hp); +} +#undef EXIT_RESIZE + + +// Hashing fuction for string hash. +// This function is used by hash_strcreate(). +// @param vs key +// @param info opaque +// @return value of the hash function. +uintptr_t +kiss_hash_strvalue(const void *vs, CP_MAYBE_UNUSED void *info) +{ + unsigned int val; + const char* s = (const char *)vs; + + for(val = 0; *s; s++) { + val = ((val >> 3) ^ (val<<5)) + *s; + } + return val; +} + + +// Comparison fuction for string hash. +// This function is used by hash_strcreate(). +// +// @param vk1 key +// @param vk2 key +// @param info opaque +// @return 0 - keys are equal, otherwise different. +int +kiss_hash_strcmp(const void* vk1, const void* vk2, CP_MAYBE_UNUSED void *info) +{ + const char* k1 = (const char *)vk1; + const char* k2 = (const char *)vk2; + return strcmp(k1, k2); +} + + +// Hashing fuction for integer hash. +// This function is used by hash_intcreate(). +// @param v key +// @param info opaque +// @return value of the hash function. +uintptr_t +kiss_hash_intvalue(const void* v, CP_MAYBE_UNUSED void *info) +{ + return (uintptr_t)v; +} + + +// Comparison fuction for integer hash. +// This function is used by hash_intcreate(). +// +// @param vv1 key +// @param vv2 key +// @param info opaque +// @return 0 - keys are equal, otherwise different. +int +kiss_hash_intcmp(const void* vv1, const void* vv2, CP_MAYBE_UNUSED void *info) +{ + intptr_t v1 = (intptr_t)vv1; + intptr_t v2 = (intptr_t)vv2; + return v1 - v2; +} + + +#ifdef KERNEL +#undef herror +#endif +SASAL_END diff --git a/components/utils/pm/kiss_hash.h b/components/utils/pm/kiss_hash.h new file mode 100644 index 0000000..a424d68 --- /dev/null +++ b/components/utils/pm/kiss_hash.h @@ -0,0 +1,586 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __KISS_HASH_H__ +#define __KISS_HASH_H__ + +#include "general_adaptor.h" + +typedef struct kiss_hash *kiss_hash_t; + +struct kiss_hashent { + void *key; + void *val; + struct kiss_hashent *next; +}; + +typedef uintptr_t (*hkeyfunc_t)(const void *key, void *info); +typedef int (*hcmpfunc_t)(const void *key1, const void *key2, void *info); +typedef void (*freefunc_t)(void *info); + +// {group: API for KISS_HASH} +#define H_DESTR(destr, addr) \ +if (destr && (((uintptr_t)(addr)) > 0x10)) (*destr)(addr); + +// {group: API for KISS_HASH} +// Description: Create Hash Table. MT-Level: Reentrant +// Parameters: +// hsize - hash size +// keyfunc - key hashing function +// keycmp - key comparison function +// info - opaque for use of keyfunc and keycmp functions. +// Return values: +// o hash pointer +// o NULL upon failure +// See also: kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, +// kiss_hash_delete, kiss_hash_destroy, kiss_hash_find_kiss_hashent, kiss_hash_insert_at, kiss_hash_strvalue, +// kiss_hash_strcmp, kiss_hash_intvalue, kiss_hash_bytevalue, +// kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +kiss_hash_t kiss_hash_create (size_t hsize, hkeyfunc_t keyfunc, hcmpfunc_t keycmp, void *info); + +// {group: API for HASH} +// Description: Create Hash Table with Destructor. MT-Level: Reentrant +// Parameters: +// hsize - hash size +// keyfunc - key hashing function +// keycmp - key comparison function +// val_destr - destructor for the values of the hash +// key_destr - destructor for the keys of the hash +// info - opaque for use of keyfunc and keycmp functions. 
+// Return values: +// o hash pointer +// o NULL upon failure +// See also: kiss_hash_create, kiss_hash_set_destr, kiss_hash_dodestr, kiss_hash_undo_destr, kiss_hash_nelements, +// iss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, kiss_hash_delete, kiss_hash_destroy, +// kiss_hash_find_kiss_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, kiss_hash_intvalue, +// kiss_hash_bytevalue, kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +kiss_hash_t +kiss_hash_create_with_destr ( + size_t hsize, + hkeyfunc_t keyfunc, + hcmpfunc_t keycmp, + freefunc_t val_destr, + freefunc_t key_destr, + void *info +); + +#define kiss_hash_create(hsize, hkeyfunc, hcmpfunc, info) \ + _kiss_hash_create (hsize, hkeyfunc, hcmpfunc, info, __FILE__, __LINE__) + +#define kiss_hash_create_with_destr(hsize, hkeyfunc, hcmpfunc, freefunc1, freefunc2, info) \ + _kiss_hash_create_with_destr (hsize, hkeyfunc, hcmpfunc, freefunc1, freefunc2, info, __FILE__, __LINE__) + +kiss_hash_t +_kiss_hash_create_with_ksleep(size_t hsize, hkeyfunc_t, hcmpfunc_t, void *info, const char *file, int line); + +#define kiss_hash_create_with_ksleep(hsize, hkeyfunc, hcmpfunc, info) \ + _kiss_hash_create_with_ksleep (hsize, hkeyfunc, hcmpfunc, info, __FILE__, __LINE__) + + +// {group: API for HASH} +// Description: Debug single hash. MT-Level: Reentrant +//This function calculates and prints the following statistics: +//o hash pointer +//o file name and line number where kiss_hash_create or kiss_hash_create_with_destr was called +//o number of elements in kiss_hash +//o number of slots in hash - hash size +//o size in bytes of memory occupied by hash maintenance structures +//o slot utilzation - percentage of hash slots used to store elements +//o average number of lookups - average length of lists of elements +// Parameters: +// hash - pointer to hash +// Return values: +// size in bytes of memory occupied by hash maintenance structures. +// See also: hash_create, hash_create_with_destr, hash_set_destr, hash_dodestr, hash_undo_destr, +// hash_nelements, hash_findaddr, hash_lookup, hash_lookkey, hash_insert, hash_delete, hash_destroy, +// hash_find_hashent, hash_insert_at, hash_strvalue, hash_strcmp, hash_intvalue, hash_bytevalue, +// hash_bytecmp, hash_debug_all +int kiss_hash_debug(kiss_hash_t hp); + +// {group: API for HASH} +// Description: Debug single hash. MT-Level: Safe +//Iterates a list of all hash tables craeted in the current process and +//for each hash calls function hash_debug. In addition the total +//memory usage of hash maintenance structures is printed. +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, +// kiss_hash_insert, kiss_hash_delete, kiss_hash_destroy, +// kiss_hash_find_kiss_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, kiss_hash_intvalue, +// kiss_hash_bytevalue, kiss_hash_bytecmp, kiss_hash_debug +void kiss_hash_debug_all(); + +// {group: API for kiss_hash} +kiss_hash_t _kiss_hash_create (size_t hsize, hkeyfunc_t, hcmpfunc_t, void *info, const char *file, int line); + +// {group: API for HASH} +kiss_hash_t _kiss_hash_create_with_destr (size_t hsize, hkeyfunc_t, hcmpfunc_t, freefunc_t, freefunc_t, + void *info, const char *file, int line); + +// {group: API for HASH} +// Description: Set destructor for hash elements. 
MT-Level: Reentrant +//Keys and values destructors are called for every hash key-value pair when the hash is destroyed. +// Parameters: +// hp - hash +// val_destr - destructor for the values of the hash +// key_destr - destructor for the keys of the hash +// Return values: +// hash pointer +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_dodestr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, +// kiss_hash_delete, kiss_hash_destroy, kiss_hash_find_kiss_hashent, kiss_hash_insert_at, kiss_hash_strvalue, +// kiss_hash_strcmp, kiss_hash_intvalue, kiss_hash_bytevalue, +// kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +kiss_hash_t kiss_hash_set_destr (kiss_hash_t hp, freefunc_t val_destr, freefunc_t key_destr); + +// {group: API for kiss_hash} +// Description: Enable hash element destruction. MT-Level: Reentrant +//Hash is created with destruction of elements disabled by default. +//This function enables destruction upon a call to kiss_hash_destroy. +//Meaning, the hash will automatically call destructors when an entry gets +//deleted from the hash. Usually this is not the case! +// Parameters: +// hp - hash +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_undo_destr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, +// kiss_hash_delete, kiss_hash_destroy, kiss_hash_find_kiss_hashent, kiss_hash_insert_at, kiss_hash_strvalue, +// kiss_hash_strcmp, kiss_hash_intvalue, kiss_hash_bytevalue, +// kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +void kiss_hash_dodestr (kiss_hash_t hp); + +// {group: API for HASH} +// Description: Disable hash element destruction. MT-Level: Reentrant +// Parameters: +// hp - hash +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, +// kiss_hash_delete, kiss_hash_destroy, +// kiss_hash_find_kiss_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, kiss_hash_intvalue, +// kiss_hash_bytevalue, kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +void kiss_hash_undo_destr (kiss_hash_t hp); + +// {group: API for HASH} +// Description: Number of hash elements. MT-Level: Reentrant +// Parameters: +// hash - hash table +// Return values: +// number of elements +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, +// kiss_hash_delete, kiss_hash_destroy, +// kiss_hash_find_kiss_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, kiss_hash_intvalue, +// kiss_hash_bytevalue, kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +int kiss_hash_nelements (kiss_hash_t hash); + +// {group: API for HASH} +// Description: Hash size.
MT-Level: Reentrant +// Parameters: +// hash - hash table +// Return values: +// Size of hash +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, +// kiss_hash_insert, kiss_hash_delete, kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, +// kiss_hash_strvalue, kiss_hash_strcmp, kiss_hash_intvalue, kiss_hash_bytevalue, +// kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +int kiss_hash_get_size (kiss_hash_t hash); + +// {group: API for HASH} +// Description: Return address of the pointer to the value in the hash table. +// Parameters: +// hp - hash pointer +// key - hash key +// Return values: +// hash entry +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_nelements, kiss_hash_lookup, kiss_hash_lookkey, kiss_hash_insert, +// kiss_hash_delete, kiss_hash_destroy, +// kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, kiss_hash_intvalue, +// kiss_hash_bytevalue, kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +void **kiss_hash_findaddr (kiss_hash_t hp, const void *key); + +// {group: API for HASH} +// Description: Lookup hash value. MT-Level: Reentrant +// Parameters: +// hp - hash pointer +// key - hash key +// Return values: +// o hash value +// o NULL upon failure +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookkey, kiss_hash_insert, +// kiss_hash_delete, kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, +// kiss_hash_strcmp, kiss_hash_intvalue, kiss_hash_bytevalue, +// kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +void *kiss_hash_lookup (kiss_hash_t hp, const void *key); + +// {group: API for HASH} +// Description: Lookup hash key. MT-Level: Reentrant +//Returns the key pointer as stored in the hash table. +// Parameters: +// hp - hash pointer +// key - hash key that hash a value equal to that of the key stored in the hash. +// Return values: +// o hash key +// o NULL upon failure +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_insert, +// kiss_hash_delete, kiss_hash_destroy,kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, +// kiss_hash_strcmp, kiss_hash_intvalue, kiss_hash_bytevalue, +// kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +void *kiss_hash_lookkey (kiss_hash_t hp, const void *key); + +// {group: API for HASH} +// Description: Insert hash element. 
MT-Level: Reentrant +// Parameters: +// hp - hash pointer +// key - hash key +// val - hash val +// Return values: +// >0 - success +// 0 - upon failure +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, +// kiss_hash_delete, kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, +// kiss_hash_strcmp, kiss_hash_intvalue, kiss_hash_bytevalue, +// kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +int kiss_hash_insert (kiss_hash_t hp, void *key, void *val); + +// {group: API for HASH} +// Description: Delete hash element. MT-Level: Reentrant +//Delete hash element and return a value for the key. +// Parameters: +// hp - hash pointer +// key - hash key +// Return values: +// o hash val +// o NULL upon failure +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, +// kiss_hash_insert, kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, +// kiss_hash_strcmp, kiss_hash_intvalue, kiss_hash_bytevalue, +// kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +void *kiss_hash_delete (kiss_hash_t hash, const void *key); + +// {group: API for HASH} +// Description: Destroy hash. MT-Level: Reentrant +//If destructor functions were defined in the call to kiss_hash_create_with_destr or kiss_hash_set_destr, +//function kiss_hash_dodestr must be called to enable element destruction. +// Parameters: +// hp - hash pointer +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, +// kiss_hash_insert, kiss_hash_delete, kiss_hash_find_hashent, kiss_hash_insert_at, kiss_hash_strvalue, +// kiss_hash_strcmp, kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp, kiss_hash_debug, +// kiss_hash_debug_all +void kiss_hash_destroy (kiss_hash_t hp); + +// {group: API for HASH} +// Description: Find hash entry. MT-Level: Reentrant +//Used as an efficient but somewhat ugly interface for a combined find/insert operation. +//It returns the address of a pointer to a hashent structure containing the key/val pair if found. +//If not, it returns the address of the pointer at which the new key/val pair can be appended, +//thus avoiding an unnecessary repeated search. +//We can check if the key was found by checking whether the pointer is zero or not. +//This function is usually used with kiss_hash_insert_at. +// Parameters: +// hp - hash pointer +// key - hash key +// Return values: +// hash entry +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, +// kiss_hash_insert, kiss_hash_delete, kiss_hash_destroy, kiss_hash_insert_at, kiss_hash_strvalue, kiss_hash_strcmp, +// kiss_hash_intvalue, kiss_hash_bytevalue, kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +struct kiss_hashent ** kiss_hash_find_hashent(kiss_hash_t hp, const void *key); + +// {group: API for HASH} +// Description: Insert hash element at specified position.
MT-Level: Reentrant +//This function should be used together with kiss_hash_find_hashent to insert +//the value in case it was not found at the hash. +// Parameters: +// hp - hash pointer +// key - hash key +// val - hash val +// hloc - +// Return values: +// o 0 upon failure +// o number of hash elements after insertion in case of success. +// See also: kiss_hash_create, kiss_hash_create_with_destr, kiss_hash_set_destr, kiss_hash_dodestr, +// kiss_hash_undo_destr, kiss_hash_nelements, kiss_hash_findaddr, kiss_hash_lookup, kiss_hash_lookkey, +// kiss_hash_insert, kiss_hash_delete, +// kiss_hash_destroy, kiss_hash_find_hashent, kiss_hash_strvalue, kiss_hash_strcmp, kiss_hash_intvalue, +// kiss_hash_bytevalue, kiss_hash_bytecmp, kiss_hash_debug, kiss_hash_debug_all +int kiss_hash_insert_at (kiss_hash_t hp, void *key, void *val, struct kiss_hashent**hloc); + + +#define kiss_hash_strcreate(sz) \ + kiss_hash_create(sz, (hkeyfunc_t)kiss_hash_strvalue, (hcmpfunc_t)kiss_hash_strcmp, NULL) + +#define kiss_hash_intcreate(sz) \ + kiss_hash_create(sz, (hkeyfunc_t)kiss_hash_intvalue, (hcmpfunc_t)kiss_hash_intcmp, NULL) + +#define kiss_hash_bytecreate(n, esz) \ + kiss_hash_create(n, (hkeyfunc_t)kiss_hash_bytevalue, (hcmpfunc_t)kiss_hash_bytecmp, (void *)esz) + +// The following provide hash table data type interface, +// These functions can be provided by the user, +// The default provided functions provide string hash + +// {group: API for HASH} +// Description: Hashing fuction for string hash. +//This function is used by kiss_hash_strcreate(). +// Parameters: +// vs - key +// info - opaque +// Return values: +// value of the hash function. +uintptr_t kiss_hash_strvalue (const void *vs, void *info); + +// {group: API for HASH} +// Description: Comparison fuction for string hash. +//This function is used by kiss_hash_strcreate(). +// Parameters: +// vk1 - key +// vk2 - key +// info - opaque +// Return values: +// 0 - keys are equal +// !0 - keys are different +int kiss_hash_strcmp (const void *vk1, const void *vk2, void *info); + +// {group: API for HASH} +// Description: Hashing fuction for integer hash. +//This function is used by kiss_hash_intcreate(). +// Parameters: +// v - key +// info - opaque +// Return values: +// value of the hash function. +uintptr_t kiss_hash_intvalue (const void* v, void *info); + +// {group: API for HASH} +// Description: Comparison fuction for integer hash. +//This function is used by kiss_hash_intcreate(). +// Parameters: +// vv1 - key +// vv2 - key +// info - opaque +// Return values: +// 0 - keys are equal +// !0 - keys are different +int kiss_hash_intcmp (const void* vv1, const void* vv2, void *info); + +// {group: API for HASH} +// Description: Hashing fuction for byte hash. +//This function is used by kiss_hash_bytecreate(). +// Parameters: +// data - key +// info - opaque +// Return values: +// value of the hash function. +uintptr_t kiss_hash_bytevalue (const void *data, void *info); + +// {group: API for HASH} +// Description: Comparison fuction for byte hash. +//This function is used by kiss_hash_bytecreate(). +// Parameters: +// d1 - key +// d2 - key +// info - opaque +// Return values: +// 0 - keys are equal +// !0 - keys are different +int kiss_hash_bytecmp (const void *d1, const void *d2, void *info); + +// {group: API for HASH ITERATOR} +typedef struct kiss_hash_iter *kiss_hash_iterator; + +// {group: API for HASH ITERATOR} +// Description: Create hash iterator. 
MT-Level: Reentrant +// Parameters: +// hp - hash +// Return values: +// o iterator object +// o NULL upon failure +// See also: +// kiss_hash_iterator_next, kiss_hash_iterator_next_key, kiss_hash_iterator_destroy +kiss_hash_iterator kiss_hash_iterator_create (kiss_hash_t hp); + +// {group: API for HASH ITERATOR} +// Description: Return next hash value. MT-Level: Reentrant +// Parameters: +// hit - hash iterator +// Return values: +// o next hash value +// o NULL upon failure +// See also: +// kiss_hash_iterator_create, kiss_hash_iterator_next_key, kiss_hash_iterator_destroy +void *kiss_hash_iterator_next (kiss_hash_iterator hit); + +// {group: API for HASH ITERATOR} +// Description: Return next hash key. MT-Level: Reentrant +// Parameters: +// hit - hash iterator +// Return values: +// o next hash key +// o NULL upon failure +// See also: +// kiss_hash_iterator_create, kiss_hash_iterator_next, kiss_hash_iterator_destroy +void *kiss_hash_iterator_next_key (kiss_hash_iterator hit); + +// {group: API for HASH ITERATOR} +// Description: Destroy hash iterator. MT-Level: Reentrant +// Parameters: +// hit - hash iterator +// See also: +// kiss_hash_iterator_create, kiss_hash_iterator_next, kiss_hash_iterator_next_key +void kiss_hash_iterator_destroy (kiss_hash_iterator hit); + +// {group: API for ITERATOR} +int kiss_hash_iterator_next_ent(kiss_hash_iterator hit); + +// {group: API for ITERATOR} +void * kiss_hash_iterator_get_key(kiss_hash_iterator hit); + +// {group: API for ITERATOR} +void * kiss_hash_iterator_get_val(kiss_hash_iterator hit); + +// {group: API for ITERATOR} +struct kiss_hashent * kiss_hash_iterator_get_hashent(kiss_hash_iterator hit); + +// {group: API for ITERATOR} +int kiss_hash_iterator_equal(kiss_hash_iterator hit1, kiss_hash_iterator hit2); + +// {group: API for ITERATOR} +kiss_hash_iterator kiss_hash_iterator_copy(kiss_hash_iterator hit); + +// {group: API for ITERATOR} +void kiss_hash_iterator_free(kiss_hash_iterator hit); + +// {group: API for ITERATOR} +void kiss_hash_iterator_set_begin(kiss_hash_iterator hit); + +// {group: API for ITERATOR} +void kiss_hash_iterator_set_end(kiss_hash_iterator hit); + +// {group: API for HASH} +kiss_hash_iterator kiss_hash_find_hashent_new(kiss_hash_t hp, const void *key); + +// {group: API for HASH ITERATOR} +void kiss_hash_delete_by_iter(kiss_hash_iterator hit); + +// - - - - - - - - - - - - - - - +// Hash resize mechanism +// - - - - - - - - - - - - - - - + +// {group: API for HASH RESIZE} +// Determine if hash size can increase, decrease or both. +typedef enum { + KISS_HASH_SIZE_STATIC = 0, // hash size is kept fixed + KISS_HASH_SIZE_INCREASE = 1, + KISS_HASH_SIZE_DECREASE = 2, + KISS_HASH_SIZE_INC_DEC = 3 +} KissHashResizeDirection; + +// {group: API for HASH RESIZE} +typedef enum { + KISS_HASH_RESIZE_METHOD_UNKNOWN = 0, + KISS_HASH_RESIZE_BY_FACTOR = 1 +} KissHashResizeMethod; + +// {group: API for HASH RESIZE} +// Default maximal hash size: +// Hash size will not increase beyond this value unless stated o/w by the application +#define DEFAULT_KISS_HASH_SIZE (1<<17) + +// {group: API for HASH RESIZE} +// Default value for hash factorial resizing +#define DEFAULT_KISS_HASH_RESIZE_FACTOR_VALUE 4 +// {group: API for HASH RESIZE} +// Default value for hash factorial resizing trigger ratio +#define DEFAULT_KISS_HASH_RESIZE_FACTOR_TRIG_RATIO 2 + +// {group: API for HASH RESIZE} +// Resize application callback: This callback will be invoked at every successful resize operation. 
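+// A minimal illustrative callback (hypothetical example, not part of the API) could look like:
+//
+//     static int my_resize_cb(kiss_hash_t hp, void *app_info)
+//     {
+//         // app_info is the opaque 'info' pointer held by the hash; the return value
+//         // is not used by the hash resize code itself, so 0 is returned here.
+//         (void)hp; (void)app_info;
+//         return 0;
+//     }
+//
+// and would be registered with kiss_hash_set_resize_cb(hp, my_resize_cb).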
+typedef int (* HashResizeCb_t) (kiss_hash_t hp, void *app_info); + + +// Hash resize mode object & accsess API. +// Used for setting resize parameters hash. + +// {group: API for HASH RESIZE} +typedef struct _KissHashResizeMode KissHashResizeMode; + +// {group: API for HASH RESIZE} +int KissHashResizeMode_create(KissHashResizeMode **resize_mode); + +// {group: API for HASH RESIZE} +void KissHashResizeMode_destroy(KissHashResizeMode *resize_mode); + +// {group: API for HASH RESIZE} +int KissHashResizeMode_set_method( + KissHashResizeMode *resize_mode, + KissHashResizeMethod method, + u_int value, + u_int trigger_ratio); + +// {group: API for HASH RESIZE} +int KissHashResizeMode_get_method( + const KissHashResizeMode *resize_mode, + KissHashResizeMethod *method, + u_int *value, + u_int *trigger_ratio); + +// {group: API for HASH RESIZE} +int KissHashResizeMode_set_direction(KissHashResizeMode *resize_mode, KissHashResizeDirection direction); + +// {group: API for HASH RESIZE} +int KissHashResizeMode_get_direction(const KissHashResizeMode *resize_mode, KissHashResizeDirection *direction); + +// {group: API for HASH RESIZE} +int KissHashResizeMode_set_max_size(KissHashResizeMode *resize_mode, u_int max_size); + +// {group: API for HASH RESIZE} +int KissHashResizeMode_get_max_size(const KissHashResizeMode *resize_mode, u_int *max_size); + +// {group: API for HASH RESIZE} +int kiss_hash_set_resize_cb(kiss_hash_t hp, HashResizeCb_t resize_callback); + +// {group: API for HASH RESIZE} +// Description: Set hash dynamic size parameters. +// Parameters: +// hp - [in] pointer to hash table +// resize_mode - [in] should be created and set using the access API to the KissHashResizeMode object. +// After using the set API, this object can be destroyed. +// +int kiss_hash_set_dynamic_size(kiss_hash_t hp, const KissHashResizeMode *resize_mode); + +// {group: API for HASH RESIZE} +// Description: Get hash dynamic size parameters. +// Parameters: +// hp - [in] pointer to hash table +// resize_mode - [out] a read-only parameter that should not be changed by the application. +int kiss_hash_get_dynamic_size(kiss_hash_t hp, const KissHashResizeMode **resize_mode); + +// {group: API for HASH RESIZE} +// Description: This API will cause an immediate resizing of hash +// table, according to the parameters, given in the input +// KissHashResizeMode object (if NULL, the resize will be done +// according to the parameters as last set by the application). +// +// Note that the KissHashResizeMode object parameters are +// not kept on the hash handle for future resize oprations. +int kiss_hash_trigger_resize(kiss_hash_t hp, const KissHashResizeMode *resize_mode); + +#endif // __KISS_HASH_H__ diff --git a/components/utils/pm/kiss_patterns.cc b/components/utils/pm/kiss_patterns.cc new file mode 100644 index 0000000..17abfb7 --- /dev/null +++ b/components/utils/pm/kiss_patterns.cc @@ -0,0 +1,134 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "kiss_patterns.h" +#include +#include +#include "general_adaptor.h" +#include "pm_adaptor.h" +#include "sasal.h" + +SASAL_START // Multiple Pattern Matcher +// Add a character's printable representation to a buffer. +// Returns the number of bytes written. +static u_int +pm_str_one_char_to_debug_buf(u_char *buf, int len, u_char ch, BOOL for_csv) +{ + char single_char_buf[10]; + int single_char_len; + + // Get a printable representation of the character + if (isprint(ch) && !(ch == '"' && for_csv)) { + single_char_buf[0] = ch; + single_char_len = 1; + } else { + snprintf(single_char_buf, sizeof(single_char_buf), "\\x%02x", ch); + single_char_buf[sizeof(single_char_buf)-1] = '\0'; + single_char_len = strlen(single_char_buf); + } + + if (single_char_len > len) { + // See that we don't exceed the buffer, and leave room for \0. + single_char_len = len; + } + + bcopy(single_char_buf, buf, single_char_len); + return single_char_len; +} + +// Debug only - Returns a printable character pointer for the non null-terminated string +static const u_char * +pm_str_to_debug_charp_ex(const u_char *str, u_int size, BOOL for_csv) +{ + static u_char buf[200]; + u_int i; + u_char *buf_p; + + // Copy the string. But replace unprintable characters (most importantly \0) with underscores. + buf_p = &buf[0]; + for (i=0; i(buffer), size, _pattern_id, _flags) +{ +} + +kiss_pmglob_string_s::kiss_pmglob_string_s(const u_char *buffer, size_t size, int _pattern_id, u_int _flags) +{ + dbgAssert(buffer && size > 0) << "Illegal arguments"; + buf.resize(size); + memcpy(buf.data(), buffer, size); + pattern_id = _pattern_id; + flags = _flags; + return; +} + + +// Returns the pattern of the pattern as u_char* +int +kiss_pmglob_string_get_id(const kiss_pmglob_string_s *pm_string) +{ + KISS_ASSERT(pm_string != nullptr, "Illegal arguments"); + return pm_string->pattern_id; +} + + +// Returns the size of the pattern +u_int +kiss_pmglob_string_get_size(const kiss_pmglob_string_s * pm_string) +{ + KISS_ASSERT(pm_string != nullptr, "Illegal arguments"); + return pm_string->buf.size(); +} + +// Returns the pattern of the pattern as u_char* +const u_char * +kiss_pmglob_string_get_pattern(const kiss_pmglob_string_s *pm_string) +{ + KISS_ASSERT(pm_string != nullptr, "Illegal arguments"); + return pm_string->buf.data(); +} + + +// Debug only - Returns a printable character pointer for the string +const u_char * +kiss_pmglob_string_to_debug_charp(const kiss_pmglob_string_s *pm_string) +{ + return pm_str_to_debug_charp(kiss_pmglob_string_get_pattern(pm_string), kiss_pmglob_string_get_size(pm_string)); +} + + +u_int +kiss_pmglob_string_get_flags(const kiss_pmglob_string_s *pm_string) +{ + KISS_ASSERT(pm_string != nullptr, "Illegal arguments"); + return pm_string->flags; +} +SASAL_END diff --git a/components/utils/pm/kiss_patterns.h b/components/utils/pm/kiss_patterns.h new file mode 100644 index 0000000..523594b --- /dev/null +++ b/components/utils/pm/kiss_patterns.h @@ -0,0 +1,74 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __kiss_patterns_h__ +#define __kiss_patterns_h__ + +#include +#include +#include "pm_adaptor.h" + +// kiss_pmglob_string functions + +class kiss_pmglob_string_s { + public: + explicit kiss_pmglob_string_s(const char *buffer, size_t size, int _pattern_id, u_int _flags); + explicit kiss_pmglob_string_s(const u_char *buffer, size_t size, int _pattern_id, u_int _flags); + + std::vector buf; + int pattern_id; + u_int flags; +}; + + +// Returns the size of pattern +// +// Parameters: +// pattern - the pattern. +// Return value: +// int - the size that this pattern represents. +KISS_APPS_CPAPI +u_int kiss_pmglob_string_get_size(const kiss_pmglob_string_s *pattern); + +// Returns the pattern of the pattern as u_char* +// +// Parameters: +// patterns - the pattern. +// Return value: +// u_char * - the pattern that this pattern represents. +KISS_APPS_CPAPI +const u_char *kiss_pmglob_string_get_pattern(const kiss_pmglob_string_s *pattern); + +// For debugging only - returns a printable pointer for the string. +// Replaces unprintable characters with underscores. +// +// Note: In multithreaded situations, the buffer returned may be overrun by another thread. +// At worst, this would lead to an incorrect string being printed. +KISS_APPS_CPAPI +const u_char *kiss_pmglob_string_to_debug_charp(const kiss_pmglob_string_s *pm_string); + +// Returns the id of pattern +// +// Parameters: +// patterns - the pattern. +// Return value: +// id - the pattern_id that this pattern represents. +KISS_APPS_CPAPI +int kiss_pmglob_string_get_id(const kiss_pmglob_string_s *pattern); + + +KISS_APPS_CPAPI +u_int kiss_pmglob_string_get_flags(const kiss_pmglob_string_s *pattern); + + +#endif // __kiss_patterns_h__ diff --git a/components/utils/pm/kiss_pm_stats.cc b/components/utils/pm/kiss_pm_stats.cc new file mode 100644 index 0000000..62d7e0e --- /dev/null +++ b/components/utils/pm/kiss_pm_stats.cc @@ -0,0 +1,429 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "general_adaptor.h" +#include "sasal.h" +// ********************* INCLUDES ************************** +#include "kiss_pm_stats.h" +// ********************* INCLUDES ************************** + +SASAL_START // Multiple Pattern Matcher +// ********************* FUNCTIONS ************************** + + +// Initialize the common statistics +kiss_ret_val +kiss_pm_stats_common_init(kiss_pm_stats_common new_stats) +{ + static const char rname[] = "kiss_pm_stats_common_init"; + + if (new_stats == NULL) { + kiss_debug_err(K_PM, ("%s: stats is zero\n", rname)); + return KISS_ERROR; + } + + bzero(new_stats, sizeof(struct kiss_pm_stats_common_s)); + +#if 0 + if (kiss_pm_stats_take_exec_time) { + new_stats->exec_num_cpus = kiss_multik_instance_num; + new_stats->exec = kiss_pmglob_memory_kmalloc_ex( + new_stats->exec_num_cpus * sizeof(struct kiss_pm_stats_dynamic_aligned_s), + rname, + (FW_KMEM_NOSLEEP| FW_KMEM_RETURN_ALIGN_PTR) + ); + + if (!new_stats->exec) { + kiss_debug_err(K_PM, ("%s: Error in allocating the execution stats\n", rname)); + return KISS_ERROR; + } + + bzero(new_stats->exec, new_stats->exec_num_cpus*sizeof(struct kiss_pm_stats_dynamic_aligned_s)); + } +#endif + + return KISS_OK; + +} + +#define KISS_MULTIK_MAX_INSTANCE_NUM 40 + +// Free the common statistics +void +kiss_pm_stats_common_free(kiss_pm_stats_common stats) +{ + static const char rname[] = "kiss_pm_stats_common_free"; + BOOL should_free_stats_exec = + stats && + stats->exec && + stats->exec_num_cpus > 0 && + stats->exec_num_cpus < KISS_MULTIK_MAX_INSTANCE_NUM; + if (should_free_stats_exec) { + kiss_pmglob_memory_kfree( + stats->exec, + stats->exec_num_cpus * sizeof(struct kiss_pm_stats_dynamic_aligned_s), + rname + ); + stats->exec = NULL; + } + return; +} + +// Update build-time statistics +void +kiss_pm_stats_common_update_compile(kiss_pm_stats_common stats, u_int bytes, u_int compilation_time, + enum kiss_pm_stats_update_compile_type type) +{ + KISS_ASSERT_PERF(stats, ("Illegal arguments")); + + switch (type) { + case UPDATE_COMPILE_STATS_MEM: + stats->compile.memory_bytes = bytes; + return; + case UPDATE_COMPILE_STATS_TIME: + stats->compile.compilation_time = compilation_time; + return; + case UPDATE_COMPILE_STATS_BOTH: + stats->compile.memory_bytes = bytes; + stats->compile.compilation_time = compilation_time; + return; + } +} + + +// Will adding to an unsigned variable cause it to wrap around? 
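+// (Unsigned addition wraps modulo 2^N, so a wrapped sum is always smaller than the value
+// it was added to. For example, with 32-bit counters, 0xFFFFFFF0 + 0x20 yields 0x10,
+// which is smaller than 0xFFFFFFF0, so the macro below reports a wrap-around.)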
+#define ADDITION_WOULD_WRAP_AROUND(old_val, delta) \ + ((old_val) + (delta) < (old_val)) + +// Reset buffer length statistics, so we can add a buffer without wraparound +static void +handle_buflen_stats_wraparound(struct kiss_pm_stats_dynamic_s *cur_kern_inst_stats) +{ + cur_kern_inst_stats->buflen.total = 0; + cur_kern_inst_stats->buflen.sample_num = 0; +} + +// Reset execution time statistics, so we can add a sample without wraparound +static void +handle_runtime_stats_wraparound(struct kiss_pm_stats_dynamic_s *cur_kern_inst_stats) +{ + cur_kern_inst_stats->runtime.total_exec_time = 0; + cur_kern_inst_stats->runtime.user_cb_exec_time = 0; + cur_kern_inst_stats->runtime.sample_num = 0; +} + + +// Update run-time statistics +void +kiss_pm_stats_common_update_exec(kiss_pm_stats_common stats, u_int buf_size, u_int num_of_matches) +{ + struct kiss_pm_stats_dynamic_s *cur_kern_inst_stats; + KISS_ASSERT_PERF(stats, ("Illegal arguments")); + if(stats->exec) { + ASSERT_LOCKED; + cur_kern_inst_stats = &(stats->exec[kiss_multik_this_instance_num].stats); + + // Buffer length statistics + if (ADDITION_WOULD_WRAP_AROUND(cur_kern_inst_stats->buflen.total, buf_size)) { + handle_buflen_stats_wraparound(cur_kern_inst_stats); + } + cur_kern_inst_stats->buflen.total += buf_size; + cur_kern_inst_stats->buflen.sample_num++; + if (buf_size > cur_kern_inst_stats->buflen.max) { + cur_kern_inst_stats->buflen.max = buf_size; + } + + // General statistics + cur_kern_inst_stats->num_of_buffs++; + cur_kern_inst_stats->num_of_matches += num_of_matches; + if (num_of_matches > cur_kern_inst_stats->max_matches_on_buf) { + cur_kern_inst_stats->max_matches_on_buf = num_of_matches; + } + } + + return; +} + +// Update run-time (execution) statistics +void +kiss_pm_stats_common_update_exec_time(kiss_pm_stats_common stats, u_int exec_time, u_int user_cb_time) +{ + struct kiss_pm_stats_dynamic_s *cur_kern_inst_stats; + if(stats && stats->exec) { + ASSERT_LOCKED; + cur_kern_inst_stats = &(stats->exec[kiss_multik_this_instance_num].stats); + + // The execution time includes the callback, but we want the net time. 
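+    // (For example, a measured execution time of 50 microseconds that includes 20
+    // microseconds spent inside the user callback is recorded below as 30 microseconds
+    // of net PM execution time.)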
+ exec_time -= user_cb_time; + + // take care of wrap around + if (ADDITION_WOULD_WRAP_AROUND(cur_kern_inst_stats->runtime.total_exec_time, exec_time) || + ADDITION_WOULD_WRAP_AROUND(cur_kern_inst_stats->runtime.user_cb_exec_time, user_cb_time)) { + handle_runtime_stats_wraparound(cur_kern_inst_stats); + } + cur_kern_inst_stats->runtime.total_exec_time += exec_time; + cur_kern_inst_stats->runtime.user_cb_exec_time += user_cb_time; + cur_kern_inst_stats->runtime.sample_num++; + + // Updating the max values + if (exec_time > cur_kern_inst_stats->runtime.max_exec_time){ + cur_kern_inst_stats->runtime.max_exec_time = exec_time; + } + if (user_cb_time > cur_kern_inst_stats->runtime.user_cb_max_time){ + cur_kern_inst_stats->runtime.user_cb_max_time = user_cb_time; + } + } + return; +} + + +// Clear all runtime statistics +void +kiss_pm_stats_common_reset_exec(kiss_pm_stats_common stats) +{ + u_int i; + if(stats && stats->exec) { + for (i = 0; i < stats->exec_num_cpus; i++) { + struct kiss_pm_stats_dynamic_s *cur_cpu_stats; + cur_cpu_stats = &(stats->exec[i].stats); + bzero(cur_cpu_stats, sizeof(*cur_cpu_stats)); + } + } +} + + +// Aggregate the run-time statistics from all cpus in src to dst +static void +kiss_pm_stats_common_aggregate_cpus(struct kiss_pm_stats_dynamic_s *dst, const struct kiss_pm_stats_common_s *src) +{ + u_int i; + KISS_ASSERT_PERF(src, ("Illegal arguments")); + if(src && src->exec) + { + for (i = 0; i < src->exec_num_cpus; i++) { + struct kiss_pm_stats_dynamic_s *cur_cpu_src = &(src->exec[i].stats); + + // Buffer length statistics - add and avoid wrap-around + if (ADDITION_WOULD_WRAP_AROUND(dst->buflen.total, cur_cpu_src->buflen.total)) { + handle_buflen_stats_wraparound(dst); + } + dst->buflen.total += cur_cpu_src->buflen.total; + dst->buflen.sample_num += cur_cpu_src->buflen.sample_num; + dst->buflen.max = MAX(dst->buflen.max, cur_cpu_src->buflen.max); + + // General statistics + dst->num_of_matches += cur_cpu_src->num_of_matches; + dst->num_of_stage1_matches += cur_cpu_src->num_of_stage1_matches; + dst->num_of_stage22_matches += cur_cpu_src->num_of_stage22_matches; + dst->num_of_stage23_matches += cur_cpu_src->num_of_stage23_matches; + + dst->num_of_buffs += cur_cpu_src->num_of_buffs; + if (dst->max_matches_on_buf < cur_cpu_src->max_matches_on_buf) { + dst->max_matches_on_buf = cur_cpu_src->max_matches_on_buf; + } + + // Execution time statistics - add and avoid wrap-around + if (ADDITION_WOULD_WRAP_AROUND(dst->runtime.total_exec_time, cur_cpu_src->runtime.total_exec_time) || + ADDITION_WOULD_WRAP_AROUND(dst->runtime.user_cb_exec_time, cur_cpu_src->runtime.user_cb_exec_time)) { + handle_runtime_stats_wraparound(dst); + } + dst->runtime.total_exec_time += cur_cpu_src->runtime.total_exec_time; + dst->runtime.user_cb_exec_time += cur_cpu_src->runtime.user_cb_exec_time; + dst->runtime.sample_num += cur_cpu_src->runtime.sample_num; + dst->runtime.max_exec_time = MAX(dst->runtime.max_exec_time, cur_cpu_src->runtime.max_exec_time); + dst->runtime.user_cb_max_time = MAX(dst->runtime.user_cb_max_time, cur_cpu_src->runtime.user_cb_max_time); + } + } + return; +} + +#define TOTAL_MICORSEC_TO_AVG_NSEC(total, samples) \ + ((samples)==0 ? 
0 : (u_int)((u_int64)(total) * 1000 / (u_int64)(samples))) + +// Print the common statistics +void +kiss_pm_stats_common_print( + kiss_pm_stats_common stats, + enum kiss_pm_stats_type type, + enum kiss_pm_stats_format format, + BOOL print_headline +) +{ + struct kiss_pm_stats_dynamic_s dynamic_stats; + KISS_ASSERT_PERF((stats && !print_headline) || print_headline, ("Illegal arguments")); + + if (type != KISS_PM_DYNAMIC_STATS) { + if (format == KISS_PM_TEXT_FORMAT_STATS) { + kdprintf("Memory comsumption for this handle is %u bytes\n", stats->compile.memory_bytes); + kdprintf("Compilation time for this handle is %u microseconds\n", stats->compile.compilation_time); + } else if (format == KISS_PM_CSV_FORMAT_STATS) { + if (print_headline) { + kdprintf("Memory consumption;Compilation time (microsec);"); + } else { + kdprintf("%u;%u;", stats->compile.memory_bytes, stats->compile.compilation_time); + } + } + } + + if (!print_headline) { + bzero(&dynamic_stats, sizeof(struct kiss_pm_stats_dynamic_s )); + kiss_pm_stats_common_aggregate_cpus(&dynamic_stats, stats); + } + + if (type != KISS_PM_STATIC_STATS) { + if (format == KISS_PM_TEXT_FORMAT_STATS) { + kdprintf("Number of executed buffers is %u\n", dynamic_stats.num_of_buffs); + kdprintf("Max buffer length is %u\n", dynamic_stats.buflen.max); + kdprintf("Avg buffer length is %u\n", + dynamic_stats.buflen.sample_num ? (dynamic_stats.buflen.total/dynamic_stats.buflen.sample_num) : 0); + kdprintf("Number of matches is %u\n", dynamic_stats.num_of_matches); + kdprintf("Number of matches after stage1 is %u\n", dynamic_stats.num_of_stage1_matches); + kdprintf("Number of matches after start-anchor is %u\n", dynamic_stats.num_of_stage22_matches); + kdprintf("Number of matches after end-anchor is %u\n", dynamic_stats.num_of_stage23_matches); + kdprintf("Max number of matches on buffer is %u\n", dynamic_stats.max_matches_on_buf); + // Average execution time - display in nanosecond so rounding down won't lose too much + kdprintf("Avg execution time is %u ns for PM, %u ns for callbacks\n", + TOTAL_MICORSEC_TO_AVG_NSEC(dynamic_stats.runtime.total_exec_time, dynamic_stats.runtime.sample_num), + TOTAL_MICORSEC_TO_AVG_NSEC(dynamic_stats.runtime.user_cb_exec_time, dynamic_stats.runtime.sample_num)); + // Maximum execution time - display in nanosecond for consistency with average. + // concatenate 000 instead of multiplying, + // to avoid overflow (in very extreme, yet very interesting, cases). + kdprintf("Max execution time is %u000 ns for PM, %u000 ns for callbacks\n", + dynamic_stats.runtime.max_exec_time, dynamic_stats.runtime.user_cb_max_time); + } else if (format == KISS_PM_CSV_FORMAT_STATS) { + if (print_headline) { + kdprintf( + "Executed buffers #;" + "Max buffer length;" + "Avg buffer length;" + "Matches #;" + "Max matches on buffer;" + "stage1 matches #;" + "2nd filter matches #;" + "3rd filter matches #;" + "Avg PM exec time (ns);" + "Max PM exec time (ns);" + "Avg callback exec time (ns);" + "Max callback exec time (ns)" + ); + } else { + kdprintf("%u;%u;%u;%u;%u;%u;%u;%u;%u;%u000;%u;%u000", + dynamic_stats.num_of_buffs, + dynamic_stats.buflen.max, + dynamic_stats.buflen.sample_num ? 
(dynamic_stats.buflen.total/dynamic_stats.buflen.sample_num) : 0, + dynamic_stats.num_of_matches, + dynamic_stats.max_matches_on_buf, + dynamic_stats.num_of_stage1_matches, + dynamic_stats.num_of_stage22_matches, + dynamic_stats.num_of_stage23_matches, + TOTAL_MICORSEC_TO_AVG_NSEC( + dynamic_stats.runtime.total_exec_time, + dynamic_stats.runtime.sample_num + ), + dynamic_stats.runtime.max_exec_time, + TOTAL_MICORSEC_TO_AVG_NSEC( + dynamic_stats.runtime.user_cb_exec_time, + dynamic_stats.runtime.sample_num + ), + dynamic_stats.runtime.user_cb_max_time + ); + } + } + } + + return; +} + +#define kiss_pm_serialize_during_sanity_check 0 + + +// Return the statistics from src in dst (aggregate statistics from all cpus) +kiss_ret_val +kiss_pm_stats_common_get(struct kiss_pm_stats_static_s *dst_compile, + struct kiss_pm_stats_dynamic_s *dst_exec, + const struct kiss_pm_stats_common_s *src) +{ + KISS_ASSERT_PERF((dst_compile && dst_exec && src), ("Illegal arguments")); + + if (!(dst_compile && dst_exec && src)) { + return KISS_ERROR; + } + bzero(dst_compile, sizeof(struct kiss_pm_stats_static_s)); + bzero(dst_exec, sizeof(struct kiss_pm_stats_dynamic_s)); + bcopy(&(src->compile), dst_compile, sizeof(struct kiss_pm_stats_static_s)); + + kiss_pm_stats_common_aggregate_cpus(dst_exec, src); + + // for debug purposes only! + // ignore specific statistics fields when performing a sanity check on serialization + if (kiss_pm_serialize_during_sanity_check) { + dst_compile->memory_bytes = KISS_PM_SERIALIZE_IGNORE_INT; + dst_compile->compilation_time = KISS_PM_SERIALIZE_IGNORE_INT; + } + + return KISS_OK; +} + +// Copy the statistics from src to dst +kiss_ret_val +kiss_pm_stats_common_copy(kiss_pm_stats_common dst, const struct kiss_pm_stats_common_s *src) +{ + if(src && src->exec) { + u_int num_cpus = MIN(src->exec_num_cpus, dst->exec_num_cpus); + KISS_ASSERT_PERF((dst && src), ("Illegal arguments")); + + if (!(dst && src)) { + return KISS_ERROR; + } + bcopy(&(src->compile), &(dst->compile), sizeof(struct kiss_pm_stats_static_s)); + bcopy(src->exec, dst->exec, num_cpus*sizeof(struct kiss_pm_stats_dynamic_aligned_s)); + } + return KISS_OK; +} + +// Get size of serialized common statistics. Only build-time statistics are counted +u_int +kiss_pm_stats_common_get_serialize_size() +{ + return sizeof(struct kiss_pm_stats_static_s); +} + +// Serialize common statistics. Only build-time statistics are serialized +kiss_ret_val +kiss_pm_stats_common_serialize(const struct kiss_pm_stats_common_s *stats, u_char **buf, u_int *size) +{ + KISS_ASSERT_PERF((stats), ("Illegal arguments")); + + DATA_BUFF_COPY(*buf, size, &(stats->compile), sizeof(struct kiss_pm_stats_static_s)); + + return KISS_OK; +} + +// Deserialize common statistics. Only build-time statistics are deserialized +kiss_ret_val +kiss_pm_stats_common_deserialize( + kiss_pm_stats_common stats, + u_char **buf, u_int *size, + CP_MAYBE_UNUSED kiss_vbuf vbuf, + CP_MAYBE_UNUSED kiss_vbuf_iter *vbuf_iter +) +{ + KISS_ASSERT_PERF((stats), ("Illegal arguments")); + + DATA_BUFF_READ(*buf, size, vbuf, *vbuf_iter, &(stats->compile), sizeof(struct kiss_pm_stats_static_s)); + + return KISS_OK; +} + +// ******************** FUNCTIONS ************************* +SASAL_END diff --git a/components/utils/pm/kiss_pm_stats.h b/components/utils/pm/kiss_pm_stats.h new file mode 100644 index 0000000..9f5d36b --- /dev/null +++ b/components/utils/pm/kiss_pm_stats.h @@ -0,0 +1,146 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef __kiss_pm_stats_h__ +#define __kiss_pm_stats_h__ + +#include "pm_adaptor.h" + +// Common statistics + +// Common run time statistics +struct kiss_pm_stats_dynamic_s { + u_int num_of_buffs; // Number of buffers we ran this dfa on + u_int num_of_matches; // how many matches there were in this dfa + u_int max_matches_on_buf; // Maximal number of matches per one buf + + struct { // Buffer length statistics + u_int max; // Maximum buffer length + u_int total; // Total length (for average calculation) + u_int sample_num; // Number of buffers, whose lengths make up total. + } buflen; + + struct { // Execution time statistics - not collected by default + u_int total_exec_time; // PM Execution time (not including user callbacks) + u_int max_exec_time; // Maximal PM execution time + u_int user_cb_exec_time; // User callback execution time + u_int user_cb_max_time; // Maximal user callback execution time + u_int sample_num; // Number of execution time samples + } runtime; + + u_int num_of_stage1_matches; // Tier1 LSS matches, before filtering by mask + u_int num_of_stage22_matches; // Tier1 matches after ^ + u_int num_of_stage23_matches; // Tier1 matches after $ +}; + +// Common build time statistics +struct kiss_pm_stats_static_s { + u_int memory_bytes; // How many bytes does this tier consume + u_int compilation_time; // Compilation time of this tier in micro-seconds +}; + +struct CP_CACHELINE_ALIGNED kiss_pm_stats_dynamic_aligned_s { + struct kiss_pm_stats_dynamic_s stats; +}; + +struct kiss_pm_stats_common_s { + // Run time statistics, per-CPU, dynamically allocated + struct kiss_pm_stats_dynamic_aligned_s* exec; + // Size of the exec array + u_int exec_num_cpus; + // Build time statistics + struct kiss_pm_stats_static_s compile; +}; + +typedef struct kiss_pm_stats_common_s *kiss_pm_stats_common; + +enum kiss_pm_stats_update_compile_type { + UPDATE_COMPILE_STATS_MEM, + UPDATE_COMPILE_STATS_TIME, + UPDATE_COMPILE_STATS_BOTH +}; + +// In which format the statistics should be printed +enum kiss_pm_stats_format { + KISS_PM_TEXT_FORMAT_STATS = 0, // Textual, for viewing with text editor + KISS_PM_CSV_FORMAT_STATS // CSV, for opening with Excel +}; + +KISS_APPS_CPAPI +kiss_ret_val kiss_pm_stats_common_init(kiss_pm_stats_common new_stats); + +KISS_APPS_CPAPI +void kiss_pm_stats_common_free(kiss_pm_stats_common stats); + +KISS_APPS_CPAPI +void kiss_pm_stats_common_update_compile( + kiss_pm_stats_common stats, + u_int bytes, + u_int compilation_time, + enum kiss_pm_stats_update_compile_type type); + +KISS_APPS_CPAPI +void kiss_pm_stats_common_update_exec(kiss_pm_stats_common stats, u_int buf_size, u_int num_of_matches); + + +// @brief +// Updating the execution time of an execution of a buffer in tier2. +// +// @param stats - [in] The tier2 common stats. +// @param exec_time - [in] The execution time. 
+// @param user_cb_time - [in] the time spent in user callbacks during this execution
+//
+// @return Void
+//
+// @note
+// In case one of the stats vars wraps around, the aggregated vars will hold only the last exec stats.
+KISS_APPS_CPAPI
+void kiss_pm_stats_common_update_exec_time(kiss_pm_stats_common stats, u_int exec_time, u_int user_cb_time);
+
+KISS_APPS_CPAPI
+void kiss_pm_stats_common_reset_exec(kiss_pm_stats_common stats);
+
+KISS_APPS_CPAPI
+void kiss_pm_stats_common_print(
+    kiss_pm_stats_common stats,
+    enum kiss_pm_stats_type type,
+    enum kiss_pm_stats_format format,
+    BOOL print_headline
+);
+
+KISS_APPS_CPAPI
+kiss_ret_val kiss_pm_stats_common_get(
+    struct kiss_pm_stats_static_s *dst_compile,
+    struct kiss_pm_stats_dynamic_s *dst_exec,
+    const struct kiss_pm_stats_common_s *src
+);
+
+KISS_APPS_CPAPI
+kiss_ret_val kiss_pm_stats_common_copy(kiss_pm_stats_common dst, const struct kiss_pm_stats_common_s *src);
+
+KISS_APPS_CPAPI
+u_int kiss_pm_stats_common_get_serialize_size(void);
+
+KISS_APPS_CPAPI
+kiss_ret_val kiss_pm_stats_common_serialize(const struct kiss_pm_stats_common_s *stats, u_char **buf, u_int *size);
+
+KISS_APPS_CPAPI
+kiss_ret_val kiss_pm_stats_common_deserialize(
+    kiss_pm_stats_common stats,
+    u_char **buf,
+    u_int *size,
+    kiss_vbuf vbuf,
+    kiss_vbuf_iter *vbuf_iter
+);
+
+#endif // __kiss_pm_stats_h__
diff --git a/components/utils/pm/kiss_thin_nfa.cc b/components/utils/pm/kiss_thin_nfa.cc
new file mode 100644
index 0000000..0efee15
--- /dev/null
+++ b/components/utils/pm/kiss_thin_nfa.cc
@@ -0,0 +1,462 @@
+// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Thin NFA I/S
+// ------------
+// The thin NFA allows building and executing an automaton for string search, using the
+// Aho-Corasick algorithm.
+// The resulting automaton is built in a compact representation. Some states are "full" - they
+// have an explicit transition per character. Others are "partial" - they have some explicit transitions,
+// plus a "default transition". This is an epsilon-transition. For characters which don't have an
+// explicit transition, we follow the default transition, and look up the same character there.
+//
+// Source files
+// ------------
+// kiss_thin_nfa.c (this file) - execution code.
+// kiss_thin_nfa_build.c - allocation and destruction code. Contains code which is common to compilation
+// and serialization/deserialization. All objects which are part of the compiled automaton are created here.
+// kiss_thin_nfa_compile.c - compilation code. Contains the logic that converts a set of strings into an automaton.
+// kiss_thin_nfa_analyze.c - Validation and dump. Code that reads the BNFA and tries to make sense of it.
+// kiss_thin_nfa_impl.h - internal header file. APIs and definitions between the different source files.
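To make the "default transition" rule described above concrete, here is a minimal illustrative sketch of the lookup step. The ToyState type and toy_lookup function are hypothetical stand-ins, not the real BNFA structures from kiss_thin_nfa_impl.h:

#include <array>
#include <vector>

// Hypothetical stand-in for a Thin NFA state (illustrative only).
struct ToyState {
    std::array<int, 256> next_state; // explicit transitions; -1 where none exists
    int default_state;               // default ("fail") transition; -1 at the root only
};

// Follow default transitions until an explicit transition for 'ch' is found.
// Default transitions do not consume the input character.
int
toy_lookup(const std::vector<ToyState> &states, int cur, unsigned char ch)
{
    while (states[cur].next_state[ch] < 0 && states[cur].default_state >= 0) {
        cur = states[cur].default_state; // epsilon move: same character, shallower state
    }
    return (states[cur].next_state[ch] >= 0) ? states[cur].next_state[ch] : cur;
}

The execution code below does the same thing over the compact BNFA encoding: full states resolve every character directly, while partial states fall back through their fail chain.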
+ + +// ********************* INCLUDES ************************** +#include "kiss_thin_nfa_impl.h" +#include "sasal.h" + +SASAL_START // Multiple Pattern Matcher +// Internal execution flags passed to kiss_dfa_exec_one_buf: +#define KISS_PM_EXEC_LAST_BUFF 0x00000001 // This is the last buffer (preset buffer or the last buffer in vbuf) + + +// The runtime status of the Thin NFA +struct kiss_bnfa_runtime_s { + KissThinNFA *nfa_h; // The NFA we're executing + kiss_bnfa_comp_offset_t last_bnfa_offset; // Last state reached by exec_one_buf + std::vector> *matches; // The matches we've found so far + u_int scanned_so_far; // The length of all buffers before the current buffer +}; + + +// Critical code path debugging - enabled only in debug mode. +#define THIN_NFA_TRACE_TRANS(runtime, next_off, ch, op) \ + thinnfa_debug_perf( \ + "%s: Transition by 0x%02x to %d - %s\n", \ + FILE_LINE, \ + ch, \ + kiss_bnfa_offset_decompress(next_off), \ + op \ + ) + +#define TRANSLATE_CHAR_IF_NEEED(do_char_trans, char_trans_table, ch) \ + ((u_char)((do_char_trans) ? ((char_trans_table)[ch]) : (ch))) + +// Given a match for a pattern at a given position, insert an entry to the match list. +// We may add more than one entry, depending on the number of matching patterns. +// +// Parameters: +// runtime - the current status of Thin NFA execution. +// one_buf_offset - the offset of the match, within the buffer currently scanned. +// Together with runtime->scanned_so_far we can get the real match offset. +// one_buf_len - the length of the buffer currently scanned. Used for $ processing. +// exec_flags - the flags used. +static CP_INLINE void +kiss_thin_nfa_handle_match(struct kiss_bnfa_runtime_s *runtime, u_int pat_arr_offset, + u_int one_buf_offset, u_int one_buf_len, u_int exec_flags) +{ + static const char rname[] = "kiss_thin_nfa_handle_match"; + u_int match_pos; + const kiss_thin_nfa_pattern_array_t *pat_arr; + const kiss_thin_nfa_pattern_t *curr_id; + const kiss_thin_nfa_pattern_t *pat_end; + + // Where was the match? one_buf_offset is already moved beyond the characeter that caused the match, + // so we subtract one to get this character's offset. + match_pos = runtime->scanned_so_far + (one_buf_offset - 1); + pat_arr = kiss_thin_nfa_offset_to_pat_array_ptr(runtime->nfa_h, pat_arr_offset); + // Go over the patterns and add them to the match queue. + pat_end = &(pat_arr->pattern[pat_arr->n_patterns]); + thinnfa_debug_perf(( + "%s: Going over %u patterns, starting from offset %u\n", + rname, + pat_arr->n_patterns, + pat_arr_offset + )); + for (curr_id = &(pat_arr->pattern[0]); curr_id != pat_end; curr_id++) { + thinnfa_debug(("%s: Match for pattern ID %d at %d len %d\n", rname, curr_id->id, match_pos, curr_id->len)); + + // Handle ^ - An N byte pattern at the start of the buffer would match at byte N-1. + // NOTE: If the anchored state optimization is implemented in compilation, this test isn't needed. 
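+        // Illustrative example: a 3-byte pattern anchored with ^ can only be reported when the
+        // match ends at overall offset 2 (match_pos == len - 1); any larger match_pos means the
+        // pattern started past the buffer start, so it is filtered out by the check below.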
+        if ((curr_id->pattern_id_flags & KISS_PM_LSS_AT_BUF_START) && (match_pos != curr_id->len - 1)) {
+            thinnfa_debug_perf(("%s: Not match because of ^ %d\n", rname, curr_id->id));
+            continue;
+        }
+
+        // Handle $ - We must match at the buffer end, and it must be the last buffer
+        if ((curr_id->pattern_id_flags & KISS_PM_LSS_AT_BUF_END) &&
+            !((one_buf_offset == one_buf_len) && (exec_flags & KISS_PM_EXEC_LAST_BUFF))) {
+            thinnfa_debug_perf(("%s: Not match because of $ %d\n", rname, curr_id->id));
+            continue;
+        }
+        runtime->matches->emplace_back(curr_id->id, match_pos);
+    }
+
+    return;
+}
+
+
+// Wrapper to kiss_thin_nfa_handle_match, gets the state offset, not the ID.
+static CP_INLINE void
+kiss_thin_nfa_handle_match_state(struct kiss_bnfa_runtime_s *runtime, kiss_bnfa_comp_offset_t cur_offset,
+    u_int one_buf_offset, u_int one_buf_len, u_int exec_flags)
+{
+    const kiss_bnfa_state_t *state = kiss_bnfa_comp_offset_to_state(
+        runtime->nfa_h->bnfa,
+        cur_offset,
+        KISS_BNFA_STATE_MATCH
+    );
+    kiss_thin_nfa_handle_match(runtime, state->match.match_id, one_buf_offset, one_buf_len, exec_flags);
+}
+
+// Calculate the next state's offset, given a state and character. Good for full states only.
+// Faster than kiss_thin_nfa_get_next_offset. The offset parameter is a compressed 16-bit offset.
+// The returned offset is also compressed
+static CP_INLINE kiss_bnfa_comp_offset_t
+kiss_thin_nfa_get_next_offset_full(const kiss_bnfa_state_t *bnfa, kiss_bnfa_comp_offset_t offset,
+    unsigned char char_to_find)
+{
+    const kiss_bnfa_state_t *state = kiss_bnfa_comp_offset_to_state(bnfa, offset, KISS_BNFA_STATE_FULL);
+    return (kiss_bnfa_comp_offset_t)state->full.transitions[char_to_find];
+}
+
+
+// Calculate the next state's offset, given a state and character. Good for partial states only.
+// Also indicates whether the buffer position should be incremented (i.e. if an explicit transition was found)
+static CP_INLINE kiss_bnfa_comp_offset_t
+kiss_thin_nfa_get_next_offset_partial(const kiss_bnfa_state_t *bnfa, kiss_bnfa_comp_offset_t offset,
+    unsigned char char_to_find, BOOL *inc_pos)
+{
+    const kiss_bnfa_state_t *state = kiss_bnfa_comp_offset_to_state(bnfa, offset, KISS_BNFA_STATE_PARTIAL);
+    u_int trans_num = state->partial.trans_num;
+    u_int i;
+
+    // Simple linear search is fast for a few transitions. If we have many, we use a full state anyway.
+    for (i = 0; i < trans_num; i++) {
+        const struct kiss_bnfa_partial_transition_s *tran = &state->partial.transitions[i];
+        // Smaller? Keep looking. Larger? Give up (transitions are sorted).
+        if (tran->tran_char < char_to_find) continue;
+        if (tran->tran_char > char_to_find) break;
+
+        // Found the character (explicit transition) - consume a character and move the automaton
+        *inc_pos = TRUE;
+        return tran->next_state_offset;
+    }
+
+    // No explicit transition found - move to the fail state, without consuming a character.
+    *inc_pos = FALSE;
+    return state->partial.fail_state_offset;
+}
+
+
+// Calculate the next state's offset, when the current is a match state.
+// Doesn't consume a character (epsilon transition)
+static CP_INLINE kiss_bnfa_comp_offset_t
+kiss_thin_nfa_get_next_offset_match(CP_MAYBE_UNUSED const kiss_bnfa_state_t *bnfa, kiss_bnfa_comp_offset_t offset)
+{
+    // After a match state we just move to the next consecutive state.
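+    // Compressed offsets count in KISS_BNFA_STATE_ALIGNMENT units, so this advances exactly
+    // past the match state header to the state stored right after it in memory.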
+    return offset + (sizeof(kiss_bnfa_match_state_t) / KISS_BNFA_STATE_ALIGNMENT);
+}
+
+#define PARALLEL_SCANS_NUM 4 // 4 heads scanning the buffer
+#define UNROLL_FACTOR 4 // Advance each head 4 bytes per loop
+
+
+// Move one head of the state machine. bnfa_offset must not be a match state.
+static CP_INLINE kiss_bnfa_comp_offset_t
+parallel_scan_advance_one(const kiss_bnfa_state_t *bnfa, kiss_bnfa_comp_offset_t bnfa_offset, const unsigned char ch)
+{
+    while (bnfa_offset >= 0) {
+        BOOL inc_pos;
+        // Partial state - Look for an explicit transition, or use the fail state
+        bnfa_offset = kiss_thin_nfa_get_next_offset_partial(bnfa, bnfa_offset, ch, &inc_pos);
+        if (inc_pos) {
+            // Found an explicit transition - can move to the next state.
+            return bnfa_offset;
+        }
+    }
+
+    // Full state (either we started with full, or the fail state chain reached one)
+    return kiss_thin_nfa_get_next_offset_full(bnfa, bnfa_offset, ch);
+}
+
+
+// Check if all heads are on a full state.
+// If they are - advance all heads and return TRUE.
+// If they aren't - do nothing and return FALSE.
+static CP_INLINE BOOL
+parallel_scan_advance_if_full(
+    const kiss_bnfa_state_t *bnfa,
+    kiss_bnfa_comp_offset_t *bnfa_offsets,
+    const unsigned char **buf_pos
+)
+{
+    kiss_bnfa_comp_offset_t offsets_and;
+
+    // If the bitwise AND of 4 offsets (PARALLEL_SCANS_NUM) is negative, they're all negative, so all states are full.
+    offsets_and = bnfa_offsets[0] & bnfa_offsets[1] & bnfa_offsets[2] & bnfa_offsets[3];
+    if (CP_UNLIKELY(offsets_and >= 0)) return FALSE;
+
+    // All states are full - make 4 transitions (PARALLEL_SCANS_NUM).
+    bnfa_offsets[0] = kiss_thin_nfa_get_next_offset_full(bnfa, bnfa_offsets[0], *(buf_pos[0]));
+    buf_pos[0]++;
+    bnfa_offsets[1] = kiss_thin_nfa_get_next_offset_full(bnfa, bnfa_offsets[1], *(buf_pos[1]));
+    buf_pos[1]++;
+    bnfa_offsets[2] = kiss_thin_nfa_get_next_offset_full(bnfa, bnfa_offsets[2], *(buf_pos[2]));
+    buf_pos[2]++;
+    bnfa_offsets[3] = kiss_thin_nfa_get_next_offset_full(bnfa, bnfa_offsets[3], *(buf_pos[3]));
+    buf_pos[3]++;
+
+    return TRUE;
+}
+
+
+// Repeat parallel_scan_advance_if_full up to 4 times (UNROLL_FACTOR).
+// Return TRUE if all 4 were done, FALSE if stopped earlier.
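+// (With PARALLEL_SCANS_NUM == 4 and UNROLL_FACTOR == 4, a fully successful pass of this helper
+// consumes up to 16 input bytes - one byte per head per inner call.)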
+static CP_INLINE BOOL +parallel_scan_advance_if_full_unroll( + const kiss_bnfa_state_t *bnfa, + kiss_bnfa_comp_offset_t *bnfa_offsets, + const unsigned char **buf_pos +) +{ + if (!parallel_scan_advance_if_full(bnfa, bnfa_offsets, buf_pos)) return FALSE; + if (!parallel_scan_advance_if_full(bnfa, bnfa_offsets, buf_pos)) return FALSE; + if (!parallel_scan_advance_if_full(bnfa, bnfa_offsets, buf_pos)) return FALSE; + if (!parallel_scan_advance_if_full(bnfa, bnfa_offsets, buf_pos)) return FALSE; + return TRUE; +} + + +// Find the offset where each head should start and stop +static void +calc_head_buf_range(const u_char *buffer, u_int len, const u_char **head_start_pos, const u_char **head_end_pos) +{ + static const char rname[] = "calc_head_buf_range"; + const u_char *orig_buf = buffer; + u_int len_per_head = len / PARALLEL_SCANS_NUM; + u_int rem = len % PARALLEL_SCANS_NUM; + u_int i; + + for (i=0; i= PARALLEL_SCANS_NUM-rem) head_len++; + head_start_pos[i] = buffer; + buffer += head_len; + head_end_pos[i] = buffer; + thinnfa_debug(("%s: Head %u gets range %ld:%ld\n", rname, + i, head_start_pos[i]-orig_buf, head_end_pos[i]-orig_buf)); + } +} + +// Set the initial BNFA offset for each head +static void +set_head_bnfa_offset( + struct kiss_bnfa_runtime_s *runtime, + kiss_bnfa_comp_offset_t *bnfa_pos, + const u_char **buf_pos, + const u_char *buffer +) +{ + const KissThinNFA *nfa_h = runtime->nfa_h; + kiss_bnfa_comp_offset_t init_off = kiss_bnfa_offset_compress(nfa_h->min_bnfa_offset); + u_int i; + + if (nfa_h->flags & KISS_THIN_NFA_HAS_ANCHOR) { + // Start from the root (next full state after the anchored root) + init_off++; + } + + // Heads that scan from the beginning of the buffer, will start at previous buffer's ending state. + // The rest start anew. + // Several scanning heads will start at buffer's beginning when buffer's size is less than PARALLEL_SCANS_NUM + for (i=0; ilast_bnfa_offset; + } else { + bnfa_pos[i] = init_off; + } + } +} + + +// Run Thin NFA parallely on a single buffer. +static CP_INLINE void +kiss_thin_nfa_exec_one_buf_parallel_ex( + struct kiss_bnfa_runtime_s *runtime, + const u_char *buffer, + u_int len, u_int flags, + BOOL do_char_trans, + u_char *char_trans_table +) +{ + const kiss_bnfa_state_t *bnfa = runtime->nfa_h->bnfa; + const unsigned char *end, *buf_pos[PARALLEL_SCANS_NUM], *head_end_pos[PARALLEL_SCANS_NUM]; + kiss_bnfa_comp_offset_t bnfa_offset[PARALLEL_SCANS_NUM]; + u_int i; + u_int overlap_bytes; + int overlap_head_mask; + + // set starting position, ending position and state for each scanning head + calc_head_buf_range(buffer, len, buf_pos, head_end_pos); + set_head_bnfa_offset(runtime, bnfa_offset, buf_pos, buffer); + + end = buffer + len; + + // unroll 16 (PARALLEL_SCANS_NUM * UNROLL_FACTOR) times, while we have at least 4 input bytes to process. + while (buf_pos[PARALLEL_SCANS_NUM-1] + UNROLL_FACTOR <= end) { + // Fastpath - Advance all heads up to 4 chars, as long as they're all on a full state. + if (CP_LIKELY(parallel_scan_advance_if_full_unroll(bnfa, bnfa_offset, buf_pos))) continue; + + // At least one head is on partial or match - advance all 4 by their type. 
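+        // Per head: a match state is reported and stepped over without consuming input, and then
+        // parallel_scan_advance_one consumes exactly one (possibly translated) character; heads
+        // that already passed their own range are skipped.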
+ for (i=0; i= head_end_pos[i]) continue; + if (kiss_bnfa_state_type(bnfa, bnfa_offset[i]) == KISS_BNFA_STATE_MATCH) { + // Handle a match + kiss_thin_nfa_handle_match_state(runtime, bnfa_offset[i], (u_int)(buf_pos[i] - buffer), len, flags); + bnfa_offset[i] = kiss_thin_nfa_get_next_offset_match(bnfa, bnfa_offset[i]); + } + // Advance to the next state + bnfa_offset[i] = parallel_scan_advance_one(bnfa, bnfa_offset[i], + TRANSLATE_CHAR_IF_NEEED(do_char_trans, char_trans_table, *(buf_pos[i]))); + (buf_pos[i])++; + } + } + + // Handle overlap - advance all heads into the next head's range, as long as there's a chance + // for a match which started in this head's range. + overlap_head_mask = (1<<(PARALLEL_SCANS_NUM-1))-1; // All heads except the last + for (overlap_bytes = 0; overlap_head_mask!=0; overlap_bytes++) { + // Advance each head (except the last) as long as overlap is needed for it + for (i=0; infa_h, bnfa_offset[i]); + if ((state_depth <= overlap_bytes) || (buf_pos[i] >= end)) { + overlap_head_mask &= ~my_mask; + continue; + } + + // Advance the state machine, including match handling + if (kiss_bnfa_state_type(bnfa, bnfa_offset[i]) == KISS_BNFA_STATE_MATCH) { + // Handle a match + kiss_thin_nfa_handle_match_state(runtime, bnfa_offset[i], (u_int)(buf_pos[i] - buffer), len, flags); + bnfa_offset[i] = kiss_thin_nfa_get_next_offset_match(bnfa, bnfa_offset[i]); + } + // Advance to the next state + bnfa_offset[i] = parallel_scan_advance_one(bnfa, bnfa_offset[i], + TRANSLATE_CHAR_IF_NEEED(do_char_trans, char_trans_table, *(buf_pos[i]))); + (buf_pos[i])++; + } + } + + // We may have stopped on a match state. If so - handle and advance + for (i=0; ilast_bnfa_offset = bnfa_offset[i]; + break; + } + } + + return; +} + + +// Execute a thin NFA on a buffer. +// Parameters: +// nfa_h - the NFA handle +// buf - a buffer to scan. +// matches - output - will be filled with a kiss_pmglob_match_data element for each match. +void +kiss_thin_nfa_exec(KissThinNFA *nfa_h, const Buffer& buf, std::vector> &matches) +{ + struct kiss_bnfa_runtime_s bnfa_runtime; + + dbgAssert(nfa_h != nullptr) << "kiss_thin_nfa_exec() was called with null handle"; + + if (buf.size() == 0) { + return; + } + + // Set the runtime status structure + bnfa_runtime.nfa_h = nfa_h; + bnfa_runtime.last_bnfa_offset = kiss_bnfa_offset_compress(nfa_h->min_bnfa_offset); // The initial state + bnfa_runtime.matches = &matches; + bnfa_runtime.scanned_so_far = 0; + + auto segments = buf.segRange(); + for( auto iter = segments.begin(); iter != segments.end(); iter++ ) { + const u_char * data = iter->data(); + u_int len = iter->size(); + u_int flags = ((iter+1)==segments.end()) ? KISS_PM_EXEC_LAST_BUFF : 0; + if (nfa_h->flags & KISS_THIN_NFA_USE_CHAR_XLATION) { + kiss_thin_nfa_exec_one_buf_parallel_ex(&bnfa_runtime, data, len, flags, TRUE, nfa_h->xlation_tab); + } else { + kiss_thin_nfa_exec_one_buf_parallel_ex(&bnfa_runtime, data, len, flags, FALSE, nullptr); + } + bnfa_runtime.scanned_so_far += len; + } + + return; +} +SASAL_END diff --git a/components/utils/pm/kiss_thin_nfa_analyze.cc b/components/utils/pm/kiss_thin_nfa_analyze.cc new file mode 100644 index 0000000..d2c5046 --- /dev/null +++ b/components/utils/pm/kiss_thin_nfa_analyze.cc @@ -0,0 +1,1499 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include "pm_adaptor.h" +#include "kiss_thin_nfa_impl.h" +#include "kiss_hash.h" +#include "sasal.h" + +SASAL_START // Multiple Pattern Matcher +#define hash_t kiss_hash_t +#define hash_intcreate_ex(sz, kmem_flags) \ + kiss_hash_create(sz, (hkeyfunc_t)kiss_hash_intvalue, (hcmpfunc_t)kiss_hash_intcmp, NULL) +#define hash_lookup kiss_hash_lookup +#define hash_insert kiss_hash_insert +#define hash_find_hashent kiss_hash_find_hashent +#define hashent kiss_hashent +#define hash_iterator_create kiss_hash_iterator_create +#define hash_iterator_get_hashent kiss_hash_iterator_get_hashent +#define hash_iterator kiss_hash_iterator +#define hash_iterator_next_ent kiss_hash_iterator_next_ent +#define hash_iterator_destroy kiss_hash_iterator_destroy +#define hash_destroy kiss_hash_destroy + +// Thin NFA validation code +// ------------------------ +// We validate the following things: +// 1. For each state: +// a. That it's within the BNFA bounds. +// b. If matching, the pattern offset is valid. +// 2. For each transition: +// a. That it points to a valid state. +// b. For partial states, that the normal transitions point down the tree, and fail states point up. +// 3. For the entire tree, that all states are in it. +// 4. The pattern array is valid, and all offsets are used at least once. +typedef enum { + VALIDATION_STAT_FLAG_NONE = 0x00, + VALIDATION_STATE_IS_JUMP = 0x01, + VALIDATION_STATE_IS_ANCHORED = 0x02, + VALIDATION_STATE_BNFA_ONLY = 0x04, // Match/jump states, not a real part of the tree +} validation_state_flags_t; + + +// Information we keep about a state while validating the Thin NFA +struct state_validation_data_s { + kiss_bnfa_offset_t bnfa_offset; + int level; + struct state_validation_data_s *next; + struct state_validation_data_s *parent_state; // First arrived here from this state + u_char trans_char; // First arrived here by transition using this character + kiss_bnfa_state_type_t type; + validation_state_flags_t flags; + struct thinnfa_validation_status_s *validation; // The global validation data +}; + + +// In which direction do we expect a transition to be? 
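+// (Roughly: explicit "goto" transitions should always lead to a deeper, not-yet-visited state,
+// while fail/default transitions should always lead to a strictly shallower one - the BFS scan
+// below enforces exactly that.)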
+typedef enum { + TRANS_DIRECTION_BACK, // Fail state - must be a lower tier + TRANS_DIRECTION_FAIL_ONLY, // Fail state of a state with not transition - leaf or jump state + TRANS_DIRECTION_FORWARD, // Partial state transition - must be a higher tier + TRANS_DIRECTION_ANY // Full state trnsition - we can't tell +} transition_direction_t; + + +struct thinnfa_validation_status_s { + const KissThinNFA *nfa; // The NFA being validated + struct state_validation_data_s *state_data; // Validation info per state + struct state_validation_data_s *root, *anchored_root; // Interesting special states + u_int state_num; // States we found in the automaton + hash_t offset_to_data; // BNFA offset -> &state_data[i] + struct state_validation_data_s *q_head, *q_tail; // Queue for BFS scan + hash_t pat_array_offset_ref_count; // Offset in the pattern array -> + // number of finite states using it +}; + + +// Callbacks for PM dump +typedef enum { + THIN_NFA_DUMP_FLAGS_NONE = 0x00, + THIN_NFA_DUMP_SKIP_ROOT_TRANS = 0x01, +} thin_nfa_dump_flags_t; + +// Callbacks provided by different dump formats +typedef struct { + void (*start_cb) (const struct thinnfa_validation_status_s *validation); + void (*state_start_cb) (const struct state_validation_data_s *state_data); + void (*transition_cb) (const struct state_validation_data_s *from_state, + u_char tran_char, + kiss_bnfa_offset_t next_state_off); + void (*state_end_cb) (const struct state_validation_data_s *state_data); + void (*end_cb) (const struct thinnfa_validation_status_s *validation); + thin_nfa_dump_flags_t flags; +} thin_nfa_dump_cbs_t; + +// Change name printing to compensate for annoying Wiki behavior with backslashes: +// "\\\x" is printed as "\x". +// There's no safe way to print a single backslash: "\\" is nothing, "\\\\" is "\\". +// You can try "\\\", which works, unless it's at the end of the string. +static int doing_wiki_dump = 0; + +// Change name printing to avoid various chars which confuse Excel in CSVs +static int doing_csv_dump = 0; + +static int +is_csv_printable(u_char c) +{ + return !( c == '\\' || c == ',' || c == '\'' || c=='"' || c=='=' || c==' ' || c=='+' || c=='-'); +} + + +// Get a printable representation of a character, suitable for inclusion in double quotes. +static const char * +char_to_printable(u_char ch) +{ + static char buf[8]; + + if (!isprint(ch) || ch==' ' || (doing_wiki_dump && ch=='\\') || (doing_csv_dump && !is_csv_printable(ch))) { + // Print the hex value if not printable + snprintf(buf, sizeof(buf), "%sx%02X", doing_wiki_dump ? 
"\\\\\\" : "\\", ch); + } else if (ch == '"' || ch == '\\') { + // Escape " and \ so they will behave nicely in a double-quoted string + snprintf(buf, sizeof(buf), "\\%c", ch); + } else { + // Just print the character + snprintf(buf, sizeof(buf), "%c", ch); + } + return buf; +} + + +static struct state_validation_data_s * +thin_nfa_validation_offset_to_state(struct thinnfa_validation_status_s *validation, kiss_bnfa_offset_t bnfa_offset) +{ + return (struct state_validation_data_s *)hash_lookup(validation->offset_to_data, (void *)(intptr_t)bnfa_offset); +} + +#define NUM_NAME_BUFS 3 +#define NAME_BUF_LEN 50 + +// Generate a nice printable name for the state +static const char * +state_name(const struct state_validation_data_s *state) +{ + struct thinnfa_validation_status_s *validation = state->validation; + static char name_bufs[NAME_BUF_LEN][NUM_NAME_BUFS]; + static int cur_name_buf = 0; + char *name_buf; + char *p; + const struct state_validation_data_s *tmp_state; + + // Special cases + if (state == validation->root) return "ROOT"; + if (state == validation->anchored_root) return "^ROOT"; + + // Follow transitions backwards, build the name + name_buf = name_bufs[cur_name_buf]; + cur_name_buf = (cur_name_buf+1)%NUM_NAME_BUFS; + p=&name_buf[NAME_BUF_LEN-1]; + *p = '\0'; + + if (!doing_csv_dump && state->flags&VALIDATION_STATE_BNFA_ONLY) { + // Mark matching and jump states with a suffix. + p--; + *p = (state->flags&VALIDATION_STATE_IS_JUMP) ? '#' : '*'; + } + + // Follow transitions backwards, build the name + for (tmp_state=state; tmp_state!=validation->root; tmp_state=tmp_state->parent_state) { + const char *ctext; + if (tmp_state->parent_state == NULL) { + // Possible if we haven't iterated all states yet. Just give the BNFA offset + snprintf(name_buf, NAME_BUF_LEN, "STATE_%d", state->bnfa_offset); + return name_buf; + } + + if (tmp_state->parent_state->flags&VALIDATION_STATE_BNFA_ONLY) { + // The characters are reported in real states only + continue; + } + + // Add the transition character to the name. Make sure not to add \0. + ctext = char_to_printable(tmp_state->trans_char); + if (p < name_buf+strlen(ctext)) break; + p -= strlen(ctext); + bcopy(ctext, p, strlen(ctext)); + } + + if (tmp_state != validation->root) { + // Didn't go back to the root - add ? prefix + if (p > name_buf) p--; + *p = '?'; + } + return p; +} + + +// Return a state's epsilon transition, or KISS_BNFA_OFFSET_INVALID if none. +static kiss_bnfa_offset_t +validation_state_epsilon_trans(const struct state_validation_data_s *state_data) +{ + switch (state_data->type) { + case KISS_BNFA_STATE_PARTIAL: { + const kiss_bnfa_state_t *state = kiss_bnfa_offset_to_state(state_data->validation->nfa->bnfa, + state_data->bnfa_offset); + return kiss_bnfa_offset_decompress(state->partial.fail_state_offset); + } + case KISS_BNFA_STATE_MATCH: + return state_data->bnfa_offset + sizeof(kiss_bnfa_match_state_t); + + case KISS_BNFA_STATE_FULL: + default: + return KISS_BNFA_OFFSET_INVALID; + } +} + + +// How many outgoing transitions do we have? 
+static u_int +validation_state_trans_num(const struct state_validation_data_s *state_data) +{ + switch (state_data->type) { + case KISS_BNFA_STATE_FULL: return KISS_PM_ALPHABET_SIZE; + case KISS_BNFA_STATE_MATCH: return 0; + case KISS_BNFA_STATE_PARTIAL: { + const kiss_bnfa_state_t *state = kiss_bnfa_offset_to_state(state_data->validation->nfa->bnfa, + state_data->bnfa_offset); + return state->partial.trans_num; + } + + case KISS_BNFA_STATE_TYPE_NUM: return 0; + } + return 0; +} + + +static void +thin_nfa_validation_queue_enq(struct state_validation_data_s *item) +{ + struct thinnfa_validation_status_s *validation = item->validation; + // Add at the tail. Set the existing tail (if any) or head (if not) to point to the new item. + if (validation->q_tail != NULL) { + validation->q_tail->next = item; + } else { + validation->q_head = item; + } + validation->q_tail = item; + item->next = NULL; +} + + +static void +thin_nfa_validation_queue_enq_head(struct state_validation_data_s *item) +{ + struct thinnfa_validation_status_s *validation = item->validation; + item->next = validation->q_head; + validation->q_head = item; + if (validation->q_tail==NULL) { + validation->q_tail = item; + } +} + + +static struct state_validation_data_s * +thin_nfa_validation_queue_deq(struct thinnfa_validation_status_s *validation) +{ + struct state_validation_data_s *item; + + // Remove from the head + item = validation->q_head; + if (!item) return NULL; + + validation->q_head = item->next; + if (validation->q_head == NULL) { + // Removed last + validation->q_tail = NULL; + } + item->next = NULL; + return item; +} + + +// Is a state within the BNFA boundaries? +static kiss_ret_val +thin_nfa_validate_offset_in_range(const KissThinNFA *nfa_handle, kiss_bnfa_offset_t bnfa_offset, u_int state_size, + const char *caller, const char *msg) +{ + if ((bnfa_offset >= nfa_handle->min_bnfa_offset) && (bnfa_offset+(int)state_size <= nfa_handle->max_bnfa_offset)) { + return KISS_OK; + } + thinnfa_debug_critical(("%s: State at BNFA offset %d %s %d - out of range (%d:%d)\n", + caller, bnfa_offset, + msg, state_size, nfa_handle->min_bnfa_offset, nfa_handle->max_bnfa_offset)); + return KISS_ERROR; +} + + +// Validate the state, which is at a given BNFA offset, is within the BNFA boundaries +static kiss_ret_val +thin_nfa_validate_state_in_range(const KissThinNFA *nfa_handle, kiss_bnfa_offset_t bnfa_offset, u_int *state_size_p) +{ + static const char rname[] = "thin_nfa_validate_state_in_range"; + u_int state_size; + + // See that the basic header fits, so we don't read outside the BNFA + if (thin_nfa_validate_offset_in_range(nfa_handle, bnfa_offset, sizeof(kiss_bnfa_minimal_state_t), + rname, "header") != KISS_OK) { + return KISS_ERROR; + } + + // Find the state's real size + state_size = kiss_bnfa_state_size(nfa_handle->bnfa, bnfa_offset); + + // See that the whole state fits in + if (thin_nfa_validate_offset_in_range(nfa_handle, bnfa_offset, state_size, rname, "size") != KISS_OK) { + return KISS_ERROR; + } + + *state_size_p = state_size; + return KISS_OK; +} + + +// Find the root and anchored root states +static kiss_ret_val +thin_nfa_validation_find_root(struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_find_root"; + kiss_bnfa_offset_t init_offset = validation->nfa->min_bnfa_offset; + struct state_validation_data_s *initial; + + initial = thin_nfa_validation_offset_to_state(validation, init_offset); + if (!initial) { + thinnfa_debug_critical(("%s: Initial state (offset %d) not 
found\n", rname, init_offset)); + return KISS_ERROR; + } + + if (validation->nfa->flags & KISS_THIN_NFA_HAS_ANCHOR) { + // The initial is the anchored root, the real root immediatey follows + kiss_bnfa_offset_t root_offset = init_offset + sizeof(kiss_bnfa_full_state_t); + struct state_validation_data_s *root = thin_nfa_validation_offset_to_state(validation, root_offset); + if (!root) { + thinnfa_debug_critical(("%s: Failed to find root (offset %u)\n", rname, root_offset)); + return KISS_ERROR; + } + validation->root = root; + validation->anchored_root = initial; + } else { + // No anchored root, the root is initial + validation->root = initial; + validation->anchored_root = NULL; + } + + thinnfa_debug(("%s: BNFA at %p, root %p anchored root %p\n", rname, validation->nfa->bnfa, + validation->nfa->bnfa + validation->root->bnfa_offset, + validation->anchored_root ? validation->nfa->bnfa + validation->anchored_root->bnfa_offset : NULL)); + + return KISS_OK; +} + + +// Set validation->state_num (so we can allocate validation data) +static kiss_ret_val +thin_nfa_validation_count_states(struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_count_states"; + kiss_bnfa_offset_t bnfa_offset; + + bnfa_offset = validation->nfa->min_bnfa_offset; + validation->state_num = 0; + while (bnfa_offset < validation->nfa->max_bnfa_offset) { + u_int state_size; + if (thin_nfa_validate_state_in_range(validation->nfa, bnfa_offset, &state_size) != KISS_OK) return KISS_ERROR; + validation->state_num++; + bnfa_offset += state_size; + } + + thinnfa_debug(("%s: Found %d states\n", rname, validation->state_num)); + + return KISS_OK; +} + + +// Go over all states and fill in validation data structure. +// Doesn't fill in the level - saved for a later BFS iteration. +static kiss_ret_val +thin_nfa_validation_find_states(struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_find_states"; + u_int i; + kiss_bnfa_offset_t bnfa_offset; + u_int states_added = 0; + + thinnfa_debug(("%s: Validating %p\n", rname, validation->nfa)); + + bnfa_offset = validation->nfa->min_bnfa_offset; + for (i = 0; i < validation->state_num; i++) { + struct state_validation_data_s *state_data; + const kiss_bnfa_state_t *state; + kiss_bnfa_state_type_t type; + u_int state_size; + u_int req_alignment; + + // See that the state fits in the BNFA + if (thin_nfa_validate_state_in_range(validation->nfa, bnfa_offset, &state_size) != KISS_OK) return KISS_ERROR; + type = kiss_bnfa_state_type(validation->nfa->bnfa, kiss_bnfa_offset_compress(bnfa_offset)); + state = kiss_bnfa_offset_to_state(validation->nfa->bnfa, bnfa_offset); + + thinnfa_debug_extended(("%s: State %u offset %d type %d size %d\n", rname, i, bnfa_offset, type, state_size)); + if (type == KISS_BNFA_STATE_MATCH) { + thinnfa_debug_extended(("%s: pattern array offset %u\n", rname, state->match.match_id)); + } + + // Verify that the offset and type agree + switch (type) { + case KISS_BNFA_STATE_FULL: + if (bnfa_offset >= 0) { + thinnfa_debug_critical(("%s: Full state at positive offset %d\n", rname, bnfa_offset)); + return KISS_ERROR; + } + req_alignment = sizeof(kiss_bnfa_full_state_t); + break; + + case KISS_BNFA_STATE_MATCH: + case KISS_BNFA_STATE_PARTIAL: + if (bnfa_offset < 0) { + // Can't really happen, because kiss_bnfa_state_type would return KISS_BNFA_STATE_FULL. 
+ thinnfa_debug_critical(("%s: State type %d at negative offset %d\n", rname, type, bnfa_offset)); + return KISS_ERROR; + } + req_alignment = KISS_BNFA_STATE_ALIGNMENT; + break; + + default: + thinnfa_debug_critical(("%s: Invalid state type at offset %d - %d\n", rname, bnfa_offset, type)); + return KISS_ERROR; + } + + // Verify that the offset is properly aligned + if ((bnfa_offset % req_alignment) != 0) { + thinnfa_debug_critical(("%s: State offset %d - type %d but not on %d boundary\n", rname, + bnfa_offset, type, req_alignment)); + return KISS_ERROR; + } + + // OK - remember the state and advance the offset + state_data = &validation->state_data[states_added]; + states_added++; + + state_data->bnfa_offset = bnfa_offset; + state_data->next = NULL; + state_data->level = -1; // Indicating not visited. We'll calculate it during BFS traversal. + state_data->parent_state = NULL; // No parent, yet (will stay this way for the root) + state_data->trans_char = '\0'; // Meaningless when there's no parent_state + state_data->type = type; + state_data->flags = VALIDATION_STAT_FLAG_NONE; + state_data->validation = validation; + + if (type == KISS_BNFA_STATE_MATCH) { + ENUM_SET_FLAG(state_data->flags, VALIDATION_STATE_BNFA_ONLY); + } + + if (hash_insert(validation->offset_to_data, (void *)(intptr_t)bnfa_offset, state_data) == 0) { + // XXX: Failing verification on memory allocation error. Not nice. + // Can first build the hash, without any verifications, and only then verify. + thinnfa_debug_critical(("%s: validation hash insert %d->%p failed\n", rname, bnfa_offset, state_data)); + return KISS_ERROR; + } + bnfa_offset += state_size; + } + + if (bnfa_offset != validation->nfa->max_bnfa_offset) { + thinnfa_debug_critical(("%s: Found %d of %d states, reached offset %d, not %d\n", rname, + states_added, validation->state_num, bnfa_offset, validation->nfa->max_bnfa_offset)); + return KISS_ERROR; + } + + // Set pointers to root states + if (thin_nfa_validation_find_root(validation) != KISS_OK) return KISS_ERROR; + + return KISS_OK; +} + + +// Follow a transition, by adding the next state to the scan list, if it wasn't added yet. +static void +thin_nfa_validation_add_next_state( + struct state_validation_data_s *from_state_data, + struct state_validation_data_s *next_state_data, + u_char trans_char +) +{ + const KissThinNFA *nfa; + int inc_level; + + if (next_state_data->level >= 0) { + // We've already seen this state + return; + } + + if (from_state_data->flags&VALIDATION_STATE_BNFA_ONLY) { + // A matching state and the following real state are on the same level. + // Using the mathing state's incoming transition char makes the state name end up nice. + inc_level = 0; + trans_char = from_state_data->trans_char; + } else { + inc_level = 1; + } + + if (from_state_data->flags & VALIDATION_STATE_IS_ANCHORED) { + ENUM_SET_FLAG(next_state_data->flags, VALIDATION_STATE_IS_ANCHORED); + } + + nfa = from_state_data->validation->nfa; + if (nfa->flags & KISS_THIN_NFA_USE_CHAR_XLATION) { + // Use the canonic character. 
Without it, states pointed from partial state get the lowercase char, + // but states pointed from full states get the uppercase (because it's first) + trans_char = nfa->xlation_tab[trans_char]; + } + + // Calculate the level and enqueue + next_state_data->level = from_state_data->level + inc_level; + next_state_data->parent_state = from_state_data; + next_state_data->trans_char = trans_char; + if (inc_level) { + thin_nfa_validation_queue_enq(next_state_data); + } else { + //We want this one to be scanned immediately. + thin_nfa_validation_queue_enq_head(next_state_data); + } +} + + +// Check that a character's transition is to a valid state. +// Returns the next state's data, or NULL if invalid. +static kiss_ret_val +thin_nfa_validation_add_transition(struct state_validation_data_s *prev_state_data, + u_int trans_char, kiss_bnfa_offset_t next_state_offset, transition_direction_t expected_dir) +{ + static const char rname[] = "thin_nfa_validation_add_transition"; + struct state_validation_data_s *next_state_data; + const char *err_msg = NULL; + + // See that there's a state at the target offset + next_state_data = thin_nfa_validation_offset_to_state(prev_state_data->validation, next_state_offset); + if (!next_state_data) { + thinnfa_debug_critical(( + "%s: Transition from '%s' by %02x expected direction %d -> BNFA offset %d - no such state", + rname, + state_name(prev_state_data), trans_char, expected_dir, next_state_offset + )); + return KISS_ERROR; + } + + // Check that transitions are in the direction we expect + switch (expected_dir) { + case TRANS_DIRECTION_FORWARD: + // Partial state explicit transition - must point to a state we've never seen before + if (next_state_data->level >= 0) { + err_msg = "must be a new fail state"; + } + break; + + case TRANS_DIRECTION_BACK: + // Fail state transition - must point to a state we've already seen, on a lower level + if (next_state_data->level < 0) { + err_msg = "transition to an unknown state"; + } else if (next_state_data->level >= prev_state_data->level) { + err_msg = "transition to a higher level"; + } else if (next_state_data->type == KISS_BNFA_STATE_MATCH) { + err_msg = "transition to match the state"; + } + break; + + case TRANS_DIRECTION_FAIL_ONLY: + // Fail state of a state with no transitions. Can be either: + // Leaf state - we expect a transition to a known lower-level state. + // Jump state - we expect a transition to a new full state. + if (next_state_data->level < 0) { + // Jump state. Remember this, so we won't increment the next state's level + ENUM_SET_FLAG(prev_state_data->flags, VALIDATION_STATE_IS_JUMP); + ENUM_SET_FLAG(prev_state_data->flags, VALIDATION_STATE_BNFA_ONLY); + if (next_state_offset >= 0) { + // Jump states are meant to jump to full states. 
+ err_msg = "Jump state to partial"; + } + } else { + // Leaf state + if (next_state_data->level >= prev_state_data->level) { + // A state's fail state must be at a lower level + err_msg = "transition to a level higher than the leaf state"; + } + } + if (err_msg==NULL && next_state_data->type == KISS_BNFA_STATE_MATCH) { + err_msg = "transition to match the leaf state"; + } + break; + + case TRANS_DIRECTION_ANY: + // Full state transition - can point anywhere + break; + } + + if (err_msg != NULL) { + thinnfa_debug_critical(( + "%s: Transition from '%s' by %02x expected dir %d -> '%s', levels %d -> %d, %s\n", + rname, + state_name(prev_state_data), + trans_char, + expected_dir, + state_name(next_state_data), + prev_state_data->level, + next_state_data->level, + err_msg + )); + return KISS_ERROR; + } + + // Add the next state to the tree + thin_nfa_validation_add_next_state(prev_state_data, next_state_data, (u_char)trans_char); + + return KISS_OK; +} + + +// Do a BFS scan of the tree and check transitions +static kiss_ret_val +thin_nfa_validation_scan_tree(struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_scan_tree"; + kiss_ret_val ret; + + // Initialize scan list with the root. The list contains all states, whose level + // was already calculated, but whose children were not examined yet. + validation->q_head = validation->q_tail = NULL; + validation->root->level = 0; + thin_nfa_validation_queue_enq(validation->root); + + if (validation->anchored_root) { + // ^ROOT behaves like ROOT's child, setting level 1 makes fail transitions to normal tree OK + validation->anchored_root->level = 1; + validation->anchored_root->parent_state = validation->root; + validation->anchored_root->trans_char = '^'; // Makes printing the name nice + ENUM_SET_FLAG(validation->anchored_root->flags, VALIDATION_STATE_IS_ANCHORED); + thin_nfa_validation_queue_enq(validation->anchored_root); + } + + // No errors yet + ret = KISS_OK; + + while (1) { + struct state_validation_data_s *state_data; + const kiss_bnfa_state_t *state; + + // Remove an element from the list + state_data = thin_nfa_validation_queue_deq(validation); + if (!state_data) break; + + state = kiss_bnfa_offset_to_state(validation->nfa->bnfa, state_data->bnfa_offset); + + switch (state_data->type) { + case KISS_BNFA_STATE_PARTIAL: { + // Partial State + const kiss_bnfa_partial_state_t *p_state = &state->partial; + u_int i; + + // Check the fail state (tran_char=0 is because its meaningless) + if (thin_nfa_validation_add_transition(state_data, 0, validation_state_epsilon_trans(state_data), + p_state->trans_num==0 ? TRANS_DIRECTION_FAIL_ONLY : TRANS_DIRECTION_BACK) != KISS_OK) { + ret = KISS_ERROR; + } + + // Go over the transitions to all included characters + for (i=0; itrans_num; i++) { + // Verify that the transition list is sorted. + // Actually, we removed binary search so it no longer matters. 
+ if (i>0 && (p_state->transitions[i].tran_char <= p_state->transitions[i-1].tran_char)) { + thinnfa_debug_critical(( + "%s: Transitions from state %s not sorted - %02x after %02x\n", + rname, + state_name(state_data), + p_state->transitions[i].tran_char, + p_state->transitions[i-1].tran_char + )); + ret = KISS_ERROR; + } + + // Verify that the transition points to a valid offset + if (thin_nfa_validation_add_transition(state_data, p_state->transitions[i].tran_char, + kiss_bnfa_offset_decompress(p_state->transitions[i].next_state_offset), + TRANS_DIRECTION_FORWARD) != KISS_OK) { + ret = KISS_ERROR; + } + } + + break; + } + + case KISS_BNFA_STATE_FULL: { + // Full state + u_int i; + + // Go over the transitions to all characters + for (i=0; ifull.transitions[i]), TRANS_DIRECTION_ANY) != KISS_OK) { + ret = KISS_ERROR; + } + } + break; + } + + case KISS_BNFA_STATE_MATCH: + // Add an implicit transition to the next state + if (thin_nfa_validation_add_transition(state_data, 0, + validation_state_epsilon_trans(state_data), TRANS_DIRECTION_FORWARD) != KISS_OK) { + ret = KISS_ERROR; + } + break; + + default: + // Can't really happen - checked already in thin_nfa_validation_find_states. + thinnfa_debug_critical(( + "%s: State %s has invalid type %d\n", + rname, + state_name(state_data), + state_data->type + )); + ret = KISS_ERROR; + break; + } + } + + return ret; +} + + +// See if there are states in the BNFA which were never visited +kiss_ret_val +thin_nfa_validation_unvisited_states(struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_unvisited_states"; + u_int i; + kiss_ret_val ret = KISS_OK; + + for (i=0; istate_num; i++) { + struct state_validation_data_s *state_data = &validation->state_data[i]; + if (state_data->level < 0) { + thinnfa_debug_critical(("%s: State %s never visited\n", rname, state_name(state_data))); + ret = KISS_ERROR; + } + } + return ret; +} + + +// Verify that pattern arrays buffer is self-consistant, and insert offsets into hash +static kiss_ret_val +thin_nfa_validation_check_pattern_arrays(struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_check_pattern_arrays"; + u_int pat_arr_offset; + + if (!validation->nfa->pattern_arrays || !validation->nfa->pattern_arrays_size) { + thinnfa_debug_critical(("%s: NULL pattern array (%p) or 0 length (%u)\n", rname, + validation->nfa->pattern_arrays, validation->nfa->pattern_arrays_size)); + return KISS_ERROR; + } + + pat_arr_offset = 0; + while (pat_arr_offset < validation->nfa->pattern_arrays_size) { + const kiss_thin_nfa_pattern_array_t *pat_arr; + if (!hash_insert(validation->pat_array_offset_ref_count, (void *)(intptr_t)pat_arr_offset, (void *)0)) { + thinnfa_debug_critical(("%s: failed to insert value into hash\n", rname)); + return KISS_ERROR; + } + pat_arr = kiss_thin_nfa_offset_to_pat_array_ptr(validation->nfa, pat_arr_offset); + if (pat_arr->n_patterns == 0) { + thinnfa_debug_critical(( + "%s: encounterd a pat array with 0 pattern at offset %u\n", + rname, + pat_arr_offset + )); + return KISS_ERROR; + } + pat_arr_offset += kiss_thin_nfa_pattern_array_size(pat_arr->n_patterns); + } + + if (pat_arr_offset != validation->nfa->pattern_arrays_size) { + thinnfa_debug_critical(("%s: pat_arr_offset (%u) is past total size (%u)\n", rname, + pat_arr_offset, validation->nfa->pattern_arrays_size)); + return KISS_ERROR; + } + + return KISS_OK; +} + + +// Verify that all match states point to a valid offset and increase ref count +static 
kiss_ret_val +thin_nfa_validation_check_match_states(const struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_check_match_states"; + kiss_ret_val ret = KISS_OK; + u_int i; + + for (i=0; i < validation->state_num; i++) { + struct state_validation_data_s *state_data = &validation->state_data[i]; + const kiss_bnfa_state_t *state; + u_int pat_arr_offset; + struct hashent **he; + + if (state_data->type != KISS_BNFA_STATE_MATCH) continue; + + state = kiss_bnfa_offset_to_state(validation->nfa->bnfa, state_data->bnfa_offset); + pat_arr_offset = state->match.match_id; + thinnfa_debug_extended(("%s: Found matching state %s pattern offset %u\n", rname, + state_name(state_data), pat_arr_offset)); + + he = hash_find_hashent(validation->pat_array_offset_ref_count, (void *)(intptr_t)pat_arr_offset); + if (he && *he) { + u_int *ref_count = (u_int *)(&((*he)->val)); + (*ref_count)++; + } else { + thinnfa_debug_critical(("%s: pattern offset (%u) for state %s is not valid!\n", rname, + pat_arr_offset, state_name(state_data))); + ret = KISS_ERROR; + } + } + + return ret; +} + +// Check that all offsets are used at least once. +static kiss_ret_val +thin_nfa_validation_unused_pat_offsets(const struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_unused_offsets"; + hash_iterator hi; + kiss_ret_val ret = KISS_OK; + + hi = hash_iterator_create(validation->pat_array_offset_ref_count); + if (!hi) { + thinnfa_debug_critical(("%s: failed to create hash iterator\n", rname)); + return KISS_ERROR; + } + + do { + struct hashent* he = hash_iterator_get_hashent(hi); + if (!he) { + thinnfa_debug_critical(("%s: failed to get hash entry\n", rname)); + ret = KISS_ERROR; + continue; + } + if ((u_int *)he->val == 0) { + thinnfa_debug_critical(("%s: offset %p has 0 ref count\n", rname, (u_int *)he->key)); + ret = KISS_ERROR; + } + // We use hash_iterator_next_ent and not hash_iterator_next becuase we store int as value + // and if the value is 0, hash_iterator_next will indidate that the iteration is over. + } while (hash_iterator_next_ent(hi)); + + hash_iterator_destroy(hi); + + return ret; +} + +// Check that the state map is correct +static kiss_ret_val +thin_nfa_validation_depth_map(const struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_depth_map"; + u_int i; + kiss_ret_val ret = KISS_OK; + + for (i=0; i < validation->state_num; i++) { + struct state_validation_data_s *state_data = &validation->state_data[i]; + u_int map_depth = kiss_bnfa_offset_to_depth( + validation->nfa, + kiss_bnfa_offset_compress(state_data->bnfa_offset) + ); + u_int validation_depth = state_data->level; + + if (state_data->flags & VALIDATION_STATE_IS_ANCHORED) { + // Validation treats ^ROOT as level 1 (and its children as level 1 more than real). + validation_depth--; + } + + if (validation_depth == map_depth) continue; + if (map_depth == validation->nfa->max_pat_len && validation_depth >= KISS_THIN_NFA_MAX_ENCODABLE_DEPTH) { + // kiss_bnfa_offset_to_depth returns max_pat_len for level 255 and up. 
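+            // (Depths at or above KISS_THIN_NFA_MAX_ENCODABLE_DEPTH are clamped by the map,
+            // so this case is not a mismatch.)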
+ continue; + } + + thinnfa_debug_critical(("%s: State %s found in depth %d, map says %d (flags %x)\n", rname, + state_name(state_data), validation_depth, map_depth, state_data->flags)); + ret = KISS_ERROR; + } + + return ret; +} + + +static void +thin_nfa_validation_fini(struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_fini"; + if (validation->state_data != NULL) { + fw_kfree(validation->state_data, validation->state_num * sizeof(struct state_validation_data_s), rname); + validation->state_data = NULL; + } + if (validation->offset_to_data != NULL) { + hash_destroy(validation->offset_to_data); + validation->offset_to_data = NULL; + } + if (validation->pat_array_offset_ref_count != NULL) { + hash_destroy(validation->pat_array_offset_ref_count); + validation->pat_array_offset_ref_count = NULL; + } + + validation->nfa = NULL; +} + + +static kiss_ret_val +thin_nfa_validation_init(const KissThinNFA *nfa, struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "thin_nfa_validation_init"; + + bzero(validation, sizeof(*validation)); + validation->nfa = nfa; + + if (thin_nfa_validation_count_states(validation) < 0) { + thinnfa_debug_err(("%s: Failed to count states\n", rname)); + goto failure; + } + + // Allocate data for state validation information + validation->state_data = (struct state_validation_data_s *)fw_kmalloc_sleep( + validation->state_num * sizeof(struct state_validation_data_s), + rname + ); + if (!validation->state_data) { + thinnfa_debug_err(("%s: Failed to allocate %d state pointers\n", rname, validation->state_num)); + goto failure; + } + + // Allocate BNFA offset -> validation data mapping + validation->offset_to_data = hash_intcreate_ex(validation->state_num, FW_KMEM_SLEEP); + if (!validation->offset_to_data) { + thinnfa_debug_err(( + "%s: Failed to allocate hash table for validating %d states\n", + rname, + validation->state_num + )); + goto failure; + } + + // Allocate pattern arrays offset -> ref count mapping + validation->pat_array_offset_ref_count = hash_intcreate_ex(nfa->match_state_num, FW_KMEM_SLEEP); + if (!validation->pat_array_offset_ref_count) { + thinnfa_debug_err(( + "%s: Failed to allocate hash table for offsets - %u finite states\n", + rname, + nfa->match_state_num + )); + goto failure; + } + + if (thin_nfa_validation_find_states(validation) != KISS_OK) { + thinnfa_debug_err(("%s: Failed to fill NFA state info\n", rname)); + goto failure; + } + + return KISS_OK; + +failure: + thin_nfa_validation_fini(validation); + return KISS_ERROR; +} + + +BOOL +kiss_thin_nfa_is_valid(const KissThinNFA *nfa_h) +{ + static const char rname[] = "kiss_thin_nfa_is_valid"; + BOOL valid = FALSE; + struct thinnfa_validation_status_s validation; + + // Allocate and initialize validation data + if (thin_nfa_validation_init(nfa_h, &validation) != KISS_OK) { + thinnfa_debug_err(("%s: Failed to initialize validation data\n", rname)); + + // We can't validate, so we assume the Thin NFA is valid. + valid = TRUE; + goto finish; + } + + // Do a BFS scan to verify relations, + // see that all states are reached, verify that pattern offsets are used correctly. 
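+    // Each check below bails out to 'finish' on its first failure, leaving 'valid' as FALSE.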
+ if (thin_nfa_validation_scan_tree(&validation) != KISS_OK) goto finish; + if (thin_nfa_validation_unvisited_states(&validation) != KISS_OK) goto finish; + if (thin_nfa_validation_check_pattern_arrays(&validation) != KISS_OK) goto finish; + if (thin_nfa_validation_check_match_states(&validation) != KISS_OK) goto finish; + if (thin_nfa_validation_unused_pat_offsets(&validation) != KISS_OK) goto finish; + if (thin_nfa_validation_depth_map(&validation) != KISS_OK) goto finish; + + valid = TRUE; + +finish: + if (valid) { + thinnfa_debug_major(("%s: Thin NFA %p validation succeeded\n", rname, nfa_h)); + } else { + thinnfa_debug_critical(("%s: Thin NFA %p validation failed\n", rname, nfa_h)); + } + thin_nfa_validation_fini(&validation); + return valid; +} + + +// Thin NFA Dump code: +// From here, till the end of the file, the code is about dumping the automaton in different formats. +// CSV dump - For Excel +// XML dump - For the JFlap automaton visualisation applet. +// Wiki dump - For the Wiki {graph-from-table} plugin. +static int *xml_dump_level_positions; +static u_int xml_dump_level_positions_size; + +static void +xml_dump_positions_init(const struct thinnfa_validation_status_s *validation) +{ + static const char rname[] = "xml_dump_positions_init"; + u_int i; + + // Allocate a level->position map + xml_dump_level_positions_size = validation->nfa->max_pat_len; + xml_dump_level_positions = (int *)fw_kmalloc( + xml_dump_level_positions_size * sizeof(*xml_dump_level_positions), + rname + ); + if (!xml_dump_level_positions) { + thinnfa_debug_critical(( + "%s: Failed to allocate positions array (%d entries)\n", + rname, + xml_dump_level_positions_size + )); + // All X positions will be 0. + return; + } + + for (i=0; i= xml_dump_level_positions_size) { + *y = 0; + } else { + *y = xml_dump_level_positions[level]; + xml_dump_level_positions[level] += 100; + } + *x = level*100; +} + + +static void +xml_dump_print_header(const struct thinnfa_validation_status_s *validation) +{ + xml_dump_positions_init(validation); + + kdprintf("\n"); + kdprintf(" "); + kdprintf("\n"); + kdprintf("\tfa\n"); + kdprintf("\t\n"); + kdprintf("\t\t\n"); +} + + +static void +xml_dump_print_transition_ex(const struct state_validation_data_s *from_state, + u_char tran_char, kiss_bnfa_offset_t next_state_off, BOOL is_epsilon) +{ + kdprintf("\t\t" "\n"); + kdprintf("\t\t\t" "%d\n", from_state->bnfa_offset); + kdprintf("\t\t\t" "%d\n", next_state_off); + if (is_epsilon) { + kdprintf("\t\t\t" "\n"); // Epsilon + } else { + kdprintf("\t\t\t" "%s\n", char_to_printable(tran_char)); + } + kdprintf("\t\t" "\n"); +} + + +static void +xml_dump_print_state_start(const struct state_validation_data_s *state_data) +{ + const KissThinNFA *nfa = state_data->validation->nfa; + int x, y; + kiss_bnfa_offset_t epslion_trans; + + xml_dump_get_position(state_data->level, &x, &y); + + kdprintf("\t\t" "\n", + state_data->bnfa_offset, state_name(state_data)); + kdprintf("\t\t\t" "%d\n", x); + kdprintf("\t\t\t" "%d\n", y); + if (state_data->bnfa_offset == nfa->min_bnfa_offset) { + kdprintf("\t\t\t" "\n"); + } + if (state_data->type == KISS_BNFA_STATE_MATCH) { + kdprintf("\t\t\t" "\n"); + } + kdprintf("\t\t" "\n"); + + // Print an epsilon transition, if there is one + epslion_trans = validation_state_epsilon_trans(state_data); + if (epslion_trans != KISS_BNFA_OFFSET_INVALID) { + xml_dump_print_transition_ex(state_data, '\0', epslion_trans, TRUE); + } +} + + +static void +xml_dump_print_transition(const struct state_validation_data_s *from_state, + 
u_char tran_char, kiss_bnfa_offset_t next_state_off) +{ + xml_dump_print_transition_ex(from_state, tran_char, next_state_off, FALSE); +} + + +static void +xml_dump_print_state_end(CP_MAYBE_UNUSED const struct state_validation_data_s *state_data) +{ + // Nothing to do +} + +static void +xml_dump_print_trailer(CP_MAYBE_UNUSED const struct thinnfa_validation_status_s *validation) +{ + kdprintf("\t\n"); + kdprintf("\n"); + + xml_dump_positions_fini(); +} + +static thin_nfa_dump_cbs_t xml_dump_cbs = { + xml_dump_print_header, + xml_dump_print_state_start, + xml_dump_print_transition, + xml_dump_print_state_end, + xml_dump_print_trailer, + THIN_NFA_DUMP_SKIP_ROOT_TRANS +}; + +static void +wiki_dump_print_header(CP_MAYBE_UNUSED const struct thinnfa_validation_status_s *validation) +{ + // Start generating state names suitable for Wiki + doing_wiki_dump = 1; + + // The graph-from-table plugin will display the table lines below as a graph + kdprintf("{graph-from-table}\n"); +} + +static const char * +wiki_dump_state_color(const struct state_validation_data_s *state_data) +{ + if (state_data->bnfa_offset==state_data->validation->nfa->min_bnfa_offset) return "cyan"; // Initial + switch (state_data->type) { + case KISS_BNFA_STATE_FULL: return "yellow"; + case KISS_BNFA_STATE_PARTIAL: return "white"; + case KISS_BNFA_STATE_MATCH: return "green"; + + case KISS_BNFA_STATE_TYPE_NUM: break; + } + return "red"; // Shouldn't happen +} + +static void +wiki_dump_print_state(const struct state_validation_data_s *state_data) +{ + kiss_bnfa_offset_t epsilon_trans; + + // Format: |from|to|trans attrs|from attrs|to attrs| + // to, trans attrs, to attrs are omitted, so we only provide the state's attributes + kdprintf("|%d| | |label=\"%s\",fillcolor=%s|\n", + state_data->bnfa_offset, + state_name(state_data), + wiki_dump_state_color(state_data)); + + // Print epsilon transition, if any + epsilon_trans = validation_state_epsilon_trans(state_data); + if (epsilon_trans != KISS_BNFA_OFFSET_INVALID) { + // Format: |from|to|trans attrs| + kdprintf("|%d|%d|color=red|\n", state_data->bnfa_offset, epsilon_trans); + } +} + + +static void +wiki_dump_print_transition(const struct state_validation_data_s *from_state, + u_char tran_char, kiss_bnfa_offset_t next_state_off) +{ + // Format: |from|to|trans attrs| + kdprintf("|%d|%d|label=\"%s\"|\n", from_state->bnfa_offset, next_state_off, char_to_printable(tran_char)); +} + + +static void +wiki_dump_print_state_end(CP_MAYBE_UNUSED const struct state_validation_data_s *state_data) +{ + // Nothing to do +} + + +static void +wiki_dump_print_trailer(CP_MAYBE_UNUSED const struct thinnfa_validation_status_s *validation) +{ + kdprintf("{graph-from-table}\n"); + doing_wiki_dump = 0; +} + + +static thin_nfa_dump_cbs_t wiki_dump_cbs = { + wiki_dump_print_header, + wiki_dump_print_state, + wiki_dump_print_transition, + wiki_dump_print_state_end, + wiki_dump_print_trailer, + THIN_NFA_DUMP_SKIP_ROOT_TRANS +}; + + +#ifdef KERNEL +#define kdprintf_no_prefix kdprintf +#endif + +static void +csv_dump_print_header(CP_MAYBE_UNUSED const struct thinnfa_validation_status_s *validation) +{ + u_int i; + + // Start generating state names suitable for CSV / Excel + doing_csv_dump = 1; + + // The graph-from-table plugin will display the table lines below as a graph + kdprintf("Tier 1 CSV Dump start\n"); + + kdprintf_no_prefix( + "state_offset,state_name,level,is_match,is_partial,num_of_transitions,match_id_offset,fail_state_offset" + ); + for (i = 0; i < KISS_PM_ALPHABET_SIZE; i++) { + u_char ch = (u_char)i; 
+ kdprintf_no_prefix(","); + switch (ch) { + // Some printable characters are problamtic in CSV files + case '\\': kdprintf_no_prefix("bslash"); break; + case ',': kdprintf_no_prefix("comma"); break; + case '\'': kdprintf_no_prefix("quote"); break; + case '\"': kdprintf_no_prefix("dquote"); break; + case ' ': kdprintf_no_prefix("space"); break; + default: + if (isprint(ch)) { + kdprintf_no_prefix("%c", ch); + } else { + kdprintf_no_prefix("0x%02X", ch); + } + break; + } + } + kdprintf_no_prefix("\n"); +} + + +// Used to detect characters without a transition +static u_int csv_dump_next_trans; + +static void +csv_dump_print_state_start(const struct state_validation_data_s *state_data) +{ + const kiss_bnfa_state_t *bnfa = state_data->validation->nfa->bnfa; + const kiss_bnfa_state_t *state = kiss_bnfa_offset_to_state(bnfa, state_data->bnfa_offset); + kiss_bnfa_offset_t epsilon_trans = validation_state_epsilon_trans(state_data); + + // Basic data - state_offset,state_name,level,is_match,is_partial, + // num_of_transitions,match_id_offset,fail_state_offset + kdprintf_no_prefix("%d,%s,%d,%u,%u,%u", + state_data->bnfa_offset, + state_name(state_data), + state_data->level, + (state_data->type==KISS_BNFA_STATE_MATCH), + (state_data->type==KISS_BNFA_STATE_PARTIAL), + validation_state_trans_num(state_data) + ); + if (state_data->type == KISS_BNFA_STATE_MATCH) { + kdprintf_no_prefix(",%d", state->match.match_id); + } else { + kdprintf_no_prefix(", "); + } + if (epsilon_trans != KISS_BNFA_OFFSET_INVALID) { + kdprintf_no_prefix(",%d", epsilon_trans); + } else { + kdprintf_no_prefix(", "); + } + + csv_dump_next_trans = '\0'; +} + + +static void +csv_dump_print_transition(CP_MAYBE_UNUSED const struct state_validation_data_s *from_state, + u_char tran_char, kiss_bnfa_offset_t next_state_off) +{ + // Print skipped characters + while (csv_dump_next_trans < tran_char) { + kdprintf_no_prefix(", "); + csv_dump_next_trans++; + } + + kdprintf_no_prefix(",%d", next_state_off); + csv_dump_next_trans = tran_char + 1; +} + + +static void +csv_dump_print_state_end(CP_MAYBE_UNUSED const struct state_validation_data_s *state_data) +{ + // Print skipped characters at the tail + while (csv_dump_next_trans < KISS_PM_ALPHABET_SIZE) { + kdprintf_no_prefix(", "); + csv_dump_next_trans++; + } + + kdprintf_no_prefix("\n"); +} + + +static void +csv_dump_print_trailer(CP_MAYBE_UNUSED const struct thinnfa_validation_status_s *validation) +{ + kdprintf("Tier 1 CSV Dump end\n"); + doing_csv_dump = 0; +} + + +static thin_nfa_dump_cbs_t csv_dump_cbs = { + csv_dump_print_header, + csv_dump_print_state_start, + csv_dump_print_transition, + csv_dump_print_state_end, + csv_dump_print_trailer, + THIN_NFA_DUMP_FLAGS_NONE +}; + + +static void +thin_nfa_dump_state( + const struct thinnfa_validation_status_s *validation, + const struct state_validation_data_s *state_data, + const thin_nfa_dump_cbs_t *dump_format_cbs +) +{ + static const char rname[] = "thin_nfa_dump_state"; + const kiss_bnfa_state_t *state = kiss_bnfa_offset_to_state(validation->nfa->bnfa, state_data->bnfa_offset); + const kiss_bnfa_offset_t root_offset = validation->root->bnfa_offset; + u_int i, trans_num; + + // Print some stuff at the state start + dump_format_cbs->state_start_cb(state_data); + + // Print the transition table + trans_num = validation_state_trans_num(state_data); + for (i = 0; i < trans_num; i++) { + u_char tran_char; + kiss_bnfa_offset_t tran_bnfa_offset; + + // Get the transition's character and next state + switch (state_data->type) { + case 
KISS_BNFA_STATE_PARTIAL: + tran_char = state->partial.transitions[i].tran_char; + tran_bnfa_offset = kiss_bnfa_offset_decompress(state->partial.transitions[i].next_state_offset); + break; + + case KISS_BNFA_STATE_FULL: + tran_char = (u_char)i; + tran_bnfa_offset = kiss_bnfa_offset_decompress(state->full.transitions[i]); + break; + + default: + // KISS_BNFA_STATE_MATCH has no transitions + thinnfa_debug_critical(("%s: Bad type %d\n", rname, state_data->type)); + return; + } + + // Possibly skip root transitions + if ((tran_bnfa_offset==root_offset) && (dump_format_cbs->flags & THIN_NFA_DUMP_SKIP_ROOT_TRANS)) continue; + + // Print the transition + dump_format_cbs->transition_cb(state_data, tran_char, tran_bnfa_offset); + } + + // Print some stuff at the state end + dump_format_cbs->state_end_cb(state_data); +} + +static kiss_ret_val +thin_nfa_dump(const KissThinNFA *nfa, const thin_nfa_dump_cbs_t *dump_format_cbs) +{ + static const char rname[] = "thin_nfa_dump"; + struct thinnfa_validation_status_s validation; + u_int i; + kiss_ret_val ret = KISS_ERROR; + + // We don't want to crash or loop if the Thin NFA is corrupt, so validate first + if (thin_nfa_validation_init(nfa, &validation) != KISS_OK) { + thinnfa_debug_critical(("%s: Failed to initialize validation data\n", rname)); + goto cleanup; + } + + // Go over the tree and follow all transitions + if (thin_nfa_validation_scan_tree(&validation) != KISS_OK) { + thinnfa_debug_critical(("%s: Tree scan failed - the BNFA is corrupt\n", rname)); + // Continue despite failure. We'll end up with ugly state names. + } + + // The graph-from-table plugin will display the table lines below as a graph + dump_format_cbs->start_cb(&validation); + + // Go over states and print them + for (i=0; iend_cb(&validation); + + ret = KISS_OK; +cleanup: + thin_nfa_validation_fini(&validation); + return ret; +} + + +kiss_ret_val +kiss_thin_nfa_dump(const KissThinNFA *nfa, enum kiss_pm_dump_format_e format) +{ + static const char rname[] = "kiss_thin_nfa_dump"; + thin_nfa_dump_cbs_t *format_cbs = NULL; + + switch (format) { + case KISS_PM_DUMP_XML: + format_cbs = &xml_dump_cbs; + break; + case KISS_PM_DUMP_CSV: + format_cbs = &csv_dump_cbs; + break; + case KISS_PM_DUMP_WIKI: + format_cbs = &wiki_dump_cbs; + break; + } + + if (!format_cbs) { + thinnfa_debug_critical(("%s: Invalid dump format %d\n", rname, format)); + return KISS_ERROR; + } + + return thin_nfa_dump(nfa, format_cbs); +} +SASAL_END diff --git a/components/utils/pm/kiss_thin_nfa_base.h b/components/utils/pm/kiss_thin_nfa_base.h new file mode 100644 index 0000000..9a1830d --- /dev/null +++ b/components/utils/pm/kiss_thin_nfa_base.h @@ -0,0 +1,261 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
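The dump entry point defined above selects one of the three callback tables by format. A minimal caller sketch, assuming only what the code above shows (the kiss_pm_dump_format_e values handled by the switch and the KISS_OK return code); the wrapper function itself is hypothetical and not part of this patch:

    // Hypothetical helper: dump a compiled automaton as CSV for offline inspection.
    static void dump_thin_nfa_as_csv(const KissThinNFA *nfa)
    {
        if (kiss_thin_nfa_dump(nfa, KISS_PM_DUMP_CSV) != KISS_OK) {
            thinnfa_debug_critical(("dump_thin_nfa_as_csv: CSV dump failed\n"));
        }
    }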
+ +#ifndef __kiss_thin_nfa_base_h__ +#define __kiss_thin_nfa_base_h__ + +#include "general_adaptor.h" + +// ****************************** OVERVIEW ******************************* +// Contians basic Thin NFA structure, used by kiss_pm and bolt (prescan) +// *********************************************************************** + +#define KISS_THIN_NFA_ALPHABET_SIZE 256 + +// Binary representation of the Thin NFA. +// This is what's actually used during runtime. +// +// Offsets in the BNFA +// ------------------- +// Offsets are signed 32-bit integers, specifying the distance in bytes from the "offset 0" point. +// +// Offset 0 isn't the BNFA start - there are negative offsets: +// All full states are in negative offsets. This is the only way to know that a state is full. +// All other states are in positive offsets. +// +// In full states, offsets are encoded in 16 bits. +// In partial states, offsets are encoded in 24 bits. +// Offsets are compressed: +// Positive offsets are divided by 4. This is possible because all state sizes are a multiple of 4 bytes. +// Negative offsets are divided by 512 (the size of a full state). This is possible because negative offsets +// are only used for full states, so their offsets are a (negative) multiple of the state size. +// +// Structure of a BNFA state +// ------------------------- +// 1. Full state: +// a. No header. Identified by the fact that its BNFA offset is negative. +// b. 256 transitions, 16bits each (uncompressed offsets). +// 2. Common header, to partial and match states: +// a. State type - 2 bits. +// 3. Partial state: +// a. State type - 2 bits. +// b. Transition number - 6 bits. +// c. Fail state offset (compresed) - 24 bits. +// d. Per transition: +// 1) Character - 8 bits +// 2) Next state offset (compressed) - 24 bits +// 4. Match state: +// a. State type - 2 bits. +// b. Unused - 6 bits. +// c. Match ID - 24 bits. +// +// Examples: +// +// Partial state, 2 transitions - 'a'->100, 'b'->104, fail-> -3072 +// +----+---+-----+---+-----+---+-----+ +// Bits: | 2 | 6 | 24 | 8 | 24 | 8 | 24 | +// +----+---+-----+---+-----+---+-----+ +// Data: | P | 2 | -3 | a | 25 | b | 26 | +// +----+---+-----+---+-----+---+-----+ +// +// Full state, 0x00->200, 0x01->204, 0xff->280 +// +-----+-----+ +-----+ +// Bits: | 16 | 16 | | 16 | +// +-----+-----+ .... 
+-----+ +// Data: | 50 | 51 | | 70 | +// +-----+-----+ +-----+ + + +// Types for normal and compressed (see comment above) BNFA offsets + +typedef int kiss_bnfa_offset_t; // Offset in bytes +typedef int kiss_bnfa_comp_offset_t; // Compressed offset +typedef short kiss_bnfa_short_offset_t; // Compressed offset in 16bits (for full states) + +#define KISS_BNFA_OFFSET_INVALID ((int)0x80000000) + +// State types +typedef enum { + KISS_BNFA_STATE_PARTIAL, + KISS_BNFA_STATE_MATCH, + KISS_BNFA_STATE_FULL, + + KISS_BNFA_STATE_TYPE_NUM +} kiss_bnfa_state_type_t; + + +// State structure + +// Use some header bits for the state type +#define KISS_BNFA_STATE_TYPE_BITS 2 + +// The type must fit in KISS_BNFA_STATE_TYPE_BITS bits +KISS_ASSERT_COMPILE_TIME(KISS_BNFA_STATE_TYPE_NUM <= (1<common.type; +} + + +// State size + +// Get the size of a partial state with N transitions +static CP_INLINE u_int +kiss_bnfa_partial_state_size(u_int trans_num) +{ + // Header + transition table + return KISS_OFFSETOF(kiss_bnfa_partial_state_t, transitions) + + sizeof(struct kiss_bnfa_partial_transition_s) * (trans_num); +} + +// Get the size of an existing state +static CP_INLINE u_int +kiss_bnfa_state_size(const kiss_bnfa_state_t *bnfa, kiss_bnfa_offset_t offset) +{ + switch (kiss_bnfa_state_type(bnfa, kiss_bnfa_offset_compress(offset))) { + case KISS_BNFA_STATE_PARTIAL: { + const kiss_bnfa_state_t *state = kiss_bnfa_offset_to_state(bnfa, offset); + return kiss_bnfa_partial_state_size(state->partial.trans_num); + } + case KISS_BNFA_STATE_MATCH: return sizeof(kiss_bnfa_match_state_t); + case KISS_BNFA_STATE_FULL: return sizeof(kiss_bnfa_full_state_t); + + case KISS_BNFA_STATE_TYPE_NUM: break; // Can't happen + } + + return 0; +} + +// Flags for kiss_thin_nfa_s.flags and kiss_thin_nfa_prescan_hdr_s.flags +enum kiss_thin_nfa_flags_e { + KISS_THIN_NFA_USE_CHAR_XLATION = 0x01, // Used for caseless and/or digitless + KISS_THIN_NFA_HAS_ANCHOR = 0x02, // State at offset 0 is anchored root, not root +}; + + +#endif // __kiss_thin_nfa_base_h__ diff --git a/components/utils/pm/kiss_thin_nfa_build.cc b/components/utils/pm/kiss_thin_nfa_build.cc new file mode 100644 index 0000000..7dcbecb --- /dev/null +++ b/components/utils/pm/kiss_thin_nfa_build.cc @@ -0,0 +1,242 @@ +// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Thin NFA Construction and Destruction +// ------------------------------------- +// This file contains code that builds a Thin NFA. +// The functions here may be called from compilation, serialization and de-serialization contexts. +// The code allows allocating and releasing the Thin NFA structure, as well as serializing and deserializing it. 
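As a reading aid for the offset scheme documented in kiss_thin_nfa_base.h above, here is a small sketch of the documented /4 and /512 compression rule. The helper names are invented for illustration only; the real conversions are the kiss_bnfa_offset_compress/kiss_bnfa_offset_decompress routines used elsewhere in this patch:

    // Sketch of the documented rule: positive offsets (partial/match states) are
    // multiples of 4 bytes, negative offsets (full states) are multiples of 512.
    static inline int example_offset_compress(int offset)
    {
        return (offset >= 0) ? offset / 4 : offset / 512;
    }
    static inline int example_offset_decompress(int comp_offset)
    {
        return (comp_offset >= 0) ? comp_offset * 4 : comp_offset * 512;
    }
    // e.g. example_offset_compress(100) == 25 and example_offset_compress(-1024) == -2;
    // decompressing either result returns the original byte offset.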
+ +#include "kiss_thin_nfa_impl.h" +#include "sasal.h" + +SASAL_START // Multiple Pattern Matcher +// Allocate and fill in a pattern ID structure +kiss_ret_val +kiss_thin_nfa_add_pattern_id(kiss_thin_nfa_pattern_list_t **pat_list_p, const kiss_thin_nfa_pattern_t *new_pat) +{ + static const char rname[] = "kiss_thin_nfa_add_pattern_id"; + kiss_thin_nfa_pattern_list_t **pat_ptr; + kiss_thin_nfa_pattern_list_t *pat; + + // Go over the pattern list - look for our pattern, and find the end + for (pat_ptr = pat_list_p; *pat_ptr != NULL; pat_ptr = &((*pat_ptr)->next)) { + kiss_thin_nfa_pattern_t *list_pat = &(*pat_ptr)->pattern; + + if (list_pat->id == new_pat->id) { + // Already there - nothing to do + thinnfa_debug(( + "%s: Pattern already exists - ID=%d flags=%x(%x) len=%d(%d)\n", + rname, + new_pat->id, + new_pat->pattern_id_flags, + list_pat->pattern_id_flags, + new_pat->len, + list_pat->len + )); + return KISS_OK; + } + } + + // Allocate the pattern structure + pat = (kiss_thin_nfa_pattern_list_t *)kiss_pmglob_memory_kmalloc(sizeof(kiss_thin_nfa_pattern_list_t), rname); + if (!pat) { + thinnfa_debug_err(("%s: Failed to allocate pattern id\n", rname)); + return KISS_ERROR; + } + + // Fill in the fields + bcopy(new_pat, &pat->pattern, sizeof(pat->pattern)); + + thinnfa_debug(( + "%s: Added pattern ID=%d flags=%x len=%d\n", + rname, + new_pat->id, + new_pat->pattern_id_flags, + new_pat->len + )); + + // Add to the linked list of patternss. + *pat_ptr = pat; + pat->next = NULL; + + return KISS_OK; +} + + +// Free an entire list of pattern IDs. +void +kiss_thin_nfa_free_pattern_ids(kiss_thin_nfa_pattern_list_t *pat_list) +{ + static const char rname[] = "kiss_thin_nfa_free_pattern_ids"; + kiss_thin_nfa_pattern_list_t *pat, *next; + + for (pat = pat_list; pat != NULL; pat = next) { + next = pat->next; + thinnfa_debug(( + "%s: Releasing pattern ID=%d flags=%x len=%u\n", + rname, + pat->pattern.id, + pat->pattern.pattern_id_flags, + pat->pattern.len + )); + kiss_pmglob_memory_kfree(pat, sizeof(kiss_thin_nfa_pattern_list_t), rname); + } + return; +} + + +// Allocate and initialize statistics +static kiss_ret_val +kiss_thin_nfa_stats_init(kiss_thin_nfa_stats stats) +{ + + if (kiss_pm_stats_common_init(&(stats->common)) != KISS_OK) { + return KISS_ERROR; + } + + bzero(&(stats->specific), sizeof(struct kiss_thin_nfa_specific_stats_s)); + + return KISS_OK; +} + + +// Free statistics +static void +kiss_thin_nfa_stats_free(kiss_thin_nfa_stats stats) +{ + kiss_pm_stats_common_free(&(stats->common)); +} + + +static kiss_ret_val +kiss_thin_nfa_alloc_depth_map(KissThinNFA *nfa) +{ + static const char rname[] = "kiss_thin_nfa_alloc_depth_map"; + kiss_bnfa_comp_offset_t min_comp_off, max_comp_off; + + // The depth map is addressed by the compressed offset + min_comp_off = kiss_bnfa_offset_compress(nfa->min_bnfa_offset); + max_comp_off = kiss_bnfa_offset_compress(nfa->max_bnfa_offset); + + nfa->depth_map.size = max_comp_off - min_comp_off; + nfa->depth_map.mem_start = (u_char *)kiss_pmglob_memory_kmalloc_ex(nfa->depth_map.size, rname, FW_KMEM_SLEEP); + if (!nfa->depth_map.mem_start) { + thinnfa_debug_err(( + "%s: Error allocating the depth map, size %d (BNFA offsets %d:%d)\n", + rname, + nfa->depth_map.size, + nfa->min_bnfa_offset, + nfa->max_bnfa_offset + )); + return KISS_ERROR; + } + // Find the place for offset 0. min_comp_offset is negative, so it's after mem_start. 
+    nfa->depth_map.offset0 = nfa->depth_map.mem_start - min_comp_off;
+
+    return KISS_OK;
+}
+
+
+static void
+kiss_thin_nfa_destroy_depth_map(KissThinNFA *nfa)
+{
+    static const char rname[] = "kiss_thin_nfa_destroy_depth_map";
+    if (nfa->depth_map.mem_start != NULL) {
+        kiss_pmglob_memory_kfree(nfa->depth_map.mem_start, nfa->depth_map.size, rname);
+        nfa->depth_map.mem_start = NULL;
+        nfa->depth_map.offset0 = NULL;
+    }
+}
+
+
+KissThinNFA::~KissThinNFA()
+{
+    static const char rname[] = "~KissThinNFA";
+    // the code here was once in kiss_thin_nfa_destroy
+    u_int bnfa_size = max_bnfa_offset - min_bnfa_offset;
+
+    thinnfa_debug_major(("%s: Destroying Thin NFA %p, bnfa size=%d\n", rname, this, bnfa_size));
+
+    if (bnfa_start != NULL) {
+        kiss_pmglob_memory_kfree(bnfa_start, bnfa_size, rname);
+        bnfa_start = NULL;
+        bnfa = NULL;
+    }
+
+    kiss_thin_nfa_stats_free(&stats);
+
+    if (pattern_arrays != NULL) {
+        kiss_pmglob_memory_kfree(pattern_arrays, pattern_arrays_size, rname);
+        pattern_arrays = NULL;
+    }
+
+    kiss_thin_nfa_destroy_depth_map(this);
+}
+
+
+// Allocate a Thin NFA. The match info array and BNFA are left empty.
+std::unique_ptr<KissThinNFA>
+kiss_thin_nfa_create(u_int match_state_num, kiss_bnfa_offset_t min_offset, kiss_bnfa_offset_t max_offset)
+{
+    static const char rname[] = "kiss_thin_nfa_create";
+
+    // Allocate the structure
+    auto nfa = std::make_unique<KissThinNFA>();
+    void *nfa_ptr = nfa.get();
+    bzero(nfa_ptr, sizeof(*nfa));
+    nfa->min_bnfa_offset = min_offset;
+    nfa->max_bnfa_offset = max_offset;
+    nfa->match_state_num = match_state_num;
+
+    // Allocate the bnfa array. Not initialized.
+    u_int bnfa_size = max_offset - min_offset;
+    nfa->bnfa_start = (kiss_bnfa_state_t *)kiss_pmglob_memory_kmalloc_ex(bnfa_size, rname, FW_KMEM_SLEEP);
+    if (!nfa->bnfa_start) {
+        thinnfa_debug_err((
+            "%s: Error allocating the bnfa - size %d (offset %d:%d)\n",
+            rname,
+            bnfa_size,
+            min_offset,
+            max_offset
+        ));
+        return nullptr;
+    }
+
+    // Calculate bnfa so bnfa_start would be at offset min_offset (min_offset<0, so bnfa>bnfa_start)
+    nfa->bnfa = (kiss_bnfa_state_t *)((char *)nfa->bnfa_start - min_offset);
+
+    // Init the statistics
+    if (kiss_thin_nfa_stats_init(&(nfa->stats)) != KISS_OK) {
+        thinnfa_debug_err(("%s: Error initializing statistics structure\n", rname));
+        return nullptr;
+    }
+
+    // Allocate the state depth map
+    if (kiss_thin_nfa_alloc_depth_map(nfa.get()) != KISS_OK) {
+        return nullptr;
+    }
+
+    thinnfa_debug_major((
+        "%s: Allocated Thin NFA %p, bnfa size=%d (offsets %d:%d)\n",
+        rname,
+        nfa.get(),
+        bnfa_size,
+        min_offset,
+        max_offset
+    ));
+
+    return nfa;
+}
+SASAL_END
diff --git a/components/utils/pm/kiss_thin_nfa_compile.cc b/components/utils/pm/kiss_thin_nfa_compile.cc
new file mode 100644
index 0000000..4c0e968
--- /dev/null
+++ b/components/utils/pm/kiss_thin_nfa_compile.cc
@@ -0,0 +1,2232 @@
+// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
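A hedged usage sketch for the allocator defined just above; the offsets and match-state count are made-up example values, and the only behavior relied on is what kiss_thin_nfa_create and ~KissThinNFA show above:

    // Illustration only: one full state's worth of negative offsets plus 1024
    // bytes of partial/match states, with room for 3 match states.
    static std::unique_ptr<KissThinNFA> example_allocate_empty_nfa()
    {
        std::unique_ptr<KissThinNFA> nfa = kiss_thin_nfa_create(3, -512, 1024);
        if (nfa == nullptr) return nullptr;  // BNFA, stats or depth-map allocation failed
        // Compilation would now fill nfa->bnfa; dropping the pointer later runs
        // ~KissThinNFA, which frees the BNFA, statistics and depth map.
        return nfa;
    }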
+ +#include +#include "pm_adaptor.h" +#include "kiss_hash.h" +#include "kiss_thin_nfa_impl.h" +#include "kiss_patterns.h" +#include "sasal.h" + +SASAL_START // Multiple Pattern Matcher +// Flag for a Thin NFA state +typedef enum { + THIN_NFA_STATE_FULL = 0x01, // We want a full state table for this state + THIN_NFA_STATE_MATCH = 0x02, // A matching state + THIN_NFA_STATE_ROOT = 0x04, // The root or anchored root state + THIN_NFA_STATE_MAX_IDENTICAL_CHAR = 0x08, // Maximal sequence of identical characters + THIN_NFA_STATE_ANCHORED = 0x10, // A part of the anchored tree + THIN_NFA_STATE_BUILT_TABLE = 0x20, // Already built the BNFA transition table + THIN_NFA_STATE_REACH_FROM_FULL = 0x40, // The state is reachable from full state +} nfa_thin_state_flags_t; + +// A Thin NFA state, or a node in the trie, during compilation time +struct kiss_thin_nfa_state_s { + u_int state_id; // Sequencial number, starting from 0 + nfa_thin_state_flags_t flags; + u_int depth; // Level in the trie + kiss_thin_nfa_pattern_list_t *ids; // For finite state, patterns associated with it + struct kiss_thin_nfa_state_s *bfs_q; // Use for a BFS iteration on the trie + struct thin_nfa_comp_s *comp; // Saves passing this pointer around + // Outgoing transitions + struct kiss_thin_nfa_state_s *child; // First child of this state + u_int num_trans; // Number of transitions + struct kiss_thin_nfa_state_s *fail_state; + // Incoming transition + struct kiss_thin_nfa_state_s *sibling; // Next child of this state's father + u_char tran_char; // The character that takes us to this state + // BNFA offset + kiss_bnfa_offset_t bnfa_offset; // Where the real state is + kiss_bnfa_offset_t bnfa_incoming_off; // Where incoming transitions should jump (possibly a match state) + // DEBUG ONLY + const u_char *pattern_text; // Points into the user's pattern list. Not null terminated +}; +typedef struct kiss_thin_nfa_state_s kiss_thin_nfa_state_t; + + +// Blocks to hold states . A pretty simple pool mechanism. +// Not very much needed. We currently use it to iterate states by ID order, and for state pointer validation. +#define MAX_THIN_NFA_STATES_BLOCKS 1000 +#define KISS_NFA_MAX_STATES_PER_BLOCK 1000 +#define KISS_NFA_MAX_STATES_BLOCK_SIZE (KISS_NFA_MAX_STATES_PER_BLOCK * sizeof(kiss_thin_nfa_state_t)) + + +// When do we want a full state? In the first X tiers (root included) and if more than Y transitions. +u_int kiss_thin_nfa_full_tiers_num_small = 2; // Old values, for PMs which must remain small +u_int kiss_thin_nfa_full_tiers_num_medium = 3; // Used for VSX / 32bit kernel, where memory is expensive +u_int kiss_thin_nfa_full_tiers_num = 7; // New value +u_int kiss_thin_nfa_max_partial_trans = 15; // Can't exceed KISS_BNFA_MAX_TRANS_NUM anyway +u_int kiss_thin_nfa_optimize_contig_chars = 1; + + +// Character translation table for caseless/digitless comparisons. +// +// The idea: +// Each character has a canonic character. This can be itself, or another. +// In a caseless Thin NFA, 'a' and '7' are canonic themselves, 'B' has canonic character 'b'. +// In a digitless Thin NFA, '7' is not canonic - its canonic character is '0'. +// Each character is also a member of a group, containing all characters with the same canonic character. +// In a caseless Thin NFA, 'a' and 'A' are in one group. +// In a digitless Thin NFA, all digits are in one group. +// Notice that a single Thin NFA can be caseless, digitless, neither or both. +// +// The data structure: +// tab - Translates each character into its canonic characer (possibly itself). 
+// rev - A linked list of characters belonging to the same group. The character itself
+//       is used instead of a pointer. The last character in the group points to the first.
+//       Example: For a caseless Thin NFA, rev['a']='A' and rev['A']='a'.
+struct thin_nfa_char_trans_tab_s {
+    u_char tab[KISS_PM_ALPHABET_SIZE];
+    u_char rev[KISS_PM_ALPHABET_SIZE];
+};
+
+
+// Flags for an entire Thin NFA during compilation
+typedef enum {
+    THIN_NFA_FAIL_STATES_CALCULATED = 0x01,  // Once we set this, we expect all states to have fail states.
+    THIN_NFA_ENABLE_ANCHOR_OPT = 0x02,       // Enable optimization for anchored states
+    THIN_NFA_USE_RECURSIVE_COMPILE = 0x04,   // Build full states recursively. Faster, unsuitable for kernel
+} thin_nfa_comp_flags_e;
+
+
+// A Thin NFA which is under construction. The compiled BNFA is constructed from this later.
+struct thin_nfa_comp_s {
+    kiss_thin_nfa_state_t *root_state;              // The root state (somewhere inside state_blocks)
+    kiss_thin_nfa_state_t *anchored_root_state;     // The root for anchored patterns
+    u_int full_state_tier_num;                      // How many tiers would be full states?
+    u_int state_num;                                // How many states do we have so far
+    u_int match_state_num;                          // How many matching states do we have?
+    u_int full_state_num;                           // How many full states do we have?
+    KissPMError *error;                             // Error to be returned to the user.
+    thin_nfa_comp_flags_e flags;
+    struct thin_nfa_char_trans_tab_s *xlation_tab;  // Caseless/digitless translation table
+    kiss_thin_nfa_state_t *state_blocks[MAX_THIN_NFA_STATES_BLOCKS];  // Dynamically allocated memory for states
+    std::unique_ptr<KissThinNFA> runtime_nfa;       // The final NFA we're building
+    kiss_hash_t patterns_hash;                      // Pattern array to offset mapping
+    kiss_bnfa_offset_t min_bnfa_off, max_bnfa_off;
+};
+
+
+#if defined(DEBUG)
+#define KISS_THIN_NFA_DO_VERIFICATIONS
+#endif
+
+
+#define MAX_STATE_NAME_LEN 100
+#define MAX_STATE_NAME_BUFS 4
+
+
+static kiss_thin_nfa_state_t *
+kiss_thin_nfa_get_state_by_id(struct thin_nfa_comp_s *nfa_comp, u_int state_id, const char *caller)
+{
+    u_int block_index;
+    u_int index_in_block;
+    kiss_thin_nfa_state_t *block;
+
+    // Find the block and the place in the block
+    block_index = state_id / KISS_NFA_MAX_STATES_PER_BLOCK;
+    index_in_block = state_id % KISS_NFA_MAX_STATES_PER_BLOCK;
+    if (block_index >= MAX_THIN_NFA_STATES_BLOCKS) {
+        thinnfa_debug_critical(("%s: State %d - invalid block index %d (max %d)\n", caller,
+            state_id, block_index, MAX_THIN_NFA_STATES_BLOCKS));
+        return NULL;
+    }
+    block = nfa_comp->state_blocks[block_index];
+    if (block == NULL) {
+        thinnfa_debug_critical((
+            "%s: State %d - block index %d is not allocated yet\n",
+            caller,
+            state_id,
+            block_index
+        ));
+        return NULL;
+    }
+
+    return &block[index_in_block];
+}
+
+
+// DEBUG FUNCTION - return a printable name for the state, in a static buffer.
+// Accepts a NULL state.
+static const char *
+state_name(const kiss_thin_nfa_state_t *state)
+{
+    static char buffers[MAX_STATE_NAME_BUFS][MAX_STATE_NAME_LEN];
+    static u_int next_buf = 0;
+    u_int cur_buf;
+    char *name, *p;
+
+    if (!state) {
+        // Happens when printing the root's fail state
+        return "NULL/-1";
+    }
+
+    // What's a state's name?
+    // Each state represents a prefix of one or more patterns. This prefix is the natural name for the state.
+    // We have the pattern text on the state. Its depth tells us how much of it we need.
+    // We add the state ID as a suffix, to prevent ambiguities (particularly for unprintable characters).
+
+    // Choose a buffer to use.
Allows calling several times under a single debug message. + cur_buf = next_buf; + if (cur_buf >= MAX_STATE_NAME_BUFS) cur_buf = 0; + next_buf = cur_buf + 1; + name = buffers[cur_buf]; + p = name; + + if (state->flags & THIN_NFA_STATE_ANCHORED) { + // Prefix for anchored states + *p = '^'; + p++; + } + + // Fill in the state name. Not null-terminated meanwhile. + if (state->pattern_text == NULL) { + const char *state_name; + // Only the root makes sense. But deal with a missing pattern text anyway + state_name = (state->flags & THIN_NFA_STATE_ROOT) ? "ROOT" : "INVALID"; + strcpy(p, state_name); + p += strlen(state_name); + } else { + u_int i; + // Normal state - use the relevant prefix of the pattern text + for (i=0; (idepth) && (ppattern_text[i]) ? state->pattern_text[i] : '.'; + p++; + } + } + + // Append the state ID. Removes ambituities (e.g. for unprintable characters) + snprintf(p, MAX_STATE_NAME_LEN-(p-name), "/%u", state->state_id); + name[MAX_STATE_NAME_LEN-1] = '\0'; + + return name; +} + + +#if defined(KISS_THIN_NFA_DO_VERIFICATIONS) + +// DEBUG FUNCTION - Verify that a state pointer points to a valid state +static BOOL +is_valid_state_ptr(struct thin_nfa_comp_s *nfa_comp, kiss_thin_nfa_state_t *state, const char *caller) +{ + kiss_thin_nfa_state_t *state_by_id; + + if (!state) { + thinnfa_debug_critical(("%s: Null state pointer\n", caller)); + return FALSE; + } + + state_by_id = kiss_thin_nfa_get_state_by_id(nfa_comp, state->state_id, caller); + if (!state_by_id) { + return FALSE; + } + + // Is the state where we expect it to be in the block? + if (state != state_by_id) { + thinnfa_debug_critical(("%s: State %p ID %d is invalid - should be at %p\n", caller, state, + state->state_id, state_by_id)); + return FALSE; + } + + return TRUE; +} + + +// DEBUG FUNCTION - Verify a state's transition table +static void +verify_state_ex(struct thin_nfa_comp_s *nfa_comp, kiss_thin_nfa_state_t *state, const char *caller) +{ + kiss_thin_nfa_state_t *child, *prev_child; + u_int actual_tran_num; + + // Is the pointer itself OK? + KISS_ASSERT(is_valid_state_ptr(nfa_comp, state, caller), + ("%s: Invalid state pointer %p\n", caller, state)); + + // Go over the transition table + actual_tran_num = 0; + prev_child = NULL; + for (child = state->child; child != NULL; child = child->sibling) { + // Valid pointer? + KISS_ASSERT(is_valid_state_ptr(nfa_comp, child, caller), + ("%s: State %s(%p) contains an invalid child %p after %02x\n", caller, state_name(state), state, + child, prev_child ? prev_child->tran_char : 0)); + + // Sorted in ascending order? + KISS_ASSERT(!prev_child || prev_child->tran_char < child->tran_char, + ("%s: State %s(%p) transition %02x -> %s after %02x -> %s\n", caller, + state_name(state), state, + child->tran_char, state_name(child), + prev_child->tran_char, state_name(prev_child))); + + actual_tran_num++; + if (actual_tran_num > state->num_trans) { + // We may be looping + break; + } + prev_child = child; + } + + // Counter matches list? + KISS_ASSERT(actual_tran_num == state->num_trans, + ("%s: State %s(%p) has %d transitions, but it should have %d\n", caller, state_name(state), state, + actual_tran_num, state->num_trans)); + + // Fail state? 
+ if (nfa_comp->flags & THIN_NFA_FAIL_STATES_CALCULATED) { + if (state->fail_state == NULL) { + KISS_ASSERT(state == nfa_comp->root_state, ("%s: State %s has no fail state, but it is not root", + caller, state_name(state))); + } else { + KISS_ASSERT( + is_valid_state_ptr(nfa_comp, state->fail_state, caller), + "%s: State %s has an invalid fail state %p\n", + caller, + state_name(state), + state->fail_state + ); + } + } +} + + +// Use this for sanity test on a state +#define verify_state(nfa_comp, state) verify_state_ex(nfa_comp, state, FILE_LINE) + +#else // KISS_THIN_NFA_DO_VERIFICATIONS + +// Verifications disabled +#define verify_state(nfa_comp, state) + +#endif // KISS_THIN_NFA_DO_VERIFICATIONS + + +// Mark that a state needs to be full +static void +make_state_full(kiss_thin_nfa_state_t *state) +{ + if (state->flags & THIN_NFA_STATE_FULL) return; + ENUM_SET_FLAG(state->flags, THIN_NFA_STATE_FULL); + state->comp->full_state_num++; +} + + +// Mark that a state is matching +static void +make_state_matching(kiss_thin_nfa_state_t *state) +{ + if (state->flags & THIN_NFA_STATE_MATCH) return; + ENUM_SET_FLAG(state->flags, THIN_NFA_STATE_MATCH); + state->comp->match_state_num++; +} + + +// Allocate an empty state on an NFA. +// Initializes all fields to defaults. +static kiss_thin_nfa_state_t * +kiss_thin_nfa_state_create( + struct thin_nfa_comp_s *nfa_comp, + u_int depth, + const u_char *pattern_text, + nfa_thin_state_flags_t flags +) +{ + static const char rname[] = "kiss_thin_nfa_state_create"; + u_int state_id; + u_int block_index; + u_int index_in_block; + kiss_thin_nfa_state_t *block; + kiss_thin_nfa_state_t *state; + + // Find the next ID and the block it should be in + state_id = nfa_comp->state_num; + block_index = state_id / KISS_NFA_MAX_STATES_PER_BLOCK; + index_in_block = state_id % KISS_NFA_MAX_STATES_PER_BLOCK; + + thinnfa_debug_extended(("%s: Adding state %d depth %d\n", rname, state_id, depth)); + + // No more possible blocks? + if (block_index >= MAX_THIN_NFA_STATES_BLOCKS) { + thinnfa_debug_err(("%s: State %d in block %d exceeds the limit %d\n", rname, + state_id, block_index, MAX_THIN_NFA_STATES_BLOCKS)); + return NULL; + } + + // Allocate the block if needed (first state in the block) + block = nfa_comp->state_blocks[block_index]; + if (block == NULL) { + block = (kiss_thin_nfa_state_t *)fw_kmalloc_ex(KISS_NFA_MAX_STATES_BLOCK_SIZE, rname, FW_KMEM_SLEEP); + if (block == NULL) { + thinnfa_debug_err(("%s: Failed to allocate a state block size %lu for the state %u\n", rname, + KISS_NFA_MAX_STATES_BLOCK_SIZE, state_id)); + return NULL; + } + nfa_comp->state_blocks[block_index] = block; + } + + // Initialize the state + state = &block[index_in_block]; + + state->state_id = state_id; + state->flags = flags; + state->ids = NULL; + state->bfs_q = NULL; + state->child = NULL; + state->num_trans = 0; + state->fail_state = NULL; + state->sibling = NULL; + state->tran_char = '\0'; // Will be modified, except for the root + state->pattern_text = pattern_text; + state->depth = depth; + state->comp = nfa_comp; + state->bnfa_offset = KISS_BNFA_OFFSET_INVALID; + state->bnfa_incoming_off = KISS_BNFA_OFFSET_INVALID; + + // Do we want a full state? kiss_thin_nfa_full_tiers_num=2 means tiers 0 and 1, i.e. the root plus one, are full. + if (state->flags & THIN_NFA_STATE_ROOT) { + // The root must be full, because it has no fail state. + // The anchored root (if exists) is the first state, and must be full, for the bnfa_full_state_size + // condition to work. 
+ make_state_full(state); + } else if (depth < nfa_comp->full_state_tier_num && !(state->flags & THIN_NFA_STATE_ANCHORED)) { + make_state_full(state); + } + + // Advance the counter + nfa_comp->state_num++; + + return state; +} + + +// Release all resources on a state structure. +// Doesn't release the states, because it's part of a state block. +static void +kiss_thin_nfa_state_free(kiss_thin_nfa_state_t *state) +{ + // Clean up the pattern list + if (state->ids) { + kiss_thin_nfa_free_pattern_ids(state->ids); + state->ids = NULL; + } + + return; +} + + +// Returns the following state, by ID order. +// With prev==NULL, returns the first state. +// With prev!=NULL, returns the next. +// If prev is the last state, returns NULL. +static kiss_thin_nfa_state_t * +kiss_thin_nfa_get_subsequent_state(struct thin_nfa_comp_s *nfa_comp, kiss_thin_nfa_state_t *prev) +{ + static const char rname[] = "kiss_thin_nfa_get_subsequent_state"; + u_int state_id; + + // Find the next state's ID + state_id = prev ? prev->state_id + 1 : 0; + if (state_id >= nfa_comp->state_num) { + // prev was the last state. + return NULL; + } + + // Get the state pointer + return kiss_thin_nfa_get_state_by_id(nfa_comp, state_id, rname); +} + + +// Find the transition for a given character from a given state. +// If no transition found, returns NULL and does not check the fail state. +static kiss_thin_nfa_state_t * +kiss_thin_nfa_comp_get_next_state(kiss_thin_nfa_state_t *state, u_char ch) +{ + static const char rname[] = "kiss_thin_nfa_comp_get_next_state"; + kiss_thin_nfa_state_t *child; + + verify_state(state->comp, state); + + // Find the child in the list + for (child = state->child; child != NULL; child = child->sibling) { + u_char tran_ch = child->tran_char; + + if (tran_ch == ch) { + thinnfa_debug_extended(( + "%s: Found transition from the state %s by 0x%02x to %s\n", + rname, + state_name(state), + ch, + state_name(child) + )); + return child; + } + + // The list is sorted, so we don't need to look beyond the character. + if (tran_ch > ch) break; + } + + thinnfa_debug_extended(("%s: No transition from the state %s by 0x%02x\n", rname, state_name(state), ch)); + + return NULL; +} + + +// Mark a state as finite and accepting a given kiss_thin_nfa_pattern_t pattern +static kiss_ret_val +kiss_thin_nfa_state_set_match(kiss_thin_nfa_state_t *state, const kiss_thin_nfa_pattern_t *pat_info) +{ + static const char rname[] = "kiss_thin_nfa_state_set_match"; + + verify_state(state->comp, state); + + // Add the pattern to this state's pattern list + if (kiss_thin_nfa_add_pattern_id(&(state->ids), pat_info) != KISS_OK) { + thinnfa_debug_err(( + "%s: Could not add the 'pattern_id' %d to the final state %s\n", + rname, + pat_info->id, + state_name(state) + )); + return KISS_ERROR; + } + + thinnfa_debug(( + "Setting state %s as the matching state for the 'pattern_id' %d\n", + state_name(state), + pat_info->id + )); + make_state_matching(state); + + return KISS_OK; +} + + +// Mark a state as finite, and accepting a given kiss_pmglob_string pattern +static kiss_ret_val +kiss_thin_nfa_state_set_match_pattern(kiss_thin_nfa_state_t *state, const kiss_pmglob_string_s *pattern) +{ + kiss_thin_nfa_pattern_t pat_info; + + pat_info.id = kiss_pmglob_string_get_id(pattern); + pat_info.pattern_id_flags = kiss_pmglob_string_get_flags(pattern); + pat_info.len = kiss_pmglob_string_get_size(pattern); + + return kiss_thin_nfa_state_set_match(state, &pat_info); +} + + +// Copy the list of accepted patterns from one state to another. 
+// The destination state can already have patterns, and the lists would be concatenated. +static kiss_ret_val +kiss_thin_nfa_state_copy_match_ids(kiss_thin_nfa_state_t *dst, kiss_thin_nfa_state_t *src) +{ + static const char rname[] = "kiss_thin_nfa_state_copy_match_ids"; + kiss_thin_nfa_pattern_list_t *curr_id; + + verify_state(src->comp, src); + verify_state(dst->comp, dst); + + thinnfa_debug(("%s: Copying the match IDs from %s to %s\n", rname, state_name(src), state_name(dst))); + + // traversing on the state_src 'ids' adding each one to 'state_dst' list + for(curr_id = src->ids; curr_id; curr_id = curr_id->next) { + if (kiss_thin_nfa_state_set_match(dst, &curr_id->pattern) != KISS_OK) { + thinnfa_debug_err(( + "%s: Failed to set the ID %d on the state %s\n", + rname, + curr_id->pattern.id, + state_name(dst) + )); + + // NOTE: We don't release the IDs we have added. Compilation will fail and clean up anyway. + return KISS_ERROR; + } + } + + return KISS_OK; +} + + +// Destroy the NFA we're compiling +static void +kiss_thin_nfa_comp_destroy(struct thin_nfa_comp_s *nfa_comp) +{ + static const char rname[] = "kiss_thin_nfa_comp_destroy"; + u_int i; + kiss_thin_nfa_state_t *state; + + thinnfa_debug_major(("%s: Destroying the compilation information structure\n", rname)); + + // Cleanup whatever data we have on the states. + for (state = nfa_comp->root_state; state != NULL; state = kiss_thin_nfa_get_subsequent_state(nfa_comp, state)) { + kiss_thin_nfa_state_free(state); + } + + // Free the state blocks and transition blocks + for (i = 0; i < MAX_THIN_NFA_STATES_BLOCKS; i++) { + if (nfa_comp->state_blocks[i] != NULL) { + fw_kfree(nfa_comp->state_blocks[i], KISS_NFA_MAX_STATES_BLOCK_SIZE, rname); + nfa_comp->state_blocks[i] = NULL; + } + } + + if (nfa_comp->xlation_tab!= NULL) { + fw_kfree(nfa_comp->xlation_tab, sizeof(*(nfa_comp->xlation_tab)), rname); + nfa_comp->xlation_tab= NULL; + } + + nfa_comp->runtime_nfa.reset(nullptr); + + if (nfa_comp->patterns_hash) { + kiss_hash_destroy(nfa_comp->patterns_hash); + nfa_comp->patterns_hash = NULL; + } + + fw_kfree(nfa_comp, sizeof(*nfa_comp), rname); +} + + +// Allocate an empty thin NFA compilation data structure. +static struct thin_nfa_comp_s * +kiss_thin_nfa_comp_create(KissPMError *error) +{ + static const char rname[] = "kiss_thin_nfa_comp_create"; + struct thin_nfa_comp_s *nfa_comp = NULL; + + thinnfa_debug_major(("%s: Allocating the compilation information structure\n", rname)); + + // Allocate and initialize the compilation temporary structure + nfa_comp = (struct thin_nfa_comp_s *)fw_kmalloc(sizeof(*nfa_comp), rname); + if (!nfa_comp) { + thinnfa_debug_err(("%s: Failed to allocate 'nfa_comp'\n", rname)); + goto failure; + } + bzero((void *)nfa_comp, sizeof(*nfa_comp)); + + nfa_comp->error = error; + + // Build the root state + nfa_comp->root_state = kiss_thin_nfa_state_create(nfa_comp, 0, NULL, THIN_NFA_STATE_ROOT); + if (nfa_comp->root_state == NULL) { + thinnfa_debug_err(("%s: Failed to create the root state\n", rname)); + goto failure; + } + + return nfa_comp; + +failure: + + if (nfa_comp != NULL) { + kiss_thin_nfa_comp_destroy(nfa_comp); + } + return NULL; + +} + + +// Specify the error for failed Thin NFA compilation +static void +kiss_thin_nfa_set_comp_error(struct thin_nfa_comp_s *nfa_comp, const char *err_text) +{ + // We always use "internal", which is appropriate for both logical errors and resource shortage. + // We don't specify a pattern, because nothing is really pattern specific. 
+ kiss_pm_error_set_details(nfa_comp->error, KISS_PM_ERROR_INTERNAL, err_text); +} + + +// Initialize a translation table for caseless/digitless comparison. +// According to compilation flags, builds a table to translate each character. +static kiss_ret_val +kiss_thin_nfa_create_xlation_tab(struct thin_nfa_comp_s *nfa_comp, int pm_comp_flags) +{ + static const char rname[] = "kiss_thin_nfa_create_xlation_tab"; + enum kiss_pmglob_char_xlation_flags_e xlation_flags; + + // Figure out which translations we need + xlation_flags = KISS_PMGLOB_CHAR_XLATION_NONE; + if (pm_comp_flags & KISS_PM_COMP_CASELESS) { + ENUM_SET_FLAG(xlation_flags, KISS_PMGLOB_CHAR_XLATION_CASE); + } + if (pm_comp_flags & KISS_PM_COMP_DIGITLESS) { + ENUM_SET_FLAG(xlation_flags, KISS_PMGLOB_CHAR_XLATION_DIGITS); + } + if (xlation_flags == KISS_PMGLOB_CHAR_XLATION_NONE) { + // No translation needed + nfa_comp->xlation_tab = NULL; + return KISS_OK; + } + + thinnfa_debug_major(("%s: Using%s%s translation table\n", rname, + (xlation_flags&KISS_PMGLOB_CHAR_XLATION_CASE) ? " caseless" : "", + (xlation_flags&KISS_PMGLOB_CHAR_XLATION_DIGITS) ? " digitless" : "")); + + // Allocate a translation table + nfa_comp->xlation_tab = (struct thin_nfa_char_trans_tab_s *)fw_kmalloc(sizeof(*(nfa_comp->xlation_tab)), rname); + if (!nfa_comp->xlation_tab) { + thinnfa_debug_err(("%s: Failed to allocate the translation table\n", rname)); + return KISS_ERROR; + } + + // Build the mapping - normal and reverse + kiss_pmglob_char_xlation_build(xlation_flags, nfa_comp->xlation_tab->tab); + kiss_pmglob_char_xlation_build_reverse(nfa_comp->xlation_tab->tab, nfa_comp->xlation_tab->rev); + + return KISS_OK; +} + + +// Translate a character to canonic form, if a translation table is defined. +static CP_INLINE u_char +kiss_thin_nfa_xlate_char(struct thin_nfa_comp_s *nfa_comp, u_char ch) +{ + if (!nfa_comp->xlation_tab) return ch; + return nfa_comp->xlation_tab->tab[ch]; +} + + +#if defined(KISS_THIN_NFA_DO_VERIFICATIONS) && !defined(KERNEL) + +// DEBUG FUNCTION - uses a simple&slow algorithm to verify the result of kiss_thin_nfa_are_trans_contained. +// Can't run in the kernel because of the large stack consumption. +static void +verify_trans_contains_( + kiss_thin_nfa_state_t *state_contains, + kiss_thin_nfa_state_t *state_included, + BOOL should_contain +) +{ + kiss_thin_nfa_state_t *trans_contains[KISS_PM_ALPHABET_SIZE]; + kiss_thin_nfa_state_t *trans_included[KISS_PM_ALPHABET_SIZE]; + kiss_thin_nfa_state_t *child; + u_int i; + int mismatch_pos; + + // Fill in both transition tables + bzero(trans_contains, sizeof(trans_contains)); + for (child = state_contains->child; child != NULL; child = child->sibling) { + trans_contains[child->tran_char] = child; + } + bzero(trans_included, sizeof(trans_included)); + for (child = state_included->child; child != NULL; child = child->sibling) { + trans_included[child->tran_char] = child; + } + + // Go over the table, looking for a character that's in "included" but not in "contains". 
+ mismatch_pos = -1; + for (i=0; i %s), but the kiss_thin_nfa_are_trans_contained says it does", + state_name(state_contains), + state_name(state_included), + (u_char)mismatch_pos, + state_name(trans_included[i])) + ); + } +} + + +#define verify_trans_contains(state_contains, state_included, expected) \ + verify_trans_contains_(state_contains, state_included, expected) + +#else // KISS_THIN_NFA_DO_VERIFICATIONS + +#define verify_trans_contains(state_contains, state_included, expected) + +#endif // KISS_THIN_NFA_DO_VERIFICATIONS + + +// Do all transactions of "included" also exist in "contains"? +static BOOL +kiss_thin_nfa_are_trans_contained(kiss_thin_nfa_state_t *state_contains, kiss_thin_nfa_state_t *state_included) +{ + kiss_thin_nfa_state_t *included_child, *contains_child; + + verify_state(state_contains->comp, state_contains); + verify_state(state_included->comp, state_included); + + if (state_contains->num_trans < state_included->num_trans) { + // "contains" has fewer states - it can't include all "included" + verify_trans_contains(state_contains, state_included, FALSE); + return FALSE; + } + + // Advance both included_child and contains_child, to iterate both transition tables. + // Keep them in sync - included_child passes children one by one, and contains_child is advanced + // to the same transition character at each step. + contains_child = state_contains->child; + + // Go over the transitions in "included", see if they're in "contained" + for (included_child = state_included->child; included_child != NULL; included_child = included_child->sibling) { + // Advance "tran_contains" until we reach the character we want + for (; contains_child != NULL; contains_child = contains_child->sibling) { + if (contains_child->tran_char >= included_child->tran_char) break; + } + + // Do we have this character in "contains"? + if (contains_child == NULL || contains_child->tran_char != included_child->tran_char) { + // This character doesn't exist in state_contains + verify_trans_contains(state_contains, state_included, FALSE); + return FALSE; + } + } + + verify_trans_contains(state_contains, state_included, TRUE); + return TRUE; +} + + +// Get the root state, or the anchored root state, as appropriate for the pattern. +static kiss_thin_nfa_state_t * +kiss_thin_nfa_get_root_state(struct thin_nfa_comp_s *nfa_comp, int anchored) +{ + static const char rname[] = "kiss_thin_nfa_get_root_state"; + + if (!anchored || !(nfa_comp->flags & THIN_NFA_ENABLE_ANCHOR_OPT)) { + thinnfa_debug(("%s: Using normal root: %s, feature %s\n", rname, + anchored?"anchored":"not anchored", (nfa_comp->flags & THIN_NFA_ENABLE_ANCHOR_OPT)?"enabled":"disabled")); + return nfa_comp->root_state; + } + + if (!nfa_comp->anchored_root_state) { + // Lazy creation of the anchored root state + nfa_thin_state_flags_t flags = THIN_NFA_STATE_ROOT; + ENUM_SET_FLAG(flags, THIN_NFA_STATE_ANCHORED); + thinnfa_debug(("%s: Creating a new anchored root\n", rname)); + nfa_comp->anchored_root_state = kiss_thin_nfa_state_create(nfa_comp, 0, NULL, flags); + if (nfa_comp->anchored_root_state == NULL) { + thinnfa_debug_err(("%s: Failed to create the anchored root state\n", rname)); + return NULL; + } + } + + thinnfa_debug(("%s: Returning the anchored root (%d)\n", rname, nfa_comp->anchored_root_state->state_id)); + return nfa_comp->anchored_root_state; +} + + +// Find the state in the trie, which represents the longest prefix of a given string. 
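// Illustrative example (assumed patterns, not from the original sources): if the
// trie already contains "abc" and "abd", adding "abx" first walks
// root -a-> "a" -b-> "ab", finds no transition for 'x', and stops at "ab";
// only a single new state (for 'x') then has to be created for the new pattern.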
+static kiss_thin_nfa_state_t * +kiss_thin_nfa_find_longest_prefix(struct thin_nfa_comp_s *nfa_comp, const u_char *text, u_int len, int anchored) +{ + u_int offset; + kiss_thin_nfa_state_t *state; + + // Following the path labeled by chars in 'pattern' (skip the states which already exist) + state = kiss_thin_nfa_get_root_state(nfa_comp, anchored); + if (!state) return NULL; + for (offset = 0; offset < len; offset++) { + kiss_thin_nfa_state_t *next_state; + u_char ch = kiss_thin_nfa_xlate_char(nfa_comp, text[offset]); + + verify_state(nfa_comp, state); + + // Do we have a node for the next character? + next_state = kiss_thin_nfa_comp_get_next_state(state, ch); + + if (next_state == NULL) { + // No next state - this is as far as we go + break; + } else { + state = next_state; + } + } + + return state; +} + + +// Add a newly allocated state to the trie. Keep the transition list sorted. +static void +kiss_thin_nfa_add_transition(kiss_thin_nfa_state_t *parent, u_char tran_char, kiss_thin_nfa_state_t *new_child) +{ + static const char rname[] = "kiss_thin_nfa_add_transition"; + kiss_thin_nfa_state_t **child_p; + + // Go over existing children and find the place to add the transition + for (child_p = &parent->child; *child_p != NULL; child_p = &(*child_p)->sibling) { + kiss_thin_nfa_state_t *child = *child_p; + if (child->tran_char > tran_char) { + // Add before this one + break; + } + } + + // Add the transition + new_child->sibling = *child_p; + *child_p = new_child; + new_child->tran_char = tran_char; + parent->num_trans++; + + thinnfa_debug_extended(("%s: Added transition from %s by 0x%2x to %s\n", rname, + state_name(parent), tran_char, state_name(new_child))); + + if (parent->num_trans > MIN(kiss_thin_nfa_max_partial_trans, KISS_BNFA_MAX_TRANS_NUM)) { + thinnfa_debug(( + "%s: State %s has %d transitions - making it full\n", + rname, + state_name(parent), + parent->num_trans + )); + make_state_full(parent); + } + + // Track states which represent a maximal sequence of identical characters + if ((parent->flags & THIN_NFA_STATE_ROOT) && !(parent->flags & THIN_NFA_STATE_ANCHORED)) { + // Single character - all characters are identical + ENUM_SET_FLAG(new_child->flags, THIN_NFA_STATE_MAX_IDENTICAL_CHAR); + } else if ((parent->flags & THIN_NFA_STATE_MAX_IDENTICAL_CHAR) && (parent->tran_char == tran_char)) { + // The child, not the parent, is now the longest + ENUM_UNSET_FLAG(parent->flags, THIN_NFA_STATE_MAX_IDENTICAL_CHAR); + ENUM_SET_FLAG(new_child->flags, THIN_NFA_STATE_MAX_IDENTICAL_CHAR); + } +} + + +// Add a pattern to the trie, which would generate the Thin NFA. +// Upon failure, doesn't clean up states it may have created. Will be cleaned up when destroying nfa_comp. +static kiss_ret_val +kiss_thin_nfa_add_pattern_to_trie(struct thin_nfa_comp_s *nfa_comp, const kiss_pmglob_string_s *sm_cur_pattern) +{ + static const char rname[] = "kiss_thin_nfa_add_pattern_to_trie"; + const u_char *pattern_text; + u_int pattern_len; + u_int i; + kiss_thin_nfa_state_t *current_state; + int anchored_pattern; + + pattern_text = kiss_pmglob_string_get_pattern(sm_cur_pattern); + pattern_len = kiss_pmglob_string_get_size(sm_cur_pattern); + anchored_pattern = kiss_pmglob_string_get_flags(sm_cur_pattern) & KISS_PM_LSS_AT_BUF_START; + + thinnfa_debug(("%s: Adding the pattern: %s flags=%x\n", rname, kiss_pmglob_string_to_debug_charp(sm_cur_pattern), + kiss_pmglob_string_get_flags(sm_cur_pattern))); + + // How much of this pattern do we already have in the tree? 
+ current_state = kiss_thin_nfa_find_longest_prefix(nfa_comp, pattern_text, pattern_len, anchored_pattern); + if (!current_state) return KISS_ERROR; // Messages printed inside + + thinnfa_debug(("%s: State %s (flags %x) represents the longest prefix at the offset %d/%d\n", rname, + state_name(current_state), current_state->flags, current_state->depth, pattern_len)); + + // Go over the remaining bytes (if any) and add more states + for (i = current_state->depth; i < pattern_len; i++) { + kiss_thin_nfa_state_t *new_state; + u_char ch; + + // Create a new state. Depth i+1, because the first character (i=0) is at depth 1. + new_state = kiss_thin_nfa_state_create(nfa_comp, i+1, pattern_text, + (nfa_thin_state_flags_t)(current_state->flags & THIN_NFA_STATE_ANCHORED)); + if (!new_state) { + thinnfa_debug_err(("%s: Failed to allocate a new state\n", rname)); + kiss_thin_nfa_set_comp_error(nfa_comp, "Failed to allocate a new state"); + return KISS_ERROR; + } + + // Add a transition into the new state + ch = kiss_thin_nfa_xlate_char(nfa_comp, pattern_text[i]); + kiss_thin_nfa_add_transition(current_state, ch, new_state); + + thinnfa_debug(("%s: Added new state+transition %s -> %s by 0x%02x offset %d\n", rname, + state_name(current_state), state_name(new_state), ch, i)); + + verify_state(nfa_comp, current_state); + + // Add the following states after this one + current_state = new_state; + } + + // Set state as finite and add the pattern ID to the list of patterns which this state accepts. + // Note: It's OK if the state isn't one we just added. E.g. the new pattern is a prefix of an existing one. + if (kiss_thin_nfa_state_set_match_pattern(current_state, sm_cur_pattern) != KISS_OK) { + thinnfa_debug_err(( + "%s: Failed to save the pattern information for the state %s\n", + rname, + state_name(current_state) + )); + kiss_thin_nfa_set_comp_error(nfa_comp, "Failed to save the pattern information for the state"); + return KISS_ERROR; + } + + return KISS_OK; +} + + +// Find the transition from a state by a character, considering fail states. +// The state should alrady have its fail state calculated. +// +// Note: kiss_bnfa_build_full_trans_table may pass from_state=NULL. The result is returning the root, which is OK. +static kiss_thin_nfa_state_t * +kiss_thin_nfa_calc_transition(struct thin_nfa_comp_s *nfa_comp, kiss_thin_nfa_state_t *from_state, u_char tran_char) +{ + static const char rname[] = "kiss_thin_nfa_calc_transition"; + kiss_thin_nfa_state_t *state; + + // Go down the fail state chain, until we find a transition. + for (state = from_state; state != NULL; state = state->fail_state) { + kiss_thin_nfa_state_t *next_state; + + // Look up in this state's transition table + next_state = kiss_thin_nfa_comp_get_next_state(state, tran_char); + if (next_state != NULL) { + if (state == from_state) { + thinnfa_debug_extended(("%s: Found transition from %s by 0x%02x to %s\n", rname, + state_name(from_state), tran_char, state_name(next_state))); + } else { + thinnfa_debug_extended(( + "%s: Found transition from %s by 0x%02x to %s using the fail state %s\n", + rname, + state_name(from_state), + tran_char, + state_name(next_state), + state_name(state) + )); + } + return next_state; + } + } + + // We've gone down to the root, and found nothing - so the next state is the root. 
+ thinnfa_debug_extended(("%s: No transition from %s by 0x%02x - going to root\n", rname, + state_name(from_state), tran_char)); + return nfa_comp->root_state; +} + + +// A callback function prototype for kiss_thin_nfa_iterate_trans +typedef kiss_ret_val (*kiss_thin_nfa_iterate_trans_cb)(kiss_thin_nfa_state_t *from_state, + u_char tran_char, kiss_thin_nfa_state_t *to_state); + + +// Iterate all the transitions in the trie, in BFS order. +// Note: The callback will be called once per transition, i.e. once per state, except for the initial state. +static kiss_ret_val +kiss_thin_nfa_iterate_trans_bfs(struct thin_nfa_comp_s *nfa_comp, kiss_thin_nfa_iterate_trans_cb iter_cb) +{ + static const char rname[] = "kiss_thin_nfa_iterate_trans_bfs"; + kiss_thin_nfa_state_t *bfs_q_head, *bfs_q_tail; + + thinnfa_debug(("%s: Starting BFS iteration, %d states\n", rname, nfa_comp->state_num)); + + // This queue contains states, whose children we want to iterate. + // We start with the root state followed by the anchored root state. + bfs_q_head = nfa_comp->root_state; + bfs_q_head->bfs_q = NULL; + bfs_q_tail = bfs_q_head; + if (nfa_comp->anchored_root_state) { + bfs_q_tail->bfs_q = nfa_comp->anchored_root_state; + nfa_comp->anchored_root_state->bfs_q = NULL; + bfs_q_tail = nfa_comp->anchored_root_state; + } + + // Dequeue each of the states, call the iterator for each transition and enqueue the children + while (bfs_q_head != NULL) { + kiss_thin_nfa_state_t *from_state; + kiss_thin_nfa_state_t *to_state; + + // Dequeue a state from the head + from_state = bfs_q_head; + bfs_q_head = from_state->bfs_q; + if (bfs_q_head == NULL) bfs_q_tail = NULL; + + thinnfa_debug_extended(( + "%s: Got the state %s with %d children\n", + rname, + state_name(from_state), + from_state->num_trans + )); + + // Go over the state's transitions + for (to_state = from_state->child; to_state != NULL; to_state = to_state->sibling) { + thinnfa_debug_extended(( + "%s: Got the child state %s at the depth %d\n", + rname, + state_name(to_state), + to_state->depth + )); + + // Call the iterator function + if (iter_cb(from_state, to_state->tran_char, to_state) != KISS_OK) { + return KISS_ERROR; + } + + // No need to enqueue states with no children + if (to_state->num_trans == 0) continue; + + // Enqueue the next state, so we'd iterate its transitions too + to_state->bfs_q = NULL; + if (bfs_q_tail != NULL) { + bfs_q_tail->bfs_q = to_state; + } else { + bfs_q_head = to_state; + } + bfs_q_tail = to_state; + } + } + + return KISS_OK; +} + + +// Set a state's fail state. +// To calculate this, we need the state's parent, and the character that takes us from the parent to the current. +// The parent's fail state must be calculated already. +static kiss_ret_val +kiss_thin_nfa_set_fail_state(kiss_thin_nfa_state_t *parent, u_char tran_char, kiss_thin_nfa_state_t *state) +{ + static const char rname[] = "kiss_thin_nfa_set_fail_state"; + kiss_thin_nfa_state_t *fail_state; + + // Calculate the fail state. + // The same character that takes us from parent to state would take us from parent->fail_state to state->fail_state + fail_state = kiss_thin_nfa_calc_transition(state->comp, parent->fail_state, tran_char); + state->fail_state = fail_state; + + thinnfa_debug(("%s: The fail state of %s is %s (parent %s, parent->fail_state %s, char %02x)\n", rname, + state_name(state), state_name(fail_state), state_name(parent), + state_name(parent->fail_state), tran_char)); + + + // If a state's fail state is finite, so is the state itself. 
+ // This is because the fail state represents a suffix of the state, which is included in + // the suffix the state represents. If the shorter suffix is a match, so is the longer one. + // Example - The fail state of "abc" is "bc" (if it exists). If "bc" is a match, then so is "abc". + if (fail_state->flags & THIN_NFA_STATE_MATCH) { + thinnfa_debug(("%s: Fail state %s is finite - so is %s\n", rname, + state_name(fail_state), state_name(state))); + if (kiss_thin_nfa_state_copy_match_ids(state, fail_state)) { + thinnfa_debug_err(( + "%s: Failed to copy the pattern IDs from %s to %s\n", + rname, + state_name(fail_state), + state_name(state) + )); + kiss_thin_nfa_set_comp_error(state->comp, "Failed to copy the pattern IDs"); + return KISS_ERROR; + } + } + + // This isn't related to calculating fail states. It should be done after the trie was built, but before + // starting BNFA construction. + if (kiss_thin_nfa_optimize_contig_chars && (state->flags & THIN_NFA_STATE_MAX_IDENTICAL_CHAR)) { + // Optimization for identical character sequences. States which represent a maximal sequence of the same + // characters will be full. So for a long sequence of a single character, we'll always be in a full state. + // Great for the performance lab. + thinnfa_debug(( + "%s: State %s is a maximal identical character sequence - making it full\n", + rname, + state_name(state) + )); + make_state_full(state); + } + + return KISS_OK; +} + + +// See if we can find a better fail state for a state. +// If the fail state contains only transitions the original state has anyway, we can use its fail state instead. +static kiss_thin_nfa_state_t * +kiss_thin_nfa_find_better_fail_state(kiss_thin_nfa_state_t *state) +{ + kiss_thin_nfa_state_t *fail_state; + + if (!state->fail_state) return NULL; + + // Go down the fail state chain. + // Keep going as long as the states contain only transitions the current state has anyway. + for (fail_state = state->fail_state; fail_state->fail_state != NULL; fail_state = fail_state->fail_state) { + + verify_state(state->comp, fail_state); + + if (fail_state->flags & THIN_NFA_STATE_FULL) { + // Full state - failing to it will always give us the answer. + break; + } + + if (!kiss_thin_nfa_are_trans_contained(state, fail_state)) { + // This state has transitions that the current state doesn't - we must fail to it, + // not lower. + break; + } + } + + return fail_state; +} + + +// Change fail states to go faster up the tree, if possible. +// Normally, a fail state points one level upward. But sometimes it can be more upward. +// +// Note: This must be done after kiss_thin_nfa_set_fail_state was called for all states. This is because +// kiss_thin_nfa_set_fail_state uses the parent's fail state to calculate the child's. If the parent's fail stae +// was "reduced", we'll get the wrong fail state for the child. 
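// Worked example (illustrative, ignoring the full-state tiers): with the patterns
// "aba" and "ba", the state "ab" fails to "b". The only transition of "b" (by 'a',
// to "ba") also exists at "ab" (by 'a', to "aba"), so failing to "b" can never add
// information, and "ab" may fail directly to "b"'s own fail state - the root -
// saving one hop at run time.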
+static void +kiss_thin_nfa_reduce_fail_states(struct thin_nfa_comp_s *nfa_comp) +{ + static const char rname[] = "kiss_thin_nfa_reduce_fail_states"; + kiss_thin_nfa_state_t *state; + + for (state = nfa_comp->root_state; state != NULL; state = kiss_thin_nfa_get_subsequent_state(nfa_comp, state)) { + kiss_thin_nfa_state_t *fail_state; + + if (state->flags & THIN_NFA_STATE_FULL) { + // A full state's fail state isn't interesting + continue; + } + + fail_state = kiss_thin_nfa_find_better_fail_state(state); + if (fail_state != state->fail_state) { + // We have a better fail state + thinnfa_debug(("%s: Changing the fail state of %s from %s to %s\n", rname, + state_name(state), state_name(state->fail_state), state_name(fail_state))); + state->fail_state = fail_state; + } + } +} + + +// Calculate fail states for all states. +static kiss_ret_val +kiss_thin_nfa_calc_fail_states(struct thin_nfa_comp_s *nfa_comp) +{ + static const char rname[] = "kiss_thin_nfa_calc_fail_states"; + + // The root state has no fail state + nfa_comp->root_state->fail_state = NULL; + if (nfa_comp->anchored_root_state) { + // The anchored root fails to the root + nfa_comp->anchored_root_state->fail_state = nfa_comp->root_state; + } + + thinnfa_debug(("%s: Calculating the fail states for all states\n", rname)); + + // Iterate all transitions, and calculate fail states for the target states. + // This would cover all states, except the initial (whose fail state was already set). + // BFS order assures that a parent's fail state is already calculated when we reach the child. + if (kiss_thin_nfa_iterate_trans_bfs(nfa_comp, kiss_thin_nfa_set_fail_state) != KISS_OK) { + thinnfa_debug_err(("%s: Failed to calculate the fail states\n", rname)); + return KISS_ERROR; + } + + // All states now have their fail states calculated + ENUM_SET_FLAG(nfa_comp->flags, THIN_NFA_FAIL_STATES_CALCULATED); + + // Optimization - reduce fail states + kiss_thin_nfa_reduce_fail_states(nfa_comp); + + return KISS_OK; +} + + +// Set a state's BNFA offset to the size so far, and increment by the state size. +static void +set_state_offset(kiss_thin_nfa_state_t *state, kiss_bnfa_offset_t *cur_offset) +{ + static const char rname[] = "set_state_offset"; + u_int state_size=0, match_size=0; + + verify_state(state->comp, state); + + if (state->bnfa_offset == KISS_BNFA_OFFSET_INVALID) { + // Room for the actual state - negative offset for full states, positive for partial. + if ((state->flags & THIN_NFA_STATE_FULL) && (*cur_offset<0)) { + state_size = sizeof(kiss_bnfa_full_state_t); + } else if (!(state->flags & THIN_NFA_STATE_FULL) && (*cur_offset>=0)) { + state_size = kiss_bnfa_partial_state_size(state->num_trans); + } + } + + if (state->bnfa_incoming_off == KISS_BNFA_OFFSET_INVALID) { + // Room for a match state - if needed, must be a positive offset. 
+        if ((state->flags & THIN_NFA_STATE_MATCH) && (*cur_offset >= 0)) {
+            match_size = sizeof(kiss_bnfa_match_state_t);
+            if (state->flags & THIN_NFA_STATE_FULL) {
+                // Need a jump state too
+                match_size += kiss_bnfa_partial_state_size(0);
+            }
+        }
+    }
+
+    // Update the state offsets
+    if (match_size > 0) {
+        thinnfa_debug_extended(("%s: State %s was given a match offset %d size %d", rname, state_name(state),
+            *cur_offset, match_size));
+        state->bnfa_incoming_off = *cur_offset;
+        *cur_offset += match_size;
+    }
+    if (state_size > 0) {
+        thinnfa_debug_extended(("%s: State %s was given a real offset %d size %d", rname, state_name(state),
+            *cur_offset, state_size));
+        state->bnfa_offset = *cur_offset;
+        *cur_offset += state_size;
+        if (!(state->flags & THIN_NFA_STATE_MATCH)) {
+            // Incoming transitions go directly to the state
+            state->bnfa_incoming_off = state->bnfa_offset;
+        }
+    }
+}
+
+// Check if a compressed offset fits the full-state (short) offset size
+static BOOL
+comp_offset_fits_short(kiss_bnfa_comp_offset_t comp_offset)
+{
+    if ((comp_offset) != (kiss_bnfa_short_offset_t)(comp_offset)) {
+        return FALSE;
+    }
+    return TRUE;
+}
+
+// Mark all children of a given state as reachable from a full state
+static void
+kiss_bnfa_mark_childs_reach_from_full(kiss_thin_nfa_state_t *state)
+{
+    kiss_thin_nfa_state_t *child;
+
+    for (child = state->child; child != NULL; child = child->sibling) {
+        ENUM_SET_FLAG(child->flags, THIN_NFA_STATE_REACH_FROM_FULL);
+    }
+}
+
+// Mark all states that are reachable from a given full state,
+// in order to place them at lower offsets to avoid possible overflow due to offset compression.
+// If a state's fail state is of partial type, mark its children too
+static void
+kiss_bnfa_mark_reachable_from_full(kiss_thin_nfa_state_t *state) {
+
+    kiss_bnfa_mark_childs_reach_from_full(state);
+    for (state = state->fail_state; state && !(state->flags & THIN_NFA_STATE_FULL); state = state->fail_state) {
+        kiss_bnfa_mark_childs_reach_from_full(state);
+    }
+}
+
+// Calculate the offset of each BNFA state, and the entire BNFA size.
+// Sets each state's bnfa_offset / bnfa_incoming_off, as well as nfa_comp->min_bnfa_off and max_bnfa_off.
+static kiss_ret_val
+kiss_bnfa_calc_offsets(struct thin_nfa_comp_s *nfa_comp)
+{
+    static const char rname[] = "kiss_bnfa_calc_offsets";
+    kiss_thin_nfa_state_t *state;
+    kiss_bnfa_offset_t cur_offset;
+
+    // Full states have negative offsets. So the first state's offset depends on the number of full states.
+    cur_offset = -(kiss_bnfa_offset_t)(nfa_comp->full_state_num * sizeof(kiss_bnfa_full_state_t));
+    nfa_comp->min_bnfa_off = cur_offset;
+
+    // Put the anchored root state first, because it's the initial state
+    if (nfa_comp->anchored_root_state) {
+        KISS_ASSERT(nfa_comp->anchored_root_state->flags & THIN_NFA_STATE_FULL,
+            "%s: The anchored root %s must be a full state\n", rname, state_name(nfa_comp->anchored_root_state));
+        set_state_offset(nfa_comp->anchored_root_state, &cur_offset);
+    }
+
+    // If there's no anchored root, then root must be initial. If there is, validation expects it second.
+    set_state_offset(nfa_comp->root_state, &cur_offset);
+
+    // in this loop we add only the full states, which have negative offsets
+    for (state = nfa_comp->root_state; state != NULL; state = kiss_thin_nfa_get_subsequent_state(nfa_comp, state)) {
+        if (state->flags & THIN_NFA_STATE_FULL) {
+            kiss_bnfa_mark_reachable_from_full(state);    // Mark child states so they'll get low offsets
+            set_state_offset(state, &cur_offset);
+        }
+    }
+    // We added all full states and are moving to partials - we must be at offset 0.
+    KISS_ASSERT(cur_offset==0,
+        "%s: Offset %d != 0 after adding %d full states\n", rname, cur_offset, nfa_comp->full_state_num);
+
+    // in this loop we add states that are reachable from full states. We want them at low offsets to avoid
+    // possible overflow due to offset compression
+    for (state = nfa_comp->root_state; state != NULL; state = kiss_thin_nfa_get_subsequent_state(nfa_comp, state)) {
+        if (state->flags & THIN_NFA_STATE_REACH_FROM_FULL) {
+            set_state_offset(state, &cur_offset);
+        }
+    }
+
+    // Make sure we have not exceeded the limit of offsets that can be compressed to 16 bits
+    // Note: the test is a little too strict - we check the first state that is not reachable from a full state
+    // instead of the last state that is reachable
+    if (!comp_offset_fits_short(kiss_bnfa_offset_compress(cur_offset))) {
+        thinnfa_debug_err(("%s: Current offset is %d, not reachable from the full state\n", rname, cur_offset));
+        kiss_thin_nfa_set_comp_error(nfa_comp, "Exceeded the limit of reachable states");
+        return KISS_ERROR;
+    }
+
+    // in this loop we add the partial and matching states, which weren't handled in the loops above.
+    for (state = nfa_comp->root_state; state != NULL; state = kiss_thin_nfa_get_subsequent_state(nfa_comp, state)) {
+        set_state_offset(state, &cur_offset);
+    }
+    // The current offset is the size of partial states. Add the full state size to get the total size.
+    nfa_comp->max_bnfa_off = cur_offset;
+
+    thinnfa_debug_major(("%s: BNFA size - %u full states, %u partial states, total %u bytes\n", rname,
+        nfa_comp->full_state_num,
+        nfa_comp->state_num-nfa_comp->full_state_num,
+        nfa_comp->max_bnfa_off - nfa_comp->min_bnfa_off));
+
+    return KISS_OK;
+}
+
+
+// Get a state's BNFA offset.
+// skip_match makes a difference for matching states:
+//   TRUE - Get the actual state, where the transition table is.
+//   FALSE - Get the match state, where incoming transitions should go.
+static kiss_bnfa_offset_t
+state_bnfa_offset(kiss_thin_nfa_state_t *state, BOOL skip_match)
+{
+    return skip_match ? state->bnfa_offset : state->bnfa_incoming_off;
+}
+
+
+// Convert a BNFA offset to a BNFA state pointer
+static kiss_bnfa_state_t *
+comp_bnfa_offset_to_state(struct thin_nfa_comp_s *nfa_comp, kiss_bnfa_offset_t bnfa_offset)
+{
+    return kiss_bnfa_offset_to_state_write(nfa_comp->runtime_nfa->bnfa, bnfa_offset);
+}
+
+
+// Get a pointer to a state in the BNFA.
+// skip_match makes a difference for matching states:
+//   TRUE - Get the actual state, where the transition table is.
+//   FALSE - Get the match state, where incoming transitions should go.
+static kiss_bnfa_state_t *
+comp_to_bnfa_state(kiss_thin_nfa_state_t *state, BOOL skip_match)
+{
+    return comp_bnfa_offset_to_state(state->comp, state_bnfa_offset(state, skip_match));
+}
+
+// Move next to state_bnfa_offset. assert inside.
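+// Returns the compressed (16-bit) form of the state's incoming offset, as stored in full-state
+// transition tables. The assert verifies that the compressed offset really fits in a short offset.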
+static kiss_bnfa_short_offset_t
+state_bnfa_short_offset(kiss_thin_nfa_state_t *state)
+{
+    static const char rname[] = "state_bnfa_short_offset";
+    kiss_bnfa_comp_offset_t comp_offset = kiss_bnfa_offset_compress(state_bnfa_offset(state, FALSE));
+
+    KISS_ASSERT(comp_offset_fits_short(comp_offset),
+        "%s: Compressed offset %d exceeds the allowed size\n", rname, comp_offset);
+
+    return (kiss_bnfa_short_offset_t)comp_offset;
+}
+
+
+// If character translation is enabled, duplicate ch's transition to all equivalents
+static void
+add_equivalent_transitions(struct thin_nfa_comp_s *nfa_comp, kiss_bnfa_full_state_t *bnfa_state, u_char ch)
+{
+    static const char rname[] = "add_equivalent_transitions";
+    u_char other_ch;
+    u_int group_size;
+
+    if (!nfa_comp->xlation_tab) return;
+
+    // Go over all characters within the same group
+    group_size = 0;
+    for (other_ch = nfa_comp->xlation_tab->rev[ch]; other_ch != ch; other_ch = nfa_comp->xlation_tab->rev[other_ch]) {
+        thinnfa_debug_extended(("%s: Setting translated transition by %02x - same as %02x\n", rname, other_ch, ch));
+
+        bnfa_state->transitions[other_ch] = bnfa_state->transitions[ch];
+
+        // Prevent looping in case the table is corrupt
+        group_size++;
+        KISS_ASSERT_CRASH(group_size <= KISS_PM_ALPHABET_SIZE,
+            "%s: Too many characters to translate into %02x\n", rname, ch);
+    }
+}
+
+
+// Add a transition to a full transition table.
+// If there's a translation table, add transitions for all equivalent characters.
+static void
+add_full_transition(
+    struct thin_nfa_comp_s *nfa_comp,
+    kiss_bnfa_full_state_t *bnfa_state,
+    kiss_thin_nfa_state_t *next_state
+)
+{
+    static const char rname[] = "add_full_transition";
+    u_char ch = next_state->tran_char;
+
+    thinnfa_debug_extended(("%s: Setting the transition by %02x to %s\n", rname,
+        next_state->tran_char, state_name(next_state)));
+
+    // Set the transition, for ch and equivalent characters
+    bnfa_state->transitions[ch] = state_bnfa_short_offset(next_state);
+    add_equivalent_transitions(nfa_comp, bnfa_state, ch);
+}
+
+#if !defined(KERNEL)
+
+// A recursive algorithm to build full state tables.
+// Much faster than the previous algorithm, but shouldn't be used in the kernel.
+// Allow mutual recursion between these two functions:
+static void build_full_trans_table(kiss_thin_nfa_state_t *comp_state);
+static void get_full_trans_table(kiss_thin_nfa_state_t *target_state, kiss_thin_nfa_state_t *source_state);
+
+
+// Get the transition table of source_state and write it in target_state's.
+// source_state is somewhere in the fail state chain of target_state.
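+// The table is built recursively: first the fail-state chain is rendered (bottoming out at the root, or at a
+// full state whose finished table can simply be copied), and then this state's own explicit transitions override it.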
+static void
+get_full_trans_table(kiss_thin_nfa_state_t *target_state, kiss_thin_nfa_state_t *source_state)
+{
+    kiss_thin_nfa_state_t *child;
+    kiss_bnfa_state_t *target_bnfa = comp_to_bnfa_state(target_state, TRUE);
+
+    if (source_state != target_state && (source_state->flags & THIN_NFA_STATE_FULL)) {
+        // We've reached a full state - just copy its transition table (build it first, if needed)
+        build_full_trans_table(source_state);
+        bcopy(comp_to_bnfa_state(source_state, TRUE)->full.transitions,
+            target_bnfa->full.transitions,
+            sizeof(target_bnfa->full.transitions));
+        return;
+    }
+
+    // Start with our fail state's state table
+    if (source_state->fail_state) {
+        get_full_trans_table(target_state, source_state->fail_state);
+    } else {
+        int i;
+        kiss_bnfa_short_offset_t root_bnfa_comp_offset = state_bnfa_short_offset(source_state);
+
+        // Reached the root - fill with transitions to root
+        for (i = 0; i < KISS_PM_ALPHABET_SIZE; i++) {
+            target_bnfa->full.transitions[i] = root_bnfa_comp_offset;
+        }
+    }
+
+    // Override transitions which exist in this state
+    for (child = source_state->child; child != NULL; child = child->sibling) {
+        add_full_transition(target_state->comp, &target_bnfa->full, child);
+    }
+}
+
+
+// Recursive function for building a full state's state table.
+// comp_state is the state whose table we're building.
+// source_state changes when recursing over the fail state chain.
+static void
+build_full_trans_table(kiss_thin_nfa_state_t *comp_state)
+{
+    if (comp_state->flags & THIN_NFA_STATE_BUILT_TABLE) return;
+
+    get_full_trans_table(comp_state, comp_state);
+
+    ENUM_SET_FLAG(comp_state->flags, THIN_NFA_STATE_BUILT_TABLE);
+}
+
+#endif // KERNEL
+
+static CP_INLINE kiss_ret_val
+verify_add_state(kiss_thin_nfa_state_t *comp_state, kiss_bnfa_state_t *bnfa_state, u_int state_size,
+    const char *caller, const char *type)
+{
+    const KissThinNFA *nfa_h = comp_state->comp->runtime_nfa.get();
+    kiss_bnfa_offset_t bnfa_offset = (char *)bnfa_state - (char *)(nfa_h->bnfa);
+    u_int state_alignment = (bnfa_offset < 0) ? sizeof(kiss_bnfa_full_state_t) : KISS_BNFA_STATE_ALIGNMENT;
+
+    if ((bnfa_offset < nfa_h->min_bnfa_offset) || (bnfa_offset+(int)state_size > nfa_h->max_bnfa_offset)) {
+        thinnfa_debug_err(("%s: Cannot add the %s state %s at the offset %d:%d - out of range %d:%d\n", caller, type,
+            state_name(comp_state),
+            bnfa_offset, bnfa_offset+state_size,
+            nfa_h->min_bnfa_offset, nfa_h->max_bnfa_offset));
+        return KISS_ERROR;
+    }
+
+    if ((bnfa_offset % state_alignment) != 0) {
+        thinnfa_debug_err((
+            "%s: Cannot add the %s state %s at the offset %d:%d - not aligned on %d bytes\n",
+            caller,
+            type,
+            state_name(comp_state),
+            bnfa_offset,
+            bnfa_offset + state_size,
+            state_alignment
+        ));
+        return KISS_ERROR;
+    }
+
+    thinnfa_debug(("%s: Adding the %s state %s, offsets %d:%d\n", caller, type,
+        state_name(comp_state),
+        bnfa_offset, bnfa_offset+state_size));
+
+    return KISS_OK;
+}
+
+
+// Old, non-recursive and slow version of build_full_trans_table.
+static void
+build_full_trans_table_no_recursion(kiss_thin_nfa_state_t *comp_state)
+{
+    static const char rname[] = "build_full_trans_table_no_recursion";
+    struct thin_nfa_comp_s *nfa_comp = comp_state->comp;
+    kiss_bnfa_state_t *bnfa_state = comp_to_bnfa_state(comp_state, TRUE);
+    kiss_thin_nfa_state_t *child;
+    u_int i;
+
+    // Go over all characters. Maintain a pointer to the next transition in the list.
+    // We rely on the list being sorted.
+    // We could simply call kiss_thin_nfa_calc_transition for each character.
But it would look up again and + // again in the current state. + child = comp_state->child; + for (i = 0; i < KISS_PM_ALPHABET_SIZE; i++) { + u_char ch = (u_char)i; + kiss_thin_nfa_state_t *next_state; + + // Check if it's a canonic character (e.g. lowercase when we're case insensitive) + if (kiss_thin_nfa_xlate_char(nfa_comp, ch) != ch) { + // We'll fill this in when we reach the canonic character. + continue; + } + + if (child != NULL && child->tran_char == ch) { + // Use the explicit transition + next_state = child; + + // Go forward in the transition table + child = child->sibling; + + thinnfa_debug_extended(("%s: Setting the explicit transition by %02x to %s\n", rname, + ch, state_name(next_state))); + } else { + // Note: if comp_state is the initial, we pass from_state=NULL. + // This works as desired (returning the initial state). + next_state = kiss_thin_nfa_calc_transition(nfa_comp, comp_state->fail_state, ch); + + thinnfa_debug_extended(("%s: Setting the fail-state transition by %02x to %s\n", rname, + ch, state_name(next_state))); + } + + // Set the transition for this character and equivalents + bnfa_state->full.transitions[ch] = state_bnfa_short_offset(next_state); + add_equivalent_transitions(nfa_comp, &bnfa_state->full, ch); + } + ENUM_SET_FLAG(comp_state->flags, THIN_NFA_STATE_BUILT_TABLE); +} + + +// Build a full state's transition table in the BNFA. +// Either uses the explicit transition, or calculates using fail states. +static kiss_ret_val +kiss_bnfa_build_full_state(kiss_thin_nfa_state_t *comp_state) +{ + static const char rname[] = "kiss_bnfa_build_full_state"; + kiss_bnfa_state_t *bnfa_state = comp_to_bnfa_state(comp_state, TRUE); + + if (verify_add_state(comp_state, bnfa_state, sizeof(kiss_bnfa_full_state_t), rname, "full") != KISS_OK) { + return KISS_ERROR; + } + +#if !defined(KERNEL) + if (comp_state->comp->flags & THIN_NFA_USE_RECURSIVE_COMPILE) { + build_full_trans_table(comp_state); + return KISS_OK; + } +#endif // KERNEL + + build_full_trans_table_no_recursion(comp_state); + + return KISS_OK; +} + + +static void +kiss_bnfa_build_partial_state_header( + kiss_bnfa_partial_state_t *bnfa_state, + u_int trans_num, + kiss_bnfa_offset_t fail_offset +) +{ + bnfa_state->type = KISS_BNFA_STATE_PARTIAL; + bnfa_state->trans_num = trans_num; + bnfa_state->fail_state_offset = kiss_bnfa_offset_compress(fail_offset); +} + + +// Build a partial state's transition table in the BNFA. +// Temporary encoding - sets the state ID instead of the BNFA offset (which is yet unknown). 
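+// Layout: a small header (type, transition count, compressed fail-state offset) followed by one
+// {tran_char, next-state offset} entry per explicit transition, in the order of the (sorted) child list.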
+static kiss_ret_val +kiss_bnfa_build_partial_state(kiss_thin_nfa_state_t *comp_state) +{ + static const char rname[] = "kiss_bnfa_build_partial_state"; + kiss_bnfa_state_t *bnfa_state = comp_to_bnfa_state(comp_state, TRUE); + kiss_thin_nfa_state_t *child; + u_int trans_num; + + if (verify_add_state( + comp_state, + bnfa_state, + kiss_bnfa_partial_state_size(comp_state->num_trans), + rname, + "partial" + ) != KISS_OK) { + return KISS_ERROR; + } + + // Fill in the transition number and fail state + kiss_bnfa_build_partial_state_header(&bnfa_state->partial, comp_state->num_trans, + state_bnfa_offset(comp_state->fail_state, TRUE)); + thinnfa_debug_extended(("%s: The fail state is %s\n", rname, state_name(comp_state->fail_state))); + + // Build a transition for each existing character + trans_num = 0; + for (child = comp_state->child; child != NULL; child = child->sibling) { + thinnfa_debug_extended(("%s: Setting the transition by %02x to %s\n", rname, + child->tran_char, state_name(child))); + bnfa_state->partial.transitions[trans_num].tran_char = child->tran_char; + bnfa_state->partial.transitions[trans_num].next_state_offset = + kiss_bnfa_offset_compress(state_bnfa_offset(child, FALSE)); + trans_num++; + } + KISS_ASSERT(trans_num == comp_state->num_trans, "%s: State %s should have %d transitions, but it has %d", + rname, state_name(comp_state), comp_state->num_trans, trans_num); + ENUM_SET_FLAG(comp_state->flags, THIN_NFA_STATE_BUILT_TABLE); + + return KISS_OK; +} + + +// Build a match state. +static kiss_ret_val +kiss_bnfa_build_match_state(kiss_thin_nfa_state_t *comp_state, u_int match_id) +{ + static const char rname[] = "kiss_bnfa_build_match_state"; + kiss_bnfa_offset_t match_bnfa_offset = state_bnfa_offset(comp_state, FALSE); + kiss_bnfa_state_t *match_state = comp_bnfa_offset_to_state(comp_state->comp, match_bnfa_offset); + kiss_bnfa_offset_t following_state_offset, real_state_offset; + + if (verify_add_state(comp_state, match_state, sizeof(kiss_bnfa_match_state_t), rname, "match") != KISS_OK) { + return KISS_ERROR; + } + + // Fill in the match state + match_state->match.type = KISS_BNFA_STATE_MATCH; + match_state->match.unused = 0; + match_state->match.match_id = match_id; + + // Add a jump state if the real state isn't directly following the match state (i.e. for full-matching states). + real_state_offset = state_bnfa_offset(comp_state, TRUE); + following_state_offset = match_bnfa_offset + sizeof(kiss_bnfa_match_state_t); + if (following_state_offset != real_state_offset) { + kiss_bnfa_state_t *jump_state = comp_bnfa_offset_to_state(comp_state->comp, following_state_offset); + + // Add a jump state (a 0-transition partial state) to the real state + if (verify_add_state(comp_state, jump_state, kiss_bnfa_partial_state_size(0), rname, "jump") != KISS_OK) { + return KISS_ERROR; + } + kiss_bnfa_build_partial_state_header(&jump_state->partial, 0, real_state_offset); + } + return KISS_OK; +} + + +// Encode a state in binary NFA form. 
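+// For a matching state, a match record (and, for full states, a small jump record) is emitted at the
+// state's incoming offset, so incoming transitions land on the match record before reaching the state itself.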
+static kiss_ret_val +kiss_bnfa_add_state(kiss_thin_nfa_state_t *comp_state, u_int offset_in_pat_match_array) +{ + if (comp_state->flags & THIN_NFA_STATE_MATCH) { + // Build a match state (a jump state too if needed) + if (kiss_bnfa_build_match_state(comp_state, offset_in_pat_match_array) != KISS_OK) return KISS_ERROR; + } + + // Add the state + if (comp_state->flags & THIN_NFA_STATE_FULL) { + if (kiss_bnfa_build_full_state(comp_state) != KISS_OK) return KISS_ERROR; + } else { + if (kiss_bnfa_build_partial_state(comp_state) != KISS_OK) return KISS_ERROR; + } + + return KISS_OK; +} + +static uintptr_t +pat_key_hash_func(const void *key, CP_MAYBE_UNUSED void *info) +{ + const kiss_thin_nfa_pattern_array_t *pat_arr = (const kiss_thin_nfa_pattern_array_t *)key; + const char* buf = (const char *)key; + const char *buf_end; + uintptr_t val = 0; + + buf_end = buf + kiss_thin_nfa_pattern_array_size(pat_arr->n_patterns); + + for ( ; buf != buf_end; buf++) { + val = ((val >> 3) ^ (val<<5)) + *buf; + } + return val; +} + +static int +pat_key_cmp_func(const void *key1, const void *key2, CP_MAYBE_UNUSED void *info) +{ + const kiss_thin_nfa_pattern_array_t *pat1 = (const kiss_thin_nfa_pattern_array_t *)key1; + const kiss_thin_nfa_pattern_array_t *pat2 = (const kiss_thin_nfa_pattern_array_t *)key2; + + if (pat1->n_patterns != pat2->n_patterns) { + return 1; // No match + } + + return memcmp(pat1, pat2, kiss_thin_nfa_pattern_array_size(pat1->n_patterns)); +} + +static u_int +pattern_list_len(const kiss_thin_nfa_pattern_list_t *pat_list) +{ + const kiss_thin_nfa_pattern_list_t *pat; + u_int n = 0; + for (pat = pat_list; pat != NULL; pat = pat->next) { + n++; + } + return n; +} + +static kiss_ret_val +kiss_bnfa_match_patterns_prepare(struct thin_nfa_comp_s *nfa_comp, KissThinNFA *nfa) +{ + static const char rname[] = "kiss_bnfa_match_patterns_prepare"; + kiss_thin_nfa_pattern_array_t *pat_arr; + kiss_thin_nfa_state_t *comp_state; + u_int total_size_for_patterns; + + total_size_for_patterns = 0; + for (comp_state = nfa_comp->root_state; + comp_state != NULL; + comp_state = kiss_thin_nfa_get_subsequent_state(nfa_comp, comp_state)) { + if (comp_state->flags & THIN_NFA_STATE_MATCH) { + if (!comp_state->ids) { + thinnfa_debug_critical(( + "%s: State %s is finite, but its IDs are null\n", + rname, + state_name(comp_state) + )); + kiss_thin_nfa_set_comp_error(nfa_comp, "The state is finite, but its IDs are null"); + return KISS_ERROR; + } + total_size_for_patterns += kiss_thin_nfa_pattern_array_size(pattern_list_len(comp_state->ids)); + } + } + + if (total_size_for_patterns == 0) { + thinnfa_debug_critical(("%s: no finite states?!\n", rname)); + kiss_thin_nfa_set_comp_error(nfa_comp, "no finite states?!"); + return KISS_ERROR; + } + + // We allocate according to maximum possible size. + // We might reduce it at the end, if duplicates exist. 
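+    // (Duplicates arise when several match states accept exactly the same pattern set; the hash created
+    // below lets such states share one array, and kiss_bnfa_match_patterns_finalize() shrinks the buffer.)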
+ thinnfa_debug(("%s: alocating %u bytes for a pattern array\n", rname, total_size_for_patterns)); + pat_arr = (kiss_thin_nfa_pattern_array_t *)kiss_pmglob_memory_kmalloc_ex( + total_size_for_patterns, + rname, + FW_KMEM_SLEEP + ); + if (!pat_arr) { + thinnfa_debug_critical(( + "%s: failed to allocate %d bytes for a complete pattern array\n", + rname, + total_size_for_patterns + )); + kiss_thin_nfa_set_comp_error(nfa_comp, "Failed to allocate memory for a complete pattern array"); + return KISS_ERROR; + } + + nfa->pattern_arrays = pat_arr; + nfa->pattern_arrays_size = total_size_for_patterns; + + nfa_comp->patterns_hash = kiss_hash_create_with_ksleep( + nfa->match_state_num, + pat_key_hash_func, + pat_key_cmp_func, + NULL + ); + if (!nfa_comp->patterns_hash) { + thinnfa_debug(( + "%s: failed to create patterns hash table for %u finite states\n", + rname, + nfa->match_state_num + )); + kiss_thin_nfa_set_comp_error(nfa_comp, "Failed to create patterns hash table for finite states"); + return KISS_ERROR; + } + + return KISS_OK; +} + +static kiss_ret_val +kiss_bnfa_match_patterns_finalize(struct thin_nfa_comp_s *nfa_comp, KissThinNFA *nfa, u_int new_size) +{ + static const char rname[] = "kiss_bnfa_match_patterns_finalize"; + kiss_thin_nfa_pattern_array_t *new_pat_arr; + + // Compact the match patter array, if needed + if (new_size == nfa->pattern_arrays_size) { + thinnfa_debug(("%s: no size change - the pattern array size is %u bytes\n", rname, new_size)); + return KISS_OK; + } + + if (new_size > nfa->pattern_arrays_size) { + thinnfa_debug_critical(( + "%s: new pattern array size (%u) is greater than the current size (%u). This should not happen.\n", + rname, + new_size, + nfa->pattern_arrays_size + )); + kiss_thin_nfa_set_comp_error(nfa_comp, "Failed to allocate a complete pattern array"); + return KISS_ERROR; + } + + new_pat_arr = (kiss_thin_nfa_pattern_array_t *)kiss_pmglob_memory_kmalloc_ex(new_size, rname, FW_KMEM_SLEEP); + if (!new_pat_arr) { + thinnfa_debug_critical(("%s: failed to allocate %d bytes for a complete pattern array\n", rname, new_size)); + kiss_thin_nfa_set_comp_error(nfa_comp, "Failed to allocate a complete pattern array"); + return KISS_ERROR; + } + + thinnfa_debug(("%s: reducing the size from %u to %u\n", rname, nfa->pattern_arrays_size, new_size)); + bcopy(nfa->pattern_arrays, new_pat_arr, new_size); + kiss_pmglob_memory_kfree(nfa->pattern_arrays, nfa->pattern_arrays_size, rname); + nfa->pattern_arrays = new_pat_arr; + nfa->pattern_arrays_size = new_size; + return KISS_OK; +} + + +static kiss_ret_val +kiss_bnfa_copy_pat_list( + const KissThinNFA *nfa, + kiss_hash_t patterns_hash, + kiss_thin_nfa_state_t *comp_state, + u_int *last_used_offset_in_pat_match_array, + u_int *offset_for_cur_state) +{ + static const char rname[] = "kiss_bnfa_copy_pat_list"; + + if (comp_state->flags & THIN_NFA_STATE_MATCH) { + kiss_thin_nfa_pattern_list_t *pat_list_ent; + kiss_thin_nfa_pattern_array_t *pat_arr; + kiss_thin_nfa_pattern_array_t *cached_pat_arr; + u_int pat_arr_size; + u_int n_patterns; + u_int i; + + n_patterns = pattern_list_len(comp_state->ids); + pat_arr_size = kiss_thin_nfa_pattern_array_size(n_patterns); + + if ((*last_used_offset_in_pat_match_array + pat_arr_size) > nfa->pattern_arrays_size) { + thinnfa_debug_critical(("%s: offset (%u) + required size (%u) exceeds the total array size (%u)\n", + rname, *last_used_offset_in_pat_match_array, pat_arr_size, nfa->pattern_arrays_size)); + return KISS_ERROR; + } + + pat_arr = kiss_thin_nfa_offset_to_pat_array_ptr(nfa, 
*last_used_offset_in_pat_match_array); + pat_arr->n_patterns = n_patterns; + for (i = 0, pat_list_ent = comp_state->ids; i < pat_arr->n_patterns; pat_list_ent = pat_list_ent->next, i++) { + bcopy(&(pat_list_ent->pattern), &(pat_arr->pattern[i]), sizeof(pat_list_ent->pattern)); + } + + kiss_thin_nfa_free_pattern_ids(comp_state->ids); + comp_state->ids = NULL; // Prevent release when the state is cleaned up + + cached_pat_arr = (kiss_thin_nfa_pattern_array_t *)kiss_hash_lookkey(patterns_hash, pat_arr); + if (cached_pat_arr) { + u_int cached_offset; + cached_offset = kiss_thin_nfa_pat_array_ptr_to_offset(nfa, cached_pat_arr); + // No need to move the last_used_offset + *offset_for_cur_state = cached_offset; + thinnfa_debug(( + "%s: returning cached offset of %u for the state ID %u. " + "%u patterns %u bytes. The offset stays at %u.\n", + rname, + *offset_for_cur_state, + comp_state->state_id, + n_patterns, pat_arr_size, + *last_used_offset_in_pat_match_array + )); + } else { + *offset_for_cur_state = *last_used_offset_in_pat_match_array; + if (!kiss_hash_insert(patterns_hash, pat_arr, NULL)) { + thinnfa_debug(("%s: failed to insert a pattern into a hash (non-critical error)\n", rname)); + } + *last_used_offset_in_pat_match_array += pat_arr_size; + thinnfa_debug(( + "%s: returning the offset of %u for the state ID %u. %u patterns, %u bytes. The offset moved to %u.\n", + rname, + *offset_for_cur_state, + comp_state->state_id, + n_patterns, pat_arr_size, + *last_used_offset_in_pat_match_array + )); + } + } + + return KISS_OK; +} + + +static void +kiss_bnfa_update_state_depth(kiss_thin_nfa_state_t *comp_state) +{ + struct kiss_thin_nfa_depth_map_s *map = &comp_state->comp->runtime_nfa->depth_map; + u_char depth = MIN(comp_state->depth, KISS_THIN_NFA_MAX_ENCODABLE_DEPTH); + kiss_bnfa_offset_t off; + + // Update depth at the state's offset + off = comp_state->bnfa_offset; + map->offset0[kiss_bnfa_offset_compress(off)] = depth; + + // Matching state? Update at the match state offset too. + off = comp_state->bnfa_incoming_off; + if (off == comp_state->bnfa_offset) return; + map->offset0[kiss_bnfa_offset_compress(off)] = depth; + + // Full-matching state? Update at the jump state offset too. + off += sizeof(kiss_bnfa_match_state_t); + if (off == comp_state->bnfa_offset) return; + map->offset0[kiss_bnfa_offset_compress(off)] = depth; +} + + +// Based on thin_nfa_comp_s structure we have built, create a binary Thin NFA. +// Parameter: +// nfa_comp - the NFA's compilation data structure. +// +// Performance notes: +// This function takes most of the CPU time in the compilation process (in my tests, at least). +// Within it, time is divided about equally between full and partial states. +// Full states take about 40 times more time, but there are about 40 times more partial states. +// Overall, compilation time isn't bad, but there are surely optimization options. +// Idea - when constructing a full state, start by bcopy() of its fail state transitions. This would require +// filling the states in BFS order, which isn't done today. 
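+// For every state this does three things: copy its pattern list into the shared pattern-array buffer,
+// encode the state itself in the BNFA, and record its depth in the depth map.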
+static kiss_ret_val +kiss_bnfa_fill_states(struct thin_nfa_comp_s *nfa_comp) +{ + static const char rname[] = "kiss_bnfa_fill_states"; + KissThinNFA *nfa = nfa_comp->runtime_nfa.get(); + kiss_thin_nfa_state_t *comp_state; + u_int last_used_offset_in_pat_match_array = 0; + + thinnfa_debug(("%s: Filling BNFA %p size %d with %d states\n", rname, + nfa->bnfa_start, nfa->max_bnfa_offset-nfa->min_bnfa_offset, nfa_comp->state_num)); + + if (kiss_bnfa_match_patterns_prepare(nfa_comp, nfa) != KISS_OK) { + return KISS_ERROR; + } + + // Go over the states and build the BNFA representation + for (comp_state = nfa_comp->root_state; + comp_state != NULL; + comp_state = kiss_thin_nfa_get_subsequent_state(nfa_comp, comp_state)) { + u_int state_id = comp_state->state_id; + u_int offset_for_cur_state = (u_int)-1; + + if (kiss_bnfa_copy_pat_list( + nfa, nfa_comp->patterns_hash, comp_state, + &last_used_offset_in_pat_match_array, + &offset_for_cur_state + ) != KISS_OK) { + thinnfa_debug_critical(( + "%s: kiss_bnfa_copy_pat_list() failed for the state %s\n", + rname, + state_name(comp_state) + )); + kiss_thin_nfa_set_comp_error(nfa_comp, "kiss_bnfa_copy_pat_list() failed"); + return KISS_ERROR; + } + + // Update the maximum pattern length (length = state depth) + if (comp_state->depth > nfa->max_pat_len) { + nfa->max_pat_len = comp_state->depth; + } + + // Build the state + if (kiss_bnfa_add_state(comp_state, offset_for_cur_state) != KISS_OK) { + thinnfa_debug_critical(("%s: Failed to add the state %d\n", rname, state_id)); + return KISS_ERROR; + } + + // Update the depth map + kiss_bnfa_update_state_depth(comp_state); + } + + if (kiss_bnfa_match_patterns_finalize(nfa_comp, nfa, last_used_offset_in_pat_match_array) != KISS_OK) { + return KISS_ERROR; + } + + return KISS_OK; +} + + +static void +kiss_thin_nfa_fill_stats(struct thin_nfa_comp_s *nfa_comp) +{ + struct kiss_thin_nfa_specific_stats_s *stats = &nfa_comp->runtime_nfa->stats.specific; + + stats->num_of_states = nfa_comp->state_num; + stats->num_of_final_states = nfa_comp->match_state_num; +} + + +// Get the nfa_comp structure and build, according to it, the runtime Thin NFA structure. 
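+// Steps: calculate the BNFA offsets, allocate the runtime structure, encode all states, copy the
+// character translation table (if any), and fill in the statistics.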
+static kiss_ret_val
+kiss_thin_nfa_build_bnfa(struct thin_nfa_comp_s *nfa_comp, CP_MAYBE_UNUSED u_int compile_flags)
+{
+    static const char rname[] = "kiss_thin_nfa_build_bnfa";
+
+    thinnfa_debug_major(("%s: Converting the compiled Thin NFA to the binary form\n", rname));
+
+    // Get the list of all BNFA offsets
+    if (kiss_bnfa_calc_offsets(nfa_comp) != KISS_OK) {
+        thinnfa_debug_err(("%s: Error allocating the offset list\n", rname));
+        kiss_thin_nfa_set_comp_error(nfa_comp, "Failed to allocate the offset list");
+        return KISS_ERROR;
+    }
+
+    // Allocate the runtime Thin NFA structure
+    nfa_comp->runtime_nfa = kiss_thin_nfa_create(
+        nfa_comp->match_state_num,
+        nfa_comp->min_bnfa_off,
+        nfa_comp->max_bnfa_off
+    );
+    if (!nfa_comp->runtime_nfa) {
+        thinnfa_debug_err(("%s: Error creating the NFA\n", rname));
+        kiss_thin_nfa_set_comp_error(nfa_comp, "Failed to allocate BNFA");
+        return KISS_ERROR;
+    }
+
+    if (nfa_comp->anchored_root_state) {
+        ENUM_SET_FLAG(nfa_comp->runtime_nfa->flags, KISS_THIN_NFA_HAS_ANCHOR);
+    }
+
+    // Build the BNFA we'll use at runtime
+    if (kiss_bnfa_fill_states(nfa_comp) != KISS_OK) {
+        thinnfa_debug_err(("%s: kiss_bnfa_fill_states() failed\n", rname));
+        return KISS_ERROR;
+    }
+
+    // Copy the character translation table
+    if (nfa_comp->xlation_tab) {
+        bcopy(
+            nfa_comp->xlation_tab->tab,
+            nfa_comp->runtime_nfa->xlation_tab,
+            sizeof(nfa_comp->runtime_nfa->xlation_tab)
+        );
+        ENUM_SET_FLAG(nfa_comp->runtime_nfa->flags, KISS_THIN_NFA_USE_CHAR_XLATION);
+    }
+
+    kiss_thin_nfa_fill_stats(nfa_comp);
+
+    thinnfa_debug_major(("%s: Created the binary Thin NFA %p\n", rname, nfa_comp->runtime_nfa.get()));
+    return KISS_OK;
+}
+
+static void
+kiss_thin_nfa_select_options(struct thin_nfa_comp_s *nfa_comp, CP_MAYBE_UNUSED u_int compile_flags)
+{
+    ENUM_SET_FLAG(nfa_comp->flags, THIN_NFA_ENABLE_ANCHOR_OPT);
+    nfa_comp->full_state_tier_num = kiss_thin_nfa_full_tiers_num;
+    ENUM_SET_FLAG(nfa_comp->flags, THIN_NFA_USE_RECURSIVE_COMPILE);
+    return;
+}
+
+
+// Compiling the SM according to the Aho-Corasick algorithm.
+//
+// The DFA has two types of states:
+// 1. Full states - have a transition for each possible character.
+// 2. Partial states - only have transitions for characters that take us forward in some string.
+//    For all other characters, a "fail state" is defined, and the transition is what that state would have done.
+//
+// Parameters:
+//   patterns - a set of string patterns which the resulting automaton would search for.
+//   compile_flags - flags with the KISS_PM_COMP_ prefix.
+//   error - output - on failure, would be set to indicate the reason.
+// Returns NULL on error, pointer to a newly allocated handle on success.
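+//
+// The compilation phases: build a trie of all pattern texts, calculate fail states (a BFS over the trie),
+// then convert the result to the binary (BNFA) runtime form and validate it.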
+std::unique_ptr<KissThinNFA>
+kiss_thin_nfa_compile(const std::list &patterns, u_int compile_flags, KissPMError *error)
+{
+    static const char rname[] = "kiss_thin_nfa_compile";
+    struct thin_nfa_comp_s *nfa_comp = NULL;
+    std::unique_ptr<KissThinNFA> nfa;
+
+    thinnfa_debug_major(("%s: Compiling a Thin NFA, flags=%x\n", rname, compile_flags));
+
+    // Creates a new kiss_thin_dfa_handle with initial state allocated
+    nfa_comp = kiss_thin_nfa_comp_create(error);
+    if (nfa_comp == NULL) {
+        thinnfa_debug_err(("%s: Failed to create a compile time structure\n", rname));
+        kiss_pm_error_set_details(error, KISS_PM_ERROR_INTERNAL, "Failed to allocate the compilation information");
+        goto finish;
+    }
+
+    // Enable some optimization flags as needed
+    kiss_thin_nfa_select_options(nfa_comp, compile_flags);
+
+    // Handle character translation - instead of converting to lowercase, build a translation
+    // table and use it when adding patterns to the trie and building transition tables.
+    if (kiss_thin_nfa_create_xlation_tab(nfa_comp, compile_flags) != KISS_OK) {
+        thinnfa_debug_err(("%s: Function kiss_thin_nfa_create_xlation_tab() failed\n", rname));
+        goto finish;
+    }
+
+    // Build a trie which contains all the pattern texts.
+    for (auto &pattern : patterns) {
+        // Adding each pattern to the Thin NFA - Aho-Corasick first phase
+        if (kiss_thin_nfa_add_pattern_to_trie(nfa_comp, &pattern) != KISS_OK) {
+            thinnfa_debug_err(("%s: Function kiss_thin_nfa_add_pattern_to_trie() failed\n", rname));
+            goto finish;
+        }
+    }
+
+    // Calculate fail states for all NFA states
+    if (kiss_thin_nfa_calc_fail_states(nfa_comp) != KISS_OK) {
+        thinnfa_debug_err(("%s: Function kiss_thin_nfa_calc_fail_states() failed\n", rname));
+        goto finish;
+    }
+
+    // Convert the compilation data structure to the runtime structure
+    if (kiss_thin_nfa_build_bnfa(nfa_comp, compile_flags) != KISS_OK) {
+        thinnfa_debug_err(("%s: Function kiss_thin_nfa_build_bnfa() failed\n", rname));
+        goto finish;
+    }
+
+    if (!kiss_thin_nfa_is_valid(nfa_comp->runtime_nfa.get())) {
+        thinnfa_debug_err(("%s: Function kiss_thin_nfa_is_valid() failed\n", rname));
+        goto finish;
+    }
+
+    // Get the resulting NFA (set NULL to protect from free)
+    nfa = std::move(nfa_comp->runtime_nfa);
+    thinnfa_debug_major(("%s: Successfully compiled the Thin NFA %p\n", rname, nfa.get()));
+
+finish:
+    if (nfa_comp != NULL) {
+        // We destroy the compilation data structure, whether we succeed or fail.
+        kiss_thin_nfa_comp_destroy(nfa_comp);
+    }
+    return nfa;
+}
+SASAL_END
diff --git a/components/utils/pm/kiss_thin_nfa_impl.h b/components/utils/pm/kiss_thin_nfa_impl.h
new file mode 100644
index 0000000..81e0ea4
--- /dev/null
+++ b/components/utils/pm/kiss_thin_nfa_impl.h
@@ -0,0 +1,189 @@
+// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef __h_kiss_thin_nfa_impl_h__
+#define __h_kiss_thin_nfa_impl_h__
+
+// *********************** OVERVIEW ******************************
+// Thin NFA definitions, which are only used by Thin NFA files.
+// 1. A list of patterns which is associated with a finite state.
+// 2. APIs for building and destroying the Thin NFA structures.
+// ****************************************************************
+
+#include <memory>
+#include <list>
+#include <vector>
+
+#include "i_pm_scan.h"
+#include "kiss_patterns.h"
+#include "kiss_pm_stats.h"
+#include "kiss_thin_nfa_base.h"
+
+KISS_ASSERT_COMPILE_TIME(KISS_PM_ALPHABET_SIZE == KISS_THIN_NFA_ALPHABET_SIZE);
+
+// Information we keep about a pattern
+typedef struct {
+    int id;                    // PM Internal pattern ID
+    u_int pattern_id_flags;    // KISS_PM_COMP_ prefix
+    u_int len;
+} kiss_thin_nfa_pattern_t;
+
+// Linked list of pattern information - held per finite state, to indicate what it's accepting.
+typedef struct kiss_thin_nfa_pattern_list_s {
+    struct kiss_thin_nfa_pattern_list_s *next;
+    kiss_thin_nfa_pattern_t pattern;
+} kiss_thin_nfa_pattern_list_t;
+
+// Array of pattern information - offset to it held per finite state, to indicate what it's accepting.
+typedef struct kiss_thin_nfa_pattern_array_s {
+    u_int n_patterns;
+    // NOTE! Always keep this last!
+    kiss_thin_nfa_pattern_t pattern[1];    // Dynamic array, not really 1
+    // Do NOT add anything here!
+} kiss_thin_nfa_pattern_array_t;
+
+static CP_INLINE u_int
+kiss_thin_nfa_pattern_array_size(const u_int n_patterns)
+{
+    // assignment of a NULL value so the Windows compiler won't cry about an unused variable.
+    kiss_thin_nfa_pattern_array_t CP_MAYBE_UNUSED *dummy = NULL;
+
+    // We subtract sizeof(->pattern), because it's already included in the sizeof
+    // of the whole struct.
+    return (sizeof(*dummy) + n_patterns * sizeof(dummy->pattern[0]) - sizeof(dummy->pattern));
+}
+
+// ThinNFA statistics
+
+// Specific ThinNFA Statistics
+struct kiss_thin_nfa_specific_stats_s {
+    u_int num_of_states;          // number of states in this thin_nfa
+    u_int num_of_final_states;    // number of final states in this thin_nfa
+};
+
+// Statistics for ThinNFA
+struct kiss_thin_nfa_stats_s {
+    struct kiss_pm_stats_common_s common;               // Run-time (per-CPU, dynamic) and build-time common statistics
+    struct kiss_thin_nfa_specific_stats_s specific;     // Build-time specific ThinNFA statistics
+};
+typedef struct kiss_thin_nfa_stats_s *kiss_thin_nfa_stats;
+
+// Compressed BNFA offset -> state depth map
+struct kiss_thin_nfa_depth_map_s {
+    u_char *mem_start;    // Array of depth per BNFA compressed offset
+    u_int size;
+    u_char *offset0;      // Positive/negative offsets are relative to this
+};
+
+#define KISS_THIN_NFA_MAX_ENCODABLE_DEPTH 255    // Fit in u_char
+
+// A Compiled Thin NFA, used at runtime
+class KissThinNFA {
+public:
+    ~KissThinNFA();
+
+    kiss_bnfa_state_t *bnfa_start;         // The first (in memory) and initial state
+    kiss_bnfa_state_t *bnfa;               // The state at offset 0 (somewhere in the middle)
+    kiss_bnfa_offset_t min_bnfa_offset;    // The offset of the first (and initial) state.
+    kiss_bnfa_offset_t max_bnfa_offset;    // The offset after the last state.
+    enum kiss_thin_nfa_flags_e flags;
+    u_int match_state_num;                            // Number of match states in the machine
+    u_int pattern_arrays_size;                        // Total size in bytes of concatenated pattern arrays
+    kiss_thin_nfa_pattern_array_t *pattern_arrays;    // A pointer to a buffer holding ALL pattern arrays, for ALL states
+    struct kiss_thin_nfa_stats_s stats;
+    u_int max_pat_len;                                // Length of the longest string
+    u_char xlation_tab[KISS_PM_ALPHABET_SIZE];        // For caseless/digitless
+    struct kiss_thin_nfa_depth_map_s depth_map;       // State -> Depth mapping
+};
+
+static CP_INLINE u_int
+kiss_thin_nfa_pat_array_ptr_to_offset(const KissThinNFA *nfa, const kiss_thin_nfa_pattern_array_t *pat_arr)
+{
+    return (const char *)pat_arr - (const char *)(nfa->pattern_arrays);
+}
+
+static CP_INLINE kiss_thin_nfa_pattern_array_t *
+kiss_thin_nfa_offset_to_pat_array_ptr(const KissThinNFA *nfa, const u_int offset)
+{
+    return (kiss_thin_nfa_pattern_array_t *)((char *)(nfa->pattern_arrays) + offset);
+}
+
+// Get a state's depth
+// For very deep states (depth >= 255), returns the maximum pattern length,
+// which would be greater/equal to the real state depth.
+static CP_INLINE u_int
+kiss_bnfa_offset_to_depth(const KissThinNFA *nfa, kiss_bnfa_comp_offset_t comp_offset)
+{
+    u_int depth = nfa->depth_map.offset0[comp_offset];
+    return (depth==KISS_THIN_NFA_MAX_ENCODABLE_DEPTH) ? nfa->max_pat_len : depth;
+}
+
+
+// Create a new empty Thin NFA.
+// Allocates the BNFA and the match_data array, but doesn't fill them.
+std::unique_ptr<KissThinNFA>
+kiss_thin_nfa_create(
+    u_int match_state_num,
+    kiss_bnfa_offset_t min_offset,
+    kiss_bnfa_offset_t max_offset
+);
+
+
+// Add a pattern (with given id, flags and length) to a list.
+// pat_list should point to the head of the list, *pat_list may be modified.
+kiss_ret_val
+kiss_thin_nfa_add_pattern_id(
+    kiss_thin_nfa_pattern_list_t **pat_list,
+    const kiss_thin_nfa_pattern_t *pat_info
+);
+
+// Free all patterns on a list.
+void kiss_thin_nfa_free_pattern_ids(kiss_thin_nfa_pattern_list_t *pat_list);
+
+// Compile a Thin NFA
+std::unique_ptr<KissThinNFA>
+kiss_thin_nfa_compile(
+    const std::list &patterns,
+    u_int compile_flags,
+    KissPMError *error
+);
+
+
+// Validate Thin NFA
+BOOL kiss_thin_nfa_is_valid(const KissThinNFA *nfa_h);
+
+void
+kiss_thin_nfa_exec(KissThinNFA *nfa_h, const Buffer &buffer, std::vector> &matches);
+
+// Dump a PM
+kiss_ret_val kiss_thin_nfa_dump(const KissThinNFA *nfa_h, enum kiss_pm_dump_format_e format);
+
+// Debugging macro wrappers.
+// All get a format string plus parameters in double parentheses:
+//     thinnfa_debug(("%s: hello, world\n", rname));
+// Meaning of each macro:
+//   thinnfa_debug_critical - Critical error, printed by default.
+//   thinnfa_debug_err - Error we should live with (e.g. usage error, memory allocation), not printed by default.
+//   thinnfa_debug - Normal debug messages.
+//   thinnfa_debug_major - Debug messages about several major events in Thin NFA construction. Use sparingly.
+//   thinnfa_debug_extended - Low level debug messages, which may be printed in large numbers.
+//   thinnfa_dbg - An "if" statement checking the debug flag (equivalent to thinnfa_debug).
+#define thinnfa_debug_critical(_str) kiss_debug_err(K_ERROR, _str) +#define thinnfa_debug_err(_str) kiss_debug_err(K_THINNFA|K_PM, _str) +#define thinnfa_debug(_str) kiss_debug_info(K_THINNFA, _str) +#define thinnfa_debug_major(_str) kiss_debug_info(K_THINNFA|K_PM, _str) +#define thinnfa_debug_extended(_str) kiss_debug_info(K_THINNFA, _str) +#define thinnfa_debug_perf(_str) kiss_debug_info_perf(K_THINNFA, _str) +#define thinnfa_dbg() kiss_dbg(K_THINNFA) + +#endif // __h_kiss_thin_nfa_impl_h__ diff --git a/components/utils/pm/lss_example.txt b/components/utils/pm/lss_example.txt new file mode 100644 index 0000000..28722c0 --- /dev/null +++ b/components/utils/pm/lss_example.txt @@ -0,0 +1,13760 @@ +# {\rt +^7b5c7274 +# RIFF +^52494646 +# /JBIG2Decode +2f4a424947324465636f6465 +# %PDF +^25504446 +# \xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1 +^d0cf11e0a1b11ae1 +# %pdf- +257064662d +# 0&\xb2u\x8ef\xcf\x11\xa6\xd9\x00\xaa\x00b\xcel +^3026b2758e66cf11a6d900aa0062ce6c +# strlstrh +7374726c73747268 +# RIFF +^52494646 +# BM +^424d +# BM +^424d +# \xffWPC +^ff575043 +# C$/Documents +43242f446f63756d656e7473 +# \x0039vmpami\x18\x00\x00\x00\x01\x00\x00\x00,\x00\x00\x00 +003339766d70616d6918000000010000002c000000 +# \x06rcl\x03l\x02\x0c\x95 +0672636c036c020c95 +# c\x01wna\x9cr\x00l\x00c\x01g\xc1r\x01 +6301776e619c72006c00630167c17201 +# 39vm +3339766d +# language.eng +6c616e67756167652e656e67 +# .pdf +2e706466 +# PK\x03\x04 +504b0304 +# send_one() +73656e645f6f6e652829 +# m_obj=new activexobject( +6d5f6f626a3d6e657720616374697665786f626a65637428 +# if (m_obj==null) +696620286d5f6f626a3d3d6e756c6c29 +# onstart="document. +6f6e73746172743d22646f63756d656e742e +# %PDF- +255044462d +# history.go( +686973746f72792e676f28 +# 2-2D05CB +322d324430354342 +# EMF +20454d46 +# document.all(i).setcapture() +646f63756d656e742e616c6c2869292e736574636170747572652829 +# avi +61766920 +# LIST\xa0\x84\x01\x00movi00dc\xe3\x02\x00\x00\xd0\xd8 +4c495354a08401006d6f766930306463e3020000d0d8 +# LIST\x90\x10\x00\x00strlstrh8\x00\x00\x00vidsMJPG +4c495354901000007374726c7374726838000000766964734d4a5047 +# RIFF +^52494646 +# -moz-column- +2d6d6f7a2d636f6c756d6e2d +# -moz-column- +2d6d6f7a2d636f6c756d6e2d +# -moz-column- +2d6d6f7a2d636f6c756d6e2d +# LsCM +4c73434d +# XFIR +^58464952 +# \x0aH\x89\xecU{LSi\x16 +0a4889ec557b4c536916 +# B-5DCCC7 +422d354443434337 +# 7-DB92E7 +372d444239324537 +# F-51EBEA +462d353145424541 +# 2-5DE3D1 +322d354445334431 +# var woo=0.4444444444444444444444444444444444444444444444444444444444444444444 +76617220776f6f3d302e34343434343434343434343434343434343434343434343434343434343434343434343434343434343434343434343434343434343434343434343434343434343434 +# var pi = 3.1415926535897932384626433832795028841971693993751058209749445923078164 +766172207069203d20332e31343135393236353335383937393332333834363236343333383332373935303238383431393731363933393933373531303538323039373439343435393233303738313634 +# MM\x00* +^4d4d002a +# II*\x00 +^49492a00 +# 6F063-00 +36463036332d3030 +# 8-006097 +382d303036303937 +# 8-006097 +382d303036303937 +# C-312CA8 +432d333132434138 +# catch (e) {} ",701 +636174636820286529207b7d20222c373031 +# setInterval("try +736574496e74657276616c2822747279 +# setInterval(' +736574496e74657276616c2827 +# xdomainaccess() +78646f6d61696e6163636573732829 +# row.clearAttributes(); +726f772e636c6561724174747269627574657328293b +# tr.clearAttributes(); +74722e636c6561724174747269627574657328293b +# tHeader("Content-Length" +744865616465722822436f6e74656e742d4c656e67746822 +# estHeader("Referer" 
+65737448656164657228225265666572657222 +# estHeader("Host" +6573744865616465722822486f737422 +# 3-1AA39B +332d314141333942 +# F-750656 +462d373530363536 +# c-ed11e1 +632d656431316531 +# 7-79F01D +372d373946303144 +# flashObject +666c6173684f626a656374 +# flashObject +666c6173684f626a656374 +# jp2c\xffO\xffQ +6a703263ff4fff51 +# /jpxdecode +2f6a70786465636f6465 +# %pdf +^25706466 +# P\x00P\x004\x000\x00\x00\x00\x00\x00 +500050003400300000000000 +# level\x0dFifth level +6c6576656c0d4669667468206c6576656c +# LsCM +4c73434d +# XFIR +^58464952 +# var pi=3+0.14159265 +7661722070693d332b302e3134313539323635 +# "31337" + 0.31337313 +22333133333722202b20302e3331333337333133 +# +3c656d62656420747970653d27617564696f2f6d696469273e +# setTimeout(go, 1); +73657454696d656f757428676f2c2031293b +# asMimeTypes.shift +61734d696d6554797065732e7368696674 +# CollectGarbage +436f6c6c65637447617262616765 +# document.appendChild +646f63756d656e742e617070656e644368696c64 +# %PDF- +255044462d +# window.pkcs11.addmodule( +77696e646f772e706b637331312e6164646d6f64756c6528 +# this.points = f2() +746869732e706f696e7473203d2066322829 +# var aa = a[2].split('-') +766172206161203d20615b325d2e73706c697428272d2729 +# var a = arrelement.split('/') +7661722061203d20617272656c656d656e742e73706c697428272f2729 +# function f1 ( arrelement ) +66756e6374696f6e206631202820617272656c656d656e742029 +# Gs.push ( new G( Cs[i] )) +47732e707573682028206e65772047282043735b695d202929 +# var cs = [ '|h|3s', 'o|h|0,s', 'm|a|,15,','jn|a|' ] +766172206373203d205b20277c687c3373272c20276f7c687c302c73272c20276d7c617c2c31352c272c276a6e7c617c27205d +# this.os = f() +746869732e6f73203d20662829 +# function G ( d ) +66756e6374696f6e2047202820642029 +# 'Quantum|H|0,25,0,0,23 +275175616e74756d7c487c302c32352c302c302c3233 +# 'Pwnstar|H|26,12,20,9 +2750776e737461727c487c32362c31322c32302c39 +# function guild ( initData ) +66756e6374696f6e206775696c64202820696e6974446174612029 +# // Code by: Azka +2f2f20436f64652062793a20417a6b61 +# function set_timers +66756e6374696f6e207365745f74696d657273 +# <RecpA4><item> +2623363052656370413426233632262336306974656d26233632 +# +3c5265637041343e3c6974656d3e +# MM\x00* +^4d4d002a +# II*\x00 +^49492a00 +# \x03\xaf\x1e\xbe}\xeb\x06w\xed\xf5\xc4\xeb=\x8a\xbe\xdf +03af1ebe7deb0677edf5c4eb3d8abedf +# \x9fh\x9b\x0c\xa7.xo$\xf2\xe6\xd4i\x8f\xf3v +9f689b0ca72e786f24f2e6d4698ff376 +# \xfa\xa6a\xe3\xe7e\x83\x1a\xfd\xaf:\x9e7[]\xff +faa661e3e765831afdaf3a9e375b5dff +# PK\x03\x04\x14\x00\x06\x00\x08\x00\x00\x00!\x00\x89^ +504b030414000600080000002100895e +# shell: +7368656c6c3a +# testnode.onreadystatechange = function () +746573746e6f64652e6f6e726561647973746174656368616e6765203d2066756e6374696f6e202829 +# style.quotes +7374796c652e71756f746573 +# .applyElement +2e6170706c79456c656d656e74 +# scen_num\x1e\x01\x00 +7363656e5f6e756d1e0100 +# \x09\x00\x04\x00\x07\x00\x10\x00\x0b\x02\x0c +^09000400070010000b020c +# \xd7\xcd\xc6\x9a\x00\x00 +^d7cdc69a0000 +# reporttest(rep) {\x0d\x0avar +7265706f7274746573742872657029207b0d0a76617220 +# &&\x0d\x0ae.indexof('s +2026260d0a652e696e6465786f66282773 +# \x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a +2e646c6c222c20310d0a656e64207375620d0a3c2f7363726970743e0d0a3c2f626f64793e0d0a3c2f68746d6c3e0d0a +# ()">\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a \x0d\x0a +2e646c6c222c20310d0a656e64207375620d0a3c2f7363726970743e0d0a3c2f626f64793e0d0a3c2f68746d6c3e200d0a +# ()">\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a \x0d\x0a +2e646c6c222c20310d0a656e64207375620d0a3c2f7363726970743e0d0a3c2f626f64793e0d0a3c2f68746d6c3e20202020200d0a +# 
()">\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a\x0d\x0a

\x0a +3c70207374796c653d22666c6f61743a6c656674223e3c2f703e0a2020202020202020202020203c61207374796c653d22666c6f61743a6c656674223e3c2f613e +# zoom:3000%;\x0d\x0a } +7a6f6f6d3a33303030253b0d0a207d +# \x1f\xa0\xab\xcd\xff\xff\xff\xff\xff\xff +^1fa0abcdffffffffffff +# \x9c\xcb\xcb\x8d\x13u\xd2\x11\x91X\x00\xc0OyV\xa4 +^9ccbcb8d1375d211915800c04f7956a4 +# \x01\x00\x09\x00\x00\x03\x11\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\xff\xff\xff\xff\x13\x022\x00\x96\x00\x03\x00\x00\x00\x00\x00 +010009000003110000000000050000000000ffffffff130232009600030000000000 +# 5c6a8bcea23c6afc538370b457dea04f4c421164f946a1f7d79d0f97aeff0b07798c39615117c420346b82e99d +356336613862636561323363366166633533383337306234353764656130346634633432313136346639343661316637643739643066393761656666306230373739386333393631353131376334323033343662383265393964 +# 2c5c5c55749918f57e1af047aef8368fdf450f9a5cec7fd7bf209a60ef8b5f82dc7701b0c7d3c267590033520f +326335633563353537343939313866353765316166303437616566383336386664663435306639613563656337666437626632303961363065663862356638326463373730316230633764336332363735393030333335323066 +# \xc4\x10\x89_\x04\x8dV\x0c\x83\xc8\xff\xf0\x0f\xc1\x02H\x85\xc0\x7f\x0a\x8b\x0e\x8b\x11\x8bB\x04V\xff\xd0\x8bL$\x10_^\x89)][Y\xc2\x04\x00\xcc\xcc\xcc\xcc\xcc\xcc\xcch\x0e\x00\x07\x80\xe8\xb6\x01\x00\ +c410895f048d560c83c8fff00fc1024885c07f0a8b0e8b118b420456ffd08b4c24105f5e89295d5b59c20400cccccccccccccc680e000780e8b6010000cccccccccccc8b542404568bf18b068b48f083e8103950087d1585d27e +# \x00\x00\x89\x8d\xb8\xbb\xff\xff\xc7\x85\xb8\xbb\xff\xff8\x18\x06\x10j\x00\x8b\x8d\xb8\xbb\xff\xff\xe8k\x12\x00\x00\xc7E\xfc\x00\x00\x00\x00\x8b\x85\xb8\xbb\xff\xff\xc7\x00\xbci\x02\x10\xc7\x85\xe4\x +0000898db8bbffffc785b8bbffff381806106a008b8db8bbffffe86b120000c745fc000000008b85b8bbffffc700bc690210c785e4bbffff512fa20168fc3f00006a008d8de8bbffff51e834f5000083c40c68040100008d95e4 +# \xec3\x09\xd2\x0e\x0f\x84\x18\x89#\x04<\x0d\xbc\x10\x81\x8c\xde\xe3\xe6\x11\x09=\xaf\xae\xdd\xe9eE\x0f\xcea\xf8\x0fh,,Z\xe0<\xba\x16\x09\xb24D\x15\xe9 Ya1\x97\x83!l2\xf8\xaa$\xb1p_8\xf4\xb8\xbe\xa8Or +ec3309d20e0f84188923043c0dbc10818cdee3e611093dafaedde965450fce61f80f682c2c5ae03cba1609b2344415e9205961319783216c32f8aa24b1705f38f4b8bea84f726fe983be73de77e39e53a46907580e910a314e72 +# \xd6\xeedh\xce\xc3{\xd8-\x8f8y\xda\xe9\xa0\xf8\x05?x\x8c\xe3\x9c\x9c\xf3m\x1a(\xce\xcd9\xb1'\xb1\xd5\x02X-\x0a\x0b\x8b(\x0as\x09\xb4\x87r\xcfy\xb7\xe6\x82F-\xe0\\x13\xe6\x06MX\xd8\x94\x18\xc1"\xc5:@\ +d6ee6468cec37bd82d8f3879dae9a0f8053f788ce39c9cf36d1a28cecd39b127b1d502582d0a0b8b280a7309b48772cf79b7e682462de05c13e6064d58d89418c122c53a4003258006dae95a6038d6c5e83a38dd629ed531b46e +# \x04\xd0fC\x15\xd2\xff\xff\xd3\xd0fD\xa0t\xd7\xd3\xd0fE\x0f\x04\x00\x00\x10\x04\x00\x00\x10\xb1\xff\xff\xd2H\x00\x00\x06\x03\x08\x0a\x0b\xe2\x02\xd00 \x82c\x07 \x82c\x06 \x82c\x05 \x82c\x04 \x82c\x03 +04d0664315d2ffffd3d06644a074d7d3d066450f0400001004000010b1ffffd24800000603080a0be202d03020826307208263062082630520826304208263032082630220800563042400630521826306d1603eb312080000d0 +# f ,2'O!\x02`"f ,4`\x0df\x1cf\x1da\x1e`"f ,4'O!\x02`\x18f\x19`\x08F$\x01\x125\x00\x00`\x18f\x19`\x08F%\x01`\x15\x87\x80\x15\xd6\xd0\xd2B\x00`\x03\x87h\x14]&\xd0f\x14O&\x01]'O'\x00]\x0f`\x10f(\xd0f +66202c32274f2102602266202c34600d661c661d611e602266202c34274f2102601866196008462401123500006018661960084625016015878015d6d0d2420060038768145d26d066144f26015d274f27005d0f60106628d066 +# fa78cdc5b7776e3ffdee01b9e960b61bc3ae2df5e073d7618f13157382531f0c6ff90dcbcdabd66d4b3f01c339 
+666137386364633562373737366533666664656530316239653936306236316263336165326466356530373364373631386631333135373338323533316630633666663930646362636461626436366434623366303163333339 +# ca78e3b8b53acbc21c6f40966ee3ae2a52f3cb7b17332c127a4b577109d2ded482366a4d530eaf5ea07d57a23d +636137386533623862353361636263323163366634303936366565336165326135326633636237623137333332633132376134623537373130396432646564343832333636613464353330656166356561303764353761323364 +# x\x82hl\x84-\xac\xbd\x8bU\xf4\xcc\xfe\xfd\xa7\xdfot\xbdt`\xbf\x18\xd9/>\xa57A\xb1\xd3>\xf8\x17\x9cg\xae\xe6\x0a\x85\xfc\x17R\xfe\x0b\xa8\xf0I(\xb6\xe4\x0b\xff\x02\xe7\xb2\xb8\xbc\x0aendstream \x0aend +7882686c842dacbd8b55f4ccfefda7df6f74bd7460bf18d92f3ea53741b1d33ef8179c67aee60a85fc1752fe0ba8f04928b6e40bff02e7b2b8bc0a656e6473747265616d200a656e646f626a200a322030206f626a200a3c3c0a +# E\xa6\xc9\x8d,?\x93\x8c\x91c\xe4\x062@\xb6\x92\xe7\xb2q\xec\xad\x90\x87\x93\xe7rrE\x85vg\x1c\xa3\xa4F\xae&\xaf&}\xe4\x1e\x16\xa7R\x1dTr=\xbb\xce\xc9\xa7\x81\xace\xf17\x91\x8dd\x90\x1c!\xafb\xf1W\x91Q +45a6c98d2c3f938c9163e4063240b692e7b271ecad908793e77272458576671ca3a446ae26af267de41e16a7521d54723dbbcec9a781ac65f137918d64901c21af62f15791517288bc9c94a49bec67fd9df61e969f93f77696ef +# \x94\xcbAr\xc35`,\xe1\x1b\xa4U\x16\xc8\x0f8Ud+\x94\xa3YGm#\xe2W\xe2\x89,$Z[\x13@\x0cTR\xaa1\x94p\x01H\xb0dm\x14\x99X!\x9c\xa2\x1c\xc4\xc2A)\x14\xb8Jg\x12\x1exY9pH\xf9\xe4\xc37q\x7f\xb7gI\xe6:X+\xa2\xf6\x8a\x92v#\xeaa\x99\x02\xaf\x14 +aa807ed2f16cc431d595e3afbb7264b55f5a4ded97be8cdabff9b2fe3b580863b44f718afc3eb84a67121e7859397048f9e4c337717fb76749e63a582ba2f68a927623ea619902af14c2ce1d6f2085c0e542978b88095ee9ccb96b095e8938bf7a7d5d +# \xdfQ\x08\xa57\x12\x13t\xb8/7\xcd,\xd3R\xcd\xcd\x03\xb3\x1c\x94\xdbC\xc0N\x8c\xc7>\x15\xe3\xf6\;\xc4\x8d\x04\x85\xb8\x18\x94\xbc\xdaAu\xc7\x11\x02o`\xff`\xf9I\x80\xf8\x0a\xf2\xbe\xd3\x9c"\xf0]S,\xd5T +df5108a537121374b82f37cd2cd352cdcd03b31c94db43c04e8cc73e15e3f65c3bc48d0485b81894bcda4175c711026f60ff60f94980f80af2bed39c22f05d532cd55417e325a59ca27f541a86d2a723bb6fa55a6600733152e2ab05d2f95f2b849892 +# CWS\x08\xdc\x1f\x00\x00x\xda\x94\x18kw\x1b\xd5q\xb4\xbb\x92\xae\xe5\xbcp\x0c\xca\xc3\x09\xa6P\x12\xb7!\x84\x10\x1a\xe28\x8am9vT\x9cU\x90Mh\x9a\x06w-\xaf\xa4\x0d\x92V]\xad\x83]\xe8\x8b6@\xa0\x0dm)"\xe +43575308dc1f000078da94186b771bd571b4bb92aee5bc700ccac309a65012b72184101ae2388a6d3976549c55904d689a06772dafa40d92565dad835de88b3640a00d6d2922e11128ef67dbd3d29efe897eea39eda79ec3a79e9e9ed3dfe0ceccdd5d +# A@h\xb2\x87\xfel\x91\xc3#|\x9dC\x05k\x0c\x93\xbd2\x9d\xb6\x06C\xb6/I\x86\x89^\xe16x\xf0\xba.\xa8\xc0c\x03'\xd1m\xa5\xa5V\xb0\xb7D +414068b287fe6c91c3237c9d43056b0c93bd329db60643b62f4986895ee13678f0ba2ea8c0630327d16da5a556b0b744 +# \xfd\x98+!u\xcdo6\x85b\xb8^2c\xc1\xb6v=\xcb#+)Z\xf1\xae\x1d\x13\xb6\\xe9\xab\x1d\xa1\x02\x12\x8f\x846\xbbv\xbe\xdfg\x8f\xa4\x95\x7f +fd982b2175cd6f368562b85e3263c1b6763dcb232b295af1ae1d13b65ce9ab1da102128f8436bb76bedf678fa4957f20 +# [\x0f6\xa5@\xb1\xe5\xfa\xb2\xdd\xdf\xe9J\xf5\x85\xdd\xf5h\xc9\xad\xb8\xbe\x1b\xdd\xa0\xc0Ry\x83\xb9\xd9\x8e#w\xa3\x90)\x0a\x9c,\x8d&W\xfe\x9a\xa0\xf5\x1f +5b0f36a540b1e5fab2dddfe94af585ddf568c9adb8be1bdda0c0527983b9d98e2377a390290a9c2c8d2657fe9aa0f51f +# \xf1\xdf\xe5\xd8\xe1\xae\xeb\x07jC\xaa\x90\xe4\x03\xa2fb\xb7\xb7\x87\xec\x03j\x15\xecJ\x15=i\xfa[\x81\xc0\xad1R\x87b%`W\x86\xcb\xe5ob{\xf8\xbdW]S\xee\x9eT\xc3 +f1dfe5d8e1aeeb076a43aa90e403a26662b7b787ec036a15ec4a153d69fa5b81c0ad3152876225605786cbe56f627bf8bd575d53ee9e54c3 +# 
\x09\x0d\xc5\x89\xa9[\xd7\xbd\x182\x14\x1e:\xb2\xb21\x0a\x8a\x14Vr\x86\xf2\x87x\xe1\xa1\xb43m\x06f\xe0\x91\x19~\x14\xc3YK$\x8e\xec\x0e\xe8A\xda\xfdt\xbe\xf3\x9d\xef\xb2\xfb\xd7\xdf\xbf\xfe\x0e\xa0\x8 +090dc589a95bd7bd1832141e3ab2b2310a8a14567286f28778e1a1b4336d0666e091197e14c3594b248eec0ee841dafd74bef39defb2fbd7dfbffe0ea0827b26dec707063e +# .dat\x0b.H,\xcf\xb35\xe2\xf2 +2e6461740b2e482ccfb335e2f2 +# 9.\xe6i\xcf\xf2\x88\xbe\x04X4e\x92E\xba\x95\xe9dt\x9e[<\x05;!\x85\x12\x90L\xc5F\x8dd\xe9O\xe8G\x82&\xe9\)$>#i\x1d# +392ee669cff288be045834659245ba95e964749e5b3c053b218512904cc5468d64e94fe8478226e95c29243e23691d23 +# mB;M\xdf\x19\x9c\xefQ'h\x12!^\x0c\x05\x94i/P\xa1\xe8\x13[\x09qfX@\x85\xf6"\xb3\xf8?\x01\xd7\x07\x04\x08\xbc\xdb\xcfm\xba\x97\x9b\x95 +6d423b4ddf199cef51276812215e0c0594692f50a1e8135b097166584085f622b3f83f01d7070408bcdbcf6dba979b95 +# \x0b\xb7m\xdc\xc9\xe3.\xee\xd9\xb8\x9f\xc7\x12\x96\x05\xce%>;\xd5\xd8\xd3~\xcbT\xddtY\x09\xc9O\x9e\x8ey\xee\xc7\x15\x13u\x0bQ\xb8Ux\xd6\x0e\xd3\x8e +0bb76ddcc9e32eeed9b89fc7129605ce253e3bd5d8d37ecb54dd745909c94f9e8e79eec71513750b51b85578d60ed38e +# \xc1 /G\xb4\xf3Y\xaa\xb4D\x15\x96)q\x053\x14=M\xbfC\x89\xb6k\x96n\xa0H\xed\xb9\x8eYmO\xc7EN\xa5]\x8a{\xba\xfc\x13PK\x07\x08 +c1202f47b4f359aab444159629710533143d4dbf4389b66b966ea048edb98e596d4fc7454ea55d8a7bbafc13504b0708 +# \x94i/P\xa1\xe8\x13[\x09qfX@\x85\xf6"\xb3\xf8?\x01\xd7\x07\x04\x08\xbc\xdb\xcfm\xba\x97\x9b\x95\x1df\xbeI\xfb\xde\x11\xfc\x16i/\xb1\xaaC~\x07 +94692f50a1e8135b097166584085f622b3f83f01d7070408bcdbcf6dba979b951d66be49fbde11fc16692fb1aa437e07 +# \x93\xd3%\xdf\x90\xdb\x91~J\xf9L\xac\x87\xf8\xe1\x00rb\xbb\xbe\xe5-E\xcdV\xe0\xcb\xd0\xa4\x85}-Kj\x15\x9aZ\xd4h(-p\xbeX\x1aP\x11\x10 +93d325df90db917e4af94cac87f8e1007262bbbee52d45cd56e0cbd0a4857d2d4b6a159a5ad468282d70be581a501110 +# \x1e\x9dV\xa3A\xed\x1a\xce\xdc\xb7M\xd3&hg\x1e\x94\x1f\x8e\x1c\xa1\xdb\xd1N\xbd@\x961\xeb\x0boG6;\xaf\xa8\x851\xad\x08}\x17\xf3\xe0\xf0\xffxC +1e9d56a341ed1acedcb74dd32668671e941f8e1ca1dbd14ebd409631eb0b6f47363bafa88531ad087d17f3e0f0ff7843 +# * \x9d\x16\xc3r\xa1>\xaa\xb6Z<\x07\x0d\x1e\x80\x12\xd3m\xa9\x1a\xd2\xec\xf9\x96\xda\x1f\x0c\xcf\x90+\x0c\xa5\x0c\x87\xab#M^7w\xa5\xa9\xfe\xd9DS\xee +2a209d16c372a13eaab65a3c070d1e8012d36da91ad2ecf996da1f0ccf902b0ca50c87ab234d5e3777a5a9fed94453ee +# \x829\xc5d\xb2g\xf98\xe0\x8a\x90\xe0\xb5&a\x08H\xd8\xack\xc9%\xd3\x94\xda\x0e\x1c\x1e\xef&sj+T\xd6\xac\xd6\x0d>~\xe7\x80\x0c\x97\e\x013 +8239c564b267f938e08a90e0b526610848d8ac6bc925d394da0e1c1eef26736a2b54d6acd60d3e7ee7800c975c650133 +# FWS +^465753 +# 3F8A6C33-E +33463841364333332d45 +# 75-11D1-A3 +37352d313144312d4133 +# 5D08B586-3 +35443038423538362d33 +# D2D588B5-D +44324435383842352d44 +# 00022613-0 +30303032323631332d30 +# 67DCC487-A +36374443433438372d41 +# 466D66FA-9 +34363644363646412d39 +# ECABB0BF-7 +45434142423042462d37 +# B4B3AECB-D +42344233414543422d44 +# E846F0A0-D +45383436463041302d44 +# 85BBD920-4 +38354242443932302d34 +# C7B6C04A-C +43374236433034412d43 +# EEED4C20-7 +45454544344332302d37 +# D99F7670-7 +44393946373637302d37 +# B0516FF0-7 +42303531364646302d37 +# 9478F640-7 +39343738463634302d37 +# 860D28D0-8 +38363044323844302d38 +# 6D36CE10-7 +36443336434531302d37 +# 510A4910-7 +35313041343931302d37 +# 2A6EB050-7 +32413645423035302d37 +# 8EE42293-C +38454534323239332d43 +# 3050F391-9 +33303530463339312d39 +# FBEB8A05-B +46424542384130352d42 +# 7849596A-4 +37383439353936412d34 +# AF604EFE-8 +41463630344546452d38 +# 01E04581-4 +30314530343538312d34 +# 52CA3BCF-3 +35324341334243462d33 +# FD78D554-4 +46443738443535342d34 
+# D2923B86-1 +44323932334238362d31 +# 31087270-D +33313038373237302d44 +# 18AB439E-F +31384142343339452d46 +# 083863F1-7 +30383338363346312d37 +# 33D9A762-9 +33334439413736322d39 +# 33D9A760-9 +33334439413736302d39 +# 4EFE2452-1 +34454645323435322d31 +# 33D9A761-9 +33334439413736312d39 +# E0F158E1-C +45304631353845312d43 +# 860BB310-5 +38363042423331302d35 +# 03D9F3F2-B +30334439463346322d42 +# _Marshaled_pUnk +5f4d61727368616c65645f70556e6b +# QuickTime.QuickTime +517569636b54696d652e517569636b54696d65 +# 02BF25D5-8C17 +30324246323544352d38433137 +# click to c +636c69636b20746f2063 +# PICT +50494354 +# +3c626f64793e3c7363726970743e61203d206e657720616374697665786f626a65637428276e6d73612e6d656469616465736372697074696f6e27293b612e6469737076616c7565203d20313b3c2f7363726970743e3c2f626f64793e3c2f68746d6c3e +# \x0d\x0aInstallTrigger.install.call(document," +0d0a3c7363726970743e0d0a496e7374616c6c547269676765722e696e7374616c6c2e63616c6c28646f63756d656e742c22 +# color=expression( +636f6c6f723d65787072657373696f6e28 +# color%3Dexpression%28 +636f6c6f7225334465787072657373696f6e253238 +# .class\xca\xfe\xba\xbe\x00\x00\x003\x00\x9d\x0a\x00$\x00N\x0a\x00#\x00O\x07\x00P\x0a\x00\x03\x00Q\x0a\x00\x03\x00R\x07\x00S\x0a\x00#\x00T\x07\x00U\x0a\x00\x08\x00N\x08\x00V\x0a\x00\x08\x00W\x09\x00X\ +2e636c617373cafebabe00000033009d0a0024004e0a0023004f0700500a000300510a000300520700530a002300540700550a0008004e0800560a0008005709005800590a0008005a08005b0a0008005c0a0006005d0a0006005e0a005f0060070061 +# \x00 \x00\x03\x00\x04\x00\x00\x00\x01\x10\x10\x00\x05\x00\x06\x00\x00\x00\x02\x00\x01\x00\x07\x00\x08\x00\x01\x00\x09\x00\x00\x00j\x00\x05\x00\x06\x00\x00\x00\x10*+\xb5\x00\x01*\x1c\x1d\x15\x04\x19\x +0020000300040000000110100005000600000002000100070008000100090000006a00050006000000102a2bb500012a1c1d15041905b70002b100000002000a0000000e00030000001e0005001f000f0020000b00000034000500000010000c000f00 +# PK\x03\x04\x0a\x00\x00\x08\x00\x00\x03r]C\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x04\x00META-INF/\xfe\xca\x00\x00PK\x03\x04\x0a\x00\x00\x08\x00\x00\x02r]C!L\x9c\x8f\xb1\x00\x00\x00\x +504b03040a000008000003725d43000000000000000000000000090004004d4554412d494e462ffeca0000504b03040a000008000002725d43214c9c8fb1000000b1000000140000004d4554412d494e462f4d414e49464553542e4d464d616e696665 +# ComboList +436f6d626f4c697374 +# ComboList +436f6d626f4c697374 +# VSFlexGrid.VSFlexGridL +5653466c6578477269642e5653466c6578477269644c +# c0a63b86-4b21-11d3-bd95-d426ef2c7949 +63306136336238362d346232312d313164332d626439352d643432366566326337393439 +# \xf4cb+\xe7\x09\xfc\x19p\x17\xcao\x06\x86\xe8e\xaf\xfc\x01\xe6\xd2\x02\xaf\\x13\x1c\x03\xae\x0fZ\xf4G!\xff7\xba78\x85\x95\xf1X\xf0j\x92\xa4g\x83_\xa0y\x12\x85\xee\xa0e\x12\xcb/\x92\x1aB'P +f463622be709fc197017ca6f0686e865affc01e6d202af5c131c03ae0f5af44721ff37ba37388595f158f06a92a467835fa0791285eea06512cb2f921a422750 +# \xfdW\x8e=*\x15\xf0\x97\xa4\x09R)G\x86\xa8\xb88\x07B\xa7<*T\x1a%\xcdW\xbd\x0e\x03\xaeo\x06\xbc\xfd\xadg7u\x16\x0a0Su0\x18e"g\xe4\xed\xf6\xf8\x98\xe3\x14666\xda\x991-\xa7 +fd578e3d2a15f097a40952294786a8b8380742a73c2a541a25cd57bd0e03ae6f06bcfdad673775160a3053753018652267e4edf6f898e314363636da99312da7 +# \xd4\xa2PQ\x14\xd1\x16\xe8G\xa5%\xa2jQ\xa5\x16\x15!\x90(mU*\xd4\x0fR\x0b\x15\xf4\xdc7\xb3\xbb\x13\x7f\xa0i\xa9Z\xa9\x8c=g\xde\xbd\xef\xbe\xfb\xee\xbb\xf3\xde\xbd\xef\xcd\xee\xbc\xfe\x04\x05\x88(\x88\ +d4a2505114d116e847a525a26a51a516152190286d552ad40f520b15f4dc37b3bb137fa069a95aa98c3d67debdefbefbeebbf3debdefcdeebcfe0405882888fb +# 
\x90\xe8i!\x16\x03\x7f\xe1&\xea\xb16h\xbcwy\x1d\x93\xfc\x0d\\xc8\xe3<\xe7\xa5\x0a\xac?WP\xf6\x88\xf9c\xac\x81\x0838\xfb\x9f[\xd5\xdd\xa5\x87\x1d\x98e\xbfo\xf7\xdbm\xa9N\xdds\xce\xbd\xe7\x9c{\xee\xb9\xf7\xdc[\xd5=|\xcb\xdd\xe4 "'\xae\xb7\xdf&z\x9c\xcc\xcf.z\xe7\xcfi\\x055\x7fQ +328404c83e083338fb9f5bd5dda5871d9865bf6ff7db6da94edd73cebde79c7beeb9f7dc5bd53d7ccbdde4202227aeb7df267a9ccccf2e7ae7cf695c05357f51 +# \xb7!~l\xbf\x9fW\x1d\xfc\xfa\xf4f\xe4\x89\x09\xc0 +b7217e6cbf9f571dfcfaf466e48909c0 +# SilverApp1.dllPK +53696c766572417070312e646c6c504b +# value="SilverApp1.xap" +76616c75653d2253696c766572417070312e78617022 +# \x0e\xf5L\x95\xda\xc0\xd4\x00\x89PA\x8f\xa3\xfa\xfb\x7f\xf3\xd6\xe3\x1c/\xee\xc1\xadg\x16,[-\xc4'U\xcb(\x9b9\xd5\xaab\x93\xaa\x92W\xcdv\xb9`\xe5\x0cS\xd7\xb2\xed\xf2Q\xd5\xb44\xa3\xd8\x9f\x8a'\xf8\xd +0ef54c95dac0d4008950418fa3fafb7ff3d6e31c2feec1ad67162c5b2dc42755cb289b39d5aa6293aa9257cd76b960e50c53d7b2edf251d5b434a3d89f8a27f8d72e0f9675bb6caafd45b56c9b8ade2e +# \xc6\xad\x02\xdb\xa2\xdb.7\xe0\xc6\xb0\x09cJf\x10\xc0\xd8\xc15\xb1\x8f0\xb1\x09\xe5\x80y'`\xecrn\x19\x13\x1a^\xf3\xa3\xf25\x12}\x91\xc4\x84\x95\xe8\xc8Qb\xaa\xc4}\xdb%\xe67t{\xce@\xaao\xfd\x05\x09\xe +c6ad02dba2db2e37e0c6b009634a6610c0d8c135b18f30b109e580792760ec726e19131a5ef3a3f235127d91c48495e8c85162aac47ddb25e637747bce40aa6ffd0509e682bcc9b3e12c133ce6672071a3b593edd825bcb6df730b2f98588718 +# PK\x03\x04\x14\x00\x00\x08\x08\x006\xaa\C\xca0>G\xc2\x00\x00\x00A\x01\x00\x00\x10\x00\x00\x00AppManifest.xaml\x85\x8fA\x0a\xc20\x10E\xf7\x82w\x089@\xd2\x0aV)V\x10t+E\xc5}MS\x1a\xc8$!\x89\x98\x9e\xcd\ +504b030414000008080036aa5c43ca303e47c200000041010000100000004170704d616e69666573742e78616d6c858f410ac2301045f78277083940d20a56295610742b45c57d4d531ac8242189989ecd8547f20ad6b64a1782db37f3fe9f79de1fab2d +# s\xd9\x84z\xf3\x9f\x84\xd0\xa0\xd9\x84P'{T\x86\xb0\xa1\x0b\xd0\x07.\xfa@\xce\x0f\xcb\xb4O\xad\xb1\x0d^prC\xc4/mR=\x8f^v\x9e\xbc\xb7\x0fU\x17\xd0\x81\xce\xda{\xfbQ\xbc\xe4\x91"\x0b\\xf2\xa1(J\xaa\xd4\ +73d9847af39f84d0a0d98450277b5486b0a10bd0072efa40ce0fcbb44fadb10d5e707243c42f6d523d8f5e769ebcb70f5517d081ceda7bfb51bce491220b5cf2a1284aaad4f32ee96d9768d44e6aa12e4e94f3ce42e5150babbee811845beb91 +# \x11\xd3\xc3\xa5\xa5\xbe\xebl\x1dc\xfb\x89\x0e+\x0e\xba\xf2\xea\xfd\xc7rz_!\xb5\xd9\xaf\xf8\x89v\x03\xd1,\xda\xd5\xdb\x00\x82\xb8N\xdb\xdeqY\xb5\xfc&*\xdc\xe9\xd3\x16\x9d?\x0e\xda\xf7~\xa2\xf5\xf2_\x +11d3c3a5a5beeb6c1d63fb890e2b0ebaf2eafdc7727a5f21b5d9aff8897603d12cdad5db0082b84edbde7159b5fc262adce9d3169d3f0edaf77ea2f5f25fb8e76ff29983de71b2f49e76acddc812dc5e +# PK\x03\x04\x14\x00\x00\x08\x08\x00U\xbfvC\xc0FG\x9c\xc8\x00\x00\x00]\x01\x00\x00\x10\x00\x00\x00AppManifest.xaml\x85\x8fA\x0a\xc20\x10E\xf7\x82w\x089@\xd2\x0aV)V\x10t+E\xc5}\x8dS\x0cd\x92\x90DM\xcf\x +^504b030414000008080055bf7643c046479cc80000005d010000100000004170704d616e69666573742e78616d6c858f410ac2301045f78277083940d20a56295610742b45c57d8d530c649290444dcfe6c2237905aba25410dccce2fdf9ffcfdc +# .applyElement +2e6170706c79456c656d656e74 +# .swapNode +2e737761704e6f6465 +# .appendChild(id +2e617070656e644368696c64286964 +# = document.createelement( +3d20646f63756d656e742e637265617465656c656d656e7428 +# \x00\x01\x00\x00standard jet db\x00 +^000100007374616e64617264206a657420646200 +# 7\x003\x00c\x009\x00d\x00f\x00a\x000\x00-\x007\x005\x000\x00d\x00-\x001\x001\x00e\x001\x00-\x00b\x000\x00c\x004\x00-\x000\x008\x000\x000\x002\x000\x000\x00c\x009\x00a\x006\x006 
+370033006300390064006600610030002d0037003500300064002d0031003100650031002d0062003000630034002d003000380030003000320030003000630039006100360036 +# <\x08v\xa3\x80\xf90\xeb\xb49] t"\x8dB\xff\x89E\xb0\x0f\xbe\xc1\x83\xe8+t\xbcHH\x0f\x85q\xfe\xff\xff\x83M\x94\xffj\x07X\xe9z\xfd\xff\xffj\x0aXJ\x83\xf8\x0a\x0f\x85m\xfd\xff\xff\xebH3 +3c0876a380f930ebb4395d2074228d42ff8945b00fbec183e82b74bc48480f8571feffff834d94ff6a0758e97afdffff6a0a584a83f80a0f856dfdffffeb4833 +# \x0d\x0aEnd Sub\x0d\x0a\x0d\x0a\xd1\xdd\xb3\x92\xba\xdb\xaf\x9f\xe7\xbe\xafK +^4357530d80cd0000789c249b478ef4e0da96bf73fe5f88192031610d96704e43e79cb327c839967339ad80213b6205ccd8cfa13ed1ddb392badbaf9fe7beaf4b +# \xf6\xa6\x9d\xe9N\x7f8\x8a\xb6\xc4\xaf\x91\x8aoI\x12\xc9\xa9\x09+\x1d}\x07\xe4$\xd1\x8fGT\xf7LT~\xf2\x88\x07\xb8\xac\x0c\xb2\x87\x08V\xbcl\xady\x99\x82PV#\x9e\x07\xff\x88\x86bu\x9b;3\x99\xd9 +f6a69de94e7f388ab6c4af918a6f4912c9a9092b1d7d07e424d18f4754f74c547ef28807b8ac0cb2870856bc6cad7999825056239e07ff888662759b3b3399d9 +# CWS\x0dm\xed\x00\x00x\x9c$\x9bG\x0e\xf3\xda\x96\x9d\xff[\xf5\xf0\xbaU\x06j\x16\x04\x8a95\x99s\x16)\x92=\xe6\x9c3G\xe1\xe1\xb8\xe7\xae'\xe31<\xeb\xc2\x02\x045\x14x\xce\xe6\xdek}K\x94 +^4357530d6ded0000789c249b470ef3da969dff5bf5f0ba55066a16048a393599731629923de69c3347e1e1b8e7ae27e3313cebc20204351478cee6de6b7d4b94 +# \xaf\xfc\xdf\xff\xcf?\xb1\x02\xf0\x8e\xd1b +2765909035e289a7516b129cffa6af1b0155e616c5e9db4d4af6f494aba3e798e963274098399006a2e365c4710ec6291ffb3b3b6b4c0a3af2b72929953ed162 +# CWS\x0d\xa9\xdd\x00\x00x\x9c$\x9bG\xae\xf4l\xbb\x95\xdf\x8fsD\x97\x83\xc4,,\xe1\x1c\xaa\xe9\x9csv\x079g\xbb\x9c\xc3(\x18\x06\xb3`$\xcc\x83\xdeO\xbdbk7\xf7\xb6\xfd\xdca\xadk\x95U\xd2 +^4357530da9dd0000789c249b47aef46cbb95df8f73449783c42c2ce11caae99c7376073967bb9cc3281806b36024cc83de4fbd626b37f7b6fddc61ad6b9555d2 +# \xfd\xcf\x7f\x0e\xe80m}\x80l\x80\xd9\xbc\xf3y\xd0G\xd2\xebB\xc6\xdd\x8a\xd1O\x8d\x92\xcd\x17Nm\x19\xa49\xd0j\x95\xeb\x8c\xe7\x92\xfe\x0c\xc8\xc9~\x9d\x96\x0dL>\xe7Nd\x86\x84~m6\x02.\xf3d. 
+fdcf7f0ee8306d7d806c80d9bcf379d047d2eb42c6dd8ad14f8d92cd174e6d19a439d06a95eb8ce792fe0cc8c97e9d960d4c3ee74e6486847e6d36022ef3642e +# CWS\x0d\xc4\xca\x00\x00x\x9c$\x9a\xc7\x8e\xf3j\xd6^\xbf\xd3\xdd\xf0\xd4\xfe\x01\xdf\x05\x013\x93\xe2\x909g\x8a\x090\x0c\xe6\x9c3\xa7\x9ex\xe8;\xf2\xdc7\xd5\xd6\x81\xabP\xb3\x02D\xf1}\xf6\xb3\xd7\x92( +^4357530dc4ca0000789c249ac78ef36ad65ebfd3ddf0d4fe01df05013393e29039678a09300ce69c33a79e78e83bf2dc37d5d681ab50b30244f17df6b3d79228 +# \x00d\x00e\x00f\x00g\x00h\x00i\x00j\x00k\x00l\x00m\x00n\x00o\x00p\x00q\x00r\x00s\x00t\x00u\x00v\x00w\x00x\x00y\x00z\x00[\x00\\x00]\x00^\x00_\x00`\x00a\x00b\x00c\x00d\x00e\x00f\x00g\x00h\x00i\x00j\x00 +006400650066006700680069006a006b006c006d006e006f0070007100720073007400750076007700780079007a005b005c005d005e005f0060006100620063006400650066006700680069006a006b006c006d006e006f0070007100720073007400 +# cvt \x00d\x05\x11\x00\x00\x07\xac\x00\x00\x00\x04gasp\xff\xff\x00\x03\x00\x00v\x8c\x00\x00\x00\x08glyf\x05%ef\x00\x00\x09\xb4\x00\x00\x10\xf4head\xf56md\x00\x00\x00\xdc\x00\x00\x006hhea\x09\xf4\x04\x +6376742000640511000007ac0000000467617370ffff00030000768c00000008676c796605256566000009b4000010f468656164f5366d64000000dc000000366868656109f404050000011400000024686d74781ce60d5e000001b00000020a6c6f63 +# rw\x00\x00\xb0v\x00\x00\x01\x00\x02\x00\x00\x00\x00\x00\x02\x00\x06\x09\x00\x00\x00\x00\x00\x00\x01\x00\xf4\x01\x00\x00\x00\x00lp\x07\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x +72770000b07600000100020000000000020006090000000000000100f401000000006c70070000000200000000000000000000000100000000000000ff3ad2700000000000000000000000000000000000000c0078006500630072006500740000000c +# FTP /pub at 172.16.8.207. +3c5449544c453e465450202f707562206174203137322e31362e382e3230372e3c2f5449544c453e +# stlt +73746c74 +# stlt +73746c74 +# RIFF +^52494646 +# document.body.contentEditable +646f63756d656e742e626f64792e636f6e74656e744564697461626c65 +# curv\x00\x00\x00\x00 +6375727600000000 +# 8589-11d1-B16A +383538392d313164312d42313641 +# \xd7\xcd\xc6\x9a +^d7cdc69a +# \x0areference +0a7265666572656e6365 +# .extractContents(); +2e65787472616374436f6e74656e747328293b +# document.createRange(); +646f63756d656e742e63726561746552616e676528293b +# DOMSubtreeModified +444f4d537562747265654d6f646966696564 +# DOMNodeRemoved +444f4d4e6f646552656d6f766564 +# "repro.xml", "i +22726570726f2e786d6c222c202269 +# .applyElement +2e6170706c79456c656d656e74 +# avcC\x01B\xc0\x0d\xff\xe1\x00\x1bgB\xc0\x0d\x9at\x0a\x0f\xdf\xf8\x07\x80\x0c\x98\x80\x00\x00\x03\x00\x80 +617663430142c00dffe1001b6742c00d9a740a0fdff807800c98800000030080 +# ftyp +66747970 +# sourcecode\IE_ParentProcess_SandboxEscape\x64\Release\TestDll.pd +736f75726365636f64655c49455f506172656e7450726f636573735f53616e64626f784573636170655c7836345c52656c656173655c54657374446c6c2e7064 +# estructor'\x00\x00\x00\x00\x00\x00`vector deleting destructor'\x00\x00\x00\x00`default constru +657374727563746f722700000000000060766563746f722064656c6574696e672064657374727563746f7227000000006064656661756c7420636f6e73747275 +# eId\x00\x00\x00\x00\x00GetTickCount64\x00\x00GetFileInformationByHandleExW\x00\x00\x00SetFileI +65496400000000004765745469636b436f756e743634000047657446696c65496e666f726d6174696f6e427948616e646c6545785700000053657446696c6549 +# BBFlashBack.FBRecorder +4242466c6173684261636b2e46425265636f72646572 +# a3cd4bf9-ec17-47a4 +61336364346266392d656331372d34376134 +# 436F626A64\d\\0\d +343336463632364136345c645c5c305c64 +# 021433412\d\\ +3032313433333431325c645c5c +# \\0D1BD8B85D111B16A\d\ 
+5c5c30443142443842383544313131423136415c645c +# =i\xe9Ho\xb6]0twn\x8e1jB@\x07\x95\xb6\xa5\x12\x8b\x94zY\x9a\x140@\xfc\xc5,\x8dZ%3o\x19\xb2\x09\x8bS\x02&g%s\x18 +3d69e9486fb65d3074776e8e316a42400795b6a5128b947a599a143040fcc52c8d5a25336f19b2098b53022667257318 +# ocStream.java\xad\x97]o\xdb \x14\x86\xaf\xe3_\x81z\xe5L\x99\x9b\x8b\xddE\x9b\xb6\xb6 +6f6353747265616d2e6a617661ad975d6fdb201486afe35f817ae54c999b8bdd459bb6b6 +# poc.java\xadTMO\x1b1\x10=\xef\xfe\x8aQN\xde(Z\x82JC\x11\xca\xa1\x05ZE\xa2\x05\x11\xe8\x15\x19\xef$1\xdd\xd8 +706f632e6a617661ad544d4f1b31103deffe8a514ede285a824a4311caa1055a45a20511e81519ef2431ddd8 +# \x00\x00.\x00\x00\x00 +00002e000000 +# AGNI +^41474e49 +# KEY* +4b45592a +# InsertUnorderedList +496e73657274556e6f7264657265644c697374 +# InsertOrderedList +496e736572744f7264657265644c697374 +# InsertOrderedList +496e736572744f7264657265644c697374 +# \x00\x00\x00\x00\x03\x00\x00\x00\x80\x03\x00\x00\x00\x00\x00\x00\x05\x00s\x00u\x00m\x00m\x00a\x00r\x00y\x00i\x00n\x00f\x00o\x00r\x00m\x00a\x00t\x00i\x00o\x00n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\ +000000000300000080030000000000000500730075006d006d0061007200790069006e0066006f0072006d006100740069006f006e00000000000000000000000000000000000000000000000000000028000201ffffffffffffffffffffffff000000 +# \x00\x00\x00\xef\x00\x00\x00\xf0\x00\x00\x00\xf1\x00\x00\x00\xf2\x00\x00\x00\xf3\x00\x00\x00\xf4\x00\x00\x00\xf5\x00\x00\x00\xf6\x00\x00\x00\xf7\x00\x00\x00\xf8\x00\x00\x00\xf9\x00\x00\x00\xfa\x00\x0 +000000ef000000f0000000f1000000f2000000f3000000f4000000f5000000f6000000f7000000f8000000f9000000fa000000fb000000fc000000fd000000fe000000ff00000000010000010100000201000003010000040100000501000006010000 +# \xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>\x00\x03\x00\xfe\xff\x09\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x03\x00\x +d0cf11e0a1b11ae1000000000000000000000000000000003e000300feff0900060000000000000000000000030000000300000000000000001000000500000001000000feffffff00000000000000000100000002000000ffffffffffffffffffffff +# format(attr(r)); +666f726d61742861747472287229293b +# format(calc(poc));} +666f726d61742863616c6328706f6329293b7d +# ,""].join("");}< +2c22225d2e6a6f696e282222293b7d3c2f7363726970743e3c +# = [];for(var i = 0; i < 0x1800; i++) +203d205b5d3b666f72287661722069203d20303b2069203c203078313830303b20692b2b2920 +# = [];for(var i = 0; i < 0x1800; i++) { +203d205b5d3b666f72287661722069203d20303b2069203c203078313830303b20692b2b29207b +# \xf4E\x11\x00\xefz\x86WVe\x1c\x94n\xf5W\xa0\x94 4\x8c\x96\x0e1M\xdeC\xdf\xe7X8"x\x81G\x8aN\xe8\x9f\xfb\x1e}\xb0\xd3\xe6\xc2\x01\x8f\xba'\\x0e\x8e]\xe7\xbaJ\xc8\xad\xf5Z\xc6\x0aC\xf3 +f4451100ef7a865756651c946ef557a09420348c960e314dde43dfe75838227881478a4ee89ffb1e7db0d3e6c2018fba275c0e8e5de7ba4ac8adf55ac60a43f3 +# H\xbc\x9e\x88\xde0fX-\xc7\x05\x89\xdbI\xb8g?n4\xce\x00d\xe26\xde\xeaJ$\x9d9\x7f|\xbeY\x88\xfa\x16\xd8$\xf2\xc5eS+\xa6N}\x1d\xe0{\x04=\xf0\x9af\x99\xa0L\x9f\xdb\x08\x9dtU +48bc9e88de3066582dc70589db49b8673f6e34ce0064e236deea4a249d397f7cbe5988fa16d824f2c565532ba64e7d1de07b043df09a6699a04c9fdb089d7455 +# \x82\xdc\xdd\xfe\x9d\xfd$Y@f\x04S\xd6@Av\x10\xc8\xc5\xf2\xe4\xdb\xe2v\xe7 d1\xda\x84\x82l\x10\xdd/J\x03\xdf\x80f!\xb7\x0eL\)\xad\xd7\x0c\xe3\xad_S\xc7\xf8\x03[\x03=\x9b\xcf\xcf)\xb7 +82dcddfe9dfd245940660453d640417610c8c5f2e4dbe276e7206431da84826c10dd2f4a03df806621b70e4c5c29add70ce3ad5f53c7f8035b033d9bcfcf29b7 +# (sS,sSS);\x09\x09\x09\x09\x0d\x0afor 
(i=0;i\x09\x09\x09\x09\x09\x09\x0d\x0a\x0c\x00?\x00@\x01\x00!java/awt/image/ImagingOpException\x0c\x00A\x00\x16\x01\x00\x03 +726e656c0c0015003d0c0015003e0c003f00400100216a6176612f6177742f696d6167652f496d6167696e674f70457863657074696f6e0c00410016010003 +# \x00\x1fjava/awt/image/DataBufferUShort\x0c\x00\x15\x008\x0c\x006\x009\x07\x00:\x0c\x00;\x00<\x01\x00\x19java/awt/image/ConvolveOp\x01\x00\x15java/awt/image/Ke +001f6a6176612f6177742f696d6167652f446174614275666665725553686f72740c001500380c0036003907003a0c003b003c0100196a6176612f6177742f696d6167652f436f6e766f6c76654f700100156a6176612f6177742f696d6167652f4b65 +# \x16\x01\x00\x1djava/awt/image/DataBufferByte\x0c\x00\x15\x003\x01\x00\x0ejava/awt/Point\x0c\x00\x15\x004\x07\x005\x0c\x006\x007\x01 +1601001d6a6176612f6177742f696d6167652f44617461427566666572427974650c0015003301000e6a6176612f6177742f506f696e740c001500340700350c0036003701 +# , 21000001);\x0d\x0adocument.write( +2c203231303030303031293b0d0a646f63756d656e742e777269746528 +# ;\x0d\x0a} while (0 < -- +3b0d0a7d207768696c65202830203c202d2d +# = unescape("%u4141%u4141");\x0d\x0avar +203d20756e657363617065282225753431343125753431343122293b0d0a76617220 +# \x00\x00\x01\xba +^000001ba +# f.removeChild(document.getElementsByTagName("img")[0]) +662e72656d6f76654368696c6428646f63756d656e742e676574456c656d656e747342795461674e616d652822696d6722295b305d29 +# var f = document.getElementsByTagName("form")[0] +7661722066203d20646f63756d656e742e676574456c656d656e747342795461674e616d652822666f726d22295b305d +#
\x0a\x0a
+3c666f726d3e0a3c696d672069643d2269223e0a3c2f666f726d3e +# innerHTML = "PASS" +696e6e657248544d4c203d20225041535322 +# innerHTML = "FAIL" +696e6e657248544d4c203d20224641494c22 +# f.removeChild(document.getElementsByTagName("img")[0]) +662e72656d6f76654368696c6428646f63756d656e742e676574456c656d656e747342795461674e616d652822696d6722295b305d29 +# var f = document.getElementsByTagName("form")[0] +7661722066203d20646f63756d656e742e676574456c656d656e747342795461674e616d652822666f726d22295b305d +#
\x0a\x0a
+3c666f726d20636c6173734e616d653d2261223e0a3c696d67207372633d22222069643d2269223e0a3c2f666f726d3e +# imgBar +696d67426172 +# imgBar +696d67426172 +# imgFoo +696d67466f6f +# imgBar +696d67426172 +# imgFoo +696d67466f6f +# CURLOPT_POSTFIELDS +4355524c4f50545f504f53544649454c4453 +# openssl_seal +6f70656e73736c5f7365616c +# %3e%3c/content%3e%0a%3c/binding%3e%3c/bindings%3e);" +2533652533632f636f6e74656e742533652530612533632f62696e64696e672533652533632f62696e64696e6773253365293b22 +# binding%20id%3d%22a%22%3e%0a%3ccontent%3e%3cchildren/%3e%3c/con +62696e64696e67253230696425336425323261253232253365253061253363636f6e74656e742533652533636368696c6472656e2f2533652533632f636f6e +# xmlns%3d%22http%3a//www.mozilla.org/xbl%22%3e%0a%3cbinding%20id +786d6c6e73253364253232687474702533612f2f7777772e6d6f7a696c6c612e6f72672f78626c25323225336525306125336362696e64696e672532306964 +# g:url(data:text/xml;charset=utf-8,%3cbindings%20xmlns%3d%22http +673a75726c28646174613a746578742f786d6c3b636861727365743d7574662d382c25336362696e64696e6773253230786d6c6e7325336425323268747470 +# moz-binding:url +6d6f7a2d62696e64696e673a75726c +# binding.xml#a +62696e64696e672e786d6c2361 +# https://bugzilla.mozilla.org/attachment.cgi?id=277817#a +68747470733a2f2f6275677a696c6c612e6d6f7a696c6c612e6f72672f6174746163686d656e742e6367693f69643d3237373831372361 +# moz-binding:url +6d6f7a2d62696e64696e673a75726c +# "].random = null;[" +225d2e72616e646f6d203d206e756c6c3b5b22 +# "].random;}\x16\xf3\xf5\x12\x92\xaf\x1e\xaf>estkc\x10\xf8\xde3\x9e\xfb\xe9\xd1\xabhrc\xeb\xf5\x18|\x8b\xe8\x981~vh\x1a\xa9pq\xfab~\xbb\x80m\x1a\x0c\xd8h\xb5\xcdd\xd86i\x94`\x93\x8f\xbei\x03\xb6)\x +852a06823e16f3f51292af1eaf3e6573746b6310f8de339efbe9d1ab687263ebf5187c8be898317e76681aa97071fa627ebb806d1a0cd868b5cd64d836699460938fbe6903b629e1cbb57c29e472c8e5845c66d66e303bc1ec26985dc694a1830729b717 +# 5\xb6-\xb9\xcbvw\x06\xc8\x18}\xf4\xda\x03\xf55\xfa\x0ehh[c,'\x86\xc6\xa1\x0a\x9a\xdbi\xa36)\xad\xa1&\xbc[n\xb7]\x80^\x0f\xd1\xc3\x91\\xe8l\xf48\xd4\xd2\x06\xa7\xafx\x9cl$\xea\xd5\x10f\xb2\xde\xfd\x13 +35b62db9cb767706c8187df4da03f535fa0e68685b632c2786c6a10a9adb69a33629ada126bc5b6eb75d805e0fd1c3915ce86cf438d4d206a7af789c6c24ead51066b2defd13676a9fe8c1616d3dba776834f705dfbd193bdc97f028723dd79a651ccff0 +# %\xf1~<\xe3\x8a_\xc9\xb8\xed\x8c\xb9+\xeb6\xf3[;\xbf63`\xbbr\xbb\xac\xbb\xad3\xc0\xbd\xe1i-\x1erlv\x07too\xd0\xcc\xd1\x87fe^e\xd9_\xcb\xcc\xef<\xf7\x1b\x06\xe1!\xce\xa3\xef|\xc7y\xdbw\x90\xd0\x0f\xf0 +25f17e3ce38a5fc9b8ed8cb92beb36f35b3bbf363360bb72bbacbbad33c0bde1692d1e726c7607746f6fd0ccd18766655e65d95fcbccef3cf71b06e121cea3ef7cc779db7790d00ff09d8226e1ff9c2e327ce5d7b6c39ecb787584751499ae2bf4f766d2 +# p\x1c\xcb\x02\x8ecy@\x92,\x0b`y\x00m\x03x\x1a@\x14\x01\x80\x00\x00\x80\x02\x07\x00\x80\x00\x1b4%\x16\x07(4d%\x00\x10\x05\x00\xe0p\x14\xcb\xd24q\xe48\x96\xa5i\xa2\xc8q,k\xd3d\x91ei\x9a\xa6\x89"4k\xd3d +701ccb028e637940922c0b6079006d03781a4014018000008002070080001b342516072834642500100500e07014cbd23471e43896a569a2c8712c6bd3649165699aa68922346bd364119ee779a609cff33cd384288aa2690271346d010000050e0000 +# \xae\xeb\xba\xae\xeb\xba\xae\xeb\xba\xae\xeb\xba\xae\xeb\xba\xae\xeb\xba\xae\xeb\xba\xae\xeb\xba\xae\xeb\xba\xae\xeb\xba\xae\xeb\xba\xae\xeb\xba@h\xc8*\x00@\x02\x00@gr$gr$er$er$\x07\x08\x0dy\x05\x00\ +aeebbaaeebbaaeebbaaeebbaaeebbaaeebbaaeebbaaeebbaaeebbaaeebbaaeebbaaeebba4068c82a004002004067722467722465722465722407080d790500c800000800c0311c637224c7b22c6df3346ff334d1133dd1333d75746517080d79050000 +# =%n\x01\x05vorbis)bcv\x01\x00\x08\x00\x00\x001l 
\xc5\x80\xd0\x90u\x00\x00\x10\x00\x00`$)\x0e\x93fi)\xa5\x94\xa1(y\x98\x94hi)\xa5\x94\xc50\x89\x98\x94\x89\xc5\x18c\x8c1\xc6\x18c\x8c1\xc6\x18c\x8c 4d\x +3d256e0105766f7262697329626376010008000000316c20c580d0907500001000006024290e93666929a594a128799894686929a594c53089989489c518638c31c618638c31c618638c20346415000004008028098ea3e6696ace396718278e72a039 +# parseFloat("NAN(ffffeaaaaaaaa)") +7061727365466c6f617428224e414e2866666666656161616161616161292229 +# parseFloat("NAN(ffffeeeeeff0f)") +7061727365466c6f617428224e414e2866666666656565656566663066292229 +# A0X1V0O0F0F0E0R0S1V0D0I0S0P0L0A0YzutB2R0M0A0X1V0O0F0F0E0R0S1V0T0A0K0E0NzutB2R0D0O0W0N0L0O +4130583156304f3046304630453052305331563044304930533050304c304130597a7574423252304d304130583156304f30463046304530523053315630543041304b3045304e7a75744232523044304f3057304e304c304f3c2f2324404024233e +# 1M2Z2Z1Ezx0/tE1Q0o0w0ntFtA06000s1T0f0etF0c0o1HtE0i1G0s0t0.1P2V0ezs0e0x1P0?0F0F0F0F0F0F0F0E0E0E0E0E0E +314d325a325a31457a78302f74453151306f3077306e7446744130363030307331543066306574463063306f314874453069314730733074302e3150325630657a73306530783150303f3046304630463046304630463046304530453045304530453045 +# \xc7:\x06\xfa\xbd6\xd5^\xba-\xc0E\xd9\x0b{\xa0d\x16\xbf\xf4\xee\xec\xf3v\xf8\x83\xc6\xd4r=s\xcf~\xaa^Pk\xd5\xe1+\xa9\xf7\x00\x00\x00\x00\x00\x00<#$@@$#>\x120P0R0O0D0U0C0T1V0T0I0T0L0Ezu6M5Z4U6K3Y4 +c73a06fabd36d55eba2dc045d90b7ba06416bff4eeecf376f883c6d4723d73cf7eaa5e506bd5e12ba9f70000000000003c2324404024233e1230503052304f30443055304330543156305430493054304c30457a75364d355a3455364b335934 +# T0A0K0E0N0=020|0D0O0W0N0L0O0A0D0E0R0_0V0E0R0S0I0O0N0=01 +543041304b3045304e303d3032307c3044304f3057304e304c304f3041304430453052305f30563045305230533049304f304e303d30313c2f2324404024233e +# <#$@@$#>\x120P0R0O0D0U0C0T0_0T0I0T0L0E0=0C0V0E0-020001030-030900000 +3c2324404024233e1230503052304f3044305530433054305f305430493054304c3045303d304330563045302d3032303030313033302d303330393030303030 +# \x880\x00\x00\x00\x02\x02\x000\x82\x18-\x06\x09*\x86H\x86\xf7\x0d\x01\x07\x02\xa0\x82\x18\x1e0\x82\x18\x1a\x02\x01\x011\x0b0\x09\x06\x05+\x0e\x03\x02\x1a\x05\x000h\x06\x0a+\x06\x01\x04\x01\x827\x02\x +88300000000202003082182d06092a864886f70d010702a082181e3082181a020101310b300906052b0e03021a05003068060a2b060104018237020104a05a30 +# 00(wrhc&)00(wrhc&)00(wrhc&)00(wrhc&)10(wrhc&)6712(wrhc&)10 +30302877726863262930302877726863262930302877726863262930302877726863262931302877726863262936373132287772686326293130 +# edocllehsnur +65646f636c6c6568736e7572 +# hit ctrl-o, cancel, click me\x0a\xbc\x0e\xa6\xa2j\xf8 /d\xd8\xf2O(Z#\x97\xb4\xb5}\xd5\xdd\xdb\xd8\x97\xa9*\7/\xb1l\x8b\xb0e\xe4\xd2\x0fq9M\ +826748852adc7e42d2e889f3b80006fde135f976bc056fc8c4bbe510505c2f3ebc0ea6a26af8202f64d8f24f285a2397b4b57dd5dddbd897a92a5c372fb16c8bb065e4d20f71394db69ac8a71f199878b25762ab23689bf8f30f +# \xf1\xde\x1a\xed+\xb1\x0a\x12\x11C\xbc\x88f=\x9b<`j\xa2\xa4F+\x01\xec}\xe8\xcc;0d7\xd8"\xf0\xaflk\x88\x1f\xbdQ(\xbc\x884g\x07$km\x98B\x02\xac\xdc\x8f\xe2\xf8\xc8\xeeZ\xf0\x12\x8b@c\x0dImZ+\xc1\x83a\x +f1de1aed2bb10a121143bc88663d9b3c606aa2a4462b01ec7de8cc3b306437d822f0af6c6b881fbd5128bc88346707246b6d984202acdc8fe2f8c8ee5af0128b40630d496d5a2bc18361a2c5a761859aa7d7059a0e7320a29521 +# \x9a%\x83+X)\xaa\xa4\xd4x"~c\x17\x9e\x8c\xe2\xbc:\xd9[\x09pR\x1e\x9f[\xaew\x9b\xddx\x9a%\xa3*\xd6\xcc=\xc6*\x87#\x9eXh\x9bE\xe9\x1a\xa5*Q;\xf6\xa7Z\xbaAa\x0c`\x94\xe3\x81\xfd\xe3\xaa\xf0\x18\xc0F\xaa 
+9a25832b5829aaa4d478227e63179e8ce2bc3ad95b0970521e9f5bae779bdd789a25a32ad6cc3dc62a87239e58689b45e91aa52a513bf6a75aba41610c6094e381fde3aaf018c046aa5ea51a845d1b645d7b9a25832b5829aca4 +# \x97-\x86\xf8E\xd2\x80\xebI\x94Ik%S\x9c\x1eRs\x09[\xd9_\xec\x8c\xc6]Tsc\x1f\xf2\xae\xf1\xbfl\xaf\xfa\xea\xdf\xd5\xde\x0d\x91\xd5\xb8f\\xae\x1bU\xe4\x89\x97L%|\x81H\xc7\x1azR\x83KL\x1atL\xa2\x9a\x9a\x +972d86f845d280eb4994496b25539c1e5273095bd95fec8cc65d5473631ff2aef1bf6caffaeadfd5de0d91d5b8665cae1b55e489974c257c8148c71a7a52834b4c1a744ca29a9a8414039cc8a6d5f1ad70919049af3a9f38b514 +# |\xe9W\x9fl\xd4\xad\xc7\xdb\xcb\xe0\xa8\x92&\xdfW=\xef0\xbc\x8c\x8e\x1fl\xd5\x04\x1d\xc4\x01\xdc8Aj\xe4\xa6\x00VH\x0e\xa6{\xdb\x12\x03\x8c\xd3&Nn\x8e\^y\xec\xfd8\x91\x16\xed\xbb\xb0\xfb\x9d['`\x9fg\x +7ce9579f6cd4adc7dbcbe0a89226df573def30bc8c8e1f6cd5041dc401dc38416ae4a60056480ea67bdb12038cd3264e6e8e5c5e79ecfd389116edbbb0fb9d5b27609f67bf2b6e8a66e1b90aea16b37f0af0449c3c7850d3d389 +# yI0\x84\xb2\x11\x9bx\xc6\x15F\x09\xb5n\xeeg\x14\x04pG8\x89\xb9\x10fF\x13\x92\xd4\xb4\xa4(w\xe9Msz\xb3\x93\x9fT\xd5$\xe7\x99\xceuz2\x93\xeft\xe7;\xffQXHqz\x86\x9c\x9d\xec#?\xab\x19Jw\xc4`\x9f\x08u\xce +79493084b2119b78c6154609b56eee67140470473889b91066461392d4b4a42877e94d737ab3939f54d524e799ce757a3293ef74e73bff515848717a869c9dec233fab194a77c4609f0875ce1ff469a252e27121e14ca8449f73 +# \x849\xc1Q-\xb1\xcf\xcd\x16\x9c\xb7\xef\xb7\xe7\xba\x14\xa2A]\xd7\xa5\x97\xfd\xcb\x8d\xe1\xb0Y\x92s\x15\xe1B\x04\xcfs\xe1\x0e>+\xfc\xb7=\xfdE8\x00\x8b8\xbe\xae\x17\x85\xab~\x1b\x80\x1f\xff\x12\xc7KQ5 +8439c1512db1cfcd169cb7efb7e7ba14a2415dd7a597fdcb8de1b059927315e14204cf73e10e3e2bfcb73dfd4538008b38beae1785ab7e1b801fff12c74b5135bf285c992aca7cbaf4b6e691a9af5eae42f1eb4ebe62c1505bc6 +# \x00\x00\x1cdref\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x0curl \x00\x00\x00\x01\x00\x00\x03`stbl\x00\x00\x00\stsd\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00Lmp4a\x00\x00\x00\x00\x00\x00\x00\x01 +00001c6472656600000000000000010000000c75726c2000000001000003607374626c0000005c7374736400000000000000010000004c6d703461000000000000000100000000000000000002001000000000ac440000000000 +# \x0bg\x00\x00\x00:\x00\x00\x00\xac\x00\x00\x00\x9b\x00\x00\x009\x00\x00\x006\x00\x00\x00:\x00\x00\x006\x00\x00\x006\x00\x00\x006\x00\x00\x006\x00\x00\x006\x00\x00\x006\x00\x00\x006\x00\x00\x01O\x00\x +0b670000003a000000ac0000009b00000039000000360000003a000000360000003600000036000000360000003600000036000000360000014f0000139c00000394000003d7000003f1000004b80000044d00000631000004de +# \x02\xf8\x00\x00\x02\xf9\x00\x00\x00(stsc\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x1b\x00\x00\x00\x01\x00\x00\x00\x1d\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x0b\xf8stsz\x00\x00\x +02f8000002f900000028737473630000000000000002000000010000001b000000010000001d000000050000000100000bf87374737a0000000000000000000002f90000025200000485000004bf0000036e0000034e0000045b +# t\xc3X\xe7\xc2\xe3\x10R?\xfecC\xe3\xc8K\xc0\xdfe{\x03\x13\xf1I\x80\x80\xe8\xdf\x10\x8f\x19\x19!\xc3_\xab\xac\xcc"\x8b\xf7\xbb~\x7f\xabO\xc9\x92P?\xfc\x17\xfc\x03\x11P]\xe7qzP\xa5\x0a\xebJ\xa9\x16\x99 +74c358e7c2e310523ffe6343e3c84bc0df657b0313f1498080e8df108f191921c35fabaccc228bf7bb7e7fab4fc992503ffc17fc0311505de7717a50a50aeb4aa916999e09a8b9e0fe533e8f4ba0b813a9f28edbcc93331c3d02 +# dia\x00\x00\x00 mdhd\x00\x00\x00\x00\xce\xc8sL\xce\xc8sL\x00\x00\x03\xe8\x00\x00\x00.U\xc4\x00\x00\x00\x00\x004hdlr\x00\x00\x00\x00\x00\x00\x00\x00soun\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 
+646961000000206d64686400000000cec8734ccec8734c000003e80000002e55c400000000003468646c720000000000000000736f756e0000000000000000000000004e65754c696f6e204d5034202d20417564696f00000001 +# \x05k\x00\x00\x06\x9b\x00\x00\x06\xad\x00\x00\x07\xc4\x00\x00\x08+\x00\x00\x09%\x00\x00\x0am\x00\x00\x07\x8c\x00\x00\x09\xe5\x00\x00\x09\x9b\x00\x00\x0b\x0e\x00\x00\x0b|\x00\x00\x08\x90\x00\x00\x0aD\ +056b0000069b000006ad000007c40000082b0000092500000a6d0000078c000009e50000099b00000b0e00000b7c0000089000000a4400000b1700000b0900000c9800000bf0000004ee000010670000009f00000e6c000000f1 +# \xa1\xa1\xa1\xa0\x00\x00\x00\xd0\xd0\xd0\xd0\x00\x00\x00hhhh\x00\x00\x004444\x00\x00\x00\x1a\x1a\x1a\x1a\x00\x00\x00}\x0d\x0d\x0d\x00\x00\x00\x06\x86\x86\x86\x80\x00\x00\x03\x03CCC@\x00\x00\x01\xa1\x +a1a1a1a0000000d0d0d0d000000068686868000000343434340000001a1a1a1a0000007d0d0d0d00000006868686800000030343434340000001a1a1a1a0000000d0d0d0d000000068686868000000343434340000001a1a1a1a +# \x06\xc3\x00\x00\x05G\x00\x00\x07\x80\x00\x00\x07\x04\x00\x00\x04S\x00\x00\x06\xc1\x00\x00\x06F\x00\x00\x06\xcb\x00\x00\x072\x00\x00\x07D\x00\x00\x08\x88\x00\x00\x06\xb2\x00\x00\x06M\x00\x00\x06g\x00 +06c300000547000007800000070400000453000006c100000646000006cb000007320000074400000888000006b20000064d00000667000006440000087c000006800000062a0000064900000622000007ff000005f900000696 +# \x86\x86\x80\x00\x00\x03\x03CCC@\x00\x00\x01\xa1\xa1\xa1\xa0\x00\x00\x00\xd0\xd0\xd0\xd0\x00\x00\x00hhhh\x00\x00\x004444\x00\x00\x00\x1a\x1a\x1a\x1a\x00\x00\x00\xbb\x0d\x0d\x0d\x00\x00\x00\x06\x86\x8 +8686800000030343434340000001a1a1a1a0000000d0d0d0d000000068686868000000343434340000001a1a1a1a000000bb0d0d0d00000006868686800000030343434340000001a1a1a1a0000000d0d0d0d000000068686868 +# \x04\xee\x01\x09\xcc\x1f +ce50a7ed067a424702d030da29d4e54d1416c1de9163d10e24b01ee1dc30c3022e305c2143ec276e470e623e0109cc1f +# 3G\xe00\x0a\x82(\x88\x14p\x1c\xc7(\xf8&\xb1GY\xca\xee\x80\x19\xc0\x14\xec\x01\x8c)\xe5V\xb0\x87\xcd8\x02\x1fK\x16\x00\x0e\x08\x07\x91DHF\x0f@ +3347e0300a82288814701cc728f826b14759caee8019c014ec018c29e556b087cd38021f4b16000e0807914448460f40 +# II*\x00\x84\x17\x00\x00\x80\x10`d\x18\x19\x04\xe2\x08\x04\x01\xc0\x00\xc0H\x01\xe8\xf7|\x00\x00 \x80\x08D\x10\x0d\x04:\x1f\xc0`\x80=\xf0\xf8|\xbf@\x0f\x87\xd0`2\x1a +^49492a008417000080106064181904e2080401c000c04801e8f77c000020800844100d043a1fc060803df0f87cbf400f87d060321a +# document.styleSheets[0].cssText = "p{text-overflow:ellipsis;overflow-x:hidden;} p:first-letter{float:left;}" +646f63756d656e742e7374796c655368656574735b305d2e63737354657874203d2022707b746578742d6f766572666c6f773a656c6c69707369733b6f766572666c6f772d783a68696464656e3b7d20703a66697273742d6c65747465727b666c6f61743a6c6566743b7d22 +# obj["onpropertychange"]=function(e){ obj2["applyElement"](all_elements_list[5]);} +6f626a5b226f6e70726f70657274796368616e6765225d3d66756e6374696f6e2865297b206f626a325b226170706c79456c656d656e74225d28616c6c5f656c656d656e74735f6c6973745b355d293b7d +# obj2["applyElement"](all_elements_list[5]) +6f626a325b226170706c79456c656d656e74225d28616c6c5f656c656d656e74735f6c6973745b355d29 +# var element_2=document.createElement("linearGradient") +76617220656c656d656e745f323d646f63756d656e742e637265617465456c656d656e7428226c696e6561724772616469656e742229 +# script id="a" onpropertychange="boom() +7363726970742069643d226122206f6e70726f70657274796368616e67653d22626f6f6d2829 +# Y\xc3\x90s\xdc)\xc5f\xa0\x09Y\x86 +59c39073dc29c566a0095986 +# \x9a\x89\xa6\xd8\x1a\x05ik\xae@\xb4\x87X +9a89a6d81a05696bae40b48758 +# \x80E`2\x9bK3I\xd9 +804560329b4b3349d9 +# 
\xb2\x9bV>\xb8_\x9fq +b29b563eb85f9f71 +# \x1f\x00\x01\xf0\x12\x05\x00\x00"\x00\x07\xf0\x0a +1f0001f012050000220007f00a +# \x00"$"#,##0_ +00222422232c2323305f +# [4\PZX54(P +5b345c505a5835342850 +# .txt\x7f\x02\x01\x80\xff\xff\xd0\xff +2e7478747f020180ffffd0ff +# \x14\x02\x00\xefZ\x083h\x0d\xed= +140200ef5a0833680ded3d +# \xde\xba\x90js\xff\xf3\x82d\xf3\x1a\x8a0\xf2\x001\xebn\x00\x00\x03H +deba906a73fff38264f31a8a30f20031eb6e00000348 +# u\xb1?\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfdm\xff\xff\x80\xff\x04\xd5 +75b13ffffffffffffffffffffd6dffff80ff04d5 +# FWS +^465753 +# \xd4\xc3\xb2\xa1 +^d4c3b2a1 +# \xa1\xb2\xc3\xd4 +^a1b2c3d4 +# width=1000 height=100>\x0d\x0a