Compare commits


292 Commits

Author SHA1 Message Date
Daniel-Eisenberg
78d1bcf7c4 prometheus support locally managed (#369)
Co-authored-by: Daniel Eisenberg <danielei@checkpoint.com>
2025-11-11 14:56:56 +02:00
Daniel-Eisenberg
c90862d74c Docker stop issue (#366)
* fix image entrypoint sigterm

* fix image entrypoint sigterm

---------

Co-authored-by: avigailo <avigailo@checkpoint.com>
2025-11-06 10:53:49 +02:00
Daniel-Eisenberg
b7923dfd8c update PostgreSQL configuration in deployment files (#365)
- Change PostgreSQL volume mount from /var/lib/postgresql/data to /var/lib/postgresql
  This allows PostgreSQL to manage the data directory structure internally

- Add PostgreSQL version configuration variable to all docker-compose files:
  * POSTGRES_VERSION for deployment/nginx
  * APPSEC_POSTGRES_VERSION for all deployment/docker-compose variants

- Update PostgreSQL image tag to use version variable (set to version 18)
  Changed from 'postgres' to 'postgres:' or 'postgres:'

- Add PostgreSQL version variable to all .env files with default value of 18

Co-authored-by: Nahum Perman <nahump@checkpoint.com>
2025-10-28 14:58:21 +02:00
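A minimal docker-compose sketch of the configuration this commit describes (service and volume names are assumptions; only the APPSEC_POSTGRES_VERSION variable, its default of 18, and the /var/lib/postgresql mount come from the commit message):

```yaml
# Hypothetical excerpt of a deployment/docker-compose variant after this change.
# The accompanying .env file is assumed to contain: APPSEC_POSTGRES_VERSION=18
services:
  appsec-db:                                        # service name is an assumption
    image: postgres:${APPSEC_POSTGRES_VERSION:-18}  # image tag now comes from the version variable
    volumes:
      # Mount the parent directory so PostgreSQL manages the data directory structure internally
      - postgres-data:/var/lib/postgresql
volumes:
  postgres-data:
```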
Daniel-Eisenberg
ed4e20b010 Update open-appsec-k8s-default-config-v1beta1.yaml 2025-10-21 17:40:08 +03:00
Daniel-Eisenberg
14159402e2 Update open-appsec-k8s-prevent-config-v1beta1.yaml 2025-10-21 17:39:48 +03:00
Daniel-Eisenberg
b74957d9d4 Fix alpine ca (#354)
* fix ca loading for alpine

* fix ca loading for alpine

* fix ca loading for alpine

* change gzipped detection

* change gzipped detection

---------

Co-authored-by: Daniel Eisenberg <danielei@checkpoint.com>
2025-09-30 18:19:15 +03:00
orianelou
0c0da6d91b Update README.md 2025-09-02 10:34:35 +03:00
orianelou
ef887dd1c7 Update docker-compose.yaml 2025-08-12 11:50:21 +03:00
Daniel-Eisenberg
6bbc89712a Aug 08 2025 dev (#336)
* sync code

* sync code

* sync code

---------

Co-authored-by: Ned Wright <nedwright@proton.me>
2025-08-10 13:21:52 +03:00
orianelou
dd19bf6158 Update .env 2025-07-30 16:32:24 +03:00
orianelou
60facef890 Create kong.yaml 2025-07-30 16:31:48 +03:00
orianelou
a3ac05642c Create .env 2025-07-30 11:53:00 +03:00
orianelou
682b91684d Create docker-compose.yaml 2025-07-30 11:52:17 +03:00
orianelou
ff8c5701fe Delete deployment/docker-compose/kong-lua-plugin 2025-07-30 11:50:47 +03:00
orianelou
796c6cf935 Create kong-lua-plugin 2025-07-30 11:49:36 +03:00
orianelou
31ff6f2c72 Update docker-compose.yaml 2025-06-23 12:43:44 +03:00
orianelou
eac686216b Update docker-compose.yaml 2025-06-23 12:42:41 +03:00
orianelou
938cae1270 Update docker-compose.yaml 2025-06-23 12:41:38 +03:00
orianelou
87cdeef42f Update docker-compose.yaml 2025-06-23 12:40:40 +03:00
orianelou
d04ea7d3e2 Update docker-compose.yaml 2025-06-23 12:39:50 +03:00
orianelou
6d649cf5d5 Update docker-compose.yaml 2025-06-23 12:38:22 +03:00
orianelou
5f71946590 Update docker-compose.yaml 2025-06-23 12:36:37 +03:00
orianelou
c75f1e88b7 Update docker-compose.yaml 2025-06-23 12:35:49 +03:00
Daniel-Eisenberg
c4975497eb Update entry.sh 2025-06-12 12:55:27 +03:00
Daniel-Eisenberg
782dfeada6 Waf tag (#317)
* add waf-tag to openappsec

* fix waf tag to openappsec

---------

Co-authored-by: wiaamm <wiaamm@checkpoint.com>
2025-06-11 11:34:48 +03:00
wiaam96
bc1eac9d39 Fix Watchdog restarts (#319)
* don't exit

* fix restarting agent

* fix watchdog restarts
2025-06-09 16:11:45 +03:00
Daniel-Eisenberg
4dacd7d009 Prometheus support (#316)
* Add prometheus support

* Add prometheus support

* Add prometheus support

* Add prometheus support

* Add prometheus support

---------

Co-authored-by: avigailo <avigailo@checkpoint.com>
2025-06-05 16:28:57 +03:00
orianelou
3a34984def Merge pull request #293 from willseward/bugfix/fix-ipv6-cidr
Fix IPv6 masking
2025-05-27 13:43:59 +03:00
orianelou
5aaf787cfa Create schema_v1beta2.yaml 2025-05-13 16:21:13 +03:00
orianelou
2c7b5818e8 Update and rename schema_v1beta2.yaml to schema_v1beta1.yaml 2025-05-13 16:20:31 +03:00
orianelou
c8743d4d4b Create schema_v1beta2.yaml 2025-05-13 16:18:52 +03:00
orianelou
d703f16e35 Update README.md 2025-04-17 15:12:48 +03:00
Daniel-Eisenberg
692c430e8a Merge pull request #298 from openappsec/exception-fix
exception fix
2025-04-17 15:06:23 +03:00
Daniel Eisenberg
72c5594b10 exception fix 2025-04-17 13:37:25 +03:00
orianelou
2c6b6baa3b Update docker-compose.yaml 2025-04-01 14:24:16 +03:00
orianelou
37d0f1c45f Update bug_report.md 2025-04-01 10:14:26 +03:00
Wills Ward
2678db9d2f fix IPv6 masking 2025-03-30 14:59:26 -05:00
orianelou
52c93ad574 Merge pull request #291 from MaxShapiro/MaxShapiro-patch-1
Update .env
2025-03-30 10:22:09 +03:00
Max Shapiro
bd3a53041e Update .env 2025-03-30 09:55:33 +03:00
Daniel-Eisenberg
44f40fbd1b Merge pull request #287 from openappsec/docker-upgrade-issue
Docker upgrade issue
2025-03-25 22:47:21 +02:00
orianelou
0691f9b9cd Update open-appsec-k8s-prevent-config-v1beta2.yaml 2025-03-23 14:33:18 +02:00
orianelou
0891dcd251 Update .env 2025-03-23 14:02:41 +02:00
Daniel-Eisenberg
7669f0c89c Merge pull request #285 from openappsec/Mar_17_2025-Dev
Mar 17 2025 dev
2025-03-19 17:57:49 +02:00
orianelou
39d7884bed Update bug_report.md 2025-03-19 16:42:28 +02:00
orianelou
b8783c3065 Update nginx_version_support.md 2025-03-19 11:32:09 +02:00
orianelou
37dc9f14b4 Update config.yml 2025-03-19 11:31:32 +02:00
orianelou
9a1f1b5966 Update config.yml 2025-03-19 11:30:41 +02:00
orianelou
b0bfd3077c Update config.yml 2025-03-19 11:30:09 +02:00
orianelou
0469f5aa1f Update bug_report.md 2025-03-19 11:29:51 +02:00
orianelou
3578797214 Delete .github/ISSUE_TEMPLATE/feature_request.md 2025-03-19 11:29:28 +02:00
orianelou
16a72fdf3e Update nginx_version_support.md 2025-03-19 11:29:03 +02:00
orianelou
87d257f268 Update config.yml 2025-03-19 11:26:36 +02:00
orianelou
36d8006c26 Create config.yml 2025-03-19 11:24:55 +02:00
orianelou
8d47795d4d Delete .github/ISSUE_TEMPLATE/config.yml 2025-03-19 11:21:45 +02:00
orianelou
f3656712b0 Merge pull request #284 from openappsec/orianelou-issue-tamplates
Orianelou issue tamplates
2025-03-19 11:20:41 +02:00
orianelou
b1781234fd Create config.yml 2025-03-19 11:18:49 +02:00
orianelou
f71dca2bfa Create nginx_version_support.md 2025-03-19 11:16:52 +02:00
orianelou
bd333818ad Create feature_request.md 2025-03-19 11:12:10 +02:00
orianelou
95e776d7a4 Create bug_report.md 2025-03-19 11:10:21 +02:00
Ned Wright
51c2912434 sync code 2025-03-18 20:34:34 +00:00
Ned Wright
0246b73bbd sync code 2025-03-17 14:49:44 +00:00
avigailo
919921f6d3 Add manifest to the image creation 2025-03-17 15:26:11 +02:00
avigailo
e9098e2845 Add manifest to the image creation 2025-03-16 16:57:48 +02:00
avigailo
97d042589b Add manifest to the image creation 2025-03-16 13:41:28 +02:00
orianelou
df7be864e2 Update open-appsec-crd-v1beta2.yaml 2025-03-11 16:30:27 +02:00
orianelou
ba8ec26344 Create apisix.yaml 2025-03-09 11:43:40 +02:00
orianelou
97add465e8 Create kong.yml 2025-03-09 11:42:46 +02:00
orianelou
38cb1f2c3b Create envoy.yaml 2025-03-09 11:41:48 +02:00
orianelou
1dd9371840 Rename examples/juiceshop/nginx/swag/default.conf to examples/juiceshop/swag/default.conf 2025-03-09 11:41:13 +02:00
orianelou
f23d22a723 Rename examples/juiceshop/nginx/swag/juiceshop.subfolder.conf to examples/juiceshop/swag/juiceshop.subfolder.conf 2025-03-09 11:40:47 +02:00
orianelou
b51cf09190 Create juiceshop.subfolder.conf 2025-03-09 11:39:51 +02:00
orianelou
ceb6469a7e Create default.conf 2025-03-09 11:39:22 +02:00
orianelou
b0ae283eed Update open-appsec-crd-v1beta2.yaml 2025-03-06 14:19:07 +02:00
orianelou
5fcb9bdc4a Update open-appsec-crd-v1beta2.yaml 2025-03-06 13:54:49 +02:00
orianelou
fb5698360b Merge pull request #267 from openappsec/namspace-crds
Update open-appsec-crd-v1beta2.yaml
2025-03-06 13:38:34 +02:00
orianelou
147626bc7f Update open-appsec-crd-v1beta2.yaml 2025-03-06 13:31:20 +02:00
orianelou
448991ef75 Update docker-compose.yaml 2025-03-03 11:54:03 +02:00
orianelou
2b1ee84280 Update docker-compose.yaml 2025-03-03 11:53:53 +02:00
orianelou
77dd288eee Update docker-compose.yaml 2025-03-03 11:52:47 +02:00
orianelou
3cb4def82e Update docker-compose.yaml 2025-03-03 11:52:26 +02:00
orianelou
a0dd7dd614 Update docker-compose.yaml 2025-03-03 11:51:13 +02:00
orianelou
88eed946ec Update docker-compose.yaml 2025-03-03 11:50:49 +02:00
orianelou
3e1ad8b0f7 Update docker-compose.yaml 2025-03-03 11:50:23 +02:00
Daniel-Eisenberg
bd35c421c6 Merge pull request #263 from openappsec/Feb_27_2025-Dev
Feb 27 2025 dev
2025-03-02 18:23:10 +02:00
Ned Wright
9d6e883724 sync code 2025-02-27 16:08:31 +00:00
Ned Wright
cd020a7ddd sync code 2025-02-27 16:03:28 +00:00
orianelou
bb35eaf657 Update open-appsec-k8s-prevent-config-v1beta2.yaml 2025-02-26 16:16:16 +02:00
orianelou
648f9ae2b1 Update open-appsec-k8s-default-config-v1beta2.yaml 2025-02-26 16:15:54 +02:00
orianelou
47e47d706a Update open-appsec-k8s-default-config-v1beta2.yaml 2025-02-26 16:15:39 +02:00
orianelou
b852809d1a Update open-appsec-crd-v1beta2.yaml 2025-02-19 13:35:51 +02:00
orianelou
a77732f84c Update open-appsec-k8s-prevent-config-v1beta1.yaml 2025-02-17 16:08:50 +02:00
orianelou
a1a8e28019 Update open-appsec-k8s-default-config-v1beta1.yaml 2025-02-17 16:08:32 +02:00
orianelou
a99c2ec4a3 Update open-appsec-k8s-prevent-config-v1beta1.yaml 2025-02-17 16:06:02 +02:00
orianelou
f1303c1703 Update open-appsec-crd-v1beta1.yaml 2025-02-17 15:52:02 +02:00
Daniel Eisenberg
bd8174ead3 fix connection 2025-02-17 12:20:20 +02:00
Daniel-Eisenberg
4ddcd2462a Feb 10 2025 dev (#255)
* sync code

* sync code

* code sync

* code sync

---------

Co-authored-by: Ned Wright <nedwright@proton.me>
Co-authored-by: Daniel Eisenberg <danielei@checkpoint.com>
2025-02-12 10:56:44 +02:00
orianelou
81433bac25 Create local_policy.yaml 2025-02-11 15:42:20 +02:00
orianelou
8d03b49176 Update open-appsec-k8s-full-example-config-v1beta2.yaml 2025-02-10 10:34:40 +02:00
orianelou
84f9624c00 Update open-appsec-k8s-full-example-config-v1beta2.yaml 2025-02-10 10:23:00 +02:00
orianelou
3ecda7b979 Update docker-compose.yaml 2025-02-09 15:57:29 +02:00
orianelou
8f05508e02 Update docker-compose.yaml 2025-02-09 15:41:55 +02:00
orianelou
f5b9c93fbe Update docker-compose.yaml 2025-02-09 15:40:03 +02:00
orianelou
62b74c9a10 Update docker-compose.yaml 2025-02-09 15:32:02 +02:00
orianelou
e3163cd4fa Create .env 2025-02-03 16:34:47 +02:00
orianelou
1e98fc8c66 Add files via upload 2025-02-03 16:16:50 +02:00
orianelou
6fbe272378 Delete deployment/docker-compose/envoy directory 2025-02-03 16:16:31 +02:00
orianelou
7b3320ce10 Rename default.conf to default.conf 2025-01-21 14:04:01 +02:00
orianelou
25cc2d66e7 Rename .env to .env 2025-01-21 14:03:28 +02:00
orianelou
66e2112afb Rename docker-compose.yaml to docker-compose.yaml 2025-01-21 14:03:05 +02:00
orianelou
ba7c9afd52 Create .env 2025-01-20 15:14:44 +02:00
orianelou
2aa0993d7e Create .env 2025-01-20 15:13:52 +02:00
orianelou
0cdfc9df90 Create .env 2025-01-20 15:13:28 +02:00
orianelou
010814d656 Update .env 2025-01-20 14:36:03 +02:00
orianelou
3779dd360d Create .env 2025-01-20 14:34:54 +02:00
orianelou
0e7dc2133d Update .env 2025-01-20 14:31:39 +02:00
orianelou
c9095acbef Create .env 2025-01-20 14:29:39 +02:00
orianelou
e47e29321d Create .env 2025-01-20 14:24:03 +02:00
orianelou
25a66e77df Create default.conf 2025-01-20 14:16:18 +02:00
orianelou
6eea40f165 Create docker-compose.yaml 2025-01-20 14:15:35 +02:00
orianelou
cee6ed511a Create .env 2025-01-20 14:15:12 +02:00
orianelou
4f145fd74f Update .env 2025-01-20 14:14:31 +02:00
orianelou
3fe5c5b36f Create .env 2025-01-20 14:14:15 +02:00
orianelou
7542a85ddb Update docker-compose.yaml 2025-01-20 14:14:04 +02:00
orianelou
fae4534e5c Merge pull request #226 from openappsec/oriane-23.12.24-adding-new-composes
Oriane 23.12.24 adding new composes
2025-01-20 12:02:00 +02:00
orianelou
923a8a804b Add files via upload 2025-01-20 12:00:49 +02:00
orianelou
b1731237d1 Delete deployment directory 2025-01-20 11:58:01 +02:00
orianelou
3d3d6e73b9 Rename deployment/envoy/docker-compose.yaml to deployment/docker-compose/envoy/docker-compose.yaml 2025-01-20 11:49:03 +02:00
Daniel-Eisenberg
3f80127ec5 Merge pull request #224 from openappsec/Jan_12_2025-Dev
Jan 12 2025 dev
2025-01-19 11:16:59 +02:00
Ned Wright
abdee954bb fix log-file-handler 2025-01-15 12:22:16 +00:00
Ned Wright
9a516899e8 central nginx manager 2025-01-14 16:14:25 +00:00
Ned Wright
4fd2aa6c6b central nginx manager 2025-01-14 16:00:54 +00:00
Ned Wright
0db666ac4f central nginx manager - add new package to packages list 2025-01-13 14:29:58 +00:00
Ned Wright
493d9a6627 central nginx manager 2025-01-13 13:25:05 +00:00
Ned Wright
6db87fc7fe central nginx manager 2025-01-13 12:35:42 +00:00
orianelou
d2b9bc8c9c Create envoy.yaml 2025-01-13 14:23:49 +02:00
orianelou
886a5befe1 Create .env 2025-01-13 14:23:17 +02:00
orianelou
1f2502f9e4 Create docker-compose.yaml 2025-01-13 14:22:57 +02:00
orianelou
9e4c5014ce Create .env 2025-01-13 14:21:50 +02:00
orianelou
024423cce9 Create docker-compose.yaml 2025-01-13 14:21:35 +02:00
orianelou
dc4b546bd1 Update .env 2025-01-13 14:20:38 +02:00
orianelou
a86aca13b4 Update docker-compose.yaml 2025-01-13 14:20:21 +02:00
orianelou
87b34590d4 Update .env 2025-01-13 14:18:04 +02:00
orianelou
e0198a1a95 Update docker-compose.yaml 2025-01-13 14:17:49 +02:00
orianelou
d024ad5845 Update .env 2025-01-13 14:15:28 +02:00
orianelou
46d42c8fa3 Update docker-compose.yaml 2025-01-13 14:15:15 +02:00
orianelou
f6c36f3363 Update .env 2025-01-13 14:14:07 +02:00
orianelou
63541a4c3c Update docker-compose.yaml 2025-01-13 14:13:53 +02:00
orianelou
d14fa7a468 Update docker-compose.yaml 2025-01-13 14:13:23 +02:00
orianelou
ae0de5bf14 Update docker-compose.yaml 2025-01-13 14:13:12 +02:00
orianelou
d39919f348 Update .env 2025-01-13 14:12:32 +02:00
orianelou
4f215e1409 Update docker-compose.yaml 2025-01-13 14:12:09 +02:00
orianelou
f05b5f8cee Create default.conf 2025-01-13 14:11:47 +02:00
orianelou
949b656b13 Update .env 2025-01-13 14:11:02 +02:00
orianelou
bbe293d215 Update docker-compose.yaml 2025-01-13 14:10:48 +02:00
Daniel-Eisenberg
35b2df729f Merge pull request #214 from openappsec/Dec_29_2024-Dev
Dec 29 2024 dev
2025-01-02 10:56:50 +02:00
orianelou
7600b6218f Rename examples/juiceshop/default.conf to examples/juiceshop/nginx/default.conf 2025-01-02 10:21:02 +02:00
orianelou
20e8e65e14 Update open-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 16:52:26 +02:00
orianelou
414130a789 Update open-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 16:40:13 +02:00
orianelou
9d704455e8 Update open-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 16:30:36 +02:00
orianelou
602442fed4 Update open-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 16:24:12 +02:00
orianelou
4e9a90db01 Update open-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 16:21:15 +02:00
orianelou
20f92afbc2 Update open-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 16:18:59 +02:00
orianelou
ee7adc37d0 Update open-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 16:13:51 +02:00
orianelou
c0b3e9c0d0 Update open-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 14:34:17 +02:00
orianelou
f1f4b13327 Update open-appsec-k8s-prevent-config-v1beta2.yaml 2024-12-30 13:51:59 +02:00
orianelou
4354a98d37 Update open-appsec-k8s-default-config-v1beta2.yaml 2024-12-30 13:51:19 +02:00
orianelou
09fa11516c Update open-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 13:39:09 +02:00
orianelou
446b043128 Rename pen-appsec-k8s-full-example-config-v1beta2.yaml to open-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 13:36:31 +02:00
orianelou
91bcadf930 Create pen-appsec-k8s-full-example-config-v1beta2.yaml 2024-12-30 13:35:05 +02:00
orianelou
0824cf4b23 Update README.md 2024-12-30 09:42:10 +02:00
Ned Wright
108abdb35e sync code 2024-12-29 12:47:25 +00:00
Ned Wright
64ebf013eb sync code 2024-12-29 12:13:27 +00:00
orianelou
2c91793f08 Create .env 2024-12-24 11:04:38 +02:00
orianelou
72a263d25a Create docker-compose.yaml 2024-12-24 11:00:58 +02:00
orianelou
4e14ff9a58 Create default.conf 2024-12-23 17:25:23 +02:00
orianelou
1fb28e14d6 Create juiceshop.subfolder.conf 2024-12-23 17:24:26 +02:00
orianelou
e38bb9525c Create .env 2024-12-23 17:22:40 +02:00
orianelou
63b8bb22c2 Create docker-compose.yaml 2024-12-23 17:21:53 +02:00
orianelou
11c97330f5 Create apisix.yaml 2024-12-23 16:59:40 +02:00
orianelou
e56fb0bc1a Create .env 2024-12-23 16:59:07 +02:00
orianelou
4571d563f4 Create docker-compose.yaml 2024-12-23 16:58:35 +02:00
orianelou
02c1db01f6 Create default.conf 2024-12-23 16:47:53 +02:00
orianelou
c557affd9b Create .env 2024-12-23 16:46:38 +02:00
orianelou
8889c3c054 Create docker-compose.yaml 2024-12-23 16:46:16 +02:00
orianelou
f67eff87bc Create kong.yaml 2024-12-23 16:19:32 +02:00
orianelou
fa6a2e4233 Create .env 2024-12-23 16:18:53 +02:00
orianelou
b7e2efbf7e Create docker-compose.yaml 2024-12-23 10:20:02 +02:00
orianelou
96ce290e5f Update open-appsec-crd-v1beta2.yaml 2024-12-19 14:42:51 +02:00
orianelou
de8e2d9970 Merge pull request #210 from openappsec/orianelou-test-as-top-level-7
Update local_policy.yaml
2024-12-12 12:50:29 +02:00
orianelou
0048708af1 Update local_policy.yaml 2024-12-12 12:49:40 +02:00
orianelou
4fe0f44e88 Update local_policy.yaml 2024-12-12 12:45:22 +02:00
orianelou
5f139d13d7 Update docker-compose.yaml 2024-12-09 10:59:01 +02:00
orianelou
919d775a73 Update docker-compose.yaml 2024-12-05 14:42:04 +02:00
orianelou
ac8e353598 Update docker-compose.yaml 2024-12-05 13:43:23 +02:00
Daniel-Eisenberg
0663f20691 Merge pull request #207 from openappsec/Nov_28_2024-Dev
Nov 28 2024 dev
2024-12-01 11:53:26 +02:00
Ned Wright
2dda6231f6 sync code 2024-11-28 10:53:40 +00:00
Ned Wright
1c1f0b7e29 sync code 2024-11-28 10:41:59 +00:00
orianelou
6255e1f30d Rename docker-compose.yaml to docker-compose.yaml 2024-11-06 14:57:50 +02:00
orianelou
454aacf622 Rename .env to .env 2024-11-06 14:57:31 +02:00
orianelou
c91ccba5a8 Create .env 2024-11-06 14:01:40 +02:00
orianelou
b1f897191c Create docker-compose.yaml 2024-11-06 14:01:20 +02:00
Daniel-Eisenberg
027ddfea21 Merge pull request #200 from openappsec/Oct_14_2024-Dev
Oct 14 2024 dev
2024-11-05 12:12:10 +02:00
orianelou
d1a2906b29 Create default.conf 2024-11-03 14:23:34 +02:00
Ned Wright
b1ade9bba0 code sync 2024-10-15 06:57:25 +00:00
Ned Wright
36d302b77e code sync 2024-10-14 16:43:58 +00:00
Ned Wright
1d7d38b0a6 code sync 2024-10-14 16:39:35 +00:00
Ned Wright
1b7eafaa23 code sync 2024-10-14 16:32:23 +00:00
Ned Wright
c2ea2cda6d sync code 2024-10-14 14:51:28 +00:00
orianelou
b58f7781e6 Update local_policy.yaml 2024-10-01 13:05:23 +03:00
orianelou
7153d222c0 Update local_policy.yaml 2024-10-01 13:03:59 +03:00
orianelou
f1ec8959b7 Update apisix-standalone.yaml 2024-10-01 12:49:25 +03:00
Daniel-Eisenberg
4a7336b276 Merge pull request #190 from openappsec/Sep_17_2024-Dev
sync code
2024-09-30 14:53:51 +03:00
orianelou
4d0042e933 Create apisix-standalone.yaml 2024-09-30 14:10:35 +03:00
orianelou
015915497a Create docker-compose.yaml 2024-09-30 14:09:43 +03:00
Ned Wright
586150fe4f sync code 2024-09-17 10:53:09 +00:00
orianelou
3fe0b42fcd Merge pull request #189 from openappsec/Sep_15_2024-Dev
sync code
2024-09-15 17:25:26 +03:00
orianelou
84e10c7129 Merge pull request #186 from chkp-omriat2/main
Updating crowdsec auxiliary
2024-09-15 17:25:13 +03:00
Ned Wright
eddd250409 sync code 2024-09-15 02:49:26 +00:00
chkp-omriat2
294cb600f8 Updating crowdsec auxiliary 2024-09-10 06:09:54 +00:00
Ned Wright
f4bad4c4d9 Remove non-active files 2024-09-02 14:16:01 +03:00
WrightNed
6e916599d9 Merge pull request #179 from openappsec/Aug_20_2024-Dev
Aug 20th update
2024-08-27 12:33:46 +03:00
orianelou
24d53aed53 Update docker-compose.yaml 2024-08-27 10:50:25 +03:00
WrightNed
93fb3da2f8 Merge pull request #177 from wiaam96/patch-1
Update entry.sh
2024-08-22 15:17:49 +03:00
wiaam96
e7378c9a5f Update entry.sh 2024-08-22 15:15:24 +03:00
Ned Wright
110f0c8bd2 Aug 20th update 2024-08-21 08:42:14 +00:00
WrightNed
ca31aac08a Merge pull request #174 from openappsec/orianelou-patch-6
Update docker-compose.yaml
2024-08-20 15:17:02 +03:00
orianelou
161b6dd180 Update docker-compose.yaml 2024-08-20 14:50:01 +03:00
WrightNed
84327e0b19 Merge pull request #170 from openappsec/orianelou-patch-4
Create docker-compose.yaml
2024-08-05 13:12:40 +03:00
orianelou
b9723ba6ce Create docker-compose.yaml
added compose for docker SWAG
2024-08-05 12:06:37 +03:00
WrightNed
00e183b8c6 Merge pull request #169 from openappsec/Jul_31_2024-Dev
Jul 31st update
2024-08-01 18:10:44 +03:00
WrightNed
e859c167ed Merge pull request #167 from openappsec/orianelou-crds
Orianelou crds
2024-08-01 18:10:11 +03:00
Ned Wright
384b59cc87 Jul 31st update 2024-07-31 17:15:35 +00:00
orianelou
805e958cb9 Create open-appsec-crd-latest.yaml 2024-07-25 12:06:59 +03:00
orianelou
5bcd7cfcf1 Create open-appsec-crd-v1beta2.yaml 2024-07-25 12:05:57 +03:00
orianelou
ae6f2faeec Create open-appsec-crd-v1beta1.yaml 2024-07-25 12:04:22 +03:00
WrightNed
705a5e6061 Merge pull request #166 from openappsec/Jul_23_2024-Dev
Jul 23rd update
2024-07-24 16:01:45 +03:00
WrightNed
c33b74a970 Merge pull request #164 from chkp-omris/main
update intelligence
2024-07-24 15:54:58 +03:00
chkp-omris
2da9fbc385 update intelligence 2024-07-23 13:15:33 +00:00
Ned Wright
f58e9a6128 Jul 23rd update 2024-07-23 11:08:24 +00:00
WrightNed
57ea5c72c5 Merge pull request #156 from openappsec/Jul_04_2024-Dev
Jul 4th update
2024-07-07 08:47:38 +03:00
Ned Wright
962bd31d46 Jul 4th update 2024-07-04 14:10:34 +00:00
WrightNed
01770475ec Merge pull request #153 from openappsec/Jun_26_2024-Dev
June 27th update
2024-07-01 11:42:11 +03:00
Ned Wright
78b114a274 June 27th update 2024-06-27 12:05:38 +00:00
WrightNed
81b1aec487 Merge pull request #148 from openappsec/orianelou-new-policy-files
Orianelou new policy files
2024-06-19 16:18:41 +03:00
orianelou
be6591a670 Update local_policy.yaml 2024-06-17 13:49:48 +03:00
orianelou
663782009c Update local_policy.yaml 2024-06-17 13:49:18 +03:00
orianelou
9392bbb26c Update local_policy.yaml 2024-06-17 13:49:01 +03:00
orianelou
46682bcdce Update local_policy.yaml 2024-06-17 13:48:39 +03:00
orianelou
057bc42375 Update local_policy.yaml 2024-06-17 13:47:24 +03:00
orianelou
88e0ccd308 Rename open-appsec-k8s-default-config-v1beta21.yaml to open-appsec-k8s-default-config-v1beta1.yaml 2024-06-17 13:45:02 +03:00
orianelou
4241b9c574 Create open-appsec-k8s-prevent-config-v1beta2.yaml 2024-06-17 13:44:45 +03:00
orianelou
4af9f18ada Create open-appsec-k8s-default-config-v1beta2.yaml 2024-06-17 13:44:25 +03:00
orianelou
3b533608b1 Create open-appsec-k8s-prevent-config-v1beta1.yaml 2024-06-17 13:42:13 +03:00
orianelou
74bb3086ec Create open-appsec-k8s-default-config-v1beta21.yaml 2024-06-17 13:41:29 +03:00
orianelou
504d1415a5 Create local_policy.yaml 2024-06-17 13:39:40 +03:00
orianelou
18b1b63c42 Create local_policy.yaml 2024-06-17 13:38:31 +03:00
orianelou
ded2a5ffc2 Create local_policy.yaml 2024-06-17 13:36:23 +03:00
orianelou
1254bb37b2 Create local_policy.yaml 2024-06-17 13:34:35 +03:00
orianelou
cf16343caa Create open-appsec-k8s-prevent-config-v1beta2.yaml 2024-06-16 10:56:16 +03:00
orianelou
78c4209406 Rename config/k8s/v1beta2/default/open-appsec-k8s-default-config-v1beta2.yaml to config/k8s/v1beta2/open-appsec-k8s-default-config-v1beta2.yaml 2024-06-16 10:55:23 +03:00
orianelou
3c8672c565 Rename config/k8s/v1beta2/open-appsec-k8s-default-config-v1beta2.yaml to config/k8s/v1beta2/default/open-appsec-k8s-default-config-v1beta2.yaml 2024-06-16 10:54:05 +03:00
orianelou
48d6baed3b Rename config/linux/v1beta2/local_policy.yaml to config/linux/v1beta2/default/local_policy.yaml 2024-06-16 10:44:39 +03:00
orianelou
8770257a60 Create local_policy.yaml for linux prevent 2024-06-16 10:44:21 +03:00
Ned Wright
fd5d093b24 Add --no-upgrade option to docker 2024-06-03 14:19:41 +00:00
WrightNed
d6debf8d8d Merge pull request #141 from openappsec/May_27_2024-Dev
May 27 2024 dev
2024-06-02 10:15:10 +03:00
Ned Wright
395b754575 Add dammy get-cloud-metadata.sh script 2024-05-29 11:01:17 +00:00
Ned Wright
dc000372c4 Turn on optimization by default 2024-05-27 12:05:16 +00:00
Ned Wright
941c641174 Change cloud default logging 2024-05-27 11:56:05 +00:00
Ned Wright
fdc148aa9b May 27 update 2024-05-27 09:05:33 +00:00
orianelou
307fd8897d Rename config/k8s/open-appsec-k8s-default-config-v1beta2.yaml to config/k8s/v1beta2/open-appsec-k8s-default-config-v1beta2.yaml 2024-05-21 15:24:55 +03:00
orianelou
afd2b4930b Create open-appsec-k8s-default-config-v1beta2.yaml 2024-05-21 15:24:33 +03:00
orianelou
1fb9a29223 Create local_policy.yaml 2024-05-21 15:22:54 +03:00
WrightNed
253ca70de6 Merge pull request #136 from chkp-omris/main
Update agent-intelligence-service package
2024-05-19 15:30:22 +03:00
chkp-omris
938f625535 Merge branch 'openappsec:main' into main 2024-05-19 15:21:46 +03:00
chkp-omris
183d14fc55 Update agent-intelligence-service package 2024-05-19 12:21:10 +00:00
WrightNed
1f3d4ed5e1 Merge pull request #135 from openappsec/Apr_21_2024-Dev
Apr 21 2024 dev
2024-05-19 11:08:26 +03:00
WrightNed
fdbd6d3786 Merge pull request #115 from openappsec/orianelou-patch-3
Update docker-compose.yaml
2024-05-19 11:07:22 +03:00
Ned Wright
4504138a4a Change all deployments to embedded 2024-04-22 09:46:50 +00:00
Ned Wright
66ed4a8d81 April 21th 2024 update 2024-04-21 13:57:46 +00:00
WrightNed
189c9209c9 Merge pull request #122 from openappsec/Apr_14_2024-Dev
Apr 14 2024 dev
2024-04-17 12:40:41 +03:00
Ned Wright
1a1580081c Add watchdog changes 2024-04-16 14:06:43 +00:00
Ned Wright
942b2ef8b4 2024 April 14th update 2024-04-14 12:55:54 +00:00
Ned Wright
7a7f65a77a Detect docker on http transaction installation 2024-04-14 11:28:53 +00:00
Ned Wright
98639d9cb6 configuration loading changes 2024-04-04 17:11:06 +00:00
Ned Wright
b3de81d9d9 Updating orchestration_package.sh 2024-03-31 08:48:55 +00:00
Ned Wright
a77fd9a6d0 Remove old data 2024-03-27 14:30:40 +00:00
Ned Wright
8454b2dd9b Open Appsec helm chart automation Wed Mar 27 16:27:33 IST 2024 latest 2024-03-27 16:27:33 +02:00
Ned Wright
3913e1e8b3 Update entry.sh 2024-03-26 16:05:23 +00:00
WrightNed
262b2e59ff Merge pull request #117 from openappsec/Mar_21_2024-Dev
Mar 21 2024 dev
2024-03-26 13:53:49 +02:00
Ned Wright
a01c65994a Edit components/security_apps/layer_7_access_control/layer_7_access_control.cc 2024-03-25 14:53:52 +00:00
WrightNed
1d13973ae2 Update entry.sh 2024-03-25 15:56:00 +02:00
Ned Wright
c20fa9f966 Mar 21st 2024 update 2024-03-21 15:31:38 +00:00
643 changed files with 170629 additions and 87386 deletions

36
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,36 @@
---
name: "Bug Report"
about: "Report a bug with open-appsec"
labels: [bug]
---
**Checklist**
- Have you checked the open-appsec troubleshooting guides - https://docs.openappsec.io/troubleshooting/troubleshooting
- Yes / No
- Have you checked the existing issues and discussions in github for the same issue
- Yes / No
- Have you checked the known limitations for the same issue - https://docs.openappsec.io/release-notes#limitations
- Yes / No
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Run '...'
3. See error '...'
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots or Logs**
If applicable, add screenshots or logs to help explain the issue.
**Environment (please complete the following information):**
- open-appsec version:
- Deployment type (Docker, Kubernetes, etc.):
- OS:
**Additional context**
Add any other context about the problem here.

8
.github/ISSUE_TEMPLATE/config.yml vendored Normal file
View File

@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: "Documentation & Troubleshooting"
url: "https://docs.openappsec.io/"
about: "Check the documentation before submitting an issue."
- name: "Feature Requests & Discussions"
url: "https://github.com/openappsec/openappsec/discussions"
about: "Please open a discussion for feature requests."

View File

@@ -0,0 +1,17 @@
---
name: "Nginx Version Support Request"
about: "Request for a specific Nginx version to be supported"
---
**Nginx & OS Version:**
Which Nginx and OS version are you using?
**Output of nginx -V**
Share the output of nginx -V
**Expected Behavior:**
What do you expect to happen with this version?
**Checklist**
- Have you considered a Docker-based deployment? Find more information here: https://docs.openappsec.io/getting-started/start-with-docker
- Yes / No

View File

@@ -1,7 +1,7 @@
cmake_minimum_required (VERSION 2.8.4)
project (ngen)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-terminate")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2 -fPIC -Wall -Wno-terminate")
execute_process(COMMAND grep -c "Alpine Linux" /etc/os-release OUTPUT_VARIABLE IS_ALPINE)
if(NOT IS_ALPINE EQUAL "0")

View File

@@ -6,7 +6,7 @@
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/6629/badge)](https://bestpractices.coreinfrastructure.org/projects/6629)
# About
[open-appsec](https://www.openappsec.io) (openappsec.io) builds on machine learning to provide preemptive web app & API threat protection against OWASP-Top-10 and zero-day attacks. It can be deployed as an add-on to Kubernetes Ingress, NGINX, Envoy (soon), and API Gateways.
[open-appsec](https://www.openappsec.io) (openappsec.io) builds on machine learning to provide preemptive web app & API threat protection against OWASP-Top-10 and zero-day attacks. It can be deployed as an add-on to Linux, Docker or K8s deployments, on NGINX, Kong, APISIX, or Envoy.
The open-appsec engine learns how users normally interact with your web application. It then uses this information to automatically detect requests that fall outside of normal operations, and conducts further analysis to decide whether the request is malicious or not.
@@ -39,13 +39,13 @@ open-appsec can be managed using multiple methods:
* [Using SaaS Web Management](https://docs.openappsec.io/getting-started/using-the-web-ui-saas)
open-appsec Web UI:
![image](https://github.com/openappsec/openappsec/assets/114033741/22d99379-df52-45c8-984f-1b820635f3b9)
<img width="1854" height="775" alt="image" src="https://github.com/user-attachments/assets/4c6f7b0a-14f3-4f02-9ab0-ddadc9979b8d" />
## Deployment Playgrounds (Virtual labs)
You can experiment with open-appsec using [Playgrounds](https://www.openappsec.io/playground)
![image](https://github.com/openappsec/openappsec/assets/114033741/14d35d69-4577-48fc-ae87-ea344888e94d)
<img width="781" height="878" alt="image" src="https://github.com/user-attachments/assets/0ddee216-5cdf-4288-8c41-cc28cfbf3297" />
# Resources
* [Project Website](https://openappsec.io)
@@ -54,27 +54,21 @@ You can experiment with open-appsec using [Playgrounds](https://www.openappsec.i
# Installation
For Kubernetes (NGINX Ingress) using the installer:
For Kubernetes (NGINX / Kong / APISIX / Istio) using Helm: follow [documentation](https://docs.openappsec.io/getting-started/start-with-kubernetes)
```bash
$ wget https://downloads.openappsec.io/open-appsec-k8s-install && chmod +x open-appsec-k8s-install
$ ./open-appsec-k8s-install
```
For Kubernetes (NGINX or Kong) using Helm: follow [documentation](https://docs.openappsec.io/getting-started/start-with-kubernetes/install-using-helm-ingress-nginx-and-kong); use this method if you've built your own containers.
For Linux (NGINX or Kong) using the installer (list of supported/pre-compiled NGINX attachments is available [here](https://downloads.openappsec.io/packages/supported-nginx.txt)):
For Linux (NGINX / Kong / APISIX) using the installer (list of supported/pre-compiled NGINX attachments is available [here](https://downloads.openappsec.io/packages/supported-nginx.txt)):
```bash
$ wget https://downloads.openappsec.io/open-appsec-install && chmod +x open-appsec-install
$ ./open-appsec-install --auto
```
For the Kong Lua-based plugin, follow [documentation](https://docs.openappsec.io/getting-started/start-with-linux)
For Linux, if you've built your own package, use the following commands:
```bash
$ install-cp-nano-agent.sh --install --hybrid_mode
$ install-cp-nano-service-http-transaction-handler.sh install
$ install-cp-nano-service-http-transaction-handler.sh --install
$ install-cp-nano-attachment-registration-manager.sh --install
```
You can add the ```--token <token>``` and ```--email <email address>``` options to the first command; to get a token, follow [documentation](https://docs.openappsec.io/getting-started/using-the-web-ui-saas/connect-deployed-agents-to-saas-management-k8s-and-linux).
@@ -177,7 +171,7 @@ open-appsec code was audited by an independent third party in September-October
See the [full report](https://github.com/openappsec/openappsec/blob/main/LEXFO-CHP20221014-Report-Code_audit-OPEN-APPSEC-v1.2.pdf).
### Reporting security vulnerabilities
If you've found a vulnerability or a potential vulnerability in open-appsec please let us know at securityalert@openappsec.io. We'll send a confirmation email to acknowledge your report within 24 hours, and we'll send an additional email when we've identified the issue positively or negatively.
If you've found a vulnerability or a potential vulnerability in open-appsec please let us know at security-alert@openappsec.io. We'll send a confirmation email to acknowledge your report within 24 hours, and we'll send an additional email when we've identified the issue positively or negatively.
# License

View File

@@ -95,6 +95,18 @@ getFailOpenHoldTimeout()
return conf_data.getNumericalValue("fail_open_hold_timeout");
}
unsigned int
getHoldVerdictPollingTime()
{
return conf_data.getNumericalValue("hold_verdict_polling_time");
}
unsigned int
getHoldVerdictRetries()
{
return conf_data.getNumericalValue("hold_verdict_retries");
}
unsigned int
getMaxSessionsPerMinute()
{
@@ -155,6 +167,30 @@ getWaitingForVerdictThreadTimeout()
return conf_data.getNumericalValue("waiting_for_verdict_thread_timeout_msec");
}
unsigned int
getMinRetriesForVerdict()
{
return conf_data.getNumericalValue("min_retries_for_verdict");
}
unsigned int
getMaxRetriesForVerdict()
{
return conf_data.getNumericalValue("max_retries_for_verdict");
}
unsigned int
getReqBodySizeTrigger()
{
return conf_data.getNumericalValue("body_size_trigger");
}
unsigned int
getRemoveResServerHeader()
{
return conf_data.getNumericalValue("remove_server_header");
}
int
isIPAddress(c_str ip_str)
{

View File

@@ -63,32 +63,44 @@ TEST_F(HttpAttachmentUtilTest, GetValidAttachmentConfiguration)
"\"waiting_for_verdict_thread_timeout_msec\": 75,\n"
"\"req_header_thread_timeout_msec\": 10,\n"
"\"ip_ranges\": " + createIPRangesString(ip_ranges) + ",\n"
"\"static_resources_path\": \"" + static_resources_path + "\""
"\"static_resources_path\": \"" + static_resources_path + "\",\n"
"\"min_retries_for_verdict\": 1,\n"
"\"max_retries_for_verdict\": 3,\n"
"\"hold_verdict_retries\": 3,\n"
"\"hold_verdict_polling_time\": 1,\n"
"\"body_size_trigger\": 777,\n"
"\"remove_server_header\": 1\n"
"}\n";
ofstream valid_configuration_file(attachment_configuration_file_name);
valid_configuration_file << valid_configuration;
valid_configuration_file.close();
EXPECT_EQ(initAttachmentConfig(attachment_configuration_file_name.c_str()), 1);
EXPECT_EQ(getDbgLevel(), 2);
EXPECT_EQ(getDbgLevel(), 2u);
EXPECT_EQ(getStaticResourcesPath(), static_resources_path);
EXPECT_EQ(isFailOpenMode(), 0);
EXPECT_EQ(getFailOpenTimeout(), 1234);
EXPECT_EQ(getFailOpenTimeout(), 1234u);
EXPECT_EQ(isFailOpenHoldMode(), 1);
EXPECT_EQ(getFailOpenHoldTimeout(), 4321);
EXPECT_EQ(getFailOpenHoldTimeout(), 4321u);
EXPECT_EQ(isFailOpenOnSessionLimit(), 1);
EXPECT_EQ(getMaxSessionsPerMinute(), 0);
EXPECT_EQ(getNumOfNginxIpcElements(), 200);
EXPECT_EQ(getKeepAliveIntervalMsec(), 10000);
EXPECT_EQ(getResProccessingTimeout(), 420);
EXPECT_EQ(getReqProccessingTimeout(), 42);
EXPECT_EQ(getRegistrationThreadTimeout(), 101);
EXPECT_EQ(getReqHeaderThreadTimeout(), 10);
EXPECT_EQ(getReqBodyThreadTimeout(), 155);
EXPECT_EQ(getResHeaderThreadTimeout(), 1);
EXPECT_EQ(getResBodyThreadTimeout(), 0);
EXPECT_EQ(getWaitingForVerdictThreadTimeout(), 75);
EXPECT_EQ(getMaxSessionsPerMinute(), 0u);
EXPECT_EQ(getNumOfNginxIpcElements(), 200u);
EXPECT_EQ(getKeepAliveIntervalMsec(), 10000u);
EXPECT_EQ(getResProccessingTimeout(), 420u);
EXPECT_EQ(getReqProccessingTimeout(), 42u);
EXPECT_EQ(getRegistrationThreadTimeout(), 101u);
EXPECT_EQ(getReqHeaderThreadTimeout(), 10u);
EXPECT_EQ(getReqBodyThreadTimeout(), 155u);
EXPECT_EQ(getResHeaderThreadTimeout(), 1u);
EXPECT_EQ(getResBodyThreadTimeout(), 0u);
EXPECT_EQ(getMinRetriesForVerdict(), 1u);
EXPECT_EQ(getMaxRetriesForVerdict(), 3u);
EXPECT_EQ(getReqBodySizeTrigger(), 777u);
EXPECT_EQ(getWaitingForVerdictThreadTimeout(), 75u);
EXPECT_EQ(getInspectionMode(), ngx_http_inspection_mode::BLOCKING_THREAD);
EXPECT_EQ(getRemoveResServerHeader(), 1u);
EXPECT_EQ(getHoldVerdictRetries(), 3u);
EXPECT_EQ(getHoldVerdictPollingTime(), 1u);
EXPECT_EQ(isDebugContext("1.2.3.4", "5.6.7.8", 80, "GET", "test", "/abc"), 1);
EXPECT_EQ(isDebugContext("1.2.3.9", "5.6.7.8", 80, "GET", "test", "/abc"), 0);

View File

@@ -3,4 +3,4 @@ dependencies:
repository: https://charts.bitnami.com/bitnami
version: 12.2.8
digest: sha256:0d13b8b0c66b8e18781eac510ce58b069518ff14a6a15ad90375e7f0ffad71fe
generated: "2024-02-18T16:45:15.395307713Z"
generated: "2024-03-26T14:53:49.928153508Z"

View File

@@ -1,7 +1,5 @@
annotations:
artifacthub.io/changes: |-
- "update web hook cert gen to latest release v20231226-1a7112e06"
- "Update Ingress-Nginx version controller-v1.9.6"
artifacthub.io/changes: '- "Update Ingress-Nginx version controller-v1.10.0"'
artifacthub.io/prerelease: "false"
apiVersion: v2
appVersion: latest
@@ -17,4 +15,4 @@ kubeVersion: '>=1.20.0-0'
name: open-appsec-k8s-nginx-ingress
sources:
- https://github.com/kubernetes/ingress-nginx
version: 4.9.1
version: 4.10.0

View File

@@ -2,7 +2,7 @@
[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
![Version: 4.9.1](https://img.shields.io/badge/Version-4.9.1-informational?style=flat-square) ![AppVersion: 1.9.6](https://img.shields.io/badge/AppVersion-1.9.6-informational?style=flat-square)
![Version: 4.10.0](https://img.shields.io/badge/Version-4.10.0-informational?style=flat-square) ![AppVersion: 1.10.0](https://img.shields.io/badge/AppVersion-1.10.0-informational?style=flat-square)
To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
@@ -253,11 +253,11 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu
| controller.admissionWebhooks.namespaceSelector | object | `{}` | |
| controller.admissionWebhooks.objectSelector | object | `{}` | |
| controller.admissionWebhooks.patch.enabled | bool | `true` | |
| controller.admissionWebhooks.patch.image.digest | string | `"sha256:25d6a5f11211cc5c3f9f2bf552b585374af287b4debf693cacbe2da47daa5084"` | |
| controller.admissionWebhooks.patch.image.digest | string | `"sha256:44d1d0e9f19c63f58b380c5fddaca7cf22c7cee564adeff365225a5df5ef3334"` | |
| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | |
| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | |
| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | |
| controller.admissionWebhooks.patch.image.tag | string | `"v20231226-1a7112e06"` | |
| controller.admissionWebhooks.patch.image.tag | string | `"v1.4.0"` | |
| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources |
| controller.admissionWebhooks.patch.networkPolicy.enabled | bool | `false` | Enable 'networkPolicy' or not |
| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | |
@@ -317,7 +317,7 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu
| controller.hostname | object | `{}` | Optionally customize the pod hostname. |
| controller.image.allowPrivilegeEscalation | bool | `false` | |
| controller.image.chroot | bool | `false` | |
| controller.image.digest | string | `"sha256:1405cc613bd95b2c6edd8b2a152510ae91c7e62aea4698500d23b2145960ab9c"` | |
| controller.image.digest | string | `"sha256:42b3f0e5d0846876b1791cd3afeb5f1cbbe4259d6f35651dcc1b5c980925379c"` | |
| controller.image.digestChroot | string | `"sha256:7eb46ff733429e0e46892903c7394aff149ac6d284d92b3946f3baf7ff26a096"` | |
| controller.image.image | string | `"ingress-nginx/controller"` | |
| controller.image.pullPolicy | string | `"IfNotPresent"` | |
@@ -326,7 +326,7 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu
| controller.image.runAsNonRoot | bool | `true` | |
| controller.image.runAsUser | int | `101` | |
| controller.image.seccompProfile.type | string | `"RuntimeDefault"` | |
| controller.image.tag | string | `"v1.9.6"` | |
| controller.image.tag | string | `"v1.10.0"` | |
| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation |
| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). |
| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass |

View File

@@ -0,0 +1,9 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.10.0
* - "Update Ingress-Nginx version controller-v1.10.0"
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.9.1...helm-chart-4.10.0

View File

@@ -29,7 +29,7 @@
- --watch-namespace={{ default "$(POD_NAMESPACE)" .Values.controller.scope.namespace }}
{{- end }}
{{- if and (not .Values.controller.scope.enabled) .Values.controller.scope.namespaceSelector }}
- --watch-namespace-selector={{ default "" .Values.controller.scope.namespaceSelector }}
- --watch-namespace-selector={{ .Values.controller.scope.namespaceSelector }}
{{- end }}
{{- if and .Values.controller.reportNodeInternalIp .Values.controller.hostNetwork }}
- --report-node-internal-ip-address={{ .Values.controller.reportNodeInternalIp }}
@@ -54,6 +54,9 @@
{{- if .Values.controller.watchIngressWithoutClass }}
- --watch-ingress-without-class=true
{{- end }}
{{- if not .Values.controller.metrics.enabled }}
- --enable-metrics={{ .Values.controller.metrics.enabled }}
{{- end }}
{{- if .Values.controller.enableTopologyAwareRouting }}
- --enable-topology-aware-routing=true
{{- end }}
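A minimal values sketch for the metrics flag handled by the template block above (a hypothetical excerpt, not a chart default):

```yaml
# Disabling controller metrics; with this value, the template above renders
# --enable-metrics=false into the controller container args.
controller:
  metrics:
    enabled: false
```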

View File

@@ -1,4 +1,4 @@
{{- if and ( .Values.controller.metrics.enabled ) ( .Values.controller.metrics.prometheusRule.enabled ) ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) -}}
{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.prometheusRule.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:

View File

@@ -34,7 +34,7 @@ spec:
http-headers: false
request-body: false
log-destination:
cloud: false
cloud: true
stdout:
format: json-formatted
---

View File

@@ -15,3 +15,37 @@ tests:
- equal:
path: metadata.name
value: RELEASE-NAME-open-appsec-k8s-nginx-ingress-controller
- it: should create a DaemonSet with argument `--enable-metrics=false` if `controller.metrics.enabled` is false
set:
controller.kind: DaemonSet
kind: Vanilla
controller.metrics.enabled: false
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --enable-metrics=false
- it: should create a DaemonSet without argument `--enable-metrics=false` if `controller.metrics.enabled` is true
set:
controller.kind: DaemonSet
kind: Vanilla
controller.metrics.enabled: true
asserts:
- notContains:
path: spec.template.spec.containers[0].args
content: --enable-metrics=false
- it: should create a DaemonSet with resource limits if `controller.resources.limits` is set
set:
controller.kind: DaemonSet
kind: Vanilla
controller.resources.limits.cpu: 500m
controller.resources.limits.memory: 512Mi
asserts:
- equal:
path: spec.template.spec.containers[0].resources.limits.cpu
value: 500m
- equal:
path: spec.template.spec.containers[0].resources.limits.memory
value: 512Mi

View File

@@ -4,8 +4,6 @@ templates:
tests:
- it: should create a Deployment
set:
kind: Vanilla
asserts:
- hasDocuments:
count: 1
@@ -24,6 +22,22 @@ tests:
path: spec.replicas
value: 3
- it: should create a Deployment with argument `--enable-metrics=false` if `controller.metrics.enabled` is false
set:
controller.metrics.enabled: false
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --enable-metrics=false
- it: should create a Deployment without argument `--enable-metrics=false` if `controller.metrics.enabled` is true
set:
controller.metrics.enabled: true
asserts:
- notContains:
path: spec.template.spec.containers[0].args
content: --enable-metrics=false
- it: should create a Deployment with resource limits if `controller.resources.limits` is set
set:
controller.resources.limits.cpu: 500m

View File

@@ -26,8 +26,8 @@ controller:
## for backwards compatibility consider setting the full image url via the repository value below
## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
## repository:
tag: "v1.9.6"
digest: sha256:1405cc613bd95b2c6edd8b2a152510ae91c7e62aea4698500d23b2145960ab9c
tag: "v1.10.0"
digest: sha256:42b3f0e5d0846876b1791cd3afeb5f1cbbe4259d6f35651dcc1b5c980925379c
digestChroot: sha256:7eb46ff733429e0e46892903c7394aff149ac6d284d92b3946f3baf7ff26a096
pullPolicy: IfNotPresent
runAsNonRoot: true
@@ -781,8 +781,8 @@ controller:
## for backwards compatibility consider setting the full image url via the repository value below
## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
## repository:
tag: v20231226-1a7112e06
digest: sha256:25d6a5f11211cc5c3f9f2bf552b585374af287b4debf693cacbe2da47daa5084
tag: v1.4.0
digest: sha256:44d1d0e9f19c63f58b380c5fddaca7cf22c7cee564adeff365225a5df5ef3334
pullPolicy: IfNotPresent
# -- Provide a priority class name to the webhook patching job
##
@@ -1198,7 +1198,7 @@ appsec:
image:
registry: ghcr.io/openappsec
image: smartsync-tuning
tag: 1.1.3
tag: latest
enabled: false
replicaCount: 1
securityContext:

View File

@@ -1,5 +1,27 @@
# Changelog
## 2.38.0
### Changes
* Added support for setting `SVC.tls.appProtocol` and `SVC.http.appProtocol` values to configure the appProtocol fields
for Kubernetes Service HTTP and TLS ports. It might be useful for integration with external load balancers like GCP.
[#1018](https://github.com/Kong/charts/pull/1018)
## 2.37.1
* Rename the controller status port. This fixes a collision with the proxy status port in the Prometheus ServiceMonitor.
[#1008](https://github.com/Kong/charts/pull/1008)
## 2.37.0
### Changes
* Bumped default `kong/kubernetes-ingress-controller` image tag and updated CRDs to 3.1.
[#1011](https://github.com/Kong/charts/pull/1011)
* Bumped default `kong` image tag to 3.6.
[#1011](https://github.com/Kong/charts/pull/1011)
## 2.36.0
### Fixed

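A minimal values sketch of the appProtocol option described in the 2.38.0 entry above (the proxy service and the values shown are assumptions, not chart defaults):

```yaml
# Hypothetical values.yaml excerpt for the open-appsec-kong chart
proxy:
  http:
    appProtocol: http    # appProtocol set on the Service's HTTP port; if left empty, none is set
  tls:
    appProtocol: https   # appProtocol set on the Service's TLS port
```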
View File

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 1.1.6
appVersion: 1.1.8
dependencies:
- condition: postgresql.enabled
name: postgresql
@@ -14,4 +14,4 @@ maintainers:
name: open-appsec-kong
sources:
- https://github.com/Kong/charts/tree/main/charts/kong
version: 2.36.0
version: 2.38.0

View File

@@ -666,40 +666,42 @@ nodes.
mixed TCP/UDP LoadBalancer Services). It _does not_ support the `http`, `tls`,
or `ingress` sections, as it is used only for stream listens.
| Parameter | Description | Default |
|------------------------------------|---------------------------------------------------------------------------------------|--------------------------|
| SVC.enabled | Create Service resource for SVC (admin, proxy, manager, etc.) | |
| SVC.http.enabled | Enables http on the service | |
| SVC.http.servicePort | Service port to use for http | |
| SVC.http.containerPort | Container port to use for http | |
| SVC.http.nodePort | Node port to use for http | |
| SVC.http.hostPort | Host port to use for http | |
| SVC.http.parameters | Array of additional listen parameters | `[]` |
| SVC.tls.enabled | Enables TLS on the service | |
| SVC.tls.containerPort | Container port to use for TLS | |
| SVC.tls.servicePort | Service port to use for TLS | |
| SVC.tls.nodePort | Node port to use for TLS | |
| SVC.tls.hostPort | Host port to use for TLS | |
| SVC.tls.overrideServiceTargetPort | Override service port to use for TLS without touching Kong containerPort | |
| SVC.tls.parameters | Array of additional listen parameters | `["http2"]` |
| SVC.type | k8s service type. Options: NodePort, ClusterIP, LoadBalancer | |
| SVC.clusterIP | k8s service clusterIP | |
| SVC.loadBalancerClass | loadBalancerClass to use for LoadBalancer provisionning | |
| SVC.loadBalancerSourceRanges | Limit service access to CIDRs if set and service type is `LoadBalancer` | `[]` |
| SVC.loadBalancerIP | Reuse an existing ingress static IP for the service | |
| SVC.externalIPs | IPs for which nodes in the cluster will also accept traffic for the servic | `[]` |
| SVC.externalTrafficPolicy | k8s service's externalTrafficPolicy. Options: Cluster, Local | |
| SVC.ingress.enabled | Enable ingress resource creation (works with SVC.type=ClusterIP) | `false` |
| SVC.ingress.ingressClassName | Set the ingressClassName to associate this Ingress with an IngressClass | |
| SVC.ingress.hostname | Ingress hostname | `""` |
| SVC.ingress.path | Ingress path. | `/` |
| SVC.ingress.pathType | Ingress pathType. One of `ImplementationSpecific`, `Exact` or `Prefix` | `ImplementationSpecific` |
| SVC.ingress.hosts | Slice of hosts configurations, including `hostname`, `path` and `pathType` keys | `[]` |
| SVC.ingress.tls | Name of secret resource or slice of `secretName` and `hosts` keys | |
| SVC.ingress.annotations | Ingress annotations. See documentation for your ingress controller for details | `{}` |
| SVC.ingress.labels | Ingress labels. Additional custom labels to add to the ingress. | `{}` |
| SVC.annotations | Service annotations | `{}` |
| SVC.labels | Service labels | `{}` |
| Parameter | Description | Default |
|-----------------------------------|-------------------------------------------------------------------------------------------|--------------------------|
| SVC.enabled | Create Service resource for SVC (admin, proxy, manager, etc.) | |
| SVC.http.enabled | Enables http on the service | |
| SVC.http.servicePort | Service port to use for http | |
| SVC.http.containerPort | Container port to use for http | |
| SVC.http.nodePort | Node port to use for http | |
| SVC.http.hostPort | Host port to use for http | |
| SVC.http.parameters | Array of additional listen parameters | `[]` |
| SVC.http.appProtocol | `appProtocol` to be set in a Service's port. If left empty, no `appProtocol` will be set. | |
| SVC.tls.enabled | Enables TLS on the service | |
| SVC.tls.containerPort | Container port to use for TLS | |
| SVC.tls.servicePort | Service port to use for TLS | |
| SVC.tls.nodePort | Node port to use for TLS | |
| SVC.tls.hostPort | Host port to use for TLS | |
| SVC.tls.overrideServiceTargetPort | Override service port to use for TLS without touching Kong containerPort | |
| SVC.tls.parameters | Array of additional listen parameters | `["http2"]` |
| SVC.tls.appProtocol | `appProtocol` to be set in a Service's port. If left empty, no `appProtocol` will be set. | |
| SVC.type | k8s service type. Options: NodePort, ClusterIP, LoadBalancer | |
| SVC.clusterIP | k8s service clusterIP | |
| SVC.loadBalancerClass             | loadBalancerClass to use for LoadBalancer provisioning                                     |                          |
| SVC.loadBalancerSourceRanges | Limit service access to CIDRs if set and service type is `LoadBalancer` | `[]` |
| SVC.loadBalancerIP | Reuse an existing ingress static IP for the service | |
| SVC.externalIPs                   | IPs for which nodes in the cluster will also accept traffic for the service                | `[]`                     |
| SVC.externalTrafficPolicy | k8s service's externalTrafficPolicy. Options: Cluster, Local | |
| SVC.ingress.enabled | Enable ingress resource creation (works with SVC.type=ClusterIP) | `false` |
| SVC.ingress.ingressClassName | Set the ingressClassName to associate this Ingress with an IngressClass | |
| SVC.ingress.hostname | Ingress hostname | `""` |
| SVC.ingress.path | Ingress path. | `/` |
| SVC.ingress.pathType | Ingress pathType. One of `ImplementationSpecific`, `Exact` or `Prefix` | `ImplementationSpecific` |
| SVC.ingress.hosts | Slice of hosts configurations, including `hostname`, `path` and `pathType` keys | `[]` |
| SVC.ingress.tls | Name of secret resource or slice of `secretName` and `hosts` keys | |
| SVC.ingress.annotations | Ingress annotations. See documentation for your ingress controller for details | `{}` |
| SVC.ingress.labels | Ingress labels. Additional custom labels to add to the ingress. | `{}` |
| SVC.annotations | Service annotations | `{}` |
| SVC.labels | Service labels | `{}` |
#### Admin Service mTLS

View File

@@ -9,8 +9,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -33,9 +33,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -90,7 +90,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -205,7 +205,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -274,8 +274,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-custom-dbless-config
namespace: default
- object:
@@ -286,8 +286,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-admin
namespace: default
spec:
@@ -309,8 +309,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -336,9 +336,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -364,8 +364,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -9,8 +9,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -84,8 +84,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -108,9 +108,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -138,7 +138,7 @@ SnapShot = """
value: https://localhost:8444
- name: CONTROLLER_PUBLISH_SERVICE
value: default/chartsnap-kong-proxy
image: kong/kubernetes-ingress-controller:3.0
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -159,7 +159,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -240,7 +240,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -350,7 +350,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -408,8 +408,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -617,6 +617,38 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -657,8 +689,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -677,8 +709,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -742,8 +774,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -766,8 +798,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -783,8 +815,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -797,8 +829,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -825,9 +857,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -854,8 +886,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -870,8 +902,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
@@ -881,8 +913,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -82,8 +82,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -105,9 +105,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -137,7 +137,7 @@ SnapShot = """
value: https://localhost:8444
- name: CONTROLLER_PUBLISH_SERVICE
value: default/chartsnap-kong-proxy
image: kong/kubernetes-ingress-controller:3.0
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -158,7 +158,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -241,7 +241,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -353,7 +353,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -410,8 +410,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -619,6 +619,38 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -658,8 +690,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -677,8 +709,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -741,8 +773,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -764,8 +796,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -780,8 +812,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -793,8 +825,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -820,9 +852,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -848,8 +880,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -863,8 +895,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
@@ -873,8 +905,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -82,8 +82,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -105,9 +105,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -135,7 +135,7 @@ SnapShot = """
value: https://localhost:8444
- name: CONTROLLER_PUBLISH_SERVICE
value: default/chartsnap-kong-proxy
image: kong/kubernetes-ingress-controller:3.0
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -156,7 +156,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -237,7 +237,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -347,7 +347,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -404,8 +404,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -430,8 +430,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -639,6 +639,38 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -678,8 +710,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -697,8 +729,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -761,8 +793,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -784,8 +816,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -800,8 +832,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -822,8 +854,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -849,9 +881,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -877,8 +909,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -892,8 +924,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
@@ -902,8 +934,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -82,8 +82,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -105,9 +105,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -135,7 +135,7 @@ SnapShot = """
value: https://localhost:8444
- name: CONTROLLER_PUBLISH_SERVICE
value: default/chartsnap-kong-proxy
image: kong/kubernetes-ingress-controller:3.0
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -156,7 +156,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -237,7 +237,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -347,7 +347,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -404,8 +404,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -432,8 +432,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -641,6 +641,38 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -680,8 +712,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -699,8 +731,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -763,8 +795,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -786,8 +818,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -802,8 +834,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -824,8 +856,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -851,9 +883,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -879,8 +911,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -894,8 +926,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
@@ -904,8 +936,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -82,8 +82,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -105,9 +105,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -135,7 +135,7 @@ SnapShot = """
value: https://localhost:8444
- name: CONTROLLER_PUBLISH_SERVICE
value: default/chartsnap-kong-proxy
image: kong/kubernetes-ingress-controller:3.0
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -156,7 +156,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -237,7 +237,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -347,7 +347,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -404,8 +404,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -428,8 +428,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -637,6 +637,38 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -676,8 +708,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -695,8 +727,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -759,8 +791,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -782,8 +814,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -798,8 +830,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -811,8 +843,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -838,9 +870,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -866,8 +898,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -881,8 +913,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
@@ -891,8 +923,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -82,8 +82,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -105,9 +105,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -135,7 +135,7 @@ SnapShot = """
value: https://localhost:8444
- name: CONTROLLER_PUBLISH_SERVICE
value: default/chartsnap-kong-proxy
image: kong/kubernetes-ingress-controller:3.0
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -156,7 +156,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -237,7 +237,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -347,7 +347,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -404,8 +404,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -463,8 +463,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -672,6 +672,38 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -711,8 +743,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -730,8 +762,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -794,8 +826,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -817,8 +849,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -833,8 +865,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -864,8 +896,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -891,9 +923,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -919,8 +951,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -934,8 +966,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
@@ -944,8 +976,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -82,8 +82,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -105,9 +105,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -158,7 +158,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -241,7 +241,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -353,7 +353,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -410,8 +410,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -690,8 +690,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -709,8 +709,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -773,8 +773,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -796,8 +796,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -812,8 +812,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -825,8 +825,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -852,9 +852,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -880,8 +880,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -895,8 +895,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
@@ -905,8 +905,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -0,0 +1,908 @@
[proxy-appprotocol-values]
SnapShot = """
- object:
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
- admissionReviewVersions:
- v1beta1
clientConfig:
caBundle: '###DYNAMIC_FIELD###'
service:
name: chartsnap-kong-validation-webhook
namespace: default
failurePolicy: Ignore
name: validations.kong.konghq.com
objectSelector:
matchExpressions:
- key: owner
operator: NotIn
values:
- helm
rules:
- apiGroups:
- configuration.konghq.com
apiVersions:
- '*'
operations:
- CREATE
- UPDATE
resources:
- kongconsumers
- kongplugins
- kongclusterplugins
- kongingresses
- apiGroups:
- \"\"
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- secrets
- services
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
- apiGroups:
- gateway.networking.k8s.io
apiVersions:
- v1alpha2
- v1beta1
- v1
operations:
- CREATE
- UPDATE
resources:
- gateways
- httproutes
sideEffects: None
- object:
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: app
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: app
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/name: kong
template:
metadata:
annotations:
kuma.io/gateway: enabled
kuma.io/service-account-token-volume: chartsnap-kong-token
traffic.sidecar.istio.io/includeInboundPorts: \"\"
labels:
app: chartsnap-kong
app.kubernetes.io/component: app
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
- args: null
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CONTROLLER_ADMISSION_WEBHOOK_LISTEN
value: 0.0.0.0:8080
- name: CONTROLLER_ELECTION_ID
value: kong-ingress-controller-leader-kong
- name: CONTROLLER_INGRESS_CLASS
value: kong
- name: CONTROLLER_KONG_ADMIN_TLS_SKIP_VERIFY
value: \"true\"
- name: CONTROLLER_KONG_ADMIN_URL
value: https://localhost:8444
- name: CONTROLLER_PUBLISH_SERVICE
value: default/chartsnap-kong-proxy
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: ingress-controller
ports:
- containerPort: 8080
name: webhook
protocol: TCP
- containerPort: 10255
name: cmetrics
protocol: TCP
- containerPort: 10254
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: 10254
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /admission-webhook
name: webhook-cert
readOnly: true
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: chartsnap-kong-token
readOnly: true
- env:
- name: KONG_ADMIN_ACCESS_LOG
value: /dev/stdout
- name: KONG_ADMIN_ERROR_LOG
value: /dev/stderr
- name: KONG_ADMIN_GUI_ACCESS_LOG
value: /dev/stdout
- name: KONG_ADMIN_GUI_ERROR_LOG
value: /dev/stderr
- name: KONG_ADMIN_LISTEN
value: 127.0.0.1:8444 http2 ssl, [::1]:8444 http2 ssl
- name: KONG_CLUSTER_LISTEN
value: \"off\"
- name: KONG_DATABASE
value: \"off\"
- name: KONG_KIC
value: \"on\"
- name: KONG_LUA_PACKAGE_PATH
value: /opt/?.lua;/opt/?/init.lua;;
- name: KONG_NGINX_WORKER_PROCESSES
value: \"2\"
- name: KONG_PORTAL_API_ACCESS_LOG
value: /dev/stdout
- name: KONG_PORTAL_API_ERROR_LOG
value: /dev/stderr
- name: KONG_PORT_MAPS
value: 80:8000, 443:8443
- name: KONG_PREFIX
value: /kong_prefix/
- name: KONG_PROXY_ACCESS_LOG
value: /dev/stdout
- name: KONG_PROXY_ERROR_LOG
value: /dev/stderr
- name: KONG_PROXY_LISTEN
value: 0.0.0.0:8000, [::]:8000, 0.0.0.0:8443 http2 ssl, [::]:8443 http2 ssl
- name: KONG_PROXY_STREAM_ACCESS_LOG
value: /dev/stdout basic
- name: KONG_PROXY_STREAM_ERROR_LOG
value: /dev/stderr
- name: KONG_ROUTER_FLAVOR
value: traditional
- name: KONG_STATUS_ACCESS_LOG
value: \"off\"
- name: KONG_STATUS_ERROR_LOG
value: /dev/stderr
- name: KONG_STATUS_LISTEN
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- kong
- quit
- --wait=15
livenessProbe:
failureThreshold: 3
httpGet:
path: /status
port: status
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: proxy
ports:
- containerPort: 8000
name: proxy
protocol: TCP
- containerPort: 8443
name: proxy-tls
protocol: TCP
- containerPort: 8100
name: status
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /status/ready
port: status
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /kong_prefix/
name: chartsnap-kong-prefix-dir
- mountPath: /tmp
name: chartsnap-kong-tmp
initContainers:
- command:
- rm
- -vrf
- $KONG_PREFIX/pids
env:
- name: KONG_ADMIN_ACCESS_LOG
value: /dev/stdout
- name: KONG_ADMIN_ERROR_LOG
value: /dev/stderr
- name: KONG_ADMIN_GUI_ACCESS_LOG
value: /dev/stdout
- name: KONG_ADMIN_GUI_ERROR_LOG
value: /dev/stderr
- name: KONG_ADMIN_LISTEN
value: 127.0.0.1:8444 http2 ssl, [::1]:8444 http2 ssl
- name: KONG_CLUSTER_LISTEN
value: \"off\"
- name: KONG_DATABASE
value: \"off\"
- name: KONG_KIC
value: \"on\"
- name: KONG_LUA_PACKAGE_PATH
value: /opt/?.lua;/opt/?/init.lua;;
- name: KONG_NGINX_WORKER_PROCESSES
value: \"2\"
- name: KONG_PORTAL_API_ACCESS_LOG
value: /dev/stdout
- name: KONG_PORTAL_API_ERROR_LOG
value: /dev/stderr
- name: KONG_PORT_MAPS
value: 80:8000, 443:8443
- name: KONG_PREFIX
value: /kong_prefix/
- name: KONG_PROXY_ACCESS_LOG
value: /dev/stdout
- name: KONG_PROXY_ERROR_LOG
value: /dev/stderr
- name: KONG_PROXY_LISTEN
value: 0.0.0.0:8000, [::]:8000, 0.0.0.0:8443 http2 ssl, [::]:8443 http2 ssl
- name: KONG_PROXY_STREAM_ACCESS_LOG
value: /dev/stdout basic
- name: KONG_PROXY_STREAM_ERROR_LOG
value: /dev/stderr
- name: KONG_ROUTER_FLAVOR
value: traditional
- name: KONG_STATUS_ACCESS_LOG
value: \"off\"
- name: KONG_STATUS_ERROR_LOG
value: /dev/stderr
- name: KONG_STATUS_LISTEN
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /kong_prefix/
name: chartsnap-kong-prefix-dir
- mountPath: /tmp
name: chartsnap-kong-tmp
securityContext: {}
serviceAccountName: chartsnap-kong
terminationGracePeriodSeconds: 30
volumes:
- emptyDir:
sizeLimit: 256Mi
name: chartsnap-kong-prefix-dir
- emptyDir:
sizeLimit: 1Gi
name: chartsnap-kong-tmp
- name: chartsnap-kong-token
projected:
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
- name: webhook-cert
secret:
secretName: chartsnap-kong-validation-webhook-keypair
- object:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
- configuration.konghq.com
resources:
- kongupstreampolicies
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongupstreampolicies/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongconsumergroups
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongconsumergroups/status
verbs:
- get
- patch
- update
- apiGroups:
- \"\"
resources:
- events
verbs:
- create
- patch
- apiGroups:
- \"\"
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- \"\"
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- \"\"
resources:
- secrets
verbs:
- list
- watch
- apiGroups:
- \"\"
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- \"\"
resources:
- services/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- ingressclassparameterses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongconsumers
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongconsumers/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongingresses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongingresses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongplugins
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongplugins/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- tcpingresses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- tcpingresses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- udpingresses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- udpingresses/status
verbs:
- get
- patch
- update
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses/status
verbs:
- get
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- get
- patch
- update
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongclusterplugins
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongclusterplugins/status
verbs:
- get
- patch
- update
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- object:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: chartsnap-kong
subjects:
- kind: ServiceAccount
name: chartsnap-kong
namespace: default
- object:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
- apiGroups:
- \"\"
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- \"\"
resourceNames:
- kong-ingress-controller-leader-kong-kong
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- \"\"
resources:
- configmaps
verbs:
- create
- apiGroups:
- \"\"
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- \"\"
resources:
- events
verbs:
- create
- patch
- apiGroups:
- \"\"
resources:
- services
verbs:
- get
- object:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: chartsnap-kong
subjects:
- kind: ServiceAccount
name: chartsnap-kong
namespace: default
- object:
apiVersion: v1
data:
tls.crt: '###DYNAMIC_FIELD###'
tls.key: '###DYNAMIC_FIELD###'
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
- object:
apiVersion: v1
data:
tls.crt: '###DYNAMIC_FIELD###'
tls.key: '###DYNAMIC_FIELD###'
kind: Secret
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
- object:
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
ports:
- name: kong-manager
port: 8002
protocol: TCP
targetPort: 8002
- name: kong-manager-tls
port: 8445
protocol: TCP
targetPort: 8445
selector:
app.kubernetes.io/component: app
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/name: kong
type: NodePort
- object:
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
ports:
- appProtocol: http
name: kong-proxy
port: 80
protocol: TCP
targetPort: 8000
- appProtocol: https
name: kong-proxy-tls
port: 443
protocol: TCP
targetPort: 8443
selector:
app.kubernetes.io/component: app
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/name: kong
type: LoadBalancer
- object:
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
ports:
- name: webhook
port: 443
protocol: TCP
targetPort: webhook
selector:
app.kubernetes.io/component: app
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -82,8 +82,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -105,9 +105,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -135,7 +135,7 @@ SnapShot = """
value: https://localhost:8444
- name: CONTROLLER_PUBLISH_SERVICE
value: default/chartsnap-kong-proxy
image: kong/kubernetes-ingress-controller:3.0
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -156,7 +156,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -237,7 +237,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -347,7 +347,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -404,8 +404,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -613,6 +613,38 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -652,8 +684,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -671,8 +703,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -735,8 +767,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -758,8 +790,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -774,8 +806,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -787,8 +819,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -814,9 +846,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -842,8 +874,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -857,8 +889,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
@@ -867,8 +899,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: my-kong-sa
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -82,8 +82,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -105,9 +105,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -158,7 +158,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -410,8 +410,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -619,6 +619,38 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -658,8 +690,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -677,8 +709,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -741,8 +773,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -764,8 +796,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -780,8 +812,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -793,8 +825,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -820,9 +852,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -848,8 +880,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -863,8 +895,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
@@ -873,8 +905,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -9,8 +9,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -32,9 +32,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -249,8 +249,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -276,9 +276,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -304,8 +304,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -82,8 +82,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -104,10 +104,10 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
environment: test
helm.sh/chart: kong-2.36.0
version: \"3.5\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -139,7 +139,7 @@ SnapShot = """
value: https://localhost:8444
- name: CONTROLLER_PUBLISH_SERVICE
value: default/chartsnap-kong-proxy
image: kong/kubernetes-ingress-controller:3.0
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -160,7 +160,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -252,7 +252,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -370,7 +370,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -447,8 +447,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -473,8 +473,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -497,8 +497,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -706,6 +706,38 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -745,8 +777,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -764,8 +796,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -828,8 +860,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -851,8 +883,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -867,8 +899,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -880,8 +912,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -907,9 +939,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -935,8 +967,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -950,8 +982,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: ServiceAccount
@@ -960,8 +992,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -83,8 +83,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -111,9 +111,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -150,7 +150,7 @@ SnapShot = """
envFrom:
- configMapRef:
name: env-config
image: kong/kubernetes-ingress-controller:3.0
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -171,7 +171,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -270,7 +270,7 @@ SnapShot = """
envFrom:
- configMapRef:
name: env-config
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -404,7 +404,7 @@ SnapShot = """
envFrom:
- configMapRef:
name: env-config
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -507,7 +507,7 @@ SnapShot = """
envFrom:
- configMapRef:
name: env-config
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: wait-for-db
resources: {}
@@ -724,8 +724,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-init-migrations
namespace: default
spec:
@@ -740,8 +740,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: kong-init-migrations
spec:
automountServiceAccountToken: false
@@ -819,7 +819,7 @@ SnapShot = """
envFrom:
- configMapRef:
name: env-config
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: kong-migrations
resources: {}
@@ -924,7 +924,7 @@ SnapShot = """
envFrom:
- configMapRef:
name: env-config
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: wait-for-postgres
resources: {}
@@ -977,8 +977,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-post-upgrade-migrations
namespace: default
spec:
@@ -993,8 +993,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: kong-post-upgrade-migrations
spec:
automountServiceAccountToken: false
@@ -1072,7 +1072,7 @@ SnapShot = """
envFrom:
- configMapRef:
name: env-config
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: kong-post-upgrade-migrations
resources: {}
@@ -1177,7 +1177,7 @@ SnapShot = """
envFrom:
- configMapRef:
name: env-config
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: wait-for-postgres
resources: {}
@@ -1232,8 +1232,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-pre-upgrade-migrations
namespace: default
spec:
@@ -1248,8 +1248,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: kong-pre-upgrade-migrations
spec:
automountServiceAccountToken: false
@@ -1327,7 +1327,7 @@ SnapShot = """
envFrom:
- configMapRef:
name: env-config
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: kong-upgrade-migrations
resources: {}
@@ -1432,7 +1432,7 @@ SnapShot = """
envFrom:
- configMapRef:
name: env-config
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: wait-for-postgres
resources: {}
@@ -1481,8 +1481,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -1505,10 +1505,26 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -1548,8 +1564,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -1567,8 +1583,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -1631,8 +1647,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-default
namespace: default
rules:
@@ -1841,6 +1857,22 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- object:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
@@ -1849,8 +1881,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -1869,8 +1901,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-default
namespace: default
roleRef:
@@ -1895,8 +1927,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-bash-wait-for-postgres
namespace: default
- object:
@@ -1917,8 +1949,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -1933,8 +1965,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -1961,8 +1993,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -1988,9 +2020,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -2024,8 +2056,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -2039,8 +2071,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: Service
@@ -2099,8 +2131,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -9,8 +9,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -33,9 +33,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -92,7 +92,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -208,7 +208,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -295,8 +295,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-custom-dbless-config
namespace: default
- object:
@@ -307,8 +307,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -334,9 +334,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -362,8 +362,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -9,8 +9,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -33,9 +33,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -92,7 +92,7 @@ SnapShot = """
value: 0.0.0.0:9000, [::]:9000, 0.0.0.0:9001 ssl, [::]:9001 ssl
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -212,7 +212,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: 0.0.0.0:9000, [::]:9000, 0.0.0.0:9001 ssl, [::]:9001 ssl
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -271,8 +271,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -304,8 +304,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-custom-dbless-config
namespace: default
- object:
@@ -316,8 +316,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -343,9 +343,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -379,8 +379,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -8,8 +8,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validations
namespace: default
webhooks:
@@ -82,8 +82,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
spec:
@@ -110,9 +110,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
version: \"3.6\"
spec:
automountServiceAccountToken: false
containers:
@@ -142,7 +142,7 @@ SnapShot = """
value: https://localhost:8444
- name: CONTROLLER_PUBLISH_SERVICE
value: default/chartsnap-kong-proxy
image: kong/kubernetes-ingress-controller:3.0
image: kong/kubernetes-ingress-controller:3.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
@@ -163,7 +163,7 @@ SnapShot = """
name: cmetrics
protocol: TCP
- containerPort: 10254
name: status
name: cstatus
protocol: TCP
readinessProbe:
failureThreshold: 3
@@ -261,7 +261,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
@@ -388,7 +388,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: clear-stale-pid
resources: {}
@@ -477,7 +477,7 @@ SnapShot = """
value: 0.0.0.0:8100, [::]:8100
- name: KONG_STREAM_LISTEN
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: wait-for-db
resources: {}
@@ -694,8 +694,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-init-migrations
namespace: default
spec:
@@ -710,8 +710,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: kong-init-migrations
spec:
automountServiceAccountToken: false
@@ -788,7 +788,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: kong-migrations
resources: {}
@@ -879,7 +879,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: wait-for-postgres
resources: {}
@@ -932,8 +932,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-post-upgrade-migrations
namespace: default
spec:
@@ -948,8 +948,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: kong-post-upgrade-migrations
spec:
automountServiceAccountToken: false
@@ -1026,7 +1026,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: kong-post-upgrade-migrations
resources: {}
@@ -1117,7 +1117,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: wait-for-postgres
resources: {}
@@ -1172,8 +1172,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-pre-upgrade-migrations
namespace: default
spec:
@@ -1188,8 +1188,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: kong-pre-upgrade-migrations
spec:
automountServiceAccountToken: false
@@ -1266,7 +1266,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: kong-upgrade-migrations
resources: {}
@@ -1357,7 +1357,7 @@ SnapShot = """
value: \"off\"
- name: KONG_NGINX_DAEMON
value: \"off\"
image: kong:3.5
image: kong:3.6
imagePullPolicy: IfNotPresent
name: wait-for-postgres
resources: {}
@@ -1406,8 +1406,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -1430,8 +1430,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
rules:
- apiGroups:
@@ -1639,6 +1639,38 @@ SnapShot = """
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- konglicenses/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults
verbs:
- get
- list
- watch
- apiGroups:
- configuration.konghq.com
resources:
- kongvaults/status
verbs:
- get
- patch
- update
- apiGroups:
- configuration.konghq.com
resources:
@@ -1678,8 +1710,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -1697,8 +1729,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
rules:
@@ -1761,8 +1793,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
roleRef:
@@ -1787,8 +1819,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-bash-wait-for-postgres
namespace: default
- object:
@@ -1802,8 +1834,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-ca-keypair
namespace: default
type: kubernetes.io/tls
@@ -1818,8 +1850,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook-keypair
namespace: default
type: kubernetes.io/tls
@@ -1846,8 +1878,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-manager
namespace: default
spec:
@@ -1873,9 +1905,9 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
app.kubernetes.io/version: \"3.6\"
enable-metrics: \"true\"
helm.sh/chart: kong-2.36.0
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-proxy
namespace: default
spec:
@@ -1901,8 +1933,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong-validation-webhook
namespace: default
spec:
@@ -1916,8 +1948,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
- object:
apiVersion: v1
kind: Service
@@ -1976,8 +2008,8 @@ SnapShot = """
app.kubernetes.io/instance: chartsnap
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: kong
app.kubernetes.io/version: \"3.5\"
helm.sh/chart: kong-2.36.0
app.kubernetes.io/version: \"3.6\"
helm.sh/chart: kong-2.38.0
name: chartsnap-kong
namespace: default
"""

View File

@@ -0,0 +1,7 @@
# These values test that `proxy.*.appProtocol` can be set to a custom value.
proxy:
http:
appProtocol: "http"
tls:
appProtocol: "https"

View File

@@ -213,6 +213,9 @@ spec:
- name: kong-{{ .serviceName }}
port: {{ .http.servicePort }}
targetPort: {{ .http.containerPort }}
{{- if .http.appProtocol }}
appProtocol: {{ .http.appProtocol }}
{{- end }}
{{- if (and (or (eq .type "LoadBalancer") (eq .type "NodePort")) (not (empty .http.nodePort))) }}
nodePort: {{ .http.nodePort }}
{{- end }}
@@ -223,6 +226,9 @@ spec:
- name: kong-{{ .serviceName }}-tls
port: {{ .tls.servicePort }}
targetPort: {{ .tls.overrideServiceTargetPort | default .tls.containerPort }}
{{- if .tls.appProtocol }}
appProtocol: {{ .tls.appProtocol }}
{{- end }}
{{- if (and (or (eq .type "LoadBalancer") (eq .type "NodePort")) (not (empty .tls.nodePort))) }}
nodePort: {{ .tls.nodePort }}
{{- end }}
@@ -890,7 +896,7 @@ The name of the Service which will be used by the controller to update the Ingre
containerPort: 10255
protocol: TCP
{{- end }}
- name: status
- name: cstatus
containerPort: 10254
protocol: TCP
env:

View File

@@ -34,7 +34,7 @@ spec:
http-headers: false
request-body: false
log-destination:
cloud: false
cloud: true
stdout:
format: json-formatted
---

View File

@@ -130,7 +130,7 @@ extraLabels: {}
# Specify Kong's Docker image and repository details here
image:
repository: kong
tag: "3.5"
tag: "3.6"
# Kong Enterprise
# repository: kong/kong-gateway
# tag: "3.5"
@@ -317,6 +317,10 @@ proxy:
parameters:
- http2
# Specify the Service's TLS port's appProtocol. This can be useful when integrating with
# external load balancers that require the `appProtocol` field to be set (e.g. GCP).
appProtocol: ""
# Define stream (TCP) listen
# To enable, remove "[]", uncomment the section below, and select your desired
# ports and parameters. Listens are dynamically named after their containerPort,
@@ -525,7 +529,7 @@ ingressController:
enabled: true
image:
repository: kong/kubernetes-ingress-controller
tag: "3.0"
tag: "3.1"
# Optionally set a semantic version for version-gated features. This can normally
# be left unset. You only need to set this if your tag is not a semver string,
# such as when you are using a "next" tag. Set this to the effective semantic
@@ -1255,7 +1259,7 @@ appsec:
#registry:
repository: ghcr.io/openappsec
image: "agent"
tag: "1.1.6"
tag: "1.1.8"
pullPolicy: Always
securityContext:
@@ -1269,7 +1273,7 @@ appsec:
kong:
image:
repository: "ghcr.io/openappsec/kong-attachment"
tag: "1.1.6"
tag: "1.1.8"
configMapName: appsec-settings-configmap
configMapContent:
crowdsec:
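
Pulling the version bumps in this hunk together, a hedged sketch of a values override that pins the same images (the top-level key paths follow the excerpts above; nesting the kong attachment keys under the appsec section is an assumption based on the hunk context):

image:
  repository: kong
  tag: "3.6"
ingressController:
  enabled: true
  image:
    repository: kong/kubernetes-ingress-controller
    tag: "3.1"
appsec:
  repository: ghcr.io/openappsec
  image: "agent"
  tag: "1.1.8"
  kong:
    image:
      repository: "ghcr.io/openappsec/kong-attachment"
      tag: "1.1.8"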

View File

@@ -1,4 +1,4 @@
install(FILES Dockerfile entry.sh install-cp-agent-intelligence-service.sh install-cp-crowdsec-aux.sh DESTINATION .)
install(FILES Dockerfile entry.sh install-cp-agent-intelligence-service.sh install-cp-crowdsec-aux.sh self_managed_openappsec_manifest.json DESTINATION .)
add_custom_command(
OUTPUT ${CMAKE_INSTALL_PREFIX}/agent-docker.img

View File

@@ -1,5 +1,7 @@
FROM alpine
ENV OPENAPPSEC_NANO_AGENT=TRUE
RUN apk add --no-cache -u busybox
RUN apk add --no-cache -u zlib
RUN apk add --no-cache bash
@@ -11,8 +13,12 @@ RUN apk add --no-cache libunwind
RUN apk add --no-cache gdb
RUN apk add --no-cache libxml2
RUN apk add --no-cache pcre2
RUN apk add --no-cache ca-certificates
RUN apk add --update coreutils
COPY self_managed_openappsec_manifest.json /tmp/self_managed_openappsec_manifest.json
COPY install*.sh /nano-service-installers/
COPY entry.sh /entry.sh

View File

@@ -6,18 +6,40 @@ HTTP_TRANSACTION_HANDLER_SERVICE="install-cp-nano-service-http-transaction-handl
ATTACHMENT_REGISTRATION_SERVICE="install-cp-nano-attachment-registration-manager.sh"
ORCHESTRATION_INSTALLATION_SCRIPT="install-cp-nano-agent.sh"
CACHE_INSTALLATION_SCRIPT="install-cp-nano-agent-cache.sh"
PROMETHEUS_INSTALLATION_SCRIPT="install-cp-nano-service-prometheus.sh"
NGINX_CENTRAL_MANAGER_INSTALLATION_SCRIPT="install-cp-nano-central-nginx-manager.sh"
var_fog_address=
var_proxy=
var_mode=
var_token=
var_ignore=
init=
active_watchdog_pid=
cleanup() {
local signal="$1"
echo "[$(date '+%Y-%m-%d %H:%M:%S')] Signal ${signal} was received, exiting gracefully..." >&2
if [ -n "${active_watchdog_pid}" ] && ps -p ${active_watchdog_pid} > /dev/null 2>&1; then
kill -TERM ${active_watchdog_pid} 2>/dev/null || true
wait ${active_watchdog_pid} 2>/dev/null || true
fi
echo "Cleanup completed. Exiting now." >&2
exit 0
}
trap 'cleanup SIGTERM' SIGTERM
trap 'cleanup SIGINT' SIGINT
if [ ! -f /nano-service-installers/$ORCHESTRATION_INSTALLATION_SCRIPT ]; then
echo "Error: agent installation package doesn't exist."
exit 1
fi
if [ -z $1 ]; then
var_mode="--hybrid_mode"
fi
while true; do
if [ -z "$1" ]; then
break
@@ -27,24 +49,30 @@ while true; do
elif [ "$1" == "--proxy" ]; then
shift
var_proxy="$1"
elif [ "$1" == "--hybrid-mode" ]; then
elif [ "$1" == "--hybrid-mode" ] || [ "$1" == "--standalone" ]; then
var_mode="--hybrid_mode"
elif [ "$1" == "--no-upgrade" ]; then
var_ignore="--ignore all"
elif [ "$1" == "--token" ]; then
shift
var_token="$1"
elif [ "$1" == "--standalone" ]; then
var_mode="--hybrid_mode"
var_token="cp-3fb5c718-5e39-47e6-8d5e-99b4bc5660b74b4b7fc8-5312-451d-a763-aaf7872703c0"
fi
shift
done
if [ -z $var_token ]; then
echo "Error: Token was not provided as input argument."
exit 1
if [ -z $var_token ] && [ $var_mode != "--hybrid_mode" ]; then
var_token=$(env | grep 'AGENT_TOKEN=' | cut -d'=' -f2-)
if [ -z $var_token ]; then
echo "Error: Token was not provided as input argument."
exit 1
fi
fi
orchestration_service_installation_flags="--token $var_token --container_mode --skip_registration"
orchestration_service_installation_flags="--container_mode --skip_registration"
if [ ! -z $var_token ]; then
export AGENT_TOKEN="$var_token"
orchestration_service_installation_flags="$orchestration_service_installation_flags --token $var_token"
fi
if [ ! -z $var_fog_address ]; then
orchestration_service_installation_flags="$orchestration_service_installation_flags --fog $var_fog_address"
fi
@@ -55,6 +83,9 @@ fi
if [ ! -z $var_mode ]; then
orchestration_service_installation_flags="$orchestration_service_installation_flags $var_mode"
fi
if [ ! -z "$var_ignore" ]; then
orchestration_service_installation_flags="$orchestration_service_installation_flags $var_ignore"
fi
/nano-service-installers/$ORCHESTRATION_INSTALLATION_SCRIPT --install $orchestration_service_installation_flags
@@ -67,7 +98,15 @@ fi
/nano-service-installers/$CACHE_INSTALLATION_SCRIPT --install
/nano-service-installers/$HTTP_TRANSACTION_HANDLER_SERVICE --install
if [ ! -z $CROWDSEC_ENABLED ]; then
if [ "$PROMETHEUS" == "true" ]; then
/nano-service-installers/$PROMETHEUS_INSTALLATION_SCRIPT --install
fi
if [ "$CENTRAL_NGINX_MANAGER" == "true" ]; then
/nano-service-installers/$NGINX_CENTRAL_MANAGER_INSTALLATION_SCRIPT --install
fi
if [ "$CROWDSEC_ENABLED" == "true" ]; then
/nano-service-installers/$INTELLIGENCE_INSTALLATION_SCRIPT --install
/nano-service-installers/$CROWDSEC_INSTALLATION_SCRIPT --install
fi
@@ -79,25 +118,16 @@ if [ -f "$FILE" ]; then
fi
touch /etc/cp/watchdog/wd.startup
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
active_watchdog_pid=$!
while true; do
if [ -z "$init" ]; then
init=true
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
sleep 5
active_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")
fi
current_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")
if [ ! -f /tmp/restart_watchdog ] && [ "$current_watchdog_pid" != "$active_watchdog_pid" ]; then
echo "Error: Watchdog exited abnormally"
exit 1
elif [ -f /tmp/restart_watchdog ]; then
if [ -f /tmp/restart_watchdog ]; then
rm -f /tmp/restart_watchdog
kill -9 "$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")"
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
sleep 5
active_watchdog_pid=$(pgrep -f -x -o "/bin/bash /etc/cp/watchdog/cp-nano-watchdog")
kill -9 ${active_watchdog_pid}
fi
if [ ! "$(ps -f | grep cp-nano-watchdog | grep ${active_watchdog_pid})" ]; then
/etc/cp/watchdog/cp-nano-watchdog >/dev/null 2>&1 &
active_watchdog_pid=$!
fi
sleep 5
done
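
With these changes the entry script reads the token from the environment when it is not passed as an argument and gates the optional installers on environment flags, so the container can be driven declaratively. A minimal compose-style sketch (the service name and image reference are illustrative assumptions; the variable names and the --standalone flag come from the script above):

services:
  appsec-agent:
    image: ghcr.io/openappsec/agent:1.1.8
    command: --standalone
    environment:
      - PROMETHEUS=true             # installs the Prometheus nano service
      - CENTRAL_NGINX_MANAGER=true  # installs the central NGINX manager
      - CROWDSEC_ENABLED=true       # installs the intelligence and CrowdSec aux services
      # AGENT_TOKEN is optional in --standalone/--hybrid-mode; required otherwise
      # - AGENT_TOKEN=<your-token>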

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -1,12 +1,10 @@
add_subdirectory(report_messaging)
add_subdirectory(http_manager)
add_subdirectory(generic_rulebase)
add_subdirectory(signal_handler)
add_subdirectory(gradual_deployment)
add_subdirectory(packet)
add_subdirectory(pending_key)
add_subdirectory(health_check_manager)
add_subdirectory(utils)
add_subdirectory(attachment-intakers)
add_subdirectory(security_apps)
add_subdirectory(nginx_message_reader)

View File

@@ -39,6 +39,8 @@ USE_DEBUG_FLAG(D_ATTACHMENT_REGISTRATION);
using namespace std;
static const AlertInfo alert(AlertTeam::CORE, "attachment registrator");
class AttachmentRegistrator::Impl
{
public:
@@ -163,7 +165,7 @@ private:
break;
}
default:
dbgAssert(false) << "Unsupported Attachment " << static_cast<int>(type);
dbgAssert(false) << alert << "Unsupported Attachment " << static_cast<int>(type);
}
if (!family_id.empty()) handler_path << family_id << "_";
@@ -175,7 +177,9 @@ private:
string
genRegCommand(const string &family_id, const uint num_of_members, const AttachmentType type) const
{
dbgAssert(num_of_members > 0) << "Failed to generate a registration command for an empty group of attachments";
dbgAssert(num_of_members > 0)
<< alert
<< "Failed to generate a registration command for an empty group of attachments";
static const string registration_format = "/etc/cp/watchdog/cp-nano-watchdog --register ";
stringstream registration_command;
@@ -187,7 +191,7 @@ private:
break;
}
default:
dbgAssert(false) << "Unsupported Attachment " << static_cast<int>(type);
dbgAssert(false) << alert << "Unsupported Attachment " << static_cast<int>(type);
}
if (!family_id.empty()) registration_command << " --family " << family_id;
@@ -265,7 +269,7 @@ private:
return -1;
}
dbgAssert(new_socket.unpack() > 0) << "Generated socket is OK yet negative";
dbgAssert(new_socket.unpack() > 0) << alert << "Generated socket is OK yet negative";
return new_socket.unpack();
}
@@ -281,7 +285,7 @@ private:
}
I_Socket::socketFd client_socket = accepted_socket.unpack();
dbgAssert(client_socket > 0) << "Generated client socket is OK yet negative";
dbgAssert(client_socket > 0) << alert << "Generated client socket is OK yet negative";
auto close_socket_on_exit = make_scope_exit([&]() { i_socket->closeSocket(client_socket); });
Maybe<uint8_t> attachment_id = readNumericParam(client_socket);
@@ -375,7 +379,7 @@ private:
}
I_Socket::socketFd client_socket = accepted_socket.unpack();
dbgAssert(client_socket > 0) << "Generated client socket is OK yet negative";
dbgAssert(client_socket > 0) << alert << "Generated client socket is OK yet negative";
auto close_socket_on_exit = make_scope_exit([&]() { i_socket->closeSocket(client_socket); });
Maybe<AttachmentType> attachment_type = readAttachmentType(client_socket);

View File

@@ -31,10 +31,12 @@
#include <stdarg.h>
#include <boost/range/iterator_range.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/regex.hpp>
#include "nginx_attachment_config.h"
#include "nginx_attachment_opaque.h"
#include "generic_rulebase/evaluators/trigger_eval.h"
#include "nginx_parser.h"
#include "i_instance_awareness.h"
#include "common.h"
@@ -76,6 +78,7 @@ using namespace std;
using ChunkType = ngx_http_chunk_type_e;
static const uint32_t corrupted_session_id = CORRUPTED_SESSION_ID;
static const AlertInfo alert(AlertTeam::CORE, "nginx attachment");
class FailopenModeListener : public Listener<FailopenModeEvent>
{
@@ -128,6 +131,7 @@ class NginxAttachment::Impl
Singleton::Provide<I_StaticResourcesHandler>::From<NginxAttachment>
{
static constexpr auto INSPECT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT;
static constexpr auto LIMIT_RESPONSE_HEADERS = ngx_http_cp_verdict_e::LIMIT_RESPONSE_HEADERS;
static constexpr auto ACCEPT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
static constexpr auto DROP = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
static constexpr auto INJECT = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT;
@@ -259,6 +263,22 @@ public:
);
}
const char* ignored_headers_env = getenv("SAAS_IGNORED_UPSTREAM_HEADERS");
if (ignored_headers_env) {
string ignored_headers_str = ignored_headers_env;
ignored_headers_str = NGEN::Strings::trim(ignored_headers_str);
if (!ignored_headers_str.empty()) {
dbgInfo(D_HTTP_MANAGER)
<< "Ignoring upstream headers listed in SAAS_IGNORED_UPSTREAM_HEADERS environment variable: "
<< ignored_headers_str;
vector<string> ignored_headers_vec;
boost::split(ignored_headers_vec, ignored_headers_str, boost::is_any_of(";"));
for (const string &header : ignored_headers_vec) ignored_headers.insert(header);
}
}
dbgInfo(D_NGINX_ATTACHMENT) << "Successfully initialized NGINX Attachment";
}
@@ -410,7 +430,10 @@ private:
bool
registerAttachmentProcess(uint32_t nginx_user_id, uint32_t nginx_group_id, I_Socket::socketFd new_socket)
{
dbgAssert(server_sock > 0) << "Registration attempt occurred while registration socket is uninitialized";
dbgAssert(server_sock > 0)
<< alert
<< "Registration attempt occurred while registration socket is uninitialized";
#ifdef FAILURE_TEST
bool did_fail_on_purpose = false;
#endif
@@ -802,10 +825,10 @@ private:
case ChunkType::HOLD_DATA:
return "HOLD_DATA";
case ChunkType::COUNT:
dbgAssert(false) << "Invalid 'COUNT' ChunkType";
dbgAssert(false) << alert << "Invalid 'COUNT' ChunkType";
return "";
}
dbgAssert(false) << "ChunkType was not handled by the switch case";
dbgAssert(false) << alert << "ChunkType was not handled by the switch case";
return "";
}
@@ -1030,7 +1053,11 @@ private:
case ChunkType::REQUEST_START:
return handleStartTransaction(data, opaque);
case ChunkType::REQUEST_HEADER:
return handleMultiModifiableChunks(NginxParser::parseRequestHeaders(data), "request header", true);
return handleMultiModifiableChunks(
NginxParser::parseRequestHeaders(data, ignored_headers),
"request header",
true
);
case ChunkType::REQUEST_BODY:
return handleModifiableChunk(NginxParser::parseRequestBody(data), "request body", true);
case ChunkType::REQUEST_END: {
@@ -1121,28 +1148,44 @@ private:
handleCustomWebResponse(
SharedMemoryIPC *ipc,
vector<const char *> &verdict_data,
vector<uint16_t> &verdict_data_sizes)
vector<uint16_t> &verdict_data_sizes,
string web_user_response_id)
{
ngx_http_cp_web_response_data_t web_response_data;
ScopedContext ctx;
if (web_user_response_id != "") {
dbgTrace(D_NGINX_ATTACHMENT)
<< "web user response ID registered in contex: "
<< web_user_response_id;
set<string> triggers_set{web_user_response_id};
ctx.registerValue<set<GenericConfigId>>(TriggerMatcher::ctx_key, triggers_set);
}
WebTriggerConf web_trigger_conf = getConfigurationWithDefault<WebTriggerConf>(
WebTriggerConf::default_trigger_conf,
"rulebase",
"webUserResponse"
);
bool remove_event_id_param =
getProfileAgentSettingWithDefault<string>("false", "nginxAttachment.removeRedirectEventId") == "true";
string uuid;
string redirectUrl;
if (i_transaction_table->hasState<NginxAttachmentOpaque>()) {
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
uuid = opaque.getSessionUUID();
}
web_response_data.uuid_size =
string("Incident Id: ").length() + uuid.size();
web_response_data.uuid_size = uuid.size();
if (web_trigger_conf.getDetailsLevel() == "Redirect") {
web_response_data.response_data.redirect_data.redirect_location_size =
web_trigger_conf.getRedirectURL().size();
web_response_data.response_data.redirect_data.add_event_id = web_trigger_conf.getAddEventId() ? 1 : 0;
bool add_event = web_trigger_conf.getAddEventId();
if (add_event && !remove_event_id_param) {
web_response_data.response_data.redirect_data.redirect_location_size +=
strlen("?event_id=") + uuid.size();
}
web_response_data.response_data.redirect_data.add_event_id = add_event ? 1 : 0;
web_response_data.web_repsonse_type = static_cast<uint8_t>(ngx_web_response_type_e::REDIRECT_WEB_RESPONSE);
} else {
web_response_data.response_data.custom_response_data.title_size =
@@ -1156,8 +1199,13 @@ private:
verdict_data_sizes.push_back(sizeof(ngx_http_cp_web_response_data_t));
if (web_trigger_conf.getDetailsLevel() == "Redirect") {
verdict_data.push_back(reinterpret_cast<const char *>(web_trigger_conf.getRedirectURL().data()));
verdict_data_sizes.push_back(web_trigger_conf.getRedirectURL().size());
redirectUrl = web_trigger_conf.getRedirectURL();
if (!remove_event_id_param && web_trigger_conf.getAddEventId()) {
redirectUrl += "?event-id=" + uuid;
}
verdict_data.push_back(reinterpret_cast<const char *>(redirectUrl.data()));
verdict_data_sizes.push_back(redirectUrl.size());
} else {
verdict_data.push_back(reinterpret_cast<const char *>(web_trigger_conf.getResponseTitle().data()));
verdict_data_sizes.push_back(web_trigger_conf.getResponseTitle().size());
@@ -1233,7 +1281,7 @@ private:
if (verdict.getVerdict() == DROP) {
nginx_attachment_event.addTrafficVerdictCounter(nginxAttachmentEvent::trafficVerdict::DROP);
verdict_to_send.modification_count = 1;
return handleCustomWebResponse(ipc, verdict_fragments, fragments_sizes);
return handleCustomWebResponse(ipc, verdict_fragments, fragments_sizes, verdict.getWebUserResponseID());
}
if (verdict.getVerdict() == ACCEPT) {
@@ -1459,11 +1507,17 @@ private:
opaque.activateContext();
FilterVerdict verdict = handleChunkedData(*chunked_data_type, inspection_data, opaque);
bool is_header =
*chunked_data_type == ChunkType::REQUEST_HEADER ||
*chunked_data_type == ChunkType::RESPONSE_HEADER ||
*chunked_data_type == ChunkType::CONTENT_LENGTH;
if (verdict.getVerdict() == LIMIT_RESPONSE_HEADERS) {
handleVerdictResponse(verdict, attachment_ipc, transaction_data->session_id, is_header);
popData(attachment_ipc);
verdict = FilterVerdict(INSPECT);
}
handleVerdictResponse(verdict, attachment_ipc, transaction_data->session_id, is_header);
bool is_final_verdict = verdict.getVerdict() == ACCEPT ||
@@ -1576,6 +1630,8 @@ private:
return "INJECT";
case INSPECT:
return "INSPECT";
case LIMIT_RESPONSE_HEADERS:
return "LIMIT_RESPONSE_HEADERS";
case IRRELEVANT:
return "IRRELEVANT";
case RECONF:
@@ -1583,7 +1639,7 @@ private:
case WAIT:
return "WAIT";
}
dbgAssert(false) << "Invalid EventVerdict enum: " << static_cast<int>(verdict.getVerdict());
dbgAssert(false) << alert << "Invalid EventVerdict enum: " << static_cast<int>(verdict.getVerdict());
return string();
}
@@ -1634,13 +1690,14 @@ private:
return false;
}
dbgAssert(sock.unpack() > 0) << "The generated server socket is OK, yet negative";
dbgAssert(sock.unpack() > 0) << alert << "The generated server socket is OK, yet negative";
server_sock = sock.unpack();
I_MainLoop::Routine accept_attachment_routine =
[this] ()
{
dbgAssert(inst_awareness->getUniqueID().ok())
<< alert
<< "NGINX attachment Initialized without Instance Awareness";
bool did_fail_on_purpose = false;
@@ -1653,7 +1710,7 @@ private:
<< (did_fail_on_purpose ? "Intentional Failure" : new_sock.getErr());
return;
}
dbgAssert(new_sock.unpack() > 0) << "The generated client socket is OK, yet negative";
dbgAssert(new_sock.unpack() > 0) << alert << "The generated client socket is OK, yet negative";
I_Socket::socketFd new_attachment_socket = new_sock.unpack();
Maybe<string> uid = getUidFromSocket(new_attachment_socket);
@@ -1699,7 +1756,7 @@ private:
}
};
mainloop->addFileRoutine(
I_MainLoop::RoutineType::RealTime,
I_MainLoop::RoutineType::System,
server_sock,
accept_attachment_routine,
"Nginx Attachment registration listener",
@@ -1712,7 +1769,9 @@ private:
Maybe<string>
getUidFromSocket(I_Socket::socketFd new_attachment_socket)
{
dbgAssert(server_sock > 0) << "Registration attempt occurred while registration socket is uninitialized";
dbgAssert(server_sock > 0)
<< alert
<< "Registration attempt occurred while registration socket is uninitialized";
bool did_fail_on_purpose = false;
DELAY_IF_NEEDED(IntentionalFailureHandler::FailureType::ReceiveDataFromSocket);
@@ -1794,6 +1853,7 @@ private:
HttpAttachmentConfig attachment_config;
I_MainLoop::RoutineID attachment_routine_id = 0;
bool traffic_indicator = false;
unordered_set<string> ignored_headers;
// Interfaces
I_Socket *i_socket = nullptr;

View File
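Among the attachment changes above, init() now reads a semicolon-separated list of upstream header names from the SAAS_IGNORED_UPSTREAM_HEADERS environment variable into an unordered_set that parseRequestHeaders() later uses to strip those headers. A rough standalone sketch of that parsing, using std::getline in place of boost::split and a hypothetical trim() helper standing in for NGEN::Strings::trim:

#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>
#include <unordered_set>

// Hypothetical trim helper standing in for NGEN::Strings::trim.
static std::string trim(const std::string &s)
{
    const auto first = s.find_first_not_of(" \t\r\n");
    if (first == std::string::npos) return "";
    const auto last = s.find_last_not_of(" \t\r\n");
    return s.substr(first, last - first + 1);
}

int main()
{
    std::unordered_set<std::string> ignored_headers;
    if (const char *env = std::getenv("SAAS_IGNORED_UPSTREAM_HEADERS")) {
        std::stringstream values(trim(env));
        std::string header;
        while (std::getline(values, header, ';')) {
            header = trim(header);
            if (!header.empty()) ignored_headers.insert(header);
        }
    }
    for (const auto &h : ignored_headers) std::cout << "ignoring upstream header: " << h << "\n";
    return 0;
}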

@@ -42,6 +42,7 @@ HttpAttachmentConfig::init()
setNumOfNginxIpcElements();
setDebugByContextValues();
setKeepAliveIntervalMsec();
setRetriesForVerdict();
}
bool
@@ -202,6 +203,13 @@ HttpAttachmentConfig::setFailOpenTimeout()
"NGINX wait thread timeout msec"
));
conf_data.setNumericalValue("remove_server_header", getAttachmentConf<uint>(
0,
"agent.removeServerHeader.nginxModule",
"HTTP manager",
"Response server header removal"
));
uint inspection_mode = getAttachmentConf<uint>(
static_cast<uint>(ngx_http_inspection_mode_e::NON_BLOCKING_THREAD),
"agent.inspectionMode.nginxModule",
@@ -215,6 +223,46 @@ HttpAttachmentConfig::setFailOpenTimeout()
conf_data.setNumericalValue("nginx_inspection_mode", inspection_mode);
}
void
HttpAttachmentConfig::setRetriesForVerdict()
{
conf_data.setNumericalValue("min_retries_for_verdict", getAttachmentConf<uint>(
3,
"agent.minRetriesForVerdict.nginxModule",
"HTTP manager",
"Min retries for verdict"
));
conf_data.setNumericalValue("max_retries_for_verdict", getAttachmentConf<uint>(
15,
"agent.maxRetriesForVerdict.nginxModule",
"HTTP manager",
"Max retries for verdict"
));
conf_data.setNumericalValue("hold_verdict_retries", getAttachmentConf<uint>(
3,
"agent.retriesForHoldVerdict.nginxModule",
"HTTP manager",
"Retries for hold verdict"
));
conf_data.setNumericalValue("hold_verdict_polling_time", getAttachmentConf<uint>(
1,
"agent.holdVerdictPollingInterval.nginxModule",
"HTTP manager",
"Hold verdict polling interval seconds"
));
conf_data.setNumericalValue("body_size_trigger", getAttachmentConf<uint>(
200000,
"agent.reqBodySizeTrigger.nginxModule",
"HTTP manager",
"Request body size trigger"
));
}
void
HttpAttachmentConfig::setFailOpenWaitMode()
{

View File
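The new setRetriesForVerdict() above pushes five numeric knobs into the attachment configuration: min/max retries while waiting for a verdict, retries and polling interval for hold verdicts, and the request-body size that triggers inspection. Below is a compact sketch of the same default-plus-override idea; getNumericSetting() and the profile_settings map are stand-ins for the real getAttachmentConf()/profile-setting lookup.

#include <iostream>
#include <map>
#include <string>

// Hypothetical profile settings; in the agent these come from the policy/profile, not a map.
static const std::map<std::string, unsigned> profile_settings = {
    {"agent.maxRetriesForVerdict.nginxModule", 25},
};

static unsigned
getNumericSetting(unsigned default_value, const std::string &key)
{
    auto it = profile_settings.find(key);
    return it == profile_settings.end() ? default_value : it->second;
}

int main()
{
    std::map<std::string, unsigned> conf_data;
    conf_data["min_retries_for_verdict"]   = getNumericSetting(3,      "agent.minRetriesForVerdict.nginxModule");
    conf_data["max_retries_for_verdict"]   = getNumericSetting(15,     "agent.maxRetriesForVerdict.nginxModule");
    conf_data["hold_verdict_retries"]      = getNumericSetting(3,      "agent.retriesForHoldVerdict.nginxModule");
    conf_data["hold_verdict_polling_time"] = getNumericSetting(1,      "agent.holdVerdictPollingInterval.nginxModule");
    conf_data["body_size_trigger"]         = getNumericSetting(200000, "agent.reqBodySizeTrigger.nginxModule");
    for (const auto &kv : conf_data) std::cout << kv.first << " = " << kv.second << "\n";
    return 0;
}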

@@ -70,6 +70,8 @@ private:
void setDebugByContextValues();
void setRetriesForVerdict();
WebTriggerConf web_trigger_conf;
HttpAttachmentConfiguration conf_data;
};

View File

@@ -19,12 +19,15 @@
#include "config.h"
#include "virtual_modifiers.h"
#include "agent_core_utilities.h"
using namespace std;
using namespace boost::uuids;
USE_DEBUG_FLAG(D_HTTP_MANAGER);
extern bool is_keep_alive_ctx;
NginxAttachmentOpaque::NginxAttachmentOpaque(HttpTransactionData _transaction_data)
:
TableOpaqueSerialize<NginxAttachmentOpaque>(this),
@@ -67,6 +70,12 @@ NginxAttachmentOpaque::NginxAttachmentOpaque(HttpTransactionData _transaction_da
ctx.registerValue(HttpTransactionData::uri_query_decoded, decoded_url.substr(question_mark_location + 1));
}
ctx.registerValue(HttpTransactionData::uri_path_decoded, decoded_url);
// Register waf_tag from transaction data if available
const std::string& waf_tag = transaction_data.getWafTag();
if (!waf_tag.empty()) {
ctx.registerValue(HttpTransactionData::waf_tag_ctx, waf_tag);
}
}
NginxAttachmentOpaque::~NginxAttachmentOpaque()
@@ -119,3 +128,47 @@ NginxAttachmentOpaque::setSavedData(const string &name, const string &data, EnvK
saved_data[name] = data;
ctx.registerValue(name, data, log_ctx);
}
bool
NginxAttachmentOpaque::setKeepAliveCtx(const string &hdr_key, const string &hdr_val)
{
if (!is_keep_alive_ctx) return false;
static pair<string, string> keep_alive_hdr;
static bool keep_alive_hdr_initialized = false;
if (keep_alive_hdr_initialized) {
if (!keep_alive_hdr.first.empty() && hdr_key == keep_alive_hdr.first && hdr_val == keep_alive_hdr.second) {
dbgTrace(D_HTTP_MANAGER) << "Registering keep alive context";
ctx.registerValue("keep_alive_request_ctx", true);
return true;
}
return false;
}
const char* saas_keep_alive_hdr_name_env = getenv("SAAS_KEEP_ALIVE_HDR_NAME");
if (saas_keep_alive_hdr_name_env) {
keep_alive_hdr.first = NGEN::Strings::trim(saas_keep_alive_hdr_name_env);
dbgInfo(D_HTTP_MANAGER) << "Using SAAS_KEEP_ALIVE_HDR_NAME environment variable: " << keep_alive_hdr.first;
}
if (!keep_alive_hdr.first.empty()) {
const char* saas_keep_alive_hdr_value_env = getenv("SAAS_KEEP_ALIVE_HDR_VALUE");
if (saas_keep_alive_hdr_value_env) {
keep_alive_hdr.second = NGEN::Strings::trim(saas_keep_alive_hdr_value_env);
dbgInfo(D_HTTP_MANAGER)
<< "Using SAAS_KEEP_ALIVE_HDR_VALUE environment variable: "
<< keep_alive_hdr.second;
}
if (!keep_alive_hdr.second.empty() && (hdr_key == keep_alive_hdr.first && hdr_val == keep_alive_hdr.second)) {
dbgTrace(D_HTTP_MANAGER) << "Registering keep alive context";
ctx.registerValue("keep_alive_request_ctx", true);
keep_alive_hdr_initialized = true;
return true;
}
}
keep_alive_hdr_initialized = true;
return false;
}

View File
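setKeepAliveCtx() above flags a request as a keep-alive probe when one of its headers matches the name/value pair taken from the SAAS_KEEP_ALIVE_HDR_NAME and SAAS_KEEP_ALIVE_HDR_VALUE environment variables, caching the pair in function-local statics after the first lookup. The sketch below approximates only the matching logic; the real method also trims the values and registers keep_alive_request_ctx on the transaction context.

#include <cstdlib>
#include <iostream>
#include <string>
#include <utility>

// Returns true when the given header matches the env-configured keep-alive header.
static bool
isKeepAliveHeader(const std::string &hdr_key, const std::string &hdr_val)
{
    static std::pair<std::string, std::string> keep_alive_hdr = [] {
        std::pair<std::string, std::string> hdr;
        if (const char *name = std::getenv("SAAS_KEEP_ALIVE_HDR_NAME")) hdr.first = name;
        if (const char *value = std::getenv("SAAS_KEEP_ALIVE_HDR_VALUE")) hdr.second = value;
        return hdr;
    }();
    return !keep_alive_hdr.first.empty()
        && !keep_alive_hdr.second.empty()
        && hdr_key == keep_alive_hdr.first
        && hdr_val == keep_alive_hdr.second;
}

int main()
{
    std::cout << std::boolalpha
              << isKeepAliveHeader("X-Keep-Alive-Probe", "monitor") << "\n";
    return 0;
}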

@@ -85,6 +85,7 @@ public:
EnvKeyAttr::LogSection log_ctx = EnvKeyAttr::LogSection::NONE
);
void setApplicationState(const ApplicationState &app_state) { application_state = app_state; }
bool setKeepAliveCtx(const std::string &hdr_key, const std::string &hdr_val);
private:
CompressionStream *response_compression_stream;

View File

@@ -29,6 +29,7 @@ USE_DEBUG_FLAG(D_NGINX_ATTACHMENT_PARSER);
Buffer NginxParser::tenant_header_key = Buffer();
static const Buffer proxy_ip_header_key("X-Forwarded-For", 15, Buffer::MemoryType::STATIC);
static const Buffer source_ip("sourceip", 8, Buffer::MemoryType::STATIC);
bool is_keep_alive_ctx = getenv("SAAS_KEEP_ALIVE_HDR_NAME") != nullptr;
map<Buffer, CompressionType> NginxParser::content_encodings = {
{Buffer("identity"), CompressionType::NO_COMPRESSION},
@@ -177,37 +178,70 @@ getActivetenantAndProfile(const string &str, const string &deli = ",")
}
Maybe<vector<HttpHeader>>
NginxParser::parseRequestHeaders(const Buffer &data)
NginxParser::parseRequestHeaders(const Buffer &data, const unordered_set<string> &ignored_headers)
{
auto parsed_headers = genHeaders(data);
if (!parsed_headers.ok()) return parsed_headers.passErr();
auto maybe_parsed_headers = genHeaders(data);
if (!maybe_parsed_headers.ok()) return maybe_parsed_headers.passErr();
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
auto parsed_headers = maybe_parsed_headers.unpack();
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
for (const HttpHeader &header : *parsed_headers) {
if (is_keep_alive_ctx || !ignored_headers.empty()) {
bool is_last_header_removed = false;
parsed_headers.erase(
remove_if(
parsed_headers.begin(),
parsed_headers.end(),
[&opaque, &is_last_header_removed, &ignored_headers](const HttpHeader &header)
{
string hdr_key = static_cast<string>(header.getKey());
string hdr_val = static_cast<string>(header.getValue());
if (
opaque.setKeepAliveCtx(hdr_key, hdr_val)
|| ignored_headers.find(hdr_key) != ignored_headers.end()
) {
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Header was removed from headers list: " << hdr_key;
if (header.isLastHeader()) {
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Last header was removed from headers list";
is_last_header_removed = true;
}
return true;
}
return false;
}
),
parsed_headers.end()
);
if (is_last_header_removed) {
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Adjusting last header flag";
if (!parsed_headers.empty()) parsed_headers.back().setIsLastHeader();
}
}
for (const HttpHeader &header : parsed_headers) {
auto source_identifiers = getConfigurationWithDefault<UsersAllIdentifiersConfig>(
UsersAllIdentifiersConfig(),
"rulebase",
"usersIdentifiers"
);
source_identifiers.parseRequestHeaders(header);
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
opaque.addToSavedData(
HttpTransactionData::req_headers,
static_cast<string>(header.getKey()) + ": " + static_cast<string>(header.getValue()) + "\r\n"
);
if (NginxParser::tenant_header_key == header.getKey()) {
const auto &header_key = header.getKey();
if (NginxParser::tenant_header_key == header_key) {
dbgDebug(D_NGINX_ATTACHMENT_PARSER)
<< "Identified active tenant header. Key: "
<< dumpHex(header.getKey())
<< dumpHex(header_key)
<< ", Value: "
<< dumpHex(header.getValue());
auto active_tenant_and_profile = getActivetenantAndProfile(header.getValue());
opaque.setSessionTenantAndProfile(active_tenant_and_profile[0], active_tenant_and_profile[1]);
} else if (proxy_ip_header_key == header.getKey()) {
} else if (proxy_ip_header_key == header_key) {
source_identifiers.setXFFValuesToOpaqueCtx(header, UsersAllIdentifiersConfig::ExtractType::PROXYIP);
}
}
@@ -345,12 +379,15 @@ NginxParser::parseResponseBody(const Buffer &raw_response_body, CompressionStrea
Maybe<CompressionType>
NginxParser::parseContentEncoding(const vector<HttpHeader> &headers)
{
static const Buffer content_encoding_header_key("Content-Encoding");
dbgFlow(D_NGINX_ATTACHMENT_PARSER) << "Parsing \"Content-Encoding\" header";
static const Buffer content_encoding_header_key("content-encoding");
auto it = find_if(
headers.begin(),
headers.end(),
[&] (const HttpHeader &http_header) { return http_header.getKey() == content_encoding_header_key; }
[&] (const HttpHeader &http_header) {
return http_header.getKey().isEqualLowerCase(content_encoding_header_key);
}
);
if (it == headers.end()) {
dbgTrace(D_NGINX_ATTACHMENT_PARSER)

View File
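The reworked parseRequestHeaders() above filters keep-alive and explicitly ignored headers out of the parsed vector with the erase/remove_if idiom, then re-marks the new tail element if the removed header carried the last-header flag the attachment protocol relies on. A small standalone sketch of that idiom with a simplified Header type standing in for HttpHeader:

#include <algorithm>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

// Simplified stand-in for HttpHeader.
struct Header
{
    std::string key;
    std::string value;
    bool is_last = false;
};

int main()
{
    std::vector<Header> headers = {
        {"Host", "example.com", false},
        {"X-Ignored", "1", false},
        {"Accept", "*/*", true},
    };
    const std::unordered_set<std::string> ignored_headers = {"X-Ignored", "Accept"};

    bool is_last_header_removed = false;
    headers.erase(
        std::remove_if(
            headers.begin(),
            headers.end(),
            [&](const Header &header) {
                if (ignored_headers.count(header.key) == 0) return false;
                if (header.is_last) is_last_header_removed = true;
                return true;
            }
        ),
        headers.end()
    );
    // The attachment protocol relies on the last-header marker, so it must be restored.
    if (is_last_header_removed && !headers.empty()) headers.back().is_last = true;

    for (const auto &h : headers)
        std::cout << h.key << ": " << h.value << (h.is_last ? "  (last)" : "") << "\n";
    return 0;
}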

@@ -28,7 +28,10 @@ public:
static Maybe<HttpTransactionData> parseStartTrasaction(const Buffer &data);
static Maybe<ResponseCode> parseResponseCode(const Buffer &data);
static Maybe<uint64_t> parseContentLength(const Buffer &data);
static Maybe<std::vector<HttpHeader>> parseRequestHeaders(const Buffer &data);
static Maybe<std::vector<HttpHeader>> parseRequestHeaders(
const Buffer &data,
const std::unordered_set<std::string> &ignored_headers
);
static Maybe<std::vector<HttpHeader>> parseResponseHeaders(const Buffer &data);
static Maybe<HttpBody> parseRequestBody(const Buffer &data);
static Maybe<HttpBody> parseResponseBody(const Buffer &raw_response_body, CompressionStream *compression_stream);

View File

@@ -282,21 +282,39 @@ isIpTrusted(const string &value, const vector<CIDRSData> &cidr_values)
}
Maybe<string>
UsersAllIdentifiersConfig::parseXForwardedFor(const string &str) const
UsersAllIdentifiersConfig::parseXForwardedFor(const string &str, ExtractType type) const
{
vector<string> header_values = split(str);
if (header_values.empty()) return genError("No IP found in the xff header list");
vector<string> xff_values = getHeaderValuesFromConfig("x-forwarded-for");
vector<CIDRSData> cidr_values(xff_values.begin(), xff_values.end());
string last_valid_ip;
for (const string &value : header_values) {
if (!IPAddr::createIPAddr(value).ok()) {
dbgWarning(D_NGINX_ATTACHMENT_PARSER) << "Invalid IP address found in the xff header IPs list: " << value;
return genError("Invalid IP address");
for (auto it = header_values.rbegin(); it != header_values.rend() - 1; ++it) {
if (!IPAddr::createIPAddr(*it).ok()) {
dbgWarning(D_NGINX_ATTACHMENT_PARSER) << "Invalid IP address found in the xff header IPs list: " << *it;
if (last_valid_ip.empty()) {
return genError("Invalid IP address");
}
return last_valid_ip;
}
if (!isIpTrusted(value, cidr_values)) return genError("Untrusted Ip found");
last_valid_ip = *it;
if (type == ExtractType::PROXYIP) continue;
if (!isIpTrusted(*it, cidr_values)) {
dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Found untrusted IP in the xff header IPs list: " << *it;
return *it;
}
}
if (!IPAddr::createIPAddr(header_values[0]).ok()) {
dbgWarning(D_NGINX_ATTACHMENT_PARSER)
<< "Invalid IP address found in the xff header IPs list: "
<< header_values[0];
if (last_valid_ip.empty()) {
return genError("No Valid Ip address was found");
}
return last_valid_ip;
}
return header_values[0];
@@ -306,22 +324,28 @@ UsersAllIdentifiersConfig::parseXForwardedFor(const string &str) const
void
UsersAllIdentifiersConfig::setXFFValuesToOpaqueCtx(const HttpHeader &header, ExtractType type) const
{
auto value = parseXForwardedFor(header.getValue());
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
if (!i_transaction_table || !i_transaction_table->hasState<NginxAttachmentOpaque>()) {
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table";
return;
}
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
auto value = parseXForwardedFor(header.getValue(), type);
if (!value.ok()) {
dbgTrace(D_NGINX_ATTACHMENT_PARSER) << "Could not extract source identifier from X-Forwarded-For header";
return;
};
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
if (!i_transaction_table || !i_transaction_table->hasState<NginxAttachmentOpaque>()) {
dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table";
return;
}
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
if (type == ExtractType::SOURCEIDENTIFIER) {
opaque.setSourceIdentifier(header.getKey(), value.unpack());
dbgDebug(D_NGINX_ATTACHMENT_PARSER)
<< "Added source identifir to XFF "
<< "Added source identifier from XFF header"
<< value.unpack();
opaque.setSavedData(HttpTransactionData::xff_vals_ctx, header.getValue());
opaque.setSavedData(HttpTransactionData::source_identifier, value.unpack());
dbgTrace(D_NGINX_ATTACHMENT_PARSER)
<< "XFF found, set ctx with value from header: "
<< static_cast<string>(header.getValue());
} else {
opaque.setSavedData(HttpTransactionData::proxy_ip_ctx, value.unpack());
}
@@ -342,6 +366,24 @@ UsersAllIdentifiersConfig::setCustomHeaderToOpaqueCtx(const HttpHeader &header)
return;
}
void
UsersAllIdentifiersConfig::setWafTagValuesToOpaqueCtx(const HttpHeader &header) const
{
auto i_transaction_table = Singleton::Consume<I_TableSpecific<SessionID>>::by<NginxAttachment>();
if (!i_transaction_table || !i_transaction_table->hasState<NginxAttachmentOpaque>()) {
dbgDebug(D_NGINX_ATTACHMENT_PARSER) << "Can't get the transaction table";
return;
}
NginxAttachmentOpaque &opaque = i_transaction_table->getState<NginxAttachmentOpaque>();
opaque.setSavedData(HttpTransactionData::waf_tag_ctx, static_cast<string>(header.getValue()));
dbgDebug(D_NGINX_ATTACHMENT_PARSER)
<< "Added waf tag to context: "
<< static_cast<string>(header.getValue());
return;
}
Maybe<string>
UsersAllIdentifiersConfig::parseCookieElement(
const string::const_iterator &start,

View File
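parseXForwardedFor() now scans the X-Forwarded-For list from right (nearest proxy) to left, remembers the last syntactically valid address, and falls back to it when an invalid entry or, for source identification, an untrusted hop is hit; the leftmost entry is only returned when every hop before it checked out. The sketch below keeps just the right-to-left scan and the validity fallback, with a deliberately loose looksLikeIp() in place of IPAddr::createIPAddr() and the trust check omitted:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Very loose validity check standing in for IPAddr::createIPAddr().
static bool looksLikeIp(const std::string &s)
{
    return !s.empty() && s.find_first_not_of("0123456789.") == std::string::npos;
}

static std::vector<std::string> split(const std::string &s, char delim)
{
    std::vector<std::string> out;
    std::stringstream ss(s);
    std::string item;
    while (std::getline(ss, item, delim)) {
        const auto b = item.find_first_not_of(' ');
        const auto e = item.find_last_not_of(' ');
        out.push_back(b == std::string::npos ? "" : item.substr(b, e - b + 1));
    }
    return out;
}

// Best candidate for the original client IP from an XFF value: scan right-to-left,
// remember the last valid hop, and fall back to it when a bad entry shows up.
static std::string clientIpFromXff(const std::string &xff)
{
    const std::vector<std::string> hops = split(xff, ',');
    if (hops.empty()) return "";
    std::string last_valid_ip;
    for (auto it = hops.rbegin(); it != hops.rend() - 1; ++it) {
        if (!looksLikeIp(*it)) return last_valid_ip;  // give up at the first garbage hop
        last_valid_ip = *it;                          // the real code also checks proxy trust here
    }
    return looksLikeIp(hops.front()) ? hops.front() : last_valid_ip;
}

int main()
{
    std::cout << clientIpFromXff("203.0.113.7, 10.0.0.5, 10.0.0.9") << "\n";  // prints 203.0.113.7
    return 0;
}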

@@ -128,7 +128,7 @@ private:
break;
}
default:
dbgAssert(false) << "Unsupported IP type";
dbgAssert(false) << AlertInfo(AlertTeam::CORE, "gradual deployment") << "Unsupported IP type";
}
return address;
}
@@ -142,7 +142,7 @@ private:
if (temp_params_list.size() == 1) {
Maybe<IPAddr> maybe_ip = IPAddr::createIPAddr(temp_params_list[0]);
if (!maybe_ip.ok()) return genError("Could not create IP address, " + maybe_ip.getErr());
IpAddress addr = move(ConvertToIpAddress(maybe_ip.unpackMove()));
IpAddress addr = ConvertToIpAddress(maybe_ip.unpackMove());
return move(IPRange{.start = addr, .end = addr});
}
@@ -157,11 +157,11 @@ private:
IPAddr max_addr = maybe_ip_max.unpackMove();
if (min_addr > max_addr) return genError("Could not create ip range - start greater then end");
IpAddress addr_min = move(ConvertToIpAddress(move(min_addr)));
IpAddress addr_max = move(ConvertToIpAddress(move(max_addr)));
IpAddress addr_min = ConvertToIpAddress(move(min_addr));
IpAddress addr_max = ConvertToIpAddress(move(max_addr));
if (addr_max.ip_type != addr_min.ip_type) return genError("Range IP's type does not match");
return move(IPRange{.start = move(addr_min), .end = move(addr_max)});
return IPRange{.start = move(addr_min), .end = move(addr_max)};
}
return genError("Illegal range received: " + range);

View File
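The gradual-deployment hunk above drops several redundant std::move calls: wrapping a prvalue, or the operand of a return statement, in move() adds nothing and can suppress copy elision, so returning the freshly built aggregate directly is the idiomatic form. A tiny illustration of the before/after, assuming a simplified IPRange aggregate:

#include <cstdint>
#include <iostream>
#include <utility>

struct IpAddress { uint32_t v4; };
struct IPRange { IpAddress start; IpAddress end; };

// Preferred: build the aggregate and return it; the compiler elides the copy (RVO).
static IPRange makeSingleHostRange(IpAddress addr)
{
    return IPRange{addr, addr};
}

// Discouraged: std::move on a prvalue/return operand is redundant and can block elision
// (compilers flag it with -Wpessimizing-move / -Wredundant-move).
static IPRange makeSingleHostRangeMoved(IpAddress addr)
{
    return std::move(IPRange{addr, addr});
}

int main()
{
    IPRange r = makeSingleHostRange(IpAddress{0x0a000001});
    std::cout << std::hex << r.start.v4 << " - " << r.end.v4 << "\n";
    (void)makeSingleHostRangeMoved(IpAddress{0});
    return 0;
}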

@@ -1,8 +0,0 @@
include_directories(${CMAKE_SOURCE_DIR}/components/include)
link_directories(${BOOST_ROOT}/lib)
add_unit_test(
health_check_manager_ut
"health_check_manager_ut.cc"
"singleton;messaging;mainloop;health_check_manager;event_is;metric;-lboost_regex"
)

View File

@@ -15,19 +15,18 @@
#include <string>
#include <map>
#include <sys/stat.h>
#include <climits>
#include <unordered_map>
#include <boost/range/iterator_range.hpp>
#include <unordered_set>
#include <boost/algorithm/string.hpp>
#include <fstream>
#include <algorithm>
#include "common.h"
#include "config.h"
#include "table_opaque.h"
#include "http_manager_opaque.h"
#include "log_generator.h"
#include "http_inspection_events.h"
#include "agent_core_utilities.h"
USE_DEBUG_FLAG(D_HTTP_MANAGER);
@@ -38,6 +37,7 @@ operator<<(ostream &os, const EventVerdict &event)
{
switch (event.getVerdict()) {
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT: return os << "Inspect";
case ngx_http_cp_verdict_e::LIMIT_RESPONSE_HEADERS: return os << "Limit Response Headers";
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT: return os << "Accept";
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP: return os << "Drop";
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT: return os << "Inject";
@@ -46,7 +46,10 @@ operator<<(ostream &os, const EventVerdict &event)
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT: return os << "Wait";
}
dbgAssert(false) << "Illegal Event Verdict value: " << static_cast<uint>(event.getVerdict());
dbgAssert(false)
<< AlertInfo(AlertTeam::CORE, "http manager")
<< "Illegal Event Verdict value: "
<< static_cast<uint>(event.getVerdict());
return os;
}
@@ -91,12 +94,14 @@ public:
ctx.registerValue(app_sec_marker_key, i_transaction_table->keyToString(), EnvKeyAttr::LogSection::MARKER);
HttpManagerOpaque &state = i_transaction_table->getState<HttpManagerOpaque>();
string event_key = static_cast<string>(event.getKey());
if (event_key == getProfileAgentSettingWithDefault<string>("", "agent.customHeaderValueLogging")) {
const auto &custom_header = getProfileAgentSettingWithDefault<string>("", "agent.customHeaderValueLogging");
if (event.getKey().isEqualLowerCase(custom_header)) {
string event_value = static_cast<string>(event.getValue());
dbgTrace(D_HTTP_MANAGER)
<< "Found header key and value - ("
<< event_key
<< custom_header
<< ": "
<< event_value
<< ") that matched agent settings";
@@ -192,7 +197,6 @@ public:
if (state.getUserDefinedValue().ok()) {
ctx.registerValue("UserDefined", state.getUserDefinedValue().unpack(), EnvKeyAttr::LogSection::DATA);
}
return handleEvent(EndRequestEvent().performNamedQuery());
}
@@ -320,9 +324,13 @@ private:
<< respond.second.getVerdict();
state.setApplicationVerdict(respond.first, respond.second.getVerdict());
state.setApplicationWebResponse(respond.first, respond.second.getWebUserResponseByPractice());
}
return state.getCurrVerdict();
FilterVerdict aggregated_verdict(state.getCurrVerdict(), state.getCurrWebUserResponse());
if (aggregated_verdict.getVerdict() == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP) {
SecurityAppsDropEvent(state.getCurrentDropVerdictCausers()).notify();
}
return aggregated_verdict;
}
static void

View File
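On the manager side, the per-application verdicts are now folded into a FilterVerdict that also carries the chosen web user response, and a SecurityAppsDropEvent is emitted with the names of the applications that caused a DROP. The sketch below condenses the aggregation rule (any DROP decides the request, ACCEPT only when every application accepted, otherwise the aggregate stays at an intermediate verdict) using a simplified Verdict enum standing in for ngx_http_cp_verdict_e:

#include <iostream>
#include <map>
#include <set>
#include <string>

enum class Verdict { INSPECT, ACCEPT, DROP, INJECT, WAIT };

// Fold per-application verdicts into one: DROP wins and records its causers,
// ACCEPT only when all applications accepted, otherwise keep the intermediate verdict.
static Verdict
aggregateVerdict(const std::map<std::string, Verdict> &app_verdicts, std::set<std::string> &drop_causers)
{
    size_t accepted = 0;
    Verdict verdict = Verdict::INSPECT;
    for (const auto &app : app_verdicts) {
        switch (app.second) {
            case Verdict::DROP:   drop_causers.insert(app.first); break;
            case Verdict::ACCEPT: ++accepted;                     break;
            case Verdict::INJECT:
            case Verdict::WAIT:   verdict = app.second;           break;
            case Verdict::INSPECT:                                break;
        }
    }
    if (!drop_causers.empty()) return Verdict::DROP;
    return accepted == app_verdicts.size() ? Verdict::ACCEPT : verdict;
}

int main()
{
    std::set<std::string> causers;
    std::map<std::string, Verdict> verdicts = {{"waap", Verdict::DROP}, {"ips", Verdict::ACCEPT}};
    Verdict v = aggregateVerdict(verdicts, causers);
    std::cout << "dropped=" << (v == Verdict::DROP) << ", causers=" << causers.size() << "\n";
    return 0;
}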

@@ -32,6 +32,13 @@ HttpManagerOpaque::setApplicationVerdict(const string &app_name, ngx_http_cp_ver
applications_verdicts[app_name] = verdict;
}
void
HttpManagerOpaque::setApplicationWebResponse(const string &app_name, string web_user_response_id)
{
dbgTrace(D_HTTP_MANAGER) << "Security app: " << app_name << ", has web user response: " << web_user_response_id;
applications_web_user_response[app_name] = web_user_response_id;
}
ngx_http_cp_verdict_e
HttpManagerOpaque::getApplicationsVerdict(const string &app_name) const
{
@@ -51,8 +58,12 @@ HttpManagerOpaque::getCurrVerdict() const
for (const auto &app_verdic_pair : applications_verdicts) {
switch (app_verdic_pair.second) {
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP:
dbgTrace(D_HTTP_MANAGER) << "Verdict DROP for app: " << app_verdic_pair.first;
current_web_user_response = applications_web_user_response.at(app_verdic_pair.first);
dbgTrace(D_HTTP_MANAGER) << "current_web_user_response=" << current_web_user_response;
return app_verdic_pair.second;
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT:
// Sent in ResponseHeaders and ResponseBody.
verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT;
break;
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT:
@@ -60,15 +71,21 @@ HttpManagerOpaque::getCurrVerdict() const
break;
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT:
break;
case ngx_http_cp_verdict_e::LIMIT_RESPONSE_HEADERS:
// Sent in End Request.
verdict = ngx_http_cp_verdict_e::LIMIT_RESPONSE_HEADERS;
break;
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_IRRELEVANT:
dbgTrace(D_HTTP_MANAGER) << "Verdict 'Irrelevant' is not yet supported. Returning Accept";
accepted_apps++;
break;
case ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT:
// Sent in Request Headers and Request Body.
verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT;
break;
default:
dbgAssert(false)
<< AlertInfo(AlertTeam::CORE, "http manager")
<< "Received unknown verdict "
<< static_cast<int>(app_verdic_pair.second);
}
@@ -77,6 +94,25 @@ HttpManagerOpaque::getCurrVerdict() const
return accepted_apps == applications_verdicts.size() ? ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT : verdict;
}
std::set<std::string>
HttpManagerOpaque::getCurrentDropVerdictCausers() const
{
std::set<std::string> causers;
if (manager_verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP) {
causers.insert(HTTP_MANAGER_NAME);
}
for (const auto &app_verdic_pair : applications_verdicts) {
bool was_dropped = app_verdic_pair.second == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
dbgTrace(D_HTTP_MANAGER)
<< "The verdict from: " << app_verdic_pair.first
<< (was_dropped ? " is \"drop\"" : " is not \"drop\" ");
if (was_dropped) {
causers.insert(app_verdic_pair.first);
}
}
return causers;
}
void
HttpManagerOpaque::saveCurrentDataToCache(const Buffer &full_data)
{

View File

@@ -20,16 +20,21 @@
#include "table_opaque.h"
#include "nginx_attachment_common.h"
static const std::string HTTP_MANAGER_NAME = "HTTP Manager";
class HttpManagerOpaque : public TableOpaqueSerialize<HttpManagerOpaque>
{
public:
HttpManagerOpaque();
void setApplicationVerdict(const std::string &app_name, ngx_http_cp_verdict_e verdict);
void setApplicationWebResponse(const std::string &app_name, std::string web_user_response_id);
ngx_http_cp_verdict_e getApplicationsVerdict(const std::string &app_name) const;
void setManagerVerdict(ngx_http_cp_verdict_e verdict) { manager_verdict = verdict; }
ngx_http_cp_verdict_e getManagerVerdict() const { return manager_verdict; }
ngx_http_cp_verdict_e getCurrVerdict() const;
const std::string & getCurrWebUserResponse() const { return current_web_user_response; };
std::set<std::string> getCurrentDropVerdictCausers() const;
void saveCurrentDataToCache(const Buffer &full_data);
void setUserDefinedValue(const std::string &value) { user_defined_value = value; }
Maybe<std::string> getUserDefinedValue() const { return user_defined_value; }
@@ -49,6 +54,8 @@ public:
private:
std::unordered_map<std::string, ngx_http_cp_verdict_e> applications_verdicts;
std::unordered_map<std::string, std::string> applications_web_user_response;
mutable std::string current_web_user_response;
ngx_http_cp_verdict_e manager_verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT;
Buffer prev_data_cache;
uint aggregated_payload_size = 0;

View File

@@ -0,0 +1,45 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef __CENTRAL_NGINX_MANAGER_H__
#define __CENTRAL_NGINX_MANAGER_H__
#include "component.h"
#include "singleton.h"
#include "i_messaging.h"
#include "i_rest_api.h"
#include "i_mainloop.h"
#include "i_agent_details.h"
class CentralNginxManager
:
public Component,
Singleton::Consume<I_RestApi>,
Singleton::Consume<I_Messaging>,
Singleton::Consume<I_MainLoop>,
Singleton::Consume<I_AgentDetails>
{
public:
CentralNginxManager();
~CentralNginxManager();
void preload() override;
void init() override;
void fini() override;
private:
class Impl;
std::unique_ptr<Impl> pimpl;
};
#endif // __CENTRAL_NGINX_MANAGER_H__

View File

@@ -34,6 +34,7 @@ public:
~DetailsResolver();
void preload() override;
void init() override;
private:
class Impl;

View File

@@ -21,6 +21,7 @@
#include "url_parser.h"
#include "i_agent_details.h"
#include "i_mainloop.h"
#include "i_environment.h"
#include "singleton.h"
#include "component.h"
@@ -32,6 +33,7 @@ class Downloader
Singleton::Consume<I_Encryptor>,
Singleton::Consume<I_MainLoop>,
Singleton::Consume<I_OrchestrationTools>,
Singleton::Consume<I_Environment>,
Singleton::Consume<I_UpdateCommunication>
{
public:

View File

@@ -24,7 +24,8 @@ class ExternalSdkServer
:
public Component,
Singleton::Provide<I_ExternalSdkServer>,
Singleton::Consume<I_RestApi>
Singleton::Consume<I_RestApi>,
Singleton::Consume<I_Messaging>
{
public:
ExternalSdkServer();

View File

@@ -45,6 +45,19 @@ private:
std::string host;
};
class EqualWafTag : public EnvironmentEvaluator<bool>, Singleton::Consume<I_Environment>
{
public:
EqualWafTag(const std::vector<std::string> &params);
static std::string getName() { return "EqualWafTag"; }
Maybe<bool, Context::Error> evalVariable() const override;
private:
std::string waf_tag;
};
class EqualListeningIP : public EnvironmentEvaluator<bool>, Singleton::Consume<I_Environment>
{
public:

View File

@@ -89,7 +89,9 @@ private:
bool matchAttributesRegEx(const std::set<std::string> &values,
std::set<std::string> &matched_override_keywords) const;
bool matchAttributesString(const std::set<std::string> &values) const;
bool matchAttributesIp(const std::set<std::string> &values) const;
bool isRegEx() const;
void sortAndMergeIpRangesValues();
MatchType type;
Operators operator_type;

View File

@@ -317,12 +317,12 @@ public:
{
return url_for_cef;
}
Flags<ReportIS::StreamType> getStreams(SecurityType security_type, bool is_action_drop_or_prevent) const;
Flags<ReportIS::Enreachments> getEnrechments(SecurityType security_type) const;
private:
ReportIS::Severity getSeverity(bool is_action_drop_or_prevent) const;
ReportIS::Priority getPriority(bool is_action_drop_or_prevent) const;
Flags<ReportIS::StreamType> getStreams(SecurityType security_type, bool is_action_drop_or_prevent) const;
Flags<ReportIS::Enreachments> getEnrechments(SecurityType security_type) const;
std::string name;
std::string verbosity;
@@ -339,4 +339,32 @@ private:
bool should_format_output = false;
};
class ReportTriggerConf
{
public:
/// \brief Default constructor for ReportTriggerConf.
ReportTriggerConf() {}
/// \brief Preload function to register expected configuration.
static void
preload()
{
registerExpectedConfiguration<ReportTriggerConf>("rulebase", "report");
}
/// \brief Load function to deserialize configuration from JSONInputArchive.
/// \param archive_in The JSON input archive.
void load(cereal::JSONInputArchive &archive_in);
/// \brief Get the name.
/// \return The name.
const std::string &
getName() const
{
return name;
}
private:
std::string name;
};
#endif //__TRIGGERS_CONFIG_H__

View File

@@ -21,6 +21,7 @@
#include "i_shell_cmd.h"
#include "i_orchestration_status.h"
#include "component.h"
#include "i_service_controller.h"
class HealthChecker
:
@@ -29,7 +30,8 @@ class HealthChecker
Singleton::Consume<I_Socket>,
Singleton::Consume<I_Health_Check_Manager>,
Singleton::Consume<I_ShellCmd>,
Singleton::Consume<I_OrchestrationStatus>
Singleton::Consume<I_OrchestrationStatus>,
Singleton::Consume<I_ServiceController>
{
public:
HealthChecker();

View File

@@ -27,9 +27,18 @@ public:
verdict(_verdict)
{}
FilterVerdict(
ngx_http_cp_verdict_e _verdict,
const std::string &_web_reponse_id)
:
verdict(_verdict),
web_user_response_id(_web_reponse_id)
{}
FilterVerdict(const EventVerdict &_verdict, ModifiedChunkIndex _event_idx = -1)
:
verdict(_verdict.getVerdict())
verdict(_verdict.getVerdict()),
web_user_response_id(_verdict.getWebUserResponseByPractice())
{
if (verdict == ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INJECT) {
addModifications(_verdict.getModifications(), _event_idx);
@@ -59,10 +68,12 @@ public:
uint getModificationsAmount() const { return total_modifications; }
ngx_http_cp_verdict_e getVerdict() const { return verdict; }
const std::vector<EventModifications> & getModifications() const { return modifications; }
const std::string getWebUserResponseID() const { return web_user_response_id; }
private:
ngx_http_cp_verdict_e verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT;
std::vector<EventModifications> modifications;
std::string web_user_response_id;
uint total_modifications = 0;
};

View File

@@ -50,9 +50,11 @@ public:
position(mod_position)
{
dbgAssert(mod_type != ModificationType::APPEND || position == injection_pos_irrelevant)
<< AlertInfo(AlertTeam::CORE, "http manager")
<< "Injection position is not applicable to a modification of type \"Append\"";
dbgAssert(mod_type != ModificationType::INJECT || position >= 0)
<< AlertInfo(AlertTeam::CORE, "http manager")
<< "Invalid injection position: must be non-negative. Position: "
<< position;
}
@@ -166,6 +168,7 @@ private:
}
default:
dbgAssert(false)
<< AlertInfo(AlertTeam::CORE, "http manager")
<< "Unknown type of ModificationType: "
<< static_cast<int>(modification_type);
}
@@ -236,6 +239,7 @@ public:
const Buffer & getValue() const { return value; }
bool isLastHeader() const { return is_last_header; }
void setIsLastHeader() { is_last_header = true; }
uint8_t getHeaderIndex() const { return header_index; }
private:
@@ -372,16 +376,31 @@ public:
verdict(event_verdict)
{}
EventVerdict(
const ModificationList &mods,
ngx_http_cp_verdict_e event_verdict,
std::string response_id) :
modifications(mods),
verdict(event_verdict),
webUserResponseByPractice(response_id)
{}
// LCOV_EXCL_START - sync functions, can only be tested once the sync module exists
template <typename T> void serialize(T &ar, uint) { ar(verdict); }
// LCOV_EXCL_STOP
const ModificationList & getModifications() const { return modifications; }
ngx_http_cp_verdict_e getVerdict() const { return verdict; }
const std::string getWebUserResponseByPractice() const { return webUserResponseByPractice; }
void setWebUserResponseByPractice(const std::string id) {
dbgTrace(D_HTTP_MANAGER) << "current verdict web user response set to: " << id;
webUserResponseByPractice = id;
}
private:
ModificationList modifications;
ngx_http_cp_verdict_e verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT;
std::string webUserResponseByPractice;
};
#endif // __I_HTTP_EVENT_IMPL_H__

View File

@@ -15,7 +15,8 @@ class HttpGeoFilter
public Component,
Singleton::Consume<I_MainLoop>,
Singleton::Consume<I_GeoLocation>,
Singleton::Consume<I_GenericRulebase>
Singleton::Consume<I_GenericRulebase>,
Singleton::Consume<I_Environment>
{
public:
HttpGeoFilter();

View File

@@ -183,4 +183,16 @@ class WaitTransactionEvent : public Event<WaitTransactionEvent, EventVerdict>
{
};
class SecurityAppsDropEvent : public Event<SecurityAppsDropEvent>
{
public:
SecurityAppsDropEvent(
const std::set<std::string> &apps_names)
:
apps_names(apps_names) {}
const std::set<std::string> & getAppsNames() const { return apps_names; }
private:
const std::set<std::string> apps_names;
};
#endif // __HTTP_INSPECTION_EVENTS_H__

View File

@@ -72,7 +72,8 @@ public:
parsed_uri,
client_ip,
client_port,
response_content_encoding
response_content_encoding,
waf_tag
);
}
@@ -91,7 +92,8 @@ public:
parsed_uri,
client_ip,
client_port,
response_content_encoding
response_content_encoding,
waf_tag
);
}
// LCOV_EXCL_STOP
@@ -122,6 +124,9 @@ public:
response_content_encoding = _response_content_encoding;
}
const std::string & getWafTag() const { return waf_tag; }
void setWafTag(const std::string &_waf_tag) { waf_tag = _waf_tag; }
static const std::string http_proto_ctx;
static const std::string method_ctx;
static const std::string host_name_ctx;
@@ -136,6 +141,8 @@ public:
static const std::string req_body;
static const std::string source_identifier;
static const std::string proxy_ip_ctx;
static const std::string xff_vals_ctx;
static const std::string waf_tag_ctx;
static const CompressionType default_response_content_encoding;
@@ -152,6 +159,7 @@ private:
uint16_t client_port;
bool is_request;
CompressionType response_content_encoding;
std::string waf_tag;
};
#endif // __HTTP_TRANSACTION_DATA_H__

View File

@@ -26,10 +26,13 @@ public:
virtual Maybe<std::string> getArch() = 0;
virtual std::string getAgentVersion() = 0;
virtual bool isKernelVersion3OrHigher() = 0;
virtual bool isGw() = 0;
virtual bool isGwNotVsx() = 0;
virtual bool isVersionAboveR8110() = 0;
virtual bool isReverseProxy() = 0;
virtual Maybe<std::tuple<std::string, std::string, std::string>> parseNginxMetadata() = 0;
virtual bool isCloudStorageEnabled() = 0;
virtual Maybe<std::tuple<std::string, std::string, std::string, std::string>> parseNginxMetadata() = 0;
virtual Maybe<std::tuple<std::string, std::string, std::string, std::string, std::string>> readCloudMetadata() = 0;
virtual std::map<std::string, std::string> getResolvedDetails() = 0;
#if defined(gaia) || defined(smb)
virtual bool compareCheckpointVersion(int cp_version, std::function<bool(int, int)> compare_operator) const = 0;

View File

@@ -22,7 +22,7 @@
class I_Downloader
{
public:
virtual Maybe<std::string> downloadFileFromFog(
virtual Maybe<std::string> downloadFile(
const std::string &checksum,
Package::ChecksumTypes,
const GetResourceFile &resourse_file

View File

@@ -17,6 +17,7 @@
#include <vector>
#include "generic_rulebase/parameters_config.h"
#include "generic_rulebase/triggers_config.h"
#include "generic_rulebase/zone.h"
#include "config.h"
@@ -26,6 +27,9 @@ public:
virtual Maybe<Zone, Config::Errors> getLocalZone() const = 0;
virtual Maybe<Zone, Config::Errors> getOtherZone() const = 0;
virtual LogTriggerConf getLogTriggerConf(const std::string &trigger_Id) const = 0;
virtual ParameterException getParameterException(const std::string &parameter_Id) const = 0;
using ParameterKeyValues = std::unordered_map<std::string, std::set<std::string>>;
virtual std::set<ParameterBehavior> getBehavior(const ParameterKeyValues &key_value_pairs) const = 0;

View File

@@ -117,7 +117,7 @@ public:
const std::string &conf_path) const = 0;
virtual bool copyFile(const std::string &src_path, const std::string &dst_path) const = 0;
virtual bool doesFileExist(const std::string &file_path) const = 0;
virtual void getClusterId() const = 0;
virtual void setClusterId() const = 0;
virtual void fillKeyInJson(
const std::string &filename,
const std::string &_key,

View File

@@ -64,7 +64,9 @@ public:
const std::string &service_id
) = 0;
virtual std::map<std::string, PortNumber> getServiceToPortMap() = 0;
virtual std::map<std::string, std::vector<PortNumber>> getServiceToPortMap() = 0;
virtual bool getServicesPolicyStatus() const = 0;
protected:
virtual ~I_ServiceController() {}

View File

@@ -32,6 +32,7 @@ public:
const std::string &policy_versions
) const = 0;
virtual Maybe<void> authenticateAgent() = 0;
virtual void registerLocalAgentToFog() = 0;
virtual Maybe<void> getUpdate(CheckUpdateRequest &request) = 0;
virtual Maybe<std::string> downloadAttributeFile(
const GetResourceFile &resourse_file,

View File

@@ -25,7 +25,9 @@ struct DecisionTelemetryData
std::string source;
TrafficMethod method;
int responseCode;
uint64_t elapsedTime;
std::set<std::string> attackTypes;
bool temperatureDetected;
DecisionTelemetryData() :
blockType(NOT_BLOCKING),
@@ -36,7 +38,9 @@ struct DecisionTelemetryData
source(),
method(POST),
responseCode(0),
attackTypes()
elapsedTime(0),
attackTypes(),
temperatureDetected(false)
{
}
};

View File

@@ -28,8 +28,9 @@
// LCOV_EXCL_START Reason: temporary until we add relevant UT until 07/10
bool operator<(const IpAddress &this_ip_addr, const IpAddress &other_ip_addr);
bool operator==(const IpAddress &this_ip_addr, const IpAddress &other_ip_addr);
bool operator<=(const IpAddress &this_ip_addr, const IpAddress &other_ip_addr);
bool operator<(const IPRange &range1, const IPRange &range2);
// LCOV_EXCL_STOP
Maybe<std::pair<std::string, int>> extractAddressAndMaskSize(const std::string &cidr);

View File

@@ -4,6 +4,7 @@
#include "singleton.h"
#include "i_keywords_rule.h"
#include "i_table.h"
#include "i_mainloop.h"
#include "i_http_manager.h"
#include "i_environment.h"
#include "http_inspection_events.h"
@@ -16,7 +17,8 @@ class IPSComp
Singleton::Consume<I_KeywordsRule>,
Singleton::Consume<I_Table>,
Singleton::Consume<I_Environment>,
Singleton::Consume<I_GenericRulebase>
Singleton::Consume<I_GenericRulebase>,
Singleton::Consume<I_MainLoop>
{
public:
IPSComp();

View File

@@ -62,6 +62,7 @@ public:
private:
Maybe<std::string> downloadPackage(const Package &package, bool is_clean_installation);
std::string getCurrentTimestamp();
std::string manifest_file_path;
std::string temp_ext;

View File

@@ -0,0 +1,28 @@
#ifndef __NGINX_MESSAGE_READER_H__
#define __NGINX_MESSAGE_READER_H__
#include "singleton.h"
#include "i_mainloop.h"
#include "i_socket_is.h"
#include "component.h"
class NginxMessageReader
:
public Component,
Singleton::Consume<I_MainLoop>,
Singleton::Consume<I_Socket>
{
public:
NginxMessageReader();
~NginxMessageReader();
void init() override;
void fini() override;
void preload() override;
private:
class Impl;
std::unique_ptr<Impl> pimpl;
};
#endif //__NGINX_MESSAGE_READER_H__

View File

@@ -0,0 +1,51 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef __NGINX_UTILS_H__
#define __NGINX_UTILS_H__
#include <string>
#include "maybe_res.h"
#include "singleton.h"
#include "i_shell_cmd.h"
class NginxConfCollector
{
public:
NginxConfCollector(const std::string &nginx_conf_input_path, const std::string &nginx_conf_output_path);
Maybe<std::string> generateFullNginxConf() const;
private:
std::vector<std::string> expandIncludes(const std::string &includePattern) const;
void processConfigFile(
const std::string &path,
std::ostringstream &conf_output,
std::vector<std::string> &errors
) const;
std::string main_conf_input_path;
std::string main_conf_output_path;
std::string main_conf_directory_path;
};
class NginxUtils : Singleton::Consume<I_ShellCmd>
{
public:
static std::string getModulesPath();
static std::string getMainNginxConfPath();
static Maybe<void> validateNginxConf(const std::string &nginx_conf_path);
static Maybe<void> reloadNginx(const std::string &nginx_conf_path);
};
#endif // __NGINX_UTILS_H__

View File
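NginxConfCollector, declared above, flattens an nginx configuration by recursively expanding include directives into a single output file before it is validated and reloaded. Its implementation is not part of this diff, so the sketch below is only a guess at how such a collector could work: a glob(3)-based expandIncludes() plus a recursive inliner, without the relative-path handling and error collection the real class would need.

#include <glob.h>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Expand a (possibly wildcarded) include pattern into the matching file paths.
static std::vector<std::string> expandIncludes(const std::string &pattern)
{
    std::vector<std::string> paths;
    glob_t results{};
    if (glob(pattern.c_str(), 0, nullptr, &results) == 0) {
        for (size_t i = 0; i < results.gl_pathc; ++i) paths.emplace_back(results.gl_pathv[i]);
    }
    globfree(&results);
    return paths;
}

// Recursively inline "include <pattern>;" directives into one stream.
static void inlineConf(const std::string &path, std::ostringstream &out)
{
    std::ifstream file(path);
    std::string line;
    while (std::getline(file, line)) {
        std::istringstream words(line);
        std::string first, second;
        words >> first >> second;
        if (first == "include" && !second.empty() && second.back() == ';') {
            for (const auto &included : expandIncludes(second.substr(0, second.size() - 1)))
                inlineConf(included, out);
        } else {
            out << line << "\n";
        }
    }
}

int main(int argc, char **argv)
{
    if (argc < 2) return 1;
    std::ostringstream full_conf;
    inlineConf(argv[1], full_conf);
    std::cout << full_conf.str();
    return 0;
}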

@@ -31,6 +31,7 @@
#include "i_environment.h"
#include "i_tenant_manager.h"
#include "i_package_handler.h"
#include "i_proxy_configuration.h"
#include "i_env_details.h"
#include "component.h"
@@ -54,7 +55,8 @@ class OrchestrationComp
Singleton::Consume<I_UpdateCommunication>,
Singleton::Consume<I_Downloader>,
Singleton::Consume<I_ManifestController>,
Singleton::Consume<I_EnvDetails>
Singleton::Consume<I_EnvDetails>,
Singleton::Consume<I_ProxyConfiguration>
{
public:
OrchestrationComp();

View File

@@ -40,7 +40,7 @@ public:
~OrchestrationStatus();
void init() override;
private:
class Impl;
std::unique_ptr<Impl> pimpl;

View File

@@ -115,7 +115,7 @@ public:
case ResourceFileType::VIRTUAL_SETTINGS: return "virtualSettings";
case ResourceFileType::VIRTUAL_POLICY: return "virtualPolicy";
default:
dbgAssert(false) << "Unknown file type";
dbgAssert(false) << AlertInfo(AlertTeam::CORE, "update process") << "Unknown file type";
}
return std::string();
}

View File

@@ -56,7 +56,7 @@ private:
if (mapped_type.second == type) return mapped_type.first;
}
dbgAssert(false) << "Unsupported type " << static_cast<int>(type);
dbgAssert(false) << AlertInfo(AlertTeam::CORE, "packaging") << "Unsupported type " << static_cast<int>(type);
// Just satisfying the compiler, this return never reached
return std::string();
}

View File

@@ -17,6 +17,7 @@
#include "i_package_handler.h"
#include "i_orchestration_tools.h"
#include "i_shell_cmd.h"
#include "i_environment.h"
#include "component.h"
class PackageHandler
@@ -24,7 +25,8 @@ class PackageHandler
public Component,
Singleton::Provide<I_PackageHandler>,
Singleton::Consume<I_ShellCmd>,
Singleton::Consume<I_OrchestrationTools>
Singleton::Consume<I_OrchestrationTools>,
Singleton::Consume<I_Environment>
{
public:
PackageHandler();

View File

@@ -0,0 +1,30 @@
#ifndef __PROMETHEUS_COMP_H__
#define __PROMETHEUS_COMP_H__
#include <memory>
#include "component.h"
#include "singleton.h"
#include "i_rest_api.h"
#include "i_messaging.h"
#include "generic_metric.h"
class PrometheusComp
:
public Component,
Singleton::Consume<I_RestApi>,
Singleton::Consume<I_Messaging>
{
public:
PrometheusComp();
~PrometheusComp();
void init() override;
private:
class Impl;
std::unique_ptr<Impl> pimpl;
};
#endif // __PROMETHEUS_COMP_H__

View File

@@ -7,15 +7,21 @@
#include "singleton.h"
#include "i_mainloop.h"
#include "i_environment.h"
#include "i_geo_location.h"
#include "i_generic_rulebase.h"
#include "i_shell_cmd.h"
#include "i_env_details.h"
class RateLimit
:
public Component,
Singleton::Consume<I_MainLoop>,
Singleton::Consume<I_TimeGet>,
Singleton::Consume<I_GeoLocation>,
Singleton::Consume<I_Environment>,
Singleton::Consume<I_GenericRulebase>
Singleton::Consume<I_GenericRulebase>,
Singleton::Consume<I_ShellCmd>,
Singleton::Consume<I_EnvDetails>
{
public:
RateLimit();

View File

@@ -7,24 +7,28 @@ static const std::string product_name = getenv("DOCKER_RPM_ENABLED") ? "CloudGua
static const std::string default_cp_cert_file = "/etc/cp/cpCert.pem";
static const std::string default_cp_key_file = "/etc/cp/cpKey.key";
static const std::string default_rpm_conf_path = "/etc/cp/conf/rpmanager/";
static const std::string default_certificate_path = "/etc/cp/rpmanager/certs";
static const std::string default_manual_certs_path = "/etc/cp/rpmanager/manualCerts/";
static const std::string default_config_path = "/etc/cp/conf/rpmanager/servers";
static const std::string default_rpm_prepare_path = "/etc/cp/conf/rpmanager/prepare/servers";
static const std::string default_nginx_log_files_path = "/var/log/nginx/";
static const std::string default_additional_files_path = "/etc/cp/conf/rpmanager/include";
static const std::string default_server_config = "additional_server_config.conf";
static const std::string default_location_config = "additional_location_config.conf";
static const std::string default_trusted_ca_suffix = "_user_ca_bundle.crt";
static const std::string default_nginx_log_files_path = "/var/log/nginx/";
static const std::string default_log_files_host_path = "/var/log/nano_agent/rpmanager/nginx_log/";
static const std::string default_config_path = "/etc/cp/conf/rpmanager/servers";
static const std::string default_template_path = "/etc/cp/conf/rpmanager/nginx-template-clear";
static const std::string default_manual_certs_path = "/etc/cp/rpmanager/manualCerts/";
static const std::string default_server_certificate_path = "/etc/cp/rpmanager/certs/sslCertificate_";
static const std::string default_server_certificate_key_path = "/etc/cp/rpmanager/certs/sslPrivateKey_";
static const std::string default_container_name = "cp_nginx_gaia";
static const std::string default_docker_image = "cp_nginx_gaia";
static const std::string default_nginx_config_file = "/etc/cp/conf/rpmanager/nginx.conf";
static const std::string default_prepare_nginx_config_file = "/etc/cp/conf/rpmanager/nginx_prepare.conf";
static const std::string default_global_conf_template = "/etc/cp/conf/rpmanager/nginx-conf-template";
static const std::string default_nginx_config_include_file =
"/etc/cp/conf/rpmanager/servers/nginx_conf_include";
"/etc/cp/conf/rpmanager/servers/00_nginx_conf_include.conf";
static const std::string default_global_conf_include_template =
"/etc/cp/conf/rpmanager/nginx-conf-include-template";
static const std::string default_global_conf_include_template_no_responses =

View File

@@ -0,0 +1,39 @@
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef __SERVICE_HEALTH_STATUS_H__
#define __SERVICE_HEALTH_STATUS_H__
#include "singleton.h"
#include "i_rest_api.h"
#include "i_environment.h"
#include "component.h"
class ServiceHealthStatus
:
public Component,
Singleton::Consume<I_RestApi>,
Singleton::Consume<I_Environment>
{
public:
ServiceHealthStatus();
~ServiceHealthStatus();
void init() override;
private:
class Impl;
std::unique_ptr<Impl> pimpl;
};
#endif // __SERVICE_HEALTH_STATUS_H__

View File

@@ -30,6 +30,7 @@
#include "generic_metric.h"
#define LOGGING_INTERVAL_IN_MINUTES 10
USE_DEBUG_FLAG(D_WAAP);
enum class AssetType { API, WEB, ALL, COUNT };
class WaapTelemetryEvent : public Event<WaapTelemetryEvent>
@@ -75,6 +76,20 @@ private:
std::unordered_set<std::string> sources_seen;
};
class WaapAdditionalTrafficTelemetrics : public WaapTelemetryBase
{
public:
void updateMetrics(const std::string &asset_id, const DecisionTelemetryData &data);
void initMetrics();
private:
MetricCalculations::Counter requests{this, "reservedNgenA"};
MetricCalculations::Counter sources{this, "reservedNgenB"};
MetricCalculations::Counter blocked{this, "reservedNgenC"};
MetricCalculations::Counter temperature_count{this, "reservedNgenD"};
std::unordered_set<std::string> sources_seen;
};
class WaapTrafficTelemetrics : public WaapTelemetryBase
{
public:
@@ -91,6 +106,7 @@ private:
MetricCalculations::Counter response_2xx{this, "reservedNgenG"};
MetricCalculations::Counter response_4xx{this, "reservedNgenH"};
MetricCalculations::Counter response_5xx{this, "reservedNgenI"};
MetricCalculations::Average<uint64_t> average_latency{this, "reservedNgenJ"};
};
class WaapAttackTypesMetrics : public WaapTelemetryBase
@@ -122,6 +138,7 @@ private:
std::map<std::string, std::shared_ptr<WaapTrafficTelemetrics>> traffic_telemetries;
std::map<std::string, std::shared_ptr<WaapAttackTypesMetrics>> attack_types;
std::map<std::string, std::shared_ptr<WaapAttackTypesMetrics>> attack_types_telemetries;
std::map<std::string, std::shared_ptr<WaapAdditionalTrafficTelemetrics>> additional_traffic_telemetries;
template <typename T>
void initializeTelemetryData(
@@ -131,6 +148,7 @@ private:
std::map<std::string, std::shared_ptr<T>>& telemetryMap
) {
if (!telemetryMap.count(asset_id)) {
dbgTrace(D_WAAP) << "creating telemetry data for asset: " << data.assetName;
telemetryMap.emplace(asset_id, std::make_shared<T>());
telemetryMap[asset_id]->init(
telemetryName,
@@ -138,7 +156,9 @@ private:
ReportIS::IssuingEngine::AGENT_CORE,
std::chrono::minutes(LOGGING_INTERVAL_IN_MINUTES),
true,
ReportIS::Audience::SECURITY
ReportIS::Audience::SECURITY,
false,
asset_id
);
telemetryMap[asset_id]->template registerContext<std::string>(
@@ -151,29 +171,30 @@ private:
std::string("Web Application"),
EnvKeyAttr::LogSection::SOURCE
);
telemetryMap[asset_id]->template registerContext<std::string>(
"assetId",
asset_id,
EnvKeyAttr::LogSection::SOURCE
);
telemetryMap[asset_id]->template registerContext<std::string>(
"assetName",
data.assetName,
EnvKeyAttr::LogSection::SOURCE
);
telemetryMap[asset_id]->template registerContext<std::string>(
"practiceId",
data.practiceId,
EnvKeyAttr::LogSection::SOURCE
);
telemetryMap[asset_id]->template registerContext<std::string>(
"practiceName",
data.practiceName,
EnvKeyAttr::LogSection::SOURCE
);
telemetryMap[asset_id]->registerListener();
}
dbgTrace(D_WAAP) << "updating telemetry data for asset: " << data.assetName;
telemetryMap[asset_id]->template registerContext<std::string>(
"assetId",
asset_id,
EnvKeyAttr::LogSection::SOURCE
);
telemetryMap[asset_id]->template registerContext<std::string>(
"assetName",
data.assetName,
EnvKeyAttr::LogSection::SOURCE
);
telemetryMap[asset_id]->template registerContext<std::string>(
"practiceId",
data.practiceId,
EnvKeyAttr::LogSection::SOURCE
);
telemetryMap[asset_id]->template registerContext<std::string>(
"practiceName",
data.practiceName,
EnvKeyAttr::LogSection::SOURCE
);
}
};

View File

@@ -35,8 +35,10 @@ public:
bool isOverSSL() const { return over_ssl; }
std::string getPort() const { return port; }
std::string getQuery() const { return query; }
std::string getHost() const;
URLProtocol getProtocol() const { return protocol; }
std::string toString() const;
void setHost(const std::string &new_host);
void setQuery(const std::string &new_query);
private:
@@ -47,6 +49,7 @@ private:
std::string base_url;
std::string port;
std::string query;
std::string host;
URLProtocol protocol;
};

View File

@@ -30,6 +30,7 @@ public:
void parseRequestHeaders(const HttpHeader &header) const;
std::vector<std::string> getHeaderValuesFromConfig(const std::string &header_key) const;
void setXFFValuesToOpaqueCtx(const HttpHeader &header, ExtractType type) const;
void setWafTagValuesToOpaqueCtx(const HttpHeader &header) const;
private:
class UsersIdentifiersConfig
@@ -58,7 +59,7 @@ private:
const std::string::const_iterator &end,
const std::string &key) const;
Buffer extractKeyValueFromCookie(const std::string &cookie_value, const std::string &key) const;
Maybe<std::string> parseXForwardedFor(const std::string &str) const;
Maybe<std::string> parseXForwardedFor(const std::string &str, ExtractType type) const;
std::vector<UsersIdentifiersConfig> user_identifiers;
};

Some files were not shown because too many files have changed in this diff.