Compare commits

...

110 Commits

Author SHA1 Message Date
Ned Wright
3061342b45 Nov_12_2023-Dev 2023-11-12 18:50:17 +00:00
WrightNed
0869b8f24d Merge pull request #68 from bxlxx/main
fix installation usage errors in README
2023-11-05 16:06:57 +02:00
Syin Wu
1a4ab5f0d7 fix installation errors in README 2023-11-03 11:19:44 +08:00
WrightNed
4a2d25ab65 Update CMakeLists.txt 2023-10-24 18:04:33 +03:00
Ned Wright
f2ca7301b9 Updating charts 2023-10-08 13:38:39 +00:00
Ned Wright
3d11ead170 Updating charts 2023-10-08 07:15:09 +00:00
Ned Wright
39b8c5a5ff Update core/version/build_version_vars_h.py 2023-10-08 06:26:36 +00:00
Ned Wright
de6f1033bd Merge branch 'main' of https://github.com/openappsec/openappsec 2023-10-05 13:51:42 +00:00
Ned Wright
58958b2436 Change information collection flag 2023-10-05 13:50:29 +00:00
roybarda
59e7f00b3e Merge pull request #62 from openappsec/open_appsec_add_agent_cache_for_rate_limit
Open appsec add agent cache for rate limit
2023-10-05 15:44:37 +03:00
Daniel Eisenberg
e102b25b7d add cache conf file 2023-10-05 15:27:19 +03:00
Daniel Eisenberg
0386431eee add cache conf file 2023-10-05 15:21:12 +03:00
Daniel Eisenberg
fd1a77628e add cache conf file 2023-10-05 15:16:58 +03:00
Daniel Eisenberg
da911582a5 use sh instead of bash 2023-10-05 14:49:07 +03:00
roybarda
798dd2a7d1 Merge pull request #61 from openappsec/open_appsec_add_agent_cache_for_rate_limit
adding agent cache service for docker
2023-10-05 14:04:46 +03:00
Daniel Eisenberg
6bda60ae84 adding agent cache service to entry.sh 2023-10-05 14:03:00 +03:00
roybarda
5b9769e94e Update README.md
add redis for rate limit packaging purposes
2023-10-05 13:58:09 +03:00
roybarda
6693176131 Merge pull request #60 from openappsec/open_appsec_add_agent_cache_for_rate_limit
Open appsec add agent cache for rate limit
2023-10-05 13:46:11 +03:00
Daniel Eisenberg
c2ced075eb Merge branch 'main' of https://github.com/openappsec/openappsec into open_appsec_add_agent_cache_for_rate_limit 2023-10-05 13:45:02 +03:00
WrightNed
0b4bdd3677 Merge pull request #59 from openappsec/orianelou-patch-2
Update README.md
2023-10-05 13:41:15 +03:00
orianelou
d6599cc7bc Update README.md
updated supported versions
2023-10-05 13:34:25 +03:00
Daniel Eisenberg
4db7a54c27 adding agent cache service 2023-10-04 21:35:52 +03:00
Ned Wright
f3ede0c60e Update nodes/orchestration/scripts/cp-nano-makefile-generator.sh 2023-10-04 14:37:21 +00:00
roybarda
79bac9f501 adding model version to AI model 2023-10-03 09:52:26 +03:00
Ned Wright
89263f6f34 Change default cloud logging 2023-10-02 10:04:52 +00:00
Ned Wright
5feb12f7e4 Refactor rate limit 2023-10-01 10:03:24 +00:00
WrightNed
a2ee6ca839 Merge pull request #57 from openappsec/Sep_24_2023-Dev
Sep 24 2023 dev
2023-09-28 19:43:02 +03:00
Ned Wright
1c10a12f6f Minor fixes 2023-09-28 16:40:42 +00:00
Ned Wright
e9f6ebd02b Minor fixes 2023-09-28 16:38:31 +00:00
Ned Wright
433c7c2d91 Remove old files 2023-09-28 13:11:47 +00:00
Ned Wright
582791e37a Sep_24_2023-Dev 2023-09-24 10:28:57 +00:00
Ned Wright
a4d1fb6f7f Adding explicit k8s check update invocation 2023-09-19 09:16:27 +00:00
bilbogh
dfbfdca1a9 Update README.md 2023-09-15 10:07:49 +02:00
bilbogh
36f511f449 Update README.md 2023-09-14 23:21:06 +02:00
bilbogh
f91f283b77 Added new section 2023-09-14 22:59:28 +02:00
bilbogh
7c762e97a3 Added charts 2023-09-14 22:36:50 +02:00
Ned Wright
aaa1fbe8ed Adding links to command line tools 2023-09-12 16:42:52 +00:00
Ned Wright
67e68c84c3 Fix finding libraries 2023-09-11 12:40:26 +03:00
Ned Wright
149a7305b7 Remove old default fog 2023-09-07 14:54:40 +00:00
Ned Wright
ea20a51689 Add upload option to open-appsec-ctl 2023-09-04 16:15:06 +00:00
bilbogh
19f2383ae2 Update README.md
Brought back lines that were accidentally deleted
2023-09-01 14:18:16 +02:00
WrightNed
4038c18bda Update CMakeLists.txt 2023-08-24 20:26:18 +03:00
WrightNed
a9b6d2e715 Merge pull request #47 from openappsec/Aug_23_2023-Dev
Aug 23 2023 dev
2023-08-24 15:10:14 +03:00
Ned Wright
81c75495cc temporarily disabling some unit-tests 2023-08-24 11:56:16 +00:00
WrightNed
5505022f47 Update cp-nano-package-list 2023-08-23 20:46:45 +03:00
Ned Wright
b25fd8def5 Aug_23_2023-Dev 2023-08-23 14:15:32 +00:00
Ned Wright
702c1184ea Merge branch 'main' of https://github.com/openappsec/openappsec 2023-08-17 17:40:54 +00:00
Ned Wright
b3cfd7e9d8 Check /etc/environment exists before using it 2023-08-17 17:40:22 +00:00
WrightNed
e36b990161 Merge pull request #43 from openappsec/changing_socket_readiness_testing
Change socket readiness testing
2023-08-10 00:22:37 +03:00
NedWright
09868e6d7c Change socket readiness testing 2023-08-09 20:59:26 +00:00
bilbogh
e25f517c19 Update README.md 2023-08-06 18:22:31 +02:00
Ned Wright
42a31e37b1 Add check if obfuscation is enabled 2023-07-28 15:46:08 +00:00
Ned Wright
abe275c828 Add check if obfuscation is enabled 2023-07-27 15:41:41 +00:00
WrightNed
71d198f41a Merge pull request #40 from openappsec/support-advance-model
support-advanced-model in all flavors
2023-07-25 14:31:40 +03:00
WrightNed
3ed569fe35 Merge pull request #41 from openappsec/Jul_24_23_chart_update
Update charts
2023-07-25 13:51:59 +03:00
roybarda
c7cb494e2b support-advanced-model in all flavors 2023-07-25 10:06:48 +03:00
noam
edd357f297 Update charts 2023-07-24 17:24:40 +03:00
Ned Wright
08583fdb4c Create log_utils.h 2023-07-19 11:22:06 +00:00
Ned Wright
e5ef6c5ad4 Create log_utils.h 2023-07-19 11:16:19 +00:00
WrightNed
3c24666643 Update CMakeLists.txt 2023-07-19 14:03:40 +03:00
WrightNed
19e8906704 Merge pull request #37 from openappsec/Jul_06_2023-Dev
Jul 06 2023 dev
2023-07-16 15:53:39 +03:00
Ned Wright
01b6544ca5 Fix manifest handling 2023-07-16 12:51:20 +00:00
WrightNed
ebc2b2be0d Merge pull request #36 from openappsec/WrightNed-patch-1
Update cp-agent-info.sh
2023-07-12 19:04:46 +03:00
WrightNed
fc6355a3b2 Update cp-agent-info.sh
Better checks for which tar options exist
2023-07-12 18:02:52 +03:00
Ned Wright
c89001b6e0 Change library locating 2023-07-06 09:34:00 +00:00
Ned Wright
a59f079ef7 Jul 5th update 2023-07-05 23:32:39 +00:00
orianelou
22f1a984aa Merge pull request #32 from openappsec/orianelou-patch-1
Update README.md
2023-07-02 11:03:32 +03:00
orianelou
7c98ba9834 Update README.md
replaced --hybrid-mode with --standalone
2023-07-02 10:48:23 +03:00
Ned Wright
5192380549 Ignoring null packages list 2023-06-20 09:31:14 +00:00
Ned Wright
a270456278 Ignoring null packages list 2023-06-19 10:46:52 +00:00
Ned Wright
795d07bd41 Updating Kong helm chart 2023-06-01 16:15:31 +00:00
Ned Wright
45e51ddbf7 Adding an example of local policy yml file 2023-05-29 17:15:02 +00:00
Ned Wright
36f65b9b1f Updating the NGINX Ingress helm chart 2023-05-29 16:30:53 +00:00
orianelou
dfcc71c8c2 Update CONTRIBUTING.md 2023-05-25 11:19:29 +03:00
orianelou
1e8540f166 Create CONTRIBUTING.md
added contrib file and updated CONTRIBUTING.md
2023-05-25 11:18:55 +03:00
WrightNed
a754082405 Merge pull request #24 from openappsec/May_11_2023-Dev
May 11th 2023 update
2023-05-14 21:53:29 +03:00
Ned Wright
3aa0885f74 Merge branch 'main' into May_11_2023-Dev 2023-05-14 18:52:57 +00:00
WrightNed
3b49cfec54 Merge pull request #23 from openappsec/crowdsec
Crowdsec
2023-05-14 21:46:58 +03:00
Ned Wright
d7494b1bbc Adding crowdsec auxiliary to monitored processes 2023-05-14 17:29:44 +00:00
Ned Wright
29bd82d125 May 11th 2023 update 2023-05-11 18:54:44 +00:00
Ned Wright
c49debe5d9 Enabling crowdsec 2023-05-11 10:40:06 +00:00
roybarda
240f58217a Adding open-appsec-kong helm chart to repo based on kong 2.16.1 2023-05-02 14:30:33 +03:00
WrightNed
2c750513a1 Merge pull request #22 from openappsec/Apr_27_2023-Dev
Apr 27th Update
2023-05-01 13:52:37 +03:00
Ned Wright
fd2d9fa081 Apr 27th Update 2023-04-27 19:05:49 +00:00
orianelou
cd4fb6e3e8 Update README.md 2023-04-18 14:09:34 +03:00
orianelou
bfb5fcb50d Update README.md
updated installation instructions for Kong
2023-04-18 14:04:29 +03:00
orianelou
cf14e6f383 Update README.md
typo and grammar fix
2023-04-16 12:09:08 +03:00
orianelou
413da6f7a1 Update README.md
Updates killerkoda to instruct links
2023-04-10 12:18:00 +03:00
WrightNed
997d2e4b42 Merge pull request #18 from openappsec/Mar_26_2023-Dev
Mar 26th 2023 Dev
2023-03-27 19:38:12 +03:00
Ned Wright
3f5a3b27a4 Mar 26th 2023 Dev 2023-03-27 15:06:15 +00:00
WrightNed
5848f1d7e3 Merge pull request #17 from openappsec/Mar_13_2023-Dev
Mar 13th 2023 update
2023-03-13 21:04:46 +02:00
Ned Wright
a9f917d638 Mar 13th 2023 update 2023-03-13 19:02:52 +00:00
Ned Wright
03f4d6bf39 Replace LogGen with direct send of report to fog 2023-03-07 16:30:42 +00:00
Ned Wright
61bb2ce4c7 Changes to fog working in hybrid mode 2023-03-07 15:02:46 +00:00
WrightNed
6480357fde Merge pull request #16 from openappsec/Mar_02_2023-Dev
Mar 2nd 2023 update
2023-03-05 11:27:08 +02:00
Ned Wright
2a7ddf0666 Mar 2nd 2023 update 2023-03-02 17:08:49 +00:00
WrightNed
fef95b12b3 Merge pull request #15 from openappsec/Feb_22_2023-Dev
Feb 22nd 2023 update
2023-02-24 14:26:51 +02:00
Ned Wright
38e6e1bbcf Feb 22nd 2023 update 2023-02-22 17:20:21 +00:00
WrightNed
fd6239f44a Adding indication that policy was applied. 2023-02-22 16:36:19 +02:00
WrightNed
408ac667de Update CMakeLists.txt
Fix Typo.
2023-02-20 10:39:20 +02:00
WrightNed
2072eba07a Update CMakeLists.txt
Excluding separate installation of GraphQL library.
2023-02-20 10:33:03 +02:00
WrightNed
1f24c12a34 Merge pull request #14 from openappsec/Feb_15_2023-Dev
Feb 15 2023 dev
2023-02-19 21:30:00 +02:00
Ned Wright
161fea1361 Reordering calls 2023-02-19 19:25:46 +00:00
Ned Wright
f3ff6c0f4d Feb 15th 2023 update - adding Apache License preamble 2023-02-15 19:49:30 +00:00
Ned Wright
6a9b33ff93 Feb 15th 2023 update 2023-02-15 19:09:38 +00:00
WrightNed
f7934cd09d Merge pull request #10 from openappsec/Jan_22_2023-Dev
Fix installation issues
2023-01-22 20:30:56 +02:00
Ned Wright
d130454275 Fix installation issues 2023-01-22 09:42:37 +00:00
WrightNed
baa1d9ae03 Merge pull request #9 from openappsec/Jan_18_2023-Dev
Jan 18th update
2023-01-18 21:19:12 +02:00
Ned Wright
bb0714e4cb Jan 18th update 2023-01-18 16:36:27 +00:00
WrightNed
35b6a4aebf Merge pull request #8 from openappsec/Jan_16_2023
Jan 16th update
2023-01-17 17:36:21 +02:00
742 changed files with 148648 additions and 10603 deletions

.gitattributes (vendored, new file, 2 lines added)

@@ -0,0 +1,2 @@
build_system/docker/install-cp-agent-intelligence-service.sh binary
build_system/docker/install-cp-crowdsec-aux.sh binary

CMakeLists.txt

@@ -3,6 +3,11 @@ project (ngen)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-terminate -Dalpine")
execute_process(COMMAND grep -c "Alpine Linux" /etc/os-release OUTPUT_VARIABLE IS_ALPINE)
if(IS_ALPINE EQUAL "1")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Dalpine")
endif()
find_package(Boost REQUIRED)
find_package(ZLIB REQUIRED)
find_package(GTest REQUIRED)
@@ -28,6 +33,7 @@ include_directories(core/include/services_sdk/interfaces)
include_directories(core/include/services_sdk/resources)
include_directories(core/include/services_sdk/utilities)
include_directories(core/include/attachments)
include_directories(events/include)
include_directories(components/include)
add_subdirectory(build_system)

CONTRIBUTING.md

@@ -1,37 +1,64 @@
# open-appsec Contributing Guide
Thank you for your interest in open-appsec. We welcome everyone that wishes to share their knowledge and expertise to enhance and expand the project.
# open-appsec Contributing Guide🌴
Read our [Code of Conduct](./CODE_OF_CONDUCT.md) to keep our community approachable and respectable.
Thank you for your interest in open-appsec. We welcome contributions of all kinds; there is no need to write code to be helpful! All of the following tasks are noble and worthy contributions that you can make without coding:
In this guide we will provide an overview of the various contribution options' guidelines - from reporting or fixing a bug, to suggesting an enhancement.
- Reporting security vulnerabilities
- Reporting a bug
- Helping a member of the community
- Notes about our documentation
- Providing feedback and feature requests
Before making any kind of contribution, read our [Code of Conduct](./CODE_OF_CONDUCT.md) to keep our community approachable and respectable.
This guide will provide an overview of the various contribution options' guidelines - from reporting or fixing a bug to suggesting an enhancement.
## Reporting security vulnerabilities
If you've found a vulnerability or a potential vulnerability in open-appsec please let us know at [security-alert@openappsec.io](mailto:security-alert@openappsec.io). We'll send a confirmation email to acknowledge your report within 24 hours, and we'll send an additional email when we've identified the issue positively or negatively.
If you've found a vulnerability or a potential vulnerability in open-appsec please let us know at [security-alert@openappsec.io](mailto:security-alert@openappsec.io). We'll send a confirmation email to acknowledge your report within 24 hours and send an additional email when we've identified the issue positively or negatively.
An internal process will be activated upon determining the validity of a reported security vulnerability, which will end with releasing a fix and deciding on the applicable disclosure actions. The reporter of the issue will receive updates of this process' progress.
An internal process will be activated upon determining the validity of a reported security vulnerability, which will end with releasing a fix and deciding on the appropriate disclosure actions. The reporter of the issue will receive updates on this process' progress.
## Reporting a bug
**Important - If the bug you wish to report regards a suspicion of a security vulnerability, please refer to the "Reporting security vulnerability" section**
**Important - If the bug you wish to report regards a suspicion of a security vulnerability, please refer to the [Reporting security vulnerability](#Reporting-security-vulnerabilities) section**
To report a bug, you can either open a new issue using a relevant [issue form](https://github.com/github/docs/issues/new/choose), or, alternatively, [contact us via our open-appsec open source distribution list](mailto:opensource@openappsec.io).
To report a bug, you can either open a new issue using a relevant [issue form](https://github.com/github/docs/issues/new/choose) or, [contact us via our open-appsec open source distribution list](mailto:opensource@openappsec.io).
Be sure to include a **title and clear description**, as much relevant information as possible, and a **code sample** or an **executable test case** demonstrating the expected behavior that is not occurring.
## Contributing a fix to a bug
Please [contact us via our open-appsec open source distribution list](mailto:opensource@openappsec.io) before writing your code. We will want to make sure we understand the boundaries of the proposed fix, that the relevant coding style is clear for the proposed fix's location in the code, and that the proposed contribution is relevant and eligible.
Please [contact us via our open-appsec open source distribution list](mailto:opensource@openappsec.io) before writing your code. We will want to make sure we understand the boundaries of the proposed fix, that the relevant coding style is clear for the proposed fix's location in the code, and that the suggested contribution is relevant and eligible.
Once you've received our confirmation, follow the next steps:
1. Fork the repository to your GitHub account.
2. Clone your forked repository to your local machine.
3. Add your contributions to relevant locations in the local copy of the codebase.
4. Push your changes back to your forked repository.
5. Open a pull request (PR) against the main branch of the original repository (see the command sketch below).
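For reference, a minimal sketch of steps 1–5 using plain git; the fork URL, branch name, and commit message are placeholders:
```bash
# 1. Fork the repository to your GitHub account (via the GitHub UI), then:
git clone https://github.com/<your-user>/openappsec.git   # 2. clone your fork
cd openappsec
git checkout -b my-fix                                     # placeholder branch name
# 3. make your changes in the relevant locations, then commit them
git add -A
git commit -m "Fix <short description>"
git push origin my-fix                                     # 4. push back to your fork
# 5. open a PR against the main branch of openappsec/openappsec from the GitHub UI
```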
## Contributing code-independent enhancements
For any code-independent enhancements (such as docker-compose files, or instructions on how to compile on different OSs) please follow the next steps:
1. [suggest your change via our open-appsec open-source distribution list](mailto:opensource@openappsec.io) to inform us about your possible contribution and wait for our confirmation.
2. Fork the repository to your GitHub account.
3. Clone your forked repository to your local machine.
4. Add your contributions to the "contrib" folder in the local copy of the codebase.
5. Push your changes back to your forked repository.
6. Open a pull request (PR) against the main branch of the original repository.
Please note that during the PR review we might adjust the location of the contributions.
## Proposing an enhancement
Please [suggest your change via our open-appsec open source distribution list](mailto:opensource@openappsec.io) before writing your code. We will contact you to make sure we understand the boundaries of the proposed fix, that the relevant coding style is clear for the proposed fix's location in the code, and that the proposed contribution is relevant and eligible. There may be additional considerations that we would like to discuss with you before implementing the enhancement.
Please [suggest your change via our open-appsec open-source distribution list](mailto:opensource@openappsec.io) before writing your code. We will contact you to make sure we understand the boundaries of the proposed fix, that the relevant coding style is clear for the proposed fix's location in the code, and that the suggested contribution is relevant and eligible. There may be additional considerations that we would like to discuss with you before implementing the enhancement.
## Open Source documentation issues
For reporting or suggesting a change, of any issue detected in the documentation files of our open source repositories, please use the same guidelines as bug reports/fixes.
To propose changes to our [documentation](https://docs.openappsec.io/?utm_medium=web&utm_source=wix&utm_content=top_menu) you can either open a new issue using a relevant [issue form](https://github.com/github/docs/issues/new/choose) or [contact us via our open-appsec open source distribution list](mailto:opensource@openappsec.io).
# Final Thanks
We value all efforts to read, suggest changes and/or contribute to our open source files. Thank you for your time and efforts.
# Final thanks
We value all efforts to read, suggest changes, and/or contribute to our open-source files. Thank you for your time and efforts.
The open-appsec Team

README.md

@@ -6,7 +6,7 @@
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/6629/badge)](https://bestpractices.coreinfrastructure.org/projects/6629)
# About
[open-appsec](https://www.openappsec.io) (openappsec.io) builds on machine learning to provide preemptive web app & API threat protection against OWASP-Top-10 and zero-day attacks. It can be deployed as add-on to Kubernetes Ingress, NGINX, Envoy (soon) and API Gateways.
[open-appsec](https://www.openappsec.io) (openappsec.io) builds on machine learning to provide preemptive web app & API threat protection against OWASP-Top-10 and zero-day attacks. It can be deployed as an add-on to Kubernetes Ingress, NGINX, Envoy (soon), and API Gateways.
The open-appsec engine learns how users normally interact with your web application. It then uses this information to automatically detect requests that fall outside of normal operations, and conducts further analysis to decide whether the request is malicious or not.
@@ -14,52 +14,78 @@ Upon every HTTP request, all parts are decoded, JSON and XML sections are extrac
Every request to the application goes through two phases:
1. Multiple variables are fed to the machine learning engine. These variables, which are either directly extracted from the HTTP request or decoded from different parts of the payload, include attack indicators, IP addresses, user agents, fingerprints, and many other considerations. The supervised model of the machine learning engine uses these variables to compare the request with many common attack patterns found across the globe.
1. Multiple variables are fed to the machine-learning engine. These variables, which are either directly extracted from the HTTP request or decoded from different parts of the payload, include attack indicators, IP addresses, user agents, fingerprints, and many other considerations. The supervised model of the machine learning engine uses these variables to compare the request with many common attack patterns found across the globe.
2. If the request is identified as a valid and legitimate request, the request is allowed, and forwarded to your application. If, however, the request is considered suspicious or high risk, it then gets evaluated by the unsupervised model, which was trained in your specific environment. This model uses information such as the URL and the users involved to create a final confidence score that determines whether the request should be allowed or blocked.
2. If the request is identified as a valid and legitimate request the request is allowed, and forwarded to your application. If, however, the request is considered suspicious or high risk, it then gets evaluated by the unsupervised model, which was trained in your specific environment. This model uses information such as the URL and the users involved to create a final confidence score that determines whether the request should be allowed or blocked.
The project is currently in Beta and feedback is most welcome!
![image](https://github.com/openappsec/openappsec/assets/114033741/f32f1c99-9c45-4d21-aa85-61408a16a18e)
## Machine Learning models
open-appsec uses two models:
open-appsec uses two machine learning models:
1. A supervised model that was trained offline based on millions of requests, both malicious and benign.
* A basic model is provided as part of this repository. It is recommended for use in Monitor-Only and Test environments.
* An advanced model which is more accurate and recommended for Production use, can be downloaded from [open-appsec portal](https://my.openappsec.io). User Menu->Download advanced ML model. This model updates from time to time and you will get an email when these updates happen.
* A **basic model** is provided as part of this repository. It is recommended for use in Monitor-Only and Test environments.
* An **advanced model** which is more accurate and **recommended for Production** use can be downloaded from the [open-appsec portal](https://my.openappsec.io)->User Menu->Download advanced ML model. This model updates from time to time and you will get an email when these updates happen.
2. An unsupervised model that is being built in real time in the protected environment. This model uses traffic patterns specific to the environment.
# Management
open-appsec can be managed using multiple methods:
* [Declarative configuration files](https://docs.openappsec.io/getting-started/getting-started)
* [Kubernetes Helm Charts and annotations](https://docs.openappsec.io/getting-started/getting-started)
* [Using SaaS Web Management](https://docs.openappsec.io/getting-started/using-the-web-ui-saas)
open-appsec Web UI:
![image](https://github.com/openappsec/openappsec/assets/114033741/22d99379-df52-45c8-984f-1b820635f3b9)
## Deployment Playgrounds (Virtual labs)
You can experiment with open-appsec using [Playgrounds](https://www.openappsec.io/playground).
![image](https://github.com/openappsec/openappsec/assets/114033741/14d35d69-4577-48fc-ae87-ea344888e94d)
# Resources
* [Project Website](https://openappsec.io)
* [Offical documentation](https://docs.openappsec.io/)
* [Video Tutorial](https://www.youtube.com/playlist?list=PL8pzPlPbjDY0V2u7E-KZQrzIiw41fWB0h)
* [Live Kubernetes Playground](https://killercoda.com/open-appsec/scenario/simple-appsec-kubernetes-ingress)
* [Live Linux/NGINX Playground](https://killercoda.com/open-appsec/scenario/simple-appsec-for-nginx)
* [Official Documentation](https://docs.openappsec.io/)
* [Video Tutorials](https://www.openappsec.io/tutorials)
# Installation
# open-appsec Installation
Installer for Kubernetes:
For Kubernetes (NGINX Ingress) using the installer:
```bash
wget https://downloads.openappsec.io/open-appsec-k8s-install && chmod +x open-appsec-k8s-install
./open-appsec-k8s-install
$ wget https://downloads.openappsec.io/open-appsec-k8s-install && chmod +x open-appsec-k8s-install
$ ./open-appsec-k8s-install
```
Installer for standard NGINX (list of supported/pre-compiled NGINX attachements is available [here](https://downloads.openappsec.io/supported-nginx.txt)):
For Kubernetes (NGINX or Kong) using Helm: follow the [documentation](https://docs.openappsec.io/getting-started/start-with-kubernetes/install-using-helm-ingress-nginx-and-kong). Use this method if you've built your own containers.
For Linux (NGINX or Kong) using the installer (list of supported/pre-compiled NGINX attachments is available [here](https://downloads.openappsec.io/packages/supported-nginx.txt)):
```bash
wget https://downloads.openappsec.io/open-appsec-nginx-install && chmod +x open-appsec-nginx-install
./open-appsec-nginx-install
$ wget https://downloads.openappsec.io/open-appsec-install && chmod +x open-appsec-install
$ ./open-appsec-install --auto
```
It is recommended to read the documentation or follow the video tutorial.
For Linux, if you've built your own package, use the following commands:
```bash
$ install-cp-nano-agent.sh --install --hybrid_mode
$ install-cp-nano-service-http-transaction-handler.sh install
$ install-cp-nano-attachment-registration-manager.sh --install
```
You can add the ```--token <token>``` and ```--email <email address>``` options to the first command. To get a token, follow the [documentation](https://docs.openappsec.io/getting-started/using-the-web-ui-saas/connect-deployed-agents-to-saas-management-k8s-and-linux).
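Combined, a sketch of the first command with both options; the angle-bracket values are placeholders you must substitute:
```bash
$ wget https://downloads.openappsec.io/open-appsec-install && chmod +x open-appsec-install
$ ./open-appsec-install --auto --token <token> --email <email address>
```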
For Docker: follow [documentation](https://docs.openappsec.io/getting-started/start-with-docker)
For more information read the [documentation](https://docs.openappsec.io/) or follow the [video tutorials](https://www.openappsec.io/tutorials).
# Repositories
open-appsec GitHub includes four main repositores:
open-appsec GitHub includes four main repositories:
* [openappsec/openappsec](https://github.com/openappsec/openappsec) the main code and logic of open-appsec. Developed in C++.
* [openappsec/attachment](https://github.com/openappsec/attachment) connects between processes that provide HTTP data (e.g. NGINX) and the open-appsec Agent security logic. Developed in C.
@@ -78,12 +104,14 @@ Before compiling the services, you'll need to ensure the latest development vers
* GTest
* GMock
* cURL
* Redis
* Hiredis
An example of installing the packages on Alpine:
```bash
$ apk update
$ apk add boost-dev openssl-dev pcre2-dev libxml2-dev gtest-dev curl-dev
$ apk add boost-dev openssl-dev pcre2-dev libxml2-dev gtest-dev curl-dev hiredis-dev redis
```
## Compiling and packaging the agent code
@@ -102,7 +130,7 @@ An example of installing the packages on Alpine:
## Placing the agent code inside an Alpine docker image
Once the agent code has been compiled and packaged, an Alpine image running it can be created. This requires permissions to excute the `docker` command.
Once the agent code has been compiled and packaged, an Alpine image running it can be created. This requires permissions to execute the `docker` command.
```bash
$ make docker
@@ -110,26 +138,26 @@ Once the agent code has been compiled and packaged, an Alpine image running it c
This will create a local image for your docker called `agent-docker`.
## Deployment of the agent docker image as container
## Deployment of the agent docker image as a container
To run a Nano-Agent as a container the following steps are required:
1. If you are using a container management system / plan on deploying the container using your CI, add the agent docker image to an accessible registry.
2. If you are planning to manage the agent using the open-appsec UI, then make sure to obtain an agent token from the Management Portal and Enforce.
3. Run the agent with the follwing command (where e https_proxy parameter is optional):
3. Run the agent with the following command (where -e https_proxy parameter is optional):
`docker run -d --name=agent-container --ipc=host -v=<path to persistent location for agent config>:/etc/cp/conf -v=<path to persistent location for agent data files>:/etc/cp/data -v=<path to persistent location for agent debugs and logs>:/var/log/nano_agent e https_proxy=<user:password@Proxy address:port> -it <agent-image> /cp-nano-agent [--token <token> | --hybrid-mode]`
`docker run -d --name=agent-container --ipc=host -v=<path to persistent location for agent config>:/etc/cp/conf -v=<path to persistent location for agent data files>:/etc/cp/data -v=<path to persistent location for agent debugs and logs>:/var/log/nano_agent -e https_proxy=<user:password@Proxy address:port> -it <agent-image> /cp-nano-agent [--token <token> | --standalone]`
Example:
```bash
$ docker run -d --name=agent-container --ipc=host -v=/home/admin/agent/conf:/etc/cp/conf -v=/home/admin/agent/data:/etc/cp/data -v=/home/admin/agent/logs:/var/log/nano_agent e https_proxy=user:password@1.2.3.4:8080 -it agent-docker /cp-nano-agent --hybrid-mode
$ docker run -d --name=agent-container --ipc=host -v=/home/admin/agent/conf:/etc/cp/conf -v=/home/admin/agent/data:/etc/cp/data -v=/home/admin/agent/logs:/var/log/nano_agent e https_proxy=user:password@1.2.3.4:8080 -it agent-docker /cp-nano-agent --standalone
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
1e67f2abbfd4 agent-docker "/cp-nano-agent --hybrid-mode" 1 minute ago Up 1 minute agent-container
```
Note that you are not requiered to use a token from the Management Portal if you are managing your security policy locally. However, you are required to use the --hybryd-mode flag in such case. In addition, the voliums in the command are mandatory only if you wish to have persistency upon restart/upgrade/crash of the agent and its re execution.
Lastly, --ipc=host argument is mandatory in order for the agent to have access to shared memory with a protected attachment (nginx server).
Note that you are not required to use a token from the Management Portal if you are managing your security policy locally. However, you are required to use the --standalone flag in such cases. In addition, the volumes in the command are mandatory only if you wish to have persistency upon restart/upgrade/crash of the agent and its re-execution.
Lastly, --ipc=host argument is mandatory in order for the agent to have access to shared memory with a protected attachment (NGINX server).
4. Create or replace the NGINX container using the [Attachment Repository](https://github.com/openappsec/attachment).

(kernel debug flag definitions header; filename not shown)

@@ -31,5 +31,7 @@ DEFINE_KDEBUG_FLAG(statefulValidation)
DEFINE_KDEBUG_FLAG(statelessValidation)
DEFINE_KDEBUG_FLAG(kernelMetric)
DEFINE_KDEBUG_FLAG(tproxy)
DEFINE_KDEBUG_FLAG(tenantStats)
DEFINE_KDEBUG_FLAG(uuidTranslation)
#endif // DEFINE_KDEBUG_FLAG

CHANGELOG.md (ingress-nginx Helm chart)

@@ -2,39 +2,124 @@
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.4.0
* Adding support for disabling liveness and readiness probes to the Helm chart by @njegosrailic in https://github.com/kubernetes/ingress-nginx/pull/9238
* add:(admission-webhooks) ability to set securityContext by @ybelMekk in https://github.com/kubernetes/ingress-nginx/pull/9186
* #7652 - Updated Helm chart to use the fullname for the electionID if not specified. by @FutureMatt in https://github.com/kubernetes/ingress-nginx/pull/9133
* Rename controller-wehbooks-networkpolicy.yaml. by @Gacko in https://github.com/kubernetes/ingress-nginx/pull/9123
### 4.3.0
- Support for Kubernetes v.1.25.0 was added and support for endpoint slices
- Support for Kubernetes v1.20.0 and v1.21.0 was removed
- [8890](https://github.com/kubernetes/ingress-nginx/pull/8890) migrate to endpointslices
- [9059](https://github.com/kubernetes/ingress-nginx/pull/9059) kubewebhookcertgen sha change after go1191
- [9046](https://github.com/kubernetes/ingress-nginx/pull/9046) Parameterize metrics port name
- [9104](https://github.com/kubernetes/ingress-nginx/pull/9104) Fix yaml formatting error with multiple annotations
### 4.2.1
- The sha of kube-webhook-certgen image & the opentelemetry image, in values file, was changed to new images built on alpine-v3.16.1
- "[8896](https://github.com/kubernetes/ingress-nginx/pull/8896) updated to new images built today"
### 4.2.0
- Support for Kubernetes v1.19.0 was removed
- "[8810](https://github.com/kubernetes/ingress-nginx/pull/8810) Prepare for v1.3.0"
- "[8808](https://github.com/kubernetes/ingress-nginx/pull/8808) revert arch var name"
- "[8805](https://github.com/kubernetes/ingress-nginx/pull/8805) Bump k8s.io/klog/v2 from 2.60.1 to 2.70.1"
- "[8803](https://github.com/kubernetes/ingress-nginx/pull/8803) Update to nginx base with alpine v3.16"
- "[8802](https://github.com/kubernetes/ingress-nginx/pull/8802) chore: start v1.3.0 release process"
- "[8798](https://github.com/kubernetes/ingress-nginx/pull/8798) Add v1.24.0 to test matrix"
- "[8796](https://github.com/kubernetes/ingress-nginx/pull/8796) fix: add MAC_OS variable for static-check"
- "[8793](https://github.com/kubernetes/ingress-nginx/pull/8793) changed to alpine-v3.16"
- "[8781](https://github.com/kubernetes/ingress-nginx/pull/8781) Bump github.com/stretchr/testify from 1.7.5 to 1.8.0"
- "[8778](https://github.com/kubernetes/ingress-nginx/pull/8778) chore: remove stable.txt from release process"
- "[8775](https://github.com/kubernetes/ingress-nginx/pull/8775) Remove stable"
- "[8773](https://github.com/kubernetes/ingress-nginx/pull/8773) Bump github/codeql-action from 2.1.14 to 2.1.15"
- "[8772](https://github.com/kubernetes/ingress-nginx/pull/8772) Bump ossf/scorecard-action from 1.1.1 to 1.1.2"
- "[8771](https://github.com/kubernetes/ingress-nginx/pull/8771) fix bullet md format"
- "[8770](https://github.com/kubernetes/ingress-nginx/pull/8770) Add condition for monitoring.coreos.com/v1 API"
- "[8769](https://github.com/kubernetes/ingress-nginx/pull/8769) Fix typos and add links to developer guide"
- "[8767](https://github.com/kubernetes/ingress-nginx/pull/8767) change v1.2.0 to v1.2.1 in deploy doc URLs"
- "[8765](https://github.com/kubernetes/ingress-nginx/pull/8765) Bump github/codeql-action from 1.0.26 to 2.1.14"
- "[8752](https://github.com/kubernetes/ingress-nginx/pull/8752) Bump github.com/spf13/cobra from 1.4.0 to 1.5.0"
- "[8751](https://github.com/kubernetes/ingress-nginx/pull/8751) Bump github.com/stretchr/testify from 1.7.2 to 1.7.5"
- "[8750](https://github.com/kubernetes/ingress-nginx/pull/8750) added announcement"
- "[8740](https://github.com/kubernetes/ingress-nginx/pull/8740) change sha e2etestrunner and echoserver"
- "[8738](https://github.com/kubernetes/ingress-nginx/pull/8738) Update docs to make it easier for noobs to follow step by step"
- "[8737](https://github.com/kubernetes/ingress-nginx/pull/8737) updated baseimage sha"
- "[8736](https://github.com/kubernetes/ingress-nginx/pull/8736) set ld-musl-path"
- "[8733](https://github.com/kubernetes/ingress-nginx/pull/8733) feat: migrate leaderelection lock to leases"
- "[8726](https://github.com/kubernetes/ingress-nginx/pull/8726) prometheus metric: upstream_latency_seconds"
- "[8720](https://github.com/kubernetes/ingress-nginx/pull/8720) Ci pin deps"
- "[8719](https://github.com/kubernetes/ingress-nginx/pull/8719) Working OpenTelemetry sidecar (base nginx image)"
- "[8714](https://github.com/kubernetes/ingress-nginx/pull/8714) Create Openssf scorecard"
- "[8708](https://github.com/kubernetes/ingress-nginx/pull/8708) Bump github.com/prometheus/common from 0.34.0 to 0.35.0"
- "[8703](https://github.com/kubernetes/ingress-nginx/pull/8703) Bump actions/dependency-review-action from 1 to 2"
- "[8701](https://github.com/kubernetes/ingress-nginx/pull/8701) Fix several typos"
- "[8699](https://github.com/kubernetes/ingress-nginx/pull/8699) fix the gosec test and a make target for it"
- "[8698](https://github.com/kubernetes/ingress-nginx/pull/8698) Bump actions/upload-artifact from 2.3.1 to 3.1.0"
- "[8697](https://github.com/kubernetes/ingress-nginx/pull/8697) Bump actions/setup-go from 2.2.0 to 3.2.0"
- "[8695](https://github.com/kubernetes/ingress-nginx/pull/8695) Bump actions/download-artifact from 2 to 3"
- "[8694](https://github.com/kubernetes/ingress-nginx/pull/8694) Bump crazy-max/ghaction-docker-buildx from 1.6.2 to 3.3.1"
### 4.1.2
- "[8587](https://github.com/kubernetes/ingress-nginx/pull/8587) Add CAP_SYS_CHROOT to DS/PSP when needed"
- "[8458](https://github.com/kubernetes/ingress-nginx/pull/8458) Add portNamePreffix Helm chart parameter"
- "[8522](https://github.com/kubernetes/ingress-nginx/pull/8522) Add documentation for controller.service.loadBalancerIP in Helm chart"
### 4.1.0
- "[8481](https://github.com/kubernetes/ingress-nginx/pull/8481) Fix log creation in chroot script"
- "[8479](https://github.com/kubernetes/ingress-nginx/pull/8479) changed nginx base img tag to img built with alpine3.14.6"
- "[8478](https://github.com/kubernetes/ingress-nginx/pull/8478) update base images and protobuf gomod"
- "[8468](https://github.com/kubernetes/ingress-nginx/pull/8468) Fallback to ngx.var.scheme for redirectScheme with use-forward-headers when X-Forwarded-Proto is empty"
- "[8456](https://github.com/kubernetes/ingress-nginx/pull/8456) Implement object deep inspector"
- "[8455](https://github.com/kubernetes/ingress-nginx/pull/8455) Update dependencies"
- "[8454](https://github.com/kubernetes/ingress-nginx/pull/8454) Update index.md"
- "[8447](https://github.com/kubernetes/ingress-nginx/pull/8447) typo fixing"
- "[8446](https://github.com/kubernetes/ingress-nginx/pull/8446) Fix suggested annotation-value-word-blocklist"
- "[8444](https://github.com/kubernetes/ingress-nginx/pull/8444) replace deprecated topology key in example with current one"
- "[8443](https://github.com/kubernetes/ingress-nginx/pull/8443) Add dependency review enforcement"
- "[8434](https://github.com/kubernetes/ingress-nginx/pull/8434) added new auth-tls-match-cn annotation"
- "[8426](https://github.com/kubernetes/ingress-nginx/pull/8426) Bump github.com/prometheus/common from 0.32.1 to 0.33.0"
### 4.0.18
"[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build"
"[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build"
"[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge"
"[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241"
"[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts"
"[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error"
"[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation"
"[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric"
"[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code."
"[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image"
"[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests"
"[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint"
"[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1"
"[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0"
"[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint"
"[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial"
"[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera"
"[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment"
"[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation"
"[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell"
"[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor"
"[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations"
"[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0"
"[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account"
"[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description"
"[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests"
"[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values"
"[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1"
"[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs"
"[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits"
"[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations"
"[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT"
- "[8291](https://github.com/kubernetes/ingress-nginx/pull/8291) remove git tag env from cloud build"
- "[8286](https://github.com/kubernetes/ingress-nginx/pull/8286) Fix OpenTelemetry sidecar image build"
- "[8277](https://github.com/kubernetes/ingress-nginx/pull/8277) Add OpenSSF Best practices badge"
- "[8273](https://github.com/kubernetes/ingress-nginx/pull/8273) Issue#8241"
- "[8267](https://github.com/kubernetes/ingress-nginx/pull/8267) Add fsGroup value to admission-webhooks/job-patch charts"
- "[8262](https://github.com/kubernetes/ingress-nginx/pull/8262) Updated confusing error"
- "[8256](https://github.com/kubernetes/ingress-nginx/pull/8256) fix: deny locations with invalid auth-url annotation"
- "[8253](https://github.com/kubernetes/ingress-nginx/pull/8253) Add a certificate info metric"
- "[8236](https://github.com/kubernetes/ingress-nginx/pull/8236) webhook: remove useless code."
- "[8227](https://github.com/kubernetes/ingress-nginx/pull/8227) Update libraries in webhook image"
- "[8225](https://github.com/kubernetes/ingress-nginx/pull/8225) fix inconsistent-label-cardinality for prometheus metrics: nginx_ingress_controller_requests"
- "[8221](https://github.com/kubernetes/ingress-nginx/pull/8221) Do not validate ingresses with unknown ingress class in admission webhook endpoint"
- "[8210](https://github.com/kubernetes/ingress-nginx/pull/8210) Bump github.com/prometheus/client_golang from 1.11.0 to 1.12.1"
- "[8209](https://github.com/kubernetes/ingress-nginx/pull/8209) Bump google.golang.org/grpc from 1.43.0 to 1.44.0"
- "[8204](https://github.com/kubernetes/ingress-nginx/pull/8204) Add Artifact Hub lint"
- "[8203](https://github.com/kubernetes/ingress-nginx/pull/8203) Fix Indentation of example and link to cert-manager tutorial"
- "[8201](https://github.com/kubernetes/ingress-nginx/pull/8201) feat(metrics): add path and method labels to requests countera"
- "[8199](https://github.com/kubernetes/ingress-nginx/pull/8199) use functional options to reduce number of methods creating an EchoDeployment"
- "[8196](https://github.com/kubernetes/ingress-nginx/pull/8196) docs: fix inconsistent controller annotation"
- "[8191](https://github.com/kubernetes/ingress-nginx/pull/8191) Using Go install for misspell"
- "[8186](https://github.com/kubernetes/ingress-nginx/pull/8186) prometheus+grafana using servicemonitor"
- "[8185](https://github.com/kubernetes/ingress-nginx/pull/8185) Append elements on match, instead of removing for cors-annotations"
- "[8179](https://github.com/kubernetes/ingress-nginx/pull/8179) Bump github.com/opencontainers/runc from 1.0.3 to 1.1.0"
- "[8173](https://github.com/kubernetes/ingress-nginx/pull/8173) Adding annotations to the controller service account"
- "[8163](https://github.com/kubernetes/ingress-nginx/pull/8163) Update the $req_id placeholder description"
- "[8162](https://github.com/kubernetes/ingress-nginx/pull/8162) Versioned static manifests"
- "[8159](https://github.com/kubernetes/ingress-nginx/pull/8159) Adding some geoip variables and default values"
- "[8155](https://github.com/kubernetes/ingress-nginx/pull/8155) #7271 feat: avoid-pdb-creation-when-default-backend-disabled-and-replicas-gt-1"
- "[8151](https://github.com/kubernetes/ingress-nginx/pull/8151) Automatically generate helm docs"
- "[8143](https://github.com/kubernetes/ingress-nginx/pull/8143) Allow to configure delay before controller exits"
- "[8136](https://github.com/kubernetes/ingress-nginx/pull/8136) add ingressClass option to helm chart - back compatibility with ingress.class annotations"
- "[8126](https://github.com/kubernetes/ingress-nginx/pull/8126) Example for JWT"
### 4.0.15
@@ -44,7 +129,7 @@ This file documents all notable changes to [ingress-nginx](https://github.com/ku
- [8118] https://github.com/kubernetes/ingress-nginx/pull/8118 Remove deprecated libraries, update other libs
- [8117] https://github.com/kubernetes/ingress-nginx/pull/8117 Fix codegen errors
- [8115] https://github.com/kubernetes/ingress-nginx/pull/8115 chart/ghaction: set the correct permission to have access to push a release
- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparision of P…
- [8098] https://github.com/kubernetes/ingress-nginx/pull/8098 generating SHA for CA only certs in backend_ssl.go + comparison of P…
- [8088] https://github.com/kubernetes/ingress-nginx/pull/8088 Fix Edit this page link to use main branch
- [8072] https://github.com/kubernetes/ingress-nginx/pull/8072 Expose GeoIP2 Continent code as variable
- [8061] https://github.com/kubernetes/ingress-nginx/pull/8061 docs(charts): using helm-docs for chart
@@ -54,7 +139,7 @@ This file documents all notable changes to [ingress-nginx](https://github.com/ku
- [8046] https://github.com/kubernetes/ingress-nginx/pull/8046 Report expired certificates (#8045)
- [8044] https://github.com/kubernetes/ingress-nginx/pull/8044 remove G109 check till gosec resolves issues
- [8042] https://github.com/kubernetes/ingress-nginx/pull/8042 docs_multiple_instances_one_cluster_ticket_7543
- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executible name
- [8041] https://github.com/kubernetes/ingress-nginx/pull/8041 docs: fix typo'd executable name
- [8035] https://github.com/kubernetes/ingress-nginx/pull/8035 Comment busy owners
- [8029] https://github.com/kubernetes/ingress-nginx/pull/8029 Add stream-snippet as a ConfigMap and Annotation option
- [8023] https://github.com/kubernetes/ingress-nginx/pull/8023 fix nginx compilation flags
@@ -71,7 +156,7 @@ This file documents all notable changes to [ingress-nginx](https://github.com/ku
- [7996] https://github.com/kubernetes/ingress-nginx/pull/7996 doc: improvement
- [7983] https://github.com/kubernetes/ingress-nginx/pull/7983 Fix a couple of misspellings in the annotations documentation.
- [7979] https://github.com/kubernetes/ingress-nginx/pull/7979 allow set annotations for admission Jobs
- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to defaul server
- [7977] https://github.com/kubernetes/ingress-nginx/pull/7977 Add ssl_reject_handshake to default server
- [7975] https://github.com/kubernetes/ingress-nginx/pull/7975 add legacy version update v0.50.0 to main changelog
- [7972] https://github.com/kubernetes/ingress-nginx/pull/7972 updated service upstream definition
@@ -119,11 +204,11 @@ This file documents all notable changes to [ingress-nginx](https://github.com/ku
- [7707] https://github.com/kubernetes/ingress-nginx/pull/7707 Release v1.0.2 of ingress-nginx
### 4.0.2
### 4.0.2
- [7681] https://github.com/kubernetes/ingress-nginx/pull/7681 Release v1.0.1 of ingress-nginx
### 4.0.1
### 4.0.1
- [7535] https://github.com/kubernetes/ingress-nginx/pull/7535 Release v1.0.0 ingress-nginx

Chart.yaml

@@ -1,22 +1,14 @@
annotations:
artifacthub.io/changes: |
- "[8459](https://github.com/kubernetes/ingress-nginx/pull/8459) Update default allowed CORS headers"
- "[8202](https://github.com/kubernetes/ingress-nginx/pull/8202) disable modsecurity on error page"
- "[8178](https://github.com/kubernetes/ingress-nginx/pull/8178) Add header Host into mirror annotations"
- "[8213](https://github.com/kubernetes/ingress-nginx/pull/8213) feat: always set auth cookie"
- "[8548](https://github.com/kubernetes/ingress-nginx/pull/8548) Implement reporting status classes in metrics"
- "[8612](https://github.com/kubernetes/ingress-nginx/pull/8612) move so files under /etc/nginx/modules"
- "[8624](https://github.com/kubernetes/ingress-nginx/pull/8624) Add patch to remove root and alias directives"
- "[8623](https://github.com/kubernetes/ingress-nginx/pull/8623) Improve path rule"
- "Update Ingress-Nginx version controller-v1.9.1"
artifacthub.io/prerelease: "false"
apiVersion: v2
appVersion: 1.2.1
appVersion: latest
keywords:
- ingress
- nginx
kubeVersion: '>=1.19.0-0'
kubeVersion: '>=1.20.0-0'
name: open-appsec-k8s-nginx-ingress
sources:
- https://github.com/kubernetes/ingress-nginx
type: application
version: 4.1.4
version: 4.8.1

README.md (ingress-nginx Helm chart)

@@ -2,16 +2,15 @@
[ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
![Version: 4.0.19](https://img.shields.io/badge/Version-4.0.19-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.1.3](https://img.shields.io/badge/AppVersion-1.1.3-informational?style=flat-square)
![Version: 4.8.1](https://img.shields.io/badge/Version-4.8.1-informational?style=flat-square) ![AppVersion: 1.9.1](https://img.shields.io/badge/AppVersion-1.9.1-informational?style=flat-square)
To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
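As an illustration (not part of the chart docs), a minimal Ingress resource using the `ingressClassName` field might look like this; the resource name, host, and backend service are placeholders:
```bash
kubectl apply -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress        # placeholder name
spec:
  ingressClassName: nginx      # selects this controller
  rules:
  - host: app.example.com      # placeholder host
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: example-svc  # placeholder backend service
            port:
              number: 80
EOF
```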
## Prerequisites
## Requirements
- Chart version 3.x.x: Kubernetes v1.16+
- Chart version 4.x.x and above: Kubernetes v1.19+
Kubernetes: `>=1.20.0-0`
## Get Repo Info
@@ -52,10 +51,6 @@ helm upgrade [RELEASE_NAME] [CHART] --install
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### Upgrading With Zero Downtime in Production
By default the ingress-nginx controller has service interruptions whenever it's pods are restarted or redeployed. In order to fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
### Migrating from stable/nginx-ingress
There are two main ways to migrate a release from `stable/nginx-ingress` to `ingress-nginx/ingress-nginx` chart:
@@ -66,7 +61,6 @@ There are two main ways to migrate a release from `stable/nginx-ingress` to `ing
1. Redirect your DNS traffic from the old controller to the new controller
1. Log traffic from both controllers during this changeover
1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production)
Note that there are some different and upgraded configurations between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts.
@@ -85,14 +79,14 @@ else it would make it impossible to evacuate a node. See [gh issue #7127](https:
### Prometheus Metrics
The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`.
The Ingress-Nginx Controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`.
You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation using `controller.metrics.serviceMonitor.enabled` and set `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. "release=prometheus" should match the label configured in the Prometheus ServiceMonitor (see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`).
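For example, a sketch of enabling both settings at install time, following the chart's own `helm upgrade [RELEASE_NAME] [CHART] --install` convention:
```bash
helm upgrade [RELEASE_NAME] [CHART] --install \
  --set controller.metrics.enabled=true \
  --set controller.metrics.serviceMonitor.enabled=true \
  --set controller.metrics.serviceMonitor.additionalLabels.release="prometheus"
```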
### ingress-nginx nginx\_status page/stats server
Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller:
Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in Ingress-Nginx Controller:
- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost.
@@ -100,7 +94,7 @@ Previous versions of this chart had a `controller.stats.*` configuration block,
### ExternalDNS Service Configuration
Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service:
Add an [ExternalDNS](https://github.com/kubernetes-sigs/external-dns) annotation to the LoadBalancer service:
```yaml
controller:
@@ -111,7 +105,7 @@ controller:
### AWS L7 ELB with SSL Termination
Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/main/deploy/aws/l7/service-l7.yaml):
Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml):
```yaml
controller:
@@ -126,19 +120,6 @@ controller:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
```
### AWS route53-mapper
To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label:
```yaml
controller:
service:
labels:
dns: "route53"
annotations:
domainName: "kubernetes-example.com"
```
### Additional Internal Load Balancer
This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
@@ -162,8 +143,10 @@ controller:
internal:
enabled: true
annotations:
# Create internal ELB
service.beta.kubernetes.io/aws-load-balancer-internal: "true"
# Create internal NLB
service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
# Create internal ELB(Deprecated)
# service.beta.kubernetes.io/aws-load-balancer-internal: "true"
# Any other annotation can be declared here.
```
@@ -175,7 +158,7 @@ controller:
internal:
enabled: true
annotations:
# Create internal LB. More informations: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
# Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
# For GKE versions 1.17 and later
networking.gke.io/load-balancer-type: "Internal"
# For earlier versions
@@ -206,17 +189,34 @@ controller:
# Any other annotation can be declared here.
```
The load balancer annotations of more cloud service providers can be found: [Internal load balancer](https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer).
A use case for this scenario is having a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one ingress kubernetes object.
Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
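For example, a minimal sketch (the address below is a placeholder for a pre-allocated static IP):
```yaml
controller:
  service:
    loadBalancerIP: "203.0.113.10"  # hypothetical pre-allocated static IP
```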
### Ingress Admission Webhooks
With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
With nginx-ingress-controller version 0.25+, the Ingress-Nginx Controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
**This feature is enabled by default since 0.31.0.**
nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; 0.26 fixed [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521)
#### How the Chart Configures the Hooks
A validating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates for this, but in most cases a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using Helm. The steps are created to be idempotent and to allow turning the feature on and off without running into Helm quirks.
1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end user certificates. If the certificate already exists, the hook exits.
2. The Ingress-Nginx Controller pod is configured to use a TLS proxy container, which will load that certificate.
3. Validating and Mutating webhook configurations are created in the cluster.
4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process allows a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is patched in as well (see the values sketch below).
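A minimal values sketch for these settings, using the chart defaults from the values table:
```yaml
controller:
  admissionWebhooks:
    enabled: true        # chart default
    failurePolicy: Fail  # fail closed: reject objects the webhook cannot validate
```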
#### Alternatives
It should be possible to use [cert-manager/cert-manager](https://github.com/cert-manager/cert-manager) if a more complete solution is required.
You can enable automatic self-signed TLS certificate provisioning via cert-manager by setting the `controller.admissionWebhooks.certManager.enabled` value to true.
Please ensure that cert-manager is correctly installed and configured.
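For example:
```yaml
controller:
  admissionWebhooks:
    certManager:
      enabled: true
```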
### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this:
@@ -229,10 +229,6 @@ Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13
As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered.
## Requirements
Kubernetes: `>=1.19.0-0`
## Values
| Key | Type | Default | Description |
@@ -240,38 +236,46 @@ Kubernetes: `>=1.19.0-0`
| commonLabels | object | `{}` | |
| controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers |
| controller.admissionWebhooks.annotations | object | `{}` | |
| controller.admissionWebhooks.certManager.admissionCert.duration | string | `""` | |
| controller.admissionWebhooks.certManager.enabled | bool | `false` | |
| controller.admissionWebhooks.certManager.rootCert.duration | string | `""` | |
| controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | |
| controller.admissionWebhooks.createSecretJob.resources | object | `{}` | |
| controller.admissionWebhooks.createSecretJob.securityContext.allowPrivilegeEscalation | bool | `false` | |
| controller.admissionWebhooks.enabled | bool | `true` | |
| controller.admissionWebhooks.existingPsp | string | `""` | Use an existing PSP instead of creating one |
| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | |
| controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set |
| controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use |
| controller.admissionWebhooks.key | string | `"/usr/local/certificates/key"` | |
| controller.admissionWebhooks.labels | object | `{}` | Labels to be added to admission webhooks |
| controller.admissionWebhooks.namespaceSelector | object | `{}` | |
| controller.admissionWebhooks.objectSelector | object | `{}` | |
| controller.admissionWebhooks.patch.enabled | bool | `true` | |
| controller.admissionWebhooks.patch.fsGroup | int | `2000` | |
| controller.admissionWebhooks.patch.image.digest | string | `"sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660"` | |
| controller.admissionWebhooks.patch.image.digest | string | `"sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b"` | |
| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | |
| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | |
| controller.admissionWebhooks.patch.image.registry | string | `"k8s.gcr.io"` | |
| controller.admissionWebhooks.patch.image.tag | string | `"v1.1.1"` | |
| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | |
| controller.admissionWebhooks.patch.image.tag | string | `"v20230407"` | |
| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources |
| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | |
| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | |
| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job |
| controller.admissionWebhooks.patch.runAsUser | int | `2000` | |
| controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job |
| controller.admissionWebhooks.patch.securityContext.fsGroup | int | `2000` | |
| controller.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | |
| controller.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | |
| controller.admissionWebhooks.patch.tolerations | list | `[]` | |
| controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | |
| controller.admissionWebhooks.patchWebhookJob.securityContext.allowPrivilegeEscalation | bool | `false` | |
| controller.admissionWebhooks.port | int | `8443` | |
| controller.admissionWebhooks.service.annotations | object | `{}` | |
| controller.admissionWebhooks.service.externalIPs | list | `[]` | |
| controller.admissionWebhooks.service.loadBalancerSourceRanges | list | `[]` | |
| controller.admissionWebhooks.service.servicePort | int | `443` | |
| controller.admissionWebhooks.service.type | string | `"ClusterIP"` | |
| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes |
| controller.allowSnippetAnnotations | bool | `true` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected |
| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet |
| controller.affinity | object | `{}` | Affinity and anti-affinity rules for server scheduling to nodes. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity |
| controller.allowSnippetAnnotations | bool | `false` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected |
| controller.annotations | object | `{}` | Annotations to be added to the controller Deployment or DaemonSet |
| controller.autoscaling.annotations | object | `{}` | |
| controller.autoscaling.behavior | object | `{}` | |
| controller.autoscaling.enabled | bool | `false` | |
| controller.autoscaling.maxReplicas | int | `11` | |
@@ -288,30 +292,35 @@ Kubernetes: `>=1.19.0-0`
| controller.customTemplate.configMapName | string | `""` | |
| controller.dnsConfig | object | `{}` | Optionally customize the pod dnsConfig. |
| controller.dnsPolicy | string | `"ClusterFirst"` | Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. |
| controller.electionID | string | `"ingress-controller-leader"` | Election ID to use for status update |
| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. |
| controller.electionID | string | `""` | Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader' |
| controller.enableAnnotationValidations | bool | `false` | |
| controller.enableMimalloc | bool | `true` | Enable mimalloc as a drop-in replacement for malloc. Ref: https://github.com/microsoft/mimalloc |
| controller.enableTopologyAwareRouting | bool | `false` | This configuration enables the Topology Aware Routing feature, used together with the service annotation service.kubernetes.io/topology-mode="auto". Defaults to false |
| controller.existingPsp | string | `""` | Use an existing PSP instead of creating one |
| controller.extraArgs | object | `{}` | Additional command line arguments to pass to nginx-ingress-controller E.g. to specify the default SSL certificate you can use |
| controller.extraArgs | object | `{}` | Additional command line arguments to pass to the Ingress-Nginx Controller. E.g. to specify the default SSL certificate you can use |
| controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. |
| controller.extraEnvs | list | `[]` | Additional environment variables to set |
| controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. |
| controller.extraModules | list | `[]` | |
| controller.extraModules | list | `[]` | Modules, which are mounted into the core nginx image. See values.yaml for a sample to add opentelemetry module |
| controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. |
| controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. |
| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the ingress nginx controller is running in the `hostNetwork: true` mode. |
| controller.healthCheckHost | string | `""` | Address to bind the health check endpoint. It is better to set this option to the internal node address if the Ingress-Nginx Controller is running in the `hostNetwork: true` mode. |
| controller.healthCheckPath | string | `"/healthz"` | Path of the health check endpoint. All requests received on the port defined by the healthz-port parameter are forwarded internally to this path. |
| controller.hostAliases | list | `[]` | Optionally customize the pod hostAliases. |
| controller.hostNetwork | bool | `false` | Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged |
| controller.hostPort.enabled | bool | `false` | Enable 'hostPort' or not |
| controller.hostPort.ports.http | int | `80` | 'hostPort' http port |
| controller.hostPort.ports.https | int | `443` | 'hostPort' https port |
| controller.hostname | object | `{}` | Optionally customize the pod hostname. |
| controller.image.allowPrivilegeEscalation | bool | `true` | |
| controller.image.digest | string | `"sha256:31f47c1e202b39fadecf822a9b76370bd4baed199a005b3e7d4d1455f4fd3fe2"` | |
| controller.image.chroot | bool | `false` | |
| controller.image.digest | string | `"sha256:605a737877de78969493a4b1213b21de4ee425d2926906857b98050f57a95b25"` | |
| controller.image.digestChroot | string | `"sha256:2ac744ef08850ee86ad7162451a6879f47c1a41c6a757f6b6f913c52103b8836"` | |
| controller.image.image | string | `"ingress-nginx/controller"` | |
| controller.image.pullPolicy | string | `"IfNotPresent"` | |
| controller.image.registry | string | `"k8s.gcr.io"` | |
| controller.image.registry | string | `"registry.k8s.io"` | |
| controller.image.runAsUser | int | `101` | |
| controller.image.tag | string | `"v1.1.3"` | |
| controller.image.tag | string | `"v1.9.1"` | |
| controller.ingressClass | string | `"nginx"` | For backwards compatibility with the ingress.class annotation, use ingressClass. The algorithm is as follows: first, ingressClassName is considered; if not present, the controller looks for the ingress.class annotation |
| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). |
| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass |
@@ -330,8 +339,8 @@ Kubernetes: `>=1.19.0-0`
| controller.keda.scaledObject.annotations | object | `{}` | |
| controller.keda.triggers | list | `[]` | |
| controller.kind | string | `"Deployment"` | Use a `DaemonSet` or `Deployment` |
| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels |
| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. |
| controller.labels | object | `{}` | Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels |
| controller.lifecycle | object | `{"preStop":{"exec":{"command":["/wait-shutdown"]}}}` | Improve connection draining when ingress controller pod is deleted using a lifecycle hook: With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds to 300, allowing the draining of connections up to five minutes. If the active connections end before that, the pod will terminate gracefully at that time. To effectively take advantage of this feature, the Configmap feature worker-shutdown-timeout new value is 240s instead of 10s. |
| controller.livenessProbe.failureThreshold | int | `5` | |
| controller.livenessProbe.httpGet.path | string | `"/healthz"` | |
| controller.livenessProbe.httpGet.port | int | `10254` | |
@@ -340,14 +349,16 @@ Kubernetes: `>=1.19.0-0`
| controller.livenessProbe.periodSeconds | int | `10` | |
| controller.livenessProbe.successThreshold | int | `1` | |
| controller.livenessProbe.timeoutSeconds | int | `1` | |
| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. |
| controller.maxmindLicenseKey | string | `""` | Maxmind license key to download GeoLite2 Databases. Ref: https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases |
| controller.metrics.enabled | bool | `false` | |
| controller.metrics.port | int | `10254` | |
| controller.metrics.portName | string | `"metrics"` | |
| controller.metrics.prometheusRule.additionalLabels | object | `{}` | |
| controller.metrics.prometheusRule.enabled | bool | `false` | |
| controller.metrics.prometheusRule.rules | list | `[]` | |
| controller.metrics.service.annotations | object | `{}` | |
| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available |
| controller.metrics.service.externalIPs | list | `[]` | List of IP addresses at which the stats-exporter service is available. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips |
| controller.metrics.service.labels | object | `{}` | Labels to be added to the metrics service resource |
| controller.metrics.service.loadBalancerSourceRanges | list | `[]` | |
| controller.metrics.service.servicePort | int | `10254` | |
| controller.metrics.service.type | string | `"ClusterIP"` | |
@@ -359,11 +370,16 @@ Kubernetes: `>=1.19.0-0`
| controller.metrics.serviceMonitor.relabelings | list | `[]` | |
| controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | |
| controller.metrics.serviceMonitor.targetLabels | list | `[]` | |
| controller.minAvailable | int | `1` | |
| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready |
| controller.minAvailable | int | `1` | Minimum available pods set in PodDisruptionBudget. Define either 'minAvailable' or 'maxUnavailable', never both. |
| controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready |
| controller.name | string | `"controller"` | |
| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment |
| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods |
| controller.networkPolicy.enabled | bool | `false` | Enable 'networkPolicy' or not |
| controller.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for controller pod assignment. Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ |
| controller.opentelemetry.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | |
| controller.opentelemetry.enabled | bool | `false` | |
| controller.opentelemetry.image | string | `"registry.k8s.io/ingress-nginx/opentelemetry:v20230721-3e2062ee5@sha256:13bee3f5223883d3ca62fee7309ad02d22ec00ff0d7033e3e9aca7a9f60fd472"` | |
| controller.opentelemetry.resources | object | `{}` | |
| controller.podAnnotations | object | `{}` | Annotations to be added to controller pods |
| controller.podLabels | object | `{}` | Labels to add to the pod container metadata |
| controller.podSecurityContext | object | `{}` | Security Context policies for controller pods |
| controller.priorityClassName | string | `""` | |
@@ -380,25 +396,30 @@ Kubernetes: `>=1.19.0-0`
| controller.readinessProbe.successThreshold | int | `1` | |
| controller.readinessProbe.timeoutSeconds | int | `1` | |
| controller.replicaCount | int | `1` | |
| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply |
| controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network; the default --publish-service flag used in standard cloud setups does not apply |
| controller.resources.requests.cpu | string | `"100m"` | |
| controller.resources.requests.memory | string | `"90Mi"` | |
| controller.scope.enabled | bool | `false` | Enable 'scope' or not |
| controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) |
| controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, watch only namespaces whose labels match the namespaceSelector. Format: foo=bar. Defaults to empty, meaning all namespaces are watched. |
| controller.service.annotations | object | `{}` | |
| controller.service.appProtocol | bool | `true` | If enabled is adding an appProtocol option for Kubernetes service. An appProtocol field replacing annotations that were using for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244 Will be ignored for Kubernetes versions older than 1.20 |
| controller.service.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. Values passed through helm tpl engine. |
| controller.service.appProtocol | bool | `true` | If enabled, adds an appProtocol option to the Kubernetes service. The appProtocol field replaces the annotations that were used for setting a backend protocol. Here is an example for AWS: service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http It allows choosing the protocol for each backend specified in the Kubernetes service. See the following GitHub issue for more details about the purpose: https://github.com/kubernetes/kubernetes/issues/40244. Will be ignored for Kubernetes versions older than 1.20 |
| controller.service.enableHttp | bool | `true` | |
| controller.service.enableHttps | bool | `true` | |
| controller.service.enabled | bool | `true` | |
| controller.service.external.enabled | bool | `true` | |
| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available |
| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. |
| controller.service.externalIPs | list | `[]` | List of IP addresses at which the controller services are available. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips |
| controller.service.internal.annotations | object | `{}` | Annotations are mandatory for the load balancer to come up. Varies with the cloud service. Values passed through helm tpl engine. |
| controller.service.internal.enabled | bool | `false` | Enables an additional internal load balancer (besides the external one). |
| controller.service.internal.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting internal LoadBalancer to a pre-existing static IP. Make sure to add to the service the needed annotation to specify the subnet which the static IP belongs to. For instance, `networking.gke.io/internal-load-balancer-subnet` for GCP and `service.beta.kubernetes.io/aws-load-balancer-subnets` for AWS. |
| controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access for the LoadBalancer service. Defaults to 0.0.0.0/0. |
| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. |
| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. |
| controller.service.internal.ports | object | `{}` | Custom port mapping for internal service |
| controller.service.internal.targetPorts | object | `{}` | Custom target port mapping for internal service |
| controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ |
| controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack-ness requested or required by this Service. Possible values are SingleStack, PreferDualStack or RequireDualStack. The ipFamilies and clusterIPs fields depend on the value of this field. Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/ |
| controller.service.labels | object | `{}` | |
| controller.service.loadBalancerClass | string | `""` | Used by cloud providers to select a load balancer implementation other than the cloud provider default. https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class |
| controller.service.loadBalancerIP | string | `""` | Used by cloud providers to connect the resulting `LoadBalancer` to a pre-existing static IP according to https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer |
| controller.service.loadBalancerSourceRanges | list | `[]` | |
| controller.service.nodePorts.http | string | `""` | |
| controller.service.nodePorts.https | string | `""` | |
@@ -409,16 +430,16 @@ Kubernetes: `>=1.19.0-0`
| controller.service.targetPorts.http | string | `"http"` | |
| controller.service.targetPorts.https | string | `"https"` | |
| controller.service.type | string | `"LoadBalancer"` | |
| controller.shareProcessNamespace | bool | `false` | This can be used for example to signal log rotation using `kill -USR1` from a sidecar. |
| controller.shareProcessNamespace | bool | `false` | |
| controller.sysctls | object | `{}` | See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls |
| controller.tcp.annotations | object | `{}` | Annotations to be added to the tcp config configmap |
| controller.tcp.configMapNamespace | string | `""` | Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE) |
| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready |
| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints |
| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. |
| controller.terminationGracePeriodSeconds | int | `300` | `terminationGracePeriodSeconds` to avoid killing pods before we are ready; wait up to five minutes for the drain of connections |
| controller.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ |
| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ |
| controller.udp.annotations | object | `{}` | Annotations to be added to the udp config configmap |
| controller.udp.configMapNamespace | string | `""` | Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE) |
| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet |
| controller.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet |
| controller.watchIngressWithoutClass | bool | `false` | Process Ingress objects without an ingressClass annotation/ingressClassName field. Overrides the value for the --watch-ingress-without-class flag of the controller binary. Defaults to false |
| defaultBackend.affinity | object | `{}` | |
| defaultBackend.autoscaling.annotations | object | `{}` | |
@@ -427,7 +448,7 @@ Kubernetes: `>=1.19.0-0`
| defaultBackend.autoscaling.minReplicas | int | `1` | |
| defaultBackend.autoscaling.targetCPUUtilizationPercentage | int | `50` | |
| defaultBackend.autoscaling.targetMemoryUtilizationPercentage | int | `50` | |
| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls |
| defaultBackend.containerSecurityContext | object | `{}` | Security Context policies for controller main container. See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls |
| defaultBackend.enabled | bool | `false` | |
| defaultBackend.existingPsp | string | `""` | Use an existing PSP instead of creating one |
| defaultBackend.extraArgs | object | `{}` | |
@@ -438,7 +459,7 @@ Kubernetes: `>=1.19.0-0`
| defaultBackend.image.image | string | `"defaultbackend-amd64"` | |
| defaultBackend.image.pullPolicy | string | `"IfNotPresent"` | |
| defaultBackend.image.readOnlyRootFilesystem | bool | `true` | |
| defaultBackend.image.registry | string | `"k8s.gcr.io"` | |
| defaultBackend.image.registry | string | `"registry.k8s.io"` | |
| defaultBackend.image.runAsNonRoot | bool | `true` | |
| defaultBackend.image.runAsUser | int | `65534` | |
| defaultBackend.image.tag | string | `"1.5"` | |
@@ -449,11 +470,13 @@ Kubernetes: `>=1.19.0-0`
| defaultBackend.livenessProbe.successThreshold | int | `1` | |
| defaultBackend.livenessProbe.timeoutSeconds | int | `5` | |
| defaultBackend.minAvailable | int | `1` | |
| defaultBackend.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready |
| defaultBackend.name | string | `"defaultbackend"` | |
| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment |
| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods |
| defaultBackend.networkPolicy.enabled | bool | `false` | Enable 'networkPolicy' or not |
| defaultBackend.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for default backend pod assignment. Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ |
| defaultBackend.podAnnotations | object | `{}` | Annotations to be added to default backend pods |
| defaultBackend.podLabels | object | `{}` | Labels to add to the pod container metadata |
| defaultBackend.podSecurityContext | object | `{}` | Security Context policies for controller pods See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls |
| defaultBackend.podSecurityContext | object | `{}` | Security Context policies for controller pods. See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for notes on enabling and using sysctls |
| defaultBackend.port | int | `8080` | |
| defaultBackend.priorityClassName | string | `""` | |
| defaultBackend.readinessProbe.failureThreshold | int | `6` | |
@@ -464,24 +487,25 @@ Kubernetes: `>=1.19.0-0`
| defaultBackend.replicaCount | int | `1` | |
| defaultBackend.resources | object | `{}` | |
| defaultBackend.service.annotations | object | `{}` | |
| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available |
| defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips |
| defaultBackend.service.loadBalancerSourceRanges | list | `[]` | |
| defaultBackend.service.servicePort | int | `80` | |
| defaultBackend.service.type | string | `"ClusterIP"` | |
| defaultBackend.serviceAccount.automountServiceAccountToken | bool | `true` | |
| defaultBackend.serviceAccount.create | bool | `true` | |
| defaultBackend.serviceAccount.name | string | `""` | |
| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints |
| dhParam | string | `nil` | A base64-encoded Diffie-Hellman parameter. This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` |
| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials |
| defaultBackend.tolerations | list | `[]` | Node tolerations for server scheduling to nodes with taints. Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ |
| defaultBackend.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet |
| dhParam | string | `""` | A base64-encoded Diffie-Hellman parameter. This can be generated with: `openssl dhparam 4096 2> /dev/null | base64`. Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param |
| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials. Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ |
| podSecurityPolicy.enabled | bool | `false` | |
| portNamePrefix | string | `""` | Prefix for TCP and UDP port names in the ingress controller service. Some cloud providers, like Yandex Cloud, may have requirements for a port name regex to support cloud load balancer integration |
| rbac.create | bool | `true` | |
| rbac.scope | bool | `false` | |
| revisionHistoryLimit | int | `10` | Rollback limit |
| revisionHistoryLimit | int | `10` | Rollback limit |
| serviceAccount.annotations | object | `{}` | Annotations for the controller service account |
| serviceAccount.automountServiceAccountToken | bool | `true` | |
| serviceAccount.create | bool | `true` | |
| serviceAccount.name | string | `""` | |
| tcp | object | `{}` | TCP service key:value pairs |
| udp | object | `{}` | UDP service key:value pairs |
| tcp | object | `{}` | TCP service key-value pairs. Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md |
| udp | object | `{}` | UDP service key-value pairs. Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md |
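Any of the keys above can be overridden with a custom values file; a small, hypothetical override might look like:
```yaml
# values-override.yaml (hypothetical)
controller:
  replicaCount: 2
  service:
    type: LoadBalancer
defaultBackend:
  enabled: true
```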

View File

@@ -7,10 +7,7 @@ To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.c
This chart bootstraps an ingress-nginx deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Chart version 3.x.x: Kubernetes v1.16+
- Chart version 4.x.x and above: Kubernetes v1.19+
{{ template "chart.requirementsSection" . }}
## Get Repo Info
@@ -51,10 +48,6 @@ helm upgrade [RELEASE_NAME] [CHART] --install
_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
### Upgrading With Zero Downtime in Production
By default, the ingress-nginx controller has service interruptions whenever its pods are restarted or redeployed. To fix that, see the excellent blog post by Lindsay Landry from Codecademy: [Kubernetes: Nginx and Zero Downtime in Production](https://medium.com/codecademy-engineering/kubernetes-nginx-and-zero-downtime-in-production-2c910c6a5ed8).
### Migrating from stable/nginx-ingress
There are two main ways to migrate a release from `stable/nginx-ingress` to the `ingress-nginx/ingress-nginx` chart:
@@ -65,7 +58,6 @@ There are two main ways to migrate a release from `stable/nginx-ingress` to `ing
1. Redirect your DNS traffic from the old controller to the new controller
1. Log traffic from both controllers during this changeover
1. [Uninstall](#uninstall-chart) the old controller once traffic has fully drained from it
1. For details on all of these steps see [Upgrading With Zero Downtime in Production](#upgrading-with-zero-downtime-in-production)
Note that there are some configuration differences and upgrades between the two charts, described by Rimas Mocevicius from JFrog in the "Upgrading to ingress-nginx Helm chart" section of [Migrating from Helm chart nginx-ingress to ingress-nginx](https://rimusz.net/migrating-to-ingress-nginx). As the `ingress-nginx/ingress-nginx` chart continues to update, you will want to check current differences by running [helm configuration](#configuration) commands on both charts.
@@ -84,14 +76,14 @@ else it would make it impossible to evacuate a node. See [gh issue #7127](https:
### Prometheus Metrics
The Nginx ingress controller can export Prometheus metrics, by setting `controller.metrics.enabled` to `true`.
The Ingress-Nginx Controller can export Prometheus metrics by setting `controller.metrics.enabled` to `true`.
You can add Prometheus annotations to the metrics service using `controller.metrics.service.annotations`.
Alternatively, if you use the Prometheus Operator, you can enable ServiceMonitor creation by setting `controller.metrics.serviceMonitor.enabled` and `controller.metrics.serviceMonitor.additionalLabels.release="prometheus"`. The `release=prometheus` label should match the label configured on the Prometheus ServiceMonitor (see `kubectl get servicemonitor prometheus-kube-prom-prometheus -oyaml -n prometheus`).
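For example, a minimal values sketch (the `release` label value is whatever your Prometheus instance selects on):
```yaml
controller:
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
      additionalLabels:
        release: "prometheus"
```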
### ingress-nginx nginx\_status page/stats server
Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in nginx ingress controller:
Previous versions of this chart had a `controller.stats.*` configuration block, which is now obsolete due to the following changes in Ingress-Nginx Controller:
- In [0.16.1](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0161), the vts (virtual host traffic status) dashboard was removed
- In [0.23.0](https://github.com/kubernetes/ingress-nginx/blob/main/Changelog.md#0230), the status page at port 18080 is now a unix socket webserver only available at localhost.
@@ -99,7 +91,7 @@ Previous versions of this chart had a `controller.stats.*` configuration block,
### ExternalDNS Service Configuration
Add an [ExternalDNS](https://github.com/kubernetes-incubator/external-dns) annotation to the LoadBalancer service:
Add an [ExternalDNS](https://github.com/kubernetes-sigs/external-dns) annotation to the LoadBalancer service:
```yaml
controller:
@@ -110,7 +102,7 @@ controller:
### AWS L7 ELB with SSL Termination
Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/main/deploy/aws/l7/service-l7.yaml):
Annotate the controller as shown in the [nginx-ingress l7 patch](https://github.com/kubernetes/ingress-nginx/blob/ab3a789caae65eec4ad6e3b46b19750b481b6bce/deploy/aws/l7/service-l7.yaml):
```yaml
controller:
@@ -125,19 +117,6 @@ controller:
service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
```
### AWS route53-mapper
To configure the LoadBalancer service with the [route53-mapper addon](https://github.com/kubernetes/kops/tree/master/addons/route53-mapper), add the `domainName` annotation and `dns` label:
```yaml
controller:
  service:
    labels:
      dns: "route53"
    annotations:
      domainName: "kubernetes-example.com"
```
### Additional Internal Load Balancer
This setup is useful when you need both external and internal load balancers but don't want to have multiple ingress controllers and multiple ingress objects per application.
@@ -161,8 +140,10 @@ controller:
    internal:
      enabled: true
      annotations:
        # Create internal ELB
        service.beta.kubernetes.io/aws-load-balancer-internal: "true"
        # Create internal NLB
        service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
        # Create internal ELB (Deprecated)
        # service.beta.kubernetes.io/aws-load-balancer-internal: "true"
        # Any other annotation can be declared here.
```
@@ -174,7 +155,7 @@ controller:
    internal:
      enabled: true
      annotations:
        # Create internal LB. More informations: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
        # Create internal LB. More information: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing
        # For GKE versions 1.17 and later
        networking.gke.io/load-balancer-type: "Internal"
        # For earlier versions
@@ -205,17 +186,34 @@ controller:
        # Any other annotation can be declared here.
```
Load balancer annotations for other cloud service providers can be found in the [Internal load balancer](https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer) documentation.
A use case for this scenario is a split-view DNS setup where the public zone CNAME records point to the external balancer URL while the private zone CNAME records point to the internal balancer URL. This way, you only need one Ingress Kubernetes object.
Optionally you can set `controller.service.loadBalancerIP` if you need a static IP for the resulting `LoadBalancer`.
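For example, a minimal sketch (the address below is a placeholder for a pre-allocated static IP):
```yaml
controller:
  service:
    loadBalancerIP: "203.0.113.10"  # hypothetical pre-allocated static IP
```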
### Ingress Admission Webhooks
With nginx-ingress-controller version 0.25+, the nginx ingress controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
With nginx-ingress-controller version 0.25+, the Ingress-Nginx Controller pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent bad ingress from being added to the cluster.
**This feature is enabled by default since 0.31.0.**
nginx-ingress-controller 0.25.* works only with Kubernetes 1.14+; 0.26 fixed [this issue](https://github.com/kubernetes/ingress-nginx/pull/4521)
#### How the Chart Configures the Hooks
A validating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates for this, but in most cases a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using Helm. The steps are created to be idempotent and to allow turning the feature on and off without running into Helm quirks.
1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end user certificates. If the certificate already exists, the hook exits.
2. The Ingress-Nginx Controller pod is configured to use a TLS proxy container, which will load that certificate.
3. Validating and Mutating webhook configurations are created in the cluster.
4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process allows a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is patched in as well (see the values sketch below).
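A minimal values sketch for these settings, using the chart defaults from the values table:
```yaml
controller:
  admissionWebhooks:
    enabled: true        # chart default
    failurePolicy: Fail  # fail closed: reject objects the webhook cannot validate
```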
#### Alternatives
It should be possible to use [cert-manager/cert-manager](https://github.com/cert-manager/cert-manager) if a more complete solution is required.
You can enable automatic self-signed TLS certificate provisioning via cert-manager by setting the `controller.admissionWebhooks.certManager.enabled` value to true.
Please ensure that cert-manager is correctly installed and configured.
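For example:
```yaml
controller:
  admissionWebhooks:
    certManager:
      enabled: true
```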
### Helm Error When Upgrading: spec.clusterIP: Invalid value: ""
If you are upgrading this chart from a version between 0.31.0 and 1.2.2 then you may get an error like this:
@@ -228,8 +226,4 @@ Detail of how and why are in [this issue](https://github.com/helm/charts/pull/13
As of version `1.26.0` of this chart, by simply not providing any clusterIP value, `invalid: spec.clusterIP: Invalid value: "": field is immutable` will no longer occur since `clusterIP: ""` will not be rendered.
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
{{ template "helm-docs.versionFooter" . }}

View File

@@ -0,0 +1,9 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### {{ .NewHelmChartVersion }}
{{ with .HelmUpdates }}
{{ range . }}* {{ . }}
{{ end }}{{ end }}
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-{{ .PreviousHelmChartVersion }}...helm-chart-{{ .NewHelmChartVersion }}

View File

@@ -0,0 +1,13 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.5.2
* add lint on chart before release (#9570)
* ci: remove setup-helm step (#9404)
* feat(helm): Optionally use cert-manager instead admission patch (#9279)
* run helm release on main only and when the chart/value changes only (#9290)
* Update Ingress-Nginx version controller-v1.6.4
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.4.3...helm-chart-4.5.2

View File

@@ -0,0 +1,24 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.6.0
* docs(helm): fix value key in readme for enabling certManager (#9640)
* Upgrade alpine 3.17.2
* Upgrade golang 1.20
* Drop testing/support for Kubernetes 1.23
* Update Ingress-Nginx version controller-v1.7.0
* feat: OpenTelemetry module integration (#9062)
* canary-weight-total annotation ignored in rule backends (#9729)
* fix controller psp's volume config (#9740)
* Fix several Helm YAML issues with extraModules and extraInitContainers (#9709)
* Chart: Drop `controller.headers`, rework DH param secret. (#9659)
* Deployment/DaemonSet: Label pods using `ingress-nginx.labels`. (#9732)
* HPA: autoscaling/v2beta1 deprecated, bump apiVersion to v2 for defaultBackend (#9731)
* Fix incorrect annotation name in upstream hashing configuration (#9617)
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.5.2...helm-chart-4.6.0

View File

@@ -0,0 +1,11 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.6.1
* [helm] Support custom port configuration for internal service (#9846)
* Adding resource type to default HPA configuration to resolve issues with Terraform helm chart usage (#9803)
* Update Ingress-Nginx version controller-v1.7.1
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.6.0...helm-chart-4.6.1

View File

@@ -0,0 +1,14 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.7.0
* helm: Fix opentelemetry module installation for daemonset (#9792)
* Update charts/* to keep project name display aligned (#9931)
* HPA: Use capabilities & align manifests. (#9521)
* PodDisruptionBudget spec logic update (#9904)
* add option for annotations in PodDisruptionBudget (#9843)
* Update Ingress-Nginx version controller-v1.8.0
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.6.1...helm-chart-4.7.0

View File

@@ -0,0 +1,12 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.7.1
* Added a doc line to the missing helm value service.internal.loadBalancerIP (#9406)
* feat(helm): Add loadBalancerClass (#9562)
* added helmshowvalues example (#10019)
* Update Ingress-Nginx version controller-v1.8.1
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.7.0...helm-chart-4.7.1

View File

@@ -0,0 +1,9 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.7.2
* Update Ingress-Nginx version controller-v1.8.2
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.7.1...helm-chart-4.7.2

View File

@@ -0,0 +1,13 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.8.0-beta.0
* ci(helm): fix Helm Chart release action 422 error (#10237)
* helm: Use .Release.Namespace as default for ServiceMonitor namespace (#10249)
* [helm] configure allow to configure hostAliases (#10180)
* [helm] pass service annotations through helm tpl engine (#10084)
* Update Ingress-Nginx version controller-v1.9.0-beta.0
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.7.2...helm-chart-4.8.0-beta.0

View File

@@ -0,0 +1,13 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.8.0
* ci(helm): fix Helm Chart release action 422 error (#10237)
* helm: Use .Release.Namespace as default for ServiceMonitor namespace (#10249)
* [helm] configure allow to configure hostAliases (#10180)
* [helm] pass service annotations through helm tpl engine (#10084)
* Update Ingress-Nginx version controller-v1.9.0
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.7.2...helm-chart-4.8.0

View File

@@ -0,0 +1,9 @@
# Changelog
This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
### 4.8.1
* Update Ingress-Nginx version controller-v1.9.1
**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.8.0...helm-chart-4.8.1

View File

@@ -0,0 +1,6 @@
controller:
  admissionWebhooks:
    certManager:
      enabled: true
  service:
    type: ClusterIP

View File

@@ -0,0 +1,8 @@
controller:
  service:
    type: ClusterIP
  containerSecurityContext:
    allowPrivilegeEscalation: false
  extraModules:
    - name: opentelemetry
      image: busybox

View File

@@ -0,0 +1,8 @@
controller:
  service:
    type: ClusterIP
  extraModules:
    - name: opentelemetry
      image: busybox
      containerSecurityContext:
        allowPrivilegeEscalation: false

View File

@@ -7,3 +7,9 @@ controller:
      enabled: true
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-internal: "true"
      ports:
        http: 443
        https: 80
      targetPorts:
        http: 443
        https: 80

View File

@@ -0,0 +1,12 @@
controller:
  service:
    type: ClusterIP
  admissionWebhooks:
    enabled: true
    extraEnvs:
      - name: FOO
        value: foo
      - name: TEST
        value: test
    patch:
      enabled: true

View File

@@ -109,7 +109,7 @@ spec:
        configmap:
          type: array
          items:
            type: string
          type: string
        openapi-schema-validation:
          type: object
          properties:

View File

@@ -1,40 +1,40 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
  name : sourcesidentifiers.openappsec.io
spec:
  group: openappsec.io
  versions:
    - name: v1beta1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: array
              items:
                type: object
                properties:
                  sourceIdentifier:
                    type: string
                    enum:
                      - headerkey
                      - JWTKey
                      - cookie
                      - sourceip
                      - x-forwarded-for
                  value:
                    type: array
                    items:
                      type: string
  scope: Cluster
  names:
    plural: sourcesidentifiers
    singular: sourcesidentifier
    kind: SourcesIdentifier
    shortNames:
      - sourcesidentifier
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
  name : sourcesidentifiers.openappsec.io
spec:
  group: openappsec.io
  versions:
    - name: v1beta1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: array
              items:
                type: object
                properties:
                  sourceIdentifier:
                    type: string
                    enum:
                      - headerkey
                      - JWTKey
                      - cookie
                      - sourceip
                      - x-forwarded-for
                  value:
                    type: array
                    items:
                      type: string
  scope: Cluster
  names:
    plural: sourcesidentifiers
    singular: sourcesidentifier
    kind: SourcesIdentifier
    shortNames:
      - sourcesidentifier

View File

@@ -1,32 +1,32 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
  name : trustedsources.openappsec.io
spec:
  group: openappsec.io
  versions:
    - name: v1beta1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: object
              properties:
                minNumOfSources:
                  type: integer
                sourcesIdentifiers:
                  type: array
                  items:
                    type: string
  scope: Cluster
  names:
    plural: trustedsources
    singular: trustedsource
    kind: TrustedSource
    shortNames:
      - trustedsource
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata :
  name : trustedsources.openappsec.io
spec:
  group: openappsec.io
  versions:
    - name: v1beta1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: object
              properties:
                minNumOfSources:
                  type: integer
                sourcesIdentifiers:
                  type: array
                  items:
                    type: string
  scope: Cluster
  names:
    plural: trustedsources
    singular: trustedsource
    kind: TrustedSource
    shortNames:
      - trustedsource

View File

@@ -85,6 +85,16 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- printf "%s-%s" (include "ingress-nginx.fullname" .) .Values.controller.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Construct a unique electionID.
Users can provide an override for an explicit electionID if they want via `.Values.controller.electionID`
*/}}
{{- define "ingress-nginx.controller.electionID" -}}
{{- $defElectionID := printf "%s-leader" (include "ingress-nginx.fullname" .) -}}
{{- $electionID := default $defElectionID .Values.controller.electionID -}}
{{- print $electionID -}}
{{- end -}}
{{/*
Construct the path for the publish-service.
@@ -183,3 +193,25 @@ IngressClass parameters.
{{ toYaml .Values.controller.ingressClassResource.parameters | indent 4}}
{{ end }}
{{- end -}}
{{/*
Extra modules.
*/}}
{{- define "extraModules" -}}
- name: {{ .name }}
image: {{ .image }}
{{- if .distroless | default false }}
command: ['/init_module']
{{- else }}
command: ['sh', '-c', '/usr/local/bin/init_module.sh']
{{- end }}
{{- if .containerSecurityContext }}
securityContext: {{ .containerSecurityContext | toYaml | nindent 4 }}
{{- end }}
{{- if .resources }}
resources: {{ .resources | toYaml | nindent 4 }}
{{- end }}
volumeMounts:
- name: {{ toYaml "modules"}}
mountPath: {{ toYaml "/modules_mount"}}
{{- end -}}
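For reference, a values sketch (mirroring the chart's test values) of how these fields feed this template; `distroless` and `resources` are the optional keys it checks:
```yaml
controller:
  extraModules:
    - name: opentelemetry
      image: busybox            # assumed to provide /usr/local/bin/init_module.sh
      distroless: false         # set true to use the /init_module entrypoint instead
      containerSecurityContext:
        allowPrivilegeEscalation: false
```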

View File

@@ -1,5 +1,8 @@
{{- define "ingress-nginx.params" -}}
- /nginx-ingress-controller
{{- if .Values.controller.enableAnnotationValidations }}
- --enable-annotation-validation=true
{{- end }}
{{- if .Values.defaultBackend.enabled }}
- --default-backend-service=$(POD_NAMESPACE)/{{ include "ingress-nginx.defaultBackend.fullname" . }}
{{- end }}
@@ -10,7 +13,7 @@
- --publish-service={{ template "ingress-nginx.controller.publishServicePath" . }}-internal
{{- end }}
{{- end }}
- --election-id={{ .Values.controller.electionID }}
- --election-id={{ include "ingress-nginx.controller.electionID" . }}
- --controller-class={{ .Values.controller.ingressClassResource.controllerValue }}
{{- if .Values.controller.ingressClass }}
- --ingress-class={{ .Values.controller.ingressClass }}
@@ -51,6 +54,9 @@
{{- if .Values.controller.watchIngressWithoutClass }}
- --watch-ingress-without-class=true
{{- end }}
{{- if .Values.controller.enableTopologyAwareRouting }}
- --enable-topology-aware-routing=true
{{- end }}
{{- range $key, $value := .Values.controller.extraArgs }}
{{- /* Accept keys without values or with false as value */}}
{{- if eq ($value | quote | len) 2 }}
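These flags map directly to chart values; a minimal values sketch that would exercise both new flags (the topology annotation comes from the values table above):
```yaml
controller:
  enableAnnotationValidations: true   # adds --enable-annotation-validation=true
  enableTopologyAwareRouting: true    # adds --enable-topology-aware-routing=true
  service:
    annotations:
      service.kubernetes.io/topology-mode: "auto"
```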

View File

@@ -0,0 +1,63 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.certManager.enabled -}}
{{- if not .Values.controller.admissionWebhooks.certManager.issuerRef -}}
# Create a selfsigned Issuer, in order to create a root CA certificate for
# signing webhook serving certificates
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ include "ingress-nginx.fullname" . }}-self-signed-issuer
namespace: {{ .Release.Namespace }}
spec:
selfSigned: {}
---
# Generate a CA Certificate used to sign certificates for the webhook
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "ingress-nginx.fullname" . }}-root-cert
namespace: {{ .Release.Namespace }}
spec:
secretName: {{ include "ingress-nginx.fullname" . }}-root-cert
duration: {{ .Values.controller.admissionWebhooks.certManager.rootCert.duration | default "43800h0m0s" | quote }}
issuerRef:
name: {{ include "ingress-nginx.fullname" . }}-self-signed-issuer
commonName: "ca.webhook.ingress-nginx"
isCA: true
subject:
organizations:
- ingress-nginx
---
# Create an Issuer that uses the above generated CA certificate to issue certs
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: {{ include "ingress-nginx.fullname" . }}-root-issuer
namespace: {{ .Release.Namespace }}
spec:
ca:
secretName: {{ include "ingress-nginx.fullname" . }}-root-cert
{{- end }}
---
# generate a server certificate for the apiservices to use
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission
namespace: {{ .Release.Namespace }}
spec:
secretName: {{ include "ingress-nginx.fullname" . }}-admission
duration: {{ .Values.controller.admissionWebhooks.certManager.admissionCert.duration | default "8760h0m0s" | quote }}
issuerRef:
{{- if .Values.controller.admissionWebhooks.certManager.issuerRef }}
{{- toYaml .Values.controller.admissionWebhooks.certManager.issuerRef | nindent 4 }}
{{- else }}
name: {{ include "ingress-nginx.fullname" . }}-root-issuer
{{- end }}
dnsNames:
- {{ include "ingress-nginx.controller.fullname" . }}-admission
- {{ include "ingress-nginx.controller.fullname" . }}-admission.{{ .Release.Namespace }}
- {{ include "ingress-nginx.controller.fullname" . }}-admission.{{ .Release.Namespace }}.svc
subject:
organizations:
- ingress-nginx-admission
{{- end -}}
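To exercise this template, the cert-manager integration can be enabled with values such as the following (a sketch using only keys referenced above; the commented `issuerRef` structure follows cert-manager conventions and, when set, bypasses the self-signed chain):
```
controller:
  admissionWebhooks:
    certManager:
      enabled: true
      rootCert:
        duration: 43800h0m0s    # template default
      admissionCert:
        duration: 8760h0m0s     # template default
      # issuerRef:
      #   name: my-existing-issuer
      #   kind: ClusterIssuer
```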


@@ -1,4 +1,4 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:


@@ -1,8 +1,8 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission
name: {{ include "ingress-nginx.fullname" . }}-admission
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded


@@ -1,4 +1,4 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
apiVersion: batch/v1
kind: Job
metadata:
@@ -56,8 +56,12 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
{{- if .Values.controller.admissionWebhooks.extraEnvs }}
{{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.createSecretJob.securityContext }}
securityContext: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.securityContext | nindent 12 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.createSecretJob.resources }}
resources: {{ toYaml .Values.controller.admissionWebhooks.createSecretJob.resources | nindent 12 }}
{{- end }}
@@ -69,8 +73,8 @@ spec:
{{- if .Values.controller.admissionWebhooks.patch.tolerations }}
tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.patch.securityContext }}
securityContext:
runAsNonRoot: true
runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }}
fsGroup: {{ .Values.controller.admissionWebhooks.patch.fsGroup }}
{{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }}
{{- end }}
{{- end }}


@@ -1,4 +1,4 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
apiVersion: batch/v1
kind: Job
metadata:
@@ -58,8 +58,12 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
{{- if .Values.controller.admissionWebhooks.extraEnvs }}
{{- toYaml .Values.controller.admissionWebhooks.extraEnvs | nindent 12 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.patchWebhookJob.securityContext }}
securityContext: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.securityContext | nindent 12 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.patchWebhookJob.resources }}
resources: {{ toYaml .Values.controller.admissionWebhooks.patchWebhookJob.resources | nindent 12 }}
{{- end }}
@@ -71,8 +75,8 @@ spec:
{{- if .Values.controller.admissionWebhooks.patch.tolerations }}
tolerations: {{ toYaml .Values.controller.admissionWebhooks.patch.tolerations | nindent 8 }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.patch.securityContext }}
securityContext:
runAsNonRoot: true
runAsUser: {{ .Values.controller.admissionWebhooks.patch.runAsUser }}
fsGroup: {{ .Values.controller.admissionWebhooks.patch.fsGroup }}
{{- toYaml .Values.controller.admissionWebhooks.patch.securityContext | nindent 8 }}
{{- end }}
{{- end }}


@@ -0,0 +1,26 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: admission-webhook
{{- with .Values.controller.admissionWebhooks.patch.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
podSelector:
matchLabels:
{{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: admission-webhook
policyTypes:
- Ingress
- Egress
egress:
- {}
{{- end }}


@@ -1,3 +1,4 @@
{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }}
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled .Values.podSecurityPolicy.enabled (empty .Values.controller.admissionWebhooks.existingPsp) -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
@@ -37,3 +38,4 @@ spec:
- secret
- downwardAPI
{{- end }}
{{- end }}


@@ -1,8 +1,8 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "ingress-nginx.fullname" . }}-admission
name: {{ include "ingress-nginx.fullname" . }}-admission
namespace: {{ .Release.Namespace }}
annotations:
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade


@@ -1,4 +1,4 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:


@@ -1,4 +1,4 @@
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled -}}
{{- if and .Values.controller.admissionWebhooks.enabled .Values.controller.admissionWebhooks.patch.enabled (not .Values.controller.admissionWebhooks.certManager.enabled) -}}
apiVersion: v1
kind: ServiceAccount
metadata:


@@ -4,8 +4,13 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
annotations:
{{- if .Values.controller.admissionWebhooks.certManager.enabled }}
certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-admission" .Release.Namespace (include "ingress-nginx.fullname" .) | quote }}
cert-manager.io/inject-ca-from: {{ printf "%s/%s-admission" .Release.Namespace (include "ingress-nginx.fullname" .) | quote }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.annotations }}
annotations: {{ toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }}
{{- toYaml .Values.controller.admissionWebhooks.annotations | nindent 4 }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}


@@ -1,4 +1,5 @@
{{- if and (eq "stand-alone" .Values.appsec.mode) (eq .Values.appsec.persistence.enabled true) -}}
{{- if not (eq .Values.kind "Vanilla") -}}
{{- if and (eq "standalone" .Values.appsec.mode) (eq .Values.appsec.persistence.enabled true) -}}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
@@ -17,4 +18,5 @@ spec:
{{- else }}
storageClassName: {{ required "A storage class for learning data is required" .Values.appsec.persistence.learning.storageClass.name }}
{{- end -}}
{{- end }}
{{- end }}
{{- end }}


@@ -1,4 +1,4 @@
{{- if (eq .Values.controller.kind "Deployment") -}}
{{- if (and (eq .Values.kind "AppSec") .Values.appsec.persistence.enabled) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:


@@ -0,0 +1,32 @@
{{- if not (eq .Values.kind "Vanilla") -}}
{{- if .Values.appsec.configMapContent }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.appsec.configMapName | default "appsec-settings-configmap" | quote }}
data:
{{- if .Values.appsec.configMapContent.crowdsec }}
CROWDSEC_ENABLED: {{ .Values.appsec.configMapContent.crowdsec.enabled | default "false" | quote }}
{{- if .Values.appsec.configMapContent.crowdsec.api }}
CROWDSEC_API_URL: {{ .Values.appsec.configMapContent.crowdsec.api.url | default "http://crowdsec-service:8080/v1/decisions/stream" }}
{{- else }}
CROWDSEC_API_URL: "http://crowdsec-service:8080/v1/decisions/stream"
{{- end }}
{{- if .Values.appsec.configMapContent.crowdsec.auth }}
CROWDSEC_AUTH_METHOD: {{ .Values.appsec.configMapContent.crowdsec.auth.method | default "apikey" }}
{{- else }}
CROWDSEC_AUTH_METHOD: "apikey"
{{- end }}
{{- if .Values.appsec.configMapContent.crowdsec.mode }}
CROWDSEC_MODE: {{ .Values.appsec.configMapContent.crowdsec.mode | default "prevent" }}
{{- else }}
CROWDSEC_MODE: "prevent"
{{- end }}
{{- if .Values.appsec.configMapContent.crowdsec.logging }}
CROWDSEC_LOGGING: {{ .Values.appsec.configMapContent.crowdsec.logging | default "enabled" }}
{{- else }}
CROWDSEC_LOGGING: "enabled"
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@@ -0,0 +1,12 @@
{{- if not (eq .Values.kind "Vanilla") -}}
{{ if .Values.appsec.secretContent }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.appsec.secretName | default "appsec-settings-secret" | quote }}
data:
{{- if and .Values.appsec.secretContent.crowdsec .Values.appsec.secretContent.crowdsec.auth }}
CROWDSEC_AUTH_DATA: {{ .Values.appsec.secretContent.crowdsec.auth.data | b64enc }}
{{- end }}
{{ end }}
{{ end }}


@@ -1,6 +1,15 @@
{{- if or (eq .Values.controller.kind "StatefulSet") (eq .Values.controller.kind "Both") -}}
{{- if (not (eq .Values.kind "Vanilla")) }}
{{- include "isControllerTagValid" . -}}
apiVersion: apps/v1
{{- if (eq .Values.kind "AppSec") }}
{{- if (eq .Values.controller.kind "DaemonSet") }}
kind: DaemonSet
{{- else }}
kind: Deployment
{{- end }}
{{- else if eq .Values.kind "AppSecStateful" }}
kind: StatefulSet
{{- end }}
metadata:
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
@@ -18,16 +27,25 @@ spec:
matchLabels:
{{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: controller
{{- if not .Values.controller.autoscaling.enabled }}
{{- if not (or .Values.controller.autoscaling.enabled .Values.controller.keda.enabled) }}
{{- if eq .Values.kind "AppSecStateful" }}
serviceName: "open-appsec-stateful-set"
{{- end }}
{{- if or (not (eq .Values.controller.kind "DaemonSet")) (and (eq .Values.kind "AppSecStateful") (eq .Values.controller.kind "DaemonSet")) }}
replicas: {{ .Values.controller.replicaCount }}
{{- end }}
{{- end }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
{{- if .Values.controller.updateStrategy }}
strategy:
{{ toYaml .Values.controller.updateStrategy | nindent 4 }}
{{- if (and (not (eq .Values.kind "AppSecStateful")) (eq .Values.controller.kind "DaemonSet")) }}
updateStrategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }}
{{- else }}
strategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }}
{{- end }}
{{- end }}
{{- if (eq .Values.kind "AppSec") }}
minReadySeconds: {{ .Values.controller.minReadySeconds }}
{{- end }}
#minReadySeconds: {{ .Values.controller.minReadySeconds }}
template:
metadata:
{{- if .Values.controller.podAnnotations }}
@@ -37,7 +55,7 @@ spec:
{{- end }}
{{- end }}
labels:
{{- include "ingress-nginx.selectorLabels" . | nindent 8 }}
{{- include "ingress-nginx.labels" . | nindent 8 }}
app.kubernetes.io/component: controller
{{- with .Values.controller.labels }}
{{- toYaml . | nindent 8 }}
@@ -49,6 +67,9 @@ spec:
{{- if .Values.controller.dnsConfig }}
dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }}
{{- end }}
{{- if .Values.controller.hostAliases }}
hostAliases: {{ tpl (toYaml .Values.controller.hostAliases) $ | nindent 8 }}
{{- end }}
{{- if .Values.controller.hostname }}
hostname: {{ toYaml .Values.controller.hostname | nindent 8 }}
{{- end }}
@@ -79,14 +100,18 @@ spec:
- name: {{ .Values.appsec.name }}
securityContext:
{{ toYaml .Values.appsec.securityContext | nindent 12 }}
{{- $tag := .Values.appsec.image.tag }}
{{- if .Values.appsec.configMapContent.crowdsec.enabled }}
{{- $tag = "crowdsec-1.2314-rc1" }}
{{- end }}
{{- with .Values.appsec.image }}
image: "{{- if .registry }}{{ .registry }}/{{- end }}{{- if .repository }}{{ .repository }}/{{- end }}{{ .image }}{{- if .tag }}:{{ .tag }}{{- end }}{{- if (.digest) -}} @{{.digest}} {{- end }}"
{{- end }}
command:
- {{ .Values.appsec.command }}
imagePullPolicy: {{ .Values.appsec.image.pullPolicy }}
args:
{{- if (eq "stand-alone" .Values.appsec.mode) }}
args:
{{- if (eq "standalone" .Values.appsec.mode) }}
- --hybrid-mode
- --token
- cp-3fb5c718-5e39-47e6-8d5e-99b4bc5660b74b4b7fc8-5312-451d-a763-aaf7872703c0
@@ -101,9 +126,13 @@ spec:
{{- if .Values.appsec.proxy }}
- --proxy
- {{ .Values.appsec.proxy }}
{{- end }}
{{- end }}
imagePullPolicy: {{ .Values.appsec.image.pullPolicy }}
env:
- name: user_email
value: {{ .Values.appsec.userEmail }}
- name: registered_server
value: "NGINX Server"
{{- if eq .Values.appsec.playground false }}
- name: SHARED_STORAGE_HOST
value: {{ .Values.appsec.storage.name }}-svc
@@ -113,26 +142,35 @@ spec:
- name: PLAYGROUND
value: "true"
{{- end }}
envFrom:
- configMapRef:
name: {{ .Values.appsec.configMapName | default "appsec-settings-configmap" }}
- secretRef:
name: {{ .Values.appsec.secretName | default "appsec-settings-secret" }}
resources:
{{ toYaml .Values.resources | nindent 12 }}
volumeMounts:
- name: advanced-model
mountPath: /advanced-model
{{- if .Values.appsec.persistence.enabled }}
{{- if (eq .Values.appsec.persistence.enabled true) }}
- name: appsec-conf
mountPath: /etc/cp/conf
- name: appsec-data
mountPath: /etc/cp/data
{{- end }}
mountPath: /etc/cp/data
{{- end }}
- name: {{ .Values.controller.containerName }}
{{- with .Values.controller.image }}
image: "{{- if .registry }}{{ .registry }}/{{- end }}{{- if .repository }}{{ .repository }}/{{- end }}{{ .image }}{{- if .tag }}:{{ .tag }}{{- end }}{{- if (.digest) -}} @{{.digest}} {{- end }}"
{{- $tag := .Values.appsec.nginx.image.tag }}
{{- if .Values.appsec.configMapContent.crowdsec.enabled }}
{{- $tag = "1.2303.1-rc1-v1.3.0" }}
{{- end }}
{{- with .Values.appsec.nginx.image }}
image: "{{ .repository }}:{{ .tag }}"
{{- end }}
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
{{- if .Values.controller.lifecycle }}
lifecycle: {{ toYaml .Values.controller.lifecycle | nindent 12 }}
{{- end }}
args:
args:
{{- include "ingress-nginx.params" . | nindent 12 }}
securityContext: {{ include "controller.containerSecurityContext" . | nindent 12 }}
env:
@@ -150,12 +188,16 @@ spec:
{{- end }}
{{- if .Values.controller.extraEnvs }}
{{- toYaml .Values.controller.extraEnvs | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.controller.startupProbe }}
startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }}
{{- end }}
{{- if .Values.controller.livenessProbe }}
livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }}
{{- end }}
{{- if .Values.controller.readinessProbe }}
readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }}
{{- end }}
ports:
{{- range $key, $value := .Values.controller.containerPort }}
- name: {{ $key }}
@@ -166,7 +208,7 @@ spec:
{{- end }}
{{- end }}
{{- if .Values.controller.metrics.enabled }}
- name: metrics
- name: {{ .Values.controller.metrics.portName }}
containerPort: {{ .Values.controller.metrics.port }}
protocol: TCP
{{- end }}
@@ -191,11 +233,15 @@ spec:
hostPort: {{ $key }}
{{- end }}
{{- end }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
volumeMounts:
{{- if .Values.controller.extraModules }}
{{- if (or .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
- name: modules
{{ if .Values.controller.image.chroot }}
mountPath: /chroot/modules_mount
{{ else }}
mountPath: /modules_mount
{{ end }}
{{- end }}
{{- if .Values.controller.customTemplate.configMapName }}
- mountPath: /etc/nginx/template
@@ -217,21 +263,22 @@ spec:
{{- if .Values.controller.extraContainers }}
{{ toYaml .Values.controller.extraContainers | nindent 8 }}
{{- end }}
{{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }}
{{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
initContainers:
{{- if .Values.controller.extraInitContainers }}
{{ toYaml .Values.controller.extraInitContainers | nindent 8 }}
{{- end }}
{{- if .Values.controller.extraModules }}
{{- range .Values.controller.extraModules }}
- name: {{ .name }}
image: {{ .image }}
command: ['sh', '-c', '/usr/local/bin/init_module.sh']
volumeMounts:
- name: modules
mountPath: /modules_mount
{{- $containerSecurityContext := .containerSecurityContext | default $.Values.controller.containerSecurityContext }}
{{- include "extraModules" (dict "name" .name "image" .image "containerSecurityContext" $containerSecurityContext) | nindent 8 }}
{{- end }}
{{- end }}
{{- if .Values.controller.opentelemetry.enabled}}
{{- $otelContainerSecurityContext := $.Values.controller.opentelemetry.containerSecurityContext | default $.Values.controller.containerSecurityContext }}
{{ $otelResources := $.Values.controller.opentelemetry.resources | default dict }}
{{- include "extraModules" (dict "name" "opentelemetry" "image" .Values.controller.opentelemetry.image "containerSecurityContext" $otelContainerSecurityContext "distroless" true "resources" $otelResources) | nindent 8}}
{{- end}}
{{- end }}
{{- if .Values.controller.hostNetwork }}
hostNetwork: {{ .Values.controller.hostNetwork }}
@@ -246,17 +293,25 @@ spec:
affinity: {{ toYaml .Values.controller.affinity | nindent 8 }}
{{- end }}
{{- if .Values.controller.topologySpreadConstraints }}
topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }}
topologySpreadConstraints: {{ tpl (toYaml .Values.controller.topologySpreadConstraints) $ | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }}
terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
volumes:
- name: advanced-model
configMap:
name: advanced-model-config
optional: true
{{- if .Values.controller.extraModules }}
{{- if (and (eq .Values.kind "AppSec") .Values.appsec.persistence.enabled) }}
- name: appsec-conf
persistentVolumeClaim:
claimName: {{ .Values.appsec.name }}-conf
- name: appsec-data
persistentVolumeClaim:
claimName: {{ .Values.appsec.name }}-data
{{- end }}
{{- if (or .Values.controller.extraModules .Values.controller.opentelemetry.enabled)}}
- name: modules
emptyDir: {}
{{- end }}
@@ -272,12 +327,19 @@ spec:
- name: webhook-cert
secret:
secretName: {{ include "ingress-nginx.fullname" . }}-admission
{{- if .Values.controller.admissionWebhooks.certManager.enabled }}
items:
- key: tls.crt
path: cert
- key: tls.key
path: key
{{- end }}
{{- end }}
{{- if .Values.controller.extraVolumes }}
{{ toYaml .Values.controller.extraVolumes | nindent 8 }}
{{- end }}
{{- end }}
{{- if .Values.appsec.persistence.enabled }}
{{- if (and (eq .Values.kind "AppSecStateful") .Values.appsec.persistence.enabled) }}
volumeClaimTemplates:
- metadata:
name: appsec-conf


@@ -29,6 +29,13 @@ rules:
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
{{- if and .Values.controller.scope.enabled .Values.controller.scope.namespace }}
- apiGroups:
- ""
@@ -82,6 +89,14 @@ rules:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
- apiGroups:
- openappsec.io
resources:


@@ -1,4 +1,4 @@
{{- if or .Values.controller.proxySetHeaders .Values.controller.headers -}}
{{- if .Values.controller.proxySetHeaders -}}
apiVersion: v1
kind: ConfigMap
metadata:
@@ -10,10 +10,5 @@ metadata:
{{- end }}
name: {{ include "ingress-nginx.fullname" . }}-custom-proxy-headers
namespace: {{ .Release.Namespace }}
data:
{{- if .Values.controller.proxySetHeaders }}
{{ toYaml .Values.controller.proxySetHeaders | indent 2 }}
{{ else if and .Values.controller.headers (not .Values.controller.proxySetHeaders) }}
{{ toYaml .Values.controller.headers | indent 2 }}
{{- end }}
data: {{ toYaml .Values.controller.proxySetHeaders | nindent 2 }}
{{- end }}


@@ -17,13 +17,12 @@ data:
{{- if .Values.controller.addHeaders }}
add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers
{{- end }}
{{- if or .Values.controller.proxySetHeaders .Values.controller.headers }}
{{- if .Values.controller.proxySetHeaders }}
proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers
{{- end }}
{{- if .Values.dhParam }}
ssl-dh-param: {{ printf "%s/%s" .Release.Namespace (include "ingress-nginx.controller.fullname" .) }}
ssl-dh-param: {{ .Release.Namespace }}/{{ include "ingress-nginx.controller.fullname" . }}
{{- end }}
{{- range $key, $value := .Values.controller.config }}
{{- $key | nindent 2 }}: {{ $value | quote }}
{{- $key | nindent 2 }}: {{ $value | quote }}
{{- end }}


@@ -1,4 +1,4 @@
{{- if or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both") -}}
{{- if and (eq .Values.kind "Vanilla") (or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both")) -}}
{{- include "isControllerTagValid" . -}}
apiVersion: apps/v1
kind: DaemonSet
@@ -33,7 +33,7 @@ spec:
{{- end }}
{{- end }}
labels:
{{- include "ingress-nginx.selectorLabels" . | nindent 8 }}
{{- include "ingress-nginx.labels" . | nindent 8 }}
app.kubernetes.io/component: controller
{{- with .Values.controller.labels }}
{{- toYaml . | nindent 8 }}
@@ -45,6 +45,9 @@ spec:
{{- if .Values.controller.dnsConfig }}
dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }}
{{- end }}
{{- if .Values.controller.hostAliases }}
hostAliases: {{ tpl (toYaml .Values.controller.hostAliases) $ | nindent 8 }}
{{- end }}
{{- if .Values.controller.hostname }}
hostname: {{ toYaml .Values.controller.hostname | nindent 8 }}
{{- end }}
@@ -53,12 +56,12 @@ spec:
imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
{{- end }}
{{- if .Values.controller.priorityClassName }}
priorityClassName: {{ .Values.controller.priorityClassName }}
priorityClassName: {{ .Values.controller.priorityClassName | quote }}
{{- end }}
{{- if or .Values.controller.podSecurityContext .Values.controller.sysctls }}
securityContext:
{{- end }}
{{- if .Values.controller.podSecurityContext }}
{{- if .Values.controller.podSecurityContext }}
{{- toYaml .Values.controller.podSecurityContext | nindent 8 }}
{{- end }}
{{- if .Values.controller.sysctls }}
@@ -102,8 +105,12 @@ spec:
{{- if .Values.controller.startupProbe }}
startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }}
{{- end }}
{{- if .Values.controller.livenessProbe }}
livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }}
{{- end }}
{{- if .Values.controller.readinessProbe }}
readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }}
{{- end }}
ports:
{{- range $key, $value := .Values.controller.containerPort }}
- name: {{ $key }}
@@ -114,7 +121,7 @@ spec:
{{- end }}
{{- end }}
{{- if .Values.controller.metrics.enabled }}
- name: metrics
- name: {{ .Values.controller.metrics.portName }}
containerPort: {{ .Values.controller.metrics.port }}
protocol: TCP
{{- end }}
@@ -139,11 +146,15 @@ spec:
hostPort: {{ $key }}
{{- end }}
{{- end }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
volumeMounts:
{{- if .Values.controller.extraModules }}
{{- if (or .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
- name: modules
{{ if .Values.controller.image.chroot }}
mountPath: /chroot/modules_mount
{{ else }}
mountPath: /modules_mount
{{ end }}
{{- end }}
{{- if .Values.controller.customTemplate.configMapName }}
- mountPath: /etc/nginx/template
@@ -165,20 +176,22 @@ spec:
{{- if .Values.controller.extraContainers }}
{{ toYaml .Values.controller.extraContainers | nindent 8 }}
{{- end }}
{{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }}
{{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
initContainers:
{{- if .Values.controller.extraInitContainers }}
{{ toYaml .Values.controller.extraInitContainers | nindent 8 }}
{{- end }}
{{- if .Values.controller.extraModules }}
{{- range .Values.controller.extraModules }}
- name: {{ .Name }}
image: {{ .Image }}
command: ['sh', '-c', '/usr/local/bin/init_module.sh']
{{- $containerSecurityContext := .containerSecurityContext | default $.Values.controller.containerSecurityContext }}
{{- include "extraModules" (dict "name" .name "image" .image "containerSecurityContext" $containerSecurityContext) | nindent 8 }}
{{- end }}
{{- end }}
{{- if .Values.controller.opentelemetry.enabled}}
{{- $otelContainerSecurityContext := $.Values.controller.opentelemetry.containerSecurityContext | default $.Values.controller.containerSecurityContext }}
{{ $otelResources := $.Values.controller.opentelemetry.resources | default dict }}
{{- include "extraModules" (dict "name" "opentelemetry" "image" .Values.controller.opentelemetry.image "containerSecurityContext" $otelContainerSecurityContext "distroless" true "resources" $otelResources) | nindent 8}}
{{- end}}
{{- end }}
{{- if .Values.controller.hostNetwork }}
hostNetwork: {{ .Values.controller.hostNetwork }}
@@ -193,13 +206,13 @@ spec:
affinity: {{ toYaml .Values.controller.affinity | nindent 8 }}
{{- end }}
{{- if .Values.controller.topologySpreadConstraints }}
topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }}
topologySpreadConstraints: {{ tpl (toYaml .Values.controller.topologySpreadConstraints) $ | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }}
terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
volumes:
{{- if .Values.controller.extraModules }}
{{- if (or .Values.controller.extraModules .Values.controller.opentelemetry.enabled)}}
- name: modules
emptyDir: {}
{{- end }}
@@ -215,6 +228,13 @@ spec:
- name: webhook-cert
secret:
secretName: {{ include "ingress-nginx.fullname" . }}-admission
{{- if .Values.controller.admissionWebhooks.certManager.enabled }}
items:
- key: tls.crt
path: cert
- key: tls.key
path: key
{{- end }}
{{- end }}
{{- if .Values.controller.extraVolumes }}
{{ toYaml .Values.controller.extraVolumes | nindent 8 }}


@@ -1,4 +1,4 @@
{{- if or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both") -}}
{{- if and (eq .Values.kind "Vanilla") (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}}
{{- include "isControllerTagValid" . -}}
apiVersion: apps/v1
kind: Deployment
@@ -19,13 +19,12 @@ spec:
matchLabels:
{{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: controller
{{- if not .Values.controller.autoscaling.enabled }}
{{- if not (or .Values.controller.autoscaling.enabled .Values.controller.keda.enabled) }}
replicas: {{ .Values.controller.replicaCount }}
{{- end }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
{{- if .Values.controller.updateStrategy }}
strategy:
{{ toYaml .Values.controller.updateStrategy | nindent 4 }}
strategy: {{ toYaml .Values.controller.updateStrategy | nindent 4 }}
{{- end }}
minReadySeconds: {{ .Values.controller.minReadySeconds }}
template:
@@ -37,7 +36,7 @@ spec:
{{- end }}
{{- end }}
labels:
{{- include "ingress-nginx.selectorLabels" . | nindent 8 }}
{{- include "ingress-nginx.labels" . | nindent 8 }}
app.kubernetes.io/component: controller
{{- with .Values.controller.labels }}
{{- toYaml . | nindent 8 }}
@@ -49,6 +48,9 @@ spec:
{{- if .Values.controller.dnsConfig }}
dnsConfig: {{ toYaml .Values.controller.dnsConfig | nindent 8 }}
{{- end }}
{{- if .Values.controller.hostAliases }}
hostAliases: {{ tpl (toYaml .Values.controller.hostAliases) $ | nindent 8 }}
{{- end }}
{{- if .Values.controller.hostname }}
hostname: {{ toYaml .Values.controller.hostname | nindent 8 }}
{{- end }}
@@ -78,7 +80,7 @@ spec:
containers:
- name: {{ .Values.controller.containerName }}
{{- with .Values.controller.image }}
image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ .image }}{{- end -}}{{- if .tag }}:{{ .tag }}{{- end -}}{{- if (.digest) -}} @{{.digest}} {{- end -}}"
image: "{{- if .repository -}}{{ .repository }}{{ else }}{{ .registry }}/{{ include "ingress-nginx.image" . }}{{- end -}}:{{ .tag }}{{ include "ingress-nginx.imageDigest" . }}"
{{- end }}
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
{{- if .Values.controller.lifecycle }}
@@ -106,8 +108,12 @@ spec:
{{- if .Values.controller.startupProbe }}
startupProbe: {{ toYaml .Values.controller.startupProbe | nindent 12 }}
{{- end }}
{{- if .Values.controller.livenessProbe }}
livenessProbe: {{ toYaml .Values.controller.livenessProbe | nindent 12 }}
{{- end }}
{{- if .Values.controller.readinessProbe }}
readinessProbe: {{ toYaml .Values.controller.readinessProbe | nindent 12 }}
{{- end }}
ports:
{{- range $key, $value := .Values.controller.containerPort }}
- name: {{ $key }}
@@ -118,7 +124,7 @@ spec:
{{- end }}
{{- end }}
{{- if .Values.controller.metrics.enabled }}
- name: metrics
- name: {{ .Values.controller.metrics.portName }}
containerPort: {{ .Values.controller.metrics.port }}
protocol: TCP
{{- end }}
@@ -143,11 +149,15 @@ spec:
hostPort: {{ $key }}
{{- end }}
{{- end }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules) }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
volumeMounts:
{{- if .Values.controller.extraModules }}
{{- if (or .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
- name: modules
{{ if .Values.controller.image.chroot }}
mountPath: /chroot/modules_mount
{{ else }}
mountPath: /modules_mount
{{ end }}
{{- end }}
{{- if .Values.controller.customTemplate.configMapName }}
- mountPath: /etc/nginx/template
@@ -166,51 +176,25 @@ spec:
{{- if .Values.controller.resources }}
resources: {{ toYaml .Values.controller.resources | nindent 12 }}
{{- end }}
- name: {{ .Values.appsec.name }}
securityContext:
{{ toYaml .Values.appsec.securityContext | nindent 12 }}
{{- with .Values.appsec.image }}
image: "{{- if .registry }}{{ .registry }}/{{- end }}{{- if .repository }}{{ .repository }}/{{- end }}{{ .image }}{{- if .tag }}:{{ .tag }}{{- end }}{{- if (.digest) -}} @{{.digest}} {{- end }}"
{{- end }}
args:
- --token
- {{ .Values.appsec.agentToken }}
{{- if .Values.appsec.customFog.enabled }}
- --fog
- {{ .Values.appsec.customFog.fogAddress }}
{{- end }}
{{- if .Values.appsec.proxy }}
- --proxy
- {{ .Values.appsec.proxy }}
{{- end }}
command:
- {{ .Values.appsec.command }}
imagePullPolicy: {{ .Values.appsec.image.pullPolicy }}
resources:
{{ toYaml .Values.resources | nindent 12 }}
volumeMounts:
- name: appsec-conf
mountPath: /etc/cp/conf
- name: appsec-data
mountPath: /etc/cp/data
{{- if .Values.controller.extraContainers }}
{{ toYaml .Values.controller.extraContainers | nindent 8 }}
{{- end }}
{{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules) }}
{{- if (or .Values.controller.extraInitContainers .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
initContainers:
{{- if .Values.controller.extraInitContainers }}
{{ toYaml .Values.controller.extraInitContainers | nindent 8 }}
{{- end }}
{{- if .Values.controller.extraModules }}
{{- range .Values.controller.extraModules }}
- name: {{ .name }}
image: {{ .image }}
command: ['sh', '-c', '/usr/local/bin/init_module.sh']
volumeMounts:
- name: modules
mountPath: /modules_mount
{{- $containerSecurityContext := .containerSecurityContext | default $.Values.controller.containerSecurityContext }}
{{- include "extraModules" (dict "name" .name "image" .image "containerSecurityContext" $containerSecurityContext) | nindent 8 }}
{{- end }}
{{- end }}
{{- if .Values.controller.opentelemetry.enabled}}
{{- $otelContainerSecurityContext := $.Values.controller.opentelemetry.containerSecurityContext | default $.Values.controller.containerSecurityContext }}
{{ $otelResources := $.Values.controller.opentelemetry.resources | default dict }}
{{- include "extraModules" (dict "name" "opentelemetry" "image" .Values.controller.opentelemetry.image "containerSecurityContext" $otelContainerSecurityContext "distroless" true "resources" $otelResources) | nindent 8}}
{{- end}}
{{- end }}
{{- if .Values.controller.hostNetwork }}
hostNetwork: {{ .Values.controller.hostNetwork }}
@@ -225,19 +209,13 @@ spec:
affinity: {{ toYaml .Values.controller.affinity | nindent 8 }}
{{- end }}
{{- if .Values.controller.topologySpreadConstraints }}
topologySpreadConstraints: {{ toYaml .Values.controller.topologySpreadConstraints | nindent 8 }}
topologySpreadConstraints: {{ tpl (toYaml .Values.controller.topologySpreadConstraints) $ | nindent 8 }}
{{- end }}
serviceAccountName: {{ template "ingress-nginx.serviceAccountName" . }}
terminationGracePeriodSeconds: {{ .Values.controller.terminationGracePeriodSeconds }}
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules .Values.controller.opentelemetry.enabled) }}
volumes:
- name: appsec-conf
persistentVolumeClaim:
claimName: {{ .Values.appsec.name }}-conf
- name: appsec-data
persistentVolumeClaim:
claimName: {{ .Values.appsec.name }}-data
{{- if (or .Values.controller.customTemplate.configMapName .Values.controller.extraVolumeMounts .Values.controller.admissionWebhooks.enabled .Values.controller.extraVolumes .Values.controller.extraModules) }}
{{- if .Values.controller.extraModules }}
{{- if (or .Values.controller.extraModules .Values.controller.opentelemetry.enabled)}}
- name: modules
emptyDir: {}
{{- end }}
@@ -253,6 +231,13 @@ spec:
- name: webhook-cert
secret:
secretName: {{ include "ingress-nginx.fullname" . }}-admission
{{- if .Values.controller.admissionWebhooks.certManager.enabled }}
items:
- key: tls.crt
path: cert
- key: tls.key
path: key
{{- end }}
{{- end }}
{{- if .Values.controller.extraVolumes }}
{{ toYaml .Values.controller.extraVolumes | nindent 8 }}


@@ -1,12 +1,9 @@
{{- if and .Values.controller.autoscaling.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}}
{{- if not .Values.controller.keda.enabled }}
apiVersion: autoscaling/v2beta2
{{- if and (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) .Values.controller.autoscaling.enabled (not .Values.controller.keda.enabled) -}}
apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
kind: HorizontalPodAutoscaler
metadata:
annotations:
{{- with .Values.controller.autoscaling.annotations }}
{{- toYaml . | trimSuffix "\n" | nindent 4 }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
@@ -48,5 +45,3 @@ spec:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- end }}
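For context, a values sketch that renders this HPA (replica counts are illustrative), with `controller.kind` set to `Deployment` or `Both`; note that `controller.keda.enabled` must remain false, since the condition above skips the HPA when KEDA is active:
```
controller:
  autoscaling:
    enabled: true
    minReplicas: 2
    maxReplicas: 5
  keda:
    enabled: false
```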


@@ -25,6 +25,11 @@ spec:
cooldownPeriod: {{ .Values.controller.keda.cooldownPeriod }}
minReplicaCount: {{ .Values.controller.keda.minReplicas }}
maxReplicaCount: {{ .Values.controller.keda.maxReplicas }}
{{- with .Values.controller.keda.fallback }}
fallback:
failureThreshold: {{ .failureThreshold | default 3 }}
replicas: {{ .replicas | default $.Values.controller.keda.maxReplicas }}
{{- end }}
triggers:
{{- with .Values.controller.keda.triggers }}
{{ toYaml . | indent 2 }}
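A hedged values sketch for the new `fallback` block (the numbers are illustrative; per the template above, `failureThreshold` defaults to 3 and `replicas` to `controller.keda.maxReplicas` when omitted):
```
controller:
  keda:
    enabled: true
    fallback:
      failureThreshold: 3
      replicas: 4
```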


@@ -0,0 +1,45 @@
{{- if .Values.controller.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: controller
{{- with .Values.controller.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "ingress-nginx.controller.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
podSelector:
matchLabels:
{{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: controller
policyTypes:
- Ingress
- Egress
ingress:
- ports:
{{- range $key, $value := .Values.controller.containerPort }}
- protocol: TCP
port: {{ $value }}
{{- end }}
{{- if .Values.controller.metrics.enabled }}
- protocol: TCP
port: {{ .Values.controller.metrics.port }}
{{- end }}
{{- if .Values.controller.admissionWebhooks.enabled }}
- protocol: TCP
port: {{ .Values.controller.admissionWebhooks.port }}
{{- end }}
{{- range $key, $value := .Values.tcp }}
- protocol: TCP
port: {{ $key }}
{{- end }}
{{- range $key, $value := .Values.udp }}
- protocol: UDP
port: {{ $key }}
{{- end }}
egress:
- {}
{{- end }}
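Enabling the policy above is a one-line values change:
```
controller:
  networkPolicy:
    enabled: true
```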


@@ -10,10 +10,17 @@ metadata:
{{- end }}
name: {{ include "ingress-nginx.controller.fullname" . }}
namespace: {{ .Release.Namespace }}
{{- if .Values.controller.annotations }}
annotations: {{ toYaml .Values.controller.annotations | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: controller
{{- if and .Values.controller.minAvailable (not (hasKey .Values.controller "maxUnavailable")) }}
minAvailable: {{ .Values.controller.minAvailable }}
{{- else if .Values.controller.maxUnavailable }}
maxUnavailable: {{ .Values.controller.maxUnavailable }}
{{- end }}
{{- end }}


@@ -1,4 +1,4 @@
{{- if and .Values.controller.metrics.enabled .Values.controller.metrics.prometheusRule.enabled -}}
{{- if and ( .Values.controller.metrics.enabled ) ( .Values.controller.metrics.prometheusRule.enabled ) ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) -}}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:


@@ -1,3 +1,4 @@
{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }}
{{- if and .Values.podSecurityPolicy.enabled (empty .Values.controller.existingPsp) -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
@@ -27,9 +28,9 @@ spec:
volumes:
- 'configMap'
- 'emptyDir'
#- 'projected'
- 'projected'
- 'secret'
#- 'downwardAPI'
- 'downwardAPI'
{{- if .Values.controller.hostNetwork }}
hostNetwork: {{ .Values.controller.hostNetwork }}
{{- end }}
@@ -90,3 +91,4 @@ spec:
seLinux:
rule: 'RunAsAny'
{{- end }}
{{- end }}


@@ -59,18 +59,18 @@ rules:
- list
- watch
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
resourceNames:
- {{ .Values.controller.electionID }}
- {{ include "ingress-nginx.controller.electionID" . }}
verbs:
- get
- update
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- create
- apiGroups:
@@ -80,6 +80,14 @@ rules:
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
- apiGroups:
- openappsec.io
resources:


@@ -0,0 +1,15 @@
{{- if .Values.dhParam -}}
apiVersion: v1
kind: Secret
metadata:
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: controller
{{- with .Values.controller.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "ingress-nginx.controller.fullname" . }}
namespace: {{ .Release.Namespace }}
data:
dhparam.pem: {{ .Values.dhParam }}
{{- end }}
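Because `.Values.dhParam` lands directly in the Secret's `data` field, it must already be base64-encoded PEM. One common way to produce it (the openssl invocation is an assumption, not something this chart ships):
```
# Generating 4096-bit DH parameters can take several minutes;
# tr strips the line wrapping that base64 adds by default.
openssl dhparam 4096 2> /dev/null | base64 | tr -d '\n'

# values.yaml
# dhParam: "<base64 output from the command above>"
```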


@@ -4,7 +4,7 @@ kind: Service
metadata:
annotations:
{{- range $key, $value := .Values.controller.service.internal.annotations }}
{{ $key }}: {{ $value | quote }}
{{ $key }}: {{ tpl ($value | toString) $ | quote }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
@@ -29,9 +29,9 @@ spec:
{{- $setNodePorts := (or (eq .Values.controller.service.type "NodePort") (eq .Values.controller.service.type "LoadBalancer")) }}
{{- if .Values.controller.service.enableHttp }}
- name: http
port: {{ .Values.controller.service.ports.http }}
port: {{ .Values.controller.service.internal.ports.http | default .Values.controller.service.ports.http }}
protocol: TCP
targetPort: {{ .Values.controller.service.targetPorts.http }}
targetPort: {{ .Values.controller.service.internal.targetPorts.http | default .Values.controller.service.targetPorts.http }}
{{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }}
appProtocol: http
{{- end }}
@@ -41,9 +41,9 @@ spec:
{{- end }}
{{- if .Values.controller.service.enableHttps }}
- name: https
port: {{ .Values.controller.service.ports.https }}
port: {{ .Values.controller.service.internal.ports.https | default .Values.controller.service.ports.https }}
protocol: TCP
targetPort: {{ .Values.controller.service.targetPorts.https }}
targetPort: {{ .Values.controller.service.internal.targetPorts.https | default .Values.controller.service.targetPorts.https }}
{{- if semverCompare ">=1.20" .Capabilities.KubeVersion.Version }}
appProtocol: https
{{- end }}


@@ -31,10 +31,10 @@ spec:
externalTrafficPolicy: {{ .Values.controller.metrics.service.externalTrafficPolicy }}
{{- end }}
ports:
- name: metrics
- name: {{ .Values.controller.metrics.portName }}
port: {{ .Values.controller.metrics.service.servicePort }}
protocol: TCP
targetPort: metrics
targetPort: {{ .Values.controller.metrics.portName }}
{{- $setNodePorts := (or (eq .Values.controller.metrics.service.type "NodePort") (eq .Values.controller.metrics.service.type "LoadBalancer")) }}
{{- if (and $setNodePorts (not (empty .Values.controller.metrics.service.nodePort))) }}
nodePort: {{ .Values.controller.metrics.service.nodePort }}


@@ -4,7 +4,7 @@ kind: Service
metadata:
annotations:
{{- range $key, $value := .Values.controller.service.annotations }}
{{ $key }}: {{ $value | quote }}
{{ $key }}: {{ tpl ($value | toString) $ | quote }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
@@ -28,6 +28,9 @@ spec:
{{- if .Values.controller.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges: {{ toYaml .Values.controller.service.loadBalancerSourceRanges | nindent 4 }}
{{- end }}
{{- if .Values.controller.service.loadBalancerClass }}
loadBalancerClass: {{ .Values.controller.service.loadBalancerClass }}
{{- end }}
{{- if .Values.controller.service.externalTrafficPolicy }}
externalTrafficPolicy: {{ .Values.controller.service.externalTrafficPolicy }}
{{- end }}


@@ -11,8 +11,7 @@ metadata:
name: {{ template "ingress-nginx.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- if .Values.serviceAccount.annotations }}
annotations:
{{ toYaml .Values.serviceAccount.annotations | indent 4 }}
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
{{- end }}


@@ -5,6 +5,8 @@ metadata:
name: {{ include "ingress-nginx.controller.fullname" . }}
{{- if .Values.controller.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }}
{{- else }}
namespace: {{ .Release.Namespace }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
@@ -14,7 +16,7 @@ metadata:
{{- end }}
spec:
endpoints:
- port: metrics
- port: {{ .Values.controller.metrics.portName }}
interval: {{ .Values.controller.metrics.serviceMonitor.scrapeInterval }}
{{- if .Values.controller.metrics.serviceMonitor.honorLabels }}
honorLabels: true


@@ -19,6 +19,11 @@ spec:
replicas: {{ .Values.defaultBackend.replicaCount }}
{{- end }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
{{- if .Values.defaultBackend.updateStrategy }}
strategy:
{{ toYaml .Values.defaultBackend.updateStrategy | nindent 4 }}
{{- end }}
minReadySeconds: {{ .Values.defaultBackend.minReadySeconds }}
template:
metadata:
{{- if .Values.defaultBackend.podAnnotations }}


@@ -1,33 +1,40 @@
{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
kind: HorizontalPodAutoscaler
metadata:
{{- with .Values.defaultBackend.autoscaling.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: default-backend
{{- with .Values.defaultBackend.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ template "ingress-nginx.defaultBackend.fullname" . }}
name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ template "ingress-nginx.defaultBackend.fullname" . }}
name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
minReplicas: {{ .Values.defaultBackend.autoscaling.minReplicas }}
maxReplicas: {{ .Values.defaultBackend.autoscaling.maxReplicas }}
metrics:
{{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ . }}
{{- end }}
{{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ . }}
{{- end }}
{{- with .Values.defaultBackend.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ . }}
{{- end }}
{{- with .Values.defaultBackend.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ . }}
{{- end }}
{{- end }}
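A values sketch driving this template (the utilization targets and replica bounds are illustrative):
```
defaultBackend:
  enabled: true
  autoscaling:
    enabled: true
    minReplicas: 1
    maxReplicas: 3
    targetCPUUtilizationPercentage: 80
    targetMemoryUtilizationPercentage: 80
```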


@@ -0,0 +1,25 @@
{{- if and .Values.defaultBackend.enabled .Values.defaultBackend.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
{{- include "ingress-nginx.labels" . | nindent 4 }}
app.kubernetes.io/component: default-backend
{{- with .Values.defaultBackend.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
podSelector:
matchLabels:
{{- include "ingress-nginx.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: default-backend
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- protocol: TCP
port: {{ .Values.defaultBackend.port }}
{{- end }}


@@ -1,3 +1,4 @@
{{- if (semverCompare "<1.25.0-0" .Capabilities.KubeVersion.Version) }}
{{- if and .Values.podSecurityPolicy.enabled .Values.defaultBackend.enabled (empty .Values.defaultBackend.existingPsp) -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
@@ -34,3 +35,4 @@ spec:
- secret
- downwardAPI
{{- end }}
{{- end }}


@@ -1,10 +0,0 @@
{{- with .Values.dhParam -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "ingress-nginx.controller.fullname" $ }}
labels:
{{- include "ingress-nginx.labels" $ | nindent 4 }}
data:
dhparam.pem: {{ . }}
{{- end }}


@@ -1,4 +1,5 @@
{{- if and (eq "stand-alone" .Values.appsec.mode) (eq .Values.appsec.playground false) }}
{{- if not (eq .Values.kind "Vanilla") -}}
{{- if and (eq "standalone" .Values.appsec.mode) (eq .Values.appsec.playground false) }}
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -53,7 +54,7 @@ spec:
timeoutSeconds: 10
env:
- name: APPSEC_MODE
value: {{ .Values.appsec.mode }}
value: stand-alone
- name: RP_BASEURL
value: http://{{ .Values.appsec.storage.name }}-svc/api
- name: K8S_NAMESPACE
@@ -137,3 +138,4 @@ spec:
claimName: {{ .Values.appsec.name }}-storage
{{- end }}
{{- end }}
{{- end }}


@@ -1,4 +1,5 @@
{{- if and (eq "stand-alone" .Values.appsec.mode) (eq .Values.appsec.playground false) }}
{{- if not (eq .Values.kind "Vanilla") -}}
{{- if and (eq "standalone" .Values.appsec.mode) (eq .Values.appsec.playground false) }}
apiVersion: v1
kind: Service
metadata:
@@ -31,3 +32,4 @@ spec:
selector:
app: {{ .Values.appsec.storage.name }}-lbl
{{- end }}
{{- end }}

(File diff suppressed because it is too large.)


@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
OWNERS

(File diff suppressed because it is too large.)


@@ -0,0 +1,6 @@
dependencies:
- name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 11.9.13
digest: sha256:051285066cef2799e39e2953c4abd405c36510a09e9e1bd1833a29224daffddb
generated: "2022-12-19T11:56:46.951582785-08:00"


@@ -0,0 +1,19 @@
apiVersion: v2
appVersion: 1.1.0
dependencies:
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 11.9.13
description: The Cloud-Native Ingress and API-management
home: https://konghq.com/
icon: https://s3.amazonaws.com/downloads.kong/universe/assets/icon-kong-inc-large.png
maintainers:
- email: harry@konghq.com
name: hbagdi
- email: traines@konghq.com
name: rainest
name: open-appsec-kong
sources:
- https://github.com/Kong/charts/tree/main/charts/kong
version: 2.29.0


@@ -0,0 +1,139 @@
# Frequently Asked Questions (FAQs)
Despite the title, this is more a list of common problems.
#### Kong cannot connect to a fresh Postgres install and fails to start
If Kong is reporting that it cannot connect to Postgres because of an invalid
password on a fresh install, you likely have a leftover PersistentVolume from a
previous install using the same name. You should delete your install, delete
the associated PersistentVolumeClaim, and install again.
Postgres PVCs [are not deleted when the chart install is
deleted](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-helm-chart-issues/#persistence-volumes-pvs-retained-from-previous-releases),
and will be reused by subsequent installs if still present. Since the `kong`
user password is written to disk during database initialization only, that old
user's password is expected, not the new user's.
PVC names use the pattern `data-<release name>-postgresql-<replica index>`. If
you named your install `foo` and did not increase the Postgres replica count,
you will have a single `data-foo-postgresql-0` PVC that needs to be deleted:
```
kubectl delete pvc data-foo-postgresql-0
```
If you use a workflow that frequently deletes and re-creates installs, you
should make sure to delete PVCs when you delete the release:
```
helm delete foo; kubectl delete pvc data-foo-postgresql-0
```
#### Upgrading a release fails due to missing ServiceAccount
When upgrading a release, some configuration changes result in this error:
```
Error creating: pods "releasename-kong-pre-upgrade-migrations-" is forbidden: error looking up service account releasename-kong: serviceaccount "releasename-kong" not found
```
Enabling the ingress controller or PodSecurityPolicy requires that the Kong
chart also create a ServiceAccount. When upgrading from a configuration that
previously had neither of these features enabled, the pre-upgrade-migrations
Job attempts to use this ServiceAccount before it is created. It is [not
possible to easily handle this case automatically](https://github.com/Kong/charts/pull/31).
Users encountering this issue should temporarily modify their
[pre-upgrade-migrations template](https://github.com/Kong/charts/blob/main/charts/kong/templates/migrations-pre-upgrade.yaml),
adding the following at the bottom:
```
{{ if or .Values.podSecurityPolicy.enabled (and .Values.ingressController.enabled .Values.ingressController.serviceAccount.create) -}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "kong.serviceAccountName" . }}
namespace: {{ template "kong.namespace" . }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
labels:
{{- include "kong.metaLabels" . | nindent 4 }}
{{- end -}}
```
Upgrading with this in place will create a temporary service account before
creating the actual service account. After this initial upgrade, users must
revert to the original pre-upgrade migrations template, as leaving the
temporary ServiceAccount template in place will [cause permissions issues on
subsequent upgrades](https://github.com/Kong/charts/issues/30).
#### Running "helm upgrade" fails because of old init-migrations Job
When running `helm upgrade`, the upgrade fails and Helm reports an error
similar to the following:
```
Error: UPGRADE FAILED: cannot patch "RELEASE-NAME-kong-init-migrations" with
kind Job: Job.batch "RELEASE-NAME-kong-init-migrations" is invalid ... field
is immutable
```
This occurs if a `RELEASE-NAME-kong-init-migrations` Job is left over from a
previous `helm install` or `helm upgrade`. Deleting it with
`kubectl delete job RELEASE-NAME-kong-init-migrations` will allow the upgrade
to proceed. Chart versions greater than 1.5.0 delete the job automatically.
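In practice the recovery looks like this (RELEASE-NAME and the chart reference are placeholders for your own install):
```
kubectl delete job RELEASE-NAME-kong-init-migrations
helm upgrade RELEASE-NAME kong/kong
```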
#### DB-backed instances do not start when deployed within a service mesh
Service meshes such as Istio and Kuma, when deployed in a mode that injects a
sidecar into Kong, do not make the mesh network available to `InitContainer`s,
because the sidecar starts _after_ all `InitContainer`s finish.
By default, this chart uses init containers to ensure that the database is
online and has migrations applied before starting Kong. This provides for a
smoother startup, but isn't compatible with service mesh sidecar requirements
if Kong is to access the database through the mesh.
Setting `waitImage.enabled=false` in values.yaml disables these init containers
and resolves this issue. However, during the initial install, your Kong
Deployment will enter the CrashLoopBackOff state while waiting for migrations
to complete. It will eventually exit this state and enter Running as long as
there are no issues finishing migrations, usually within 2 minutes.
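In values.yaml form, this setting looks like:
```yaml
# Skip the database readiness/migration init containers so Pods can start
# before the mesh sidecar is available (expect temporary CrashLoopBackOff).
waitImage:
  enabled: false
```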
If your Deployment is stuck in CrashLoopBackOff for longer, check the init
migrations Job logs to see if it is unable to connect to the database or unable
to complete migrations for some other reason. Resolve any issues you find,
delete the release, and attempt to install again.
#### Kong fails to start after `helm upgrade` when Postgres is used
As of Kong chart 2.8, this issue is no longer present. 2.8 updates the Postgres
sub-chart to a version that checks for existing password Secrets and leaves
them as-is rather than overwriting them.
You may be running into this issue: https://github.com/helm/charts/issues/12575,
which is caused by https://github.com/helm/helm/issues/3053. The Postgres
database retains the old password on its persistent volume, while the
regenerated Secret contains a new password. Kong uses the new password, so
password-based authentication fails.
The solution is to specify a password for the `postgresql` chart, ensuring that
the password is not generated randomly but is set to the same user-provided
value on each upgrade.
The Postgres chart provides [two options](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#postgresql-common-parameters)
for setting a password:
- `auth.password` sets a password directly in values.yaml, in cleartext. This
is fine if you are using the instance for testing and have no security
concerns.
- `auth.existingSecret` specifies a Secret that contains [specific keys](https://github.com/bitnami/charts/blob/a6146a1ed392c8683c30b21e3fef905d86b0d2d6/bitnami/postgresql/values.yaml#L134-L143).
This should be used if you need to properly secure the Postgres instance.
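As a rough values.yaml sketch for the Kong chart (the Secret name here is
hypothetical, and the keys are nested under `postgresql` because Postgres runs
as a sub-chart):
```yaml
postgresql:
  auth:
    # Option 1: fixed cleartext password; acceptable for testing only
    password: kong-test-password
    # Option 2 (preferred for production): reference an existing Secret
    # containing the expected keys instead of a cleartext password
    # existingSecret: my-postgres-credentials
```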
If you have already upgraded, the old password is lost. You will need to
delete the Helm release and the Postgres PersistentVolumeClaim before
re-installing with a non-random password.

File diff suppressed because it is too large


@@ -0,0 +1,805 @@
# Upgrade considerations
New versions of the Kong chart may add significant new functionality or
deprecate/entirely remove old functionality. This document covers how and why
users should update their chart configuration to take advantage of new features
or migrate away from deprecated features.
In general, breaking changes deprecate their old features before removing them
entirely. While support for the old functionality remains, the chart will show
a warning about the outdated configuration when running `helm
install/status/upgrade`.
Note that not all versions contain breaking changes. If a version is not
present in the table of contents, it requires no version-specific changes when
upgrading from a previous version.
## Table of contents
- [Upgrade considerations for all versions](#upgrade-considerations-for-all-versions)
- [2.26.0](#2260)
- [2.19.0](#2190)
- [2.13.0](#2130)
- [2.8.0](#280)
- [2.7.0](#270)
- [2.4.0](#240)
- [2.3.0](#230)
- [2.2.0](#220)
- [2.1.0](#210)
- [2.0.0](#200)
- [1.14.0](#1140)
- [1.11.0](#1110)
- [1.10.0](#1100)
- [1.9.0](#190)
- [1.6.0](#160)
- [1.5.0](#150)
- [1.4.0](#140)
- [1.3.0](#130)
## Upgrade considerations for all versions
The chart automates the
[upgrade migration process](https://github.com/Kong/kong/blob/master/UPGRADE.md).
When running `helm upgrade`, the chart spawns an initial job to run `kong
migrations up` and then spawns new Kong pods with the updated version. Once
these pods become ready, they begin processing traffic and old pods are
terminated. Once this is complete, the chart spawns another job to run `kong
migrations finish`.
If you split your Kong deployment across multiple Helm releases (to create
proxy-only and admin-only nodes, for example), you must
[set which migration jobs run based on your upgrade order](https://github.com/Kong/charts/blob/main/charts/kong/README.md#separate-admin-and-proxy-nodes).
However, this does not apply to hybrid mode, which can run both migrations but
requires [upgrading the control plane version
first](https://docs.konghq.com/gateway/latest/plan-and-deploy/hybrid-mode/#version-compatibility).
While the migrations themselves are automated, the chart does not automatically
ensure that you follow the recommended upgrade path. If you are upgrading from
more than one minor Kong version back, check the [upgrade path
recommendations for Kong open source](https://github.com/Kong/kong/blob/master/UPGRADE.md#3-suggested-upgrade-path)
or [Kong Enterprise](https://docs.konghq.com/enterprise/latest/deployment/migrations/).
Although not required, users should upgrade their chart version and Kong
version independently. In the event of any issues, this will help clarify
whether the issue stems from changes in Kubernetes resources or changes in
Kong.
Users may encounter an error when upgrading which displays a large block of
text ending with `field is immutable`. This is typically due to a bug with the
`init-migrations` job, which was not removed automatically prior to 1.5.0.
If you encounter this error, deleting any existing `init-migrations` jobs will
clear it.
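For example, for a release named `foo`:
```
kubectl delete job foo-kong-init-migrations
```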
### Updates to CRDs
Helm installs CRDs at initial install but [does not update them
after](https://github.com/helm/community/blob/main/hips/hip-0011.md). Some
chart releases include updates to CRDs that must be applied to successfully
upgrade. Because Helm does not handle these updates, you must manually apply
them before upgrading your release.
```
kubectl apply -f https://raw.githubusercontent.com/Kong/charts/kong-<version>/charts/kong/crds/custom-resource-definitions.yaml
```
For example, if your release is 2.6.4, you would apply
`https://raw.githubusercontent.com/Kong/charts/kong-2.6.4/charts/kong/crds/custom-resource-definitions.yaml`.
## 2.26.0
If you are using controller version 2.10 or lower and proxy version 3.3 or
higher in separate Deployments (such as when using the `ingress` chart), proxy
Pods will not become ready unless you override the default readiness endpoint:
```
readinessProbe:
  httpGet:
    path: /status
```
This section goes under the `gateway` section when using the `ingress` chart.
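For example, a values.yaml sketch for the `ingress` chart:
```yaml
gateway:
  readinessProbe:
    httpGet:
      path: /status
```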
2.26 changes the default proxy readiness endpoint to the `/status/ready`
endpoint introduced in Kong 3.3. This endpoint reports true when Kong has
configuration available, whereas the previous `/status` endpoint returned true
immediately after start, and could result in proxy instances attempting to
serve requests before they had configuration.
The chart has logic to fall back to the older endpoint if the proxy and
controller versions do not work well with the new endpoint. However, the chart
detection cannot determine the controller version when the controller is in a
separate Deployment, and will always use the new endpoint if the Kong image
version is 3.3 or higher.
Kong recommends that users of Kong 3.3 and higher update to controller 2.11 at
their earliest convenience to take advantage of the improved readiness behavior.
## 2.19.0
2.19 sets a default [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
that declares a read-only root filesystem for Kong containers. The base Kong and KIC
images are compatible with this setting. The chart mounts temporary writeable
emptyDir filesystems for locations that require writeable files (`/tmp` and
`/kong_prefix/`).
This setting limits the attack surface and should be compatible with most
installations. However, if you use custom plugins that write to disk, you must
either mount a writeable emptyDir for them or override the new defaults by
setting:
```
containerSecurityContext:
  readOnlyRootFilesystem: false
```
in your values.yaml.
## 2.13.0
2.13.0 includes updated CRDs. You must [apply these manually](#updates-to-crds)
before upgrading an existing release.
2.13 changes the default Kong tag to 3.0 and the default KIC tag to 2.6. We
recommend that you set these versions (`image.tag` and
`ingressController.image.tag`) in your values.yaml to allow updating the chart
without also updating the container versions. If you do update to these
container image versions, you should first review the Kong 3.0 breaking changes
(see the [open
source](https://github.com/Kong/kong/blob/master/CHANGELOG.md#300) and
[Enterprise](https://docs.konghq.com/gateway/changelog/#3000) Kong changelogs)
and the [ingress controller upgrade guide for Kong
3.x](https://docs.konghq.com/kubernetes-ingress-controller/2.6.x/guides/upgrade-kong-3x).
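For example, pinning both image versions to the 2.13 defaults in values.yaml
looks roughly like this:
```yaml
image:
  tag: "3.0"
ingressController:
  image:
    tag: "2.6"
```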
Kong 3.0 requires KIC version 2.6 at minimum. It will not work with any
previous versions. Furthermore, changes to regular expression paths in Kong 3.x
require changes to Ingresses that use regular expression paths in their rules.
## 2.8.0
### IngressClass controller name change requires manual delete
2.8 updates the chart-managed IngressClass's controller name to match the
controller name used elsewhere in Kong's documentation. Controller names are
immutable, so Helm cannot actually update existing IngressClass resources.
Prior to your upgrade, you must delete the existing IngressClass. Helm will
create a new IngressClass with the new controller name during the upgrade:
```
kubectl delete ingressclass <class name, "kong" by default>
helm upgrade RELEASE_NAME kong/kong ...
```
Removing the IngressClass will not affect configuration: the controller
IngressClass implementation is still in progress, and it will still ingest
resources whose `ingress.class` annotation or `ingressClassName` value matches
the `CONTROLLER_INGRESS_CLASS` value in the controller environment even if
no matching IngressClass exists.
### Postgres subchart version update
2.8 updates the Postgres subchart version from 8.6.8 to 11.1.15. This changes
a number of values.yaml keys and the default Postgres version. The previous
default Postgres version was [11.7.0-debian-10-r37](https://github.com/bitnami/charts/blob/590c6b0f4e07161614453b12efe71f22e0c00a46/bitnami/postgresql/values.yaml#L18).
To use the new version on an existing install, you should [follow Bitnami's
instructions for updating values.yaml keys and upgrading their chart]() as well
as [the Postgres upgrade instructions](https://www.postgresql.org/docs/current/upgrading.html).
You can alternately use the new chart without upgrading Postgres by setting
`postgresql.image.tag=11.7.0-debian-10-r37` or use the old version of the
chart. Helm documentation is unclear on whether ignoring a subchart version
change for a release is possible, so we recommend [dumping the
database](https://www.postgresql.org/docs/current/backup-dump.html) and
creating a separate release if you wish to continue using 8.6.8:
```
$ helm install my-release -f values.yaml --version 8.6.8 bitnami/postgresql
```
Afterwards, you will upgrade your Kong chart release with
`postgresql.enabled=false` and `env.pg_host` and `env.pg_password` set to the
appropriate hostname and Secret reference for your new release (these are set
automatically when the subchart is enabled, but will not be set automatically
with a separate release).
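A rough sketch of that follow-up configuration, with hypothetical hostname and
Secret names (the password key name depends on your Postgres chart version):
```yaml
postgresql:
  enabled: false
env:
  # Hostname of the Postgres Service from the separate release (hypothetical)
  pg_host: my-release-postgresql.default.svc.cluster.local
  # Reference the password Secret created by the separate release
  pg_password:
    valueFrom:
      secretKeyRef:
        name: my-release-postgresql
        key: postgresql-password
```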
## 2.7.0
2.7 updates CRDs to the version released in KIC 2.1.0. Helm does not upgrade
CRDs automatically; you must `kubectl apply -f https://raw.githubusercontent.com/Kong/charts/kong-2.7.0/charts/kong/crds/custom-resource-definitions.yaml`
manually before upgrading.
You should not apply the updated CRDs until you are prepared to upgrade to KIC
2.1 or higher, and [must have first upgraded to 2.0](https://github.com/Kong/kubernetes-ingress-controller/blob/v2.1.1/CHANGELOG.md#breaking-changes)
and applied the [previous version of the CRDs](https://raw.githubusercontent.com/Kong/charts/kong-2.6.4/charts/kong/crds/custom-resource-definitions.yaml).
## 2.4.0
### Disable ingress controller prior to 2.x upgrade when using PostgreSQL
Chart version 2.4 is the first Kong chart version that defaults to the 2.x
series of ingress controller releases. 2.x uses a different leader election
system than 1.x. If both versions are running simultaneously, both controller
versions will attempt to interact with the admin API, potentially setting
inconsistent configuration in the database when PostgreSQL is the backend.
If you are configured with the following:
- ingressController.enabled=true
- postgresql.enabled=true
and do not override the ingress controller version, you must perform the
upgrade in multiple steps:
First, pin the controller version and upgrade to chart 2.4.0:
```console
$ helm upgrade --wait \
  --set ingressController.image.tag=<CURRENT_CONTROLLER_VERSION> \
  --version 2.4.0 \
  --namespace <YOUR_RELEASE_NAMESPACE> \
  <YOUR_RELEASE_NAME> kong/kong
```
Second, temporarily disable the ingress controller:
```console
$ helm upgrade --wait \
  --set ingressController.enabled=false \
  --set deployment.serviceAccount.create=true \
  --version 2.4.0 \
  --namespace <YOUR_RELEASE_NAMESPACE> \
  <YOUR_RELEASE_NAME> kong/kong
```
Finally, re-enable the ingress controller at the new version:
```console
$ helm upgrade --wait \
  --set ingressController.enabled=true \
  --set ingressController.image.tag=<NEW_CONTROLLER_VERSION> \
  --version 2.4.0 \
  --namespace <YOUR_RELEASE_NAMESPACE> \
  <YOUR_RELEASE_NAME> kong/kong
```
While the controller is disabled, changes to Kubernetes configuration (Ingress
resources, KongPlugin resources, Service Endpoints, etc.) will not update Kong
proxy configuration. We recommend that you establish a maintenance window
during which to perform this upgrade, and that you inform users and
stakeholders in advance to avoid unexpected disruption.
### Changed ServiceAccount configuration location
2.4.0 moved ServiceAccount configuration from
`ingressController.serviceAccount` to `deployment.serviceAccount` to accommodate
configurations that required a ServiceAccount but did not use the controller.
The chart now creates a ServiceAccount by default. When enabled, upgrade
migration hooks require the ServiceAccount, but Helm will not create it before
the hooks run, and the migration jobs will fail. To avoid this, first perform
an initial chart upgrade that does not update the Kong image version and sets
`migrations.preUpgrade=false` and `migrations.postUpgrade=false`. This will
create the account for future upgrades, and you can re-enable migrations and
upgrade your Kong version after.
If you disable ServiceAccount or override its name, you must move your
configuration under `deployment.serviceAccount`. The chart will warn you if it
detects non-default configuration in the original location when you upgrade.
You can use `helm upgrade --dry-run` to see if you are affected before actually
upgrading.
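For example, configuration that previously lived under
`ingressController.serviceAccount` moves as follows (the custom name is
hypothetical):
```yaml
deployment:
  serviceAccount:
    create: true
    name: my-kong-serviceaccount  # hypothetical custom account name
```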
## 2.3.0
### Updated CRDs and CRD API version
2.3.0 adds new and updated CRDs for KIC 2.x. These CRDs are compatible with
KIC 1.x also. The CRD API version is now v1, replacing the deprecated v1beta1,
to support Kubernetes 1.22 and onward. API version v1 requires Kubernetes 1.16
and newer.
Helm 2-style CRD management will upgrade CRDs automatically. You can check to
see if you are using Helm 2-style management by running:
```
kubectl get crd kongconsumers.configuration.konghq.com -o yaml | grep "meta.helm.sh/release-name"
```
If you see output, you are using Helm 2-style CRD management.
Helm 3-style CRD management (the default) does not upgrade CRDs automatically.
You must apply the changes manually by running:
```
kubectl apply -f https://raw.githubusercontent.com/Kong/charts/kong-2.2.0/charts/kong/crds/custom-resource-definitions.yaml
```
Although not recommended, you can remain on an older Kubernetes version and not
upgrade your CRDs if you are using Helm 3-style CRD management. However, you
will not be able to run KIC 2.x, and these configurations are considered
unsupported.
### Ingress controller feature detection
2.3.0 includes some features that are enabled by default, but require KIC 2.x.
KIC 2.x is not yet the default ingress controller version because there are
currently only preview releases for it. To maintain compatibility with KIC 1.x,
the chart automatically detects the KIC image version and disables incompatible
features. This feature detection requires a semver image tag, and the chart
cannot render successfully if the image tag is not semver-compliant.
Standard KIC images do use semver-compliant tags, and you do not need to make
any configuration changes if you use one. If you use a non-semver tag, such as
`next`, you must set the new `ingressController.image.effectiveSemver` field to
your approximate semver version. For example, if your `next` tag is for an
unreleased `2.1.0` KIC version, you should set `effectiveSemver: 2.1.0`.
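For example, a sketch of the non-semver tag case described above:
```yaml
ingressController:
  image:
    tag: next
    effectiveSemver: 2.1.0
```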
## 2.2.0
### Changes to pod disruption budget defaults
Prior to 2.2.0, the default values.yaml included
`podDisruptionBudget.maxUnavailable: 50%`. This prevented setting
`podDisruptionBudget.minUnavailable` at all. To allow use of
`podDisruptionBudget.minUnavailable`, we have removed the
`podDisruptionBudget.maxUnavailable` default. If you previously relied on this
default (you set `podDisruptionBudget.enabled: true` but did not set
`podDisruptionBudget.maxUnavailable`), you now must explicitly set
`podDisruptionBudget.maxUnavailable: 50%` in your values.yaml.
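In values.yaml, restoring the old default looks like:
```yaml
podDisruptionBudget:
  enabled: true
  maxUnavailable: "50%"
```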
## 2.1.0
### Migration off Bintray
Bintray, the Docker registry previously used for several images used by this
chart, is [sunsetting May 1,
2021](https://jfrog.com/blog/into-the-sunset-bintray-jcenter-gocenter-and-chartcenter/).
The chart default `values.yaml` now uses the new Docker Hub repositories for all
affected images. You should check your release `values.yaml` files to confirm that
they do not still reference Bintray repositories. If they do, update them to
use the Docker Hub repositories now in the default `values.yaml`.
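One quick way to check (a sketch; substitute your release name):
```
helm get values <YOUR_RELEASE_NAME> | grep -i bintray
```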
## 2.0.0
### Support for Helm 2 dropped
2.0.0 takes advantage of template functionality that is only available in Helm
3 and reworks values defaults to target Helm 3 CRD handling, and requires Helm
3 as such. If you are not already using Helm 3, you must migrate to it before
updating to 2.0.0 or later:
https://helm.sh/docs/topics/v2_v3_migration/
If desired, you can migrate your Kong chart releases without also migrating
your other charts' releases.
### Support for deprecated 1.x features removed
Several previous 1.x chart releases reworked sections of values.yaml while
maintaining support for the older version of those settings. 2.x drops support
for the older versions of these settings entirely:
* [Portal auth settings](#removal-of-dedicated-portal-authentication-configuration-parameters)
* [The `runMigrations` setting](#changes-to-migration-job-configuration)
* [Single-stack admin API Service configuration](#changes-to-kong-service-configuration)
* [Multi-host proxy configuration](#removal-of-multi-host-proxy-ingress)
Each deprecated setting is accompanied by a warning that appears at the end of
`helm upgrade` output on a 1.x release:
```
WARNING: You are currently using legacy ...
```
If you do not see any such warnings when upgrading a release using chart
1.15.0, you are not using deprecated configuration and are ready to upgrade to
2.0.0. If you do see these warnings, follow the linked instructions to migrate
to the current settings format.
## 1.14.0
### Removal of multi-host proxy Ingress
Most of the chart's Ingress templates support a single hostname and TLS Secret.
The proxy Ingress template originally differed, and allowed multiple hostnames
and TLS configurations. As of chart 1.14.0, we have deprecated the unique proxy
Ingress configuration; it is now identical to all other Kong services. If you
do not need to configure multiple Ingress rules for your proxy, you will
change:
```yaml
ingress:
  hosts: ["proxy.kong.example"]
  tls:
  - hosts:
    - proxy.kong.example
    secretName: example-tls-secret
  path: /
```
to:
```yaml
ingress:
  tls: example-tls-secret
  hostname: proxy.kong.example
  path: /
```
We plan to remove support for the multi-host configuration entirely in version
2.0 of the chart. If you currently use multiple hosts, we recommend that you
either:
- Define Ingresses for each application, e.g. if you proxy applicationA at
`foo.kong.example` and applicationB at `bar.kong.example`, you deploy those
applications with their own Ingress resources that target the proxy.
- Define a multi-host Ingress manually. Before upgrading, save your current
proxy Ingress, delete labels from the saved copy, and set
`proxy.ingress.enabled=false`. After upgrading, create your Ingress from the
saved copy and edit it directly to add new rules.
We expect that most users do not need a built-in multi-host proxy Ingress or
even a proxy Ingress at all: the old configuration predates the Kong Ingress
Controller and is most useful if you place Kong behind some other controller.
If you are interested in preserving this functionality, please [discuss your
use case with us](https://github.com/Kong/charts/issues/73). If there is
sufficient interest, we will explore options for continuing to support the
original proxy Ingress configuration format.
### Default custom server block replaced with status listen
Earlier versions of the chart included [a custom server block](https://github.com/Kong/charts/blob/kong-1.13.0/charts/kong/templates/config-custom-server-blocks.yaml)
to provide `/status` and `/metrics` endpoints. This server block simplified
RBAC-enabled Enterprise deployments by providing access to these endpoints
outside the (protected) admin API.
Current versions (Kong 1.4.0+ and Kong Enterprise 1.5.0+) have a built-in
status listen that provides the same functionality, and chart 1.14.0 uses it
for readiness/liveness probes and the Prometheus service monitor.
If you are using a version that supports the new status endpoint, you do not
need to make any changes to your values unless you include `readinessProbe` and
`livenessProbe` in them. If you do, you must change the port from `metrics` to
`status`.
If you are using an older version that does not support the status listen, you
will need to:
- Create the server block ConfigMap independent of the chart. You will need to
set the ConfigMap name and namespace manually and remove the labels block.
- Add an `extraConfigMaps` values entry for your ConfigMap.
- Set `env.nginx_http_include` to `/path/to/your/mount/servers.conf`.
- Add the [old readiness/liveness probe blocks](https://github.com/Kong/charts/blob/kong-1.13.0/charts/kong/values.yaml#L437-L458)
to your values.yaml.
- If you use the Prometheus service monitor, edit it after installing the chart
and set `targetPort` to `9542`. This cannot be set from values.yaml, but Helm
3 will preserve the change on subsequent upgrades.
## 1.11.0
### `KongCredential` custom resources no longer supported
1.11.0 updates the default Kong Ingress Controller version to 1.0. Controller
1.0 removes support for the deprecated KongCredential resource. Before
upgrading to chart 1.11.0, you must convert existing KongCredential resources
to [credential Secrets](https://github.com/Kong/kubernetes-ingress-controller/blob/next/docs/guides/using-consumer-credential-resource.md#provision-a-consumer).
Custom resource management varies depending on your exact chart configuration.
By default, Helm 3 only creates CRDs in the `crds` directory if they are not
already present, and does not modify or remove them after. If you use this
management method, you should create a manifest file that contains [only the
KongCredential CRD](https://github.com/Kong/charts/blob/kong-1.10.0/charts/kong/crds/custom-resource-definitions.yaml#L35-L68)
and then [delete it](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#delete-a-customresourcedefinition).
Helm 2 and Helm 3 both allow managing CRDs via the chart. In Helm 2, this is
required; in Helm 3, it is optional. When using this method, only a single
release will actually manage the CRD. Check to see which release has
`ingressController.installCRDs: true` to determine which does so if you have
multiple releases. When using this management method, upgrading a release to
chart 1.11.0 will delete the KongCredential CRD during the upgrade, which will
_delete any existing KongCredential resources_. To avoid losing configuration,
check to see if your CRD is managed:
```
kubectl get crd kongcredentials.configuration.konghq.com -o yaml | grep "app.kubernetes.io/managed-by: Helm"
```
If that command returns output, your CRD is managed and you must convert to
credential Secrets before upgrading (you should do so regardless, but are not
at risk of losing data, and can downgrade to an older chart version if you have
issues).
### Changes to CRDs
Controller 1.0 [introduces a status field](https://github.com/Kong/kubernetes-ingress-controller/blob/main/CHANGELOG.md#added)
for its custom resources. By default, Helm 3 does not apply updates to custom
resource definitions if those definitions are already present on the Kubernetes
API server (and they will be if you are upgrading a release from a previous
chart version). To update your custom resources:
```
kubectl apply -f https://raw.githubusercontent.com/Kong/charts/main/charts/kong/crds/custom-resource-definitions.yaml
```
### Deprecated controller flags/environment variables and annotations removed
Kong Ingress Controller 0.x versions had a number of deprecated
flags/environment variables and annotations. Version 1.0 removes support for
these, and you must update your configuration to use their modern equivalents
before upgrading to chart 1.11.0.
The [controller changelog](https://github.com/Kong/kubernetes-ingress-controller/blob/master/CHANGELOG.md#breaking-changes)
provides links to lists of deprecated configuration and their replacements.
## 1.10.0
### `KongClusterPlugin` replaces global `KongPlugin`s
Kong Ingress Controller 0.10.0 no longer supports `KongPlugin`s with a `global: true` label. See the [KIC changelog for 0.10.0](https://github.com/Kong/kubernetes-ingress-controller/blob/main/CHANGELOG.md#0100---20200915) for migration hints.
### Dropping support for resources not specifying an ingress class
Kong Ingress Controller 0.10.0 drops support for certain kinds of resources without a `kubernetes.io/ingress.class` annotation. See the [KIC changelog for 0.10.0](https://github.com/Kong/kubernetes-ingress-controller/blob/main/CHANGELOG.md#0100---20200915) for the exact list of those kinds, and for possible migration paths.
## 1.9.0
### New image for Enterprise controller-managed DB-less deployments
As of Kong Enterprise 2.1.3.0, there is no longer a separate image
(`kong-enterprise-k8s`) for controller-managed DB-less deployments. All Kong
Enterprise deployments now use the `kong-enterprise-edition` image.
Existing users of the `kong-enterprise-k8s` image can use the latest
`kong-enterprise-edition` image as a drop-in replacement for the
`kong-enterprise-k8s` image. You will also need to [create a Docker registry
secret](https://github.com/Kong/charts/blob/main/charts/kong/README.md#kong-enterprise-docker-registry-access)
for the `kong-enterprise-edition` registry and add it to `image.pullSecrets` in
values.yaml if you do not have one already.
### Changes to wait-for-postgres image
Prior to 1.9.0, the chart launched a busybox initContainer for migration Pods
to check Postgres' reachability [using
netcat](https://github.com/Kong/charts/blob/kong-1.8.0/charts/kong/templates/_helpers.tpl#L626).
As of 1.9.0, the chart uses a [bash
script](https://github.com/Kong/charts/blob/kong-1.9.0/charts/kong/templates/wait-for-postgres-script.yaml)
to perform the same connectivity check. The default `waitImage.repository`
value is now `bash` rather than `busybox`. Double-check your values.yaml to
confirm that you do not set `waitImage.repository` and `waitImage.tag` to the
old defaults: if you do, remove that configuration before upgrading.
The Helm upgrade cycle requires that this script be available for upgrade jobs.
On existing installations, you must first perform an initial `helm upgrade
--set migrations.preUpgrade=false --set migrations.postUpgrade=false` to chart
1.9.0.
Perform this initial upgrade without making changes to your Kong image version:
if you are upgrading Kong along with the chart, perform a separate upgrade
after with the migration jobs re-enabled.
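Spelled out as a full command, that initial upgrade might look like the
following (release name and namespace are placeholders):
```console
$ helm upgrade --wait \
  --set migrations.preUpgrade=false \
  --set migrations.postUpgrade=false \
  --version 1.9.0 \
  --namespace <YOUR_RELEASE_NAMESPACE> \
  <YOUR_RELEASE_NAME> kong/kong
```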
If you do not override `waitImage.repository` in your releases, you do not need
to make any other configuration changes when upgrading to 1.9.0.
If you do override `waitImage.repository` to use a custom image, you must
switch to a custom image that provides a `bash` executable. Note that busybox
images, or images derived from it, do _not_ include a `bash` executable. We
recommend switching to an image derived from the public bash Docker image or a
base operating system image that provides a `bash` executable.
## 1.6.0
### Changes to Custom Resource Definitions
The KongPlugin and KongClusterPlugin resources have changed. Helm 3's CRD
management system does not modify CRDs during `helm upgrade`, and these must be
updated manually:
```
kubectl apply -f https://raw.githubusercontent.com/Kong/charts/kong-1.6.0/charts/kong/crds/custom-resource-definitions.yaml
```
Existing plugin resources do not require changes; the CRD update only adds new
fields.
### Removal of default security context UID setting
Versions of Kong prior to 2.0 and Kong Enterprise prior to 1.3 use Docker
images that required setting a UID via Kubernetes in some environments
(primarily OpenShift). This is no longer necessary with modern Docker images
and can cause issues depending on other environment settings, so it was
removed.
Most users should not need to take any action, but if you encounter permissions
errors when upgrading (`kubectl describe pod PODNAME` will show any such
errors), you can restore the old behavior by adding the following to your
values.yaml:
```
securityContext:
  runAsUser: 1000
```
## 1.5.0
### PodSecurityPolicy defaults to read-only root filesystem
1.5.0 defaults to using a read-only root container filesystem if
`podSecurityPolicy.enabled: true` is set in values.yaml. This improves
security, but is incompatible with Kong Enterprise versions prior to 1.5. If
you use an older version and enable PodSecurityPolicy, you must set
`podSecurityPolicy.spec.readOnlyRootFilesystem: false`.
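In values.yaml, that override is roughly:
```yaml
podSecurityPolicy:
  enabled: true
  spec:
    readOnlyRootFilesystem: false
```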
Kong open-source and Kong for Kubernetes Enterprise are compatible with a
read-only root filesystem on all versions.
### Changes to migration job configuration
Previously, all migration jobs were enabled/disabled through a single
`runMigrations` setting. 1.5.0 splits these into toggles for each of the
individual upgrade migrations:
```
migrations:
  preUpgrade: true
  postUpgrade: true
```
Initial migration jobs are now only run during `helm install` and are deleted
automatically when users first run `helm upgrade`.
Users should replace `runMigrations` with the above block from the latest
values.yaml.
The new format addresses several needs:
* The initial migrations job is only created during the initial install,
preventing [conflicts on upgrades](https://github.com/Kong/charts/blob/main/charts/kong/FAQs.md#running-helm-upgrade-fails-because-of-old-init-migrations-job).
* The upgrade migration jobs can be disabled as needed for managing
[multi-release clusters](https://github.com/Kong/charts/blob/main/charts/kong/README.md#separate-admin-and-proxy-nodes).
This enables management of clusters that have nodes with different roles,
e.g. nodes that only run the proxy and nodes that only run the admin API.
* Migration jobs now allow specifying annotations, and provide a default set
of annotations that disable some service mesh sidecars. Because sidecar
containers do not terminate, they [prevent the jobs from completing](https://github.com/kubernetes/kubernetes/issues/25908).
## 1.4.0
### Changes to default Postgres permissions
The [Postgres sub-chart](https://github.com/bitnami/charts/tree/master/bitnami/postgresql)
used by this chart has modified the way it handles file permissions.
This is not an issue for new installations, but prevents Postgres from starting
if its PVC was created with an older version. If affected, your Postgres pod
logs will show:
```
postgresql 19:16:04.03 INFO ==> ** Starting PostgreSQL **
2020-03-27 19:16:04.053 GMT [1] FATAL: data directory "/bitnami/postgresql/data" has group or world access
2020-03-27 19:16:04.053 GMT [1] DETAIL: Permissions should be u=rwx (0700).
```
You can restore the old permission handling behavior by adding two settings to
the `postgresql` block in values.yaml:
```yaml
postgresql:
  enabled: true
  postgresqlDataDir: /bitnami/postgresql/data
  volumePermissions:
    enabled: true
```
For background, see https://github.com/helm/charts/issues/13651
### `strip_path` now defaults to `false` for controller-managed routes
1.4.0 defaults to version 0.8 of the ingress controller, which changes the
default value of the `strip_path` route setting from `true` to `false`. To
understand how this works in practice, compare the upstream path for these
requests when `strip_path` is toggled:
| Ingress path | `strip_path` | Request path | Upstream path |
|--------------|--------------|--------------|---------------|
| /foo/bar | true | /foo/bar/baz | /baz |
| /foo/bar | false | /foo/bar/baz | /foo/bar/baz |
This change brings the controller in line with the Kubernetes Ingress
specification, which expects that controllers will not modify the request
before passing it upstream unless explicitly configured to do so.
To preserve your existing route handling, you should add this annotation to
your ingress resources:
```
konghq.com/strip-path: "true"
```
This is a new annotation that is equivalent to the `route.strip_path` setting
in KongIngress resources. Note that if you have already set this to `false`,
you should leave it as-is and not add an annotation to the ingress.
### Changes to Kong service configuration
1.4.0 reworks the templates and configuration used to generate Kong
configuration and Kubernetes resources for Kong's services (the admin API,
proxy, Developer Portal, etc.). For the admin API, this requires breaking
changes to the configuration format in values.yaml. Prior to 1.4.0, the admin
API allowed a single listen only, which could be toggled between HTTPS and
HTTP:
```yaml
admin:
  enabled: false # create Service
  useTLS: true
  servicePort: 8444
  containerPort: 8444
```
In 1.4.0+, the admin API allows enabling or disabling the HTTP and TLS listens
independently. The equivalent of the above configuration is:
```yaml
admin:
  enabled: false # create Service
  http:
    enabled: false # create HTTP listen
    servicePort: 8001
    containerPort: 8001
    parameters: []
  tls:
    enabled: true # create HTTPS listen
    servicePort: 8444
    containerPort: 8444
    parameters:
    - http2
```
All Kong services now support `SERVICE.enabled` parameters: these allow
disabling the creation of a Kubernetes Service resource for that Kong service,
which is useful in configurations where nodes have different roles, e.g. where
some nodes only handle proxy traffic and some only handle admin API traffic. To
disable a Kong service completely, you should also set `SERVICE.http.enabled:
false` and `SERVICE.tls.enabled: false`. Disabling creation of the Service
resource alone leaves the Kong service enabled, but accessible only from within
its pod. The admin API is configured with only Service creation disabled so
that the ingress controller can access it without exposing it to other pods.
Services now also include a new `parameters` section that allows setting
additional listen options, e.g. the `reuseport` and `backlog=16384` parameters
from the [default 2.0.0 proxy
listen](https://github.com/Kong/kong/blob/2.0.0/kong.conf.default#L186). For
compatibility with older Kong versions, the chart defaults do not enable most
of the newer parameters, only HTTP/2 support. Users of versions 1.3.0 and newer
can safely add the new parameters.
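For example, a sketch that enables the newer proxy TLS listen parameters
mentioned above (safe on Kong 1.3.0+):
```yaml
proxy:
  tls:
    parameters:
    - http2
    - reuseport
    - backlog=16384
```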
## 1.3.0
### Removal of dedicated Portal authentication configuration parameters
1.3.0 deprecates the `enterprise.portal.portal_auth` and
`enterprise.portal.session_conf_secret` settings in values.yaml in favor of
placing equivalent configuration under `env`. These settings are less important
in Kong Enterprise 0.36+, as they can both be set per workspace in Kong
Manager.
These settings provide the default settings for Portal instances: when the
"Authentication plugin" and "Session Config" dropdowns at
https://manager.kong.example/WORKSPACE/portal/settings/ are set to "Default",
the settings from `KONG_PORTAL_AUTH` and `KONG_PORTAL_SESSION_CONF` are used.
If these environment variables are not set, the defaults are to use
`basic-auth` and `{}` (which applies the [session plugin default
configuration](https://docs.konghq.com/hub/kong-inc/session/)).
If you set nonstandard defaults and wish to keep using these settings, or use
Kong Enterprise 0.35 (which did not provide a means to set per-workspace
session configuration), you should convert them to environment variables. For
example, if you currently have:
```yaml
portal:
  enabled: true
  portal_auth: basic-auth
  session_conf_secret: portal-session
```
You should remove the `portal_auth` and `session_conf_secret` entries and
replace them with their equivalents under the `env` block:
```yaml
env:
  portal_auth: basic-auth
  portal_session_conf:
    valueFrom:
      secretKeyRef:
        name: portal-session
        key: portal_session_conf
```


@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj


@@ -0,0 +1,6 @@
dependencies:
- name: common
  repository: https://charts.bitnami.com/bitnami
  version: 2.0.4
digest: sha256:ec5726c5d8f1e474cc6c9ca90c18efc35f4dbd15ccaf2df148764947d5ad6a6c
generated: "2022-10-25T14:40:27.273494162Z"


@@ -0,0 +1,30 @@
annotations:
  category: Database
apiVersion: v2
appVersion: 14.5.0
dependencies:
- name: common
  repository: https://charts.bitnami.com/bitnami
  tags:
  - bitnami-common
  version: 2.x.x
description: PostgreSQL (Postgres) is an open source object-relational database known
  for reliability and data integrity. ACID-compliant, it supports foreign keys, joins,
  views, triggers and stored procedures.
home: https://github.com/bitnami/charts/tree/main/bitnami/postgresql
icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png
keywords:
- postgresql
- postgres
- database
- sql
- replication
- cluster
maintainers:
- name: Bitnami
  url: https://github.com/bitnami/charts
name: postgresql
sources:
- https://github.com/bitnami/containers/tree/main/bitnami/postgresql
- https://www.postgresql.org/
version: 11.9.13


@@ -0,0 +1,683 @@
<!--- app-name: PostgreSQL -->
# PostgreSQL packaged by Bitnami
PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures.
[Overview of PostgreSQL](http://www.postgresql.org)
Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.
## TL;DR
```bash
helm repo add my-repo https://charts.bitnami.com/bitnami
helm install my-release my-repo/postgresql
```
## Introduction
This chart bootstraps a [PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
For HA, please see [this repo](https://github.com/bitnami/charts/tree/main/bitnami/postgresql-ha)
Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
## Prerequisites
- Kubernetes 1.19+
- Helm 3.2.0+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
helm install my-release my-repo/postgresql
```
The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release
```
The command removes all of the Kubernetes components associated with the chart, except the PVCs, and deletes the release.
To delete the PVCs associated with `my-release`:
```bash
kubectl delete pvc -l release=my-release
```
> **Note**: Deleting the PVCs will delete PostgreSQL data as well. Please be cautious before doing so.
## Parameters
### Global parameters
| Name | Description | Value |
| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
| `global.postgresql.auth.postgresPassword` | Password for the "postgres" admin user (overrides `auth.postgresPassword`) | `""` |
| `global.postgresql.auth.username` | Name for a custom user to create (overrides `auth.username`) | `""` |
| `global.postgresql.auth.password` | Password for the custom user to create (overrides `auth.password`) | `""` |
| `global.postgresql.auth.database` | Name for a custom database to create (overrides `auth.database`) | `""` |
| `global.postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). | `""` |
| `global.postgresql.auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
| `global.postgresql.auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
| `global.postgresql.auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` |
| `global.postgresql.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `""` |
### Common parameters
| Name | Description | Value |
| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- |
| `kubeVersion` | Override Kubernetes version | `""` |
| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` |
| `fullnameOverride` | String to fully override common.names.fullname template | `""` |
| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template) | `[]` |
| `commonLabels` | Add labels to all the deployed resources | `{}` |
| `commonAnnotations` | Add annotations to all the deployed resources | `{}` |
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` |
| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` |
### PostgreSQL common parameters
| Name | Description | Value |
| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
| `image.registry` | PostgreSQL image registry | `docker.io` |
| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` |
| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `14.5.0-debian-11-r35` |
| `image.digest`                            | PostgreSQL image digest in the format `sha256:aa...`. If set, this parameter overrides the tag                                                                                                                                                                                                                                                  | `""`                       |
| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify image pull secrets | `[]` |
| `image.debug` | Specify if debug values should be set | `false` |
| `auth.enablePostgresUser` | Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user | `true` |
| `auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided | `""` |
| `auth.username` | Name for a custom user to create | `""` |
| `auth.password` | Password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided | `""` |
| `auth.database` | Name for a custom database to create | `""` |
| `auth.replicationUsername` | Name of the replication user | `repl_user` |
| `auth.replicationPassword` | Password for the replication user. Ignored if `auth.existingSecret` with key `replication-password` is provided | `""` |
| `auth.existingSecret`                     | Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case.   | `""`                       |
| `auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `postgres-password` |
| `auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `password` |
| `auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `replication-password` |
| `auth.usePasswordFiles`                   | Mount credentials as files instead of using environment variables                                                                                                                                                                                                                                                                               | `false`                    |
| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` |
| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` |
| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
| `containerPorts.postgresql` | PostgreSQL container port | `5432` |
| `audit.logHostname` | Log client hostnames | `false` |
| `audit.logConnections` | Add client log-in operations to the log file | `false` |
| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` |
| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` |
| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` |
| `audit.clientMinMessages` | Message log level to share with the user | `error` |
| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` |
| `audit.logTimezone` | Timezone for the log timestamps | `""` |
| `ldap.enabled` | Enable LDAP support | `false` |
| `ldap.server` | IP address or name of the LDAP server. | `""` |
| `ldap.port` | Port number on the LDAP server to connect to | `""` |
| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `""` |
| `ldap.suffix` | String to append to the user name when forming the DN to bind | `""` |
| `ldap.basedn` | Root DN to begin the search for the user in | `""` |
| `ldap.binddn` | DN of user to bind to LDAP | `""` |
| `ldap.bindpw` | Password for the user to bind to LDAP | `""` |
| `ldap.searchAttribute` | Attribute to match against the user name in the search | `""` |
| `ldap.searchFilter` | The search filter to use when doing search+bind authentication | `""` |
| `ldap.scheme` | Set to `ldaps` to use LDAPS | `""` |
| `ldap.tls.enabled`                        | Set to `true` to enable TLS encryption                                                                                                                                                                                                                                                                                                          | `false`                    |
| `ldap.uri` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. | `""` |
| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql/data` |
| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` |
| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) | `true` |
| `shmVolume.sizeLimit` | Set this to enable a size limit on the shm tmpfs | `""` |
| `tls.enabled` | Enable TLS traffic support | `false` |
| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` |
| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` |
| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` |
| `tls.certFilename` | Certificate filename | `""` |
| `tls.certKeyFilename` | Certificate key filename | `""` |
| `tls.certCAFilename` | CA Certificate filename | `""` |
| `tls.crlFilename` | File containing a Certificate Revocation List | `""` |
### PostgreSQL Primary parameters
| Name | Description | Value |
| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- |
| `primary.name`                                | Name of the primary database (e.g. primary, master, leader, ...)                                                          | `primary`             |
| `primary.configuration` | PostgreSQL Primary main configuration to be injected as ConfigMap | `""` |
| `primary.pgHbaConfiguration` | PostgreSQL Primary client authentication configuration | `""` |
| `primary.existingConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary configuration | `""` |
| `primary.extendedConfiguration` | Extended PostgreSQL Primary configuration (appended to main or default configuration) | `""` |
| `primary.existingExtendedConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary extended configuration | `""` |
| `primary.initdb.args` | PostgreSQL initdb extra arguments | `""` |
| `primary.initdb.postgresqlWalDir` | Specify a custom location for the PostgreSQL transaction log | `""` |
| `primary.initdb.scripts` | Dictionary of initdb scripts | `{}` |
| `primary.initdb.scriptsConfigMap` | ConfigMap with scripts to be run at first boot | `""` |
| `primary.initdb.scriptsSecret` | Secret with scripts to be run at first boot (in case it contains sensitive information) | `""` |
| `primary.initdb.user` | Specify the PostgreSQL username to execute the initdb scripts | `""` |
| `primary.initdb.password` | Specify the PostgreSQL password to execute the initdb scripts | `""` |
| `primary.standby.enabled` | Whether to enable current cluster's primary as standby server of another cluster or not | `false` |
| `primary.standby.primaryHost` | The Host of replication primary in the other cluster | `""` |
| `primary.standby.primaryPort` | The Port of replication primary in the other cluster | `""` |
| `primary.extraEnvVars` | Array with extra environment variables to add to PostgreSQL Primary nodes | `[]` |
| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes | `""` |
| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL Primary nodes | `""` |
| `primary.command` | Override default container command (useful when using custom images) | `[]` |
| `primary.args` | Override default container args (useful when using custom images) | `[]` |
| `primary.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Primary containers | `true` |
| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `primary.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Primary containers | `true` |
| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `primary.startupProbe.enabled` | Enable startupProbe on PostgreSQL Primary containers | `false` |
| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `primary.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
| `primary.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
| `primary.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
| `primary.lifecycleHooks`                      | LifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup                     | `{}`                  |
| `primary.resources.limits` | The resources limits for the PostgreSQL Primary containers | `{}` |
| `primary.resources.requests.memory` | The requested memory for the PostgreSQL Primary containers | `256Mi` |
| `primary.resources.requests.cpu` | The requested cpu for the PostgreSQL Primary containers | `250m` |
| `primary.podSecurityContext.enabled` | Enable security context | `true` |
| `primary.podSecurityContext.fsGroup` | Group ID for the pod | `1001` |
| `primary.containerSecurityContext.enabled` | Enable container security context | `true` |
| `primary.containerSecurityContext.runAsUser` | User ID for the container | `1001` |
| `primary.hostAliases` | PostgreSQL primary pods host aliases | `[]` |
| `primary.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (postgresql primary) | `false` |
| `primary.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` |
| `primary.labels` | Map of labels to add to the statefulset (postgresql primary) | `{}` |
| `primary.annotations` | Annotations for PostgreSQL primary pods | `{}` |
| `primary.podLabels` | Map of labels to add to the pods (postgresql primary) | `{}` |
| `primary.podAnnotations` | Map of annotations to add to the pods (postgresql primary) | `{}` |
| `primary.podAffinityPreset` | PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `primary.podAntiAffinityPreset` | PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `primary.nodeAffinityPreset.type` | PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `primary.nodeAffinityPreset.key` | PostgreSQL primary node label key to match. Ignored if `primary.affinity` is set. | `""` |
| `primary.nodeAffinityPreset.values` | PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. | `[]` |
| `primary.affinity` | Affinity for PostgreSQL primary pods assignment | `{}` |
| `primary.nodeSelector` | Node labels for PostgreSQL primary pods assignment | `{}` |
| `primary.tolerations` | Tolerations for PostgreSQL primary pods assignment | `[]` |
| `primary.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `primary.priorityClassName` | Priority Class to use for each pod (postgresql primary) | `""` |
| `primary.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
| `primary.terminationGracePeriodSeconds` | Seconds PostgreSQL primary pod needs to terminate gracefully | `""` |
| `primary.updateStrategy.type` | PostgreSQL Primary statefulset strategy type | `RollingUpdate` |
| `primary.updateStrategy.rollingUpdate` | PostgreSQL Primary statefulset rolling update configuration parameters | `{}` |
| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) | `[]` |
| `primary.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) | `[]` |
| `primary.sidecars` | Add additional sidecar containers to the PostgreSQL Primary pod(s) | `[]` |
| `primary.initContainers` | Add additional init containers to the PostgreSQL Primary pod(s) | `[]` |
| `primary.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) | `{}` |
| `primary.service.type` | Kubernetes Service type | `ClusterIP` |
| `primary.service.ports.postgresql` | PostgreSQL service port | `5432` |
| `primary.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` |
| `primary.service.clusterIP` | Static clusterIP or None for headless services | `""` |
| `primary.service.annotations` | Annotations for PostgreSQL primary service | `{}` |
| `primary.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` |
| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
| `primary.service.extraPorts` | Extra ports to expose in the PostgreSQL primary service | `[]` |
| `primary.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
| `primary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `true` |
| `primary.persistence.existingClaim` | Name of an existing PVC to use | `""` |
| `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` |
| `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` |
| `primary.persistence.storageClass` | PVC Storage Class for PostgreSQL Primary data volume | `""` |
| `primary.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` |
| `primary.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
| `primary.persistence.annotations` | Annotations for the PVC | `{}` |
| `primary.persistence.labels` | Labels for the PVC | `{}` |
| `primary.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` |
| `primary.persistence.dataSource` | Custom PVC data source | `{}` |
### PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`)
| Name | Description | Value |
| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- |
| `readReplicas.name` | Name of the read replicas database (e.g. secondary, slave, ...) | `read` |
| `readReplicas.replicaCount` | Number of PostgreSQL read only replicas | `1` |
| `readReplicas.extendedConfiguration` | Extended PostgreSQL read only replicas configuration (appended to main or default configuration) | `""` |
| `readReplicas.extraEnvVars` | Array with extra environment variables to add to PostgreSQL read only nodes | `[]` |
| `readReplicas.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes | `""` |
| `readReplicas.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL read only nodes | `""` |
| `readReplicas.command` | Override default container command (useful when using custom images) | `[]` |
| `readReplicas.args` | Override default container args (useful when using custom images) | `[]` |
| `readReplicas.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL read only containers | `true` |
| `readReplicas.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
| `readReplicas.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
| `readReplicas.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `readReplicas.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `readReplicas.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `readReplicas.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL read only containers | `true` |
| `readReplicas.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `readReplicas.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
| `readReplicas.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
| `readReplicas.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
| `readReplicas.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `readReplicas.startupProbe.enabled` | Enable startupProbe on PostgreSQL read only containers | `false` |
| `readReplicas.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
| `readReplicas.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
| `readReplicas.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
| `readReplicas.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
| `readReplicas.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `readReplicas.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
| `readReplicas.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
| `readReplicas.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
| `readReplicas.lifecycleHooks` | Lifecycle hooks for the PostgreSQL read only container to automate configuration before or after startup | `{}` |
| `readReplicas.resources.limits` | The resources limits for the PostgreSQL read only containers | `{}` |
| `readReplicas.resources.requests.memory` | The requested memory for the PostgreSQL read only containers | `256Mi` |
| `readReplicas.resources.requests.cpu` | The requested cpu for the PostgreSQL read only containers | `250m` |
| `readReplicas.podSecurityContext.enabled` | Enable security context | `true` |
| `readReplicas.podSecurityContext.fsGroup` | Group ID for the pod | `1001` |
| `readReplicas.containerSecurityContext.enabled` | Enable container security context | `true` |
| `readReplicas.containerSecurityContext.runAsUser` | User ID for the container | `1001` |
| `readReplicas.hostAliases` | PostgreSQL read only pods host aliases | `[]` |
| `readReplicas.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` |
| `readReplicas.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` |
| `readReplicas.labels` | Map of labels to add to the statefulset (PostgreSQL read only) | `{}` |
| `readReplicas.annotations` | Annotations for PostgreSQL read only pods | `{}` |
| `readReplicas.podLabels` | Map of labels to add to the pods (PostgreSQL read only) | `{}` |
| `readReplicas.podAnnotations` | Map of annotations to add to the pods (PostgreSQL read only) | `{}` |
| `readReplicas.podAffinityPreset` | PostgreSQL read only pod affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `readReplicas.podAntiAffinityPreset` | PostgreSQL read only pod anti-affinity preset. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `readReplicas.nodeAffinityPreset.type` | PostgreSQL read only node affinity preset type. Ignored if `readReplicas.affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `readReplicas.nodeAffinityPreset.key` | PostgreSQL read only node label key to match. Ignored if `readReplicas.affinity` is set. | `""` |
| `readReplicas.nodeAffinityPreset.values` | PostgreSQL read only node label values to match. Ignored if `readReplicas.affinity` is set. | `[]` |
| `readReplicas.affinity` | Affinity for PostgreSQL read only pods assignment | `{}` |
| `readReplicas.nodeSelector` | Node labels for PostgreSQL read only pods assignment | `{}` |
| `readReplicas.tolerations` | Tolerations for PostgreSQL read only pods assignment | `[]` |
| `readReplicas.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
| `readReplicas.priorityClassName` | Priority Class to use for each pod (PostgreSQL read only) | `""` |
| `readReplicas.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
| `readReplicas.terminationGracePeriodSeconds` | Seconds PostgreSQL read only pod needs to terminate gracefully | `""` |
| `readReplicas.updateStrategy.type` | PostgreSQL read only statefulset strategy type | `RollingUpdate` |
| `readReplicas.updateStrategy.rollingUpdate` | PostgreSQL read only statefulset rolling update configuration parameters | `{}` |
| `readReplicas.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) | `[]` |
| `readReplicas.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) | `[]` |
| `readReplicas.sidecars` | Add additional sidecar containers to the PostgreSQL read only pod(s) | `[]` |
| `readReplicas.initContainers` | Add additional init containers to the PostgreSQL read only pod(s) | `[]` |
| `readReplicas.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL read only pod(s) | `{}` |
| `readReplicas.service.type` | Kubernetes Service type | `ClusterIP` |
| `readReplicas.service.ports.postgresql` | PostgreSQL service port | `5432` |
| `readReplicas.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` |
| `readReplicas.service.clusterIP` | Static clusterIP or None for headless services | `""` |
| `readReplicas.service.annotations` | Annotations for PostgreSQL read only service | `{}` |
| `readReplicas.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` |
| `readReplicas.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` |
| `readReplicas.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` |
| `readReplicas.service.extraPorts` | Extra ports to expose in the PostgreSQL read only service | `[]` |
| `readReplicas.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
| `readReplicas.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `readReplicas.persistence.enabled` | Enable PostgreSQL read only data persistence using PVC | `true` |
| `readReplicas.persistence.existingClaim` | Name of an existing PVC to use | `""` |
| `readReplicas.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` |
| `readReplicas.persistence.subPath` | The subdirectory of the volume to mount to | `""` |
| `readReplicas.persistence.storageClass` | PVC Storage Class for PostgreSQL read only data volume | `""` |
| `readReplicas.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` |
| `readReplicas.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
| `readReplicas.persistence.annotations` | Annotations for the PVC | `{}` |
| `readReplicas.persistence.labels` | Labels for the PVC | `{}` |
| `readReplicas.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` |
| `readReplicas.persistence.dataSource` | Custom PVC data source | `{}` |
### NetworkPolicy parameters
| Name | Description | Value |
| ------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `networkPolicy.enabled` | Enable network policies | `false` |
| `networkPolicy.metrics.enabled` | Enable network policies for metrics (prometheus) | `false` |
| `networkPolicy.metrics.namespaceSelector` | Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. | `{}` |
| `networkPolicy.metrics.podSelector` | Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. | `{}` |
| `networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. | `false` |
| `networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s). | `{}` |
| `networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s). | `{}` |
| `networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL primary node. | `{}` |
| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. | `false` |
| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s). | `{}` |
| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s). | `{}` |
| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL read-only nodes. | `{}` |
| `networkPolicy.egressRules.denyConnectionsToExternal` | Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). | `false` |
| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` |
### Volume Permissions parameters
| Name | Description | Value |
| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r45` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` |
| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` |
### Other Parameters
| Name | Description | Value |
| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `serviceAccount.create` | Enable creation of ServiceAccount for PostgreSQL pod | `false` |
| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
| `serviceAccount.automountServiceAccountToken` | Allow automounting the ServiceAccount token on the created ServiceAccount | `true` |
| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` |
| `rbac.rules` | Custom RBAC rules to set | `[]` |
| `psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated as of Kubernetes v1.21 and removed in v1.25 and later | `false` |
### Metrics Parameters
| Name | Description | Value |
| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------------- |
| `metrics.enabled` | Start a prometheus exporter | `false` |
| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` |
| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` |
| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.11.1-debian-11-r22` |
| `metrics.image.digest` | PostgreSQL Prometheus Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` |
| `metrics.customMetrics` | Define additional custom metrics | `{}` |
| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` |
| `metrics.containerSecurityContext.enabled` | Enable PostgreSQL Prometheus exporter containers' Security Context | `true` |
| `metrics.containerSecurityContext.runAsUser` | Set PostgreSQL Prometheus exporter containers' Security Context runAsUser | `1001` |
| `metrics.containerSecurityContext.runAsNonRoot` | Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot | `true` |
| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` |
| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` |
| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` |
| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` |
| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` |
| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` |
| `metrics.resources.limits` | The resources limits for the PostgreSQL Prometheus exporter container | `{}` |
| `metrics.resources.requests` | The requested resources for the PostgreSQL Prometheus exporter container | `{}` |
| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` |
| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` |
| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` |
| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` |
| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` |
| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` |
| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` |
| `metrics.serviceMonitor.honorLabels` | Set the `honorLabels` parameter on the scrape endpoint | `false` |
| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus. | `""` |
| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` |
| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` |
| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```bash
$ helm install my-release \
    --set auth.postgresPassword=secretpassword \
    my-repo/postgresql
```
The above command sets the PostgreSQL `postgres` account password to `secretpassword`.
> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
> **Warning** Setting a password will be ignored on new installations when a previous PostgreSQL release was deleted through the `helm` command. In that case, the old PVC still holds the old password, and setting a new one through Helm won't take effect. Deleting the persistent volumes (PVs) solves the issue. Refer to [issue 2061](https://github.com/bitnami/charts/issues/2061) for more details
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```bash
helm install my-release -f values.yaml my-repo/postgresql
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Configuration and installation details
### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container is available, if significant changes occur, or if critical vulnerabilities exist.
### Customizing primary and read replica services in a replicated configuration
At the top level, there is a `service` object that defines the services for both the primary and the read replicas. For deeper customization, there are separate service objects for the primary and read types; these override the values in the top-level `service` object, so the primary and read services can use different service types and different clusterIPs / nodePorts. If you want both the primary and read services to be of type `NodePort`, set their nodePorts to different values to prevent a collision. Values set deeper in the `primary.service` or `readReplicas.service` objects take precedence over the top-level `service` object, as in the sketch below.
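A minimal values sketch (the port numbers are illustrative) exposing the primary and read services as `NodePort` on different ports:
```yaml
primary:
  service:
    type: NodePort
    nodePorts:
      postgresql: "30432"   # illustrative node port
readReplicas:
  service:
    type: NodePort
    nodePorts:
      postgresql: "30433"   # must differ from the primary's node port
```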
### Use a different PostgreSQL version
To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/configuration/change-image-version/).
### postgresql.conf / pg_hba.conf files as configMap
This Helm chart also supports customizing the PostgreSQL configuration file. You can add additional PostgreSQL configuration parameters using the `primary.extendedConfiguration`/`readReplicas.extendedConfiguration` parameters as a string. Alternatively, to replace the entire default configuration use `primary.configuration`.
You can also add a custom pg_hba.conf using the `primary.pgHbaConfiguration` parameter.
In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `primary.existingConfigmap` parameter. Note that this will override the two previous options.
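For instance, a values sketch (the settings themselves are illustrative) appending parameters to the primary configuration and supplying a custom pg_hba.conf:
```yaml
primary:
  extendedConfiguration: |
    max_connections = 200
    shared_buffers = 256MB
  pgHbaConfiguration: |
    local all all trust
    host all all 0.0.0.0/0 md5
```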
### Initialize a fresh instance
The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `primary.initdb.scripts` parameter as a string.
In addition, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `primary.initdb.scriptsConfigMap` parameter. Note that this will override the previous option. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `primary.initdb.scriptsSecret` parameter.
The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
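A sketch of inline initialization scripts (file names and contents are illustrative):
```yaml
primary:
  initdb:
    scripts:
      create_schema.sql: |
        CREATE SCHEMA IF NOT EXISTS myapp;
      set_up.sh: |
        #!/bin/sh
        echo "running custom initdb script"
```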
### Securing traffic using TLS
TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
- `tls.enabled`: Enable TLS support. Defaults to `false`
- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults.
- `tls.certFilename`: Certificate filename. No defaults.
- `tls.certKeyFilename`: Certificate key filename. No defaults.
For example:
- First, create the secret with the certificate files:
```console
kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt
```
- Then, use the following parameters:
```console
volumePermissions.enabled=true
tls.enabled=true
tls.certificatesSecret="certificates-tls-secret"
tls.certFilename="cert.crt"
tls.certKeyFilename="cert.key"
```
> Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an ongoing [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected.
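The same parameters expressed as a values file sketch:
```yaml
volumePermissions:
  enabled: true
tls:
  enabled: true
  certificatesSecret: "certificates-tls-secret"
  certFilename: "cert.crt"
  certKeyFilename: "cert.key"
```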
### Sidecars
If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
```yaml
# For the PostgreSQL primary
primary:
  sidecars:
    - name: your-image-name
      image: your-image
      imagePullPolicy: Always
      ports:
        - name: portname
          containerPort: 1234

# For the PostgreSQL replicas
readReplicas:
  sidecars:
    - name: your-image-name
      image: your-image
      imagePullPolicy: Always
      ports:
        - name: portname
          containerPort: 1234
```
### Metrics
The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed; it is expected that the metrics are collected from inside the k8s cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
The exporter allows creating custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
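As a sketch, a custom metric following the postgres_exporter queries format (the query and metric names are illustrative):
```yaml
metrics:
  customMetrics:
    pg_database:
      query: "SELECT datname AS name, pg_database_size(datname) AS size_bytes FROM pg_database"
      metrics:
        - name:
            usage: "LABEL"
            description: "Name of the database"
        - size_bytes:
            usage: "GAUGE"
            description: "Size of the database in bytes"
```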
### Use of global variables
In more complex scenarios, we may have the following tree of dependencies
```
                +--------------+
                |              |
   +------------+   Chart 1    +------------+
   |            |              |            |
   |            +------+-------+            |
   |                   |                    |
   |                   |                    |
   v                   v                    v
+--+-----------+  +----+----------+  +------+--------+
|              |  |               |  |               |
|  PostgreSQL  |  |  Sub-chart 1  |  |  Sub-chart 2  |
|              |  |               |  |               |
+--------------+  +---------------+  +---------------+
```
The three charts at the bottom of the diagram (PostgreSQL, Sub-chart 1 and Sub-chart 2) are dependencies of the parent chart, Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option could be to deploy Chart 1 with the following parameters:
```
postgresql.auth.username=testuser
subchart1.postgresql.auth.username=testuser
subchart2.postgresql.auth.username=testuser
postgresql.auth.password=testpass
subchart1.postgresql.auth.password=testpass
subchart2.postgresql.auth.password=testpass
postgresql.auth.database=testdb
subchart1.postgresql.auth.database=testdb
subchart2.postgresql.auth.database=testdb
```
If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
```
global.postgresql.auth.username=testuser
global.postgresql.auth.password=testpass
global.postgresql.auth.database=testdb
```
This way, the credentials will be available in all of the subcharts.
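Expressed as a values file, the same (illustrative) credentials would look like:
```yaml
global:
  postgresql:
    auth:
      username: testuser
      password: testpass
      database: testdb
```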
## Persistence
The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
If the volume already contains data from a previous deployment, synchronization to the standby nodes will fail for all commits; for details, refer to the [code present in the container repository](https://github.com/bitnami/containers/tree/main/bitnami/postgresql). If you need to keep that data, convert it to SQL and import it after `helm install` finishes.
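For example, a values sketch (the storage class name is illustrative) adjusting the primary's persistence, or reusing an existing claim:
```yaml
primary:
  persistence:
    enabled: true
    size: 16Gi
    storageClass: "standard"             # illustrative; leave empty for the cluster default
    # existingClaim: my-postgresql-pvc   # alternatively, reuse an existing PVC
```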
## NetworkPolicy
To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
```bash
kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
```
With NetworkPolicy enabled, traffic will be limited to just port 5432.
For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
This label will be displayed in the output of a successful install.
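For example, a values sketch enabling the policy and restricting access to labeled client pods:
```yaml
networkPolicy:
  enabled: true
  allowExternal: false   # only pods carrying the generated client label may connect
```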
## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, it will be ignored. The only environment variables supported by the Docker Official image are `POSTGRES_USER`, `POSTGRES_DB`, `POSTGRES_PASSWORD`, `POSTGRES_INITDB_ARGS`, `POSTGRES_INITDB_WALDIR` and `PGDATA`. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
- For OpenShift, one may either define `runAsUser` and `fsGroup` accordingly, or try this more dynamic option: `volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,containerSecurityContext.enabled=false,shmVolume.chmod.enabled=false`
### Setting Pod's affinity
This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about pod affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available in the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters, as in the sketch below.
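For example, a values sketch (the node label and its values are illustrative) using the presets for the primary:
```yaml
primary:
  podAntiAffinityPreset: hard
  nodeAffinityPreset:
    type: soft
    key: "kubernetes.io/arch"   # illustrative node label
    values:
      - amd64
```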
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
Refer to the [chart documentation for more information about how to upgrade from previous releases](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/administration/upgrade/).
## License
Copyright &copy; 2022 Bitnami
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,23 @@
annotations:
  category: Infrastructure
apiVersion: v2
appVersion: 2.0.4
description: A Library Helm Chart for grouping common logic between bitnami charts.
  This chart is not deployable by itself.
home: https://github.com/bitnami/charts/tree/main/bitnami/common
icon: https://bitnami.com/downloads/logos/bitnami-mark.png
keywords:
  - common
  - helper
  - template
  - function
  - bitnami
maintainers:
  - name: Bitnami
    url: https://github.com/bitnami/charts
name: common
sources:
  - https://github.com/bitnami/charts
  - https://www.bitnami.com/
type: library
version: 2.0.4


@@ -0,0 +1,350 @@
# Bitnami Common Library Chart
A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
## TL;DR
```yaml
dependencies:
  - name: common
    version: 1.x.x
    repository: https://charts.bitnami.com/bitnami
```
```bash
$ helm dependency update
```
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "common.names.fullname" . }}
data:
  myvalue: "Hello World"
```
## Introduction
This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
## Prerequisites
- Kubernetes 1.19+
- Helm 3.2.0+
## Parameters
The following table lists the helpers available in the library which are scoped in different sections.
### Affinities
| Helper identifier | Description | Expected Input |
|-------------------------------|------------------------------------------------------|------------------------------------------------|
| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
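As a sketch, a chart template might consume these helpers like this (the component name and node label are illustrative):
```yaml
affinity:
  podAntiAffinity: {{- include "common.affinities.pods" (dict "type" "soft" "component" "primary" "context" $) | nindent 4 }}
  nodeAffinity: {{- include "common.affinities.nodes" (dict "type" "hard" "key" "kubernetes.io/arch" "values" (list "amd64")) | nindent 4 }}
```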
### Capabilities
| Helper identifier | Description | Expected Input |
|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------|
| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context |
| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context |
| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context |
| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context |
| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context |
| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context |
| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context |
| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context |
| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context |
| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context |
| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context |
| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context |
### Errors
| Helper identifier | Description | Expected Input |
|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|
| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` |
### Images
| Helper identifier | Description | Expected Input |
|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------|
| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. |
| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` |
### Ingress
| Helper identifier | Description | Expected Input |
|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context |
| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context |
| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` |
### Labels
| Helper identifier | Description | Expected Input |
|-----------------------------|-----------------------------------------------------------------------------|-------------------|
| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context |
| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
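A typical usage sketch inside a chart template:
```yaml
metadata:
  labels: {{- include "common.labels.standard" . | nindent 4 }}
spec:
  selector:
    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
```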
### Names
| Helper identifier | Description | Expected Input |
|-----------------------------------|-----------------------------------------------------------------------|-------------------|
| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context |
| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context |
| `common.names.chart` | Chart name plus version | `.` Chart context |
### Secrets
| Helper identifier | Description | Expected Input |
|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
| `common.passwords.manage` | Generate a secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
| `common.secrets.exists` | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` |
### Storage
| Helper identifier | Description | Expected Input |
|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------|
| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
### TplValues
| Helper identifier | Description | Expected Input |
|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|
| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`, where value is the value to be rendered as a template and context is usually the chart context `$` or `.` |
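For example, rendering user-supplied annotations that may themselves contain templates (`.Values.podAnnotations` is an assumed value for illustration):
```yaml
{{- if .Values.podAnnotations }}
annotations: {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 4 }}
{{- end }}
```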
### Utils
| Helper identifier | Description | Expected Input |
|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------|
| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` |
| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` |
| `common.utils.getKeyFromList` | Returns the first `.Values` key with a defined value, or the first key of the list if none are defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` |
### Validations
| Helper identifier | Description | Expected Input |
|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `common.validations.values.single.empty` | Validate that a value is not empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. If they are given, the helper will generate instructions on how to retrieve the value. See [ValidateValue](#validatevalue) |
| `common.validations.values.multiple.empty` | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
| `common.validations.values.mariadb.passwords` | This helper will ensure required passwords for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` the subchart field is optional and can be true or false, depending on where the mariadb chart and the helper are used. |
| `common.validations.values.mysql.passwords` | This helper will ensure required passwords for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` the subchart field is optional and can be true or false, depending on where the mysql chart and the helper are used. |
| `common.validations.values.postgresql.passwords` | This helper will ensure required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` the subchart field is optional and can be true or false, depending on where the postgresql chart and the helper are used. |
| `common.validations.values.redis.passwords` | This helper will ensure required passwords for Redis&reg; are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` the subchart field is optional and can be true or false, depending on where the redis chart and the helper are used. |
| `common.validations.values.cassandra.passwords` | This helper will ensure required passwords for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` the subchart field is optional and can be true or false, depending on where the cassandra chart and the helper are used. |
| `common.validations.values.mongodb.passwords` | This helper will ensure required passwords for MongoDB&reg; are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` the subchart field is optional and can be true or false, depending on where the mongodb chart and the helper are used. |
### Warnings
| Helper identifier | Description | Expected Input |
|------------------------------|----------------------------------|------------------------------------------------------------|
| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. |
## Special input schemas
### ImageRoot
```yaml
registry:
  type: string
  description: Docker registry where the image is located
  example: docker.io
repository:
  type: string
  description: Repository and image name
  example: bitnami/nginx
tag:
  type: string
  description: image tag
  example: 1.16.1-debian-10-r63
pullPolicy:
  type: string
  description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
pullSecrets:
  type: array
  items:
    type: string
  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
debug:
  type: boolean
  description: Set to true if you would like to see extra information on logs
  example: false
## An instance would be:
# registry: docker.io
# repository: bitnami/nginx
# tag: 1.16.1-debian-10-r63
# pullPolicy: IfNotPresent
# debug: false
```
### Persistence
```yaml
enabled:
  type: boolean
  description: Whether to enable persistence.
  example: true
storageClass:
  type: string
  description: Ghost data Persistent Volume Storage Class. If set to "-", storageClassName is set to "", which disables dynamic provisioning.
  example: "-"
accessMode:
  type: string
  description: Access mode for the Persistent Volume Storage.
  example: ReadWriteOnce
size:
  type: string
  description: Size of the Persistent Volume Storage.
  example: 8Gi
path:
  type: string
  description: Path to be persisted.
  example: /bitnami
## An instance would be:
# enabled: true
# storageClass: "-"
# accessMode: ReadWriteOnce
# size: 8Gi
# path: /bitnami
```
### ExistingSecret
```yaml
name:
  type: string
  description: Name of the existing secret.
  example: mySecret
keyMapping:
  description: Mapping between the expected key name and the name of the key in the existing secret.
  type: object
## An instance would be:
# name: mySecret
# keyMapping:
#   password: myPasswordKey
```
#### Example of use
When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
```yaml
# templates/secret.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "common.names.fullname" . }}
  labels:
    app: {{ include "common.names.fullname" . }}
type: Opaque
data:
  password: {{ .Values.password | b64enc | quote }}

# templates/dpl.yaml
---
...
      env:
        - name: PASSWORD
          valueFrom:
            secretKeyRef:
              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
...

# values.yaml
---
name: mySecret
keyMapping:
  password: myPasswordKey
```
### ValidateValue
#### NOTES.txt
```console
{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
```
If we force those values to be empty, we will see some alerts:
```console
$ helm install test mychart --set path.to.value00="",path.to.value01=""
'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
```
## Upgrading
### To 1.0.0
[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
**What changes were introduced in this major version?**
- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
**Considerations when upgrading to this version**
- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
**Useful links**
- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
- https://helm.sh/docs/topics/v2_v3_migration/
- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
## License
Copyright &copy; 2022 Bitnami
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,102 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Return a soft nodeAffinity definition
{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.nodes.soft" -}}
preferredDuringSchedulingIgnoredDuringExecution:
  - preference:
      matchExpressions:
        - key: {{ .key }}
          operator: In
          values:
            {{- range .values }}
            - {{ . | quote }}
            {{- end }}
    weight: 1
{{- end -}}
{{/*
Return a hard nodeAffinity definition
{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.nodes.hard" -}}
requiredDuringSchedulingIgnoredDuringExecution:
  nodeSelectorTerms:
    - matchExpressions:
        - key: {{ .key }}
          operator: In
          values:
            {{- range .values }}
            - {{ . | quote }}
            {{- end }}
{{- end -}}
{{/*
Return a nodeAffinity definition
{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.nodes" -}}
{{- if eq .type "soft" }}
{{- include "common.affinities.nodes.soft" . -}}
{{- else if eq .type "hard" }}
{{- include "common.affinities.nodes.hard" . -}}
{{- end -}}
{{- end -}}
{{/*
Return a soft podAffinity/podAntiAffinity definition
{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}}
*/}}
{{- define "common.affinities.pods.soft" -}}
{{- $component := default "" .component -}}
{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
preferredDuringSchedulingIgnoredDuringExecution:
  - podAffinityTerm:
      labelSelector:
        matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }}
          {{- if not (empty $component) }}
          {{ printf "app.kubernetes.io/component: %s" $component }}
          {{- end }}
          {{- range $key, $value := $extraMatchLabels }}
          {{ $key }}: {{ $value | quote }}
          {{- end }}
      namespaces:
        - {{ include "common.names.namespace" .context | quote }}
      topologyKey: kubernetes.io/hostname
    weight: 1
{{- end -}}
{{/*
Return a hard podAffinity/podAntiAffinity definition
{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}}
*/}}
{{- define "common.affinities.pods.hard" -}}
{{- $component := default "" .component -}}
{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
requiredDuringSchedulingIgnoredDuringExecution:
  - labelSelector:
      matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }}
        {{- if not (empty $component) }}
        {{ printf "app.kubernetes.io/component: %s" $component }}
        {{- end }}
        {{- range $key, $value := $extraMatchLabels }}
        {{ $key }}: {{ $value | quote }}
        {{- end }}
    namespaces:
      - {{ include "common.names.namespace" .context | quote }}
    topologyKey: kubernetes.io/hostname
{{- end -}}
{{/*
Return a podAffinity/podAntiAffinity definition
{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.pods" -}}
{{- if eq .type "soft" }}
{{- include "common.affinities.pods.soft" . -}}
{{- else if eq .type "hard" }}
{{- include "common.affinities.pods.hard" . -}}
{{- end -}}
{{- end -}}
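As an illustrative sketch (not part of this file), a chart's deployment template would typically consume these affinity helpers as below; the `.Values.podAntiAffinityPreset` and `.Values.nodeAffinityPreset.*` values and the `server` component name are assumptions for the example:
```console
# templates/deployment.yaml (illustrative)
      affinity:
        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "server" "context" $) | nindent 10 }}
        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }}
```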


@@ -0,0 +1,154 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Return the target Kubernetes version
*/}}
{{- define "common.capabilities.kubeVersion" -}}
{{- if .Values.global }}
{{- if .Values.global.kubeVersion }}
{{- .Values.global.kubeVersion -}}
{{- else }}
{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
{{- end -}}
{{- else }}
{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for poddisruptionbudget.
*/}}
{{- define "common.capabilities.policy.apiVersion" -}}
{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "policy/v1beta1" -}}
{{- else -}}
{{- print "policy/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for networkpolicy.
*/}}
{{- define "common.capabilities.networkPolicy.apiVersion" -}}
{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for cronjob.
*/}}
{{- define "common.capabilities.cronjob.apiVersion" -}}
{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "batch/v1beta1" -}}
{{- else -}}
{{- print "batch/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for deployment.
*/}}
{{- define "common.capabilities.deployment.apiVersion" -}}
{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for statefulset.
*/}}
{{- define "common.capabilities.statefulset.apiVersion" -}}
{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "apps/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "common.capabilities.ingress.apiVersion" -}}
{{- if .Values.ingress -}}
{{- if .Values.ingress.apiVersion -}}
{{- .Values.ingress.apiVersion -}}
{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "extensions/v1beta1" -}}
{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "networking.k8s.io/v1" -}}
{{- end }}
{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "extensions/v1beta1" -}}
{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for RBAC resources.
*/}}
{{- define "common.capabilities.rbac.apiVersion" -}}
{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "rbac.authorization.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "rbac.authorization.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for CRDs.
*/}}
{{- define "common.capabilities.crd.apiVersion" -}}
{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "apiextensions.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "apiextensions.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for APIService.
*/}}
{{- define "common.capabilities.apiService.apiVersion" -}}
{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "apiregistration.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "apiregistration.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for Horizontal Pod Autoscaler.
*/}}
{{- define "common.capabilities.hpa.apiVersion" -}}
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
{{- if .beta2 -}}
{{- print "autoscaling/v2beta2" -}}
{{- else -}}
{{- print "autoscaling/v2beta1" -}}
{{- end -}}
{{- else -}}
{{- print "autoscaling/v2" -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if the used Helm version is 3.3+.
A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
**To be removed when the catalog's minimum Helm version is 3.3**
*/}}
{{- define "common.capabilities.supportsHelmVersion" -}}
{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
{{- true -}}
{{- end -}}
{{- end -}}
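An illustrative consumption sketch: placing the capability helper at the top of a manifest keeps the rendered `apiVersion` in sync with the target cluster:
```console
# templates/ingress.yaml (illustrative)
apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }}
kind: Ingress
# Rendered against Kubernetes 1.25 this prints "networking.k8s.io/v1";
# against Kubernetes 1.18 it prints "networking.k8s.io/v1beta1".
```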


@@ -0,0 +1,23 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Throw an error when upgrading using empty password values that must not be empty.
Usage:
{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
Required password params:
- validationErrors - String - Required. List of validation strings to be returned; if empty, no error is thrown.
- context - Context - Required. Parent context.
*/}}
{{- define "common.errors.upgrade.passwords.empty" -}}
{{- $validationErrors := join "" .validationErrors -}}
{{- if and $validationErrors .context.Release.IsUpgrade -}}
{{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
{{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}}
{{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}}
{{- $errorString = print $errorString "\n%s" -}}
{{- printf $errorString $validationErrors | fail -}}
{{- end -}}
{{- end -}}
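Based on the template above, an upgrade that leaves a required password empty would fail with output along these lines (a sketch; the trailing validation text comes from `common.validations.values.single.empty`):
```console
$ helm upgrade test mychart
Error: UPGRADE FAILED: execution error at (mychart/templates/secret.yaml):
PASSWORDS ERROR: You must provide your current passwords when upgrading the release.
    Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims.
    Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases
```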


@@ -0,0 +1,76 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper image name
{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }}
*/}}
{{- define "common.images.image" -}}
{{- $registryName := .imageRoot.registry -}}
{{- $repositoryName := .imageRoot.repository -}}
{{- $separator := ":" -}}
{{- $termination := .imageRoot.tag | toString -}}
{{- if .global }}
{{- if .global.imageRegistry }}
{{- $registryName = .global.imageRegistry -}}
{{- end -}}
{{- end -}}
{{- if .imageRoot.digest }}
{{- $separator = "@" -}}
{{- $termination = .imageRoot.digest | toString -}}
{{- end -}}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead)
{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }}
*/}}
{{- define "common.images.pullSecrets" -}}
{{- $pullSecrets := list }}
{{- if .global }}
{{- range .global.imagePullSecrets -}}
{{- $pullSecrets = append $pullSecrets . -}}
{{- end -}}
{{- end -}}
{{- range .images -}}
{{- range .pullSecrets -}}
{{- $pullSecrets = append $pullSecrets . -}}
{{- end -}}
{{- end -}}
{{- if (not (empty $pullSecrets)) }}
imagePullSecrets:
{{- range $pullSecrets }}
  - name: {{ . }}
{{- end }}
{{- end }}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names evaluating values as templates
{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }}
*/}}
{{- define "common.images.renderPullSecrets" -}}
{{- $pullSecrets := list }}
{{- $context := .context }}
{{- if $context.Values.global }}
{{- range $context.Values.global.imagePullSecrets -}}
{{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
{{- end -}}
{{- end -}}
{{- range .images -}}
{{- range .pullSecrets -}}
{{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}}
{{- end -}}
{{- end -}}
{{- if (not (empty $pullSecrets)) }}
imagePullSecrets:
{{- range $pullSecrets }}
  - name: {{ . }}
{{- end }}
{{- end }}
{{- end -}}
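A sketch of how a chart would consume these image helpers, assuming the usual Bitnami-style image block under `.Values.image`:
```console
# values.yaml (illustrative)
image:
  registry: docker.io
  repository: bitnami/nginx
  tag: 1.23.1
  pullSecrets: []

# templates/deployment.yaml (illustrative)
      containers:
        - name: main
          image: {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
# Renders as docker.io/bitnami/nginx:1.23.1; with .Values.image.digest set,
# the separator switches to "@" and the digest replaces the tag.
```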


@@ -0,0 +1,68 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Generate backend entry that is compatible with all Kubernetes API versions.
Usage:
{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }}
Params:
- serviceName - String. Name of an existing service backend
- servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer.
- context - Dict - Required. The context for the template evaluation.
*/}}
{{- define "common.ingress.backend" -}}
{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}}
{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}}
serviceName: {{ .serviceName }}
servicePort: {{ .servicePort }}
{{- else -}}
service:
  name: {{ .serviceName }}
  port:
    {{- if typeIs "string" .servicePort }}
    name: {{ .servicePort }}
    {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }}
    number: {{ .servicePort | int }}
    {{- end }}
{{- end -}}
{{- end -}}
{{/*
Print "true" if the API pathType field is supported
Usage:
{{ include "common.ingress.supportsPathType" . }}
*/}}
{{- define "common.ingress.supportsPathType" -}}
{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}}
{{- print "false" -}}
{{- else -}}
{{- print "true" -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if the ingressClassname field is supported
Usage:
{{ include "common.ingress.supportsIngressClassname" . }}
*/}}
{{- define "common.ingress.supportsIngressClassname" -}}
{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "false" -}}
{{- else -}}
{{- print "true" -}}
{{- end -}}
{{- end -}}
{{/*
Return true if cert-manager required annotations for TLS signed
certificates are set in the Ingress annotations
Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
Usage:
{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }}
*/}}
{{- define "common.ingress.certManagerRequest" -}}
{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }}
{{- true -}}
{{- end -}}
{{- end -}}
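An illustrative path entry consuming `common.ingress.backend`; the `my-svc` service name and `http` port name are assumptions for the example:
```console
# templates/ingress.yaml (illustrative)
          paths:
            - path: /
              {{- if eq "true" (include "common.ingress.supportsPathType" .) }}
              pathType: ImplementationSpecific
              {{- end }}
              backend: {{- include "common.ingress.backend" (dict "serviceName" "my-svc" "servicePort" "http" "context" $) | nindent 16 }}
```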


@@ -0,0 +1,18 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Kubernetes standard labels
*/}}
{{- define "common.labels.standard" -}}
app.kubernetes.io/name: {{ include "common.names.name" . }}
helm.sh/chart: {{ include "common.names.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "common.labels.matchLabels" -}}
app.kubernetes.io/name: {{ include "common.names.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
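A typical consumption sketch: the full standard labels go on object metadata, while only the immutable subset goes on selectors:
```console
# templates/deployment.yaml (illustrative)
metadata:
  labels: {{- include "common.labels.standard" . | nindent 4 }}
spec:
  selector:
    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
```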


@@ -0,0 +1,70 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "common.names.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "common.names.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "common.names.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create a default fully qualified dependency name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Usage:
{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }}
*/}}
{{- define "common.names.dependency.fullname" -}}
{{- if .chartValues.fullnameOverride -}}
{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .chartName .chartValues.nameOverride -}}
{{- if contains $name .context.Release.Name -}}
{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
*/}}
{{- define "common.names.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{/*
Create a fully qualified app name adding the installation's namespace.
*/}}
{{- define "common.names.fullname.namespace" -}}
{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}}
{{- end -}}
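Concretely, per the logic above: for a chart named `mychart`, `helm install test mychart` gives `common.names.fullname` = `test-mychart`, while `helm install mychart-prod mychart` gives `mychart-prod`, because the release name already contains the chart name. An illustrative consumption:
```console
# templates/service.yaml (illustrative)
metadata:
  name: {{ include "common.names.fullname" . }}
  namespace: {{ include "common.names.namespace" . | quote }}
```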


@@ -0,0 +1,140 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Generate secret name.
Usage:
{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
Params:
- existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
- defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
- context - Dict - Required. The context for the template evaluation.
*/}}
{{- define "common.secrets.name" -}}
{{- $name := (include "common.names.fullname" .context) -}}
{{- if .defaultNameSuffix -}}
{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- with .existingSecret -}}
{{- if not (typeIs "string" .) -}}
{{- with .name -}}
{{- $name = . -}}
{{- end -}}
{{- else -}}
{{- $name = . -}}
{{- end -}}
{{- end -}}
{{- printf "%s" $name -}}
{{- end -}}
{{/*
Generate secret key.
Usage:
{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
Params:
- existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user
to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility.
+info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret
- key - String - Required. Name of the key in the secret.
*/}}
{{- define "common.secrets.key" -}}
{{- $key := .key -}}
{{- if .existingSecret -}}
{{- if not (typeIs "string" .existingSecret) -}}
{{- if .existingSecret.keyMapping -}}
{{- $key = index .existingSecret.keyMapping $.key -}}
{{- end -}}
{{- end }}
{{- end -}}
{{- printf "%s" $key -}}
{{- end -}}
{{/*
Generate secret password or retrieve one if already created.
Usage:
{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
Params:
- secret - String - Required - Name of the 'Secret' resource where the password is stored.
- key - String - Required - Name of the key in the secret.
- providedValues - List<String> - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
- length - int - Optional - Length of the generated random password.
- strong - Boolean - Optional - Whether to add symbols to the generated random password.
- chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
- context - Context - Required - Parent context.
The order in which this function returns a secret password:
1. Already existing 'Secret' resource
(If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
2. Password provided via the values.yaml
(If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
3. Randomly generated secret password
(A new random secret password with the length specified in the 'length' parameter will be generated and returned)
*/}}
{{- define "common.secrets.passwords.manage" -}}
{{- $password := "" }}
{{- $subchart := "" }}
{{- $chartName := default "" .chartName }}
{{- $passwordLength := default 10 .length }}
{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }}
{{- if $secretData }}
{{- if hasKey $secretData .key }}
{{- $password = index $secretData .key | quote }}
{{- else }}
{{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
{{- end -}}
{{- else if $providedPasswordValue }}
{{- $password = $providedPasswordValue | toString | b64enc | quote }}
{{- else }}
{{- if .context.Values.enabled }}
{{- $subchart = $chartName }}
{{- end -}}
{{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
{{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
{{- $passwordValidationErrors := list $requiredPasswordError -}}
{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
{{- if .strong }}
{{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
{{- $password = randAscii $passwordLength }}
{{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
{{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
{{- else }}
{{- $password = randAlphaNum $passwordLength | b64enc | quote }}
{{- end }}
{{- end -}}
{{- printf "%s" $password -}}
{{- end -}}
{{/*
Returns whether a previous generated secret already exists
Usage:
{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
Params:
- secret - String - Required - Name of the 'Secret' resource where the password is stored.
- context - Context - Required - Parent context.
*/}}
{{- define "common.secrets.exists" -}}
{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }}
{{- if $secret }}
{{- true -}}
{{- end -}}
{{- end -}}
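A sketch of wiring `common.secrets.passwords.manage` into a Secret template; the `auth.password` values path is an assumption for the example. Note the helper already returns a base64-encoded, quoted value:
```console
# templates/secret.yaml (illustrative)
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "common.names.fullname" . }}
type: Opaque
data:
  password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "password" "providedValues" (list "auth.password") "length" 16 "context" $) }}
```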


@@ -0,0 +1,23 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Return the proper Storage Class
{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
*/}}
{{- define "common.storage.class" -}}
{{- $storageClass := .persistence.storageClass -}}
{{- if .global -}}
{{- if .global.storageClass -}}
{{- $storageClass = .global.storageClass -}}
{{- end -}}
{{- end -}}
{{- if $storageClass -}}
{{- if (eq "-" $storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" $storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
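Illustrative use inside a PVC spec; per the logic above, the `-` sentinel renders an empty `storageClassName`, which disables dynamic provisioning:
```console
# templates/pvc.yaml (illustrative)
spec:
  {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }}
# persistence.storageClass: "-"   -> storageClassName: ""
# persistence.storageClass: "gp2" -> storageClassName: gp2
# persistence.storageClass unset  -> no storageClassName line is emitted
```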


@@ -0,0 +1,13 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Renders a value that contains template.
Usage:
{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "common.tplvalues.render" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}
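A sketch of rendering a values entry that itself contains template syntax, e.g. an annotation referencing the release name; the `podAnnotations` key is an assumption for the example:
```console
# values.yaml (illustrative)
podAnnotations:
  backup/target: '{{ .Release.Name }}-data'

# templates/deployment.yaml (illustrative)
  annotations: {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 4 }}
# For release "test" this renders: backup/target: test-data
```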


@@ -0,0 +1,62 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Print instructions to get a secret value.
Usage:
{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
*/}}
{{- define "common.utils.secret.getvalue" -}}
{{- $varname := include "common.utils.fieldToEnvVar" . -}}
export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d)
{{- end -}}
{{/*
Build env var name given a field
Usage:
{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }}
*/}}
{{- define "common.utils.fieldToEnvVar" -}}
{{- $fieldNameSplit := splitList "-" .field -}}
{{- $upperCaseFieldNameSplit := list -}}
{{- range $fieldNameSplit -}}
{{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
{{- end -}}
{{ join "_" $upperCaseFieldNameSplit }}
{{- end -}}
{{/*
Gets a value from .Values given its key path
Usage:
{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
*/}}
{{- define "common.utils.getValueFromKey" -}}
{{- $splitKey := splitList "." .key -}}
{{- $value := "" -}}
{{- $latestObj := $.context.Values -}}
{{- range $splitKey -}}
{{- if not $latestObj -}}
{{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
{{- end -}}
{{- $value = ( index $latestObj . ) -}}
{{- $latestObj = $value -}}
{{- end -}}
{{- printf "%v" (default "" $value) -}}
{{- end -}}
{{/*
Returns the first .Values key with a defined value, or the first key of the list if none are defined
Usage:
{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
*/}}
{{- define "common.utils.getKeyFromList" -}}
{{- $key := first .keys -}}
{{- $reverseKeys := reverse .keys }}
{{- range $reverseKeys }}
{{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
{{- if $value -}}
{{- $key = . }}
{{- end -}}
{{- end -}}
{{- printf "%s" $key -}}
{{- end -}}
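Grounded in the definitions above, `common.utils.fieldToEnvVar` maps `admin-password` to `ADMIN_PASSWORD`, so a NOTES.txt line like the following prints a ready-to-copy export command (the secret name `my-secret` is an assumption):
```console
{{ include "common.utils.secret.getvalue" (dict "secret" "my-secret" "field" "admin-password" "context" $) }}
# Prints (for namespace "default"):
# export ADMIN_PASSWORD=$(kubectl get secret --namespace "default" my-secret -o jsonpath="{.data.admin-password}" | base64 -d)
```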


@@ -0,0 +1,14 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Warning about using rolling tag.
Usage:
{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
*/}}
{{- define "common.warnings.rollingTag" -}}
{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
{{- end }}
{{- end -}}
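For instance, with `image.repository: bitnami/redis` and `image.tag: latest` (an illustrative values block), including the helper from NOTES.txt emits the warning, whereas an immutable tag ending in `-r<N>` or a `sha256:` digest does not:
```console
# templates/NOTES.txt (illustrative)
{{- include "common.warnings.rollingTag" .Values.image }}
```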


@@ -0,0 +1,72 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Validate Cassandra required passwords are not empty.
Usage:
{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
Params:
- secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
- subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
*/}}
{{- define "common.validations.values.cassandra.passwords" -}}
{{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
{{- $enabled := include "common.cassandra.values.enabled" . -}}
{{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
{{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
{{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
{{- $requiredPasswords := list -}}
{{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
{{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
{{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
{{- end -}}
{{- end -}}
{{/*
Auxiliary function to get the right value for existingSecret.
Usage:
{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
Params:
- subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
*/}}
{{- define "common.cassandra.values.existingSecret" -}}
{{- if .subchart -}}
{{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
{{- else -}}
{{- .context.Values.dbUser.existingSecret | quote -}}
{{- end -}}
{{- end -}}
{{/*
Auxiliary function to get the right value for enabled cassandra.
Usage:
{{ include "common.cassandra.values.enabled" (dict "context" $) }}
*/}}
{{- define "common.cassandra.values.enabled" -}}
{{- if .subchart -}}
{{- printf "%v" .context.Values.cassandra.enabled -}}
{{- else -}}
{{- printf "%v" (not .context.Values.enabled) -}}
{{- end -}}
{{- end -}}
{{/*
Auxiliary function to get the right value for the key dbUser
Usage:
{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
Params:
- subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
*/}}
{{- define "common.cassandra.values.key.dbUser" -}}
{{- if .subchart -}}
cassandra.dbUser
{{- else -}}
dbUser
{{- end -}}
{{- end -}}
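An illustrative call from a parent chart that bundles Cassandra as a subchart; using `common.names.fullname` as the secret name is an assumption:
```console
# templates/NOTES.txt (illustrative)
{{- include "common.validations.values.cassandra.passwords" (dict "secret" (include "common.names.fullname" .) "subchart" true "context" $) -}}
```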


@@ -0,0 +1,103 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Validate MariaDB required passwords are not empty.
Usage:
{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
Params:
- secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
- subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
*/}}
{{- define "common.validations.values.mariadb.passwords" -}}
{{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
{{- $enabled := include "common.mariadb.values.enabled" . -}}
{{- $architecture := include "common.mariadb.values.architecture" . -}}
{{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
{{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
{{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
{{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
{{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
{{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
{{- $requiredPasswords := list -}}
{{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
{{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
{{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
{{- if not (empty $valueUsername) -}}
{{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
{{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
{{- end -}}
{{- if (eq $architecture "replication") -}}
{{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
{{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
{{- end -}}
{{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
{{- end -}}
{{- end -}}
{{/*
Auxiliary function to get the right value for existingSecret.
Usage:
{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
Params:
- subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
*/}}
{{- define "common.mariadb.values.auth.existingSecret" -}}
{{- if .subchart -}}
{{- .context.Values.mariadb.auth.existingSecret | quote -}}
{{- else -}}
{{- .context.Values.auth.existingSecret | quote -}}
{{- end -}}
{{- end -}}
{{/*
Auxiliary function to get the right value for enabled mariadb.
Usage:
{{ include "common.mariadb.values.enabled" (dict "context" $) }}
*/}}
{{- define "common.mariadb.values.enabled" -}}
{{- if .subchart -}}
{{- printf "%v" .context.Values.mariadb.enabled -}}
{{- else -}}
{{- printf "%v" (not .context.Values.enabled) -}}
{{- end -}}
{{- end -}}
{{/*
Auxiliary function to get the right value for architecture
Usage:
{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
Params:
- subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
*/}}
{{- define "common.mariadb.values.architecture" -}}
{{- if .subchart -}}
{{- .context.Values.mariadb.architecture -}}
{{- else -}}
{{- .context.Values.architecture -}}
{{- end -}}
{{- end -}}
{{/*
Auxiliary function to get the right value for the key auth
Usage:
{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
Params:
- subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
*/}}
{{- define "common.mariadb.values.key.auth" -}}
{{- if .subchart -}}
mariadb.auth
{{- else -}}
auth
{{- end -}}
{{- end -}}
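Similarly for MariaDB; per the logic above, with `architecture: replication` the helper additionally requires `auth.replicationPassword`. An illustrative call (secret name is an assumption):
```console
# templates/NOTES.txt (illustrative)
{{- include "common.validations.values.mariadb.passwords" (dict "secret" (include "common.names.fullname" .) "subchart" true "context" $) -}}
```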

Some files were not shown because too many files have changed in this diff.