Mirror of https://github.com/openappsec/openappsec.git — synced 2025-11-16 17:31:52 +03:00

Compare commits

12 Commits

| SHA1 |
|---|
| 2da3dc4052 |
| 4084cb4f6d |
| 3eb17167cb |
| b2781d63d4 |
| 7f712b21e4 |
| 331c24acb0 |
| bc3359a1c1 |
| cec0276df1 |
| 3061342b45 |
| 0869b8f24d |
| 1a4ab5f0d7 |
| 4a2d25ab65 |
@@ -1,7 +1,12 @@
 cmake_minimum_required (VERSION 2.8.4)
 project (ngen)

-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-terminate -Dalpine")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall -Wno-terminate")
+
+execute_process(COMMAND grep -c "Alpine Linux" /etc/os-release OUTPUT_VARIABLE IS_ALPINE)
+if(IS_ALPINE EQUAL "1")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Dalpine")
+endif()

 find_package(Boost REQUIRED)
 find_package(ZLIB REQUIRED)
@@ -67,7 +67,7 @@ For Linux (NGINX or Kong) using the installer (list of supported/pre-compiled NG

 ```bash
 $ wget https://downloads.openappsec.io/open-appsec-install && chmod +x open-appsec-install
-$ ./open-appsec-install –auto
+$ ./open-appsec-install --auto
 ```

 For Linux, if you’ve built your own package use the following commands:
@@ -32,5 +32,6 @@ DEFINE_KDEBUG_FLAG(statelessValidation)
 DEFINE_KDEBUG_FLAG(kernelMetric)
 DEFINE_KDEBUG_FLAG(tproxy)
 DEFINE_KDEBUG_FLAG(tenantStats)
+DEFINE_KDEBUG_FLAG(uuidTranslation)

 #endif // DEFINE_KDEBUG_FLAG
@@ -1,6 +1,5 @@
 annotations:
-  artifacthub.io/changes: |
-    - "Update Ingress-Nginx version controller-v1.9.1"
+  artifacthub.io/changes: '- "Update Ingress-Nginx version controller-v1.9.4"'
   artifacthub.io/prerelease: "false"
 apiVersion: v2
 appVersion: latest
@@ -11,4 +10,4 @@ kubeVersion: '>=1.20.0-0'
 name: open-appsec-k8s-nginx-ingress
 sources:
 - https://github.com/kubernetes/ingress-nginx
-version: 4.8.1
+version: 4.8.3
@@ -2,7 +2,7 @@

 [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer

- 
+ 

 To use, add `ingressClassName: nginx` spec field or the `kubernetes.io/ingress.class: nginx` annotation to your Ingress resources.
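The README line above tells users to select this controller from their Ingress resources; a minimal sketch of such an Ingress, using the preferred `ingressClassName: nginx` spec field, with hypothetical host and Service names:

```yaml
# Minimal Ingress selecting the nginx ingress class (names are illustrative).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress
spec:
  ingressClassName: nginx        # selects this controller (preferred over the legacy annotation)
  rules:
    - host: app.example.com      # hypothetical host
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example-service   # hypothetical backend Service
                port:
                  number: 80
```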
@@ -251,11 +251,11 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu
|
||||
| controller.admissionWebhooks.namespaceSelector | object | `{}` | |
|
||||
| controller.admissionWebhooks.objectSelector | object | `{}` | |
|
||||
| controller.admissionWebhooks.patch.enabled | bool | `true` | |
|
||||
| controller.admissionWebhooks.patch.image.digest | string | `"sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b"` | |
|
||||
| controller.admissionWebhooks.patch.image.digest | string | `"sha256:a7943503b45d552785aa3b5e457f169a5661fb94d82b8a3373bcd9ebaf9aac80"` | |
|
||||
| controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | |
|
||||
| controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | |
|
||||
| controller.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | |
|
||||
| controller.admissionWebhooks.patch.image.tag | string | `"v20230407"` | |
|
||||
| controller.admissionWebhooks.patch.image.tag | string | `"v20231011-8b53cabe0"` | |
|
||||
| controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources |
|
||||
| controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | |
|
||||
| controller.admissionWebhooks.patch.podAnnotations | object | `{}` | |
|
||||
@@ -314,13 +314,13 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu
|
||||
| controller.hostname | object | `{}` | Optionally customize the pod hostname. |
|
||||
| controller.image.allowPrivilegeEscalation | bool | `true` | |
|
||||
| controller.image.chroot | bool | `false` | |
|
||||
| controller.image.digest | string | `"sha256:605a737877de78969493a4b1213b21de4ee425d2926906857b98050f57a95b25"` | |
|
||||
| controller.image.digestChroot | string | `"sha256:2ac744ef08850ee86ad7162451a6879f47c1a41c6a757f6b6f913c52103b8836"` | |
|
||||
| controller.image.digest | string | `"sha256:5b161f051d017e55d358435f295f5e9a297e66158f136321d9b04520ec6c48a3"` | |
|
||||
| controller.image.digestChroot | string | `"sha256:5976b1067cfbca8a21d0ba53d71f83543a73316a61ea7f7e436d6cf84ddf9b26"` | |
|
||||
| controller.image.image | string | `"ingress-nginx/controller"` | |
|
||||
| controller.image.pullPolicy | string | `"IfNotPresent"` | |
|
||||
| controller.image.registry | string | `"registry.k8s.io"` | |
|
||||
| controller.image.runAsUser | int | `101` | |
|
||||
| controller.image.tag | string | `"v1.9.1"` | |
|
||||
| controller.image.tag | string | `"v1.9.4"` | |
|
||||
| controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation |
|
||||
| controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). |
|
||||
| controller.ingressClassResource.controllerValue | string | `"k8s.io/ingress-nginx"` | Controller-value of the controller that is processing this ingressClass |
|
||||
@@ -498,6 +498,7 @@ As of version `1.26.0` of this chart, by simply not providing any clusterIP valu
|
||||
| defaultBackend.updateStrategy | object | `{}` | The update strategy to apply to the Deployment or DaemonSet # |
|
||||
| dhParam | string | `""` | A base64-encoded Diffie-Hellman parameter. This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` # Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param |
|
||||
| imagePullSecrets | list | `[]` | Optional array of imagePullSecrets containing private registry credentials # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ |
|
||||
| namespaceOverride | string | `""` | Override the deployment namespace; defaults to .Release.Namespace |
|
||||
| podSecurityPolicy.enabled | bool | `false` | |
|
||||
| portNamePrefix | string | `""` | Prefix for TCP and UDP ports names in ingress controller service # Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration |
|
||||
| rbac.create | bool | `true` | |
|
||||
|
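The `dhParam` row in the table above documents supplying a base64-encoded Diffie-Hellman parameter generated with `openssl dhparam 4096 2> /dev/null | base64`. A minimal values sketch; the value shown is a placeholder, not real DH parameters:

```yaml
# values.yaml sketch — paste the base64 output of:
#   openssl dhparam 4096 2> /dev/null | base64
dhParam: "<base64-output-of-openssl-dhparam>"   # placeholder, replace with real output
```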
||||
@@ -0,0 +1,10 @@
+# Changelog
+
+This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
+
+### 4.8.2
+
+* - "update nginx base, httpbun, e2e, helm webhook cert gen (#10506)"
+* - "Update Ingress-Nginx version controller-v1.9.3"
+
+**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.8.1...helm-chart-4.8.2
@@ -0,0 +1,8 @@
+# Changelog
+
+This file documents all notable changes to [ingress-nginx](https://github.com/kubernetes/ingress-nginx) Helm Chart. The release numbering uses [semantic versioning](http://semver.org).
+
+### 4.8.3
+* Update Ingress-Nginx version controller-v1.9.4
+
+**Full Changelog**: https://github.com/kubernetes/ingress-nginx/compare/helm-chart-4.8.2...helm-chart-4.8.3
@@ -30,6 +30,17 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
 {{- end -}}
 {{- end -}}

+{{/*
+Allow the release namespace to be overridden for multi-namespace deployments in combined charts
+*/}}
+{{- define "ingress-nginx.namespace" -}}
+{{- if .Values.namespaceOverride -}}
+{{- .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- .Release.Namespace -}}
+{{- end -}}
+{{- end -}}
+
 {{/*
 Container SecurityContext.
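The `ingress-nginx.namespace` helper added above resolves to `namespaceOverride` when it is set and otherwise falls back to `.Release.Namespace`. A minimal values sketch showing how a combined chart would drive it; the namespace name is illustrative:

```yaml
# values.yaml sketch — templates that call "ingress-nginx.namespace" render into
# this namespace instead of the Helm release namespace.
namespaceOverride: "ingress-system"
```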
||||
@@ -6,7 +6,7 @@ apiVersion: cert-manager.io/v1
|
||||
kind: Issuer
|
||||
metadata:
|
||||
name: {{ include "ingress-nginx.fullname" . }}-self-signed-issuer
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
selfSigned: {}
|
||||
---
|
||||
@@ -15,7 +15,7 @@ apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: {{ include "ingress-nginx.fullname" . }}-root-cert
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
secretName: {{ include "ingress-nginx.fullname" . }}-root-cert
|
||||
duration: {{ .Values.controller.admissionWebhooks.certManager.rootCert.duration | default "43800h0m0s" | quote }}
|
||||
@@ -32,7 +32,7 @@ apiVersion: cert-manager.io/v1
|
||||
kind: Issuer
|
||||
metadata:
|
||||
name: {{ include "ingress-nginx.fullname" . }}-root-issuer
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
ca:
|
||||
secretName: {{ include "ingress-nginx.fullname" . }}-root-cert
|
||||
@@ -43,7 +43,7 @@ apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: {{ include "ingress-nginx.fullname" . }}-admission
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
secretName: {{ include "ingress-nginx.fullname" . }}-admission
|
||||
duration: {{ .Values.controller.admissionWebhooks.certManager.admissionCert.duration | default "8760h0m0s" | quote }}
|
||||
@@ -55,8 +55,8 @@ spec:
|
||||
{{- end }}
|
||||
dnsNames:
|
||||
- {{ include "ingress-nginx.controller.fullname" . }}-admission
|
||||
- {{ include "ingress-nginx.controller.fullname" . }}-admission.{{ .Release.Namespace }}
|
||||
- {{ include "ingress-nginx.controller.fullname" . }}-admission.{{ .Release.Namespace }}.svc
|
||||
- {{ include "ingress-nginx.controller.fullname" . }}-admission.{{ include "ingress-nginx.namespace" . }}
|
||||
- {{ include "ingress-nginx.controller.fullname" . }}-admission.{{ include "ingress-nginx.namespace" . }}.svc
|
||||
subject:
|
||||
organizations:
|
||||
- ingress-nginx-admission
|
||||
|
||||
@@ -19,5 +19,5 @@ roleRef:
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "ingress-nginx.fullname" . }}-admission
|
||||
namespace: {{ .Release.Namespace | quote }}
|
||||
namespace: {{ (include "ingress-nginx.namespace" .) | quote }}
|
||||
{{- end }}
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ include "ingress-nginx.fullname" . }}-admission-create
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ include "ingress-nginx.fullname" . }}-admission-patch
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-install,post-upgrade
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: {{ include "ingress-nginx.fullname" . }}-admission
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "ingress-nginx.fullname" . }}-admission
|
||||
namespace: {{ .Release.Namespace }}
|
||||
name: {{ include "ingress-nginx.fullname" . }}-admission
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "ingress-nginx.fullname" . }}-admission
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
@@ -20,5 +20,5 @@ roleRef:
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "ingress-nginx.fullname" . }}-admission
|
||||
namespace: {{ .Release.Namespace | quote }}
|
||||
namespace: {{ (include "ingress-nginx.namespace" .) | quote }}
|
||||
{{- end }}
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "ingress-nginx.fullname" . }}-admission
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
|
||||
@@ -38,7 +38,7 @@ webhooks:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
namespace: {{ .Release.Namespace | quote }}
|
||||
namespace: {{ (include "ingress-nginx.namespace" .) | quote }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}-admission
|
||||
path: /networking/v1/ingresses
|
||||
{{- if .Values.controller.admissionWebhooks.timeoutSeconds }}
|
||||
|
||||
@@ -18,7 +18,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
{{- if .Values.controller.annotations }}
|
||||
annotations: {{ toYaml .Values.controller.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -15,5 +15,5 @@ roleRef:
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "ingress-nginx.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace | quote }}
|
||||
namespace: {{ (include "ingress-nginx.namespace" .) | quote }}
|
||||
{{- end }}
|
||||
|
||||
@@ -9,6 +9,6 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.fullname" . }}-custom-add-headers
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
data: {{ toYaml .Values.controller.addHeaders | nindent 2 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -9,6 +9,6 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.fullname" . }}-custom-proxy-headers
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
data: {{ toYaml .Values.controller.proxySetHeaders | nindent 2 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -12,6 +12,6 @@ metadata:
|
||||
annotations: {{ toYaml .Values.controller.tcp.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.fullname" . }}-tcp
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
data: {{ tpl (toYaml .Values.tcp) . | nindent 2 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -12,6 +12,6 @@ metadata:
|
||||
annotations: {{ toYaml .Values.controller.udp.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.fullname" . }}-udp
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
data: {{ tpl (toYaml .Values.udp) . | nindent 2 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -11,17 +11,17 @@ metadata:
|
||||
annotations: {{ toYaml .Values.controller.configAnnotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
data:
|
||||
allow-snippet-annotations: "{{ .Values.controller.allowSnippetAnnotations }}"
|
||||
{{- if .Values.controller.addHeaders }}
|
||||
add-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers
|
||||
add-headers: {{ include "ingress-nginx.namespace" . }}/{{ include "ingress-nginx.fullname" . }}-custom-add-headers
|
||||
{{- end }}
|
||||
{{- if .Values.controller.proxySetHeaders }}
|
||||
proxy-set-headers: {{ .Release.Namespace }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers
|
||||
proxy-set-headers: {{ include "ingress-nginx.namespace" . }}/{{ include "ingress-nginx.fullname" . }}-custom-proxy-headers
|
||||
{{- end }}
|
||||
{{- if .Values.dhParam }}
|
||||
ssl-dh-param: {{ .Release.Namespace }}/{{ include "ingress-nginx.controller.fullname" . }}
|
||||
ssl-dh-param: {{ include "ingress-nginx.namespace" . }}/{{ include "ingress-nginx.controller.fullname" . }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.controller.config }}
|
||||
{{- $key | nindent 2 }}: {{ $value | quote }}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{{- if and (eq .Values.kind "Vanilla") (or (eq .Values.controller.kind "DaemonSet") (eq .Values.controller.kind "Both")) -}}
|
||||
{{- if and (eq .Values.kind "Vanilla") (eq .Values.controller.kind "DaemonSet") -}}
|
||||
{{- include "isControllerTagValid" . -}}
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
@@ -10,7 +10,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
{{- if .Values.controller.annotations }}
|
||||
annotations: {{ toYaml .Values.controller.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{{- if and (eq .Values.kind "Vanilla") (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}}
|
||||
{{- if and (eq .Values.kind "Vanilla") (eq .Values.controller.kind "Deployment") -}}
|
||||
{{- include "isControllerTagValid" . -}}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@@ -10,7 +10,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
{{- if .Values.controller.annotations }}
|
||||
annotations: {{ toYaml .Values.controller.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{{- if and (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) .Values.controller.autoscaling.enabled (not .Values.controller.keda.enabled) -}}
|
||||
{{- if and (eq .Values.controller.kind "Deployment") .Values.controller.autoscaling.enabled (not .Values.controller.keda.enabled) -}}
|
||||
apiVersion: {{ ternary "autoscaling/v2" "autoscaling/v2beta2" (.Capabilities.APIVersions.Has "autoscaling/v2") }}
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
@@ -12,7 +12,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{{- if and .Values.controller.keda.enabled (or (eq .Values.controller.kind "Deployment") (eq .Values.controller.kind "Both")) -}}
|
||||
{{- if and .Values.controller.keda.enabled (eq .Values.controller.kind "Deployment") -}}
|
||||
# https://keda.sh/docs/
|
||||
|
||||
apiVersion: {{ .Values.controller.keda.apiVersion }}
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
{{- if .Values.controller.annotations }}
|
||||
annotations: {{ toYaml .Values.controller.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -17,5 +17,5 @@ roleRef:
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "ingress-nginx.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace | quote }}
|
||||
namespace: {{ (include "ingress-nginx.namespace" .) | quote }}
|
||||
{{- end }}
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
data:
|
||||
dhparam.pem: {{ .Values.dhParam }}
|
||||
{{- end }}
|
||||
|
||||
@@ -13,7 +13,7 @@ metadata:
|
||||
{{- toYaml .Values.controller.service.labels | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}-internal
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
type: "{{ .Values.controller.service.type }}"
|
||||
{{- if .Values.controller.service.internal.loadBalancerIP }}
|
||||
|
||||
@@ -12,7 +12,7 @@ metadata:
|
||||
{{- toYaml .Values.controller.metrics.service.labels | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}-metrics
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
type: {{ .Values.controller.metrics.service.type }}
|
||||
{{- if .Values.controller.metrics.service.clusterIP }}
|
||||
|
||||
@@ -12,7 +12,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}-admission
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
type: {{ .Values.controller.admissionWebhooks.service.type }}
|
||||
{{- if .Values.controller.admissionWebhooks.service.clusterIP }}
|
||||
|
||||
@@ -13,7 +13,7 @@ metadata:
|
||||
{{- toYaml .Values.controller.service.labels | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.controller.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
type: {{ .Values.controller.service.type }}
|
||||
{{- if .Values.controller.service.clusterIP }}
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "ingress-nginx.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
{{- if .Values.serviceAccount.annotations }}
|
||||
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -6,7 +6,7 @@ metadata:
|
||||
{{- if .Values.controller.metrics.serviceMonitor.namespace }}
|
||||
namespace: {{ .Values.controller.metrics.serviceMonitor.namespace | quote }}
|
||||
{{- else }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "ingress-nginx.labels" . | nindent 4 }}
|
||||
@@ -35,7 +35,7 @@ spec:
|
||||
{{- else }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
- {{ include "ingress-nginx.namespace" . }}
|
||||
{{- end }}
|
||||
{{- if .Values.controller.metrics.serviceMonitor.targetLabels }}
|
||||
targetLabels:
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
|
||||
@@ -12,7 +12,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
|
||||
@@ -10,7 +10,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.fullname" . }}-backend
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
rules:
|
||||
- apiGroups: [{{ template "podSecurityPolicy.apiGroup" . }}]
|
||||
resources: ['podsecuritypolicies']
|
||||
|
||||
@@ -9,7 +9,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.fullname" . }}-backend
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -17,5 +17,5 @@ roleRef:
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace | quote }}
|
||||
namespace: {{ (include "ingress-nginx.namespace" .) | quote }}
|
||||
{{- end }}
|
||||
|
||||
@@ -12,7 +12,7 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "ingress-nginx.defaultBackend.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
spec:
|
||||
type: {{ .Values.defaultBackend.service.type }}
|
||||
{{- if .Values.defaultBackend.service.clusterIP }}
|
||||
|
||||
@@ -9,6 +9,6 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "ingress-nginx.defaultBackend.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
namespace: {{ include "ingress-nginx.namespace" . }}
|
||||
automountServiceAccountToken: {{ .Values.defaultBackend.serviceAccount.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
|
||||
@@ -7,6 +7,9 @@
|
||||
# nameOverride:
|
||||
# fullnameOverride:
|
||||
|
||||
# -- Override the deployment namespace; defaults to .Release.Namespace
|
||||
namespaceOverride: ""
|
||||
|
||||
## Labels to apply to all resources
|
||||
##
|
||||
commonLabels: {}
|
||||
@@ -24,9 +27,9 @@ controller:
|
||||
## for backwards compatibility consider setting the full image url via the repository value below
|
||||
## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
|
||||
## repository:
|
||||
tag: "v1.9.1"
|
||||
digest: sha256:605a737877de78969493a4b1213b21de4ee425d2926906857b98050f57a95b25
|
||||
digestChroot: sha256:2ac744ef08850ee86ad7162451a6879f47c1a41c6a757f6b6f913c52103b8836
|
||||
tag: "v1.9.4"
|
||||
digest: sha256:5b161f051d017e55d358435f295f5e9a297e66158f136321d9b04520ec6c48a3
|
||||
digestChroot: sha256:5976b1067cfbca8a21d0ba53d71f83543a73316a61ea7f7e436d6cf84ddf9b26
|
||||
pullPolicy: IfNotPresent
|
||||
# www-data -> uid 101
|
||||
runAsUser: 101
|
||||
@@ -640,8 +643,8 @@ controller:
|
||||
## for backwards compatibility consider setting the full image url via the repository value below
|
||||
## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
|
||||
## repository:
|
||||
tag: v20230407
|
||||
digest: sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b
|
||||
tag: v20231011-8b53cabe0
|
||||
digest: sha256:a7943503b45d552785aa3b5e457f169a5661fb94d82b8a3373bcd9ebaf9aac80
|
||||
pullPolicy: IfNotPresent
|
||||
# -- Provide a priority class name to the webhook patching job
|
||||
##
|
||||
@@ -699,7 +702,7 @@ controller:
|
||||
## jobLabel: "app.kubernetes.io/name"
|
||||
namespace: ""
|
||||
namespaceSelector: {}
|
||||
## Default: scrape .Release.Namespace only
|
||||
## Default: scrape .Release.Namespace or namespaceOverride only
|
||||
## To scrape all, use the following:
|
||||
## namespaceSelector:
|
||||
## any: true
|
||||
|
||||
@@ -4,10 +4,59 @@

Nothing yet.

## 2.32.0

### Improvements

* Add new `deployment.hostname` value to make identifying instances in
  controlplane/dataplane configurations easier.
  [#943](https://github.com/Kong/charts/pull/943)

## 2.31.0

### Improvements

* Added controller's RBAC rules for `KongUpstreamPolicy` CRD.
  [#917](https://github.com/Kong/charts/pull/917)
* Added services resource to admission webhook config for KIC >= 3.0.0.
  [#919](https://github.com/Kong/charts/pull/919)
* Update default ingress controller version to v3.0
  [#929](https://github.com/Kong/charts/pull/929)
  [#930](https://github.com/Kong/charts/pull/930)

### Fixed

* The target port for cmetrics should only be applied if the ingress controller is enabled.
  [#926](https://github.com/Kong/charts/pull/926)
* Fix RBAC for Gateway API v1.
  [#928](https://github.com/Kong/charts/pull/928)
* Enable Admission webhook for Gateway API v1 resources.
  [#928](https://github.com/Kong/charts/pull/928)

## 2.30.0

### Improvements

* Prevent installing PodDisruptionBudget for `replicaCount: 1` or `autoscaling.minReplicas: 1`.
  [#896](https://github.com/Kong/charts/pull/896)
* The admission webhook now will be triggered on Secrets creation for KIC 2.12.1+.
  [#907](https://github.com/Kong/charts/pull/907)
* Container security context defaults now comply with the restricted pod
  security standard. This includes an enforced run as user ID set to 1000. UID
  1000 is used for official Kong images other than Alpine images (which use UID
  100) and for KIC images 3.0.0+ (older images use UID 65532). Images that do
  not use UID 1000 can still run with this user, as static image files are
  world-accessible and runtime-created files are created in temporary
  directories created for the run as user.
  [#911](https://github.com/Kong/charts/pull/911)
* Allow using templates (via `tpl`) when specifying `proxy.nameOverride`.
  [#914](https://github.com/Kong/charts/pull/914)

## 2.29.0

### Improvements
* Make it possible to set the admission webhook's `timeoutSeconds`.
  [#894](https://github.com/Kong/charts/pull/894)

## 2.28.1
@@ -16,6 +65,7 @@ Nothing yet.
* The admission webhook now includes Gateway API resources and Ingress
  resources for controller versions 2.12+. This version introduces new
  validations for Kong's regex path implementation.
  [#892](https://github.com/Kong/charts/pull/892)

## 2.28.0
||||
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.1.0
+appVersion: 1.1.1
 dependencies:
 - condition: postgresql.enabled
   name: postgresql
@@ -9,11 +9,9 @@ description: The Cloud-Native Ingress and API-management
 home: https://konghq.com/
 icon: https://s3.amazonaws.com/downloads.kong/universe/assets/icon-kong-inc-large.png
 maintainers:
-- email: harry@konghq.com
-  name: hbagdi
-- email: traines@konghq.com
-  name: rainest
+- email: team-k8s@konghq.com
+  name: team-k8s-bot
 name: open-appsec-kong
 sources:
 - https://github.com/Kong/charts/tree/main/charts/kong
-version: 2.29.0
+version: 2.32.0
||||
@@ -11,10 +11,10 @@ This chart bootstraps all the components needed to run Kong on a
|
||||
## TL;DR;
|
||||
|
||||
```bash
|
||||
$ helm repo add kong https://charts.konghq.com
|
||||
$ helm repo update
|
||||
helm repo add kong https://charts.konghq.com
|
||||
helm repo update
|
||||
|
||||
$ helm install kong/kong --generate-name
|
||||
helm install kong/kong --generate-name
|
||||
```
|
||||
|
||||
## Table of contents
|
||||
@@ -91,10 +91,10 @@ $ helm install kong/kong --generate-name
|
||||
To install Kong:
|
||||
|
||||
```bash
|
||||
$ helm repo add kong https://charts.konghq.com
|
||||
$ helm repo update
|
||||
helm repo add kong https://charts.konghq.com
|
||||
helm repo update
|
||||
|
||||
$ helm install kong/kong --generate-name
|
||||
helm install kong/kong --generate-name
|
||||
```
|
||||
|
||||
## Uninstall
|
||||
@@ -102,7 +102,7 @@ $ helm install kong/kong --generate-name
|
||||
To uninstall/delete a Helm release `my-release`:
|
||||
|
||||
```bash
|
||||
$ helm delete my-release
|
||||
helm delete my-release
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the
|
||||
@@ -451,6 +451,11 @@ documentation on Service
|
||||
DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/)
|
||||
for more detail.
|
||||
|
||||
If you use multiple Helm releases to manage different data plane configurations
|
||||
attached to the same control plane, setting the `deployment.hostname` field
|
||||
will help you keep track of which is which in the `/clustering/data-plane`
|
||||
endpoint.
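A minimal values sketch for the `deployment.hostname` field described above; the hostname is an illustrative label, not a chart default:

```yaml
# values.yaml sketch for one data plane release — Kong reports this hostname, and it
# shows up in the control plane's /clustering/data-plane endpoint.
deployment:
  hostname: dp-eu-west-1
```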
|
||||
|
||||
### Cert Manager Integration
|
||||
|
||||
By default, Kong will create self-signed certificates on start for its TLS
|
||||
@@ -508,9 +513,9 @@ event you need to recover from unintended CRD deletion.

 ### InitContainers

-The chart is able to deploy initcontainers along with Kong. This can be very
+The chart is able to deploy initContainers along with Kong. This can be very
 useful when there's a requirement for custom initialization. The
-`deployment.initcontainers` field in values.yaml takes an array of objects that
+`deployment.initContainers` field in values.yaml takes an array of objects that
 get appended as-is to the existing `spec.template.initContainers` array in the
 kong deployment resource.

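A minimal sketch of the `deployment.initContainers` array described above; the container name, image, and command are hypothetical, and each object is appended as-is to `spec.template.initContainers`:

```yaml
# values.yaml sketch — hypothetical init container that blocks until a database answers.
deployment:
  initContainers:
    - name: wait-for-db
      image: busybox:1.36
      command: ["sh", "-c", "until nc -z my-postgres 5432; do sleep 2; done"]
```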
@@ -581,7 +586,11 @@ namespaces. Limiting access requires several changes to configuration:

 Setting `deployment.daemonset: true` deploys Kong using a [DaemonSet
 controller](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/)
 instead of a Deployment controller. This runs a Kong Pod on every kubelet in
-the Kubernetes cluster.
+the Kubernetes cluster. For such a configuration it may be desirable to configure
+Pods to use the network of the host they run on instead of a dedicated network
+namespace. The benefit of this approach is that Kong can bind ports directly
+to Kubernetes nodes' network interfaces, without the extra network translation
+imposed by NodePort Services. It can be achieved by setting `deployment.hostNetwork: true`.

 ### Using dnsPolicy and dnsConfig
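A minimal values sketch combining the two settings discussed above: a DaemonSet deployment whose Pods bind directly to the node network, assuming the relevant host ports are free on every node:

```yaml
# values.yaml sketch — one Kong Pod per node, bound to the host network so no
# NodePort translation is involved.
deployment:
  daemonset: true
  hostNetwork: true
```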
||||
@@ -725,7 +734,7 @@ section of `values.yaml` file:
|
||||
|--------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------|
|
||||
| enabled | Deploy the ingress controller, rbac and crd | true |
|
||||
| image.repository | Docker image with the ingress controller | kong/kubernetes-ingress-controller |
|
||||
| image.tag | Version of the ingress controller | `2.12` |
|
||||
| image.tag | Version of the ingress controller | `3.0` |
|
||||
| image.effectiveSemver | Version of the ingress controller used for version-specific features when image.tag is not a valid semantic version | |
|
||||
| readinessProbe | Kong ingress controllers readiness probe | |
|
||||
| livenessProbe | Kong ingress controllers liveness probe | |
|
||||
@@ -791,6 +800,12 @@ Kong Ingress Controller v2.9 has introduced gateway discovery which allows
|
||||
the controller to discover Gateway instances that it should configure using
|
||||
an Admin API Kubernetes service.
|
||||
|
||||
Using this feature requires a split release installation of Gateways and Ingress Controller.
|
||||
For exemplar `values.yaml` files which use this feature please see: [examples README.md](./example-values/README.md).
|
||||
or use the [`ingress` chart](../ingress/README.md) which can handle this for you.
|
||||
|
||||
##### Configuration
|
||||
|
||||
You'll be able to configure this feature through configuration section under
|
||||
`ingressController.gatewayDiscovery`:
|
||||
|
||||
@@ -813,12 +828,17 @@ You'll be able to configure this feature through configuration section under
|
||||
the chart will generate values for `name` and `namespace` based on the current release name and
|
||||
namespace. This is useful when consuming the `kong` chart as a subchart.
|
||||
|
||||
Using this feature requires a split release installation of Gateways and Ingress Controller.
|
||||
For exemplar `values.yaml` files which use this feature please see: [examples README.md](./example-values/README.md).
|
||||
Additionally, you can control the addresses that are generated for your Gateways
|
||||
via the `--gateway-discovery-dns-strategy` CLI flag that can be set on the Ingress Controller
|
||||
(or an equivalent environment variable: `CONTROLLER_GATEWAY_DISCOVERY_DNS_STRATEGY`).
|
||||
It accepts 3 values which change the way that Gateway addresses are generated:
|
||||
- `service` - for service scoped pod DNS names: `pod-ip-address.service-name.my-namespace.svc.cluster-domain.example`
|
||||
- `pod` - for namespace scope pod DNS names: `pod-ip-address.my-namespace.pod.cluster-domain.example`
|
||||
- `ip` (default, retains behavior introduced in v2.9) - for regular IP addresses
|
||||
|
||||
When using `gatewayDiscovery`, you should consider configuring the Admin service to use mTLS client verification to make
|
||||
this interface secure. Without that, anyone who can access the Admin API from inside the cluster can configure the Gateway
|
||||
instances.
|
||||
this interface secure.
|
||||
Without that, anyone who can access the Admin API from inside the cluster can configure the Gateway instances.
|
||||
|
||||
On the controller release side, that can be achieved by setting `ingressController.adminApi.tls.client.enabled` to `true`.
|
||||
By default, Helm will generate a certificate Secret named `<release name>-admin-api-keypair` and
|
||||
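A hedged values sketch for the controller release, tying together the pieces described above: gateway discovery against the Gateway release's Admin API Service, the DNS strategy override, and mTLS client verification. The Service name/namespace and the `env` key-to-flag mapping are assumptions drawn from the surrounding text, not verified against the chart:

```yaml
# values.yaml sketch for the controller release in a split Gateway/controller install.
ingressController:
  gatewayDiscovery:
    enabled: true
    adminApiService:
      name: kong-gateway-admin   # hypothetical Admin API Service from the Gateway release
      namespace: kong            # hypothetical namespace of that release
  adminApi:
    tls:
      client:
        enabled: true            # mTLS client verification, as recommended above
  env:
    # assumed mapping of --gateway-discovery-dns-strategy to CONTROLLER_GATEWAY_DISCOVERY_DNS_STRATEGY
    gateway_discovery_dns_strategy: pod
```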
@@ -838,6 +858,7 @@ On the Gateway release side, set either `admin.tls.client.secretName` to the nam
|
||||
| deployment.minReadySeconds | Minimum number of seconds for which newly created pods should be ready without any of its container crashing, for it to be considered available. | |
|
||||
| deployment.initContainers | Create initContainers. Please go to Kubernetes doc for the spec of the initContainers | |
|
||||
| deployment.daemonset | Use a DaemonSet instead of a Deployment | `false` |
|
||||
| deployment.hostname | Set the Deployment's `.spec.template.hostname`. Kong reports this as its hostname. | |
|
||||
| deployment.hostNetwork | Enable hostNetwork, which binds to the ports to the host | `false` |
|
||||
| deployment.userDefinedVolumes | Create volumes. Please go to Kubernetes doc for the spec of the volumes | |
|
||||
| deployment.userDefinedVolumeMounts | Create volumeMounts. Please go to Kubernetes doc for the spec of the volumeMounts | |
|
||||
@@ -878,7 +899,7 @@ On the Gateway release side, set either `admin.tls.client.secretName` to the nam
|
||||
| priorityClassName | Set pod scheduling priority class for Kong pods | `""` |
|
||||
| secretVolumes | Mount given secrets as a volume in Kong container to override default certs and keys. | `[]` |
|
||||
| securityContext | Set the securityContext for Kong Pods | `{}` |
|
||||
| containerSecurityContext | Set the securityContext for Containers | `{"readOnlyRootFilesystem": true}` |
|
||||
| containerSecurityContext | Set the securityContext for Containers | See values.yaml |
|
||||
| serviceMonitor.enabled | Create ServiceMonitor for Prometheus Operator | `false` |
|
||||
| serviceMonitor.interval | Scraping interval | `30s` |
|
||||
| serviceMonitor.namespace | Where to create ServiceMonitor | |
|
||||
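The `containerSecurityContext` row above now defers to values.yaml for the restricted-profile defaults introduced in chart 2.30.0 (see the changelog earlier in this compare). A hedged sketch of overriding it for an image that does not run as UID 1000; the values are illustrative, not the chart's defaults:

```yaml
# values.yaml sketch — override the container security context; UID 100 follows the
# Alpine-image note in the 2.30.0 changelog entry and is illustrative only.
containerSecurityContext:
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 100
  allowPrivilegeEscalation: false
  capabilities:
    drop: ["ALL"]
```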
@@ -1013,7 +1034,7 @@ If you have paid for a license, but you do not have a copy of yours, please
|
||||
contact Kong Support. Once you have it, you will need to store it in a Secret:
|
||||
|
||||
```bash
|
||||
$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json
|
||||
kubectl create secret generic kong-enterprise-license --from-file=license=./license.json
|
||||
```
|
||||
|
||||
Set the secret name in `values.yaml`, in the `.enterprise.license_secret` key.
|
||||
@@ -1031,7 +1052,7 @@ from \<your username\> \> Edit Profile \> API Key. Use this to create registry
|
||||
secrets:
|
||||
|
||||
```bash
|
||||
$ kubectl create secret docker-registry kong-enterprise-edition-docker \
|
||||
kubectl create secret docker-registry kong-enterprise-edition-docker \
|
||||
--docker-server=hub.docker.io \
|
||||
--docker-username=<username-provided-to-you> \
|
||||
--docker-password=<password-provided-to-you>
|
||||
@@ -1107,14 +1128,30 @@ whereas this is optional for the Developer Portal on versions 0.36+. Providing
|
||||
Portal session configuration in values.yaml provides the default session
|
||||
configuration, which can be overridden on a per-workspace basis.
|
||||
|
||||
```bash
|
||||
cat admin_gui_session_conf
|
||||
```
|
||||
$ cat admin_gui_session_conf
|
||||
|
||||
```json
|
||||
{"cookie_name":"admin_session","cookie_samesite":"off","secret":"admin-secret-CHANGEME","cookie_secure":true,"storage":"kong"}
|
||||
$ cat portal_session_conf
|
||||
```
|
||||
|
||||
```bash
|
||||
cat portal_session_conf
|
||||
```
|
||||
|
||||
```json
|
||||
{"cookie_name":"portal_session","cookie_samesite":"off","secret":"portal-secret-CHANGEME","cookie_secure":true,"storage":"kong"}
|
||||
$ kubectl create secret generic kong-session-config --from-file=admin_gui_session_conf --from-file=portal_session_conf
|
||||
```
|
||||
|
||||
```bash
|
||||
kubectl create secret generic kong-session-config --from-file=admin_gui_session_conf --from-file=portal_session_conf
|
||||
```
|
||||
|
||||
```bash
|
||||
secret/kong-session-config created
|
||||
```
|
||||
|
||||
The exact plugin settings may vary in your environment. The `secret` should
|
||||
always be changed for both configurations.
|
||||
|
||||
@@ -1175,7 +1212,7 @@ between the initial install and upgrades. Both operations are a "sync" in Argo
|
||||
terms. This affects when migration Jobs execute in database-backed Kong
|
||||
installs.
|
||||
|
||||
The chart sets the `Sync` and `BeforeHookCreation` deletion
|
||||
The chart sets the `Sync` and `BeforeHookCreation` deletion
|
||||
[hook policies](https://argo-cd.readthedocs.io/en/stable/user-guide/resource_hooks/)
|
||||
on the `init-migrations` and `pre-upgrade-migrations` Jobs.
|
||||
|
||||
|
||||
@@ -193,7 +193,7 @@ database](https://www.postgresql.org/docs/current/backup-dump.html) and
|
||||
creating a separate release if you wish to continue using 8.6.8:
|
||||
|
||||
```
|
||||
$ helm install my-release -f values.yaml --version 8.6.8 bitnami/postgresql
|
||||
helm install my-release -f values.yaml --version 8.6.8 bitnami/postgresql
|
||||
```
|
||||
|
||||
Afterwords, you will upgrade your Kong chart release with
|
||||
@@ -233,26 +233,28 @@ upgrade in multiple steps:
|
||||
First, pin the controller version and upgrade to chart 2.4.0:
|
||||
|
||||
```console
|
||||
$ helm upgrade --wait \
|
||||
helm upgrade --wait \
|
||||
--set ingressController.image.tag=<CURRENT_CONTROLLER_VERSION> \
|
||||
--version 2.4.0 \
|
||||
--namespace <YOUR_RELEASE_NAMESPACE> \
|
||||
<YOUR_RELEASE_NAME> kong/kong
|
||||
```
|
||||
|
||||
Second, temporarily disable the ingress controller:
|
||||
|
||||
```console
|
||||
$ helm upgrade --wait \
|
||||
helm upgrade --wait \
|
||||
--set ingressController.enabled=false \
|
||||
--set deployment.serviceaccount.create=true \
|
||||
--version 2.4.0 \
|
||||
--namespace <YOUR_RELEASE_NAMESPACE> \
|
||||
<YOUR_RELEASE_NAME> kong/kong
|
||||
```
|
||||
|
||||
Finally, re-enable the ingress controller at the new version:
|
||||
|
||||
```console
|
||||
$ helm upgrade --wait \
|
||||
helm upgrade --wait \
|
||||
--set ingressController.enabled=true \
|
||||
--set ingressController.image.tag=<NEW_CONTROLLER_VERSION> \
|
||||
--version 2.4.0 \
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# use single image strings instead of repository/tag
|
||||
|
||||
image:
|
||||
unifiedRepoTag: kong:3.4
|
||||
unifiedRepoTag: kong:3.4.1
|
||||
|
||||
env:
|
||||
anonymous_reports: "off"
|
||||
@@ -10,4 +10,4 @@ ingressController:
|
||||
env:
|
||||
anonymous_reports: "false"
|
||||
image:
|
||||
unifiedRepoTag: kong/kubernetes-ingress-controller:2.12
|
||||
unifiedRepoTag: kong/kubernetes-ingress-controller:3.0
|
||||
|
||||
@@ -45,9 +45,6 @@ proxy:
|
||||
parameters:
|
||||
- ssl
|
||||
|
||||
# - PDB is enabled
|
||||
podDisruptionBudget:
|
||||
enabled: true
|
||||
# update strategy
|
||||
updateStrategy:
|
||||
type: "RollingUpdate"
|
||||
|
||||
@@ -37,9 +37,6 @@ proxy:
|
||||
annotations: {}
|
||||
path: /
|
||||
|
||||
# - PDB is enabled
|
||||
podDisruptionBudget:
|
||||
enabled: true
|
||||
# update strategy
|
||||
updateStrategy:
|
||||
type: "RollingUpdate"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# generated using: kubectl kustomize 'github.com/kong/kubernetes-ingress-controller/config/crd?ref=v2.12.0'
|
||||
# generated using: kubectl kustomize 'github.com/kong/kubernetes-ingress-controller/config/crd?ref=v3.0.0'
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
@@ -773,7 +773,9 @@ spec:
|
||||
`Services` can be a target, OR `Endpoints` can be targets).
|
||||
properties:
|
||||
algorithm:
|
||||
description: Algorithm is the load balancing algorithm to use.
|
||||
description: 'Algorithm is the load balancing algorithm to use. Accepted
|
||||
values are: "round-robin", "consistent-hashing", "least-connections",
|
||||
"latency".'
|
||||
enum:
|
||||
- round-robin
|
||||
- consistent-hashing
|
||||
@@ -945,6 +947,13 @@ spec:
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: '''proxy'' field is no longer supported, use Service''s annotations
|
||||
instead'
|
||||
rule: '!has(self.proxy)'
|
||||
- message: '''route'' field is no longer supported, use Ingress'' annotations
|
||||
instead'
|
||||
rule: '!has(self.route)'
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
@@ -1198,6 +1207,387 @@ spec:
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.13.0
|
||||
labels:
|
||||
gateway.networking.k8s.io/policy: direct
|
||||
name: kongupstreampolicies.configuration.konghq.com
|
||||
spec:
|
||||
group: configuration.konghq.com
|
||||
names:
|
||||
categories:
|
||||
- kong-ingress-controller
|
||||
kind: KongUpstreamPolicy
|
||||
listKind: KongUpstreamPolicyList
|
||||
plural: kongupstreampolicies
|
||||
shortNames:
|
||||
- kup
|
||||
singular: kongupstreampolicy
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1beta1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: "KongUpstreamPolicy allows configuring algorithm that should
|
||||
be used for load balancing traffic between Kong Upstream's Targets. It also
|
||||
allows configuring health checks for Kong Upstream's Targets. \n Its configuration
|
||||
is similar to Kong Upstream object (https://docs.konghq.com/gateway/latest/admin-api/#upstream-object),
|
||||
and it is applied to Kong Upstream objects created by the controller. \n
|
||||
It can be attached to Services. To attach it to a Service, it has to be
|
||||
annotated with `konghq.com/upstream-policy: <name>`, where `<name>` is the
|
||||
name of the KongUpstreamPolicy object in the same namespace as the Service.
|
||||
\n When attached to a Service, it will affect all Kong Upstreams created
|
||||
for the Service. \n When attached to a Service used in a Gateway API *Route
|
||||
rule with multiple BackendRefs, all of its Services MUST be configured with
|
||||
the same KongUpstreamPolicy. Otherwise, the controller will *ignore* the
|
||||
KongUpstreamPolicy. \n Note: KongUpstreamPolicy doesn't implement Gateway
|
||||
API's GEP-713 strictly. In particular, it doesn't use the TargetRef for
|
||||
attaching to Services and Gateway API *Routes - annotations are used instead.
|
||||
This is to allow reusing the same KongUpstreamPolicy for multiple Services
|
||||
and Gateway API *Routes."
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: Spec contains the configuration of the Kong upstream.
|
||||
properties:
|
||||
algorithm:
|
||||
description: 'Algorithm is the load balancing algorithm to use. Accepted
|
||||
values are: "round-robin", "consistent-hashing", "least-connections",
|
||||
"latency".'
|
||||
enum:
|
||||
- round-robin
|
||||
- consistent-hashing
|
||||
- least-connections
|
||||
- latency
|
||||
type: string
|
||||
hashOn:
|
||||
description: HashOn defines how to calculate hash for consistent-hashing
|
||||
load balancing algorithm. Algorithm must be set to "consistent-hashing"
|
||||
for this field to have effect.
|
||||
properties:
|
||||
cookie:
|
||||
description: Cookie is the name of the cookie to use as hash input.
|
||||
type: string
|
||||
cookiePath:
|
||||
description: CookiePath is cookie path to set in the response
|
||||
headers.
|
||||
type: string
|
||||
header:
|
||||
description: Header is the name of the header to use as hash input.
|
||||
type: string
|
||||
input:
|
||||
description: Input allows using one of the predefined inputs (ip,
|
||||
consumer, path). For other parametrized inputs, use one of the
|
||||
fields below.
|
||||
enum:
|
||||
- ip
|
||||
- consumer
|
||||
- path
|
||||
type: string
|
||||
queryArg:
|
||||
description: QueryArg is the name of the query argument to use
|
||||
as hash input.
|
||||
type: string
|
||||
uriCapture:
|
||||
description: URICapture is the name of the URI capture group to
|
||||
use as hash input.
|
||||
type: string
|
||||
type: object
|
||||
hashOnFallback:
|
||||
description: HashOnFallback defines how to calculate hash for consistent-hashing
|
||||
load balancing algorithm if the primary hash function fails. Algorithm
|
||||
must be set to "consistent-hashing" for this field to have effect.
|
||||
properties:
|
||||
cookie:
|
||||
description: Cookie is the name of the cookie to use as hash input.
|
||||
type: string
|
||||
cookiePath:
|
||||
description: CookiePath is cookie path to set in the response
|
||||
headers.
|
||||
type: string
|
||||
header:
|
||||
description: Header is the name of the header to use as hash input.
|
||||
type: string
|
||||
input:
|
||||
description: Input allows using one of the predefined inputs (ip,
|
||||
consumer, path). For other parametrized inputs, use one of the
|
||||
fields below.
|
||||
enum:
|
||||
- ip
|
||||
- consumer
|
||||
- path
|
||||
type: string
|
||||
queryArg:
|
||||
description: QueryArg is the name of the query argument to use
|
||||
as hash input.
|
||||
type: string
|
||||
uriCapture:
|
||||
description: URICapture is the name of the URI capture group to
|
||||
use as hash input.
|
||||
type: string
|
||||
type: object
|
||||
healthchecks:
|
||||
description: Healthchecks defines the health check configurations
|
||||
in Kong.
|
||||
properties:
|
||||
active:
|
||||
description: Active configures active health check probing.
|
||||
properties:
|
||||
concurrency:
|
||||
description: Concurrency is the number of targets to check
|
||||
concurrently.
|
||||
minimum: 1
|
||||
type: integer
|
||||
headers:
|
||||
additionalProperties:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
description: Headers is a list of HTTP headers to add to the
|
||||
probe request.
|
||||
type: object
|
||||
healthy:
|
||||
description: Healthy configures thresholds and HTTP status
|
||||
codes to mark targets healthy for an upstream.
|
||||
properties:
|
||||
httpStatuses:
|
||||
description: HTTPStatuses is a list of HTTP status codes
|
||||
that Kong considers a success.
|
||||
items:
|
||||
description: HTTPStatus is an HTTP status code.
|
||||
maximum: 599
|
||||
minimum: 100
|
||||
type: integer
|
||||
type: array
|
||||
interval:
|
||||
description: Interval is the interval between active health
|
||||
checks for an upstream in seconds when in a healthy
|
||||
state.
|
||||
minimum: 0
|
||||
type: integer
|
||||
successes:
|
||||
description: Successes is the number of successes to consider
|
||||
a target healthy.
|
||||
minimum: 0
|
||||
type: integer
|
||||
type: object
|
||||
httpPath:
|
||||
description: HTTPPath is the path to use in GET HTTP request
|
||||
to run as a probe.
|
||||
pattern: ^/.*$
|
||||
type: string
|
||||
httpsSni:
|
||||
description: HTTPSSNI is the SNI to use in GET HTTPS request
|
||||
to run as a probe.
|
||||
type: string
|
||||
httpsVerifyCertificate:
|
||||
description: HTTPSVerifyCertificate is a boolean value that
|
||||
indicates if the certificate should be verified.
|
||||
type: boolean
|
||||
timeout:
|
||||
description: Timeout is the probe timeout in seconds.
|
||||
minimum: 0
|
||||
type: integer
|
||||
type:
|
||||
description: Type determines whether to perform active health
|
||||
checks using HTTP or HTTPS, or just attempt a TCP connection.
|
||||
Accepted values are "http", "https", "tcp", "grpc", "grpcs".
|
||||
enum:
|
||||
- http
|
||||
- https
|
||||
- tcp
|
||||
- grpc
|
||||
- grpcs
|
||||
type: string
|
||||
unhealthy:
|
||||
description: Unhealthy configures thresholds and HTTP status
|
||||
codes to mark targets unhealthy for an upstream.
|
||||
properties:
|
||||
httpFailures:
|
||||
description: HTTPFailures is the number of failures to
|
||||
consider a target unhealthy.
|
||||
minimum: 0
|
||||
type: integer
|
||||
httpStatuses:
|
||||
description: HTTPStatuses is a list of HTTP status codes
|
||||
that Kong considers a failure.
|
||||
items:
|
||||
description: HTTPStatus is an HTTP status code.
|
||||
maximum: 599
|
||||
minimum: 100
|
||||
type: integer
|
||||
type: array
|
||||
interval:
|
||||
description: Interval is the interval between active health
|
||||
checks for an upstream in seconds when in an unhealthy
|
||||
state.
|
||||
minimum: 0
|
||||
type: integer
|
||||
tcpFailures:
|
||||
description: TCPFailures is the number of TCP failures
|
||||
in a row to consider a target unhealthy.
|
||||
minimum: 0
|
||||
type: integer
|
||||
timeouts:
|
||||
description: Timeouts is the number of timeouts in a row
|
||||
to consider a target unhealthy.
|
||||
minimum: 0
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
passive:
|
||||
description: Passive configures passive health check probing.
|
||||
properties:
|
||||
healthy:
|
||||
description: Healthy configures thresholds and HTTP status
|
||||
codes to mark targets healthy for an upstream.
|
||||
properties:
|
||||
httpStatuses:
|
||||
description: HTTPStatuses is a list of HTTP status codes
|
||||
that Kong considers a success.
|
||||
items:
|
||||
description: HTTPStatus is an HTTP status code.
|
||||
maximum: 599
|
||||
minimum: 100
|
||||
type: integer
|
||||
type: array
|
||||
interval:
|
||||
description: Interval is the interval between active health
|
||||
checks for an upstream in seconds when in a healthy
|
||||
state.
|
||||
minimum: 0
|
||||
type: integer
|
||||
successes:
|
||||
description: Successes is the number of successes to consider
|
||||
a target healthy.
|
||||
minimum: 0
|
||||
type: integer
|
||||
type: object
|
||||
type:
|
||||
description: Type determines whether to perform passive health
|
||||
checks interpreting HTTP/HTTPS statuses, or just check for
|
||||
TCP connection success. Accepted values are "http", "https",
|
||||
"tcp", "grpc", "grpcs".
|
||||
enum:
|
||||
- http
|
||||
- https
|
||||
- tcp
|
||||
- grpc
|
||||
- grpcs
|
||||
type: string
|
||||
unhealthy:
|
||||
description: Unhealthy configures thresholds and HTTP status
|
||||
codes to mark targets unhealthy.
|
||||
properties:
|
||||
httpFailures:
|
||||
description: HTTPFailures is the number of failures to
|
||||
consider a target unhealthy.
|
||||
minimum: 0
|
||||
type: integer
|
||||
httpStatuses:
|
||||
description: HTTPStatuses is a list of HTTP status codes
|
||||
that Kong considers a failure.
|
||||
items:
|
||||
description: HTTPStatus is an HTTP status code.
|
||||
maximum: 599
|
||||
minimum: 100
|
||||
type: integer
|
||||
type: array
|
||||
interval:
|
||||
description: Interval is the interval between active health
|
||||
checks for an upstream in seconds when in an unhealthy
|
||||
state.
|
||||
minimum: 0
|
||||
type: integer
|
||||
tcpFailures:
|
||||
description: TCPFailures is the number of TCP failures
|
||||
in a row to consider a target unhealthy.
|
||||
minimum: 0
|
||||
type: integer
|
||||
timeouts:
|
||||
description: Timeouts is the number of timeouts in a row
|
||||
to consider a target unhealthy.
|
||||
minimum: 0
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
threshold:
|
||||
description: Threshold is the minimum percentage of the upstream’s
|
||||
targets’ weight that must be available for the whole upstream
|
||||
to be considered healthy.
|
||||
type: integer
|
||||
type: object
|
||||
slots:
|
||||
description: Slots is the number of slots in the load balancer algorithm.
|
||||
If not set, the default value in Kong for the algorithm is used.
|
||||
maximum: 65536
|
||||
minimum: 10
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-validations:
- message: Only one of spec.hashOn.(input|cookie|header|uriCapture|queryArg) can be set.
  rule: 'has(self.spec.hashOn) ? [has(self.spec.hashOn.input), has(self.spec.hashOn.cookie), has(self.spec.hashOn.header), has(self.spec.hashOn.uriCapture), has(self.spec.hashOn.queryArg)].filter(fieldSet, fieldSet == true).size() <= 1 : true'
- message: When spec.hashOn.cookie is set, spec.hashOn.cookiePath is required.
  rule: 'has(self.spec.hashOn) && has(self.spec.hashOn.cookie) ? has(self.spec.hashOn.cookiePath) : true'
- message: When spec.hashOn.cookiePath is set, spec.hashOn.cookie is required.
  rule: 'has(self.spec.hashOn) && has(self.spec.hashOn.cookiePath) ? has(self.spec.hashOn.cookie) : true'
- message: spec.algorithm must be set to "consistent-hashing" when spec.hashOn is set.
  rule: 'has(self.spec.hashOn) ? has(self.spec.algorithm) && self.spec.algorithm == "consistent-hashing" : true'
- message: Only one of spec.hashOnFallback.(input|header|uriCapture|queryArg) can be set.
  rule: 'has(self.spec.hashOnFallback) ? [has(self.spec.hashOnFallback.input), has(self.spec.hashOnFallback.header), has(self.spec.hashOnFallback.uriCapture), has(self.spec.hashOnFallback.queryArg)].filter(fieldSet, fieldSet == true).size() <= 1 : true'
- message: spec.algorithm must be set to "consistent-hashing" when spec.hashOnFallback is set.
  rule: 'has(self.spec.hashOnFallback) ? has(self.spec.algorithm) && self.spec.algorithm == "consistent-hashing" : true'
- message: spec.hashOnFallback.cookie must not be set.
  rule: 'has(self.spec.hashOnFallback) ? !has(self.spec.hashOnFallback.cookie) : true'
- message: spec.hashOnFallback.cookiePath must not be set.
  rule: 'has(self.spec.hashOnFallback) ? !has(self.spec.hashOnFallback.cookiePath) : true'
- message: spec.healthchecks.passive.healthy.interval must not be set.
  rule: 'has(self.spec.healthchecks) && has(self.spec.healthchecks.passive) && has(self.spec.healthchecks.passive.healthy) ? !has(self.spec.healthchecks.passive.healthy.interval) : true'
- message: spec.healthchecks.passive.unhealthy.interval must not be set.
  rule: 'has(self.spec.healthchecks) && has(self.spec.healthchecks.passive) && has(self.spec.healthchecks.passive.unhealthy) ? !has(self.spec.healthchecks.passive.unhealthy.interval) : true'
- message: spec.hashOnFallback must not be set when spec.hashOn.cookie is set.
  rule: 'has(self.spec.hashOn) && has(self.spec.hashOn.cookie) ? !has(self.spec.hashOnFallback) : true'
served: true
storage: true
subresources:
  status: {}
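For orientation, a minimal KongUpstreamPolicy manifest that satisfies the CEL rules above might look as follows. This is a hedged sketch only; the apiVersion, names and values are assumptions and are not part of this change set.

```yaml
# Hypothetical example; apiVersion, names and values are assumptions.
apiVersion: configuration.konghq.com/v1beta1
kind: KongUpstreamPolicy
metadata:
  name: example-upstream-policy
spec:
  algorithm: consistent-hashing      # required whenever hashOn or hashOnFallback is set
  hashOn:
    header: x-session-id             # exactly one of input|cookie|header|uriCapture|queryArg
  hashOnFallback:
    input: ip                        # cookie/cookiePath are rejected here by the rules above
  slots: 10000                       # within the allowed 10..65536 range
  healthchecks:
    passive:
      type: http
      unhealthy:
        httpFailures: 3              # passive healthy/unhealthy blocks must not set interval
```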
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.13.0

@@ -9,7 +9,6 @@ admin:
|
||||
konghq.com/https-redirect-status-code: "301"
|
||||
konghq.com/protocols: https
|
||||
konghq.com/strip-path: "true"
|
||||
kubernetes.io/ingress.class: default
|
||||
nginx.ingress.kubernetes.io/app-root: /
|
||||
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
|
||||
nginx.ingress.kubernetes.io/permanent-redirect-code: "301"
|
||||
@@ -176,8 +175,8 @@ manager:
|
||||
ingress:
|
||||
annotations:
|
||||
konghq.com/https-redirect-status-code: "301"
|
||||
kubernetes.io/ingress.class: default
|
||||
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
|
||||
ingressClassName: kong
|
||||
enabled: true
|
||||
hostname: kong.127-0-0-1.nip.io
|
||||
path: /
|
||||
@@ -209,7 +208,7 @@ portal:
|
||||
konghq.com/https-redirect-status-code: "301"
|
||||
konghq.com/protocols: https
|
||||
konghq.com/strip-path: "false"
|
||||
kubernetes.io/ingress.class: default
|
||||
ingressClassName: kong
|
||||
enabled: true
|
||||
hostname: developer.127-0-0-1.nip.io
|
||||
path: /
|
||||
@@ -232,8 +231,8 @@ portalapi:
|
||||
konghq.com/https-redirect-status-code: "301"
|
||||
konghq.com/protocols: https
|
||||
konghq.com/strip-path: "true"
|
||||
kubernetes.io/ingress.class: default
|
||||
nginx.ingress.kubernetes.io/app-root: /
|
||||
ingressClassName: kong
|
||||
enabled: true
|
||||
hostname: developer.127-0-0-1.nip.io
|
||||
path: /api
|
||||
|
||||
@@ -40,8 +40,7 @@ admin:
|
||||
enabled: true
|
||||
tls: CHANGEME-admin-tls-secret
|
||||
hostname: admin.kong.CHANGEME.example
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "kong"
|
||||
ingressClassName: kong
|
||||
path: /
|
||||
|
||||
proxy:
|
||||
@@ -148,8 +147,7 @@ portal:
|
||||
enabled: true
|
||||
tls: CHANGEME-portal-tls-secret
|
||||
hostname: portal.kong.CHANGEME.example
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "kong"
|
||||
ingressClassName: kong
|
||||
path: /
|
||||
|
||||
externalIPs: []
|
||||
@@ -177,8 +175,7 @@ portalapi:
|
||||
enabled: true
|
||||
tls: CHANGEME-portalapi-tls-secret
|
||||
hostname: portalapi.kong.CHANGEME.example
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "kong"
|
||||
ingressClassName: kong
|
||||
path: /
|
||||
|
||||
externalIPs: []
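The hunks above all make the same move: the deprecated kubernetes.io/ingress.class annotation is dropped and the ingress class is selected through the spec-level ingressClassName field instead. A hedged values sketch of the resulting shape; the hostname and annotation come from the hunks, everything else is illustrative:

```yaml
# Sketch of the new style used by these ingress sections (values are placeholders).
admin:
  ingress:
    enabled: true
    ingressClassName: kong                         # replaces kubernetes.io/ingress.class: "kong"
    hostname: admin.kong.CHANGEME.example
    annotations:
      konghq.com/https-redirect-status-code: "301" # Kong-specific annotations stay as annotations
```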
@@ -447,14 +447,28 @@ The name of the service used for the ingress controller's validation webhook
|
||||
{{ include "kong.fullname" . }}-validation-webhook
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{/*
|
||||
The name of the Service which will be used by the controller to update the Ingress status field.
|
||||
*/}}
|
||||
|
||||
{{- define "kong.controller-publish-service" -}}
|
||||
{{- $proxyOverride := "" -}}
|
||||
{{- if .Values.proxy.nameOverride -}}
|
||||
{{- $proxyOverride = ( tpl .Values.proxy.nameOverride . ) -}}
|
||||
{{- end -}}
|
||||
{{- (printf "%s/%s" ( include "kong.namespace" . ) ( default ( printf "%s-proxy" (include "kong.fullname" . )) $proxyOverride )) -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "kong.ingressController.env" -}}
|
||||
{{/*
|
||||
====== AUTO-GENERATED ENVIRONMENT VARIABLES ======
|
||||
*/}}
|
||||
|
||||
|
||||
{{- $autoEnv := dict -}}
|
||||
{{- $_ := set $autoEnv "CONTROLLER_KONG_ADMIN_TLS_SKIP_VERIFY" true -}}
|
||||
{{- $_ := set $autoEnv "CONTROLLER_PUBLISH_SERVICE" (printf "%s/%s" ( include "kong.namespace" . ) ( .Values.proxy.nameOverride | default ( printf "%s-proxy" (include "kong.fullname" . )))) -}}
|
||||
{{- $_ := set $autoEnv "CONTROLLER_PUBLISH_SERVICE" ( include "kong.controller-publish-service" . ) -}}
|
||||
{{- $_ := set $autoEnv "CONTROLLER_INGRESS_CLASS" .Values.ingressController.ingressClass -}}
|
||||
{{- $_ := set $autoEnv "CONTROLLER_ELECTION_ID" (printf "kong-ingress-controller-leader-%s" .Values.ingressController.ingressClass) -}}
@@ -1253,6 +1267,24 @@ resource roles into their separate templates.
|
||||
- namespaces
|
||||
verbs:
|
||||
- list
|
||||
{{- if (semverCompare ">= 3.0.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }}
|
||||
- apiGroups:
|
||||
- configuration.konghq.com
|
||||
resources:
|
||||
- kongupstreampolicies
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- configuration.konghq.com
|
||||
resources:
|
||||
- kongupstreampolicies/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
{{- end }}
|
||||
{{- if (semverCompare ">= 2.11.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }}
|
||||
- apiGroups:
|
||||
- configuration.konghq.com
|
||||
@@ -1429,7 +1461,7 @@ resource roles into their separate templates.
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
{{- if or (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1alpha2") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1beta1") }}
|
||||
{{- if or (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1alpha2") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1beta1") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1")}}
|
||||
- apiGroups:
|
||||
- gateway.networking.k8s.io
|
||||
resources:
|
||||
@@ -1620,7 +1652,7 @@ Kubernetes Cluster-scoped resources it uses to build Kong configuration.
|
||||
- list
|
||||
- watch
|
||||
{{- end }}
|
||||
{{- if or (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1alpha2") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1beta1") }}
|
||||
{{- if or (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1alpha2") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1beta1") (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1")}}
|
||||
- apiGroups:
|
||||
- gateway.networking.k8s.io
|
||||
resources:
|
||||
|
||||
@@ -80,9 +80,15 @@ webhooks:
|
||||
apiVersions:
|
||||
- 'v1'
|
||||
operations:
|
||||
{{- if (semverCompare ">= 2.12.1" (include "kong.effectiveVersion" .Values.ingressController.image)) }}
|
||||
- CREATE
|
||||
{{- end }}
|
||||
- UPDATE
|
||||
resources:
|
||||
- secrets
|
||||
{{- if (semverCompare ">= 3.0.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }}
|
||||
- services
|
||||
{{- end }}
|
||||
{{- if (semverCompare ">= 2.12.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }}
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
@@ -98,6 +104,7 @@ webhooks:
|
||||
apiVersions:
|
||||
- 'v1alpha2'
|
||||
- 'v1beta1'
|
||||
- 'v1'
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
|
||||
@@ -70,6 +70,9 @@ spec:
|
||||
{{ include "kong.renderTpl" (dict "value" .Values.podLabels "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.deployment.hostname }}
|
||||
hostname: {{ .Values.deployment.hostname }}
|
||||
{{- end }}
|
||||
{{- if .Values.deployment.hostNetwork }}
|
||||
hostNetwork: true
|
||||
{{- end }}
|
||||
|
||||
@@ -63,6 +63,9 @@ spec:
|
||||
{{ include "kong.renderTpl" (dict "value" .Values.podLabels "context" $) | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.deployment.hostname }}
|
||||
hostname: {{ .Values.deployment.hostname }}
|
||||
{{- end }}
|
||||
{{- if .Values.deployment.hostNetwork }}
|
||||
hostNetwork: true
|
||||
{{- end }}
|
||||
|
||||
@@ -1,4 +1,10 @@
|
||||
{{- if .Values.podDisruptionBudget.enabled }}
|
||||
{{- if and (not .Values.autoscaling.enabled) (le (int .Values.replicaCount) 1) }}
|
||||
{{- fail "Enabling PodDisruptionBudget with replicaCount: 1 and no autoscaling prevents pod restarts during upgrades" }}
|
||||
{{- end }}
|
||||
{{- if and .Values.autoscaling.enabled (le (int .Values.autoscaling.minReplicas) 1) }}
|
||||
{{- fail "Enabling PodDisruptionBudget with autoscaling.minReplicas: 1 prevents pod restarts during upgrades" }}
|
||||
{{- end }}
|
||||
apiVersion: policy/v1
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
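With the new guards above, the chart refuses a PodDisruptionBudget that could block every pod during an upgrade. A hedged values sketch that passes the checks; the maxUnavailable key is an assumption and is not shown in this change:

```yaml
# Hedged sketch only; maxUnavailable is an assumed key.
replicaCount: 2            # must exceed 1 when autoscaling is disabled
# autoscaling:
#   enabled: true
#   minReplicas: 2         # must exceed 1 when autoscaling is enabled
podDisruptionBudget:
  enabled: true
  maxUnavailable: "50%"
```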
@@ -24,7 +24,7 @@ spec:
|
||||
{{- if .Values.serviceMonitor.metricRelabelings }}
|
||||
metricRelabelings: {{ toYaml .Values.serviceMonitor.metricRelabelings | nindent 6 }}
|
||||
{{- end }}
|
||||
{{ if (semverCompare ">= 2.0.0" (include "kong.effectiveVersion" .Values.ingressController.image)) -}}
|
||||
{{- if and .Values.ingressController.enabled (semverCompare ">= 2.0.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }}
|
||||
- targetPort: cmetrics
|
||||
scheme: http
|
||||
{{- if .Values.serviceMonitor.interval }}
|
||||
|
||||
@@ -32,9 +32,9 @@ metadata:
|
||||
name: "{{ .Release.Name }}-httpbin"
|
||||
annotations:
|
||||
httpbin.ingress.kubernetes.io/rewrite-target: /
|
||||
kubernetes.io/ingress.class: "kong"
|
||||
konghq.com/strip-path: "true"
|
||||
spec:
|
||||
ingressClassName: kong
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
@@ -46,14 +46,14 @@ spec:
|
||||
port:
|
||||
number: 80
|
||||
---
|
||||
apiVersion: gateway.networking.k8s.io/v1alpha2
|
||||
apiVersion: gateway.networking.k8s.io/v1beta1
|
||||
kind: GatewayClass
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}-kong-test"
|
||||
spec:
|
||||
controllerName: konghq.com/kic-gateway-controller
|
||||
---
|
||||
apiVersion: gateway.networking.k8s.io/v1alpha2
|
||||
apiVersion: gateway.networking.k8s.io/v1beta1
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}-kong-test"
|
||||
@@ -66,7 +66,7 @@ spec:
|
||||
protocol: HTTP
|
||||
port: 80
|
||||
---
|
||||
apiVersion: gateway.networking.k8s.io/v1alpha2
|
||||
apiVersion: gateway.networking.k8s.io/v1beta1
|
||||
kind: HTTPRoute
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}-httpbin"
|
||||
|
||||
@@ -60,6 +60,11 @@ deployment:
|
||||
# Use a DaemonSet controller instead of a Deployment controller
|
||||
daemonset: false
|
||||
hostNetwork: false
|
||||
# Set the Deployment's spec.template.hostname field.
|
||||
# This propagates to Kong API endpoints that report
|
||||
# the hostname, such as the admin API root and hybrid mode
|
||||
# /clustering/data-planes endpoint
|
||||
hostname: ""
|
||||
# kong_prefix empty dir size
|
||||
prefixDir:
|
||||
sizeLimit: 256Mi
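The new deployment.hostname value above propagates to the pod spec and to the Kong API endpoints that report the hostname. A hedged one-line override, with an illustrative value:

```yaml
# Hedged sketch; the value is a placeholder.
deployment:
  hostname: "kong-cp"      # sets spec.template.hostname on the Kong pods
```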
@@ -510,13 +515,13 @@ dblessConfig:
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
# Kong Ingress Controller's primary purpose is to satisfy Ingress resources
|
||||
# created in k8s. It uses CRDs for more fine grained control over routing and
|
||||
# created in k8s. It uses CRDs for more fine grained control over routing and
|
||||
# for Kong specific configuration.
|
||||
ingressController:
|
||||
enabled: true
|
||||
image:
|
||||
repository: kong/kubernetes-ingress-controller
|
||||
tag: "2.12"
|
||||
tag: "3.0"
|
||||
# Optionally set a semantic version for version-gated features. This can normally
|
||||
# be left unset. You only need to set this if your tag is not a semver string,
|
||||
# such as when you are using a "next" tag. Set this to the effective semantic
|
||||
@@ -948,6 +953,14 @@ securityContext: {}
|
||||
# securityContext for containers.
|
||||
containerSecurityContext:
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
runAsUser: 1000
|
||||
runAsNonRoot: true
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
|
||||
## Optional DNS configuration for Kong pods
|
||||
# dnsPolicy: ClusterFirst
|
||||
@@ -968,7 +981,7 @@ serviceMonitor:
|
||||
# If you wish to gather metrics from a Kong instance with the proxy disabled (such as a hybrid control plane), see:
|
||||
# https://github.com/Kong/charts/blob/main/charts/kong/README.md#prometheus-operator-integration
|
||||
enabled: false
|
||||
# interval: 10s
|
||||
# interval: 30s
|
||||
# Specifies namespace, where ServiceMonitor should be installed
|
||||
# namespace: monitoring
|
||||
# labels:
|
||||
@@ -1234,7 +1247,7 @@ appsec:
|
||||
#registry:
|
||||
repository: ghcr.io/openappsec
|
||||
image: "agent"
|
||||
tag: "1.1.0"
|
||||
tag: "1.1.1"
|
||||
pullPolicy: Always
|
||||
|
||||
securityContext:
|
||||
@@ -1248,7 +1261,7 @@ appsec:
|
||||
kong:
|
||||
image:
|
||||
repository: "ghcr.io/openappsec/kong-attachment"
|
||||
tag: "1.1.0"
|
||||
tag: "1.1.1"
|
||||
configMapName: appsec-settings-configmap
|
||||
configMapContent:
|
||||
crowdsec:
|
||||
|
||||
@@ -49,6 +49,8 @@ nginxIntakerEvent::resetAllCounters()
|
||||
req_proccessing_timeout = 0;
|
||||
res_proccessing_timeout = 0;
|
||||
req_failed_to_reach_upstream = 0;
|
||||
req_overall_size = 0;
|
||||
res_overall_size = 0;
|
||||
cpu_event.setCPU(0);
|
||||
}
|
||||
|
||||
@@ -249,10 +251,22 @@ nginxIntakerEvent::addPluginMetricCounter(const ngx_http_cp_metric_data_t *recie
|
||||
cpu_event.setCPU(amount);
|
||||
break;
|
||||
}
|
||||
case ngx_http_plugin_metric_type_e::REQUEST_OVERALL_SIZE_COUNT: {
|
||||
req_overall_size += amount;
|
||||
static const uint64_t max_expected_res_size = 100ULL * 1024 * 1024 * 1024;
|
||||
if (amount > max_expected_res_size) {
|
||||
dbgWarning(D_METRICS_NGINX_ATTACHMENT) << "Requests sizes higher than expected: " << amount;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case ngx_http_plugin_metric_type_e::RESPONSE_OVERALL_SIZE_COUNT: {
|
||||
res_overall_size += amount;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
dbgWarning(D_METRICS_NGINX_ATTACHMENT)
|
||||
<< "Unsupported metric type. Type: " << static_cast<int>(metric_type);
|
||||
return;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -353,6 +367,10 @@ nginxIntakerEvent::getPluginMetricCounter(ngx_http_plugin_metric_type_e metric_t
|
||||
return req_failed_to_reach_upstream;
|
||||
case ngx_http_plugin_metric_type_e::CPU_USAGE:
|
||||
return static_cast<uint64_t>(cpu_event.getCPU());
|
||||
case ngx_http_plugin_metric_type_e::REQUEST_OVERALL_SIZE_COUNT:
|
||||
return req_overall_size;
|
||||
case ngx_http_plugin_metric_type_e::RESPONSE_OVERALL_SIZE_COUNT:
|
||||
return res_overall_size;
|
||||
default:
|
||||
dbgWarning(D_METRICS_NGINX_ATTACHMENT)
|
||||
<< "Unsupported metric type. Type: " << static_cast<int>(metric_type);
|
||||
@@ -498,5 +516,11 @@ nginxIntakerMetric::upon(const nginxIntakerEvent &event)
|
||||
req_failed_to_reach_upstream.report(
|
||||
event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REQ_FAILED_TO_REACH_UPSTREAM)
|
||||
);
|
||||
req_overall_size.report(
|
||||
event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::REQUEST_OVERALL_SIZE_COUNT)
|
||||
);
|
||||
res_overall_size.report(
|
||||
event.getPluginMetricCounter(ngx_http_plugin_metric_type_e::RESPONSE_OVERALL_SIZE_COUNT)
|
||||
);
|
||||
event.notifyCPU();
|
||||
}
|
||||
|
||||
@@ -224,5 +224,6 @@ private:
|
||||
};
|
||||
|
||||
static const ParameterBehavior action_ignore(BehaviorKey::ACTION, BehaviorValue::IGNORE);
|
||||
static const ParameterBehavior action_accept(BehaviorKey::ACTION, BehaviorValue::ACCEPT);
|
||||
|
||||
#endif //__PARAMETERS_CONFIG_H__
|
||||
|
||||
@@ -34,7 +34,6 @@ public:
|
||||
virtual const std::string & getUpdateTime() const = 0;
|
||||
virtual const std::string & getLastManifestUpdate() const = 0;
|
||||
virtual const std::string & getPolicyVersion() const = 0;
|
||||
virtual const std::string & getWaapModelVersion() const = 0;
|
||||
virtual const std::string & getLastPolicyUpdate() const = 0;
|
||||
virtual const std::string & getLastSettingsUpdate() const = 0;
|
||||
virtual const std::string & getUpgradeMode() const = 0;
|
||||
|
||||
@@ -31,7 +31,7 @@ public:
|
||||
virtual const std::string & getPolicyVersions() const = 0;
|
||||
virtual const std::string & getPolicyVersion() const = 0;
|
||||
virtual const std::string & getUpdatePolicyVersion() const = 0;
|
||||
virtual void updateReconfStatus(int id, ReconfStatus status) = 0;
|
||||
virtual void updateReconfStatus(int id, const std::string &service_name, ReconfStatus status) = 0;
|
||||
virtual void startReconfStatus(
|
||||
int id,
|
||||
ReconfStatus status,
|
||||
|
||||
@@ -19,13 +19,19 @@
|
||||
#include "i_mainloop.h"
|
||||
#include "i_local_policy_mgmt_gen.h"
|
||||
#include "i_env_details.h"
|
||||
#include "i_shell_cmd.h"
|
||||
#include "i_orchestration_tools.h"
|
||||
|
||||
class LocalPolicyMgmtGenerator
|
||||
:
|
||||
public Component,
|
||||
Singleton::Provide<I_LocalPolicyMgmtGen>,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_EnvDetails>
|
||||
Singleton::Consume<I_EnvDetails>,
|
||||
Singleton::Consume<I_ShellCmd>,
|
||||
Singleton::Consume<I_Environment>,
|
||||
Singleton::Consume<I_OrchestrationTools>,
|
||||
Singleton::Consume<I_Messaging>
|
||||
{
|
||||
public:
|
||||
LocalPolicyMgmtGenerator();
|
||||
|
||||
@@ -81,6 +81,8 @@ private:
|
||||
uint64_t req_proccessing_timeout = 0;
|
||||
uint64_t res_proccessing_timeout = 0;
|
||||
uint64_t req_failed_to_reach_upstream = 0;
|
||||
uint64_t req_overall_size = 0;
|
||||
uint64_t res_overall_size = 0;
|
||||
CPUEvent cpu_event;
|
||||
};
|
||||
|
||||
@@ -140,6 +142,8 @@ private:
|
||||
Counter thread_failure{this, "attachmentThreadFailureSum"};
|
||||
Counter req_proccessing_timeout{this, "httpRequestProcessingReachedTimeoutSum"};
|
||||
Counter res_proccessing_timeout{this, "httpResponseProcessingReachedTimeoutSum"};
|
||||
Counter req_overall_size{this, "httpRequestsSizeSum"};
|
||||
Counter res_overall_size{this, "httpResponsesSizeSum"};
|
||||
Counter req_failed_to_reach_upstream{this, "httpRequestFailedToReachWebServerUpstreamSum"};
|
||||
};
|
||||
|
||||
|
||||
@@ -24,7 +24,6 @@
|
||||
#include "i_time_get.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "i_agent_details.h"
|
||||
#include "i_details_resolver.h"
|
||||
#include "customized_cereal_map.h"
|
||||
|
||||
class OrchestrationStatus
|
||||
@@ -33,7 +32,6 @@ class OrchestrationStatus
|
||||
Singleton::Provide<I_OrchestrationStatus>,
|
||||
Singleton::Consume<I_TimeGet>,
|
||||
Singleton::Consume<I_AgentDetails>,
|
||||
Singleton::Consume<I_DetailsResolver>,
|
||||
Singleton::Consume<I_OrchestrationTools>,
|
||||
Singleton::Consume<I_MainLoop>
|
||||
{
|
||||
|
||||
@@ -7,13 +7,15 @@
|
||||
#include "singleton.h"
|
||||
#include "i_mainloop.h"
|
||||
#include "i_environment.h"
|
||||
#include "i_generic_rulebase.h"
|
||||
|
||||
class RateLimit
|
||||
:
|
||||
public Component,
|
||||
Singleton::Consume<I_MainLoop>,
|
||||
Singleton::Consume<I_TimeGet>,
|
||||
Singleton::Consume<I_Environment>
|
||||
Singleton::Consume<I_Environment>,
|
||||
Singleton::Consume<I_GenericRulebase>
|
||||
{
|
||||
public:
|
||||
RateLimit();
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
#include "layer_7_access_control.h"
|
||||
|
||||
#include <string>
|
||||
#include <unordered_set>
|
||||
#include <boost/algorithm/string/case_conv.hpp>
|
||||
#include <unordered_set>
|
||||
|
||||
#include "config.h"
|
||||
#include "cache.h"
|
||||
#include "http_inspection_events.h"
|
||||
#include "http_transaction_common.h"
|
||||
#include "nginx_attachment_common.h"
|
||||
#include "intelligence_comp_v2.h"
|
||||
#include "intelligence_is_v2/intelligence_query_v2.h"
|
||||
#include "intelligence_is_v2/query_request_v2.h"
|
||||
#include "log_generator.h"
|
||||
|
||||
@@ -103,7 +101,7 @@ private:
|
||||
unsigned int crowdsec_event_id;
|
||||
};
|
||||
|
||||
class Layer7AccessControl::Impl : public Listener<HttpRequestHeaderEvent>
|
||||
class Layer7AccessControl::Impl : public Listener<HttpRequestHeaderEvent>, Listener<WaitTransactionEvent>
|
||||
{
|
||||
public:
|
||||
void init();
|
||||
@@ -126,27 +124,25 @@ public:
|
||||
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT;
|
||||
}
|
||||
|
||||
auto source_identifier = i_env->get<string>(HttpTransactionData::source_identifier);
|
||||
if (source_identifier.ok() && IPAddr::createIPAddr(source_identifier.unpack()).ok()) {
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Found a valid source identifier value: " << source_identifier.unpack();
|
||||
return checkReputation(source_identifier.unpack());
|
||||
}
|
||||
return handleEvent();
|
||||
}
|
||||
|
||||
auto orig_source_ip = i_env->get<IPAddr>(HttpTransactionData::client_ip_ctx);
|
||||
if (!orig_source_ip.ok()) {
|
||||
dbgWarning(D_L7_ACCESS_CONTROL) << "Could not extract the Client IP address from context";
|
||||
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
||||
}
|
||||
EventVerdict
|
||||
respond(const WaitTransactionEvent &) override
|
||||
{
|
||||
dbgFlow(D_L7_ACCESS_CONTROL) << "Handling wait verdict";
|
||||
|
||||
stringstream ss_client_ip;
|
||||
ss_client_ip << orig_source_ip.unpack();
|
||||
return checkReputation(ss_client_ip.str());
|
||||
return handleEvent();
|
||||
}
|
||||
|
||||
private:
|
||||
void queryIntelligence();
|
||||
void scheduleIntelligenceQuery(const string &ip);
|
||||
void processIntelligenceResponse(const string &ip, const vector<AssetReply<IntelligenceIpReputation>> &response);
|
||||
Maybe<IntelligenceIpReputation> getIpReputation(const string &ip);
|
||||
ngx_http_cp_verdict_e checkReputation(const string &source_ip);
|
||||
void generateLog(const string &source_ip, const IntelligenceIpReputation &ip_reputation) const;
|
||||
EventVerdict generateLog(const string &source_ip, const IntelligenceIpReputation &ip_reputation) const;
|
||||
EventVerdict queryIpReputation(const string &source_ip);
|
||||
EventVerdict handleEvent();
|
||||
|
||||
bool isAppEnabled() const;
|
||||
bool isPrevent() const;
|
||||
@@ -154,9 +150,12 @@ private:
|
||||
Maybe<LogField, Context::Error> genLogField(const string &log_key, const string &env_key) const;
|
||||
Maybe<LogField, Context::Error> genLogIPField(const string &log_key, const string &env_key) const;
|
||||
|
||||
bool is_intelligence_routine_running = false;
|
||||
I_Environment *i_env = nullptr;
|
||||
I_Intelligence_IS_V2 *i_intelligence = nullptr;
|
||||
I_MainLoop *i_mainloop = nullptr;
|
||||
TemporaryCache<string, IntelligenceIpReputation> ip_reputation_cache;
|
||||
unordered_set<string> pending_ips;
|
||||
};
|
||||
|
||||
bool
|
||||
@@ -177,79 +176,139 @@ Layer7AccessControl::Impl::isPrevent() const
|
||||
return mode == "prevent";
|
||||
}
|
||||
|
||||
void
|
||||
Layer7AccessControl::Impl::scheduleIntelligenceQuery(const string &ip)
|
||||
{
|
||||
dbgFlow(D_L7_ACCESS_CONTROL) << "Scheduling intelligence query about reputation of IP: " << ip;
|
||||
|
||||
pending_ips.emplace(ip);
|
||||
|
||||
if (!is_intelligence_routine_running) {
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Starting intelligence routine";
|
||||
is_intelligence_routine_running = true;
|
||||
i_mainloop->addOneTimeRoutine(
|
||||
I_MainLoop::RoutineType::System,
|
||||
[&] () { queryIntelligence(); },
|
||||
"Check IP reputation"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Maybe<IntelligenceIpReputation>
|
||||
Layer7AccessControl::Impl::getIpReputation(const string &ip)
|
||||
{
|
||||
dbgFlow(D_L7_ACCESS_CONTROL) << "Getting reputation of IP " << ip;
|
||||
|
||||
if (ip_reputation_cache.doesKeyExists(ip)) return ip_reputation_cache.getEntry(ip);
|
||||
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Not found in cache - about to query intelligence";
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << ip << " reputation was not found in cache";
|
||||
|
||||
QueryRequest request = QueryRequest(
|
||||
Condition::EQUALS,
|
||||
"ipv4Addresses",
|
||||
ip,
|
||||
true,
|
||||
AttributeKeyType::REGULAR
|
||||
);
|
||||
|
||||
auto response = i_intelligence->queryIntelligence<IntelligenceIpReputation>(request);
|
||||
|
||||
if (!response.ok()) {
|
||||
dbgWarning(D_L7_ACCESS_CONTROL) << "Failed to query intelligence about reputation of IP: " << ip;
|
||||
return genError("Failed to query intelligence");
|
||||
}
|
||||
|
||||
auto &unpacked_response = response.unpack();
|
||||
if (unpacked_response.empty()) {
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Intelligence reputation response collection is empty. IP is clean.";
|
||||
return IntelligenceIpReputation();
|
||||
}
|
||||
|
||||
for (const auto &intelligence_reply : unpacked_response) {
|
||||
if (intelligence_reply.getAssetType() == crowdsec_asset_type && !intelligence_reply.getData().empty()){
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << intelligence_reply.getData().front();
|
||||
return intelligence_reply.getData().front();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return IntelligenceIpReputation();
|
||||
return genError("Intelligence needed");
|
||||
}
|
||||
|
||||
ngx_http_cp_verdict_e
|
||||
Layer7AccessControl::Impl::checkReputation(const string &source_ip)
|
||||
EventVerdict
|
||||
Layer7AccessControl::Impl::queryIpReputation(const string &source_ip)
|
||||
{
|
||||
auto ip_reputation = getIpReputation(source_ip);
|
||||
if (!ip_reputation.ok()) {
|
||||
dbgWarning(D_L7_ACCESS_CONTROL) << "Could not query intelligence. Retruning default verdict";
|
||||
bool is_drop_by_default = getProfileAgentSettingWithDefault<bool>(false, "layer7AccessControl.dropByDefault");
|
||||
if (!(is_drop_by_default && isPrevent())) return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
||||
generateLog(source_ip, IntelligenceIpReputation());
|
||||
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Scheduling Intelligence query - returning Wait verdict";
|
||||
scheduleIntelligenceQuery(source_ip);
|
||||
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT;
|
||||
}
|
||||
|
||||
if (!ip_reputation.unpack().isMalicious()) {
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Accepting IP: " << source_ip;
|
||||
ip_reputation_cache.deleteEntry(source_ip);
|
||||
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
||||
}
|
||||
|
||||
ip_reputation_cache.emplaceEntry(source_ip, ip_reputation.unpack());
|
||||
return generateLog(source_ip, ip_reputation.unpack());
|
||||
}
|
||||
|
||||
if (isPrevent()) {
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Dropping IP: " << source_ip;
|
||||
generateLog(source_ip, ip_reputation.unpack());
|
||||
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
|
||||
EventVerdict
|
||||
Layer7AccessControl::Impl::handleEvent()
|
||||
{
|
||||
auto source_identifier = i_env->get<string>(HttpTransactionData::source_identifier);
|
||||
if (source_identifier.ok() && IPAddr::createIPAddr(source_identifier.unpack()).ok()) {
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Found a valid source identifier value: " << source_identifier.unpack();
|
||||
return queryIpReputation(source_identifier.unpack());
|
||||
}
|
||||
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Detecting IP: " << source_ip;
|
||||
generateLog(source_ip, ip_reputation.unpack());
|
||||
auto orig_source_ip = i_env->get<IPAddr>(HttpTransactionData::client_ip_ctx);
|
||||
if (orig_source_ip.ok()) {
|
||||
stringstream ss_client_ip;
|
||||
ss_client_ip << orig_source_ip.unpack();
|
||||
return queryIpReputation(ss_client_ip.str());
|
||||
}
|
||||
|
||||
dbgWarning(D_L7_ACCESS_CONTROL) << "Could not extract the Client IP address from context";
|
||||
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
||||
}
|
||||
|
||||
void
|
||||
Layer7AccessControl::Impl::processIntelligenceResponse(
|
||||
const string &ip,
|
||||
const vector<AssetReply<IntelligenceIpReputation>> &response)
|
||||
{
|
||||
if (response.empty()) {
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Intelligence reputation response collection is empty. IP is clean.";
|
||||
ip_reputation_cache.emplaceEntry(ip, IntelligenceIpReputation());
|
||||
return;
|
||||
}
|
||||
|
||||
for (const auto &intelligence_reply : response) {
|
||||
if (intelligence_reply.getAssetType() == crowdsec_asset_type && !intelligence_reply.getData().empty()) {
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << intelligence_reply.getData().front();
|
||||
ip_reputation_cache.emplaceEntry(ip, intelligence_reply.getData().front());
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Could not find a matching intelligence asset type for IP: " << ip;
|
||||
ip_reputation_cache.emplaceEntry(ip, IntelligenceIpReputation());
|
||||
}
|
||||
|
||||
void
|
||||
Layer7AccessControl::Impl::queryIntelligence()
|
||||
{
|
||||
dbgFlow(D_L7_ACCESS_CONTROL) << "Started IP reputation intelligence routine";
|
||||
|
||||
while (!pending_ips.empty()) {
|
||||
i_mainloop->yield();
|
||||
|
||||
auto ip = *(pending_ips.begin());
|
||||
pending_ips.erase(pending_ips.begin());
|
||||
|
||||
if (ip_reputation_cache.doesKeyExists(ip)) continue;
|
||||
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Querying intelligence about reputation of IP: " << ip;
|
||||
|
||||
QueryRequest request = QueryRequest(
|
||||
Condition::EQUALS,
|
||||
"ipv4Addresses",
|
||||
ip,
|
||||
true,
|
||||
AttributeKeyType::REGULAR
|
||||
);
|
||||
|
||||
auto response = i_intelligence->queryIntelligence<IntelligenceIpReputation>(request);
|
||||
|
||||
if (!response.ok()) {
|
||||
dbgWarning(D_L7_ACCESS_CONTROL)
|
||||
<< "Failed to query intelligence about reputation of IP: "
|
||||
<< ip
|
||||
<< ", error: "
|
||||
<< response.getErr();
|
||||
ip_reputation_cache.emplaceEntry(ip, IntelligenceIpReputation());
|
||||
continue;
|
||||
}
|
||||
|
||||
processIntelligenceResponse(ip, response.unpack());
|
||||
}
|
||||
|
||||
is_intelligence_routine_running = false;
|
||||
}
|
||||
|
||||
EventVerdict
|
||||
Layer7AccessControl::Impl::generateLog(const string &source_ip, const IntelligenceIpReputation &ip_reputation) const
|
||||
{
|
||||
dbgFlow(D_L7_ACCESS_CONTROL) << "About to generate Layer-7 Access Control log";
|
||||
@@ -287,6 +346,14 @@ Layer7AccessControl::Impl::generateLog(const string &source_ip, const Intelligen
|
||||
<< ip_reputation.getOrigin()
|
||||
<< ip_reputation.getIpv4Address()
|
||||
<< ip_reputation.getScenario();
|
||||
|
||||
if (isPrevent()) {
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Dropping IP: " << source_ip;
|
||||
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
|
||||
}
|
||||
|
||||
dbgTrace(D_L7_ACCESS_CONTROL) << "Detecting IP: " << source_ip;
|
||||
return ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
||||
}
|
||||
|
||||
Maybe<LogField, Context::Error>
|
||||
@@ -315,6 +382,7 @@ Layer7AccessControl::Impl::init()
|
||||
registerListener();
|
||||
i_env = Singleton::Consume<I_Environment>::by<Layer7AccessControl>();
|
||||
i_intelligence = Singleton::Consume<I_Intelligence_IS_V2>::by<Layer7AccessControl>();
|
||||
i_mainloop = Singleton::Consume<I_MainLoop>::by<Layer7AccessControl>();
|
||||
|
||||
chrono::minutes expiration(
|
||||
getProfileAgentSettingWithDefault<uint>(60u, "layer7AccessControl.crowdsec.cacheExpiration")
|
||||
@@ -322,7 +390,7 @@ Layer7AccessControl::Impl::init()
|
||||
|
||||
ip_reputation_cache.startExpiration(
|
||||
expiration,
|
||||
Singleton::Consume<I_MainLoop>::by<Layer7AccessControl>(),
|
||||
i_mainloop,
|
||||
Singleton::Consume<I_TimeGet>::by<Layer7AccessControl>()
|
||||
);
|
||||
}
|
||||
|
||||
@@ -52,6 +52,7 @@ public:
|
||||
const EventVerdict drop_verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_DROP;
|
||||
const EventVerdict accept_verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_ACCEPT;
|
||||
const EventVerdict inspect_verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_INSPECT;
|
||||
const EventVerdict wait_verdict = ngx_http_cp_verdict_e::TRAFFIC_VERDICT_WAIT;
|
||||
Layer7AccessControl l7_access_control;
|
||||
::Environment env;
|
||||
ConfigComponent config;
|
||||
@@ -62,6 +63,7 @@ public:
|
||||
NiceMock<MockRestApi> mock_rest;
|
||||
AgentDetails agent_details;
|
||||
IntelligenceComponentV2 intelligence_comp;
|
||||
I_MainLoop::Routine query_intelligence_routine;
|
||||
Context ctx;
|
||||
};
|
||||
|
||||
@@ -273,6 +275,13 @@ TEST_F(Layer7AccessControlTest, ReturnAcceptVerdict)
|
||||
const HttpHeader header2{ Buffer("date"), Buffer("Sun, 26 Mar 2023 18:45:22 GMT"), 1 };
|
||||
const HttpHeader header3{ Buffer("x-forwarded-for"), Buffer("1.2.3.4"), 2, true};
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Check IP reputation", _))
|
||||
.WillOnce(DoAll(SaveArg<1>(&query_intelligence_routine), Return(0))
|
||||
);
|
||||
EXPECT_CALL(mock_ml, yield(A<bool>())).Times(1);
|
||||
|
||||
EXPECT_THAT(
|
||||
HttpRequestHeaderEvent(header1).performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", inspect_verdict))
|
||||
@@ -283,6 +292,13 @@ TEST_F(Layer7AccessControlTest, ReturnAcceptVerdict)
|
||||
);
|
||||
EXPECT_THAT(
|
||||
HttpRequestHeaderEvent(header3).performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", wait_verdict))
|
||||
);
|
||||
|
||||
query_intelligence_routine();
|
||||
|
||||
EXPECT_THAT(
|
||||
WaitTransactionEvent().performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", accept_verdict))
|
||||
);
|
||||
}
|
||||
@@ -299,6 +315,13 @@ TEST_F(Layer7AccessControlTest, ReturnDropVerdictOnMaliciousReputation)
|
||||
sendMessage(true, _, _, _, _, _, _, MessageTypeTag::INTELLIGENCE)
|
||||
).WillOnce(Return(malicious_intelligence_response));
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Check IP reputation", _))
|
||||
.WillOnce(DoAll(SaveArg<1>(&query_intelligence_routine), Return(0))
|
||||
);
|
||||
EXPECT_CALL(mock_ml, yield(A<bool>())).Times(1);
|
||||
|
||||
registerTransactionData();
|
||||
ctx.registerValue<string>(HttpTransactionData::source_identifier, "1.2.3.4");
|
||||
const HttpHeader header1{ Buffer("Content-Type"), Buffer("application/json"), 0 };
|
||||
@@ -310,7 +333,18 @@ TEST_F(Layer7AccessControlTest, ReturnDropVerdictOnMaliciousReputation)
|
||||
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header1).query(), ElementsAre(inspect_verdict));
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header2).query(), ElementsAre(inspect_verdict));
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header3).query(), ElementsAre(drop_verdict));
|
||||
|
||||
EXPECT_THAT(
|
||||
HttpRequestHeaderEvent(header3).performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", wait_verdict))
|
||||
);
|
||||
|
||||
query_intelligence_routine();
|
||||
|
||||
EXPECT_THAT(
|
||||
WaitTransactionEvent().performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", drop_verdict))
|
||||
);
|
||||
|
||||
verifyReport(report, "1.2.3.4", "Prevent");
|
||||
}
|
||||
@@ -327,6 +361,13 @@ TEST_F(Layer7AccessControlTest, ReturnDropVerdictCacheBased)
|
||||
sendMessage(true, _, _, _, _, _, _, MessageTypeTag::INTELLIGENCE)
|
||||
).WillOnce(Return(malicious_intelligence_response));
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Check IP reputation", _))
|
||||
.WillOnce(DoAll(SaveArg<1>(&query_intelligence_routine), Return(0))
|
||||
);
|
||||
EXPECT_CALL(mock_ml, yield(A<bool>())).Times(1);
|
||||
|
||||
registerTransactionData();
|
||||
ctx.registerValue<string>(HttpTransactionData::source_identifier, "1.2.3.4");
|
||||
const HttpHeader header1{ Buffer("Content-Type"), Buffer("application/json"), 0 };
|
||||
@@ -338,7 +379,18 @@ TEST_F(Layer7AccessControlTest, ReturnDropVerdictCacheBased)
|
||||
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header1).query(), ElementsAre(inspect_verdict));
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header2).query(), ElementsAre(inspect_verdict));
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header3).query(), ElementsAre(drop_verdict));
|
||||
|
||||
EXPECT_THAT(
|
||||
HttpRequestHeaderEvent(header3).performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", wait_verdict))
|
||||
);
|
||||
|
||||
query_intelligence_routine();
|
||||
|
||||
EXPECT_THAT(
|
||||
WaitTransactionEvent().performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", drop_verdict))
|
||||
);
|
||||
|
||||
verifyReport(report, "1.2.3.4", "Prevent");
|
||||
|
||||
@@ -361,6 +413,13 @@ TEST_F(Layer7AccessControlTest, AcceptOnDetect)
|
||||
sendMessage(true, _, _, _, _, _, _, MessageTypeTag::INTELLIGENCE)
|
||||
).WillOnce(Return(malicious_intelligence_response));
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Check IP reputation", _))
|
||||
.WillOnce(DoAll(SaveArg<1>(&query_intelligence_routine), Return(0))
|
||||
);
|
||||
EXPECT_CALL(mock_ml, yield(A<bool>())).Times(1);
|
||||
|
||||
registerTransactionData();
|
||||
ctx.registerValue<string>(HttpTransactionData::source_identifier, "1.2.3.4");
|
||||
const HttpHeader header1{ Buffer("Content-Type"), Buffer("application/json"), 0 };
|
||||
@@ -372,7 +431,18 @@ TEST_F(Layer7AccessControlTest, AcceptOnDetect)
|
||||
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header1).query(), ElementsAre(inspect_verdict));
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header2).query(), ElementsAre(inspect_verdict));
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header3).query(), ElementsAre(accept_verdict));
|
||||
|
||||
EXPECT_THAT(
|
||||
HttpRequestHeaderEvent(header3).performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", wait_verdict))
|
||||
);
|
||||
|
||||
query_intelligence_routine();
|
||||
|
||||
EXPECT_THAT(
|
||||
WaitTransactionEvent().performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", accept_verdict))
|
||||
);
|
||||
|
||||
verifyReport(report, "1.2.3.4", "Detect");
|
||||
}
|
||||
@@ -389,6 +459,13 @@ TEST_F(Layer7AccessControlTest, FallbackToSourceIPAndDrop)
|
||||
sendMessage(true, _, _, _, _, _, _, MessageTypeTag::INTELLIGENCE)
|
||||
).WillOnce(Return(malicious_intelligence_response));
|
||||
|
||||
EXPECT_CALL(
|
||||
mock_ml,
|
||||
addOneTimeRoutine(_, _, "Check IP reputation", _))
|
||||
.WillOnce(DoAll(SaveArg<1>(&query_intelligence_routine), Return(0))
|
||||
);
|
||||
EXPECT_CALL(mock_ml, yield(A<bool>())).Times(1);
|
||||
|
||||
registerTransactionData();
|
||||
const HttpHeader header1{ Buffer("Content-Type"), Buffer("application/json"), 0 };
|
||||
const HttpHeader header2{ Buffer("date"), Buffer("Sun, 26 Mar 2023 18:45:22 GMT"), 1, true };
|
||||
@@ -397,7 +474,18 @@ TEST_F(Layer7AccessControlTest, FallbackToSourceIPAndDrop)
|
||||
EXPECT_CALL(mock_logging, sendLog(_)).WillOnce(SaveArg<0>(&report));
|
||||
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header1).query(), ElementsAre(inspect_verdict));
|
||||
EXPECT_THAT(HttpRequestHeaderEvent(header2).query(), ElementsAre(drop_verdict));
|
||||
|
||||
EXPECT_THAT(
|
||||
HttpRequestHeaderEvent(header2).performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", wait_verdict))
|
||||
);
|
||||
|
||||
query_intelligence_routine();
|
||||
|
||||
EXPECT_THAT(
|
||||
WaitTransactionEvent().performNamedQuery(),
|
||||
ElementsAre(Pair("Layer-7 Access Control app", drop_verdict))
|
||||
);
|
||||
|
||||
verifyReport(report, "", "Prevent");
|
||||
}
|
||||
|
||||
@@ -20,4 +20,5 @@ add_library(local_policy_mgmt_gen
|
||||
new_exceptions.cc
|
||||
access_control_practice.cc
|
||||
configmaps.cc
|
||||
reverse_proxy_section.cc
|
||||
)
|
||||
|
||||
@@ -316,7 +316,7 @@ TriggersInWaapSection::save(cereal::JSONOutputArchive &out_ar) const
|
||||
}
|
||||
|
||||
ParsedMatch::ParsedMatch(const string &_operator, const string &_tag, const string &_value)
|
||||
:
|
||||
:
|
||||
operator_type(_operator),
|
||||
tag(_tag),
|
||||
value(_value)
|
||||
@@ -368,7 +368,7 @@ AppSecOverride::AppSecOverride(const SourcesIdentifiers &parsed_trusted_sources)
|
||||
|
||||
// LCOV_EXCL_START Reason: no test exist
|
||||
AppSecOverride::AppSecOverride(const InnerException &parsed_exceptions)
|
||||
:
|
||||
:
|
||||
id(parsed_exceptions.getBehaviorId()),
|
||||
parsed_match(parsed_exceptions.getMatch())
|
||||
{
|
||||
@@ -413,7 +413,7 @@ WebAppSection::WebAppSection(
|
||||
const string &default_mode,
|
||||
const AppSecTrustedSources &parsed_trusted_sources,
|
||||
const vector<InnerException> &parsed_exceptions)
|
||||
:
|
||||
:
|
||||
application_urls(_application_urls),
|
||||
asset_id(_asset_id),
|
||||
asset_name(_asset_name),
|
||||
@@ -460,7 +460,7 @@ WebAppSection::WebAppSection(
|
||||
const AppsecPracticeAntiBotSection &_anti_bots,
|
||||
const LogTriggerSection &parsed_log_trigger,
|
||||
const AppSecTrustedSources &parsed_trusted_sources)
|
||||
:
|
||||
:
|
||||
application_urls(_application_urls),
|
||||
asset_id(_asset_id),
|
||||
asset_name(_asset_name),
|
||||
@@ -477,6 +477,7 @@ WebAppSection::WebAppSection(
|
||||
{
|
||||
web_attack_mitigation = true;
|
||||
web_attack_mitigation_action =
|
||||
web_attack_mitigation_mode != "Prevent" ? "Transparent" :
|
||||
web_attack_mitigation_severity == "critical" ? "low" :
|
||||
web_attack_mitigation_severity == "high" ? "balanced" :
|
||||
web_attack_mitigation_severity == "medium" ? "high" :
|
||||
@@ -584,6 +585,9 @@ ParsedRule::load(cereal::JSONInputArchive &archive_in)
|
||||
parseAppsecJSONKey<string>("custom-response", custom_response, archive_in);
|
||||
parseAppsecJSONKey<string>("source-identifiers", source_identifiers, archive_in);
|
||||
parseAppsecJSONKey<string>("trusted-sources", trusted_sources, archive_in);
|
||||
parseAppsecJSONKey<string>("upstream", rpm_upstream, archive_in);
|
||||
parseAppsecJSONKey<string>("rp-settings", rpm_settings, archive_in);
|
||||
parseAppsecJSONKey<bool>("ssl", rpm_is_ssl, archive_in);
|
||||
try {
|
||||
archive_in(cereal::make_nvp("host", host));
|
||||
} catch (const cereal::Exception &e)
|
||||
@@ -620,6 +624,24 @@ ParsedRule::getMode() const
|
||||
return mode;
|
||||
}
|
||||
|
||||
const string &
|
||||
ParsedRule::rpmGetUpstream() const
|
||||
{
|
||||
return rpm_upstream;
|
||||
}
|
||||
|
||||
const std::string &
|
||||
ParsedRule::rpmGetRPSettings() const
|
||||
{
|
||||
return rpm_settings;
|
||||
}
|
||||
|
||||
bool
|
||||
ParsedRule::rpmIsHttps() const
|
||||
{
|
||||
return rpm_is_ssl;
|
||||
}
|
||||
|
||||
void
|
||||
ParsedRule::setHost(const string &_host)
|
||||
{
|
||||
@@ -691,6 +713,7 @@ AppsecLinuxPolicy::serialize(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading Appsec Linux Policy";
|
||||
parseAppsecJSONKey<AppsecPolicySpec>("policies", policies, archive_in);
|
||||
parseAppsecJSONKey<vector<RPMSettings>>("rp-settings", rpm_settings, archive_in);
|
||||
parseAppsecJSONKey<vector<AppSecPracticeSpec>>("practices", practices, archive_in);
|
||||
parseAppsecJSONKey<vector<AppsecTriggerSpec>>("log-triggers", log_triggers, archive_in);
|
||||
parseAppsecJSONKey<vector<AppSecCustomResponseSpec>>("custom-responses", custom_responses, archive_in);
|
||||
@@ -745,6 +768,13 @@ AppsecLinuxPolicy::getAppsecSourceIdentifierSpecs() const
|
||||
return sources_identifiers;
|
||||
}
|
||||
|
||||
|
||||
const vector<RPMSettings> &
|
||||
AppsecLinuxPolicy::rpmGetRPSettings() const
|
||||
{
|
||||
return rpm_settings;
|
||||
}
|
||||
|
||||
void
|
||||
AppsecLinuxPolicy::addSpecificRule(const ParsedRule &_rule)
|
||||
{
|
||||
|
||||
@@ -304,11 +304,13 @@ ExceptionMatch::getMatch() const
|
||||
ExceptionBehavior::ExceptionBehavior(const string &_value)
|
||||
{
|
||||
key = _value == "suppressLog" ? "log" : "action";
|
||||
value = key_to_action.at(_value);
|
||||
try {
|
||||
value = key_to_action.at(_value);
|
||||
id = to_string(boost::uuids::random_generator()());
|
||||
} catch (const boost::uuids::entropy_error &e) {
|
||||
dbgWarning(D_LOCAL_POLICY) << "Failed to generate exception behavior UUID. Error: " << e.what();
|
||||
} catch (std::exception &e) {
|
||||
dbgWarning(D_LOCAL_POLICY) << "Failed to find exception name: " << _value << ". Error: " << e.what();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@
|
||||
#include "triggers_section.h"
|
||||
#include "exceptions_section.h"
|
||||
#include "trusted_sources_section.h"
|
||||
#include "reverse_proxy_section.h"
|
||||
#include "new_practice.h"
|
||||
|
||||
class AppSecWebBotsURI
|
||||
@@ -148,7 +149,7 @@ public:
|
||||
PracticeAdvancedConfig() {}
|
||||
|
||||
PracticeAdvancedConfig(const AppSecPracticeSpec &parsed_appsec_spec)
|
||||
:
|
||||
:
|
||||
http_header_max_size(parsed_appsec_spec.getWebAttacks().getMaxHeaderSizeBytes()),
|
||||
http_illegal_methods_allowed(0),
|
||||
http_request_body_max_size(parsed_appsec_spec.getWebAttacks().getMaxBodySizeKb()),
|
||||
@@ -162,7 +163,7 @@ public:
|
||||
int _http_request_body_max_size,
|
||||
int _json_max_object_depth,
|
||||
int _url_max_size)
|
||||
:
|
||||
:
|
||||
http_header_max_size(_http_header_max_size),
|
||||
http_illegal_methods_allowed(0),
|
||||
http_request_body_max_size(_http_request_body_max_size),
|
||||
@@ -186,7 +187,7 @@ class TriggersInWaapSection
|
||||
{
|
||||
public:
|
||||
TriggersInWaapSection(const LogTriggerSection &log_section)
|
||||
:
|
||||
:
|
||||
trigger_type("log"),
|
||||
id(log_section.getTriggerId()),
|
||||
name(log_section.getTriggerName()),
|
||||
@@ -241,13 +242,13 @@ public:
|
||||
AppsecPracticeAntiBotSection(const NewAppSecPracticeAntiBot &anti_bot) :
|
||||
injected_uris(anti_bot.getIjectedUris()),
|
||||
validated_uris(anti_bot.getValidatedUris())
|
||||
{};
|
||||
{};
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
AppsecPracticeAntiBotSection(const AppSecPracticeAntiBot &anti_bot) :
|
||||
injected_uris(anti_bot.getIjectedUris()),
|
||||
validated_uris(anti_bot.getValidatedUris())
|
||||
{};
|
||||
{};
|
||||
|
||||
void save(cereal::JSONOutputArchive &out_ar) const;
|
||||
|
||||
@@ -278,20 +279,20 @@ public:
|
||||
);
|
||||
|
||||
WebAppSection(
|
||||
const std::string &_application_urls,
|
||||
const std::string &_asset_id,
|
||||
const std::string &_asset_name,
|
||||
const std::string &_rule_id,
|
||||
const std::string &_rule_name,
|
||||
const std::string &_practice_id,
|
||||
const std::string &_practice_name,
|
||||
const std::string &_context,
|
||||
const std::string &_web_attack_mitigation_severity,
|
||||
const std::string &_web_attack_mitigation_mode,
|
||||
const PracticeAdvancedConfig &_practice_advanced_config,
|
||||
const AppsecPracticeAntiBotSection &_anti_bots,
|
||||
const LogTriggerSection &parsed_log_trigger,
|
||||
const AppSecTrustedSources &parsed_trusted_sources);
|
||||
const std::string &_application_urls,
|
||||
const std::string &_asset_id,
|
||||
const std::string &_asset_name,
|
||||
const std::string &_rule_id,
|
||||
const std::string &_rule_name,
|
||||
const std::string &_practice_id,
|
||||
const std::string &_practice_name,
|
||||
const std::string &_context,
|
||||
const std::string &_web_attack_mitigation_severity,
|
||||
const std::string &_web_attack_mitigation_mode,
|
||||
const PracticeAdvancedConfig &_practice_advanced_config,
|
||||
const AppsecPracticeAntiBotSection &_anti_bots,
|
||||
const LogTriggerSection &parsed_log_trigger,
|
||||
const AppSecTrustedSources &parsed_trusted_sources);
|
||||
|
||||
void save(cereal::JSONOutputArchive &out_ar) const;
|
||||
|
||||
@@ -331,7 +332,7 @@ public:
|
||||
const std::string &_web_attack_mitigation_mode,
|
||||
bool _web_attack_mitigation,
|
||||
const PracticeAdvancedConfig &_practice_advanced_config)
|
||||
:
|
||||
:
|
||||
application_urls(_application_urls),
|
||||
asset_id(_asset_id),
|
||||
asset_name(_asset_name),
|
||||
@@ -345,7 +346,7 @@ public:
|
||||
web_attack_mitigation_mode(_web_attack_mitigation_mode),
|
||||
web_attack_mitigation(_web_attack_mitigation),
|
||||
practice_advanced_config(_practice_advanced_config)
|
||||
{}
|
||||
{}
|
||||
|
||||
void save(cereal::JSONOutputArchive &out_ar) const;
|
||||
|
||||
@@ -371,7 +372,7 @@ public:
|
||||
AppSecRulebase(
|
||||
std::vector<WebAppSection> _webApplicationPractices,
|
||||
std::vector<WebAPISection> _webAPIPractices)
|
||||
:
|
||||
:
|
||||
webApplicationPractices(_webApplicationPractices),
|
||||
webAPIPractices(_webAPIPractices) {}
|
||||
|
||||
@@ -387,7 +388,7 @@ class AppSecWrapper
|
||||
{
|
||||
public:
|
||||
AppSecWrapper(const AppSecRulebase &_app_sec)
|
||||
:
|
||||
:
|
||||
app_sec_rulebase(_app_sec)
|
||||
{}
|
||||
|
||||
@@ -409,6 +410,9 @@ public:
|
||||
const std::vector<std::string> & getPractices() const;
|
||||
const std::string & getHost() const;
|
||||
const std::string & getMode() const;
|
||||
const std::string &rpmGetUpstream() const;
|
||||
const std::string &rpmGetRPSettings() const;
|
||||
bool rpmIsHttps() const;
|
||||
void setHost(const std::string &_host);
|
||||
void setMode(const std::string &_mode);
|
||||
const std::string & getCustomResponse() const;
|
||||
@@ -424,6 +428,9 @@ private:
|
||||
std::string custom_response;
|
||||
std::string source_identifiers;
|
||||
std::string trusted_sources;
|
||||
std::string rpm_upstream;
|
||||
std::string rpm_settings;
|
||||
bool rpm_is_ssl = false;
|
||||
};
|
||||
|
||||
class AppsecPolicySpec : Singleton::Consume<I_Environment>
|
||||
@@ -453,7 +460,7 @@ public:
|
||||
const std::vector<AppsecException> &_exceptions,
|
||||
const std::vector<TrustedSourcesSpec> &_trusted_sources,
|
||||
const std::vector<SourceIdentifierSpecWrapper> &_sources_identifiers)
|
||||
:
|
||||
:
|
||||
policies(_policies),
|
||||
practices(_practices),
|
||||
log_triggers(_log_triggers),
|
||||
@@ -471,6 +478,7 @@ public:
|
||||
const std::vector<AppsecException> & getAppsecExceptions() const;
|
||||
const std::vector<TrustedSourcesSpec> & getAppsecTrustedSourceSpecs() const;
|
||||
const std::vector<SourceIdentifierSpecWrapper> & getAppsecSourceIdentifierSpecs() const;
|
||||
const std::vector<RPMSettings> &rpmGetRPSettings() const;
|
||||
void addSpecificRule(const ParsedRule &_rule);
|
||||
|
||||
private:
|
||||
@@ -481,6 +489,7 @@ private:
|
||||
std::vector<AppsecException> exceptions;
|
||||
std::vector<TrustedSourcesSpec> trusted_sources;
|
||||
std::vector<SourceIdentifierSpecWrapper> sources_identifiers;
|
||||
std::vector<RPMSettings> rpm_settings;
|
||||
};
|
||||
|
||||
#endif // __APPSEC_PRACTICE_SECTION_H__
|
||||
|
||||
@@ -50,7 +50,7 @@ static const std::unordered_map<std::string, TriggerType> string_to_trigger_type

static const std::unordered_map<std::string, std::string> key_to_practices_val = {
{ "prevent-learn", "Prevent"},
{ "detect-learn", "Detect"},
{ "detect-learn", "Learn"},
{ "prevent", "Prevent"},
{ "detect", "Detect"},
{ "inactive", "Inactive"}
@@ -70,9 +70,9 @@ parseAppsecJSONKey(
archive_in.setNextName(nullptr);
value = default_value;
dbgDebug(D_LOCAL_POLICY)
<< "Could not parse the required key. Key: "
<< "Could not parse the required key. Key: \""
<< key_name
<< ", Error: "
<< "\", Error: "
<< e.what();
}
}

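The hunk above only shows the catch branch of the parseAppsecJSONKey helper. As a minimal sketch, assuming a shape and signature typical for this pattern (not the exact project code), the full helper looks roughly like this:

    template <typename T>
    void
    parseAppsecJSONKey(
        const std::string &key_name,
        T &value,
        cereal::JSONInputArchive &archive_in,
        const T &default_value = T())
    {
        try {
            // cereal throws if the key is absent or has the wrong type
            archive_in(cereal::make_nvp(key_name, value));
        } catch (const cereal::Exception &e) {
            archive_in.setNextName(nullptr);
            value = default_value;
            dbgDebug(D_LOCAL_POLICY)
                << "Could not parse the required key. Key: \"" << key_name
                << "\", Error: " << e.what();
        }
    }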
@@ -59,6 +59,7 @@ public:
|
||||
trusted_sources(_trusted_sources),
|
||||
sources_identifiers(_sources_identifiers) {}
|
||||
// LCOV_EXCL_STOP
|
||||
void serialize(cereal::JSONInputArchive &archive_in);
|
||||
|
||||
const NewAppsecPolicySpec & getAppsecPolicySpec() const;
|
||||
const std::vector<NewAppSecPracticeSpec> & getAppSecPracticeSpecs() const;
|
||||
|
||||
@@ -147,8 +147,8 @@ public:
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
FileSecurityProtectionsSection(
|
||||
int _file_size_limit,
|
||||
int _archive_file_size_limit,
|
||||
uint64_t _file_size_limit,
|
||||
uint64_t _archive_file_size_limit,
|
||||
bool _allow_files_without_name,
|
||||
bool _required_file_size_limit,
|
||||
bool _required_archive_extraction,
|
||||
@@ -171,8 +171,8 @@ public:
|
||||
void save(cereal::JSONOutputArchive &out_ar) const;
|
||||
|
||||
private:
|
||||
int file_size_limit;
|
||||
int archive_file_size_limit;
|
||||
uint64_t file_size_limit;
|
||||
uint64_t archive_file_size_limit;
|
||||
bool allow_files_without_name;
|
||||
bool required_file_size_limit;
|
||||
bool required_archive_extraction;
|
||||
@@ -233,13 +233,13 @@ class NewFileSecurityArchiveInspection
|
||||
public:
|
||||
void load(cereal::JSONInputArchive &archive_in);
|
||||
|
||||
int getArchiveFileSizeLimit() const;
|
||||
uint64_t getArchiveFileSizeLimit() const;
|
||||
bool getrequiredArchiveExtraction() const;
|
||||
const std::string & getMultiLevelArchiveAction() const;
|
||||
const std::string & getUnopenedArchiveAction() const;
|
||||
|
||||
private:
|
||||
int scan_max_file_size;
|
||||
uint64_t scan_max_file_size;
|
||||
bool extract_archive_files;
|
||||
std::string scan_max_file_size_unit;
|
||||
std::string archived_files_within_archived_files;
|
||||
@@ -251,11 +251,11 @@ class NewFileSecurityLargeFileInspection
|
||||
public:
|
||||
void load(cereal::JSONInputArchive &archive_in);
|
||||
|
||||
int getFileSizeLimit() const;
|
||||
uint64_t getFileSizeLimit() const;
|
||||
const std::string & getFileSizeLimitAction() const;
|
||||
|
||||
private:
|
||||
int file_size_limit;
|
||||
uint64_t file_size_limit;
|
||||
std::string file_size_limit_unit;
|
||||
std::string files_exceeding_size_limit_action;
|
||||
};
|
||||
|
||||
@@ -40,6 +40,7 @@
|
||||
#include "trusted_sources_section.h"
|
||||
#include "new_appsec_linux_policy.h"
|
||||
#include "access_control_practice.h"
|
||||
#include "reverse_proxy_section.h"
|
||||
|
||||
enum class AnnotationTypes {
|
||||
PRACTICE,
|
||||
@@ -109,11 +110,6 @@ private:
|
||||
};
|
||||
|
||||
class PolicyMakerUtils
|
||||
:
|
||||
Singleton::Consume<I_Environment>,
|
||||
Singleton::Consume<I_OrchestrationTools>,
|
||||
Singleton::Consume<I_Messaging>,
|
||||
Singleton::Consume<I_ShellCmd>
|
||||
{
|
||||
public:
|
||||
std::string proccesSingleAppsecPolicy(
|
||||
@@ -206,6 +202,7 @@ private:
|
||||
createThreatPreventionPracticeSections(
|
||||
const std::string &asset_name,
|
||||
const std::string &url,
|
||||
const std::string &port,
|
||||
const std::string &uri,
|
||||
const std::string &default_mode,
|
||||
const V1beta2AppsecLinuxPolicy &policy,
|
||||
@@ -231,6 +228,11 @@ private:
|
||||
template<class T, class R>
|
||||
void createAgentPolicyFromAppsecPolicy(const std::string &policy_name, const T &appsec_policy);
|
||||
|
||||
void rpmBuildNginxServers(const AppsecLinuxPolicy &policy);
|
||||
void rpmReportInfo(const std::string &msg);
|
||||
void rpmReportError(const std::string &msg);
|
||||
|
||||
std::string policy_version_name;
|
||||
std::map<std::string, LogTriggerSection> log_triggers;
|
||||
std::map<std::string, WebUserResponseTriggerSection> web_user_res_triggers;
|
||||
std::map<std::string, std::vector<InnerException>> inner_exceptions;
|
||||
|
||||
@@ -0,0 +1,68 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#ifndef __REVERSE_PROXY_SECTION_H__
|
||||
#define __REVERSE_PROXY_SECTION_H__
|
||||
|
||||
#include <cereal/archives/json.hpp>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "agent_core_utilities.h"
|
||||
#include "i_shell_cmd.h"
|
||||
|
||||
class ParsedRule;
|
||||
|
||||
class RPMSettings
|
||||
{
|
||||
public:
|
||||
void load(cereal::JSONInputArchive &archive_in);
|
||||
|
||||
const std::string & getName() const;
|
||||
std::string applySettings(const std::string &server_content) const;
|
||||
|
||||
private:
|
||||
std::string name;
|
||||
std::string host_hdr = "$host";
|
||||
std::string dns_resolver = "127.0.0.11";
|
||||
};
|
||||
|
||||
class ReverseProxyBuilder
|
||||
{
|
||||
public:
|
||||
static void init();
|
||||
|
||||
static Maybe<void> addNginxServerLocation(
|
||||
std::string location,
|
||||
const std::string &host,
|
||||
const ParsedRule &rule,
|
||||
const RPMSettings &rp_settings);
|
||||
|
||||
static Maybe<void> createNewNginxServer(
|
||||
const std::string &host,
|
||||
const ParsedRule &rule,
|
||||
const RPMSettings &rp_settings);
|
||||
|
||||
static std::string replaceTemplate(
|
||||
const std::string &content,
|
||||
const boost::regex &nginx_directive_template,
|
||||
const std::string &value);
|
||||
|
||||
static Maybe<void> reloadNginx();
|
||||
|
||||
private:
|
||||
static Maybe<void> createSSLNginxServer(const std::string &host, const RPMSettings &rp_settings);
|
||||
static Maybe<void> createHTTPNginxServer(const std::string &host, const RPMSettings &rp_settings);
|
||||
|
||||
static Maybe<std::string> getTemplateContent(const std::string &nginx_template_name);
|
||||
};
|
||||
#endif // __REVERSE_PROXY_SECTION_H__
|
||||
@@ -90,6 +90,7 @@ public:
|
||||
RulesConfigRulebase(
|
||||
const std::string &_name,
|
||||
const std::string &_url,
|
||||
const std::string &_port,
|
||||
const std::string &_uri,
|
||||
std::vector<PracticeSection> _practices,
|
||||
std::vector<ParametersSection> _parameters,
|
||||
|
||||
@@ -55,9 +55,7 @@ const static string default_local_mgmt_policy_path = "/conf/local_policy.yaml";
|
||||
|
||||
class LocalPolicyMgmtGenerator::Impl
|
||||
:
|
||||
public Singleton::Provide<I_LocalPolicyMgmtGen>::From<LocalPolicyMgmtGenerator>,
|
||||
public Singleton::Consume<I_MainLoop>,
|
||||
public Singleton::Consume<I_EnvDetails>
|
||||
public Singleton::Provide<I_LocalPolicyMgmtGen>::From<LocalPolicyMgmtGenerator>
|
||||
{
|
||||
|
||||
public:
|
||||
@@ -111,7 +109,6 @@ public:
|
||||
|
||||
private:
|
||||
PolicyMakerUtils policy_maker_utils;
|
||||
|
||||
};
|
||||
|
||||
LocalPolicyMgmtGenerator::LocalPolicyMgmtGenerator()
|
||||
|
||||
@@ -70,3 +70,31 @@ V1beta2AppsecLinuxPolicy::addSpecificRule(const NewParsedRule &_rule)
|
||||
policies.addSpecificRule(_rule);
|
||||
}
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
void
|
||||
V1beta2AppsecLinuxPolicy::serialize(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgInfo(D_LOCAL_POLICY) << "Loading Appsec V1Beta2 Linux Policy";
|
||||
|
||||
// Check for the presence of "apiVersion" key, present only from V1Beta2
|
||||
string api_version;
|
||||
archive_in(cereal::make_nvp("apiVersion", api_version));
|
||||
if (api_version != "v1beta2") throw cereal::Exception("Failed to parse JSON as v1Beta2 version");
|
||||
|
||||
parseAppsecJSONKey<NewAppsecPolicySpec>("policies", policies, archive_in);
|
||||
parseAppsecJSONKey<vector<NewAppSecPracticeSpec>>(
|
||||
"threatPreventionPractices",
|
||||
threat_prevection_practices,
|
||||
archive_in
|
||||
);
|
||||
parseAppsecJSONKey<vector<AccessControlPracticeSpec>>(
|
||||
"accessControlPractices",
|
||||
access_control_practices,
|
||||
archive_in
|
||||
);
|
||||
parseAppsecJSONKey<vector<NewAppsecLogTrigger>>("logTriggers", log_triggers, archive_in);
|
||||
parseAppsecJSONKey<vector<NewAppSecCustomResponse>>("customResponse", custom_responses, archive_in);
|
||||
parseAppsecJSONKey<vector<NewAppsecException>>("exceptions", exceptions, archive_in);
|
||||
parseAppsecJSONKey<vector<NewTrustedSourcesSpec>>("trustedSources", trusted_sources, archive_in);
|
||||
parseAppsecJSONKey<vector<NewSourcesIdentifiers>>("sourcesIdentifiers", sources_identifiers, archive_in);
|
||||
}
|
||||
|
||||
@@ -44,7 +44,7 @@ void
|
||||
NewAppsecException::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading New AppSec exception";
|
||||
parseAppsecJSONKey<string>("name", name, archive_in);
|
||||
parseAppsecJSONKey<string>("name", name, archive_in, "exception");
|
||||
parseAppsecJSONKey<string>("action", action, archive_in);
|
||||
parseAppsecJSONKey<string>("appsecClassName", appsec_class_name, archive_in);
|
||||
if (valid_actions.count(action) == 0) {
|
||||
|
||||
@@ -42,7 +42,7 @@ static const std::unordered_map<std::string, std::string> key_to_mode_val = {
{ "detect", "Detect"},
{ "inactive", "Inactive"}
};
static const std::unordered_map<std::string, int> unit_to_int = {
static const std::unordered_map<std::string, uint64_t> unit_to_int = {
{ "bytes", 1},
{ "KB", 1024},
{ "MB", 1048576},
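A small illustration of why the map's value type moves from int to uint64_t: once a limit is multiplied by one of the unit factors above, gigabyte-scale limits no longer fit in a signed 32-bit int. The helper below is hypothetical; only the multipliers come from the map.

    #include <cstdint>

    // toBytes(3, 1048576)    == 3145728     (3 MB, fits easily)
    // toBytes(3, 1073741824) == 3221225472  (3 GB, exceeds INT_MAX = 2147483647)
    uint64_t toBytes(uint64_t limit, uint64_t unit_multiplier)
    {
        return limit * unit_multiplier;
    }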
@@ -631,8 +631,8 @@ NewIntrusionPrevention::getMode() const
|
||||
}
|
||||
|
||||
FileSecurityProtectionsSection::FileSecurityProtectionsSection(
|
||||
int _file_size_limit,
|
||||
int _archive_file_size_limit,
|
||||
uint64_t _file_size_limit,
|
||||
uint64_t _archive_file_size_limit,
|
||||
bool _allow_files_without_name,
|
||||
bool _required_file_size_limit,
|
||||
bool _required_archive_extraction,
|
||||
@@ -720,7 +720,7 @@ NewFileSecurityArchiveInspection::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading AppSec File Security Archive Inspection practice";
|
||||
parseAppsecJSONKey<bool>("extractArchiveFiles", extract_archive_files, archive_in);
|
||||
parseAppsecJSONKey<int>("scanMaxFileSize", scan_max_file_size, archive_in, 0);
|
||||
parseAppsecJSONKey<uint64_t>("scanMaxFileSize", scan_max_file_size, archive_in, 0);
|
||||
parseAppsecJSONKey<string>("scanMaxFileSizeUnit", scan_max_file_size_unit, archive_in, "bytes");
|
||||
if (size_unit.count(scan_max_file_size_unit) == 0) {
|
||||
dbgWarning(D_LOCAL_POLICY)
|
||||
@@ -749,7 +749,7 @@ NewFileSecurityArchiveInspection::load(cereal::JSONInputArchive &archive_in)
|
||||
}
|
||||
}
|
||||
|
||||
int
|
||||
uint64_t
|
||||
NewFileSecurityArchiveInspection::getArchiveFileSizeLimit() const
|
||||
{
|
||||
if (unit_to_int.find(scan_max_file_size_unit) == unit_to_int.end()) {
|
||||
@@ -784,7 +784,7 @@ void
|
||||
NewFileSecurityLargeFileInspection::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Loading AppSec File Security large File Inspection practice";
|
||||
parseAppsecJSONKey<int>("fileSizeLimit", file_size_limit, archive_in);
|
||||
parseAppsecJSONKey<uint64_t>("fileSizeLimit", file_size_limit, archive_in);
|
||||
parseAppsecJSONKey<string>("fileSizeLimitUnit", file_size_limit_unit, archive_in, "bytes");
|
||||
if (size_unit.count(file_size_limit_unit) == 0) {
|
||||
dbgWarning(D_LOCAL_POLICY)
|
||||
@@ -803,7 +803,7 @@ NewFileSecurityLargeFileInspection::load(cereal::JSONInputArchive &archive_in)
|
||||
}
|
||||
}
|
||||
|
||||
int
|
||||
uint64_t
|
||||
NewFileSecurityLargeFileInspection::getFileSizeLimit() const
|
||||
{
|
||||
if (unit_to_int.find(file_size_limit_unit) == unit_to_int.end()) {
|
||||
|
||||
@@ -64,7 +64,7 @@ void
Identifier::load(cereal::JSONInputArchive &archive_in)
{
dbgTrace(D_LOCAL_POLICY) << "Loading source identifiers spec";
parseAppsecJSONKey<string>("sourceIdentifier", identifier, archive_in);
parseAppsecJSONKey<string>("identifier", identifier, archive_in);
if (valid_identifiers.count(identifier) == 0) {
dbgWarning(D_LOCAL_POLICY) << "AppSec identifier invalid: " << identifier;
}

@@ -13,6 +13,11 @@
|
||||
|
||||
#include "policy_maker_utils.h"
|
||||
|
||||
#include <regex>
|
||||
|
||||
#include "local_policy_mgmt_gen.h"
|
||||
#include "log_generator.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_NGINX_POLICY);
|
||||
@@ -58,7 +63,7 @@ template<class T>
|
||||
Maybe<T>
|
||||
PolicyMakerUtils::openFileAsJson(const string &path)
|
||||
{
|
||||
auto maybe_file_as_json = Singleton::Consume<I_ShellCmd>::by<PolicyMakerUtils>()->getExecOutput(
|
||||
auto maybe_file_as_json = Singleton::Consume<I_ShellCmd>::by<LocalPolicyMgmtGenerator>()->getExecOutput(
|
||||
getFilesystemPathConfig() + "/bin/yq " + path + " -o json"
|
||||
);
|
||||
|
||||
@@ -67,7 +72,7 @@ PolicyMakerUtils::openFileAsJson(const string &path)
|
||||
return genError("Could not convert policy from yaml to json. Error: " + maybe_file_as_json.getErr());
|
||||
}
|
||||
|
||||
auto i_orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<PolicyMakerUtils>();
|
||||
auto i_orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<LocalPolicyMgmtGenerator>();
|
||||
auto maybe_file = i_orchestration_tools->jsonStringToObject<T>(
|
||||
maybe_file_as_json.unpack()
|
||||
);
|
||||
@@ -136,10 +141,11 @@ PolicyMakerUtils::splitHostName(const string &host_name)
url = url.substr(0, url.find(":"));
}

if (host_name == "*") {
if (host_name == "*" || host_name == "*:*") {
url = "Any";
uri = "Any";
}

return make_tuple(url, port, uri);
}

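To make the wildcard change easier to follow, here is a rough, self-contained sketch of the kind of splitting splitHostName performs; the function name and edge-case handling below are illustrative, not the project's exact code.

    #include <string>
    #include <tuple>

    // splitSketch("example.com:8080/api")       -> ("example.com", "8080", "/api")
    // splitSketch("*") and splitSketch("*:*")   -> ("Any", "", "Any")
    std::tuple<std::string, std::string, std::string>
    splitSketch(const std::string &host_name)
    {
        if (host_name == "*" || host_name == "*:*") return std::make_tuple("Any", "", "Any");

        std::string url = host_name;
        std::string port;
        std::string uri;

        auto uri_pos = url.find("/");
        if (uri_pos != std::string::npos) {
            uri = url.substr(uri_pos);
            url = url.substr(0, uri_pos);
        }

        auto port_pos = url.find(":");
        if (port_pos != std::string::npos) {
            port = url.substr(port_pos + 1);
            url = url.substr(0, port_pos);
        }

        return std::make_tuple(url, port, uri);
    }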
@@ -323,6 +329,7 @@ extractAnnotationsNames<NewParsedRule>(
|
||||
if (!trusted_sources_annotation_name.empty()) {
|
||||
rule_annotation[AnnotationTypes::TRUSTED_SOURCES] = policy_name + "/" + trusted_sources_annotation_name;
|
||||
}
|
||||
|
||||
return rule_annotation;
|
||||
}
|
||||
// LCOV_EXCL_STOP
|
||||
@@ -451,6 +458,23 @@ getAppsecCustomResponseSpec(const string &custom_response_annotation_name, const
|
||||
return *custom_response_it;
|
||||
}
|
||||
|
||||
template<class T, class R>
|
||||
R
|
||||
rpmGetAppsecRPSettingSpec(const string &rp_settings_name, const T &policy)
|
||||
{
|
||||
auto rp_settings_vec = policy.rpmGetRPSettings();
|
||||
auto rp_settings_it = extractElement(
|
||||
rp_settings_vec.begin(),
|
||||
rp_settings_vec.end(),
|
||||
rp_settings_name);
|
||||
|
||||
if (rp_settings_it == rp_settings_vec.end()) {
|
||||
dbgTrace(D_NGINX_POLICY) << "Failed to retrieve AppSec RP Settings";
|
||||
return R();
|
||||
}
|
||||
return *rp_settings_it;
|
||||
}
|
||||
|
||||
template<class T, class R>
|
||||
R
|
||||
getAppsecSourceIdentifierSpecs(const string &source_identifiers_annotation_name, const T &policy)
|
||||
@@ -843,6 +867,7 @@ createUserIdentifiers<V1beta2AppsecLinuxPolicy>(
|
||||
RulesConfigRulebase
|
||||
createMultiRulesSections(
|
||||
const string &url,
|
||||
const string &port,
|
||||
const string &uri,
|
||||
const string &practice_id,
|
||||
const string &practice_name,
|
||||
@@ -878,6 +903,7 @@ createMultiRulesSections(
|
||||
RulesConfigRulebase rules_config = RulesConfigRulebase(
|
||||
asset_name,
|
||||
url,
|
||||
port,
|
||||
uri,
|
||||
{practice},
|
||||
exceptions_result,
|
||||
@@ -890,6 +916,7 @@ createMultiRulesSections(
|
||||
RulesConfigRulebase
|
||||
createMultiRulesSections(
|
||||
const string &url,
|
||||
const string &port,
|
||||
const string &uri,
|
||||
const string &practice_id,
|
||||
const string &practice_name,
|
||||
@@ -907,7 +934,8 @@ createMultiRulesSections(
const string &exception_name,
const vector<InnerException> &exceptions)
{
ParametersSection exception_param = ParametersSection(exceptions[0].getBehaviorId(), exception_name);
string behaviorId = exceptions.empty() ? "" : exceptions[0].getBehaviorId();
ParametersSection exception_param = ParametersSection(behaviorId, exception_name);

vector<PracticeSection> practices;
if (!practice_id.empty()) {
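The rewritten lines above guard against indexing an empty vector, which is undefined behavior in C++. In sketch form, with a hypothetical helper:

    #include <string>
    #include <vector>

    // Safe for both empty and non-empty input; ids[0] alone would be undefined behavior when empty.
    std::string firstOrEmpty(const std::vector<std::string> &ids)
    {
        return ids.empty() ? std::string() : ids[0];
    }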
@@ -934,6 +962,7 @@ createMultiRulesSections(
|
||||
RulesConfigRulebase rules_config = RulesConfigRulebase(
|
||||
asset_name,
|
||||
url,
|
||||
port,
|
||||
uri,
|
||||
practices,
|
||||
{exception_param},
|
||||
@@ -983,7 +1012,7 @@ PolicyMakerUtils::createSnortProtecionsSection(const string &file_name, const st
|
||||
auto snort_scriipt_path = getFilesystemPathConfig() + "/scripts/snort_to_ips_local.py";
|
||||
auto cmd = "python " + snort_scriipt_path + " " + path + ".rule " + path + ".out " + path + ".err";
|
||||
|
||||
auto res = Singleton::Consume<I_ShellCmd>::by<PolicyMakerUtils>()->getExecOutput(cmd);
|
||||
auto res = Singleton::Consume<I_ShellCmd>::by<LocalPolicyMgmtGenerator>()->getExecOutput(cmd);
|
||||
|
||||
if (!res.ok()) {
|
||||
dbgWarning(D_LOCAL_POLICY) << res.getErr();
|
||||
@@ -996,7 +1025,7 @@ PolicyMakerUtils::createSnortProtecionsSection(const string &file_name, const st
|
||||
return;
|
||||
}
|
||||
|
||||
auto i_orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<PolicyMakerUtils>();
|
||||
auto i_orchestration_tools = Singleton::Consume<I_OrchestrationTools>::by<LocalPolicyMgmtGenerator>();
|
||||
i_orchestration_tools->removeFile(path + ".rule");
|
||||
i_orchestration_tools->removeFile(path + ".out");
|
||||
i_orchestration_tools->removeFile(path + ".err");
|
||||
@@ -1153,12 +1182,15 @@ void
|
||||
PolicyMakerUtils::createThreatPreventionPracticeSections(
|
||||
const string &asset_name,
|
||||
const string &url,
|
||||
const string &port,
|
||||
const string &uri,
|
||||
const string &default_mode,
|
||||
const V1beta2AppsecLinuxPolicy &policy,
|
||||
map<AnnotationTypes, string> &rule_annotations)
|
||||
{
|
||||
if (rule_annotations[AnnotationTypes::PRACTICE].empty()) {
|
||||
if (rule_annotations[AnnotationTypes::PRACTICE].empty() ||
|
||||
web_apps.count(asset_name)
|
||||
) {
|
||||
return;
|
||||
}
|
||||
string practice_id = "";
|
||||
@@ -1170,6 +1202,7 @@ PolicyMakerUtils::createThreatPreventionPracticeSections(
|
||||
|
||||
RulesConfigRulebase rule_config = createMultiRulesSections(
|
||||
url,
|
||||
port,
|
||||
uri,
|
||||
practice_id,
|
||||
rule_annotations[AnnotationTypes::PRACTICE],
|
||||
@@ -1353,7 +1386,14 @@ PolicyMakerUtils::createPolicyElementsByRule(
|
||||
);
|
||||
}
|
||||
|
||||
if (!rule_annotations[AnnotationTypes::PRACTICE].empty()) {
|
||||
string full_url = rule.getHost() == "*" || rule.getHost() == "*:*"
|
||||
? "Any"
|
||||
: rule.getHost();
|
||||
|
||||
|
||||
if (!rule_annotations[AnnotationTypes::PRACTICE].empty() &&
|
||||
!web_apps.count(full_url)
|
||||
) {
|
||||
string practice_id = "";
|
||||
try {
|
||||
practice_id = to_string(boost::uuids::random_generator()());
|
||||
@@ -1362,12 +1402,10 @@ PolicyMakerUtils::createPolicyElementsByRule(
|
||||
}
|
||||
|
||||
tuple<string, string, string> splited_host_name = splitHostName(rule.getHost());
|
||||
string full_url = rule.getHost() == "*"
|
||||
? "Any"
|
||||
: rule.getHost();
|
||||
|
||||
RulesConfigRulebase rule_config = createMultiRulesSections(
|
||||
std::get<0>(splited_host_name),
|
||||
std::get<1>(splited_host_name),
|
||||
std::get<2>(splited_host_name),
|
||||
practice_id,
|
||||
rule_annotations[AnnotationTypes::PRACTICE],
|
||||
@@ -1426,7 +1464,9 @@ PolicyMakerUtils::createPolicyElementsByRule<V1beta2AppsecLinuxPolicy, NewParsed
|
||||
dbgTrace(D_LOCAL_POLICY) << "Creating policy elements from version V1beta2";
|
||||
map<AnnotationTypes, string> rule_annotations =
|
||||
extractAnnotationsNames<NewParsedRule>(rule, default_rule, policy_name);
|
||||
|
||||
if (
|
||||
rule_annotations.count(AnnotationTypes::TRIGGER) > 0 &&
|
||||
!rule_annotations[AnnotationTypes::TRIGGER].empty() &&
|
||||
!log_triggers.count(rule_annotations[AnnotationTypes::TRIGGER])
|
||||
) {
|
||||
@@ -1438,6 +1478,7 @@ PolicyMakerUtils::createPolicyElementsByRule<V1beta2AppsecLinuxPolicy, NewParsed
|
||||
}
|
||||
|
||||
if (
|
||||
rule_annotations.count(AnnotationTypes::WEB_USER_RES) > 0 &&
|
||||
!rule_annotations[AnnotationTypes::WEB_USER_RES].empty() &&
|
||||
!web_user_res_triggers.count(rule_annotations[AnnotationTypes::WEB_USER_RES])
|
||||
) {
|
||||
@@ -1449,6 +1490,7 @@ PolicyMakerUtils::createPolicyElementsByRule<V1beta2AppsecLinuxPolicy, NewParsed
|
||||
}
|
||||
|
||||
if (
|
||||
rule_annotations.count(AnnotationTypes::EXCEPTION) > 0 &&
|
||||
!rule_annotations[AnnotationTypes::EXCEPTION].empty() &&
|
||||
!inner_exceptions.count(rule_annotations[AnnotationTypes::EXCEPTION])
|
||||
) {
|
||||
@@ -1460,6 +1502,8 @@ PolicyMakerUtils::createPolicyElementsByRule<V1beta2AppsecLinuxPolicy, NewParsed
|
||||
}
|
||||
|
||||
if (
|
||||
rule_annotations.count(AnnotationTypes::TRUSTED_SOURCES) > 0 &&
|
||||
rule_annotations.count(AnnotationTypes::SOURCE_IDENTIFIERS) > 0 &&
|
||||
!rule_annotations[AnnotationTypes::TRUSTED_SOURCES].empty() &&
|
||||
!rule_annotations[AnnotationTypes::SOURCE_IDENTIFIERS].empty() &&
|
||||
!trusted_sources.count(rule_annotations[AnnotationTypes::TRUSTED_SOURCES])
|
||||
@@ -1473,6 +1517,7 @@ PolicyMakerUtils::createPolicyElementsByRule<V1beta2AppsecLinuxPolicy, NewParsed
|
||||
}
|
||||
|
||||
if (
|
||||
rule_annotations.count(AnnotationTypes::PRACTICE) > 0 &&
|
||||
!rule_annotations[AnnotationTypes::PRACTICE].empty() &&
|
||||
!web_apps.count(rule_annotations[AnnotationTypes::PRACTICE])
|
||||
) {
|
||||
@@ -1484,7 +1529,7 @@ PolicyMakerUtils::createPolicyElementsByRule<V1beta2AppsecLinuxPolicy, NewParsed
|
||||
);
|
||||
}
|
||||
|
||||
string full_url = rule.getHost() == "*"
|
||||
string full_url = rule.getHost() == "*" || rule.getHost() == "*:*"
|
||||
? "Any"
|
||||
: rule.getHost();
|
||||
tuple<string, string, string> splited_host_name = splitHostName(rule.getHost());
|
||||
@@ -1501,6 +1546,7 @@ PolicyMakerUtils::createPolicyElementsByRule<V1beta2AppsecLinuxPolicy, NewParsed
|
||||
createThreatPreventionPracticeSections(
|
||||
full_url,
|
||||
std::get<0>(splited_host_name),
|
||||
std::get<1>(splited_host_name),
|
||||
std::get<2>(splited_host_name),
|
||||
rule.getMode(),
|
||||
policy,
|
||||
@@ -1531,11 +1577,11 @@ PolicyMakerUtils::createAgentPolicyFromAppsecPolicy(const string &policy_name, c
|
||||
|
||||
R default_rule = appsec_policy.getAppsecPolicySpec().getDefaultRule();
|
||||
|
||||
// add default rule to policy
|
||||
createPolicyElementsByRule<T, R>(default_rule, default_rule, appsec_policy, policy_name);
|
||||
|
||||
vector<R> specific_rules = appsec_policy.getAppsecPolicySpec().getSpecificRules();
|
||||
createPolicyElements<T, R>(specific_rules, default_rule, appsec_policy, policy_name);
|
||||
|
||||
// add default rule to policy
|
||||
createPolicyElementsByRule<T, R>(default_rule, default_rule, appsec_policy, policy_name);
|
||||
}
|
||||
|
||||
// LCOV_EXCL_START Reason: no test exist
|
||||
@@ -1545,17 +1591,10 @@ PolicyMakerUtils::createAgentPolicyFromAppsecPolicy<V1beta2AppsecLinuxPolicy, Ne
|
||||
const string &policy_name,
|
||||
const V1beta2AppsecLinuxPolicy &appsec_policy)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Proccesing policy, name: " << policy_name;
|
||||
dbgTrace(D_LOCAL_POLICY) << "Proccesing v1beta2 policy, name: " << policy_name;
|
||||
|
||||
NewParsedRule default_rule = appsec_policy.getAppsecPolicySpec().getDefaultRule();
|
||||
|
||||
// add default rule to policy
|
||||
createPolicyElementsByRule<V1beta2AppsecLinuxPolicy, NewParsedRule>(
|
||||
default_rule,
|
||||
default_rule,
|
||||
appsec_policy,
|
||||
policy_name);
|
||||
|
||||
vector<NewParsedRule> specific_rules = appsec_policy.getAppsecPolicySpec().getSpecificRules();
|
||||
createPolicyElements<V1beta2AppsecLinuxPolicy, NewParsedRule>(
|
||||
specific_rules,
|
||||
@@ -1563,6 +1602,13 @@ PolicyMakerUtils::createAgentPolicyFromAppsecPolicy<V1beta2AppsecLinuxPolicy, Ne
|
||||
appsec_policy,
|
||||
policy_name
|
||||
);
|
||||
|
||||
// add default rule to policy
|
||||
createPolicyElementsByRule<V1beta2AppsecLinuxPolicy, NewParsedRule>(
|
||||
default_rule,
|
||||
default_rule,
|
||||
appsec_policy,
|
||||
policy_name);
|
||||
}
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
@@ -1572,15 +1618,31 @@ PolicyMakerUtils::proccesSingleAppsecPolicy(
|
||||
const string &policy_version,
|
||||
const string &local_appsec_policy_path)
|
||||
{
|
||||
Maybe<AppsecLinuxPolicy> maybe_policy = openFileAsJson<AppsecLinuxPolicy>(policy_path);
|
||||
if (!maybe_policy.ok()){
|
||||
dbgWarning(D_LOCAL_POLICY) << maybe_policy.getErr();
|
||||
return "";
|
||||
|
||||
Maybe<V1beta2AppsecLinuxPolicy> maybe_policy_v1beta2 = openFileAsJson<V1beta2AppsecLinuxPolicy>(policy_path);
|
||||
if (maybe_policy_v1beta2.ok()) {
|
||||
policy_version_name = "v1beta2";
|
||||
createAgentPolicyFromAppsecPolicy<V1beta2AppsecLinuxPolicy, NewParsedRule>(
|
||||
getPolicyName(policy_path),
|
||||
maybe_policy_v1beta2.unpack()
|
||||
);
|
||||
} else {
|
||||
policy_version_name = "v1beta1";
|
||||
dbgInfo(D_LOCAL_POLICY)
|
||||
<< "Failed to retrieve AppSec local policy with version: v1beta2, Trying version: v1beta1";
|
||||
|
||||
Maybe<AppsecLinuxPolicy> maybe_policy_v1beta1 = openFileAsJson<AppsecLinuxPolicy>(policy_path);
|
||||
if (!maybe_policy_v1beta1.ok()){
|
||||
dbgWarning(D_LOCAL_POLICY) << maybe_policy_v1beta1.getErr();
|
||||
return "";
|
||||
}
|
||||
createAgentPolicyFromAppsecPolicy<AppsecLinuxPolicy, ParsedRule>(
|
||||
getPolicyName(policy_path),
|
||||
maybe_policy_v1beta1.unpack()
|
||||
);
|
||||
|
||||
if (getenv("OPENAPPSEC_STANDALONE")) rpmBuildNginxServers(maybe_policy_v1beta1.unpack());
|
||||
}
|
||||
createAgentPolicyFromAppsecPolicy<AppsecLinuxPolicy, ParsedRule>(
|
||||
getPolicyName(policy_path),
|
||||
maybe_policy.unpack()
|
||||
);
|
||||
|
||||
PolicyWrapper policy_wrapper = combineElementsToPolicy(policy_version);
|
||||
return dumpPolicyToFile(
|
||||
@@ -1588,3 +1650,114 @@ PolicyMakerUtils::proccesSingleAppsecPolicy(
|
||||
local_appsec_policy_path
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
PolicyMakerUtils::rpmReportInfo(const std::string &msg)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << msg;
|
||||
|
||||
LogGen(
|
||||
msg,
|
||||
ReportIS::Audience::SECURITY,
|
||||
ReportIS::Severity::INFO,
|
||||
ReportIS::Priority::LOW,
|
||||
ReportIS::Tags::ORCHESTRATOR
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
PolicyMakerUtils::rpmReportError(const std::string &msg)
|
||||
{
|
||||
dbgWarning(D_LOCAL_POLICY) << msg;
|
||||
|
||||
LogGen(
|
||||
msg,
|
||||
ReportIS::Audience::SECURITY,
|
||||
ReportIS::Severity::CRITICAL,
|
||||
ReportIS::Priority::URGENT,
|
||||
ReportIS::Tags::ORCHESTRATOR
|
||||
);
|
||||
}
|
||||
|
||||
void
|
||||
PolicyMakerUtils::rpmBuildNginxServers(const AppsecLinuxPolicy &policy)
|
||||
{
|
||||
rpmReportInfo("Started building NGINX servers");
|
||||
|
||||
ReverseProxyBuilder::init();
|
||||
bool full_success = true;
|
||||
bool partial_success = false;
|
||||
set<pair<string, bool>> processed_rules;
|
||||
for (ParsedRule const &rule : policy.getAppsecPolicySpec().getSpecificRules()) {
|
||||
tuple<string, string, string> splited_host_name = splitHostName(rule.getHost());
|
||||
string host = std::get<0>(splited_host_name);
|
||||
if (host.empty() || rule.rpmGetUpstream().empty()) continue;
|
||||
|
||||
string location = std::get<2>(splited_host_name);
|
||||
if (location.empty()) location = "/";
|
||||
|
||||
dbgTrace(D_LOCAL_POLICY)
|
||||
<< "Building NGINX server: "
|
||||
<< host
|
||||
<< ", location: "
|
||||
<< location
|
||||
<< " RP-Settings: "
|
||||
<< rule.rpmGetRPSettings();
|
||||
|
||||
RPMSettings rp_settings =
|
||||
rpmGetAppsecRPSettingSpec<AppsecLinuxPolicy, RPMSettings>(rule.rpmGetRPSettings(), policy);
|
||||
pair<string, bool> server = {host, rule.rpmIsHttps()};
|
||||
auto it = processed_rules.find(server);
|
||||
if (it != processed_rules.end()) {
|
||||
auto maybe_res = ReverseProxyBuilder::addNginxServerLocation(location, host, rule, rp_settings);
|
||||
if (!maybe_res.ok()) {
|
||||
rpmReportError(
|
||||
"Could not add an NGINX server location: " + location + " to server: " + host +
|
||||
", error: " + maybe_res.getErr()
|
||||
);
|
||||
full_success = false;
|
||||
continue;
|
||||
}
|
||||
rpmReportInfo("NGINX server location: " + location + " was successfully added to server: " + host);
|
||||
partial_success = true;
|
||||
} else {
|
||||
auto maybe_res = ReverseProxyBuilder::createNewNginxServer(host, rule, rp_settings);
|
||||
if (!maybe_res.ok()) {
|
||||
rpmReportError("Could not create a new NGINX server: " + host + ", error: " + maybe_res.getErr());
|
||||
full_success = false;
|
||||
continue;
|
||||
}
|
||||
rpmReportInfo(
|
||||
(rule.rpmIsHttps() ? string("SSL") : string("HTTP")) + " NGINX server: " + host +
|
||||
" was successfully built"
|
||||
);
|
||||
processed_rules.insert(server);
|
||||
|
||||
maybe_res = ReverseProxyBuilder::addNginxServerLocation(location, host, rule, rp_settings);
|
||||
if (!maybe_res.ok()) {
|
||||
rpmReportError(
|
||||
"Could not add an NGINX server location: " + location + " to server: " + host +
|
||||
", error: " + maybe_res.getErr()
|
||||
);
|
||||
full_success = false;
|
||||
continue;
|
||||
}
|
||||
rpmReportInfo("NGINX server location: " + location + " was successfully added to server: " + host);
|
||||
partial_success = true;
|
||||
}
|
||||
}
|
||||
|
||||
auto maybe_reload_nginx = ReverseProxyBuilder::reloadNginx();
|
||||
if (!maybe_reload_nginx.ok()) {
|
||||
rpmReportError("Could not reload NGINX, error: " + maybe_reload_nginx.getErr());
|
||||
return;
|
||||
}
|
||||
|
||||
if (full_success) {
|
||||
rpmReportInfo("NGINX configuration was loaded successfully!");
|
||||
} else if (partial_success) {
|
||||
rpmReportInfo("NGINX configuration was partially loaded");
|
||||
} else {
|
||||
rpmReportError("Could not load any NGINX configuration");
|
||||
}
|
||||
}
|
||||
|
||||
components/security_apps/local_policy_mgmt_gen/reverse_proxy_section.cc (new executable file, 456 lines)
@@ -0,0 +1,456 @@
|
||||
// Copyright (C) 2022 Check Point Software Technologies Ltd. All rights reserved.
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "reverse_proxy_section.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <openssl/pem.h>
|
||||
#include <openssl/rsa.h>
|
||||
#include <openssl/x509v3.h>
|
||||
#include <fstream>
|
||||
|
||||
#include "local_policy_mgmt_gen.h"
|
||||
#include "local_policy_common.h"
|
||||
#include "appsec_practice_section.h"
|
||||
#include "debug.h"
|
||||
|
||||
using namespace std;
|
||||
|
||||
USE_DEBUG_FLAG(D_LOCAL_POLICY);
|
||||
|
||||
static string conf_base_path = "/etc/cp/conf/";
|
||||
static string certs_path = "/etc/certs/";
|
||||
static string nginx_templates_path = "/etc/nginx/nginx-templates/";
|
||||
static const string nginx_configuration_path = "openappsec-nginx-servers/";
|
||||
static const string nginx_http_server_template = "nginx-http-server";
|
||||
static const string nginx_ssl_server_template = "nginx-ssl-server";
|
||||
static const string nginx_location_template = "nginx-location-block";
|
||||
|
||||
static const boost::regex host_template("<host>");
|
||||
static const boost::regex private_key_template("<private-key>");
|
||||
static const boost::regex certificate_template("<certificate>");
|
||||
static const boost::regex location_template("<location>");
|
||||
static const boost::regex upstream_template("<upstream>");
|
||||
static const boost::regex host_header_template("<host-header>");
|
||||
static const boost::regex dns_resolver_template("<dns-resolver>");
|
||||
|
||||
class ReverseProxyCertUtils
|
||||
{
|
||||
public:
|
||||
static std::pair<std::string, std::string> findMatchingCertificate(const std::string &host);
|
||||
static void init();
|
||||
|
||||
private:
|
||||
static std::vector<std::string> getFilesByExtension(const std::string &extension);
|
||||
static void untarCertificatesPackages();
|
||||
|
||||
static Maybe<std::string> extractModulus(const std::string &path, const std::string &type);
|
||||
|
||||
static std::unordered_map<std::string, std::string>
|
||||
calculatePublicModulus(const std::vector<std::string> &certs);
|
||||
|
||||
static std::unordered_map<std::string, std::string>
|
||||
calculatePrivateModulus(const std::vector<std::string> &keys);
|
||||
|
||||
static std::unordered_map<std::string, std::string> cert_key_map;
|
||||
};
|
||||
unordered_map<string, string> ReverseProxyCertUtils::cert_key_map;
|
||||
|
||||
void
|
||||
RPMSettings::load(cereal::JSONInputArchive &archive_in)
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Loading RP Settings";
|
||||
|
||||
parseAppsecJSONKey<string>("name", name, archive_in);
|
||||
parseAppsecJSONKey<string>("host-header", host_hdr, archive_in, "$host");
|
||||
parseAppsecJSONKey<string>("dns-resolver", dns_resolver, archive_in, "127.0.0.11");
|
||||
}
|
||||
|
||||
const string &
|
||||
RPMSettings::getName() const
|
||||
{
|
||||
return name;
|
||||
}
|
||||
|
||||
string
|
||||
RPMSettings::applySettings(const std::string &server_content) const
|
||||
{
|
||||
string new_server_content = ReverseProxyBuilder::replaceTemplate(server_content, host_header_template, host_hdr);
|
||||
return ReverseProxyBuilder::replaceTemplate(new_server_content, dns_resolver_template, dns_resolver);
|
||||
}
|
||||
|
||||
void
|
||||
ReverseProxyCertUtils::init()
|
||||
{
|
||||
certs_path = getProfileAgentSettingWithDefault<string>("/etc/certs/", "openappsec.reverseProxy.certs");
|
||||
|
||||
untarCertificatesPackages();
|
||||
cert_key_map.clear();
|
||||
auto public_modulus_map = calculatePublicModulus(getFilesByExtension(".pem"));
|
||||
auto private_modulus_map = calculatePrivateModulus(getFilesByExtension(".key"));
|
||||
for (const auto &public_modulus_entry : public_modulus_map) {
|
||||
auto public_modulus = public_modulus_entry.second;
|
||||
if (private_modulus_map.find(public_modulus) != private_modulus_map.end()) {
|
||||
dbgTrace(D_LOCAL_POLICY)
|
||||
<< "Successfully parsed certificate: "
|
||||
<< public_modulus_entry.first
|
||||
<< " with private key: "
|
||||
<< private_modulus_map[public_modulus];
|
||||
|
||||
cert_key_map[public_modulus_entry.first] = private_modulus_map[public_modulus];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vector<string>
|
||||
ReverseProxyCertUtils::getFilesByExtension(const string &extension)
|
||||
{
|
||||
auto maybe_files = NGEN::Filesystem::getDirectoryFiles(certs_path);
|
||||
if (!maybe_files.ok()) return {};
|
||||
|
||||
auto files = maybe_files.unpack();
|
||||
files.erase(
|
||||
remove_if(
|
||||
files.begin(),
|
||||
files.end(),
|
||||
[&](const string& file) { return file.length() < 4 || file.substr(file.length() - 4) != extension; }
|
||||
),
|
||||
files.end()
|
||||
);
|
||||
|
||||
for (const auto &file : files) {
|
||||
dbgTrace(D_LOCAL_POLICY) << "Found file: " << file;
|
||||
}
|
||||
|
||||
return files;
|
||||
}
|
||||
|
||||
pair<string, string>
|
||||
ReverseProxyCertUtils::findMatchingCertificate(const string &host)
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Looking for a matching certificate to host: " << host;
|
||||
|
||||
for (const auto &entry : cert_key_map) {
|
||||
string cert_path = entry.first;
|
||||
|
||||
dbgTrace(D_LOCAL_POLICY) << "Checking match of certificate: " << cert_path;
|
||||
|
||||
// Create a BIO object to read the certificate
|
||||
BIO* cert_bio = BIO_new_file(cert_path.c_str(), "rb");
|
||||
if (!cert_bio) {
|
||||
dbgWarning(D_LOCAL_POLICY) << "Could not open certificate file: " << cert_path;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Load the PEM-encoded public key from the file
|
||||
X509 *cert = PEM_read_bio_X509(cert_bio, nullptr, nullptr, nullptr);
|
||||
if (!cert) {
|
||||
dbgWarning(D_LOCAL_POLICY) << "Could not parse X509 certificate file: " << cert_path;
|
||||
BIO_free(cert_bio);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Get the subject alternative name extension
|
||||
STACK_OF(GENERAL_NAME)* san_names = static_cast<STACK_OF(GENERAL_NAME)*>(
|
||||
X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr)
|
||||
);
|
||||
|
||||
if (!san_names) {
|
||||
dbgWarning(D_LOCAL_POLICY) << "No Subject Alternative Name found in the certificate: " << cert_path;
|
||||
X509_free(cert);
|
||||
BIO_free(cert_bio);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Iterate through the SAN entries
|
||||
for (int i = 0; i < sk_GENERAL_NAME_num(san_names); ++i) {
|
||||
GENERAL_NAME* name = sk_GENERAL_NAME_value(san_names, i);
|
||||
if (name->type == GEN_DNS) {
|
||||
const char* san = reinterpret_cast<const char*>(ASN1_STRING_get0_data(name->d.dNSName));
|
||||
|
||||
if (X509_check_host(cert, host.c_str(), host.length(), 0, nullptr) == 1) {
|
||||
dbgTrace(D_LOCAL_POLICY) << "Found matching certificate: " << cert_path << ", DNS name: " << san;
|
||||
sk_GENERAL_NAME_pop_free(san_names, GENERAL_NAME_free);
|
||||
X509_free(cert);
|
||||
BIO_free(cert_bio);
|
||||
return {cert_path, cert_key_map[cert_path]};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dbgTrace(D_LOCAL_POLICY) << "Certificate: " << cert_path << " does not match host: " << host;
|
||||
|
||||
// Clean up
|
||||
sk_GENERAL_NAME_pop_free(san_names, GENERAL_NAME_free);
|
||||
X509_free(cert);
|
||||
BIO_free(cert_bio);
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
Maybe<std::string>
|
||||
ReverseProxyCertUtils::extractModulus(const string &path, const string &type)
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Started calculating modulus of: " << path << ", type: " << type;
|
||||
|
||||
string modulus_cmd = "openssl " + type + " -noout -modulus -in " + path + "; echo $?";
|
||||
auto modulus_maybe = Singleton::Consume<I_ShellCmd>::by<LocalPolicyMgmtGenerator>()->getExecOutput(modulus_cmd);
|
||||
if (!modulus_maybe.ok()) return genError("Could not complete command, error: " + modulus_maybe.getErr());
|
||||
|
||||
auto modulus_cmd_output = NGEN::Strings::removeTrailingWhitespaces(modulus_maybe.unpack());
|
||||
if (modulus_cmd_output.back() != '0') return genError("Could not extract modulus, error: " + modulus_cmd_output);
|
||||
|
||||
modulus_cmd_output.pop_back();
|
||||
|
||||
dbgTrace(D_LOCAL_POLICY) << "Extracted modulus for: " << path << ", " << modulus_cmd_output;
|
||||
|
||||
return modulus_cmd_output;
|
||||
}
|
||||
|
||||
unordered_map<string, string>
|
||||
ReverseProxyCertUtils::calculatePublicModulus(const vector<string> &certs)
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Calculating certificates modulus";
|
||||
|
||||
unordered_map<string, string> certs_modulus;
|
||||
for (const string &cert_file_name : certs) {
|
||||
string cert_path = certs_path + cert_file_name;
|
||||
auto modulus = extractModulus(cert_path, "x509");
|
||||
if (!modulus.ok()) {
|
||||
dbgWarning(D_LOCAL_POLICY) << modulus.getErr();
|
||||
continue;
|
||||
}
|
||||
|
||||
certs_modulus[cert_path] = modulus.unpack();
|
||||
}
|
||||
|
||||
return certs_modulus;
|
||||
}
|
||||
|
||||
unordered_map<string, string>
|
||||
ReverseProxyCertUtils::calculatePrivateModulus(const vector<string> &keys)
|
||||
{
|
||||
unordered_map<string, string> key_modulus;
|
||||
for (const string &private_key_file_name : keys) {
|
||||
string private_key_path = certs_path + private_key_file_name;
|
||||
auto modulus = extractModulus(private_key_path, "rsa");
|
||||
if (!modulus.ok()) {
|
||||
dbgWarning(D_LOCAL_POLICY) << modulus.getErr();
|
||||
continue;
|
||||
}
|
||||
|
||||
key_modulus[modulus.unpack()] = private_key_path;
|
||||
}
|
||||
|
||||
return key_modulus;
|
||||
}
|
||||
|
||||
void
|
||||
ReverseProxyCertUtils::untarCertificatesPackages()
|
||||
{
|
||||
vector<string> cert_pkgs = getFilesByExtension(".pkg");
|
||||
if (cert_pkgs.empty()) return;
|
||||
|
||||
for (const auto &cert_pkg : cert_pkgs) {
|
||||
dbgTrace(D_LOCAL_POLICY) << "Untaring certificate package: " << cert_pkg;
|
||||
string untar_cmd = "tar -C " + certs_path + " -xvf " + certs_path + cert_pkg;
|
||||
auto maybe_tar_res = Singleton::Consume<I_ShellCmd>::by<LocalPolicyMgmtGenerator>()->getExecOutput(untar_cmd);
|
||||
if (!maybe_tar_res.ok()) {
|
||||
dbgWarning(D_LOCAL_POLICY) << "Untar package error: " << maybe_tar_res.getErr();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
string
|
||||
ReverseProxyBuilder::replaceTemplate(
|
||||
const string &content,
|
||||
const boost::regex &nginx_directive_template,
|
||||
const string &value)
|
||||
{
|
||||
return NGEN::Regex::regexReplace(__FILE__, __LINE__, content, nginx_directive_template, value);
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
ReverseProxyBuilder::getTemplateContent(const string &nginx_conf_template)
|
||||
{
|
||||
ifstream nginx_template_in(nginx_templates_path + nginx_conf_template);
|
||||
if (!nginx_template_in.is_open()) return genError("Could not open the " + nginx_conf_template + " template");
|
||||
|
||||
string file_content((istreambuf_iterator<char>(nginx_template_in)), istreambuf_iterator<char>());
|
||||
nginx_template_in.close();
|
||||
|
||||
return file_content;
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
ReverseProxyBuilder::createSSLNginxServer(const string &host, const RPMSettings &rp_settings)
|
||||
{
|
||||
dbgTrace(D_LOCAL_POLICY) << "Creating SSL NGINX server: " << host;
|
||||
|
||||
pair<string, string> cert_key = ReverseProxyCertUtils::findMatchingCertificate(host);
|
||||
if (cert_key.first.empty() || cert_key.second.empty()) {
|
||||
return genError("Cannot find matching certificates to host: " + host);
|
||||
}
|
||||
|
||||
auto maybe_server_content = getTemplateContent(nginx_ssl_server_template);
|
||||
if (!maybe_server_content.ok()) return maybe_server_content.passErr();
|
||||
|
||||
string server_content = replaceTemplate(maybe_server_content.unpack(), host_template, host);
|
||||
server_content = replaceTemplate(server_content, private_key_template, cert_key.second);
|
||||
server_content = replaceTemplate(server_content, certificate_template, cert_key.first);
|
||||
server_content = rp_settings.applySettings(server_content);
|
||||
|
||||
dbgTrace(D_LOCAL_POLICY) << "NGINX SSL Server content: " << server_content;
|
||||
|
||||
string conf_path = conf_base_path + nginx_configuration_path + "/443_" + host + ".conf";
|
||||
ofstream server_file(conf_path, ofstream::out | ofstream::trunc);
|
||||
if (!server_file.is_open()) {
|
||||
return genError("Could not open the output SSL NGINX configuration file: " + conf_path);
|
||||
}
|
||||
|
||||
server_file << server_content;
|
||||
server_file.close();
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
ReverseProxyBuilder::createHTTPNginxServer(const string &host, const RPMSettings &rp_settings)
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Creating HTTP NGINX server: " << host;
|
||||
|
||||
auto maybe_server_content = getTemplateContent(nginx_http_server_template);
|
||||
if (!maybe_server_content.ok()) return maybe_server_content.passErr();
|
||||
|
||||
string server_content = replaceTemplate(maybe_server_content.unpack(), host_template, host);
|
||||
server_content = rp_settings.applySettings(server_content);
|
||||
|
||||
dbgTrace(D_LOCAL_POLICY) << "NGINX HTTP Server content: " << server_content;
|
||||
|
||||
string http_server_conf_path = conf_base_path + nginx_configuration_path + "80_" + host + ".conf";
|
||||
ofstream server_file(http_server_conf_path, ofstream::out | ofstream::trunc);
|
||||
if (!server_file.is_open()) {
|
||||
return genError("Could not open the output HTTP NGINX configuration file: " + http_server_conf_path);
|
||||
}
|
||||
|
||||
server_file << server_content;
|
||||
server_file.close();
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
ReverseProxyBuilder::addNginxServerLocation(
|
||||
string location,
|
||||
const string &host,
|
||||
const ParsedRule &rule,
|
||||
const RPMSettings &rp_settings)
|
||||
{
|
||||
string port = rule.rpmIsHttps() ? string("443") : string("80");
|
||||
string location_conf_path = conf_base_path + nginx_configuration_path + port + '_' + host + "_locations/";
|
||||
|
||||
dbgFlow(D_LOCAL_POLICY) << "Adding a new NGINX location: " << location << " to: " << location_conf_path;
|
||||
|
||||
NGEN::Filesystem::makeDirRecursive(location_conf_path);
|
||||
|
||||
if (location.empty() || location.find_first_not_of('/') == string::npos)
|
||||
{
|
||||
location = "/";
|
||||
location_conf_path += "root_location.conf";
|
||||
}
|
||||
else
|
||||
{
|
||||
string location_conf_basename = location.substr(1, location.length() - 1) + "_location";
|
||||
replace(location_conf_basename.begin(), location_conf_basename.end(), '/', '_');
|
||||
location_conf_path += location_conf_basename + ".conf";
|
||||
}
|
||||
auto maybe_location_content = getTemplateContent(nginx_location_template);
|
||||
if (!maybe_location_content.ok()) return maybe_location_content.passErr();
|
||||
|
||||
string location_content = replaceTemplate(maybe_location_content.unpack(), location_template, location);
|
||||
location_content = replaceTemplate(location_content, upstream_template, rule.rpmGetUpstream());
|
||||
location_content = rp_settings.applySettings(location_content);
|
||||
|
||||
dbgTrace(D_LOCAL_POLICY) << "NGINX server location content: " << location_content;
|
||||
|
||||
ofstream location_file(location_conf_path, ofstream::out | ofstream::trunc);
|
||||
if (!location_file.is_open()) {
|
||||
return genError("Could not open the output NGINX location block: " + location_conf_path);
|
||||
}
|
||||
|
||||
location_file << location_content;
|
||||
location_file.close();
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
ReverseProxyBuilder::createNewNginxServer(const string &host, const ParsedRule &rule, const RPMSettings &rp_settings)
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Creating a new NGINX server: " << host << ", SSL: " << rule.rpmIsHttps();
|
||||
|
||||
if (rule.rpmIsHttps()) {
|
||||
auto maybe_res = ReverseProxyBuilder::createSSLNginxServer(host, rp_settings);
|
||||
if (!maybe_res.ok()) {
|
||||
return genError("Could not create an SSL NGINX server configuration: " + maybe_res.getErr());
|
||||
}
|
||||
} else {
|
||||
auto maybe_res = ReverseProxyBuilder::createHTTPNginxServer(host, rp_settings);
|
||||
if (!maybe_res.ok()) {
|
||||
return genError("Could not create an HTTP NGINX server: " + maybe_res.getErr());
|
||||
}
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
Maybe<void>
|
||||
ReverseProxyBuilder::reloadNginx()
|
||||
{
|
||||
dbgFlow(D_LOCAL_POLICY) << "Reloading NGINX...";
|
||||
|
||||
auto maybe_nginx_t = Singleton::Consume<I_ShellCmd>::by<LocalPolicyMgmtGenerator>()->getExecOutput(
|
||||
"nginx -t 2>&1; echo $?"
|
||||
);
|
||||
|
||||
if (!maybe_nginx_t.ok()){
|
||||
return genError("Could not check NGINX configuration: " + maybe_nginx_t.getErr());
|
||||
}
|
||||
|
||||
string nginx_t_output = NGEN::Strings::removeTrailingWhitespaces(maybe_nginx_t.unpack());
|
||||
if (nginx_t_output.back() != '0') return genError("Invalid NGINX configuration: " + nginx_t_output);
|
||||
|
||||
auto maybe_nginx_reload = Singleton::Consume<I_ShellCmd>::by<LocalPolicyMgmtGenerator>()->getExecOutput(
|
||||
"nginx -s reload 2>&1;"
|
||||
);
|
||||
|
||||
if (!maybe_nginx_reload.ok()){
|
||||
return genError("Could not reload NGINX: " + maybe_nginx_reload.getErr());
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
void
|
||||
ReverseProxyBuilder::init()
|
||||
{
|
||||
conf_base_path = getConfigurationWithDefault<string>("/etc/cp/conf/", "Config Component", "configuration path");
|
||||
nginx_templates_path = getProfileAgentSettingWithDefault<string>(
|
||||
"/etc/nginx/nginx-templates/", "openappsec.reverseProxy.nginxTemplates"
|
||||
);
|
||||
|
||||
NGEN::Filesystem::deleteDirectory(conf_base_path + nginx_configuration_path, true);
|
||||
NGEN::Filesystem::makeDir(conf_base_path + nginx_configuration_path);
|
||||
ReverseProxyCertUtils::init();
|
||||
}
|
||||
@@ -156,6 +156,7 @@ RulesTriggerSection::save(cereal::JSONOutputArchive &out_ar) const
|
||||
RulesConfigRulebase::RulesConfigRulebase(
|
||||
const string &_name,
|
||||
const string &_url,
|
||||
const string &_port,
|
||||
const string &_uri,
|
||||
vector<PracticeSection> _practices,
|
||||
vector<ParametersSection> _parameters,
|
||||
@@ -169,39 +170,19 @@ RulesConfigRulebase::RulesConfigRulebase(
|
||||
try {
|
||||
bool any = _name == "Any" && _url == "Any" && _uri == "Any";
|
||||
id = any ? "Any" : _url+_uri;
|
||||
if (_uri != "/") {
|
||||
context = any ? "All()" : "Any("
|
||||
"All("
|
||||
"Any("
|
||||
"EqualHost(" + _url + ")"
|
||||
"),"
|
||||
"EqualListeningPort(80)" +
|
||||
string(_uri.empty() ? "" : ",BeginWithUri(" + _uri + ")") +
|
||||
"),"
|
||||
"All("
|
||||
"Any("
|
||||
"EqualHost(" + _url + ")"
|
||||
"),"
|
||||
"EqualListeningPort(443)" +
|
||||
string(_uri.empty() ? "" : ",BeginWithUri(" + _uri + ")") +
|
||||
")"
|
||||
")";
|
||||
} else {
|
||||
context = any ? "All()" : "Any("
|
||||
"All("
|
||||
"Any("
|
||||
"EqualHost(" + _url + ")"
|
||||
"),"
|
||||
"EqualListeningPort(80)"
|
||||
"),"
|
||||
"All("
|
||||
"Any("
|
||||
"EqualHost(" + _url + ")"
|
||||
"),"
|
||||
"EqualListeningPort(443)"
|
||||
")"
|
||||
")";
|
||||
if (any) {
context ="All()";
return;
}
string host_check = "Any(EqualHost(" + _url + ")),";
string uri_check = (_uri.empty() || _uri == "/" ) ? "" : ",BeginWithUri(" + _uri + ")";
auto ports = _port.empty() ? vector<string>({"80", "443"}) : vector<string>({_port});
context = "Any(";
for (auto &port : ports) {
string check_last = (ports.size() == 1 || port == "443") ? ")" : "),";
context += "All(" + host_check + "EqualListeningPort(" + port + ")" + uri_check + check_last;
}
context += ")";
} catch (const boost::uuids::entropy_error &e) {
dbgWarning(D_LOCAL_POLICY) << "Failed to generate rule UUID. Error: " << e.what();
}
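With the loop above, the match context enumerates ports 80 and 443 when no port is given and collapses to a single All(...) clause when a port is explicit. For illustrative inputs (host example.com, uri /api) the generated strings would look like:

    no port:     Any(All(Any(EqualHost(example.com)),EqualListeningPort(80),BeginWithUri(/api)),All(Any(EqualHost(example.com)),EqualListeningPort(443),BeginWithUri(/api)))
    port "8080": Any(All(Any(EqualHost(example.com)),EqualListeningPort(8080),BeginWithUri(/api)))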
@@ -284,6 +265,7 @@ UsersIdentifiersRulebase::UsersIdentifiersRulebase(
|
||||
const string &
|
||||
UsersIdentifiersRulebase::getIdentifier() const
|
||||
{
|
||||
if (source_identifiers.empty()) return source_identifier;
|
||||
return source_identifiers[0].getIdentifier();
|
||||
}
|
||||
// LCOV_EXCL_STOP
|
||||
|
||||
@@ -45,11 +45,7 @@ checkSamlPortal(const string &command_output)
Maybe<string>
getIDAGaia(const string &command_output)
{
if (command_output.find("Portal is running") != string::npos) {
return string("ida_gaia");
}

return genError("Current host does not have SAML Portal configured");
return string("ida_gaia");
}

Maybe<string>
@@ -72,6 +68,22 @@ checkIDP(shared_ptr<istream> file_stream)
|
||||
|
||||
#if defined(gaia) || defined(smb)
|
||||
|
||||
Maybe<string>
|
||||
checkIsCpviewRunning(const string &command_output)
|
||||
{
|
||||
if (command_output == "true" || command_output == "false") return command_output;
|
||||
|
||||
return genError("cpview is not running");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkIsCPotelcolGRET64(const string &command_output)
|
||||
{
|
||||
if (command_output == "true" || command_output == "false") return command_output;
|
||||
|
||||
return genError("CPotelcol is not installed or its take is below T64");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
checkHasSDWan(const string &command_output)
|
||||
{
|
||||
@@ -193,6 +205,12 @@ checkIfSdwanRunning(const string &command_output)
|
||||
return genError("Could not determine if sd-wan is running or not");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getClusterObjectIP(const string &command_output)
|
||||
{
|
||||
return getAttr(command_output, "Cluster object IP was not found");
|
||||
}
|
||||
|
||||
Maybe<string>
|
||||
getSmbObjectName(const string &command_output)
|
||||
{
|
||||
|
||||
@@ -30,6 +30,14 @@
|
||||
#ifdef SHELL_CMD_HANDLER
|
||||
#if defined(gaia) || defined(smb)
|
||||
SHELL_CMD_HANDLER("cpProductIntegrationMgmtObjectType", "cpprod_util CPPROD_IsMgmtMachine", getMgmtObjType)
|
||||
SHELL_CMD_HANDLER("isCpviewRunning",
|
||||
"pidof cpview_api_service > /dev/null 2>&1 && [ -f $CPDIR/conf/cpview_api_service.version ] "
|
||||
"&& echo 'true' || echo 'false'",
|
||||
checkIsCpviewRunning)
|
||||
SHELL_CMD_HANDLER("isCPotelcolGRET64",
|
||||
"grep -A 10 '(BUNDLE_CPOTELCOL_AUTOUPDATE' ${CPDIR}/registry/HKLM_registry.data | "
|
||||
"awk '/SU_Build_Take/{val = substr($2, 2, length($2)-2); if (val >=64) print \"true\"; else print \"false\" }'",
|
||||
checkIsCPotelcolGRET64)
|
||||
SHELL_CMD_HANDLER("hasSDWan", "[ -f $FWDIR/bin/sdwan_steering ] && echo '1' || echo '0'", checkHasSDWan)
|
||||
SHELL_CMD_HANDLER(
|
||||
"canUpdateSDWanData",
|
||||
@@ -50,12 +58,20 @@ SHELL_CMD_HANDLER(
|
||||
"cat /etc/cp-release | grep -oE 'R[0-9]+(\\.[0-9]+)?'",
|
||||
getGWVersion
|
||||
)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtParentObjectIP",
|
||||
"obj=\"$(cpsdwan get_data | jq -r .cluster_name)\";"
|
||||
" awk -v obj=\"$obj\" '$1 == \":\" && $2 == \"(\" obj, $1 == \":ip_address\" { if ($1 == \":ip_address\")"
|
||||
" { gsub(/[()]/, \"\", $2); print $2; exit; } }'"
|
||||
" $FWDIR/state/local/FW1/local.gateway_cluster",
|
||||
getClusterObjectIP
|
||||
)
|
||||
#endif //gaia || smb
|
||||
|
||||
#if defined(gaia)
|
||||
SHELL_CMD_HANDLER("hasSupportedBlade", "enabled_blades", checkHasSupportedBlade)
|
||||
SHELL_CMD_HANDLER("hasSamlPortal", "mpclient status saml-vpn", checkSamlPortal)
|
||||
SHELL_CMD_HANDLER("requiredNanoServices", "mpclient status saml-vpn", getIDAGaia)
|
||||
SHELL_CMD_HANDLER("requiredNanoServices", "ida_gaia", getIDAGaia)
|
||||
SHELL_CMD_HANDLER(
|
||||
"cpProductIntegrationMgmtParentObjectName",
|
||||
"cat $FWDIR/database/myself_objects.C "
|
||||
|
||||
@@ -278,36 +278,6 @@ HttpsCurl::HttpsCurl(const HttpsCurl &other) :
|
||||
HttpCurl(other),
|
||||
ca_path(other.ca_path) {}
|
||||
|
||||
bool
|
||||
HttpsCurl::downloadOpenAppsecPackages()
|
||||
{
|
||||
char errorstr[CURL_ERROR_SIZE];
|
||||
CURL* curl_handle = curl_easy_init();
|
||||
if (!curl_handle) return false;
|
||||
|
||||
curl_easy_setopt(curl_handle, CURLOPT_SSL_VERIFYPEER, 1);
|
||||
curl_easy_setopt(curl_handle, CURLOPT_SSL_VERIFYHOST, 2);
|
||||
|
||||
curl_easy_setopt(curl_handle, CURLOPT_URL, ("https://" + curl_url).c_str());
|
||||
curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, writeResponseCallback);
|
||||
curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, &out_file);
|
||||
|
||||
curl_easy_setopt(curl_handle, CURLOPT_VERBOSE, 1L);
|
||||
curl_easy_setopt(curl_handle, CURLOPT_ERRORBUFFER, errorstr);
|
||||
|
||||
CURLcode res = curl_easy_perform(curl_handle);
|
||||
if (res == CURLE_OK) {
|
||||
dbgTrace(D_HTTP_REQUEST) << "CURL HTTP request successfully completed.";
|
||||
} else {
|
||||
dbgWarning(D_HTTP_REQUEST) << "CURL result " + string(curl_easy_strerror(res));
|
||||
curl_easy_cleanup(curl_handle);
|
||||
return false;
|
||||
}
|
||||
|
||||
curl_easy_cleanup(curl_handle);
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
HttpsCurl::setCurlOpts(long timeout, HTTP_VERSION http_version)
|
||||
{
|
||||
@@ -347,7 +317,7 @@ HttpsCurl::setCurlOpts(long timeout, HTTP_VERSION http_version)
|
||||
curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, writeResponseCallback);
|
||||
curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA, &out_file);
|
||||
curl_easy_setopt(curl_handle, CURLOPT_TIMEOUT, timeout);
|
||||
curl_easy_setopt(curl_handle, CURLOPT_CAINFO, ca_path.c_str());
|
||||
if (ca_path != "") curl_easy_setopt(curl_handle, CURLOPT_CAINFO, ca_path.c_str());
|
||||
headers = curl_slist_append(headers, "Accept: */*");
|
||||
string auth = string("Authorization: Bearer ") + bearer;
|
||||
headers = curl_slist_append(headers, auth.c_str());
|
||||
|
||||
@@ -105,7 +105,6 @@ public:
|
||||
static CURLcode ssl_ctx_verify_certificate(CURL *curl, void *ssl_ctx, void *opq);
|
||||
static int verify_certificate(int preverify_ok, X509_STORE_CTX *ctx);
|
||||
void setCurlOpts(long timeout = 60L, HTTP_VERSION http_version = HTTP_VERSION::HTTP_VERSION_1_1) override;
|
||||
bool downloadOpenAppsecPackages();
|
||||
|
||||
private:
|
||||
std::string ca_path;
|
||||
|
||||
@@ -592,13 +592,8 @@ HTTPClient::curlGetFileOverSSL(const URLParser &url, ofstream &out_file, const s
|
||||
proxy_config->getProxyCredentials(ProxyProtocol::HTTPS),
|
||||
cert_file_path);
|
||||
|
||||
bool connection_ok;
|
||||
if (url.getBaseURL().unpack() == "downloads.openappsec.io") {
|
||||
connection_ok = ssl_curl_client.downloadOpenAppsecPackages();
|
||||
} else {
|
||||
ssl_curl_client.setCurlOpts();
|
||||
connection_ok = ssl_curl_client.connect();
|
||||
}
|
||||
ssl_curl_client.setCurlOpts();
|
||||
bool connection_ok = ssl_curl_client.connect();
|
||||
if (!connection_ok)
|
||||
{
|
||||
stringstream url_s;
|
||||
|
||||
@@ -256,10 +256,23 @@ private:
if (!getenv("DOCKER_RPM_ENABLED")) return HealthCheckStatus::IGNORED;

static const string standalone_cmd = "/usr/sbin/cpnano -s --docker-rpm; echo $?";
static int timeout_tolerance = 1;
static HealthCheckStatus health_status = HealthCheckStatus::HEALTHY;

dbgTrace(D_HEALTH_CHECK) << "Checking the standalone docker health status with command: " << standalone_cmd;

auto maybe_result = Singleton::Consume<I_ShellCmd>::by<HealthChecker>()->getExecOutput(standalone_cmd, 1000);
auto maybe_result = Singleton::Consume<I_ShellCmd>::by<HealthChecker>()->getExecOutput(standalone_cmd, 5000);
if (!maybe_result.ok()) {
if (maybe_result.getErr().find("Reached timeout") != string::npos) {
dbgWarning(D_HEALTH_CHECK)
<< "Reached timeout while querying standalone health status, attempt number: "
<< timeout_tolerance;

return health_status == HealthCheckStatus::UNHEALTHY || timeout_tolerance++ > 3 ?
HealthCheckStatus::UNHEALTHY :
health_status;
}

dbgWarning(D_HEALTH_CHECK) << "Unable to get the standalone docker status. Returning unhealthy status.";
return HealthCheckStatus::UNHEALTHY;
}
@@ -267,10 +280,10 @@ private:

auto response = NGEN::Strings::removeTrailingWhitespaces(maybe_result.unpack());

if (response.back() == '0') return HealthCheckStatus::HEALTHY;
if (response.back() == '1') return HealthCheckStatus::UNHEALTHY;
if (response.back() == '1') return health_status = HealthCheckStatus::UNHEALTHY;

return HealthCheckStatus::DEGRADED;
timeout_tolerance = 1;
return health_status = (response.back() == '0') ? HealthCheckStatus::HEALTHY : HealthCheckStatus::DEGRADED;
}
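The static counters above make consecutive shell-command timeouts tolerated rather than immediately fatal. A rough sketch of the same decision, with a hypothetical helper name and the thresholds taken from the hunk:

    // Status to report when the health query times out: keep the last known status for the
    // first three consecutive timeouts, report UNHEALTHY from the fourth on (or immediately,
    // if the last known status was already UNHEALTHY). timeout_tolerance starts at 1.
    HealthCheckStatus onQueryTimeout(HealthCheckStatus last_status, int &timeout_tolerance)
    {
        return last_status == HealthCheckStatus::UNHEALTHY || timeout_tolerance++ > 3
            ? HealthCheckStatus::UNHEALTHY
            : last_status;
    }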
|
||||
|
||||
bool
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.