diff --git a/api/fast-whisper/fast-whisper-service.yaml b/api/fast-whisper/fast-whisper-service.yaml
deleted file mode 100644
index d270e7c..0000000
--- a/api/fast-whisper/fast-whisper-service.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: fast-whisper-service
-  namespace: api-ns
-spec:
-  type: LoadBalancer
-  ports:
-    - name: faster-whisper
-      port: 8000
-      targetPort: 8000
-
-  selector:
-    app: fast-whisper
diff --git a/api/fast-whisper/fast-whisper.yaml b/api/fast-whisper/fast-whisper.yaml
deleted file mode 100644
index 00619bb..0000000
--- a/api/fast-whisper/fast-whisper.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: fast-whisper
-  namespace: api-ns
-  labels:
-    app: fast-whisper
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: fast-whisper
-  template:
-    metadata:
-      labels:
-        app: fast-whisper
-    spec:
-      runtimeClassName: nvidia
-      containers:
-      - name: fast-whisper
-        image: fedirz/faster-whisper-server:latest-cuda
-        ports:
-        - containerPort: 8000
-        resources:
-          limits:
-            nvidia.com/gpu: 1
-        env:
-        - name: MAX_NO_DATA_SECONDS
-          value: "100.0"
-        - name: WHISPER_MODEL
-          value: "Systran/faster-distil-whisper-medium.en"
diff --git a/api/general-api/api-service.yaml b/api/general-api/api-service.yaml
deleted file mode 100644
index 7cee1e7..0000000
--- a/api/general-api/api-service.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: api-service
-  namespace: api-ns
-spec:
-  type: LoadBalancer
-  ports:
-    - name: general-api
-      port: 8080
-      targetPort: 80
-  selector:
-    app: api-apps
diff --git a/api/general-api/general-api-config-map.yaml b/api/general-api/general-api-config-map.yaml
deleted file mode 100644
index a42e892..0000000
--- a/api/general-api/general-api-config-map.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: general-api-config-map
-  namespace: api-ns
-data:
-  config.yaml: |
-    images:
-      access_key: ${ACCESS_KEY}
-      secret_key: ${SECRET_KEY}
-      endpoint: s3.clortox.com
-      bucket: backgrounds
-      secure: True
-    weather:
-      period: 15
diff --git a/api/general-api/general-api-deployment.yaml b/api/general-api/general-api-deployment.yaml
deleted file mode 100644
index 25fb235..0000000
--- a/api/general-api/general-api-deployment.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: general-api
-  namespace: api-ns
-  labels:
-    app: general-api
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: api-apps
-  template:
-    metadata:
-      labels:
-        app: api-apps
-    spec:
-      containers:
-      - name: general-api
-        image: git.clortox.com/tyler/general-api:1.0.15
-        imagePullPolicy: Always
-        env:
-        - name: ACCESS_KEY
-          valueFrom:
-            secretKeyRef:
-              name: general-api-secret
-              key: access-key
-        - name: SECRET_KEY
-          valueFrom:
-            secretKeyRef:
-              name: general-api-secret
-              key: secret-key
-        - name: CONFIG
-          value: "config.yaml"
-        ports:
-        - containerPort: 80
-        volumeMounts:
-        - name: config-volume
-          mountPath: /media/config.yaml
-      volumes:
-      - name: config-volume
-        configMap:
-          name: general-api-config-map
-          items:
-          - key: config.yaml
-            path: config.yaml
diff --git a/api/general-api/sealed-secret.yaml b/api/general-api/sealed-secret.yaml
deleted file mode 100644
index aec029c..0000000
--- a/api/general-api/sealed-secret.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: bitnami.com/v1alpha1
-kind: SealedSecret
-metadata:
-  creationTimestamp: null
-  name: general-api-secret
-  namespace: api-ns
-spec:
-  encryptedData:
access-key: AgAPhGl0HfCpVQEmmLhArZgJgfXRUBEvokXse1mzEbsrIiSech0beSlKgTwrDwIFt//HISitEn3qtJkU1UXtVjhv8iTco9JgFLg/xmv+24eiRwG5smrtZqPN1iE7SL5kGfxM4RDvpSMxwdNnfsF53WGjsXIYaHtIVn316e2TQf85vSyLpq+BUgbgP5GqG931ur5ErZf48LHIdZ91wvxd1Lcog+C/jVKRmq0KvDKQZPJZD5qF1uyUiMz/ttY2NDLieGzkhQRpjVJmZaqHTdb6nBGcsMhdu8rI1pCkP8PHe8LsnwooRqPdZwg63Vdna7PzztrEesy5koyQwm4IOduB8xU48wY7KGiQ7ZLk4AHoamIQ1mYwK7D/Z5yvpVHItOUPsCzqo+JYbNhTMlXWVrCTWJU5D+CIvIgRUN5d4W4mM70wb75Glo5JGZr4Yw31zbxMSqCOzGeqILRwnKXP78RtM0URFU5sVfkvqbjm/1qP70YgtlowC/gBNEgHykYJV8CjeBb8tf1vjUDLOr+NgOj0FV/SrnFwan3YyMdwMirrZSoB3irta+7AEe1211ur+13JjZWhdbuJfCkP2l3uJz7zxWdGEapf2psCmC+syzyVrkEA5p1B0/Mu8H+d3dpranRmCWNOTySa1CEIPFuJ+ipxMsbQmPi7R60nQ6ZIUAOnJh/SAae1n1ixuOdc7KfYaSR+abYXMgrTBkC9riG6ew== - secret-key: AgCcGmblfLgGakV8y2p1S3PHReMe2EuqvnfM8XHs6mK8fRGlFIzUw9Rsi9R/MeEfSx5eBTHzN+Euy/ykWJAKhbWw0cEcx+YcL8RahnGAJIqFsSw+atYmv4MJ9JjsCXX+3H4svjtV5AiE019YxwwAX27QzMcEyWE3Rg7/WPNnqyvferfdI0j5NttDiFKyKQvZSrWg2knyopbfNywMijEICBGWgZMj/nRbNm2vXdgYWhFvxkGYVCuRjnbz+iU+T0PMlqWZmj1Yxs72QOoKBYa4pJxSfDjg1erTEiPQgFJPULiSiEargIrxcCdxRdbn9Us/qO26lgvTSCdtiHTzOALmeD9no8Cr6wqZDQD616OyBaFvTTcwCTa+YxaVB5mpoLHDUPOzjVBCpB7ojRH5nXXa7x3bIt9fz9dA9KNPywySsRcQ0hR/UoeMmtJfKx0I86VvxqhhhlEHAKAnUjZyCfaRvftCOkc4JfB9XZtDJr0/I47ToWNofEU1WDJlTkvm9dOJFvRsNGzsLAHhT3I/8cP+sCAY594lmI6J+MMfOjPV5Ig0xQic2my9clrKPohUbKue0R8cSUIb42OnskLOE0bx91JYXBdDeZ6lxawrWznWwPG3j7BsIslqDYSUeKFun91c4xSp2GvdliTS3Md/O/f+yqcBSKGnRkGXZaOpPEB+9MyP3PYVd2pSFt/7fXi9gFj2CxnbClVCsDNCf+hqVH52a2UB9Q758FLO+N+iSpzD61hQZg== - template: - metadata: - creationTimestamp: null - name: general-api-secret - namespace: api-ns diff --git a/authentik/helmrelease-authentik.yaml b/authentik/helmrelease-authentik.yaml deleted file mode 100644 index 72f624e..0000000 --- a/authentik/helmrelease-authentik.yaml +++ /dev/null @@ -1,1028 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: authentik - namespace: authentik-ns - annotations: - force-recreate: true -spec: - chart: - spec: - chart: authentik - sourceRef: - kind: HelmRepository - name: authentik - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: authentik - values: - # -- Provide a name in place of `authentik`. Prefer using global.nameOverride if possible - nameOverride: "" - # -- String to fully override `"authentik.fullname"`. Prefer using global.fullnameOverride if possible - fullnameOverride: "" - # -- Override the Kubernetes version, which is used to evaluate certain manifests - kubeVersionOverride: "" - - - ## Globally shared configuration for authentik components. - global: - # -- Provide a name in place of `authentik` - nameOverride: "" - # -- String to fully override `"authentik.fullname"` - fullnameOverride: "" - # -- Common labels for all resources. - additionalLabels: {} - # app: authentik - - # Number of old deployment ReplicaSets to retain. The rest will be garbage collected. - revisionHistoryLimit: 3 - - # Default image used by all authentik components. For GeoIP configuration, see the geoip values below. 
- image: - # -- If defined, a repository applied to all authentik deployments - repository: ghcr.io/goauthentik/server - # -- Overrides the global authentik whose default is the chart appVersion - tag: "" - # -- If defined, an image digest applied to all authentik deployments - digest: "" - # -- If defined, an imagePullPolicy applied to all authentik deployments - pullPolicy: IfNotPresent - - # -- Secrets with credentials to pull images from a private registry - imagePullSecrets: [] - - # -- Annotations for all deployed Deployments - deploymentAnnotations: {} - - # -- Annotations for all deployed pods - podAnnotations: {} - - # -- Labels for all deployed pods - podLabels: {} - - # -- Add Prometheus scrape annotations to all metrics services. This can be used as an alternative to the ServiceMonitors. - addPrometheusAnnotations: false - - # -- Toggle and define pod-level security context. - # @default -- `{}` (See [values.yaml]) - securityContext: {} - # runAsUser: 1000 - # runAsGroup: 1000 - # fsGroup: 1000 - - # -- Mapping between IP and hostnames that will be injected as entries in the pod's hosts files - hostAliases: [] - # - ip: 10.20.30.40 - # hostnames: - # - my.hostname - - # -- Default priority class for all components - priorityClassName: "" - - # -- Default node selector for all components - nodeSelector: {} - - # -- Default tolerations for all components - tolerations: [] - - # Default affinity preset for all components - affinity: - # -- Default pod anti-affinity rules. Either: `none`, `soft` or `hard` - podAntiAffinity: soft - # Node affinity rules - nodeAffinity: - # -- Default node affinity rules. Either `none`, `soft` or `hard` - type: hard - # -- Default match expressions for node affinity - matchExpressions: [] - # - key: topology.kubernetes.io/zone - # operator: In - # values: - # - zonea - # - zoneb - - # -- Default [TopologySpreadConstraints] rules for all components - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - - # -- Deployment strategy for all deployed Deployments - deploymentStrategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 25% - # maxUnavailable: 25% - - # -- Environment variables to pass to all deployed Deployments. Does not apply to GeoIP - # See configuration options at https://goauthentik.io/docs/installation/configuration/ - # @default -- `[]` (See [values.yaml]) - env: - - name: AUTHENTIK_SECRET_KEY - valueFrom: - secretKeyRef: - name: authentik-secret - key: secret-key - - name: AUTHENTIK_POSTGRESQL__PASSWORD - valueFrom: - secretKeyRef: - name: authentik-secret - key: postgres-password - - name: AUTHENTIK_REDIS__PASSWORD - valueFrom: - secretKeyRef: - name: authentik-secret - key: redis-password - # - name: AUTHENTIK_VAR_NAME - # value: VALUE - # - name: AUTHENTIK_VAR_OTHER - # valueFrom: - # secretKeyRef: - # name: secret-name - # key: secret-key - # - name: AUTHENTIK_VAR_ANOTHER - # valueFrom: - # configMapKeyRef: - # name: config-map-name - # key: config-map-key - - # -- envFrom to pass to all deployed Deployments. Does not apply to GeoIP - # @default -- `[]` (See [values.yaml]) - envFrom: [] - # - configMapRef: - # name: config-map-name - # - secretRef: - # name: secret-name - - # -- Additional volumeMounts to all deployed Deployments. 
Does not apply to GeoIP - # @default -- `[]` (See [values.yaml]) - volumeMounts: [] - # - name: custom - # mountPath: /custom - - # -- Additional volumes to all deployed Deployments. - # @default -- `[]` (See [values.yaml]) - volumes: [] - # - name: custom - # emptyDir: {} - - - ## Authentik configuration - authentik: - # -- Log level for server and worker - log_level: info - # -- Secret key used for cookie singing and unique user IDs, - # don't change this after the first install - secret_key: "" - events: - context_processors: - # -- Path for the GeoIP City database. If the file doesn't exist, GeoIP features are disabled. - geoip: /geoip/GeoLite2-City.mmdb - # -- Path for the GeoIP ASN database. If the file doesn't exist, GeoIP features are disabled. - asn: /geoip/GeoLite2-ASN.mmdb - email: - # -- SMTP Server emails are sent from, fully optional - host: "" - # -- SMTP server port - port: 587 - # -- SMTP credentials, when left empty, no authentication will be done - username: "" - # -- SMTP credentials, when left empty, no authentication will be done - password: "" - # -- Enable either use_tls or use_ssl, they can't be enabled at the same time. - use_tls: false - # -- Enable either use_tls or use_ssl, they can't be enabled at the same time. - use_ssl: false - # -- Connection timeout - timeout: 30 - # -- Email from address, can either be in the format "foo@bar.baz" or "authentik " - from: "" - outposts: - # -- Template used for managed outposts. The following placeholders can be used - # %(type)s - the type of the outpost - # %(version)s - version of your authentik install - # %(build_hash)s - only for beta versions, the build hash of the image - container_image_base: ghcr.io/goauthentik/%(type)s:%(version)s - error_reporting: - # -- This sends anonymous usage-data, stack traces on errors and - # performance data to sentry.beryju.org, and is fully opt-in - enabled: false - # -- This is a string that is sent to sentry with your error reports - environment: "k8s" - # -- Send PII (Personally identifiable information) data to sentry - send_pii: false - postgresql: - # -- set the postgresql hostname to talk to - # if unset and .Values.postgresql.enabled == true, will generate the default - # @default -- `{{ .Release.Name }}-postgresql` - host: "postgresql.postgresql-system.svc.cluster.local" - # -- postgresql Database name - # @default -- `authentik` - name: "authentik" - # -- postgresql Username - # @default -- `authentik` - user: "authentik" - #password: "" - port: 5432 - redis: - # -- set the redis hostname to talk to - # @default -- `{{ .Release.Name }}-redis-master` - host: "redis-master.redis-system.svc.cluster.local" - #host: "{{ .Release.Name }}-redis-master" - #password: "" - - - blueprints: - # -- List of config maps to mount blueprints from. - # Only keys in the configMap ending with `.yaml` will be discovered and applied. - configMaps: [] - # -- List of secrets to mount blueprints from. - # Only keys in the secret ending with `.yaml` will be discovered and applied. 
- secrets: [] - - - ## authentik server - server: - # -- authentik server name - name: server - - # -- The number of server pods to run - replicas: 1 - - ## authentik server Horizontal Pod Autoscaler - autoscaling: - # -- Enable Horizontal Pod Autoscaler ([HPA]) for the authentik server - enabled: false - # -- Minimum number of replicas for the authentik server [HPA] - minReplicas: 1 - # -- Maximum number of replicas for the authentik server [HPA] - maxReplicas: 5 - # -- Average CPU utilization percentage for the authentik server [HPA] - targetCPUUtilizationPercentage: 50 - # -- Average memory utilization percentage for the authentik server [HPA] - targetMemoryUtilizationPercentage: ~ - # -- Configures the scaling behavior of the target in both Up and Down directions. - behavior: {} - # scaleDown: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 180 - # scaleUp: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 2 - # periodSeconds: 60 - # -- Configures custom HPA metrics for the authentik server - # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - metrics: [] - - ## authentik server Pod Disruption Budget - ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - pdb: - # -- Deploy a [PodDistrubtionBudget] for the authentik server - enabled: false - # -- Labels to be added to the authentik server pdb - labels: {} - # -- Annotations to be added to the authentik server pdb - annotations: {} - # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) - # @default -- `""` (defaults to 0 if not specified) - minAvailable: "" - # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%) - ## Has higher precedence over `server.pdb.minAvailable` - maxUnavailable: "" - - ## authentik server image - ## This should match what is deployed in the worker. Prefer using global.image - image: - # -- Repository to use to the authentik server - # @default -- `""` (defaults to global.image.repository) - repository: "" # defaults to global.image.repository - # -- Tag to use to the authentik server - # @default -- `""` (defaults to global.image.tag) - tag: "" # defaults to global.image.tag - # -- Digest to use to the authentik server - # @default -- `""` (defaults to global.image.digest) - digest: "" # defaults to global.image.digest - # -- Image pull policy to use to the authentik server - # @default -- `""` (defaults to global.image.pullPolicy) - pullPolicy: "" # defaults to global.image.pullPolicy - - # -- Secrets with credentials to pull images from a private registry - # @default -- `[]` (defaults to global.imagePullSecrets) - imagePullSecrets: [] - - # -- Environment variables to pass to the authentik server. Does not apply to GeoIP - # See configuration options at https://goauthentik.io/docs/installation/configuration/ - # @default -- `[]` (See [values.yaml]) - env: [] - # - name: AUTHENTIK_VAR_NAME - # value: VALUE - # - name: AUTHENTIK_VAR_OTHER - # valueFrom: - # secretKeyRef: - # name: secret-name - # key: secret-key - # - name: AUTHENTIK_VAR_ANOTHER - # valueFrom: - # configMapKeyRef: - # name: config-map-name - # key: config-map-key - - # -- envFrom to pass to the authentik server. 
Does not apply to GeoIP - # @default -- `[]` (See [values.yaml]) - envFrom: [] - # - configMapRef: - # name: config-map-name - # - secretRef: - # name: secret-name - - # -- Specify postStart and preStop lifecycle hooks for you authentik server container - lifecycle: {} - - # -- Additional containers to be added to the authentik server pod - ## Note: Supports use of custom Helm templates - extraContainers: [] - # - name: my-sidecar - # image: nginx:latest - - # -- Init containers to add to the authentik server pod - ## Note: Supports use of custom Helm templates - initContainers: [] - # - name: download-tools - # image: alpine:3 - # command: [sh, -c] - # args: - # - echo init - - # -- Additional volumeMounts to the authentik server main container - volumeMounts: [] - # - name: custom - # mountPath: /custom - - # -- Additional volumes to the authentik server pod - volumes: [] - # - name: custom - # emptyDir: {} - - # -- Annotations to be added to the authentik server Deployment - deploymentAnnotations: {} - - # -- Annotations to be added to the authentik server pods - podAnnotations: {} - - # -- Labels to be added to the authentik server pods - podLabels: {} - - # -- Resource limits and requests for the authentik server - resources: {} - # requests: - # cpu: 100m - # memory: 512Mi - # limits: - # memory: 512Mi - - # authentik server container ports - containerPorts: - # -- http container port - http: 9000 - # -- https container port - https: 9443 - # -- metrics container port - metrics: 9300 - - # -- Host Network for authentik server pods - hostNetwork: false - - # -- [DNS configuration] - dnsConfig: {} - # -- Alternative DNS policy for authentik server pods - dnsPolicy: "" - - # -- authentik server pod-level security context - # @default -- `{}` (See [values.yaml]) - securityContext: {} - # runAsUser: 1000 - # runAsGroup: 1000 - # fsGroup: 1000 - - # -- authentik server container-level security context - # @default -- See [values.yaml] - containerSecurityContext: {} - # Not all of the following has been tested. Use at your own risk. 
- # runAsNonRoot: true - # readOnlyRootFilesystem: true - # allowPrivilegeEscalation: false - # seccomProfile: - # type: RuntimeDefault - # capabilities: - # drop: - # - ALL - - ## Liveness, readiness and startup probes for authentik server - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ - livenessProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 3 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - httpGet: - path: /-/health/live/ - port: http - - readinessProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 3 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - httpGet: - path: /-/health/ready/ - port: http - - startupProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 60 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - httpGet: - path: /-/health/live/ - port: http - - # -- terminationGracePeriodSeconds for container lifecycle hook - terminationGracePeriodSeconds: 30 - - # -- Prority class for the authentik server pods - # @default -- `""` (defaults to global.priorityClassName) - priorityClassName: "" - - # -- [Node selector] - # @default -- `{}` (defaults to global.nodeSelector) - nodeSelector: {} - - # -- [Tolerations] for use with node taints - # @default -- `[]` (defaults to global.tolerations) - tolerations: [] - - # -- Assign custom [affinity] rules to the deployment - # @default -- `{}` (defaults to the global.affinity preset) - affinity: {} - - # -- Assign custom [TopologySpreadConstraints] rules to the authentik server - # @default -- `[]` (defaults to global.topologySpreadConstraints) - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - - # -- Deployment strategy to be added to the authentik server Deployment - # @default -- `{}` (defaults to global.deploymentStrategy) - deploymentStrategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 25% - # maxUnavailable: 25% - - ## authentik server service configuration - service: - # -- authentik server service 
annotations - annotations: {} - # -- authentik server service labels - labels: {} - # -- authentik server service type - type: LoadBalancer - # -- authentik server service http port for NodePort service type (only if `server.service.type` is set to `NodePort`) - nodePortHttp: 30080 - # -- authentik server service https port for NodePort service type (only if `server.service.type` is set to `NodePort`) - nodePortHttps: 30443 - # -- authentik server service http port - servicePortHttp: 80 - # -- authentik server service https port - servicePortHttps: 443 - # -- authentik server service http port name - servicePortHttpName: http - # -- authentik server service https port name - servicePortHttpsName: https - # -- authentik server service http port appProtocol - # servicePortHttpAppProtocol: HTTP - # -- authentik server service https port appProtocol - # servicePortHttpsAppProtocol: HTTPS - # -- LoadBalancer will get created with the IP specified in this field - loadBalancerIP: "" - # -- Source IP ranges to allow access to service from - loadBalancerSourceRanges: [] - # -- authentik server service external IPs - externalIPs: [] - # -- Denotes if this service desires to route external traffic to node-local or cluster-wide endpoints - externalTrafficPolicy: "" - # -- Used to maintain session affinity. Supports `ClientIP` and `None` - sessionAffinity: "" - # -- Session affinity configuration - sessionAffinityConfig: {} - - ## authentik server metrics service configuration - metrics: - # -- deploy metrics service - enabled: true - service: - # -- metrics service type - type: ClusterIP - # -- metrics service clusterIP. `None` makes a "headless service" (no virtual IP) - clusterIP: "" - # -- metrics service annotations - annotations: {} - # -- metrics service labels - labels: {} - # -- metrics service port - servicePort: 9300 - # -- metrics service port name - portName: metrics - serviceMonitor: - # -- enable a prometheus ServiceMonitor - enabled: false - # -- Prometheus ServiceMonitor interval - interval: 30s - # -- Prometheus ServiceMonitor scrape timeout - scrapeTimeout: 3s - # -- Prometheus [RelabelConfigs] to apply to samples before scraping - relabelings: [] - # -- Prometheus [MetricsRelabelConfigs] to apply to samples before ingestion - metricRelabelings: [] - # -- Prometheus ServiceMonitor selector - selector: {} - # prometheus: kube-prometheus - - # -- Prometheus ServiceMonitor scheme - scheme: "" - # -- Prometheus ServiceMonitor tlsConfig - tlsConfig: {} - # -- Prometheus ServiceMonitor namespace - namespace: "" - # -- Prometheus ServiceMonitor labels - labels: {} - # -- Prometheus ServiceMonitor annotations - annotations: {} - - ingress: - # -- enable an ingress resource for the authentik server - enabled: false - # -- additional ingress annotations - annotations: {} - # -- additional ingress labels - labels: {} - # -- defines which ingress controller will implement the resource - ingressClassName: "" - # -- List of ingress hosts - hosts: [] - # - authentik.domain.tld - - # -- List of ingress paths - paths: - - / - # -- Ingress path type. 
One of `Exact`, `Prefix` or `ImplementationSpecific` - pathType: Prefix - # -- additional ingress paths - extraPaths: [] - # - path: /* - # pathType: Prefix - # backend: - # service: - # name: ssl-redirect - # port: - # name: use-annotation - - # -- ingress TLS configuration - tls: [] - # - secretName: authentik-tls - # hosts: - # - authentik.domain.tld - - # -- uses `server.service.servicePortHttps` instead of `server.service.servicePortHttp` - https: false - - - ## authentik worker - worker: - # -- authentik worker name - name: worker - - # -- The number of worker pods to run - replicas: 1 - - ## authentik worker Horizontal Pod Autoscaler - autoscaling: - # -- Enable Horizontal Pod Autoscaler ([HPA]) for the authentik worker - enabled: true - # -- Minimum number of replicas for the authentik worker [HPA] - minReplicas: 1 - # -- Maximum number of replicas for the authentik worker [HPA] - maxReplicas: 5 - # -- Average CPU utilization percentage for the authentik worker [HPA] - targetCPUUtilizationPercentage: 50 - # -- Average memory utilization percentage for the authentik worker [HPA] - targetMemoryUtilizationPercentage: ~ - # -- Configures the scaling behavior of the target in both Up and Down directions. - behavior: {} - # scaleDown: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 180 - # scaleUp: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 2 - # periodSeconds: 60 - # -- Configures custom HPA metrics for the authentik worker - # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - metrics: [] - - ## authentik worker Pod Disruption Budget - ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - pdb: - # -- Deploy a [PodDistrubtionBudget] for the authentik worker - enabled: false - # -- Labels to be added to the authentik worker pdb - labels: {} - # -- Annotations to be added to the authentik worker pdb - annotations: {} - # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) - # @default -- `""` (defaults to 0 if not specified) - minAvailable: "" - # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%) - ## Has higher precedence over `worker.pdb.minAvailable` - maxUnavailable: "" - - ## authentik worker image - ## This should match what is deployed in the server. Prefer using global.image - image: - # -- Repository to use to the authentik worker - # @default -- `""` (defaults to global.image.repository) - repository: "" # defaults to global.image.repository - # -- Tag to use to the authentik worker - # @default -- `""` (defaults to global.image.tag) - tag: "" # defaults to global.image.tag - # -- Digest to use to the authentik worker - # @default -- `""` (defaults to global.image.digest) - digest: "" # defaults to global.image.digest - # -- Image pull policy to use to the authentik worker - # @default -- `""` (defaults to global.image.pullPolicy) - pullPolicy: "" # defaults to global.image.pullPolicy - - # -- Secrets with credentials to pull images from a private registry - # @default -- `[]` (defaults to global.imagePullSecrets) - imagePullSecrets: [] - - # -- Environment variables to pass to the authentik worker. 
Does not apply to GeoIP - # See configuration options at https://goauthentik.io/docs/installation/configuration/ - # @default -- `[]` (See [values.yaml]) - env: - - name: AUTHENTIK_REDIS__DB - value: "1" - # - name: AUTHENTIK_VAR_NAME - # value: VALUE - # - name: AUTHENTIK_VAR_OTHER - # valueFrom: - # secretKeyRef: - # name: secret-name - # key: secret-key - # - name: AUTHENTIK_VAR_ANOTHER - # valueFrom: - # configMapKeyRef: - # name: config-map-name - # key: config-map-key - - # -- envFrom to pass to the authentik worker. Does not apply to GeoIP - # @default -- `[]` (See [values.yaml]) - envFrom: [] - # - configMapRef: - # name: config-map-name - # - secretRef: - # name: secret-name - - # -- Specify postStart and preStop lifecycle hooks for you authentik worker container - lifecycle: {} - - # -- Additional containers to be added to the authentik worker pod - ## Note: Supports use of custom Helm templates - extraContainers: [] - # - name: my-sidecar - # image: nginx:latest - - # -- Init containers to add to the authentik worker pod - ## Note: Supports use of custom Helm templates - initContainers: [] - # - name: download-tools - # image: alpine:3 - # command: [sh, -c] - # args: - # - echo init - - # -- Additional volumeMounts to the authentik worker main container - volumeMounts: [] - # - name: custom - # mountPath: /custom - - # -- Additional volumes to the authentik worker pod - volumes: [] - # - name: custom - # emptyDir: {} - - # -- Annotations to be added to the authentik worker Deployment - deploymentAnnotations: {} - - # -- Annotations to be added to the authentik worker pods - podAnnotations: {} - - # -- Labels to be added to the authentik worker pods - podLabels: {} - - # -- Resource limits and requests for the authentik worker - resources: {} - # requests: - # cpu: 100m - # memory: 512Mi - # limits: - # memory: 512Mi - - # -- Host Network for authentik worker pods - hostNetwork: false - - # -- [DNS configuration] - dnsConfig: {} - # -- Alternative DNS policy for authentik worker pods - dnsPolicy: "" - - # -- authentik worker pod-level security context - # @default -- `{}` (See [values.yaml]) - securityContext: {} - # runAsUser: 1000 - # runAsGroup: 1000 - # fsGroup: 1000 - - # -- authentik worker container-level security context - # @default -- See [values.yaml] - containerSecurityContext: {} - # Not all of the following has been tested. Use at your own risk. 
- # runAsNonRoot: true - # readOnlyRootFilesystem: true - # allowPrivilegeEscalation: false - # seccomProfile: - # type: RuntimeDefault - # capabilities: - # drop: - # - ALL - - livenessProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 3 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - exec: - command: - - ak - - healthcheck - - readinessProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 3 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - exec: - command: - - ak - - healthcheck - - startupProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 60 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 30 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - exec: - command: - - ak - - healthcheck - - # -- terminationGracePeriodSeconds for container lifecycle hook - terminationGracePeriodSeconds: 30 - - # -- Prority class for the authentik worker pods - # @default -- `""` (defaults to global.priorityClassName) - priorityClassName: "" - - # -- [Node selector] - # @default -- `{}` (defaults to global.nodeSelector) - nodeSelector: {} - - # -- [Tolerations] for use with node taints - # @default -- `[]` (defaults to global.tolerations) - tolerations: [] - - # -- Assign custom [affinity] rules to the deployment - # @default -- `{}` (defaults to the global.affinity preset) - affinity: {} - - # -- Assign custom [TopologySpreadConstraints] rules to the authentik worker - # @default -- `[]` (defaults to global.topologySpreadConstraints) - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - - # -- Deployment strategy to be added to the authentik worker Deployment - # @default -- `{}` (defaults to global.deploymentStrategy) - deploymentStrategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 25% - # maxUnavailable: 25% - - - serviceAccount: - # -- Create service account. 
Needed for managed outposts - create: true - # -- additional service account annotations - annotations: {} - serviceAccountSecret: - # As we use the authentik-remote-cluster chart as subchart, and that chart - # creates a service account secret by default which we don't need here, - # disable its creation - enabled: false - fullnameOverride: authentik - - - geoip: - # -- enable GeoIP sidecars for the authentik server and worker pods - enabled: false - - editionIds: "GeoLite2-City GeoLite2-ASN" - # -- GeoIP update frequency, in hours - updateInterval: 8 - # -- sign up under https://www.maxmind.com/en/geolite2/signup - accountId: "" - # -- sign up under https://www.maxmind.com/en/geolite2/signup - licenseKey: "" - ## use existing secret instead of values above - existingSecret: - # -- name of an existing secret to use instead of values above - secretName: "" - # -- key in the secret containing the account ID - accountId: "account_id" - # -- key in the secret containing the license key - licenseKey: "license_key" - - image: - # -- If defined, a repository for GeoIP images - repository: ghcr.io/maxmind/geoipupdate - # -- If defined, a tag for GeoIP images - tag: v6.0.0 - # -- If defined, an image digest for GeoIP images - digest: "" - # -- If defined, an imagePullPolicy for GeoIP images - pullPolicy: IfNotPresent - - # -- Environment variables to pass to the GeoIP containers - # @default -- `[]` (See [values.yaml]) - env: [] - # - name: GEOIPUPDATE_VAR_NAME - # value: VALUE - # - name: GEOIPUPDATE_VAR_OTHER - # valueFrom: - # secretKeyRef: - # name: secret-name - # key: secret-key - # - name: GEOIPUPDATE_VAR_ANOTHER - # valueFrom: - # configMapKeyRef: - # name: config-map-name - # key: config-map-key - - # -- envFrom to pass to the GeoIP containers - # @default -- `[]` (See [values.yaml]) - envFrom: [] - # - configMapRef: - # name: config-map-name - # - secretRef: - # name: secret-name - - # -- Additional volumeMounts to the GeoIP containers. Make sure the volumes exists for the server and the worker. - volumeMounts: [] - # - name: custom - # mountPath: /custom - - # -- Resource limits and requests for GeoIP containers - resources: {} - # requests: - # cpu: 100m - # memory: 128Mi - # limits: - # memory: 128Mi - - # -- GeoIP container-level security context - # @default -- See [values.yaml] - containerSecurityContext: {} - # Not all of the following has been tested. Use at your own risk. - # runAsNonRoot: true - # readOnlyRootFilesystem: true - # allowPrivilegeEscalation: false - # seccomProfile: - # type: RuntimeDefault - # capabilities: - # drop: - # - ALL - - - prometheus: - rules: - enabled: false - # -- PrometheusRule namespace - namespace: "" - # -- PrometheusRule selector - selector: {} - # prometheus: kube-prometheus - - # -- PrometheusRule labels - labels: {} - # -- PrometheusRule annotations - annotations: {} - - - postgresql: - # -- enable the Bitnami PostgreSQL chart. Refer to https://github.com/bitnami/charts/blob/main/bitnami/postgresql/ for possible values. - enabled: false - auth: - username: authentik - database: authentik - # password: "" - primary: - extendedConfiguration: | - max_connections = 500 - # persistence: - # enabled: true - # storageClass: - # accessModes: - # - ReadWriteOnce - - - redis: - # -- enable the Bitnami Redis chart. Refer to https://github.com/bitnami/charts/blob/main/bitnami/redis/ for possible values. - enabled: false - architecture: standalone - auth: - enabled: false - - - # -- additional resources to deploy. Those objects are templated. 
- additionalObjects: [] diff --git a/authentik/sealed-secret.yaml b/authentik/sealed-secret.yaml deleted file mode 100644 index 812e47b..0000000 --- a/authentik/sealed-secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: authentik-secret - namespace: authentik-ns -spec: - encryptedData: - postgres-password: AgBigFPSosBY6PGUxR4zdIntM+oGMyaDY9mHZBwL5xbjEEvmzNKCuCfQFuiE07WqV3fjWEp6D3o23fIMomPC3SNLWySfti8o5pyBrPGDZLR1dVYWLmkyMCj0pzbDmPgAArBuzGmQG6P+Kn4lqlkSU6F50ev/W8yHUPkrlp+iJsGM9wYNlboaZmDMowIK5ny8sQ5vIb+QakS3ybRa3DfX/T3yNvuhOeCt+367/3oV0yNmCEBK4qKpTsAkWctxXooX1wcAkOwMesqfE42I5Mt+s/UnbU5fXJdzM0YI7WZreEy5oaG1shDxp1PhXoc12yCt5KobTj0xlttUVFVb8IaOY7r4oSI74vrL8KGuZdny0oeWvVbiwA/SvOt7S05RdryYWf66jN71/Aku5LnKJwRoa7veGeX9S5pUe1wZyVSDN6trkJcG5ZJRmEerr4MOZ4YX9cB2FktEmd+estjIlm/UhEIRN8Qv4qd54t6j2Ajhk6EJ3Ky6mI9xiun+0ti9880rIHQiW5MpiZVB+nQlAosTVQu4wRjdnP6Z0ndP83e2rPkHJ/jF2iawXOBoS0Eh11UaXvRQyNQOt3ReIba7E0aSbynpULViOg/lVNLA2qgyp+37Veb44Mi2k7sHg7I8e6MOMVjBhfmv3HvMpdHHBIHSq2vaDlF/0i5o5OT0F1O+06OngfQAaQQc1SdpLeoPKget5fbNF9zgmfKxPodjayq+h6n3vm5QOc4TagtcG1PV38LsiQ== - redis-password: AgCWDT6n+wmF9+Qk4+bu1byc7TFmRwPGqrhBIdVvZrTMRh6jt43E8urutTAlqKO6JPbRw+gw7zA40uOOHYzU3UaIXdAueQtCRMhHAzKWMwvTuzKGqLmmKcxVF452wilyhMjLBgRuBvX43VK4kynIthM3LZmw9a/HAlbQqn624N3wvdOYXyrWG0YKisXJunEFPgQyygWozdFD/N+b2loBq5YvH3mLuOuJDcuAC+Ti7URRbHigZXOhpZK6ilycAcJxJlOE9FVDRXMYSophjDWtD/Wb7WNLU7iakdXjNMFNVlE89mzrLxOskI918l6hrMG+Tk9FrhwKZx9ZuVwoUOdLBhF7I0jjYWKnJ1gEIMKXNBcrQWcnqX392VTu4RG0YNIIzasYkJ4/i3bjDnIH9zpSnRn6VSL2ZRhikJBOGJRXlXamd93XcCC+wg7gLu9XGi6g7ddC9UksxFzfIoMvj6aZ5EzERwJ7Td/qH1mWcfm5iesXKP1Y7PUSElIXIVmx9ifLgzIfbreb5VJDj2v+gTD44zxy+zHhSgdyefR2FcXT2eZv9CFO/VS8WB/F8+edJai0wHmJv0ooYVNS0PtIkyD8DEUC3Egt97SmWlQlEn1rfX1hj7jpN7HTpW19l9kV3r9n84ZzVJf62qybHElKOQWoqdz2Xxv6gPannZ8XQbk3nR0dG99jrUhvTpqjLFaWV+27PE0bRuV6w1G5Zm7X6Jdr/y3p8UvH2UonA2/8xjPANci/tA== - secret-key: AgBGLb8gPEET4udFwIMlgqWz5nIvu0/Tq6AhkCvxYTF4z2Gl4I7uOA4QtsnqDfOeQXJStpJ02ndc+q5l1uoP+hVgwhX1yWdeAtlQgubCpGraCQqofqVrwQwt9DoZqre+8rCp3llEugTP72Vekx9s9/8nDs+JqfBtfgLSdYqaJDO7fd3P4DDvA+DPhRTuT8j1YkX9mejxaWxd9lDss2OXWgZ/HDvGrm61FS3ByVqAo0uuayBcC8TtVrcjA6o2bfCFzz7g1uwzDC10bE7RNuJzpErulrOv/QzgxB/yTmQ4JlJmbgonAC3ZUBBc5hAl7m7hKuq6CFyHD1kZCWJ/cZkg9AagI0u9f96+y5kYh+KZK8/WuPHF3LhM9dam9KYKVJRqWE4nq5/QYcpbkQtKBqKlGPZZCyEmH/ylL6r3djMHNjKTpdCwlMqNFetDPLDMNFB1i2Nqg7PAzqOE3Dq5AHShSBG//losKiTfoNF3uYwbrA3cQhxCOAM/1EiLEvz1KerHaJrlcV5Y32ZaOj6P4aQeBAzEpmS8sRr0yooYmA1iJce+wYMsvI1VlNKP4HU+wLm5xKNca1SRvZaOmz1RUp3l+Q+jckhHmRFubLOR6RpmdiGtTAyvjfMRkRtzDfnyu+xGvCqlontPIPWh7yl8jsqrjhr5/tXVtSs+yZhdfn1M7oiDbv7xa4o2jAxt+MpP1XtMaoH/Rnt3x2JprDrSU+1YICE9Ibzo6xjJYFs5I/fM7auUvF3cmX40zafRHw5DYehWCBU3mA== - template: - metadata: - creationTimestamp: null - name: authentik-secret - namespace: authentik-ns diff --git a/cert-manager/helmrelease-cert-manager.yaml b/cert-manager/helmrelease-cert-manager.yaml deleted file mode 100644 index f3d88f9..0000000 --- a/cert-manager/helmrelease-cert-manager.yaml +++ /dev/null @@ -1,1347 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: certmanager - namespace: cert-manager -spec: - chart: - spec: - chart: cert-manager - sourceRef: - kind: HelmRepository - name: jetstack - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: cert-manager - values: - # Default values for cert-manager. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. 
- global: - # Reference to one or more secrets to be used when pulling images. - # For more information, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). - # - # For example: - # imagePullSecrets: - # - name: "image-pull-secret" - imagePullSecrets: [] - - # Labels to apply to all resources. - # Please note that this does not add labels to the resources created dynamically by the controllers. - # For these resources, you have to add the labels in the template in the cert-manager custom resource: - # For example, podTemplate/ ingressTemplate in ACMEChallengeSolverHTTP01Ingress - # For more information, see the [cert-manager documentation](https://cert-manager.io/docs/reference/api-docs/#acme.cert-manager.io/v1.ACMEChallengeSolverHTTP01Ingress). - # For example, secretTemplate in CertificateSpec - # For more information, see the [cert-manager documentation](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec). - commonLabels: {} - - # The number of old ReplicaSets to retain to allow rollback (if not set, the default Kubernetes value is set to 10). - # +docs:property - # revisionHistoryLimit: 1 - - # The optional priority class to be used for the cert-manager pods. - priorityClassName: "" - - rbac: - # Create required ClusterRoles and ClusterRoleBindings for cert-manager. - create: true - # Aggregate ClusterRoles to Kubernetes default user-facing roles. For more information, see [User-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) - aggregateClusterRoles: true - - podSecurityPolicy: - # Create PodSecurityPolicy for cert-manager. - # - # Note that PodSecurityPolicy was deprecated in Kubernetes 1.21 and removed in Kubernetes 1.25. - enabled: false - # Configure the PodSecurityPolicy to use AppArmor. - useAppArmor: true - - # Set the verbosity of cert-manager. A range of 0 - 6, with 6 being the most verbose. - logLevel: 2 - - leaderElection: - # Override the namespace used for the leader election lease. - namespace: "kube-system" - - # The duration that non-leader candidates will wait after observing a - # leadership renewal until attempting to acquire leadership of a led but - # unrenewed leader slot. This is effectively the maximum duration that a - # leader can be stopped before it is replaced by another candidate. - # +docs:property - # leaseDuration: 60s - - # The interval between attempts by the acting master to renew a leadership - # slot before it stops leading. This must be less than or equal to the - # lease duration. - # +docs:property - # renewDeadline: 40s - - # The duration the clients should wait between attempting acquisition and - # renewal of a leadership. - # +docs:property - # retryPeriod: 15s - - # This option is equivalent to setting crds.enabled=true and crds.keep=true. - # Deprecated: use crds.enabled and crds.keep instead. - installCRDs: true - - crds: - # This option decides if the CRDs should be installed - # as part of the Helm installation. - enabled: true - - # This option makes it so that the "helm.sh/resource-policy": keep - # annotation is added to the CRD. This will prevent Helm from uninstalling - # the CRD when the Helm release is uninstalled. - # WARNING: when the CRDs are removed, all cert-manager custom resources - # (Certificates, Issuers, ...) will be removed too by the garbage collector. 
- keep: true - - # +docs:section=Controller - - # The number of replicas of the cert-manager controller to run. - # - # The default is 1, but in production set this to 2 or 3 to provide high - # availability. - # - # If `replicas > 1`, consider setting `podDisruptionBudget.enabled=true`. - # - # Note that cert-manager uses leader election to ensure that there can - # only be a single instance active at a time. - replicaCount: 1 - - # Deployment update strategy for the cert-manager controller deployment. - # For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy). - # - # For example: - # strategy: - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - strategy: {} - - podDisruptionBudget: - # Enable or disable the PodDisruptionBudget resource. - # - # This prevents downtime during voluntary disruptions such as during a Node upgrade. - # For example, the PodDisruptionBudget will block `kubectl drain` - # if it is used on the Node where the only remaining cert-manager - # Pod is currently running. - enabled: false - - # This configures the minimum available pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). - # It cannot be used if `maxUnavailable` is set. - # +docs:property - # minAvailable: 1 - - # This configures the maximum unavailable pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). - # it cannot be used if `minAvailable` is set. - # +docs:property - # maxUnavailable: 1 - - # A comma-separated list of feature gates that should be enabled on the - # controller pod. - featureGates: "" - - # The maximum number of challenges that can be scheduled as 'processing' at once. - maxConcurrentChallenges: 60 - - image: - # The container registry to pull the manager image from. - # +docs:property - # registry: quay.io - - # The container image for the cert-manager controller. - # +docs:property - repository: quay.io/jetstack/cert-manager-controller - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion is used. - # +docs:property - # tag: vX.Y.Z - - # Setting a digest will override any tag. - # +docs:property - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - # Kubernetes imagePullPolicy on Deployment. - pullPolicy: IfNotPresent - - # Override the namespace used to store DNS provider credentials etc. for ClusterIssuer - # resources. By default, the same namespace as cert-manager is deployed within is - # used. This namespace will not be automatically created by the Helm chart. - clusterResourceNamespace: "" - - # This namespace allows you to define where the services are installed into. - # If not set then they use the namespace of the release. - # This is helpful when installing cert manager as a chart dependency (sub chart). - namespace: "" - - serviceAccount: - # Specifies whether a service account should be created. - create: true - - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template. - # +docs:property - # name: "" - - # Optional additional annotations to add to the controller's Service Account. - # +docs:property - # annotations: {} - - # Optional additional labels to add to the controller's Service Account. - # +docs:property - # labels: {} - - # Automount API credentials for a Service Account. 
- automountServiceAccountToken: true - - # Automounting API credentials for a particular pod. - # +docs:property - # automountServiceAccountToken: true - - # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted. - enableCertificateOwnerRef: false - - # This property is used to configure options for the controller pod. - # This allows setting options that would usually be provided using flags. - # An APIVersion and Kind must be specified in your values.yaml file. - # Flags will override options that are set here. - # - # For example: - # config: - # apiVersion: controller.config.cert-manager.io/v1alpha1 - # kind: ControllerConfiguration - # logging: - # verbosity: 2 - # format: text - # leaderElectionConfig: - # namespace: kube-system - # kubernetesAPIQPS: 9000 - # kubernetesAPIBurst: 9000 - # numberOfConcurrentWorkers: 200 - # featureGates: - # AdditionalCertificateOutputFormats: true - # DisallowInsecureCSRUsageDefinition: true - # ExperimentalCertificateSigningRequestControllers: true - # ExperimentalGatewayAPISupport: true - # LiteralCertificateSubject: true - # SecretsFilteredCaching: true - # ServerSideApply: true - # StableCertificateRequestName: true - # UseCertificateRequestBasicConstraints: true - # ValidateCAA: true - # metricsTLSConfig: - # dynamic: - # secretNamespace: "cert-manager" - # secretName: "cert-manager-metrics-ca" - # dnsNames: - # - cert-manager-metrics - # - cert-manager-metrics.cert-manager - # - cert-manager-metrics.cert-manager.svc - config: {} - - # Setting Nameservers for DNS01 Self Check. - # For more information, see the [cert-manager documentation](https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check). - - # A comma-separated string with the host and port of the recursive nameservers cert-manager should query. - dns01RecursiveNameservers: "" - - # Forces cert-manager to use only the recursive nameservers for verification. - # Enabling this option could cause the DNS01 self check to take longer owing to caching performed by the recursive nameservers. - dns01RecursiveNameserversOnly: false - - # Additional command line flags to pass to cert-manager controller binary. - # To see all available flags run `docker run quay.io/jetstack/cert-manager-controller: --help`. - # - # Use this flag to enable or disable arbitrary controllers. For example, to disable the CertificiateRequests approver. - # - # For example: - # extraArgs: - # - --controllers=*,-certificaterequests-approver - extraArgs: [] - - # Additional environment variables to pass to cert-manager controller binary. - extraEnv: [] - # - name: SOME_VAR - # value: 'some value' - - # Resources to provide to the cert-manager controller pod. - # - # For example: - # requests: - # cpu: 10m - # memory: 32Mi - # - # For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - resources: {} - - # Pod Security Context. - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - - # Container Security Context to be set on the controller component container. - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). 
- # +docs:property - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - - # Additional volumes to add to the cert-manager controller pod. - volumes: [] - - # Additional volume mounts to add to the cert-manager controller container. - volumeMounts: [] - - # Optional additional annotations to add to the controller Deployment. - # +docs:property - # deploymentAnnotations: {} - - # Optional additional annotations to add to the controller Pods. - # +docs:property - # podAnnotations: {} - - # Optional additional labels to add to the controller Pods. - podLabels: {} - - # Optional annotations to add to the controller Service. - # +docs:property - # serviceAnnotations: {} - - # Optional additional labels to add to the controller Service. - # +docs:property - # serviceLabels: {} - - # Optionally set the IP family policy for the controller Service to configure dual-stack; see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services). - # +docs:property - # serviceIPFamilyPolicy: "" - - # Optionally set the IP families for the controller Service that should be supported, in the order in which they should be applied to ClusterIP. Can be IPv4 and/or IPv6. - # +docs:property - # serviceIPFamilies: [] - - # Optional DNS settings. These are useful if you have a public and private DNS zone for - # the same domain on Route 53. The following is an example of ensuring - # cert-manager can access an ingress or DNS TXT records at all times. - # Note that this requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for - # the cluster to work. - - # Pod DNS policy. - # For more information, see [Pod's DNS Policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy). - # +docs:property - # podDnsPolicy: "None" - - # Pod DNS configuration. The podDnsConfig field is optional and can work with any podDnsPolicy - # settings. However, when a Pod's dnsPolicy is set to "None", the dnsConfig field has to be specified. - # For more information, see [Pod's DNS Config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config). - # +docs:property - # podDnsConfig: - # nameservers: - # - "1.1.1.1" - # - "8.8.8.8" - - # Optional hostAliases for cert-manager-controller pods. May be useful when performing ACME DNS-01 self checks. - hostAliases: [] - # - ip: 127.0.0.1 - # hostnames: - # - foo.local - # - bar.local - # - ip: 10.1.2.3 - # hostnames: - # - foo.remote - # - bar.remote - - # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with - # matching labels. - # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). - # - # This default ensures that Pods are only scheduled to Linux nodes. - # It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. - # +docs:property - nodeSelector: - kubernetes.io/os: linux - - # +docs:ignore - ingressShim: {} - - # Optional default issuer to use for ingress resources. - # +docs:property=ingressShim.defaultIssuerName - # defaultIssuerName: "" - - # Optional default issuer kind to use for ingress resources. - # +docs:property=ingressShim.defaultIssuerKind - # defaultIssuerKind: "" - - # Optional default issuer group to use for ingress resources. 
- # +docs:property=ingressShim.defaultIssuerGroup - # defaultIssuerGroup: "" - - # Use these variables to configure the HTTP_PROXY environment variables. - - # Configures the HTTP_PROXY environment variable where a HTTP proxy is required. - # +docs:property - # http_proxy: "http://proxy:8080" - - # Configures the HTTPS_PROXY environment variable where a HTTP proxy is required. - # +docs:property - # https_proxy: "https://proxy:8080" - - # Configures the NO_PROXY environment variable where a HTTP proxy is required, - # but certain domains should be excluded. - # +docs:property - # no_proxy: 127.0.0.1,localhost - - - # A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). - # - # For example: - # affinity: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: foo.bar.com/role - # operator: In - # values: - # - master - affinity: {} - - # A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). - # - # For example: - # tolerations: - # - key: foo.bar.com/role - # operator: Equal - # value: master - # effect: NoSchedule - tolerations: [] - - # A list of Kubernetes TopologySpreadConstraints, if required. For more information, see [Topology spread constraint v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core - # - # For example: - # topologySpreadConstraints: - # - maxSkew: 2 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: ScheduleAnyway - # labelSelector: - # matchLabels: - # app.kubernetes.io/instance: cert-manager - # app.kubernetes.io/component: controller - topologySpreadConstraints: [] - - # LivenessProbe settings for the controller container of the controller Pod. - # - # This is enabled by default, in order to enable the clock-skew liveness probe that - # restarts the controller in case of a skew between the system clock and the monotonic clock. - # LivenessProbe durations and thresholds are based on those used for the Kubernetes - # controller-manager. For more information see the following on the - # [Kubernetes GitHub repository](https://github.com/kubernetes/kubernetes/blob/806b30170c61a38fedd54cc9ede4cd6275a1ad3b/cmd/kubeadm/app/util/staticpod/utils.go#L241-L245) - # +docs:property - livenessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 15 - successThreshold: 1 - failureThreshold: 8 - - # enableServiceLinks indicates whether information about services should be - # injected into the pod's environment variables, matching the syntax of Docker - # links. - enableServiceLinks: false - - # +docs:section=Prometheus - - prometheus: - # Enable Prometheus monitoring for the cert-manager controller to use with the - # Prometheus Operator. If this option is enabled without enabling `prometheus.servicemonitor.enabled` or - # `prometheus.podmonitor.enabled`, 'prometheus.io' annotations are added to the cert-manager Deployment - # resources. Additionally, a service is created which can be used together - # with your own ServiceMonitor (managed outside of this Helm chart). - # Otherwise, a ServiceMonitor/ PodMonitor is created. - enabled: true - - servicemonitor: - # Create a ServiceMonitor to add cert-manager to Prometheus. 
- enabled: false - - # Specifies the `prometheus` label on the created ServiceMonitor. This is - # used when different Prometheus instances have label selectors matching - # different ServiceMonitors. - prometheusInstance: default - - # The target port to set on the ServiceMonitor. This must match the port that the - # cert-manager controller is listening on for metrics. - targetPort: 9402 - - # The path to scrape for metrics. - path: /metrics - - # The interval to scrape metrics. - interval: 60s - - # The timeout before a metrics scrape fails. - scrapeTimeout: 30s - - # Additional labels to add to the ServiceMonitor. - labels: {} - - # Additional annotations to add to the ServiceMonitor. - annotations: {} - - # Keep labels from scraped data, overriding server-side labels. - honorLabels: false - - # EndpointAdditionalProperties allows setting additional properties on the - # endpoint such as relabelings, metricRelabelings etc. - # - # For example: - # endpointAdditionalProperties: - # relabelings: - # - action: replace - # sourceLabels: - # - __meta_kubernetes_pod_node_name - # targetLabel: instance - # - # +docs:property - endpointAdditionalProperties: {} - - # Note that you can not enable both PodMonitor and ServiceMonitor as they are mutually exclusive. Enabling both will result in a error. - podmonitor: - # Create a PodMonitor to add cert-manager to Prometheus. - enabled: false - - # Specifies the `prometheus` label on the created PodMonitor. This is - # used when different Prometheus instances have label selectors matching - # different PodMonitors. - prometheusInstance: default - - # The path to scrape for metrics. - path: /metrics - - # The interval to scrape metrics. - interval: 60s - - # The timeout before a metrics scrape fails. - scrapeTimeout: 30s - - # Additional labels to add to the PodMonitor. - labels: {} - - # Additional annotations to add to the PodMonitor. - annotations: {} - - # Keep labels from scraped data, overriding server-side labels. - honorLabels: false - - # EndpointAdditionalProperties allows setting additional properties on the - # endpoint such as relabelings, metricRelabelings etc. - # - # For example: - # endpointAdditionalProperties: - # relabelings: - # - action: replace - # sourceLabels: - # - __meta_kubernetes_pod_node_name - # targetLabel: instance - # - # +docs:property - endpointAdditionalProperties: {} - - # +docs:section=Webhook - - webhook: - # Number of replicas of the cert-manager webhook to run. - # - # The default is 1, but in production set this to 2 or 3 to provide high - # availability. - # - # If `replicas > 1`, consider setting `webhook.podDisruptionBudget.enabled=true`. - replicaCount: 1 - - # The number of seconds the API server should wait for the webhook to respond before treating the call as a failure. - # The value must be between 1 and 30 seconds. For more information, see - # [Validating webhook configuration v1](https://kubernetes.io/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1/). - # - # The default is set to the maximum value of 30 seconds as - # users sometimes report that the connection between the K8S API server and - # the cert-manager webhook server times out. - # If *this* timeout is reached, the error message will be "context deadline exceeded", - # which doesn't help the user diagnose what phase of the HTTPS connection timed out. 
- # For example, it could be during DNS resolution, TCP connection, TLS - # negotiation, HTTP negotiation, or slow HTTP response from the webhook - # server. - # By setting this timeout to its maximum value the underlying timeout error - # message has more chance of being returned to the end user. - timeoutSeconds: 30 - - # This is used to configure options for the webhook pod. - # This allows setting options that would usually be provided using flags. - # An APIVersion and Kind must be specified in your values.yaml file. - # Flags override options that are set here. - # - # For example: - # apiVersion: webhook.config.cert-manager.io/v1alpha1 - # kind: WebhookConfiguration - # # The port that the webhook listens on for requests. - # # In GKE private clusters, by default Kubernetes apiservers are allowed to - # # talk to the cluster nodes only on 443 and 10250. Configuring - # # securePort: 10250 therefore will work out-of-the-box without needing to add firewall - # # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers < 1000. - # # This should be uncommented and set as a default by the chart once - # # the apiVersion of WebhookConfiguration graduates beyond v1alpha1. - # securePort: 10250 - config: {} - - # The update strategy for the cert-manager webhook deployment. - # For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) - # - # For example: - # strategy: - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - strategy: {} - - # Pod Security Context to be set on the webhook component Pod. - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - - # Container Security Context to be set on the webhook component container. - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - - podDisruptionBudget: - # Enable or disable the PodDisruptionBudget resource. - # - # This prevents downtime during voluntary disruptions such as during a Node upgrade. - # For example, the PodDisruptionBudget will block `kubectl drain` - # if it is used on the Node where the only remaining cert-manager - # Pod is currently running. - enabled: false - - # This property configures the minimum available pods for disruptions. Can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). - # It cannot be used if `maxUnavailable` is set. - # +docs:property - # minAvailable: 1 - - # This property configures the maximum unavailable pods for disruptions. Can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). - # It cannot be used if `minAvailable` is set. - # +docs:property - # maxUnavailable: 1 - - # Optional additional annotations to add to the webhook Deployment. - # +docs:property - # deploymentAnnotations: {} - - # Optional additional annotations to add to the webhook Pods. - # +docs:property - # podAnnotations: {} - - # Optional additional annotations to add to the webhook Service. 
- # +docs:property - # serviceAnnotations: {} - - # Optional additional annotations to add to the webhook MutatingWebhookConfiguration. - # +docs:property - # mutatingWebhookConfigurationAnnotations: {} - - # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration. - # +docs:property - # validatingWebhookConfigurationAnnotations: {} - - validatingWebhookConfiguration: - # Configure spec.namespaceSelector for validating webhooks. - # +docs:property - namespaceSelector: - matchExpressions: - - key: "cert-manager.io/disable-validation" - operator: "NotIn" - values: - - "true" - - mutatingWebhookConfiguration: - # Configure spec.namespaceSelector for mutating webhooks. - # +docs:property - namespaceSelector: {} - # matchLabels: - # key: value - # matchExpressions: - # - key: kubernetes.io/metadata.name - # operator: NotIn - # values: - # - kube-system - - - # Additional command line flags to pass to cert-manager webhook binary. - # To see all available flags run `docker run quay.io/jetstack/cert-manager-webhook: --help`. - extraArgs: [] - # Path to a file containing a WebhookConfiguration object used to configure the webhook. - # - --config= - - # Comma separated list of feature gates that should be enabled on the - # webhook pod. - featureGates: "" - - # Resources to provide to the cert-manager webhook pod. - # - # For example: - # requests: - # cpu: 10m - # memory: 32Mi - # - # For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - resources: {} - - # Liveness probe values. - # For more information, see [Container probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). - # - # +docs:property - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 60 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - - # Readiness probe values. - # For more information, see [Container probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). - # - # +docs:property - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 5 - successThreshold: 1 - timeoutSeconds: 1 - - # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with - # matching labels. - # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). - # - # This default ensures that Pods are only scheduled to Linux nodes. - # It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. - # +docs:property - nodeSelector: - kubernetes.io/os: linux - - # A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). - # - # For example: - # affinity: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: foo.bar.com/role - # operator: In - # values: - # - master - affinity: {} - - # A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). - # - # For example: - # tolerations: - # - key: foo.bar.com/role - # operator: Equal - # value: master - # effect: NoSchedule - tolerations: [] - - # A list of Kubernetes TopologySpreadConstraints, if required. 
For more information, see [Topology spread constraint v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core). - # - # For example: - # topologySpreadConstraints: - # - maxSkew: 2 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: ScheduleAnyway - # labelSelector: - # matchLabels: - # app.kubernetes.io/instance: cert-manager - # app.kubernetes.io/component: controller - topologySpreadConstraints: [] - - # Optional additional labels to add to the Webhook Pods. - podLabels: {} - - # Optional additional labels to add to the Webhook Service. - serviceLabels: {} - - # Optionally set the IP family policy for the controller Service to configure dual-stack; see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services). - serviceIPFamilyPolicy: "" - - # Optionally set the IP families for the controller Service that should be supported, in the order in which they should be applied to ClusterIP. Can be IPv4 and/or IPv6. - serviceIPFamilies: [] - - image: - # The container registry to pull the webhook image from. - # +docs:property - # registry: quay.io - - # The container image for the cert-manager webhook - # +docs:property - repository: quay.io/jetstack/cert-manager-webhook - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. - # +docs:property - # tag: vX.Y.Z - - # Setting a digest will override any tag - # +docs:property - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - # Kubernetes imagePullPolicy on Deployment. - pullPolicy: IfNotPresent - - serviceAccount: - # Specifies whether a service account should be created. - create: true - - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template. - # +docs:property - # name: "" - - # Optional additional annotations to add to the controller's Service Account. - # +docs:property - # annotations: {} - - # Optional additional labels to add to the webhook's Service Account. - # +docs:property - # labels: {} - - # Automount API credentials for a Service Account. - automountServiceAccountToken: true - - # Automounting API credentials for a particular pod. - # +docs:property - # automountServiceAccountToken: true - - # The port that the webhook listens on for requests. - # In GKE private clusters, by default Kubernetes apiservers are allowed to - # talk to the cluster nodes only on 443 and 10250. Configuring - # securePort: 10250, therefore will work out-of-the-box without needing to add firewall - # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000. - securePort: 10250 - - # Specifies if the webhook should be started in hostNetwork mode. - # - # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom - # CNI (such as calico), because control-plane managed by AWS cannot communicate - # with pods' IP CIDR and admission webhooks are not working - # - # Since the default port for the webhook conflicts with kubelet on the host - # network, `webhook.securePort` should be changed to an available port if - # running in hostNetwork mode. - hostNetwork: false - - # Specifies how the service should be handled. Useful if you want to expose the - # webhook outside of the cluster. In some cases, the control plane cannot - # reach internal services. 
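    # A minimal sketch for that situation, combining the hostNetwork and securePort knobs
    # documented above (the port value is an assumption; it only needs to avoid the
    # kubelet's 10250 when running on the host network):
    # webhook:
    #   hostNetwork: true
    #   securePort: 10260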
- serviceType: ClusterIP - - # Specify the load balancer IP for the created service. - # +docs:property - # loadBalancerIP: "10.10.10.10" - - # Overrides the mutating webhook and validating webhook so they reach the webhook - # service using the `url` field instead of a service. - url: {} - # host: - - # Enables default network policies for webhooks. - networkPolicy: - # Create network policies for the webhooks. - enabled: false - - # Ingress rule for the webhook network policy. By default, it allows all - # inbound traffic. - # +docs:property - ingress: - - from: - - ipBlock: - cidr: 0.0.0.0/0 - - # Egress rule for the webhook network policy. By default, it allows all - # outbound traffic to ports 80 and 443, as well as DNS ports. - # +docs:property - egress: - - ports: - - port: 80 - protocol: TCP - - port: 443 - protocol: TCP - - port: 53 - protocol: TCP - - port: 53 - protocol: UDP - # On OpenShift and OKD, the Kubernetes API server listens on. - # port 6443. - - port: 6443 - protocol: TCP - to: - - ipBlock: - cidr: 0.0.0.0/0 - - # Additional volumes to add to the cert-manager controller pod. - volumes: [] - - # Additional volume mounts to add to the cert-manager controller container. - volumeMounts: [] - - # enableServiceLinks indicates whether information about services should be - # injected into the pod's environment variables, matching the syntax of Docker - # links. - enableServiceLinks: false - - # +docs:section=CA Injector - - cainjector: - # Create the CA Injector deployment - enabled: true - - # The number of replicas of the cert-manager cainjector to run. - # - # The default is 1, but in production set this to 2 or 3 to provide high - # availability. - # - # If `replicas > 1`, consider setting `cainjector.podDisruptionBudget.enabled=true`. - # - # Note that cert-manager uses leader election to ensure that there can - # only be a single instance active at a time. - replicaCount: 1 - - # This is used to configure options for the cainjector pod. - # It allows setting options that are usually provided via flags. - # An APIVersion and Kind must be specified in your values.yaml file. - # Flags override options that are set here. - # - # For example: - # apiVersion: cainjector.config.cert-manager.io/v1alpha1 - # kind: CAInjectorConfiguration - # logging: - # verbosity: 2 - # format: text - # leaderElectionConfig: - # namespace: kube-system - config: {} - - # Deployment update strategy for the cert-manager cainjector deployment. - # For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy). - # - # For example: - # strategy: - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - strategy: {} - - # Pod Security Context to be set on the cainjector component Pod - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - - # Container Security Context to be set on the cainjector component container - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - - podDisruptionBudget: - # Enable or disable the PodDisruptionBudget resource. 
- # - # This prevents downtime during voluntary disruptions such as during a Node upgrade. - # For example, the PodDisruptionBudget will block `kubectl drain` - # if it is used on the Node where the only remaining cert-manager - # Pod is currently running. - enabled: false - - # `minAvailable` configures the minimum available pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). - # Cannot be used if `maxUnavailable` is set. - # +docs:property - # minAvailable: 1 - - # `maxUnavailable` configures the maximum unavailable pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). - # Cannot be used if `minAvailable` is set. - # +docs:property - # maxUnavailable: 1 - - # Optional additional annotations to add to the cainjector Deployment. - # +docs:property - # deploymentAnnotations: {} - - # Optional additional annotations to add to the cainjector Pods. - # +docs:property - # podAnnotations: {} - - # Additional command line flags to pass to cert-manager cainjector binary. - # To see all available flags run `docker run quay.io/jetstack/cert-manager-cainjector: --help`. - extraArgs: [] - # Enable profiling for cainjector. - # - --enable-profiling=true - - # Comma separated list of feature gates that should be enabled on the - # cainjector pod. - featureGates: "" - - # Resources to provide to the cert-manager cainjector pod. - # - # For example: - # requests: - # cpu: 10m - # memory: 32Mi - # - # For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - resources: {} - - - # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with - # matching labels. - # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). - # - # This default ensures that Pods are only scheduled to Linux nodes. - # It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. - # +docs:property - nodeSelector: - kubernetes.io/os: linux - - # A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). - # - # For example: - # affinity: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: foo.bar.com/role - # operator: In - # values: - # - master - affinity: {} - - # A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). - # - # For example: - # tolerations: - # - key: foo.bar.com/role - # operator: Equal - # value: master - # effect: NoSchedule - tolerations: [] - - # A list of Kubernetes TopologySpreadConstraints, if required. For more information, see [Topology spread constraint v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core). - # - # For example: - # topologySpreadConstraints: - # - maxSkew: 2 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: ScheduleAnyway - # labelSelector: - # matchLabels: - # app.kubernetes.io/instance: cert-manager - # app.kubernetes.io/component: controller - topologySpreadConstraints: [] - - # Optional additional labels to add to the CA Injector Pods. 
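    # For example (an illustrative label, assuming workloads are grouped this way):
    # podLabels:
    #   app.kubernetes.io/part-of: cert-manager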
- podLabels: {} - - image: - # The container registry to pull the cainjector image from. - # +docs:property - # registry: quay.io - - # The container image for the cert-manager cainjector - # +docs:property - repository: quay.io/jetstack/cert-manager-cainjector - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. - # +docs:property - # tag: vX.Y.Z - - # Setting a digest will override any tag. - # +docs:property - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - # Kubernetes imagePullPolicy on Deployment. - pullPolicy: IfNotPresent - - serviceAccount: - # Specifies whether a service account should be created. - create: true - - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - # +docs:property - # name: "" - - # Optional additional annotations to add to the controller's Service Account. - # +docs:property - # annotations: {} - - # Optional additional labels to add to the cainjector's Service Account. - # +docs:property - # labels: {} - - # Automount API credentials for a Service Account. - automountServiceAccountToken: true - - # Automounting API credentials for a particular pod. - # +docs:property - # automountServiceAccountToken: true - - # Additional volumes to add to the cert-manager controller pod. - volumes: [] - - # Additional volume mounts to add to the cert-manager controller container. - volumeMounts: [] - - # enableServiceLinks indicates whether information about services should be - # injected into the pod's environment variables, matching the syntax of Docker - # links. - enableServiceLinks: false - - # +docs:section=ACME Solver - - acmesolver: - image: - # The container registry to pull the acmesolver image from. - # +docs:property - # registry: quay.io - - # The container image for the cert-manager acmesolver. - # +docs:property - repository: quay.io/jetstack/cert-manager-acmesolver - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion is used. - # +docs:property - # tag: vX.Y.Z - - # Setting a digest will override any tag. - # +docs:property - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - # Kubernetes imagePullPolicy on Deployment. - pullPolicy: IfNotPresent - - # +docs:section=Startup API Check - # This startupapicheck is a Helm post-install hook that waits for the webhook - # endpoints to become available. - # The check is implemented using a Kubernetes Job - if you are injecting mesh - # sidecar proxies into cert-manager pods, ensure that they - # are not injected into this Job's pod. Otherwise, the installation may time out - # owing to the Job never being completed because the sidecar proxy does not exit. - # For more information, see [this note](https://github.com/cert-manager/cert-manager/pull/4414). - - startupapicheck: - # Enables the startup api check. - enabled: true - - # Pod Security Context to be set on the startupapicheck component Pod. - # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - - # Container Security Context to be set on the controller component container. 
- # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). - # +docs:property - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - - # Timeout for 'kubectl check api' command. - timeout: 1m - - # Job backoffLimit - backoffLimit: 4 - - # Optional additional annotations to add to the startupapicheck Job. - # +docs:property - jobAnnotations: - helm.sh/hook: post-install - helm.sh/hook-weight: "1" - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - - # Optional additional annotations to add to the startupapicheck Pods. - # +docs:property - # podAnnotations: {} - - # Additional command line flags to pass to startupapicheck binary. - # To see all available flags run `docker run quay.io/jetstack/cert-manager-startupapicheck: --help`. - # - # Verbose logging is enabled by default so that if startupapicheck fails, you - # can know what exactly caused the failure. Verbose logs include details of - # the webhook URL, IP address and TCP connect errors for example. - # +docs:property - extraArgs: - - -v - - # Resources to provide to the cert-manager controller pod. - # - # For example: - # requests: - # cpu: 10m - # memory: 32Mi - # - # For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - resources: {} - - - # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with - # matching labels. - # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). - # - # This default ensures that Pods are only scheduled to Linux nodes. - # It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. - # +docs:property - nodeSelector: - kubernetes.io/os: linux - - # A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). - # For example: - # affinity: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: foo.bar.com/role - # operator: In - # values: - # - master - affinity: {} - - # A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). - # - # For example: - # tolerations: - # - key: foo.bar.com/role - # operator: Equal - # value: master - # effect: NoSchedule - tolerations: [] - - # Optional additional labels to add to the startupapicheck Pods. - podLabels: {} - - image: - # The container registry to pull the startupapicheck image from. - # +docs:property - # registry: quay.io - - # The container image for the cert-manager startupapicheck. - # +docs:property - repository: quay.io/jetstack/cert-manager-startupapicheck - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion is used. - # +docs:property - # tag: vX.Y.Z - - # Setting a digest will override any tag. - # +docs:property - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - # Kubernetes imagePullPolicy on Deployment. - pullPolicy: IfNotPresent - - rbac: - # annotations for the startup API Check job RBAC and PSP resources. 
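    # Helm applies lower hook weights first, so the "-5" below creates the RBAC objects
    # before the post-install Job itself (which uses hook-weight "1" in the jobAnnotations
    # above); both are cleaned up again by the hook-delete-policy.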
- # +docs:property - annotations: - helm.sh/hook: post-install - helm.sh/hook-weight: "-5" - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - - # Automounting API credentials for a particular pod. - # +docs:property - # automountServiceAccountToken: true - - serviceAccount: - # Specifies whether a service account should be created. - create: true - - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template. - # +docs:property - # name: "" - - # Optional additional annotations to add to the Job's Service Account. - # +docs:property - annotations: - helm.sh/hook: post-install - helm.sh/hook-weight: "-5" - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - - # Automount API credentials for a Service Account. - # +docs:property - automountServiceAccountToken: true - - # Optional additional labels to add to the startupapicheck's Service Account. - # +docs:property - # labels: {} - - # Additional volumes to add to the cert-manager controller pod. - volumes: [] - - # Additional volume mounts to add to the cert-manager controller container. - volumeMounts: [] - - # enableServiceLinks indicates whether information about services should be - # injected into pod's environment variables, matching the syntax of Docker - # links. - enableServiceLinks: false - - # Create dynamic manifests via values. - # - # For example: - # extraObjects: - # - | - # apiVersion: v1 - # kind: ConfigMap - # metadata: - # name: '{{ template "cert-manager.name" . }}-extra-configmap' - extraObjects: [] diff --git a/firefly-iii/catagorize-ai-sealed-secret.yaml b/firefly-iii/catagorize-ai-sealed-secret.yaml deleted file mode 100644 index ef20a22..0000000 --- a/firefly-iii/catagorize-ai-sealed-secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: firefly-iii-ai - namespace: firefly-ns -spec: - encryptedData: - FIREFLY_PERSONAL_TOKEN: 
AgDimybAXS9nGY5qbE+YXcWklHXrl6aOTl3dC2pZGEQ5abwbE7H7vsccOMJWPhfbT/x0/gK63qEQeucu3CB8va+QKRF82DE1h9TNe++hjneKl51htDAG8wnzpyLZfQIRMrmYejjC2t1ID3ti5JXVwfzrMwge9bsx5FLSBZQJbTy74gNRBU/q+zz2bvcRXKmQS5kocUBsmJXGBKYYreVd8qAA1eEb13YQYllZ+iJXFmJqYoK/pkYuQO1ClzZHLMA1AIWVughhvQeOmvfNXxm7hMopTHMMRdeXFuGnv1J45ktE0YYInnlGrJoJY3hjRNlWy+fQgqzA0IfcIVF58w2A29pL5oXr+J5v5grAr6gm60Xm+P/c8uRMQeQ9Yv6W3L+mG9ECr9DhNCvAdadGAyK1oVYUoJ1AISMLTbGkQ98uNTG9ExG9zcoYJ4teXgTmqDN+HP5wRhDdz00ELIedxOhtXK0mhT1lEuZN3E6nyid/knRX5aGY0vts3V5odJvfur4xRQKlwegTR4lAhPdn7rW+LAeWrl+3pfyQnsR8gkc8/8kgNLcqlefX/2K/tt9vsGbb6H9ciBdrrzpLja/Ml7v/hAJSPqFEZSz1DLaI3TIEq8xUmjaJu9RlqpTh8gi9oCYDIVVdjbphpWeL+O8w5hHuUgaX0EcSDQPI9Nsmi4Z643CbqhWa6lvL07FvwQ5AyjA4xMOhEFi/2pB5lUu46NFa3ZzMw3UYynLeKLVF9Tgs8G/UcWWSJvkLu2hEc/EVavjtj6iTw93/qxPS6klDRjFX46FUr1DMJBTDcYhsEH/PEcIwJGeafgCOdVoVfAc+eJknJ2dDSBxNLkxTMgM8SenUr7kdmm844qWR9H6DzfNOtbn+uaOnpeHvhU4qxu+/UzqAT9/HCuds68XAGvhRltHrphPr6LgVWjPi+BrdIghpiAY5LtNWhgeQLyrQ2Due3Dt4tUL8wC4/xi9hYltx/AynCzNIwzs76/eQdl1VYDjToXDWye0NENqOCL8GK59V+1rXflRRX2q4iR72zttBgjpbWCNZ6wNnWWbQdH3dHxBvJ7Db736fRQtAoEC4rdKTbVw8A1W8X1Zn6Mt9SIYY+fssNF5aNb0YSNzNYrthIfYmtG/TWKkOR9Z5lvzdeTfzlS34y9xYEF1IAmjRC2bUywp7RNHRzMrgENYp3kyq5F1FruHkYwfX9pRQX1ZqyjcC10Dh/CB4pj2ivygqaBEwdqQIETgVzRQKYpDcRPJi9lyd2oaIcI4k+v47ZJX32ZJLDJBVUf3w6pwFVbiuuf5cdiWo+CvzsBmvGNtwa2Nn/K0b29piPlNBSvlFDSixSW2egmmLiSujYU5RjZy07bbPf9mFHdRNjiHB4GRUHafeL6BNJ7+4mdMIzVMPgjmQ2HfwVwaup7JpTY2D5vnd5cJwKreyKfMigg9w7l9fFx7WrDp1u2qiZtRQ07Xd6LsEaLkQDrKGgyuqu33BgOsOX82F5RPpkCYezeo0D4m3mLYMLOhp1+U3Kpt+OlvmuXPrWoJeVzwtGRoiv2FBk3Kz471wl0qwxYZxXom1I0XojVpJf8wtTtY9zI0gT7U1RZP2YIKW87dHAOWmM96miOLbsunCejRIFiRFEpJkwlnscsAn7woknl4hYj2oW7k2imO7mSwcyZiAFx+CitNqvXxoX9foMdJ3G7uXFKgqz0w1FEm0hSqDzut9DyHPunovLlMk+piBHl2eFwEtG6ODvx8SQBZ4DEJF30hmGL6NLmJiTNANDVhBgnmuiIvZRonj9gzHuCic9LUnf6DMIcMp3KLTBcpEPE/PLihmscswTuRpzF1TliCuDjhjVs2HyEx3WisY0w76w/YX/s4ipCInHmXV7HeGL0+RNAjlVGgAhRrNbUuAn0BwTDm/PMQvV/XJmwjd0964GkpK1lwBcms979WJzN+Gg6sTNa5Y9xRzm6p5QiXj8M7P8QzKHrZxSu+lMqYGtipONcBKb6pWk50Q0YGjSDkJ9lUymj3iEg5y5A== - OPENAI: AgAK3CS9m5SHyxnyk8tP7MlKu2XCLd+Qt8xBuYiFO/PtfxcE+j8MV1haSFUdEwcoySpkBYlctifqJc8uaBdmW1aewYhv6jZIXynScnEj4VZUe0sM1TGasuKduDWNgZ8jDRS+2fQmfzZh67zvtBVHHsiagAL6i+GHsjD3i9Fj2EoNbKueGgYgc7sMzAAoTcaEaP9SiTEzhujYb2HhRY7IoJkvmU2yyjOeFc7n3l6txkA4UsJ+c9wUblCTX4sXnU/pqTA2UAmWCfWPqYlXlKGsqZEPX7q+HcrR6H8TmiCBb/RIa5cZM87WdLTUxT5U85gcoBjj+Q7mAQlLAsfPTyncJgZSNBsNKm5UCrz7GyWr4NCnZonQIzE0GjlhQnP5ERq1+VjWdxTjsH1/QUEjjaJgq4JiJQV1OvRAemhiGXF7m8grqopwYMjBtrHt7tuIqDVNZhx8lWhZ/p21f8zvluR8WREEdmff/wsFNJkIbYJGwpwy8VpF4hiuqkcXSPGcz6OdBfY5sktUYwQNkRqEQV4wDLjU23hngL9P47dXY5Mx81AMvyD49V6DnRH8az1zaxcexvyC/m+4UEqm4Aw8REIf5sogH4Kpu2pKXW/ZrDMI27zyjammG9EF3AGo9Wi2ND8JJH0j4GJXu2auGDrENdJZaM6qDVauYrDFMVw9jj+ZlMLqUd797qMEYDP64ai4VjXXAj2Q4qS4F6wGu29jrHQK8tWeQx5hxl0pwlfUd5i7NyUjGczGaniXelpmQ3Lcpzp/HIXQd+4= - template: - metadata: - creationTimestamp: null - name: firefly-iii-ai - namespace: firefly-ns diff --git a/firefly-iii/catagorize-ai-service.yaml b/firefly-iii/catagorize-ai-service.yaml deleted file mode 100644 index e519457..0000000 --- a/firefly-iii/catagorize-ai-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: catagorize-ai-service - namespace: firefly-ns -spec: - selector: - app: firefly-iii - ports: - - protocol: TCP - port: 3000 - targetPort: 3000 - type: ClusterIP diff --git a/firefly-iii/catagorize-ai.yaml b/firefly-iii/catagorize-ai.yaml deleted file mode 100644 index 1bb0939..0000000 --- a/firefly-iii/catagorize-ai.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apps/v1 -kind: 
Deployment -metadata: - name: catagorize-ai - namespace: firefly-ns -spec: - replicas: 1 - selector: - matchLabels: - app: firefly-iii - template: - metadata: - labels: - app: firefly-iii - spec: - containers: - - name: catagorize-ai - image: ghcr.io/bahuma20/firefly-iii-ai-categorize - ports: - - containerPort: 3000 - env: - - name: FIREFLY_URL - value: https://money.clortox.com - - name: ENABLE_UI - value: "true" - - name: FIREFLY_PERSONAL_TOKEN - valueFrom: - secretKeyRef: - name: firefly-iii-ai - key: FIREFLY_PERSONAL_TOKEN - - name: OPENAI_API_KEY - valueFrom: - secretKeyRef: - name: firefly-iii-ai - key: OPENAI diff --git a/firefly-iii/helmrelease-firefly-iii.yaml b/firefly-iii/helmrelease-firefly-iii.yaml deleted file mode 100644 index 8bb16d5..0000000 --- a/firefly-iii/helmrelease-firefly-iii.yaml +++ /dev/null @@ -1,173 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: firefly - namespace: firefly-ns -spec: - chart: - spec: - chart: firefly-iii - sourceRef: - kind: HelmRepository - name: firefly-iii - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: firefly-iii - values: - replicaCount: 1 - - image: - repository: "fireflyiii/core" - pullPolicy: IfNotPresent - tag: version-6.1.6 - - imagePullSecrets: [] - nameOverride: "" - fullnameOverride: "" - - persistence: - # -- If you set this to false, uploaded attachments are not stored persistently and will be lost with every restart of the pod - enabled: true - storageClassName: "longhorn" - accessModes: ReadWriteMany - storage: 20Gi - # -- If you want to use an existing claim, set it here - existingClaim: "" - - # -- Environment variables for Firefly III. See docs at: https://github.com/firefly-iii/firefly-iii/blob/main/.env.example - config: - # -- Set this to the name of a secret to load environment variables from. If defined, values in the secret will override values in config.env - existingSecret: "firefly-iii-secret" - - # -- Set environment variables from configMaps or Secrets - envValueFrom: {} - - # -- Directly defined environment variables. Use this for non-secret configuration values. - env: - DB_HOST: postgresql.postgresql-system.svc.cluster.local - DB_CONNECTION: pgsql - DB_PORT: "5432" - DB_DATABASE: firefly - DB_USERNAME: firefly - DEFAULT_LANGUAGE: "en_US" - DEFAULT_LOCALE: "equal" - TZ: "America/New_York" - TRUSTED_PROXIES: "**" - APP_URL: "https://money.clortox.com" - AUTHENTICATION_GUARD: "remote_user_guard" - AUTHENTICATION_GUARD_HEADER: "X-authentik-email" - - - # -- Create a new Secret from values file to store sensitive environment variables. Make sure to keep your secrets encrypted in the repository! For example, you can use the 'helm secrets' plugin (https://github.com/jkroepke/helm-secrets) to encrypt and manage secrets. If the 'config.existingSecret' value is set, a new Secret will not be created. - secrets: - env: - APP_PASSWORD: "CHANGE_ENCRYPT_ME" - DB_PASSWORD: "CHANGE_ENCRYPT_ME" - - # -- A cronjob for [recurring Firefly III tasks](https://docs.firefly-iii.org/firefly-iii/advanced-installation/cron/). - cronjob: - # -- Set to true to enable the CronJob. Note that you need to specify either cronjob.auth.existingSecret or cronjob.auth.token for it to actually be deployed. - enabled: false - - # -- Authorization for the CronJob. 
See https://docs.firefly-iii.org/firefly-iii/advanced-installation/cron/#request-a-page-over-the-web - auth: - # -- The name of a secret containing a data.token field with the cronjob token - existingSecret: "" - - # -- The name of the key in the existing secret to get the cronjob token from - secretKey: "token" - - # -- The token in plain text - token: "" - - # -- Annotations for the CronJob - annotations: {} - - # -- When to run the CronJob. Defaults to 03:00 as this is when Firefly III executes regular tasks. - schedule: "0 3 * * *" - - # -- How many pods to keep around for successful jobs - successfulJobsHistoryLimit: 3 - - # -- How many pods to keep around for failed jobs - failedJobsHistoryLimit: 1 - - # -- How to treat failed jobs - restartPolicy: OnFailure - - image: - repository: curlimages/curl - pullPolicy: IfNotPresent - tag: 7.81.0 - - imagePullSecrets: [] - - podAnnotations: {} - - securityContext: {} - - podSecurityContext: {} - - resources: {} - - nodeSelector: {} - - tolerations: [] - - affinity: {} - - podAnnotations: {} - - podSecurityContext: {} - # fsGroup: 2000 - - securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - - service: - type: LoadBalancer - port: 80 - - ingress: - enabled: false - className: "" - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - chart-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - - resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - - nodeSelector: {} - - tolerations: [] - - affinity: {} diff --git a/firefly-iii/sealed-secret.yaml b/firefly-iii/sealed-secret.yaml deleted file mode 100644 index 68e02b0..0000000 --- a/firefly-iii/sealed-secret.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: firefly-iii-secret - namespace: firefly-ns -spec: - encryptedData: - APP_KEY: AgCSE+/lOiQJV3HE/UiAzeIXc2hgOMY95RSUO8Q22sK+R6WdpLcc0/gkxhOYtAdFGp1r9TIQQcWcbR2cEZ84GsnhoNJxh2vgaME+g5m0EgzQouczW/GTR56qfu/P+zp/IlIjepJVeAhVAOAInLDn/XUJf6kXyfWG7kHLhB/CHI6P0VC1RcHXAjkArDmpn5wOwDzVSMOCWszd6BXjl/LacRPkC58Oj4GCIlEqXo1meBJ7Lc6IG+x7VSjNv19xKVFqULt/Aep2YowIf3TPlQDhkv39Rro434dzm9q/M88JndE6sOqw1MTO+QqPPSUKPDSTWwD72FV4rmVkeXiTKtvMLlAWywQIOFL7ZIVJ50DjYgWV/tx3xz81lnfgwtFa6cT1OwUOfLrAEAe4iF/3hzgY1dfTMB7eKbY+XGpGvrcqAImfcBfMwc2wqSWj7FA3V5qOwrfeObDE96nvOuDyUqgPgzyyG6JZwkM1R9pgTABbG3sEkbtyxLQfJftooKtQ3obDsP56aS9uzfZ0rsZpT6Ek7fNd9dqG0XEqDOjNgOxW8aCJBq+Uy9Pbvx9e0flBwXJM0FV963ql7b4i5vrG5IuBC/CC5t1qPwaQPd/fMARjF8hIjhcfF8lnwFzT5vYRHIaky68U2u7gUx91vkiM5X3W8G3N4TX9kZI8QKr0pHcMy3zdM4ou95qUrhr6s/BYrULKjtZ4jucVRoX1PXa+D/laa6qk0Di0iw9S+KMdni5XK+If5A== - APP_PASSWORD: AgBPGz6DZ4R5L/MnGFbEu91VtBnuT4XiV1h2LGCwg5XTtlPcaB4EgNTQqFihAqt1GbHjGnJeaNMLgTVRXUuVJgklZ7Si4MGeBOhcUaIkuxa/7/p9I0n1mHYKlic+c7oRUMPYEbB6R2xjLeFNALIuCr6sQyX8JcIYlYh+arWqlWgQ7MRVSZW3VaWSPiQGMgRUIequ4CAS6exjzQUwZwwP5yGqKJcV7tBoPKDeQ5lNL2BqcG6xtk1Uin7M/dEYa9hEXBDD6eZrc9IzEfuxIOJiPvJvteXvCFeX6MltA1rEbYorub1p1u+m9ROFBx7K74iR+jJt3Fw1/LP319x3lXS3G7tg0TCwxLwytwRjLfPHlYiHRosUk9n8K+sluHqReyKnDN87DEDvGgKYiToF6oXq67rLd/fQlFYLMy86sIrAIQYEAjrDNvkGVOR557GVMR8XaEvxz2nHBLKuNIXtY4Shr7vUABHf1ove2MgUPnFzNd6UTLnud/YSpkXunlYtyu9zaf1xhJq4a0AXWK9HJpyMoZz7CG5aJTT4+PGpeq3dwMNqcjmX8WmgYIpD8JwxCu09l8cmb5HwxhhV2M0Qdn0X8fo2HyR+hhXN/5P2qWmEHSOXkz7I6nefDAco93PsDQkg+1IAc4IyrmP4HJgAVwh8TD3/OFcpwlO8rG6CTC+hgI/hcnyEZCkDfRczRLUlQ3fG5HWKnm0yX/GP4A+2j4kh8FpMTKsP9U48U6a93Mmp3trVkg== - DB_PASSWORD: AgB1oty1IbWAVgYNDjaIS+ATAccxhUEoKA0zjwwbYZyNtU+Me2Z3vRPb0n4Sqm23VglNx/AGYqIf0t9ewctlib0FbU404mX8IYKMS1/+0VhoFymrLNxlTR0CTlcatOmZBbwvbqh5esEyZ9LVglr1TQWS7p5KNiJB+6b8H9tuTxraHaMBMZhDTdoAhIxyzcSvaKTmJSCPvR38q06ggNeeFIE72hh4v97diJ50h78/P4ScoG8CYbuinQpND3Jg07GoAvhdpZk1PgAZQwSeWBBECmov6rGKmJuCAx5YzReGXQOXpUYk+K3YR5mgeEIGjvABoIoHCmYoMP8T56IIX1uZLGQFpMqIbqnJ828i6qy9gvO7Qxng8zIO4WO4pNZNo+dQw49Ri087TQpT9fq17+wykDj2zvDpvasqh3bc3K0NbaJQo3F0hFzZhtw7ZdQFQ9TrKD0oG1fNscP4jvvvXIKJ9IDEghPUmd+w3C4stIwsICgpUTGTytHQ1lUzL6OebBiQjXWablwEGbtcFWhqAx07esuFpe2hx4+6HDNpEG5MH7T2/IUrwBS1vrlD4OgzT/TKT2bwjuy62ralrr7CmPcbcqax7pfkpnjK3kDna85xz4JIC4/nguqVqztjTkk7fSxDckAFlp/WLvjwvG7byU3gbQX1Y1X9O8bELvLGA+QuaZ3mMYJNEtyi3lx/RVuiO139PQOfVwrK4jsTjhxp5xxn0dk02haVWlDBg15Mb85D8mn9sA== - STATIC_CRON_TOKEN: 
AgBkzS4jR9phFQ/LLfwETTQM30sbLO63nNVkdUcmoZJQb85iuNtn9Ji7ROdIFyFCgWeBygOPGtrSXYlnFf3YIfdM+tXOJ9YtYRX78pqCr4mm84ZpNAir2VTMRLQDbrtqejv6LQNPkrRPpC+Xia/nUiqoGo2RFi66ypheegH7DNW1gHHxZibpiFvrwxudYj1q5rFNvw3NMwb6sJZ6FfJyKhMoKlQIJ1P+H2YoqAWU29hFiP1ZCZJ4j8+2cfaKiImlHsmGuTkuZuUi+0F8vCSUncSOtNMXJpD3XYFqexcefalls7OZJ/U4gD8LAS6kMZwBqRsCgKFKXTSpQwFW8L01xXrb6NQyPdnItO1IRg/65BJQeFdSMzgfAGec22+MBRNbJc9y2mw3iKUxPf42y+Gij/8Th9kLA9pmX8NYcBaIBCXbv3B/9O8FEDsf62XaREG1mwU3wEP1HZ6YeWEeRCR6+0MCGUWrVQK6pAS8uWQPDz1LkoKqjnMsa5OsHnmi211KUAqOV7vDOGu0gKF3t035ojfoGiHO8XgLLd2viHH0t5iaRWtWLV46UuA4uLgIwrQtw+0IQnLfb5xaIQet9zQacBIi7t2eelJvvgkdyAJ2lqexQMrHzicKjvfJQ1BbHtK1Lsisw77jvARF71IywC18fHmr0e+zK1obSva3+dPthu36xCCuoJiZ2E9GmvMkveNA0qMQYavct497T7w0iR71p3k80hOsVGIRnqUaTGifzNJNfg== - template: - metadata: - creationTimestamp: null - name: firefly-iii-secret - namespace: firefly-ns diff --git a/homarr/homarr-deployment.yaml b/homarr/homarr-deployment.yaml deleted file mode 100644 index 1f76994..0000000 --- a/homarr/homarr-deployment.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: homarr - namespace: homarr-ns -spec: - replicas: 1 - selector: - matchLabels: - app: homarr - template: - metadata: - labels: - app: homarr - spec: - containers: - - name: homarr - image: ghcr.io/ajnart/homarr:latest - ports: - - containerPort: 7575 - env: - - name: EDIT_MODE_PASSWORD - valueFrom: - secretKeyRef: - name: homarr-edit-key - key: edit-key - volumeMounts: - - name: homarr-config - mountPath: /app/data/configs - - name: homarr-icons - mountPath: /app/public/icons - - name: homarr-data - mountPath: /data - volumes: - - name: homarr-config - persistentVolumeClaim: - claimName: homarr-config-pvc - - name: homarr-icons - persistentVolumeClaim: - claimName: homarr-icons-pvc - - name: homarr-data - persistentVolumeClaim: - claimName: homarr-data-pvc diff --git a/homarr/homarr-pvc-config.yaml b/homarr/homarr-pvc-config.yaml deleted file mode 100644 index 9690f57..0000000 --- a/homarr/homarr-pvc-config.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: homarr-config-pvc - namespace: homarr-ns -spec: - storageClassName: longhorn - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 512Mi diff --git a/homarr/homarr-pvc-data.yaml b/homarr/homarr-pvc-data.yaml deleted file mode 100644 index e77d554..0000000 --- a/homarr/homarr-pvc-data.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: homarr-data-pvc - namespace: homarr-ns -spec: - storageClassName: longhorn - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/homarr/homarr-pvc-icons.yaml b/homarr/homarr-pvc-icons.yaml deleted file mode 100644 index f6cb304..0000000 --- a/homarr/homarr-pvc-icons.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: homarr-icons-pvc - namespace: homarr-ns -spec: - storageClassName: longhorn - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/homarr/homarr-service.yaml b/homarr/homarr-service.yaml deleted file mode 100644 index 86b3b74..0000000 --- a/homarr/homarr-service.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: homarr-service - namespace: homarr-ns -spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 7575 - selector: - app: homarr diff --git a/homarr/sealed-secret.yaml b/homarr/sealed-secret.yaml deleted file mode 100644 index 27f3df2..0000000 --- 
a/homarr/sealed-secret.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: homarr-edit-key - namespace: homarr-ns -spec: - encryptedData: - edit-key: AgBnP6HGyQv63BuvrbO9JWdDu/aS7GadN+6dJ/4uBziMT6HxvBBbunrO5ZROHD1Hl9F3BSQs1GECkzYTQDVd5Hji93L39InCpo3+G0GGg0m6BH8j5WarheWS4837WynOUOfHncCCtXzG9iRqFZAUKE3xYtbNMULXXBiMyY625aonF3Agqz9MAtz4Dv56N5cPE4C4Ck0VPi4POQCP6RezHteCktlBBwpbPAem61mUUx+P+V7hEX3/TItl0j4HOvC6ttbHtVLPUwvHHdBcH/0stKhPben4Hnp7qLZe1A16+RCAbaAYF2TS9JbrQsCwtDq8mkQeAQg1sU0S1092b9OZKk9s1QpGGlKuH7G1iwQcaTpdVIj57QVIOPNoGWuuOiVzWe8hf+b1jITEJNT7VYWmBYcIZjLakYFr8zbkWPlvinkTv0GHo8uBOWsqLF+w3ekYk9HNSJ6dFEBpeMpvllXcbKnggb222otyqJ2Z9Kh2svIBqq2+0VulhFtEfjXFYLOMHqi+ZUz/MkPuREevDQXjwJTBoHD5OaB1OFRo6Kp1jyLogkTnUO/j2qv5DZDkofE0ha4PR9/9olqoYzTfs0IOa2+yUQZJ0OJ5dQbrnxNqbUWjCrVn6xVeCqKrZzsK+96wJVBgiPBzruO0y5ZYreNyW0GdBDS1ubvkkv8eMKbVOM+GTEtC1AburtCwuVYwOxgOJ31zudWmDzqEnrDK1Qp91eyzk4W2J+TRd52fxLQUukq9SA== - template: - metadata: - creationTimestamp: null - name: homarr-edit-key - namespace: homarr-ns diff --git a/immich/immich-machine-learning-deployment.yaml b/immich/immich-machine-learning-deployment.yaml deleted file mode 100644 index 2a959e1..0000000 --- a/immich/immich-machine-learning-deployment.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: immich-machine-learning - namespace: immich-ns -spec: - replicas: 1 - selector: - matchLabels: - app: immich-machine-learning - template: - metadata: - labels: - app: immich-machine-learning - spec: - containers: - - name: immich-machine-learning - image: ghcr.io/immich-app/immich-machine-learning:v1.109.2 - env: - - name: UPLOAD_LOCATION - value: /usr/src/app/upload - - name: DB_HOSTNAME - value: postgresql.postgresql-system.svc.cluster.local - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: immich-secret - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: immich-secret - key: password - - name: DB_DATABASE_NAME - valueFrom: - secretKeyRef: - name: immich-secret - key: database - - name: REDIS_HOSTNAME - value: redis-master.redis-system.svc.cluster.local - volumeMounts: - - name: model-cache - mountPath: /cache - volumes: - - name: model-cache - emptyDir: {} diff --git a/immich/immich-microservices-deployment.yaml b/immich/immich-microservices-deployment.yaml deleted file mode 100644 index 620022d..0000000 --- a/immich/immich-microservices-deployment.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: immich-microservices - namespace: immich-ns -spec: - replicas: 1 - selector: - matchLabels: - app: immich-microservices - template: - metadata: - labels: - app: immich-microservices - spec: - containers: - - name: immich-microservices - image: ghcr.io/immich-app/immich-server:v1.109.2 - args: ["start.sh", "microservices"] - env: - - name: UPLOAD_LOCATION - value: /usr/src/app/upload - - name: DB_VECTOR_EXTENSION - value: pgvector - - name: DB_HOSTNAME - value: postgresql.postgresql-system.svc.cluster.local - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: immich-secret - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: immich-secret - key: password - - name: DB_DATABASE_NAME - valueFrom: - secretKeyRef: - name: immich-secret - key: database - - name: REDIS_HOSTNAME - value: redis-master.redis-system.svc.cluster.local - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-immich-secret - key: REDIS_PASS - volumeMounts: 
- - name: upload-volume - mountPath: /usr/src/app/upload - volumes: - - name: upload-volume - persistentVolumeClaim: - claimName: immich-library-pvc diff --git a/immich/immich-microservices-service.yaml b/immich/immich-microservices-service.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/immich/immich-pvc-library.yaml b/immich/immich-pvc-library.yaml deleted file mode 100644 index 39ee66f..0000000 --- a/immich/immich-pvc-library.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: immich-library-pvc - namespace: immich-ns -spec: - accessModes: - - ReadWriteOnce - storageClassName: longhorn - resources: - requests: - storage: 100Gi diff --git a/immich/immich-server-deployment.yaml b/immich/immich-server-deployment.yaml deleted file mode 100644 index fa2e044..0000000 --- a/immich/immich-server-deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: immich-server - namespace: immich-ns -spec: - replicas: 1 - selector: - matchLabels: - app: immich-server - template: - metadata: - labels: - app: immich-server - spec: - containers: - - name: immich-server - image: ghcr.io/immich-app/immich-server:v1.109.2 - args: ["start.sh", "immich"] - ports: - - containerPort: 3001 - env: - - name: UPLOAD_LOCATION - value: /usr/src/app/upload - - name: DB_VECTOR_EXTENSION - value: pgvector - - name: DB_HOSTNAME - value: postgresql.postgresql-system.svc.cluster.local - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: immich-secret - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: immich-secret - key: password - - name: DB_DATABASE_NAME - valueFrom: - secretKeyRef: - name: immich-secret - key: database - - name: REDIS_HOSTNAME - value: redis-master.redis-system.svc.cluster.local - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-immich-secret - key: REDIS_PASS - volumeMounts: - - name: upload-volume - mountPath: /usr/src/app/upload - volumes: - - name: upload-volume - persistentVolumeClaim: - claimName: immich-library-pvc diff --git a/immich/immich-server-service.yaml b/immich/immich-server-service.yaml deleted file mode 100644 index f1acc8a..0000000 --- a/immich/immich-server-service.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: immich-server-service - namespace: immich-ns -spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 3001 - selector: - app: immich-server diff --git a/immich/redis-secret.yaml b/immich/redis-secret.yaml deleted file mode 100644 index 90421ef..0000000 --- a/immich/redis-secret.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: redis-immich-secret - namespace: immich-ns -spec: - encryptedData: - REDIS_PASS: 
AgA87rwcuMmDmgvDRl6pcObFFNBPKSH1qCkXUFgIqB/jX/ursxPgP+5f9ANY7PZjZTJ3QSAenJKPKUu9ER5B04o9b09EIcSpTQ0eVQRl6jwMRRzCbFWedb1bsNPuNyQBaf7IhaLshfQPSsjamp4oAaczLjbQPs/musn/3TUYVThIdgWBltv9i/12+BkbA98sS3gsMVWyP+cCcVQ+mMTGNsLZbxP1XC50yAAWifqJk6NbT+m9CA1wnesgegyr1W7KUGxudKnRA7iaGiP+fC+LbLIbD63tkme6/65b9x5qXZLM9qpiBEX+Yrv7YTn+ZJ94KwMnDjV8Y3Izom4etOawnLaRIIal/PGJPjSLE+PqtVRKXpTO8I3ExKSHb3MfLpfqTQ24N1yoNOnYu6dv2Rhd0Q9lMA6RBX4XUfsjYxHwIWyN1HhdAkbAS+ZqIlcnzT/rVIkkLcU/3/2Ptjj1IRDHFZplibUTbmkiKBvSDeOWDDRXC0FPvMegcfv2mYXY03W70N1uW39JVd0hcDhMxVaaW7yB7rmNEdOpFmpSPBScNtJj7bjEkAQCqXfqogclPs7FJOkrEJKK92Mon8ZMRdeD7GAbh4UqiRIe/SnjD2PsxWKDIMX3uqHN4PpxtsI5F3cY8mQNLG9nP4QzS5b8uU3vfJ4aSX2WpY7UhCXZ1ZuZDMNUDyQ9ULNcFh0FAkB3KzFi35Kqlxf6CsiY2pkxmtHm4w1WJkq09n2iNlsORJayzwDu6Q== - template: - metadata: - creationTimestamp: null - name: redis-immich-secret - namespace: immich-ns diff --git a/immich/sealed-secret.yaml b/immich/sealed-secret.yaml deleted file mode 100644 index b8d3ad1..0000000 --- a/immich/sealed-secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: immich-secret - namespace: immich-ns -spec: - encryptedData: - database: AgA+Vgab29fZ+NPF1PxzvcT3StAlEiOOKO77tYH+IgfKhdK7wTP4q+OVdV6gWPahK1ssZ8lPISml1HDMPx/IIlCYHmp1xi+wtoOgvyOGq5/8czupMQ4dLwiMVWFyRnCUm94119dCA9KImIqyhrNZ/FebqrcqvykI3h8/XDGCZujjMlHhnhRSUF3AohL3cW72tnZkDeSKebp1Mkmi0LEij2v0/+dZXuIEsfLPVHgxJKvCfPX7ND3TigBlFsa1VQOSZY19MI283rS9keqX0pFP+h0LAT6iGw/4p9fOjVYPNZySVn/z/XXcxnKjO477edJp9TGb+xd1m/kSmUhKF2w58jkKoZMlUwwCxteh9H1zj9rHMQfSmVG+tg9j5WoSsfIaWbDIFIf91l07XSwa8MGJ91NE6nvHEgf7C/OtZ52SjHTKEielLHsvTPRn2lIi14P9tMadI1z11POTf416CIcB2fXzuu619FHARSJseBpBLYwPM5pSpF0XKqTl7mW0kypa46kikjGou6CuJWhrFkh8Yqpth6hfsIV0BkLxXUpoWW9/dMQztfnuB7OvogNUJRTn+g9tzGLyY5bWddokV9s6uxyyaDAi9wPe48HRhX6bGwOgEPdprV5VRSuXu7A2g2YGYsxvvsEr9dXZA3rY9dW63wAzIhydxO8i0+9JHd9CMKohj60S5Llh402p4fDm3JIXchpeNwJzyo4= - password: AgC68pWUzY4eghLcYSxEkwVtBL7BlQ8ytG11hk8NuGcPK+B9kA0VFtw5gFTYMIb0UL95O0BN/L7A6O7oXZm6skWlwOaYmUUOhdCnws1vRA7RamA+gWiT6qV+aFVdeiWLm2pgTdwquqB/Ky2/K3FF2tLoA2Gmp+uGGbet8txMb5RlCWA5jdb6xqsszCFu8NKpcb85kaRtBAP1AzXwWWnP1E+ITM8FjsL1QXlwkxra/uChN99w6Sc66GR8VUb3M3lmtv26AX2hHhqOeWNNJzIbWpmThS+DuluopF4UF+rEixTnR5jBtl8Let6ZA/UwgZ0sfBOijFLyoSFK0ly0f1p3bDH7jtgL2f7OQNPv/VkY6RKi5LViE20m2fYKmt2Fx+FdrIAw64jK3fhLuWF9MKuHOLhgbcrpCvuIMcR1P+/TEPoOrwLy8qzSyGlHZlYHo2m16FqdHqvwHF2vd3A2OnblBx8RN51Hxr0PaRb11FxGQSdQgVU4IoQp0GlvDrhzRXHU1g4G7BnG7+fQpFHujw5QB0rrSLP8WgfWkdYOo6E7xF5EXZ+E2vWsRPRJ2bkVH0mywIo8BC1e7WCR28uLK29e2kBMxiwzDxu+7x/g8rbXxLZGVakEhvZMlWPUSBpcU6rEdW1x7+TEJCGxxBUf3/e6K60MqvOQIe3gRrevY8DddkCFbi6+ZIPmTpd95K9MwnwDDWub+CzKZaWBn6+23NMiBkMa2mgFIWn0QMxEtazTWwJITw== - username: AgA/sz7ukcLAtrSfiGncgMC/VkekQYAYhUmsVTR/sS9di8gv98+pBZbC2i1CC+Qy0yagVEmpstqD46AlkI4d/38S1YLoEolJomEn8KUcdvle7RXK5d+HXSDQCbWdhdhJsbw094rLd2pPzJ1ykVpJglbg+Ec9pzydorjS5LA8vXyujmH3YXW3OU2GCI+B8rgiedetlP6zyZciKuSNd/yDPB7cYzch0lmheGHREulvAzXE6xPv4hiyZtY0FA26zjixtQjW/CJnmwzD6/F1MBZWXtColxZob6I9I5DY4zGawNgS8n4qF/bRoIr75LYkD77KEfBWba5QkQcfnvsEmJWKFmMBchdrM8+wHulgElzTRn8HIfaslk6Aq9RBasXEBDtumBgLiOVCr4TNNX6RHNooyF6uc+Ms4zTdTsibBmMs3X0W8ON1qZx+oXf5M7QW3x+rz+cl7o1TQUsGaHeAcLjh1xGJWddSo1gRL8kqX7wlVucm2LZwIwdWnGT+Bp97FJmJ+R+xgjrmzy9lhboSK58LnpHk65psIngp0XCZ6b3pNrKbDc7H/v8EAjElSAhTGwX7nIwZ4jGCdgPICcX0FtWW17nlJIXJoHmQL08fPa7dqqkpx2JgLQ2E19TywfItxxRApYtRP2AXuf53XLiyQjDgo6STldASysj4MgpJti0lKZNUQkK2QedaXKhyLO3/n53SADSac+P8s0E= - template: - metadata: - creationTimestamp: null - name: immich-secret - namespace: immich-ns diff --git 
a/invidious/invidious-deployment.yaml b/invidious/invidious-deployment.yaml deleted file mode 100644 index 8575553..0000000 --- a/invidious/invidious-deployment.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: invidious - namespace: invidious-ns -spec: - replicas: 1 - selector: - matchLabels: - app: invidious - template: - metadata: - labels: - app: invidious - spec: - containers: - - name: wait-and-die - image: alpine:latest - command: ["/bin/sh", "-c"] - args: ["sleep 21600; exit 0"] - - name: invidious - image: quay.io/invidious/invidious:2024.04.26-eda7444 - env: - - name: INVIDIOUS_PORT - value: "3000" - - name: INVIDIOUS_DB_PASSWORD - valueFrom: - secretKeyRef: - name: invidious-secret - key: invidious-postgres-password - - name: INVIDIOUS_HMAC_KEY - valueFrom: - secretKeyRef: - name: invidious-secret - key: hmac - - name: INVIDIOUS_CONFIG - value: | - db: - dbname: Invidious - user: invidious - password: $(INVIDIOUS_DB_PASSWORD) - host: postgresql.postgresql-system.svc.cluster.local - port: 5432 - check_tables: true - hmac_key: "$(INVIDIOUS_HMAC_KEY)" - pool_size: 100 - statistics_enabled: true - admins: ["tyler"] - channel_threads: 2 - channel_refresh_interval: 15m - feed_threads: 2 - banner: "Lol. Lmao even." - default_user_preferences: - default_home: "Subscriptions" - quality: dash - save_player_pos: true - port: 3000 - #external_port: 443 - #domain: watch.clortox.com - ports: - - containerPort: 3000 diff --git a/invidious/invidious-service.yaml b/invidious/invidious-service.yaml deleted file mode 100644 index 0f6065d..0000000 --- a/invidious/invidious-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: invidious - namespace: invidious-ns -spec: - type: LoadBalancer - ports: - - protocol: TCP - port: 80 - targetPort: 3000 - selector: - app: invidious diff --git a/invidious/sealed-secret.yaml b/invidious/sealed-secret.yaml deleted file mode 100644 index 153cb5d..0000000 --- a/invidious/sealed-secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: invidious-secret - namespace: invidious-ns -spec: - encryptedData: - hmac: AgBnXw0QxXIHdSyv1jruFE7gKlnWZwHjIF6yqpx/VwXdV1G6WWCfvv+ZMA9RNvnFGP3QmNttNpErFXgpGJKP6a9gr7nIK9ilPgm9oZZP0gt8MDnNSm/17sLeMv0X84uT5SfKCbzukTPKQj2NICWLYO9M3XV5x4CXNi+1E7r+F5qtAYV/V0ZPdo35QHALKjDYv5hofsvJNaUXxamMGzMjrOBtMZKDAGx4K0ftOVr348IbKb8R3WgSrJDN2YQdk+8U1lyRZoK2yBsMYEx1/z3/YsYF/ZvE8Z6tPnRCImJSr+jkEDde0So0DkXTESdBKVnkRQ2e31pyRHGu7+z3dqZlNITFbVt3YN54+P7jDMGEEbPEgVfjJTk/MhqsfaY2WrqONXJvBFcsfVooDXG3rQinG5UkPUBLWPCnInD1mvbSyN5whC7oVh5+qwCrEN3WSsEpMUig8re10sVDwmwXehf0TqWwsIPdT/4OxYnBjzjqJ5HYopBHqCcHxeHD6o+6fNjZPSofNo2YkIX1yI+9laSjEHBmIwdFBCty10yaDsF625X07zlqFBMzSaPRcK3MVReFfUrI5w7mZuM+bzT4OG3Zf4bolQp18glzltSPxWPOsc7RRRImkcjf+PkyXmGVwZ2oPXISX+8xuOIuxhMMGAke0a7b8R7hNb/vvZ6dbtStMwZWUd0IB3Rnmb8rWmdy5qHoANYbmVmwTfcDSKxp0hqfoPNYBG7xJKAg3FjdoYjcmVmbAQ== - invidious-postgres-password: 
AgDCqXfmNpRx1XQeKqVrXw7u9BXLvoyWiy16S3H5MgGf7SkBffIM9fbE3bFsOI8ow0obxd1vJRw/7XZtFoGYwumoGvFLU/5N1AeluHLD8c6muBNEH7hBQmXj7rGlZ2PGKIZ+C0iqMLrt0xWpiPsPKuSxeXBwyTuZpdcw5PpTQ9N6pWhLyAM5Aw7BHXzWN3PiH4dplWnYcilj0MkNAueTwQtwksHrmPrA7ezE965adfhWzn+IWS0Rco5/QqNMArmFQqYKNkfh0mkCKz258TOLGGbznNbvWU5PQklElBUTqB2r1nJc5nYdAN0cOYYRbXhql5s61Q0S4REXG0gZVfqZMxGFpomeVx09tQRbYHKW/ptp4HKb0x2GbA/Wk1qcvvHAOqhU9f1/+MhIeyUShNeQdTthbm2hnS3Z46KPw0EEdLuSo9xG8hu+saak/xIs4bOaKbtkjSqdeTH3UzEKCjK0bQDoB6JvS6tq+CVzxoUGVYYDzbS0ADDKgdVGkOsGzVswtUOo7yYzOY9jLHanbMCZjvDfOByyYdTnegtS/iIExCPhM0V/9WzY1Y1/crX2RIgdWzTsV2djG24/tZvIggMTZE3PZH83pEduWzcMyi4JED/OYCaWlJRWFqhq+3g/K/0DgM3YPDRwul3yGhoKiWr3bRDC2RPMRTlINd10ctocnDupV1yxFzgLPimrG0LLxcmk2foRkTeJ2d/3LtjN0HfvmLSvVKrAOUDOTVcOsenoyVauNg== - template: - metadata: - creationTimestamp: null - name: invidious-secret - namespace: invidious-ns diff --git a/jobs/test-ntfy.yaml b/jobs/test-ntfy.yaml deleted file mode 100644 index 7f37987..0000000 --- a/jobs/test-ntfy.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - name: ntfy-notification-cronjob - namespace: default -spec: - schedule: "03 21 * * *" - jobTemplate: - spec: - template: - spec: - restartPolicy: OnFailure - containers: - - name: ntfy-notifier - image: curlimages/curl:7.80.0 - command: ["/bin/sh", "-c"] - args: - - | - curl -X POST -d "this is a test notification from a k3s cronjob!" https://ntfy.sh/test - diff --git a/longhorn/longhorn.yaml b/longhorn/longhorn.yaml deleted file mode 100644 index 80029d9..0000000 --- a/longhorn/longhorn.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: longhorn - namespace: longhorn-system -spec: - interval: 5m - chart: - spec: - chart: longhorn - version: "1.5.1" - sourceRef: - kind: HelmRepository - name: longhorn - namespace: flux-system - values: - service: - ui: - type: LoadBalancer diff --git a/longhorn/sealed-secret.yaml b/longhorn/sealed-secret.yaml deleted file mode 100644 index a4c8751..0000000 --- a/longhorn/sealed-secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: longhorn-backup - namespace: longhorn-system -spec: - encryptedData: - AWS_ACCESS_KEY_ID: AgAaiekhcrroR4st3tc4Bk1GQQYa+1thkHqYz7gANqClaHCbwWWDKi312YN32xGdlxtCLWeqp+oU1yJjXa71Ha1U6R3IT4oLqh+3CrmE0HvqWEVWbaiInB/J8S91ejFKhSniL4W45667ntqByIh7yG/sR2ROpW4NGy1iDOH+B/D3j47XJFPEbLQVci1fWN9inLATNZlHOWvml5xjNaQCQP9xCTdBWBCPvqby9oQZGp3/I2z75/2bAu9UCV9U7ARY550v30EwrBpfROIRUGVw7TCPtiNabhmcy2PU1QWleFTZNf2wAvIMoFxH6DgfmaiRY39JFuGFypRviAf9t+OyP9YYplkrk8pdXGImTTsLl+y6PSM+ibdJyOH4g/cyWA/+882aPpg0f8u1fmfOjonmYPSGi9UqKEJoJsWUnRBW+INZoxnG3vQvYF0hoaMgqi6QnLcMlOP+zjd/2sLlPRqlwPUUF35Q67RmAahAHUA2P1a25ojcBVGFBMQ8voKy3QtMyOJkrsS8Uq0iX1gVNLZuZRZsCoxJDxU13043XF7OD/lSggzG7gElAsuGHZ0/3ltF17TuPDx0/GawW+fIzU9yQBnLFvYn1UJwC1m9lQvRmlg5yQm1uFO1oBP4rX6N7GWttn8zmLzyUeqnuOvUFI0kZHrUsfTYEM+FqUAIPJFKFO2OZGRagVn59Vr3/aEgn83CODveD8g00oq2PU5rqQ61zWYZNoQOGA== - AWS_ENDPOINTS: 
AgBytK+0Q+PqVpzW1thju/ntgZmMm8n5V3ZaoJ8ZmkEm+9USijqrpx0N4aGdrm1Z4a/OYKhBm0Xtwm4s3MSIsipAkB5cireUNrd0ggeCAqG9nhqIqZhqsbkbll3g1QNFxuprE4196Byo9nCdcxkR7OMHJ86FMraPkjxqLzHNb8XuNpQn9CinFaUdXcbhO03ZKZFCtxwlnRYtr2Anx2EBT38tLcg/dIt25+4QY95djZdw8NwFP1xlslwE+rbwMQHAxpM6bZLqGtqWNDTdJ0NummoJXnWiOwVPEpjiPM6vdXiBkjRlGpldXuF2JK/c3t4G1CA5URck8jcFaVNfO+wUsG45hp+EkE5XwXMXdY1KB8oveYU7ehCrI8wCY2obhYsbQVve34S2Mv/C47uCtFYHSE7lZsHttabWbYS4QSmSe+BM8uCFoUkJErBPsIZ73TzRgjhSR1Y/qtoyNDlYSH8jZEm8OlIsk5fdOWMN87e/l4TIqSVd02kJAKvn/o564BjrcdqB8gU6adV0vh4q1GBtwwF8nquQP3mL9zxDiljCikDwDD7DNVbzSUwA7tg4YYmQAwpRDfhEZkr5uPaQJ8pyhVp3ZyjACfHB0i2zF/x/UnUogs42OunchrPlkBAPUvY1vbM8uRcy+E0UO6h5gTOf9O1cB0pnIxz//MIbg4ve2SYftLstdtlJr4xL/QhqPWb7VZDB0QZUceFvKpbourbE1M6htGr8jGxR/UI= - AWS_SECRET_ACCESS_KEY: AgC33pdA8wIcOtyYeh0+rsrBtw5VsT8r9z35ovM8za2hcUnEg8ON8SMlGjdPSUSUVQPvT3NoXCZhzNvIwvdBPDsqSGdIeEb1zQ1hGz4jycd5OYPCNOA8yiTv9UfzYaO/YthyoeYOufHHSfz4o7uZFAqr7xIYX+1tOobtJjiQQopKxSbtlbkwNUAH86TSwJp66jhXsy35aAWVcfycAhwtVzc1TLyTJ6EB43BT/0f+qiLxiAqRWnfMQ063swnAoQ1RDAto3LMBMsJOCnYk2sxIXrlg+l2vdAH5OSRHxLTKAK7i2z7h74NhlXhJmWMNm+M3rz8pDIUfpYlgmDW96B/JRkq/xVgbUXdNuSE1E6w04QVVcgqeh9xgYuCVwxaIpFKY27cnX5Z3SG/WcELHw4QUzoPwaNmOKho0cSFzseI8r1inIj7OTafP4/j8/3gunlvTilUI6O/Nz7n0gt/ZJwzhX6Un9ETstPYsCFFGCpYgi0Mpr4bVOKiRvdR/00r29lYmoTs49U2FPdoqZWo9h+m2VBl0WLnMqSHvrCR4y6Gz8iQQAlSxIVEaQ+5i6N0K51Ba+9g7PLKljpCIhAL356MT1595FfQrhg6GozUuv184paa4SR/sqX2k70RrdAULAEFoGvkv9dIYJRFPL7RPxXKhr1UO1/HgJbCGZvCpa+VbgzB9Fztf3w4gwY2rxHhPf+kRC788xsUEY1n6L1qOUgQaQJtxlLf53RHVNsj8eEWC3gPcer0qyIH0lGQE - template: - metadata: - creationTimestamp: null - name: longhorn-backup - namespace: longhorn-system diff --git a/metallb-config/ipaddresspool.yaml b/metallb-config/ipaddresspool.yaml deleted file mode 100644 index 9877ef3..0000000 --- a/metallb-config/ipaddresspool.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - name: metallb-pool-addresses - namespace: metallb-system -spec: - addresses: - - 10.0.3.64-10.0.3.253 diff --git a/metallb-config/l2-advertisement.yaml b/metallb-config/l2-advertisement.yaml deleted file mode 100644 index 9c5351d..0000000 --- a/metallb-config/l2-advertisement.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: metallb.io/v1beta1 -kind: L2Advertisement -metadata: - name: my-l2-advertisment - namespace: metallb-system -spec: - ipAddressPools: - - metallb-pool-addresses diff --git a/metallb/config-map-overrides.yaml b/metallb/config-map-overrides.yaml deleted file mode 100644 index bc18d29..0000000 --- a/metallb/config-map-overrides.yaml +++ /dev/null @@ -1,349 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: metallb-helm-chart-value-overrides - namespace: metallb-system -data: - values.yaml: |- - # Default values for metallb. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - imagePullSecrets: [] - nameOverride: "" - fullnameOverride: "" - loadBalancerClass: "" - - # To configure MetalLB, you must specify ONE of the following two - # options. - - rbac: - # create specifies whether to install and use RBAC rules. - create: true - - prometheus: - # scrape annotations specifies whether to add Prometheus metric - # auto-collection annotations to pods. See - # https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml - # for a corresponding Prometheus configuration. 
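The SealedSecret manifests above (immich-secret, invidious-secret, longhorn-backup) commit only ciphertext; the sealed-secrets controller running in the cluster decrypts them back into ordinary Secrets. As a rough sketch, the plain Secret that would have been piped through kubeseal to produce the longhorn-backup entry looks like the following, where the key names, name, and namespace come from the manifest above and the values are hypothetical placeholders:

apiVersion: v1
kind: Secret
metadata:
  name: longhorn-backup
  namespace: longhorn-system
type: Opaque
stringData:
  # placeholder values for illustration; the real ones exist only as encryptedData above
  AWS_ACCESS_KEY_ID: "<backup-access-key>"
  AWS_SECRET_ACCESS_KEY: "<backup-secret-key>"
  AWS_ENDPOINTS: "https://s3.example.com"

Encrypting it with kubeseal (for example, kubeseal --format yaml < secret.yaml > sealed-secret.yaml) binds each value to the controller's public key, which is what makes the encryptedData blocks safe to keep in git.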
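The IPAddressPool and L2Advertisement above are what hand every LoadBalancer Service in this repository an external address from 10.0.3.64-10.0.3.253. A Service can also request a fixed address from that pool; a minimal sketch, assuming MetalLB's metallb.universe.tf/loadBalancerIPs annotation and a hypothetical app named example:

apiVersion: v1
kind: Service
metadata:
  name: example-service            # hypothetical, for illustration only
  namespace: default
  annotations:
    # pin an arbitrary example address out of metallb-pool-addresses
    metallb.universe.tf/loadBalancerIPs: "10.0.3.100"
spec:
  type: LoadBalancer
  selector:
    app: example
  ports:
    - port: 80
      targetPort: 8080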
Alternatively, you - # may want to use the Prometheus Operator - # (https://github.com/coreos/prometheus-operator) for more powerful - # monitoring configuration. If you use the Prometheus operator, this - # can be left at false. - scrapeAnnotations: false - - # port both controller and speaker will listen on for metrics - metricsPort: 7472 - - # if set, enables rbac proxy on the controller and speaker to expose - # the metrics via tls. - # secureMetricsPort: 9120 - - # the name of the secret to be mounted in the speaker pod - # to expose the metrics securely. If not present, a self signed - # certificate to be used. - speakerMetricsTLSSecret: "" - - # the name of the secret to be mounted in the controller pod - # to expose the metrics securely. If not present, a self signed - # certificate to be used. - controllerMetricsTLSSecret: "" - - # prometheus doens't have the permission to scrape all namespaces so we give it permission to scrape metallb's one - rbacPrometheus: true - - # the service account used by prometheus - # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true " - serviceAccount: "" - - # the namespace where prometheus is deployed - # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true " - namespace: "" - - # the image to be used for the kuberbacproxy container - rbacProxy: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.12.0 - pullPolicy: - - # Prometheus Operator PodMonitors - podMonitor: - # enable support for Prometheus Operator - enabled: false - - # optional additionnal labels for podMonitors - additionalLabels: {} - - # optional annotations for podMonitors - annotations: {} - - # Job label for scrape target - jobLabel: "app.kubernetes.io/name" - - # Scrape interval. If not set, the Prometheus default scrape interval is used. - interval: - - # metric relabel configs to apply to samples before ingestion. - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - - # Prometheus Operator ServiceMonitors. To be used as an alternative - # to podMonitor, supports secure metrics. - serviceMonitor: - # enable support for Prometheus Operator - enabled: false - - speaker: - # optional additional labels for the speaker serviceMonitor - additionalLabels: {} - # optional additional annotations for the speaker serviceMonitor - annotations: {} - # optional tls configuration for the speaker serviceMonitor, in case - # secure metrics are enabled. - tlsConfig: - insecureSkipVerify: true - - controller: - # optional additional labels for the controller serviceMonitor - additionalLabels: {} - # optional additional annotations for the controller serviceMonitor - annotations: {} - # optional tls configuration for the controller serviceMonitor, in case - # secure metrics are enabled. - tlsConfig: - insecureSkipVerify: true - - # Job label for scrape target - jobLabel: "app.kubernetes.io/name" - - # Scrape interval. If not set, the Prometheus default scrape interval is used. - interval: - - # metric relabel configs to apply to samples before ingestion. 
- metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - - # Prometheus Operator alertmanager alerts - prometheusRule: - # enable alertmanager alerts - enabled: false - - # optional additionnal labels for prometheusRules - additionalLabels: {} - - # optional annotations for prometheusRules - annotations: {} - - # MetalLBStaleConfig - staleConfig: - enabled: true - labels: - severity: warning - - # MetalLBConfigNotLoaded - configNotLoaded: - enabled: true - labels: - severity: warning - - # MetalLBAddressPoolExhausted - addressPoolExhausted: - enabled: true - labels: - severity: alert - - addressPoolUsage: - enabled: true - thresholds: - - percent: 75 - labels: - severity: warning - - percent: 85 - labels: - severity: warning - - percent: 95 - labels: - severity: alert - - # MetalLBBGPSessionDown - bgpSessionDown: - enabled: true - labels: - severity: alert - - extraAlerts: [] - - # controller contains configuration specific to the MetalLB cluster - # controller. - controller: - enabled: true - # -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` - logLevel: info - # command: /controller - # webhookMode: enabled - image: - repository: quay.io/metallb/controller - tag: - pullPolicy: - ## @param controller.updateStrategy.type Metallb controller deployment strategy type. - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy - ## e.g: - ## strategy: - ## type: RollingUpdate - ## rollingUpdate: - ## maxSurge: 25% - ## maxUnavailable: 25% - ## - strategy: - type: RollingUpdate - serviceAccount: - # Specifies whether a ServiceAccount should be created - create: true - # The name of the ServiceAccount to use. If not set and create is - # true, a name is generated using the fullname template - name: "" - annotations: {} - securityContext: - runAsNonRoot: true - # nobody - runAsUser: 65534 - fsGroup: 65534 - resources: {} - # limits: - # cpu: 100m - # memory: 100Mi - nodeSelector: {} - tolerations: [] - priorityClassName: "" - runtimeClassName: "" - affinity: {} - podAnnotations: {} - labels: {} - livenessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - - # speaker contains configuration specific to the MetalLB speaker - # daemonset. - speaker: - enabled: true - # command: /speaker - # -- Speaker log level. 
Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` - logLevel: info - tolerateMaster: true - memberlist: - enabled: true - mlBindPort: 7946 - mlSecretKeyPath: "/etc/ml_secret_key" - excludeInterfaces: - enabled: true - image: - repository: quay.io/metallb/speaker - tag: - pullPolicy: - ## @param speaker.updateStrategy.type Speaker daemonset strategy type - ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate or OnDelete - ## - type: RollingUpdate - serviceAccount: - # Specifies whether a ServiceAccount should be created - create: true - # The name of the ServiceAccount to use. If not set and create is - # true, a name is generated using the fullname template - name: "" - annotations: {} - ## Defines a secret name for the controller to generate a memberlist encryption secret - ## By default secretName: {{ "metallb.fullname" }}-memberlist - ## - # secretName: - resources: {} - # limits: - # cpu: 100m - # memory: 100Mi - nodeSelector: {} - tolerations: [] - priorityClassName: "" - affinity: {} - ## Selects which runtime class will be used by the pod. - runtimeClassName: "" - podAnnotations: {} - labels: {} - livenessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - startupProbe: - enabled: true - failureThreshold: 30 - periodSeconds: 5 - # frr contains configuration specific to the MetalLB FRR container, - # for speaker running alongside FRR. - frr: - enabled: true - image: - repository: quay.io/frrouting/frr - tag: 8.5.2 - pullPolicy: - metricsPort: 7473 - resources: {} - - # if set, enables a rbac proxy sidecar container on the speaker to - # expose the frr metrics via tls. - # secureMetricsPort: 9121 - - reloader: - resources: {} - - frrMetrics: - resources: {} - - crds: - enabled: true - validationFailurePolicy: Fail diff --git a/metallb/helmrelease-metallb.yaml b/metallb/helmrelease-metallb.yaml deleted file mode 100644 index 53a667d..0000000 --- a/metallb/helmrelease-metallb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: metallb - namespace: metallb-system -spec: - chart: - spec: - chart: metallb - version: 0.13.11 - sourceRef: - kind: HelmRepository - name: metallb - namespace: flux-system - interval: 15m - timeout: 5m - releaseName: metallb - valuesFrom: - - kind: ConfigMap - name: metallb-helm-chart-value-overrides - valuesKey: values.yaml diff --git a/minio/helmrelease-minio.yaml b/minio/helmrelease-minio.yaml deleted file mode 100644 index a92942e..0000000 --- a/minio/helmrelease-minio.yaml +++ /dev/null @@ -1,564 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: minio - namespace: minio-ns -spec: - chart: - spec: - chart: minio - sourceRef: - kind: HelmRepository - name: minio - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: minio - values: - ## Provide a name in place of minio for `app:` labels - ## - nameOverride: "" - - ## Provide a name to substitute for the full names of resources - ## - fullnameOverride: "" - - ## set kubernetes cluster domain where minio is running - ## - clusterDomain: cluster.local - - ## Set default image, imageTag, and imagePullPolicy. 
mode is used to indicate the - ## - image: - repository: quay.io/minio/minio - tag: RELEASE.2023-09-30T07-02-29Z - pullPolicy: IfNotPresent - - imagePullSecrets: [] - # - name: "image-pull-secret" - - ## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio - ## client used to create a default bucket). - ## - mcImage: - repository: quay.io/minio/mc - tag: RELEASE.2023-09-29T16-41-22Z - pullPolicy: IfNotPresent - - ## minio mode, i.e. standalone or distributed - mode: standalone ## other supported values are "standalone" - - ## Additional labels to include with deployment or statefulset - additionalLabels: {} - - ## Additional annotations to include with deployment or statefulset - additionalAnnotations: {} - - ## Typically the deployment/statefulset includes checksums of secrets/config, - ## So that when these change on a subsequent helm install, the deployment/statefulset - ## is restarted. This can result in unnecessary restarts under GitOps tooling such as - ## flux, so set to "true" to disable this behaviour. - ignoreChartChecksums: false - - ## Additional arguments to pass to minio binary - extraArgs: [] - - ## Additional volumes to minio container - extraVolumes: [] - - ## Additional volumeMounts to minio container - extraVolumeMounts: [] - - ## Additional sidecar containers - extraContainers: [] - - ## Internal port number for MinIO S3 API container - ## Change service.port to change external port number - minioAPIPort: "9000" - - ## Internal port number for MinIO Browser Console container - ## Change consoleService.port to change external port number - minioConsolePort: "9001" - - ## Update strategy for Deployments - deploymentUpdate: - type: RollingUpdate - maxUnavailable: 0 - maxSurge: 100% - - ## Update strategy for StatefulSets - statefulSetUpdate: - updateStrategy: RollingUpdate - - ## Pod priority settings - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ - ## - priorityClassName: "" - - ## Pod runtime class name - ## ref https://kubernetes.io/docs/concepts/containers/runtime-class/ - ## - runtimeClassName: "" - - ## Set default rootUser, rootPassword - ## AccessKey and secretKey is generated when not set - ## Distributed MinIO ref: https://min.io/docs/minio/linux/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.html - ## - #rootUser: "" - #rootPassword: "" - # - - ## Use existing Secret that store following variables: - ## - ## | Chart var | .data. in Secret | - ## |:----------------------|:-------------------------| - ## | rootUser | rootUser | - ## | rootPassword | rootPassword | - ## - ## All mentioned variables will be ignored in values file. - ## .data.rootUser and .data.rootPassword are mandatory, - ## others depend on enabled status of corresponding sections. - existingSecret: "minio-default-credentials" - - ## Directory on the MinIO pof - certsPath: "/etc/minio/certs/" - configPathmc: "/etc/minio/mc/" - - ## Path where PV would be mounted on the MinIO Pod - mountPath: "/export" - ## Override the root directory which the minio server should serve from. 
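existingSecret: "minio-default-credentials" ties the chart to the sealed secret that appears later in this diff; per the table in the comment above, only the rootUser and rootPassword keys are mandatory. A minimal sketch of the unsealed Secret shape the chart expects (the values here are placeholders, not the real credentials):

apiVersion: v1
kind: Secret
metadata:
  name: minio-default-credentials
  namespace: minio-ns
type: Opaque
stringData:                               # stringData is the unencoded convenience form of .data
  rootUser: "<minio-admin-user>"          # placeholder
  rootPassword: "<minio-admin-password>"  # placeholder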
- ## If left empty, it defaults to the value of {{ .Values.mountPath }} - ## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }} - ## - bucketRoot: "" - - # Number of drives attached to a node - drivesPerNode: 1 - # Number of MinIO containers running - replicas: 1 - # Number of expanded MinIO clusters - pools: 1 - - ## TLS Settings for MinIO - tls: - enabled: false - ## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret - certSecret: "" - publicCrt: public.crt - privateKey: private.key - - ## Trusted Certificates Settings for MinIO. Ref: https://min.io/docs/minio/linux/operations/network-encryption.html#third-party-certificate-authorities - ## Bundle multiple trusted certificates into one secret and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret - ## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt. - ## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret. - trustedCertsSecret: "" - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - #annotations: {} - - ## A manually managed Persistent Volume and Claim - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - #existingClaim: "" - - ## minio data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - ## Storage class of PV to bind. By default it looks for standard storage class. - ## If the PV uses a different storage class, specify that here. - storageClass: "longhorn" - #volumeName: "" - accessMode: ReadWriteOnce - size: 30Gi - - ## If subPath is set mount a sub folder of a volume instead of the root of the volume. - ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). - ## - subPath: "" - - ## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service). - ## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
- ## ref: http://kubernetes.io/docs/user-guide/services/ - ## - service: - type: LoadBalancer - clusterIP: ~ - port: "9000" - nodePort: 9000 - loadBalancerIP: ~ - externalIPs: [] - annotations: {} - - ## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ - ## - - ingress: - enabled: false - ingressClassName: ~ - labels: {} - # node-role.kubernetes.io/ingress: platform - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # kubernetes.io/ingress.allow-http: "false" - # kubernetes.io/ingress.global-static-ip-name: "" - # nginx.ingress.kubernetes.io/secure-backends: "true" - # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" - # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 - path: / - hosts: - - minio-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - - consoleService: - type: LoadBalancer - clusterIP: ~ - port: "9001" - nodePort: 80 - loadBalancerIP: ~ - externalIPs: [] - annotations: {} - - consoleIngress: - enabled: false - ingressClassName: ~ - labels: {} - # node-role.kubernetes.io/ingress: platform - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # kubernetes.io/ingress.allow-http: "false" - # kubernetes.io/ingress.global-static-ip-name: "" - # nginx.ingress.kubernetes.io/secure-backends: "true" - # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" - # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 - path: / - hosts: - - console.minio-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - - ## Node labels for pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - - ## Add stateful containers to have security context, if enabled MinIO will run as this - ## user and group NOTE: securityContext is only enabled if persistence.enabled=true - securityContext: - enabled: true - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - fsGroupChangePolicy: "OnRootMismatch" - - # Additational pod annotations - podAnnotations: {} - - # Additional pod labels - podLabels: {} - - ## Configure resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - requests: - memory: 16Gi - - ## List of policies to be created after minio install - ## - ## In addition to default policies [readonly|readwrite|writeonly|consoleAdmin|diagnostics] - ## you can define additional policies with custom supported actions and resources - policies: [] - ## writeexamplepolicy policy grants creation or deletion of buckets with name - ## starting with example. In addition, grants objects write permissions on buckets starting with - ## example. - # - name: writeexamplepolicy - # statements: - # - effect: Allow # this is the default - # resources: - # - 'arn:aws:s3:::example*/*' - # actions: - # - "s3:AbortMultipartUpload" - # - "s3:GetObject" - # - "s3:DeleteObject" - # - "s3:PutObject" - # - "s3:ListMultipartUploadParts" - # - resources: - # - 'arn:aws:s3:::example*' - # actions: - # - "s3:CreateBucket" - # - "s3:DeleteBucket" - # - "s3:GetBucketLocation" - # - "s3:ListBucket" - # - "s3:ListBucketMultipartUploads" - ## readonlyexamplepolicy policy grants access to buckets with name starting with example. - ## In addition, grants objects read permissions on buckets starting with example. 
- # - name: readonlyexamplepolicy - # statements: - # - resources: - # - 'arn:aws:s3:::example*/*' - # actions: - # - "s3:GetObject" - # - resources: - # - 'arn:aws:s3:::example*' - # actions: - # - "s3:GetBucketLocation" - # - "s3:ListBucket" - # - "s3:ListBucketMultipartUploads" - ## conditionsexample policy creates all access to example bucket with aws:username="johndoe" and source ip range 10.0.0.0/8 and 192.168.0.0/24 only - # - name: conditionsexample - # statements: - # - resources: - # - 'arn:aws:s3:::example/*' - # actions: - # - 's3:*' - # conditions: - # - StringEquals: '"aws:username": "johndoe"' - # - IpAddress: | - # "aws:SourceIp": [ - # "10.0.0.0/8", - # "192.168.0.0/24" - # ] - # - ## Additional Annotations for the Kubernetes Job makePolicyJob - makePolicyJob: - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - resources: - requests: - memory: 128Mi - # Command to run after the main command on exit - exitCommand: "" - - ## List of users to be created after minio install - ## - users: - ## Username, password and policy to be assigned to the user - ## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics] - ## Add new policies as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management.html#access-management - ## NOTE: this will fail if LDAP is enabled in your MinIO deployment - ## make sure to disable this if you are using LDAP. - - accessKey: console - secretKey: console123 - policy: consoleAdmin - # Or you can refer to specific secret - #- accessKey: externalSecret - # existingSecret: my-secret - # existingSecretKey: password - # policy: readonly - - ## Additional Annotations for the Kubernetes Job makeUserJob - makeUserJob: - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - resources: - requests: - memory: 128Mi - # Command to run after the main command on exit - exitCommand: "" - - ## List of service accounts to be created after minio install - ## - svcaccts: [] - ## accessKey, secretKey and parent user to be assigned to the service accounts - ## Add new service accounts as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management/minio-user-management.html#service-accounts - # - accessKey: console-svcacct - # secretKey: console123 - # user: console - ## Or you can refer to specific secret - # - accessKey: externalSecret - # existingSecret: my-secret - # existingSecretKey: password - # user: console - ## You also can pass custom policy - # - accessKey: console-svcacct - # secretKey: console123 - # user: console - # policy: - # statements: - # - resources: - # - 'arn:aws:s3:::example*/*' - # actions: - # - "s3:AbortMultipartUpload" - # - "s3:GetObject" - # - "s3:DeleteObject" - # - "s3:PutObject" - # - "s3:ListMultipartUploadParts" - - makeServiceAccountJob: - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - resources: - requests: - memory: 128Mi - # Command to run after the main command on exit - exitCommand: "" - - ## List of buckets to be created after minio install - ## - buckets: [] - # # Name of the bucket - # - name: bucket1 - # # Policy to be set on the - # # bucket [none|download|upload|public] - # policy: none - # # Purge if bucket exists already - # purge: false - # # set versioning for - # # bucket [true|false] - # versioning: false - # # set objectlocking for - # # bucket [true|false] NOTE: versioning is enabled by default if you use locking - # objectlocking: false - # - name: 
bucket2 - # policy: none - # purge: false - # versioning: true - # # set objectlocking for - # # bucket [true|false] NOTE: versioning is enabled by default if you use locking - # objectlocking: false - - ## Additional Annotations for the Kubernetes Job makeBucketJob - makeBucketJob: - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - resources: - requests: - memory: 128Mi - # Command to run after the main command on exit - exitCommand: "" - - ## List of command to run after minio install - ## NOTE: the mc command TARGET is always "myminio" - customCommands: - # - command: "admin policy attach myminio consoleAdmin --group='cn=ops,cn=groups,dc=example,dc=com'" - - ## Additional Annotations for the Kubernetes Job customCommandJob - customCommandJob: - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - resources: - requests: - memory: 128Mi - # Command to run after the main command on exit - exitCommand: "" - - ## Merge jobs - postJob: - podAnnotations: {} - annotations: {} - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - nodeSelector: {} - tolerations: [] - affinity: {} - - ## Use this field to add environment variables relevant to MinIO server. These fields will be passed on to MinIO container(s) - ## when Chart is deployed - environment: - ## Please refer for comprehensive list https://min.io/docs/minio/linux/reference/minio-server/minio-server.html - ## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io" - ## MINIO_BROWSER: "off" - - ## The name of a secret in the same kubernetes namespace which contain secret values - ## This can be useful for LDAP password, etc - ## The key in the secret must be 'config.env' - ## - extraSecret: ~ - - ## OpenID Identity Management - ## The following section documents environment variables for enabling external identity management using an OpenID Connect (OIDC)-compatible provider. - ## See https://min.io/docs/minio/linux/operations/external-iam/configure-openid-external-identity-management.html for a tutorial on using these variables. - oidc: - enabled: false - configUrl: "https://identity-provider-url/.well-known/openid-configuration" - clientId: "minio" - clientSecret: "" - # Provide existing client secret from the Kubernetes Secret resource, existing secret will have priority over `clientSecret` - existingClientSecretName: "" - existingClientSecretKey: "" - claimName: "policy" - scopes: "openid,profile,email" - redirectUri: "https://console-endpoint-url/oauth_callback" - # Can leave empty - claimPrefix: "" - comment: "" - - networkPolicy: - enabled: false - allowExternal: true - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Specify the service account to use for the MinIO pods. If 'create' is set to 'false' - ## and 'name' is left unspecified, the account 'default' will be used. - serviceAccount: - create: true - ## The name of the service account to use. If 'create' is 'true', a service account with that name - ## will be created. 
- name: "minio-sa" - - metrics: - serviceMonitor: - enabled: false - # scrape each node/pod individually for additional metrics - includeNode: false - public: true - additionalLabels: {} - annotations: {} - # for node metrics - relabelConfigs: {} - # for cluster metrics - relabelConfigsCluster: {} - # metricRelabelings: - # - regex: (server|pod) - # action: labeldrop - namespace: ~ - # Scrape interval, for example `interval: 30s` - interval: ~ - # Scrape timeout, for example `scrapeTimeout: 10s` - scrapeTimeout: ~ - - ## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md - ## Define endpoints to enable this section. - etcd: - endpoints: [] - pathPrefix: "" - corednsPathPrefix: "" - clientCert: "" - clientCertKey: "" diff --git a/minio/sealed-secret.yaml b/minio/sealed-secret.yaml deleted file mode 100644 index 4869434..0000000 --- a/minio/sealed-secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: minio-default-credentials - namespace: minio-ns -spec: - encryptedData: - rootPassword: AgASkMrZq0TO6z/oeMyzGjDmSbJLBQCYW/7IQHdRS8M8vZkioEujShT/8IE6etxTOzGLwOkmpO8PyExTgMD3atyRRdiyBs2jaT0SIOyEZUA0PjiAgmYTWx9cAXBROOYzkT7u8IvMomEjiKx/EZG2XPhxgg0/Z9tBCVkstuEYyUfRokSco4icJ/JyHAz1Gg2F9w/KMiQJProcoAV5ajRdI4Bfb9e6E5GIW2Z0WKSH4fcCyM07nW+QnwlNGZNaAgLmSZygnUeF2PN/qD5aSj5YSjK5Va7KQRIlYszmzJcFg70yeustMIcE2nD2YVFFHb0CXKqEgnF9QrieBagorwoRvqU5XtXoXiBmzgvXtDQTJJ7ODT4XAB0oVF0QOdreBuVZ34D+Fb61O5HtFvSHRN3HsGXdvkKKgywJbjL+LaQBcEgztq0xjiGj/tjf3UDZOdOeHPZYJno9gdJX5eCTTjWVnaPxMyfwl3y4YmmHKVenCS6tsBkABk2/+lEthGUBRY9CyKl/ugwDQCJviX4tf7ZvMGGuPAxqIlZuM69jU53Zgp9Vq/8+UuTlksJzwQlH/VoyZsQl+/vSekyjDyPR3g3AunjoLsQDNnBwcghMzBFgeJzB/dSiyg0dQpiMUCcwe8i/20N/ER4pIC+ag1IyBAoKMQpWWJWyPU7IQ+JbYPdCI9Q1bMhQIpBNLkJsaiaRCvwrWaK07Ml9T4i+wMat8z8v0gIbnK+2JZ7FIeA66uuhxXhMi2Coqs5L0/vk - rootUser: AgDUG6LKdvzJorlYnxlW6cnJaqrhQkumFheLwZTD3aRf8ufFqQaGM/IPyNXwhKj4YAlr17gSR9kzIhYnkrKwVq9rtzo/arm2hF4cDWwQEZlrfmkqZfAec4p81KvyYgL19fuhDOeiZQfuCHl0MvDw+j6AzAk6Q6bbNdjWElaRzNLzjRAM892OCS2pubzRPLJl2+/9Ny/lZ2ePmZHHdin7ca73aCrcO1PryrhqQxZRMM0iaNKjUGsY8WMeoNnayhJ34KbsEMDTtPkWXlZb2FGtJDafw0A0fNn19PlU7wN7HeMK05SPgp4Sjs9LFrHNBanjF/rKqInCSg2lN57bUcJcVotpXEt6rmTEySo2QhnfFAXafX6hfl/HHT9GSrya+vFLKNXVf8hxVZMRjXmNIi0N3obvHOqGIJFDiy4iWEwOdrn/yetHs8ctS+DrO4pNY1cz/6SzaBayqaPqcxIAWhCKxXtNWb6sHBpTRsXpwUFq2Hoc9idB1uTGOpmpSWl8awUUsanXv4Kb2sZkXNc3iCCwx6TBDLQ1fukISj4n30RcTFDqa++3Nxq1n1immNerX30PjMWewxlUvAm5O9kwcIplfk8iW9ii3gRlth0Qs8FGhbfrghz5xs8CIgxEhnrCRphNeIow3JT1wxGU0r/QKoQu8zgEz+TsNdCXmB8bnauYyrW6ANhZaWx/wGoB29j7mHWfvLsTIwB2Q8HeV4agwKXoGSsp - template: - metadata: - creationTimestamp: null - name: minio-default-credentials - namespace: minio-ns diff --git a/navidrome/navidrome-data.yaml b/navidrome/navidrome-data.yaml deleted file mode 100644 index cc2b2a3..0000000 --- a/navidrome/navidrome-data.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: navidrome-pvc-data - namespace: navidrome-ns -spec: - accessModes: - - ReadWriteOnce - storageClassName: longhorn - resources: - requests: - storage: 16Gi diff --git a/navidrome/navidrome-deployment.yaml b/navidrome/navidrome-deployment.yaml deleted file mode 100644 index 1d4f289..0000000 --- a/navidrome/navidrome-deployment.yaml +++ /dev/null @@ -1,81 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: navidrome - namespace: navidrome-ns -spec: - selector: - matchLabels: - app: navidrome - template: - metadata: - labels: - app: navidrome - spec: - 
nodeSelector: - kubernetes.io/hostname: gluttony - securityContext: - fsGroup: 1000 - containers: - - name: navidrome - image: deluan/navidrome:latest - securityContext: - runAsUser: 1000 - runAsGroup: 1000 - ports: - - containerPort: 4533 - env: - - name: ND_BASEURL - value: "https://music.clortox.com" - - name: ND_CACHEFOLDER - value: "/cache" - - name: ND_MUSICFOLDER - value: "/music" - - name: ND_DATAFOLDER - value: "/data" - - - name: ND_SCANSCHEDULE - value: "1h" - - name: ND_LOGLEVEL - value: "info" - - name: ND_SESSIONTIMEOUT - value: "24h" - - - name: ND_ENABLESHARING - value: "true" - - name: ND_UILOGINBACKGROUNDURL - value: "https://general.api.clortox.com/images/background/today" - - name: ND_UIWELCOMEMESSAGE - value: "Lol. Lmao even" - - - name: ND_REVERSEPROXYUSERHEADER - value: "X-Authentik-Username" - - name: ND_REVERSEPROXYWHITELIST - value: "0.0.0.0/0" - #- name: ND_SPOTIFY_ID - # valueFrom: - # secretKeyRef: - # name: spotify-creds - # key: CLIENT_ID - #- name: ND_SPOTIFY_SECRET - # valueFrom: - # secretKeyRef: - # name: spotify-creds - # key: CLIENT_ID - volumeMounts: - - name: data - mountPath: "/data" - - name: music - mountPath: "/music" - readOnly: true - - name: cache - mountPath: "/cache" - volumes: - - name: data - persistentVolumeClaim: - claimName: navidrome-pvc-data - - name: music - persistentVolumeClaim: - claimName: navidrome-pvc-music - - name: cache - emptyDir: {} diff --git a/navidrome/navidrome-ingress.yaml b/navidrome/navidrome-ingress.yaml deleted file mode 100644 index 297a922..0000000 --- a/navidrome/navidrome-ingress.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: navidrome-ingress - namespace: navidrome-ns - annotations: - kubernetes.io/ingress.class: "nginx" - cert-manager.io/cluster-issuer: "letsencrypt-prod" -spec: - tls: - - hosts: - - music.clortox.com - secretName: music-clortox-com-tls - rules: - - host: music.clortox.com - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: navidrome-service - port: - number: 80 # Ensure this is the correct port your Navidrome service listens on - diff --git a/navidrome/navidrome-pv-music.yaml b/navidrome/navidrome-pv-music.yaml deleted file mode 100644 index 16a7294..0000000 --- a/navidrome/navidrome-pv-music.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: navidrome-pv-music - namespace: navidrome-ns -spec: - storageClassName: local-storage - capacity: - storage: 18000Gi - accessModes: - - ReadWriteMany - hostPath: - path: "/Main/Media" - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - gluttony diff --git a/navidrome/navidrome-pvc-music.yaml b/navidrome/navidrome-pvc-music.yaml deleted file mode 100644 index d6dc488..0000000 --- a/navidrome/navidrome-pvc-music.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: navidrome-pvc-music - namespace: navidrome-ns -spec: - volumeName: navidrome-pv-music - storageClassName: local-storage - accessModes: - - ReadWriteMany - resources: - requests: - storage: 18000Gi diff --git a/navidrome/navidrome-service.yaml b/navidrome/navidrome-service.yaml deleted file mode 100644 index 2f7f829..0000000 --- a/navidrome/navidrome-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: navidrome-services - namespace: navidrome-ns -spec: - type: LoadBalancer - ports: - - port: 80 - 
targetPort: 4533 - protocol: TCP - selector: - app: navidrome diff --git a/nginx/helmrelease-nginx-ingress.yaml b/nginx/helmrelease-nginx-ingress.yaml deleted file mode 100644 index 85c0081..0000000 --- a/nginx/helmrelease-nginx-ingress.yaml +++ /dev/null @@ -1,1180 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: nginx-ingress - namespace: ingress -spec: - interval: 1h - chart: - spec: - chart: nginx-ingress-controller - version: "11.3.0" - sourceRef: - kind: HelmRepository - name: bitnami - namespace: flux-system - values: - # Copyright Broadcom, Inc. All Rights Reserved. - # SPDX-License-Identifier: APACHE-2.0 - - ## @section Global parameters - ## Global Docker image parameters - ## Please, note that this will override the image parameters, including dependencies, configured to use the global value - ## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass - - ## @param global.imageRegistry Global Docker image registry - ## @param global.imagePullSecrets Global Docker registry secret names as an array - ## - global: - imageRegistry: "" - ## E.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - ## Compatibility adaptations for Kubernetes platforms - ## - compatibility: - ## Compatibility adaptations for Openshift - ## - openshift: - ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) - ## - adaptSecurityContext: auto - ## @section Common parameters - - ## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) - ## - kubeVersion: "" - ## @param nameOverride String to partially override common.names.fullname - ## - nameOverride: "" - ## @param fullnameOverride String to fully override common.names.fullname - ## - fullnameOverride: "" - ## @param namespaceOverride String to fully override common.names.namespace - ## - namespaceOverride: "" - ## @param commonLabels Add labels to all the deployed resources - ## - commonLabels: {} - ## @param commonAnnotations Add annotations to all the deployed resources - ## - commonAnnotations: {} - ## @param extraDeploy Array of extra objects to deploy with the release - ## - extraDeploy: [] - ## @param clusterDomain Kubernetes cluster domain name - ## - clusterDomain: cluster.local - ## @section Nginx Ingress Controller parameters - - ## Bitnami NGINX Ingress controller image version - ## ref: https://hub.docker.com/r/bitnami/nginx-ingress-controller/tags/ - ## @param image.registry [default: REGISTRY_NAME] Nginx Ingress Controller image registry - ## @param image.repository [default: REPOSITORY_NAME/nginx-ingress-controller] Nginx Ingress Controller image repository - ## @skip image.tag Nginx Ingress Controller image tag (immutable tags are recommended) - ## @param image.digest Nginx Ingress Controller image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag - ## @param image.pullPolicy Nginx Ingress Controller image pull policy - ## @param image.pullSecrets Specify docker-registry secret names as an array - ## - image: - registry: docker.io - repository: bitnami/nginx-ingress-controller - tag: 1.10.1-debian-12-r5 - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## @param containerPorts.http Nginx Ingress Controller HTTP port - ## @param containerPorts.https Nginx Ingress Controller HTTPS port - ## @param containerPorts.defaultServer Nginx Ingress Controller default server port - ## @param containerPorts.metrics Nginx Ingress Controller metrics port - ## @param containerPorts.profiler Nginx Ingress Controller profiler port - ## @param containerPorts.status Nginx Ingress Controller status port - ## @param containerPorts.stream Nginx Ingress Controller stream port - ## - containerPorts: - http: 8080 - https: 8443 - defaultServer: 8181 - metrics: 10254 - profiler: 10245 - status: 10246 - stream: 10247 - ## @param automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: true - ## @param hostAliases Deployment pod host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param config Custom configuration options for NGINX - ## ref: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ - ## - config: {} - ## @param proxySetHeaders Custom headers before sending traffic to backends - ## ref: https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers - ## - proxySetHeaders: {} - ## @param addHeaders Custom headers before sending response traffic to the client - ## ref: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers - ## - addHeaders: {} - ## @param defaultBackendService Default 404 backend service; required only if `defaultBackend.enabled = false` - ## Must be / - ## - defaultBackendService: "" - ## @param electionID Election ID to use for status update - ## - electionID: ingress-controller-leader - ## @param allowSnippetAnnotations Allow users to set snippet annotations - ## - allowSnippetAnnotations: false - ## @param reportNodeInternalIp If using `hostNetwork=true`, setting `reportNodeInternalIp=true`, will pass the flag `report-node-internal-ip-address` to Nginx Ingress Controller - ## Bare-metal considerations via the host network - ## ref: https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network - ## - reportNodeInternalIp: false - ## @param watchIngressWithoutClass Process Ingress objects without ingressClass annotation/ingressClassName field - ## - watchIngressWithoutClass: false - ## Configuring this doesn't affect `kubernetes.io/ingress.class` annotation. See `extraArgs` below how to configure processing of custom annotation. 
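The ingressClassResource block below creates the nginx IngressClass that Ingress objects elsewhere in this diff select, currently via the legacy kubernetes.io/ingress.class annotation (see the Navidrome ingress above). A minimal sketch of the same routing expressed through the newer spec.ingressClassName field, reusing the host, issuer, and namespace from the Navidrome manifests; note the backend here follows the Service's actual metadata.name, navidrome-services:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: navidrome-ingress
  namespace: navidrome-ns
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  ingressClassName: nginx            # replaces the kubernetes.io/ingress.class annotation
  tls:
    - hosts:
        - music.clortox.com
      secretName: music-clortox-com-tls
  rules:
    - host: music.clortox.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: navidrome-services   # matches the Service manifest above
                port:
                  number: 80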
- ## @param ingressClassResource.name Name of the IngressClass resource - ## @param ingressClassResource.enabled Create the IngressClass resource - ## @param ingressClassResource.default Set the created IngressClass resource as default class - ## @param ingressClassResource.controllerClass IngressClass identifier for the controller - ## @param ingressClassResource.parameters Optional parameters for the controller - ## - ingressClassResource: - name: nginx - enabled: true - default: false - controllerClass: "k8s.io/ingress-nginx" - parameters: {} - ## Allows customization of the external service - ## the ingress will be bound to via DNS - ## - publishService: - ## @param publishService.enabled Set the endpoint records on the Ingress objects to reflect those on the service - ## - enabled: false - ## @param publishService.pathOverride Allows overriding of the publish service to bind to - ## Must be / - ## - pathOverride: "" - ## @param scope.enabled Limit the scope of the controller. - ## @param scope.namespace Scope namespace. Defaults to `.Release.Namespace` - ## - scope: - enabled: false - namespace: "" - ## @param configMapNamespace Allows customization of the configmap / nginx-configmap namespace - ## Defaults to .Release.Namespace - ## - configMapNamespace: "" - ## @param tcpConfigMapNamespace Allows customization of the tcp-services-configmap namespace - ## Defaults to .Release.Namespace - ## - tcpConfigMapNamespace: "" - ## @param udpConfigMapNamespace Allows customization of the udp-services-configmap namespace - ## Defaults to .Release.Namespace - ## - udpConfigMapNamespace: "" - ## @param maxmindLicenseKey License key used to download Geolite2 database - ## - maxmindLicenseKey: "" - ## @param dhParam A base64ed Diffie-Hellman parameter - ## This can be generated with: openssl dhparam 4096 2> / - ## Ref: https://github.com/krmichel/ingress-nginx/blob/master/docs/examples/customization/ssl-dh-param - dhParam: "" - ## @param tcp TCP service key:value pairs - ## ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp - ## e.g: - ## tcp: - ## 8080: "default/example-tcp-svc:9000" - ## - tcp: {} - ## @param udp UDP service key:value pairs - ## ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp - ## e.g: - ## udp: - ## 53: "kube-system/kube-dns:53" - ## - udp: {} - ## @param command Override default container command (useful when using custom images) - ## - command: [] - ## @param args Override default container args (useful when using custom images) - ## - args: [] - ## @param lifecycleHooks for the %%MAIN_CONTAINER_NAME%% container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param extraArgs Additional command line arguments to pass to nginx-ingress-controller - ## E.g. 
to specify the default SSL certificate you can use - ## extraArgs: - ## default-ssl-certificate: "/" - ## ingress-class: nginx - ## - extraArgs: {} - ## @param extraEnvVars Extra environment variables to be set on Nginx Ingress container - ## E.g: - ## extraEnvs: - ## - name: FOO - ## valueFrom: - ## secretKeyRef: - ## key: FOO - ## name: secret-resource - ## - extraEnvVars: [] - ## @param extraEnvVarsCM Name of a existing ConfigMap containing extra environment variables - ## - extraEnvVarsCM: "" - ## @param extraEnvVarsSecret Name of a existing Secret containing extra environment variables - ## - extraEnvVarsSecret: "" - ## @section Nginx Ingress deployment / daemonset parameters - - ## @param kind Install as Deployment or DaemonSet - ## - kind: Deployment - ## Daemonset configuration - ## - daemonset: - ## @param daemonset.useHostPort If `kind` is `DaemonSet`, this will enable `hostPort` for `TCP/80` and `TCP/443` - ## - useHostPort: false - ## @param daemonset.hostPorts [object] HTTP and HTTPS ports - ## - hostPorts: - http: 80 - https: 443 - ## @param replicaCount Desired number of Controller pods - ## - replicaCount: 1 - ## @param updateStrategy Strategy to use to update Pods - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: {} - ## @param revisionHistoryLimit The number of old history to retain to allow rollback - ## - revisionHistoryLimit: 10 - ## Controller pods' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param podSecurityContext.enabled Enable Controller pods' Security Context - ## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param podSecurityContext.fsGroup Group ID for the container filesystem - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Controller containers' Security Context (only main container) - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param containerSecurityContext.enabled Enable Controller containers' Security Context - ## @param containerSecurityContext.allowPrivilegeEscalation Switch to allow priviledge escalation on the Controller container - ## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param containerSecurityContext.runAsUser User ID for the Controller container - ## @param containerSecurityContext.runAsGroup Group ID for the Controller container - ## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param containerSecurityContext.capabilities.drop [array] Linux Kernel capabilities that should be dropped - ## @param containerSecurityContext.capabilities.add [array] Linux Kernel capabilities that should be added - ## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - allowPrivilegeEscalation: false - seLinuxOptions: null - runAsUser: 1001 - runAsGroup: 1001 - readOnlyRootFilesystem: true - capabilities: - 
drop: ["ALL"] - add: ["NET_BIND_SERVICE"] - runAsNonRoot: true - seccompProfile: - type: "RuntimeDefault" - ## @param minReadySeconds How many seconds a pod needs to be ready before killing the next, during update - ## - minReadySeconds: 0 - ## Controller containers' resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## We usually recommend not to specify default resources and to leave this as a conscious - ## choice for the user. This also increases chances charts run on environments with little - ## resources, such as Minikube. If you do want to specify resources, uncomment the following - ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. - ## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Controller containers' liveness probe. Evaluated as a template. - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## @param livenessProbe.enabled Enable livenessProbe - ## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - ## Controller containers' readiness probe. Evaluated as a template. - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## @param readinessProbe.enabled Enable readinessProbe - ## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - ## Controller containers' startup probe. Evaluated as a template. 
- ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## @param startupProbe.enabled Enable startupProbe - ## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param startupProbe.periodSeconds Period seconds for startupProbe - ## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param startupProbe.failureThreshold Failure threshold for startupProbe - ## @param startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - ## @param customLivenessProbe Override default liveness probe - ## - customLivenessProbe: {} - ## @param customReadinessProbe Override default readiness probe - ## - customReadinessProbe: {} - ## @param customStartupProbe Custom liveness probe for the Web component - ## - customStartupProbe: {} - ## @param lifecycle LifecycleHooks to set additional configuration at startup - ## - lifecycle: {} - ## @param podLabels Extra labels for Controller pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param podAnnotations Annotations for Controller pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param priorityClassName Controller priorityClassName - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - ## - priorityClassName: "" - ## @param schedulerName Name of the k8s scheduler (other than default) - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param hostNetwork If the Nginx deployment / daemonset should run on the host's network namespace - ## Required on CNI based K8s installations, since CNI and hostport don't mix yet - ## Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 is merged - ## - hostNetwork: false - ## @param dnsPolicy By default, while using host network, name resolution uses the host's DNS - ## Optionally, change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true' if you wish nginx-controller - ## to keep resolving names inside the Kubernetes network - ## - dnsPolicy: ClusterFirst - ## @param dnsConfig is an object with optional parameters to pass to the DNS resolver - ## The dnsConfig field is optional and it can work with any dnsPolicy settings. - ## However, when a Pod's dnsPolicy is set to "None", the dnsConfig field has to be specified. - ## - dnsConfig: {} - ## @param terminationGracePeriodSeconds How many seconds to wait before terminating a pod - ## - terminationGracePeriodSeconds: 60 - ## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param affinity Affinity for pod assignment. Evaluated as a template. - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param nodeSelector Node labels for pod assignment. Evaluated as a template. - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param tolerations Tolerations for pod assignment. Evaluated as a template. - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param extraVolumes Optionally specify extra list of additional volumes for Controller pods - ## - extraVolumes: [] - ## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for Controller container(s) - ## - extraVolumeMounts: [] - ## @param initContainers Add init containers to the controller pods - ## Example: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - initContainers: [] - ## @param sidecars Add sidecars to the controller pods. - ## Example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param customTemplate [object] Override NGINX template - ## - customTemplate: - configMapName: "" - configMapKey: "" - ## @param topologySpreadConstraints Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## - ## topologySpreadConstraints: - ## - maxSkew: 1 - ## topologyKey: failure-domain.beta.kubernetes.io/zone - ## whenUnsatisfiable: DoNotSchedule - ## labelSelector: - ## matchLabels: - ## app.kubernetes.io/instance: ingress-nginx-internal - ## - topologySpreadConstraints: [] - ## @param podSecurityPolicy.enabled Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later - ## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - enabled: false - ## @section Default backend parameters - - ## Default 404 backend - ## - defaultBackend: - ## @param defaultBackend.enabled Enable a default backend based on NGINX - ## - enabled: true - ## @param defaultBackend.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: true - ## @param defaultBackend.hostAliases Add deployment host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## Bitnami NGINX image - ## ref: https://hub.docker.com/r/bitnami/nginx/tags/ - ## @param defaultBackend.image.registry [default: REGISTRY_NAME] Default backend image registry - ## @param defaultBackend.image.repository [default: REPOSITORY_NAME/nginx] Default backend image repository - ## @skip defaultBackend.image.tag Default backend image tag (immutable tags are recommended) - ## @param defaultBackend.image.digest Default backend image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param defaultBackend.image.pullPolicy Image pull policy - ## @param defaultBackend.image.pullSecrets Specify docker-registry secret names as an array - ## - image: - registry: docker.io - repository: bitnami/nginx - tag: 1.26.0-debian-12-r1 - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## @param defaultBackend.extraArgs Additional command line arguments to pass to Nginx container - ## - extraArgs: {} - ## @param defaultBackend.containerPort HTTP container port number - ## - containerPort: 8080 - ## @param defaultBackend.serverBlockConfig [string] NGINX backend default server block configuration - ## Should be compliant with: https://kubernetes.github.io/ingress-nginx/user-guide/default-backend/ - ## - serverBlockConfig: |- - location /healthz { - return 200; - } - - location / { - return 404; - } - ## @param defaultBackend.replicaCount Desired number of default backend pods - ## - replicaCount: 1 - ## Default backend pods' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param defaultBackend.podSecurityContext.enabled Enable Default backend pods' Security Context - ## @param defaultBackend.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param defaultBackend.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param defaultBackend.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param defaultBackend.podSecurityContext.fsGroup Group ID for the container filesystem - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Default backend containers' Security Context (only main container) - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param defaultBackend.containerSecurityContext.enabled Enable Default backend containers' Security Context - ## @param defaultBackend.containerSecurityContext.capabilities.drop [array] Linux Kernel capabilities that should be dropped - ## @param defaultBackend.containerSecurityContext.allowPrivilegeEscalation Switch to allow priviledge escalation on the container - ## @param defaultBackend.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param defaultBackend.containerSecurityContext.runAsUser User ID for the Default backend container - ## @param defaultBackend.containerSecurityContext.runAsGroup Group ID for the Default backend container - ## @param defaultBackend.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param defaultBackend.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param defaultBackend.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - allowPrivilegeEscalation: false - seLinuxOptions: null - runAsUser: 1001 - runAsGroup: 1001 - readOnlyRootFilesystem: true - capabilities: - drop: ["ALL"] - runAsNonRoot: true - seccompProfile: - type: "RuntimeDefault" - ## Default backend containers' resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## We usually recommend not to specify default resources and to leave this as a conscious - ## choice for the user. This also increases chances charts run on environments with little - ## resources, such as Minikube. 
If you do want to specify resources, uncomment the following - ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. - ## @param defaultBackend.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if defaultBackend.resources is set (defaultBackend.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param defaultBackend.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Default backend containers' liveness probe. Evaluated as a template. - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## @param defaultBackend.livenessProbe.enabled Enable livenessProbe - ## @param defaultBackend.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param defaultBackend.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param defaultBackend.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param defaultBackend.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param defaultBackend.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - ## Default backend containers' readiness probe. Evaluated as a template. - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## @param defaultBackend.readinessProbe.enabled Enable readinessProbe - ## @param defaultBackend.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param defaultBackend.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param defaultBackend.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param defaultBackend.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param defaultBackend.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - failureThreshold: 6 - initialDelaySeconds: 0 - periodSeconds: 5 - successThreshold: 1 - timeoutSeconds: 5 - ## Default backend containers' startup probe. Evaluated as a template. 
- ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## @param defaultBackend.startupProbe.enabled Enable startupProbe - ## @param defaultBackend.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param defaultBackend.startupProbe.periodSeconds Period seconds for startupProbe - ## @param defaultBackend.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param defaultBackend.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param defaultBackend.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - failureThreshold: 6 - initialDelaySeconds: 0 - periodSeconds: 5 - successThreshold: 1 - timeoutSeconds: 5 - ## @param defaultBackend.customStartupProbe Custom liveness probe for the Web component - ## - customStartupProbe: {} - ## @param defaultBackend.customLivenessProbe Custom liveness probe for the Web component - ## - customLivenessProbe: {} - ## @param defaultBackend.customReadinessProbe Custom readiness probe for the Web component - ## - customReadinessProbe: {} - ## @param defaultBackend.podLabels Extra labels for Controller pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param defaultBackend.podAnnotations Annotations for Controller pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param defaultBackend.priorityClassName priorityClassName - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass - ## - priorityClassName: "" - ## @param defaultBackend.schedulerName Name of the k8s scheduler (other than default) - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param defaultBackend.terminationGracePeriodSeconds In seconds, time the given to the pod to terminate gracefully - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods - ## - terminationGracePeriodSeconds: 60 - ## @param defaultBackend.topologySpreadConstraints Topology Spread Constraints for pod assignment - ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## The value is evaluated as a template - ## - topologySpreadConstraints: [] - ## @param defaultBackend.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param defaultBackend.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param defaultBackend.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param defaultBackend.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param defaultBackend.nodeAffinityPreset.values Node label values to match. 
Ignored if `affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param defaultBackend.command Override default container command (useful when using custom images) - ## - command: [] - ## @param defaultBackend.args Override default container args (useful when using custom images) - ## - args: [] - ## @param defaultBackend.lifecycleHooks for the %%MAIN_CONTAINER_NAME%% container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param defaultBackend.extraEnvVars Array with extra environment variables to add to %%MAIN_CONTAINER_NAME%% nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param defaultBackend.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for %%MAIN_CONTAINER_NAME%% nodes - ## - extraEnvVarsCM: "" - ## @param defaultBackend.extraEnvVarsSecret Name of existing Secret containing extra env vars for %%MAIN_CONTAINER_NAME%% nodes - ## - extraEnvVarsSecret: "" - ## @param defaultBackend.extraVolumes Optionally specify extra list of additional volumes for the %%MAIN_CONTAINER_NAME%% pod(s) - ## - extraVolumes: [] - ## @param defaultBackend.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the %%MAIN_CONTAINER_NAME%% container(s) - ## - extraVolumeMounts: [] - ## @param defaultBackend.sidecars Add additional sidecar containers to the %%MAIN_CONTAINER_NAME%% pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param defaultBackend.initContainers Add additional init containers to the %%MAIN_CONTAINER_NAME%% pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - ## @param defaultBackend.affinity Affinity for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: defaultBackend.podAffinityPreset, defaultBackend.podAntiAffinityPreset, and defaultBackend.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param defaultBackend.nodeSelector Node labels for pod assignment - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param defaultBackend.tolerations Tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## Default backend Service parameters - ## - service: - ## @param defaultBackend.service.type Kubernetes Service type for default backend - ## - type: ClusterIP - ## @param defaultBackend.service.ports.http Default backend service HTTP port - ## - ports: - http: 80 - ## @param defaultBackend.service.annotations Annotations for the default backend service - ## - annotations: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param defaultBackend.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param defaultBackend.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. 
When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param defaultBackend.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. - ## - allowExternalEgress: true - ## @param defaultBackend.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param defaultBackend.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param defaultBackend.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param defaultBackend.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## Default backend Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - ## - pdb: - ## @param defaultBackend.pdb.create Enable/disable a Pod Disruption Budget creation for Default backend - ## - create: true - ## @param defaultBackend.pdb.minAvailable Minimum number/percentage of Default backend pods that should remain scheduled - ## - minAvailable: "" - ## @param defaultBackend.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `defaultBackend.pdb.minAvailable` and `defaultBackend.pdb.maxUnavailable` are empty. - ## - maxUnavailable: "" - ## @section Traffic exposure parameters - - ## Service parameters - ## - service: - ## @param service.type Kubernetes Service type for Controller - ## - type: LoadBalancer - ## @param service.ports [object] Service ports - ## - ports: - http: 80 - https: 443 - ## @param service.targetPorts [object] Map the controller service HTTP/HTTPS port - ## - targetPorts: - http: http - https: https - ## @param service.nodePorts [object] Specify the nodePort value(s) for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePorts: - http: "" - https: "" - tcp: {} - udp: {} - ## @param service.annotations Annotations for controller service - ## This can be used to set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - annotations: {} - ## @param service.labels Labels for controller service - ## - labels: {} - ## @param service.clusterIP Controller Internal Cluster Service IP (optional) - ## - clusterIP: "" - ## @param service.externalIPs Controller Service external IP addresses - ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips - ## - externalIPs: [] - ## @param service.ipFamilyPolicy Controller Service ipFamilyPolicy (optional, cloud specific) - ## This can be either SingleStack, PreferDualStack or RequireDualStack - ## ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services - ## - ipFamilyPolicy: "" - ## @param service.ipFamilies Controller Service ipFamilies (optional, cloud specific) - ## This can be either ["IPv4"], ["IPv6"], ["IPv4", "IPv6"] or ["IPv6", "IPv4"] - ## ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services - ## - ipFamilies: [] - ## @param service.loadBalancerIP Kubernetes LoadBalancerIP to request for Controller (optional, cloud specific) - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param service.loadBalancerSourceRanges List of IP CIDRs allowed access to load balancer (if supported) - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## - loadBalancerSourceRanges: [] - ## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param service.externalTrafficPolicy Set external traffic policy to: "Local" to preserve source IP on providers supporting it - ## Enable client source IP preservation - ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer - ## - externalTrafficPolicy: "" - ## @param service.healthCheckNodePort Set this to the managed health-check port the kube-proxy will expose. If blank, a random port in the `NodePort` range will be assigned - ## - healthCheckNodePort: 0 - ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
- ## - allowExternalEgress: true - ## @param networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) - ## - kubeAPIServerPorts: [443, 6443, 8443] - ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## @section RBAC parameters - - ## Pods Service Account - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - ## @param serviceAccount.create Enable the creation of a ServiceAccount for Controller pods - ## - create: true - ## @param serviceAccount.name Name of the created ServiceAccount - ## If not set and create is true, a name is generated using the metrics-server.fullname template - name: "" - ## @param serviceAccount.annotations Annotations for service account. - ## Only used if `create` is `true`. - ## - annotations: {} - ## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account - ## - automountServiceAccountToken: false - ## Role Based Access - ## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ - ## - rbac: - ## @param rbac.create Specifies whether RBAC rules should be created - ## - create: true - ## @param rbac.rules Custom RBAC rules - ## Example: - ## rules: - ## - apiGroups: - ## - "" - ## resources: - ## - pods - ## verbs: - ## - get - ## - list - ## - rules: [] - ## @section Other parameters - - ## Controller Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - ## - pdb: - ## @param pdb.create Enable/disable a Pod Disruption Budget creation for Controller - ## - create: true - ## @param pdb.minAvailable Minimum number/percentage of Controller pods that should remain scheduled - ## - minAvailable: "" - ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty. 
- ## - maxUnavailable: "" - ## Controller Autoscaling configuration - ## @param autoscaling.enabled Enable autoscaling for Controller - ## @param autoscaling.minReplicas Minimum number of Controller replicas - ## @param autoscaling.maxReplicas Maximum number of Controller replicas - ## @param autoscaling.targetCPU Target CPU utilization percentage - ## @param autoscaling.targetMemory Target Memory utilization percentage - ## - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 11 - targetCPU: "" - targetMemory: "" - ## @section Metrics parameters - - ## Prometheus exporter parameters - ## - metrics: - ## @param metrics.enabled Enable exposing Controller statistics - ## - enabled: false - ## Prometheus exporter service parameters - ## - service: - ## @param metrics.service.type Type of Prometheus metrics service to create - ## - type: ClusterIP - ## @param metrics.service.ports.metrics Service HTTP management port - ## - ports: - metrics: 9913 - ## @param metrics.service.annotations [object] Annotations for the Prometheus exporter service - ## - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ coalesce .Values.metrics.service.ports.metrics .Values.metrics.service.port }}" - ## @param metrics.service.labels Labels for the Prometheus exporter service - ## - labels: {} - ## Prometheus Operator ServiceMonitor configuration - ## - serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running - ## - namespace: "" - ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. - ## - jobLabel: "" - ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: 30s - ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## e.g: - ## scrapeTimeout: 10s - ## - scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - relabelings: [] - ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## e.g: - ## selector: - ## prometheus: my-prometheus - ## - selector: {} - ## @param metrics.serviceMonitor.annotations Extra annotations for the ServiceMonitor - ## - annotations: {} - ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor - ## - labels: {} - ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels - ## - honorLabels: false - ## @param metrics.prometheusRule.enabled Create PrometheusRules resource for scraping metrics using PrometheusOperator - ## @param metrics.prometheusRule.additionalLabels Used to pass Labels that are required by the 
Installed Prometheus Operator - ## @param metrics.prometheusRule.namespace Namespace which Prometheus is running in - ## @param metrics.prometheusRule.rules Rules to be prometheus in YAML format, check values for an example - ## - prometheusRule: - enabled: false - additionalLabels: {} - namespace: "" - rules: [] diff --git a/nvidia/helmrelease-nvidia-operator.yaml b/nvidia/helmrelease-nvidia-operator.yaml deleted file mode 100644 index 424eace..0000000 --- a/nvidia/helmrelease-nvidia-operator.yaml +++ /dev/null @@ -1,556 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: gpu-operator - namespace: nvidia-system -spec: - chart: - spec: - chart: gpu-operator - sourceRef: - kind: HelmRepository - name: nvidia-operator - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: gpu-operator - values: - # Default values for gpu-operator. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - platform: - openshift: false - - nfd: - enabled: true - nodefeaturerules: false - - psa: - enabled: false - - cdi: - enabled: false - default: false - - sandboxWorkloads: - enabled: false - defaultWorkload: "container" - - daemonsets: - labels: {} - annotations: {} - priorityClassName: system-node-critical - tolerations: - - key: nvidia.com/gpu - operator: Exists - effect: NoSchedule - # configuration for controlling update strategy("OnDelete" or "RollingUpdate") of GPU Operands - # note that driver Daemonset is always set with OnDelete to avoid unintended disruptions - updateStrategy: "RollingUpdate" - # configuration for controlling rolling update of GPU Operands - rollingUpdate: - # maximum number of nodes to simultaneously apply pod updates on. - # can be specified either as number or percentage of nodes. Default 1. - maxUnavailable: "1" - - validator: - repository: nvcr.io/nvidia/cloud-native - image: gpu-operator-validator - # If version is not specified, then default is to use chart.AppVersion - #version: "" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - args: [] - resources: {} - plugin: - env: - - name: WITH_WORKLOAD - value: "false" - - operator: - repository: nvcr.io/nvidia - image: gpu-operator - # If version is not specified, then default is to use chart.AppVersion - #version: "" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - priorityClassName: system-node-critical - defaultRuntime: docker - runtimeClass: nvidia - use_ocp_driver_toolkit: false - # cleanup CRD on chart un-install - cleanupCRD: false - # upgrade CRD on chart upgrade, requires --disable-openapi-validation flag - # to be passed during helm upgrade. 
- upgradeCRD: false - initContainer: - image: cuda - repository: nvcr.io/nvidia - version: 12.3.2-base-ubi8 - imagePullPolicy: IfNotPresent - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Equal" - value: "" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/control-plane" - operator: "Equal" - value: "" - effect: "NoSchedule" - annotations: - openshift.io/scc: restricted-readonly - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: "node-role.kubernetes.io/master" - operator: In - values: [""] - - weight: 1 - preference: - matchExpressions: - - key: "node-role.kubernetes.io/control-plane" - operator: In - values: [""] - logging: - # Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano') - timeEncoding: epoch - # Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity - level: info - # Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn) - # Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) - develMode: false - resources: - limits: - cpu: 500m - memory: 350Mi - requests: - cpu: 200m - memory: 100Mi - - mig: - strategy: single - - driver: - enabled: true - nvidiaDriverCRD: - enabled: false - deployDefaultCR: true - driverType: gpu - nodeSelector: {} - useOpenKernelModules: false - # use pre-compiled packages for NVIDIA driver installation. - # only supported for as a tech-preview feature on ubuntu22.04 kernels. - usePrecompiled: false - repository: nvcr.io/nvidia - image: driver - version: "550.54.15" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - startupProbe: - initialDelaySeconds: 60 - periodSeconds: 10 - # nvidia-smi can take longer than 30s in some cases - # ensure enough timeout is set - timeoutSeconds: 60 - failureThreshold: 120 - rdma: - enabled: false - useHostMofed: false - upgradePolicy: - # global switch for automatic upgrade feature - # if set to false all other options are ignored - autoUpgrade: true - # how many nodes can be upgraded in parallel - # 0 means no limit, all nodes will be upgraded in parallel - maxParallelUpgrades: 1 - # maximum number of nodes with the driver installed, that can be unavailable during - # the upgrade. Value can be an absolute number (ex: 5) or - # a percentage of total nodes at the start of upgrade (ex: - # 10%). Absolute number is calculated from percentage by rounding - # up. By default, a fixed value of 25% is used.' 
- maxUnavailable: 25% - # options for waiting on pod(job) completions - waitForCompletion: - timeoutSeconds: 0 - podSelector: "" - # options for gpu pod deletion - gpuPodDeletion: - force: false - timeoutSeconds: 300 - deleteEmptyDir: false - # options for node drain (`kubectl drain`) before the driver reload - # this is required only if default GPU pod deletions done by the operator - # are not sufficient to re-install the driver - drain: - enable: false - force: false - podSelector: "" - # It's recommended to set a timeout to avoid infinite drain in case non-fatal error keeps happening on retries - timeoutSeconds: 300 - deleteEmptyDir: false - manager: - image: k8s-driver-manager - repository: nvcr.io/nvidia/cloud-native - # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 - # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.7 - imagePullPolicy: IfNotPresent - env: - - name: ENABLE_GPU_POD_EVICTION - value: "true" - - name: ENABLE_AUTO_DRAIN - value: "false" - - name: DRAIN_USE_FORCE - value: "false" - - name: DRAIN_POD_SELECTOR_LABEL - value: "" - - name: DRAIN_TIMEOUT_SECONDS - value: "0s" - - name: DRAIN_DELETE_EMPTYDIR_DATA - value: "false" - env: [] - resources: {} - # Private mirror repository configuration - repoConfig: - configMapName: "" - # custom ssl key/certificate configuration - certConfig: - name: "" - # vGPU licensing configuration - licensingConfig: - configMapName: "" - nlsEnabled: true - # vGPU topology daemon configuration - virtualTopology: - config: "" - # kernel module configuration for NVIDIA driver - kernelModuleConfig: - name: "" - - toolkit: - enabled: true - repository: nvcr.io/nvidia/k8s - image: container-toolkit - version: v1.15.0-rc.4-ubuntu20.04 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: - - name: CONTAINERD_CONFIG - value: /var/lib/rancher/k3s/agent/etc/containerd/config.toml - - name: CONTAINERD_SOCKET - value: /run/k3s/containerd/containerd.sock - resources: {} - installDir: "/usr/local/nvidia" - - devicePlugin: - enabled: true - repository: nvcr.io/nvidia - image: k8s-device-plugin - version: v0.15.0-rc.2-ubi8 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - args: [] - env: - - name: PASS_DEVICE_SPECS - value: "true" - - name: FAIL_ON_INIT_ERROR - value: "true" - - name: DEVICE_LIST_STRATEGY - value: envvar - - name: DEVICE_ID_STRATEGY - value: uuid - - name: NVIDIA_VISIBLE_DEVICES - value: all - - name: NVIDIA_DRIVER_CAPABILITIES - value: all - resources: {} - # Plugin configuration - # Use "name" to either point to an existing ConfigMap or to create a new one with a list of configurations(i.e with create=true). - # Use "data" to build an integrated ConfigMap from a set of configurations as - # part of this helm chart. 
An example of setting "data" might be: - # config: - # name: device-plugin-config - # create: true - # data: - # default: |- - # version: v1 - # flags: - # migStrategy: none - # mig-single: |- - # version: v1 - # flags: - # migStrategy: single - # mig-mixed: |- - # version: v1 - # flags: - # migStrategy: mixed - config: - # Create a ConfigMap (default: false) - create: false - # ConfigMap name (either exiting or to create a new one with create=true above) - name: "" - # Default config name within the ConfigMap - default: "" - # Data section for the ConfigMap to create (i.e only applies when create=true) - data: {} - # MPS related configuration for the plugin - mps: - # MPS root path on the host - root: "/run/nvidia/mps" - - # standalone dcgm hostengine - dcgm: - # disabled by default to use embedded nv-hostengine by exporter - enabled: false - repository: nvcr.io/nvidia/cloud-native - image: dcgm - version: 3.3.3-1-ubuntu22.04 - imagePullPolicy: IfNotPresent - hostPort: 5555 - args: [] - env: [] - resources: {} - - dcgmExporter: - enabled: true - repository: nvcr.io/nvidia/k8s - image: dcgm-exporter - version: 3.3.5-3.4.0-ubuntu22.04 - imagePullPolicy: IfNotPresent - env: - - name: DCGM_EXPORTER_LISTEN - value: ":9400" - - name: DCGM_EXPORTER_KUBERNETES - value: "true" - - name: DCGM_EXPORTER_COLLECTORS - value: "/etc/dcgm-exporter/dcp-metrics-included.csv" - resources: {} - serviceMonitor: - enabled: false - interval: 15s - honorLabels: false - additionalLabels: {} - relabelings: [] - # - source_labels: - # - __meta_kubernetes_pod_node_name - # regex: (.*) - # target_label: instance - # replacement: $1 - # action: replace - - gfd: - enabled: true - repository: nvcr.io/nvidia - image: k8s-device-plugin - version: v0.15.0-rc.2-ubi8 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: - - name: GFD_SLEEP_INTERVAL - value: 60s - - name: GFD_FAIL_ON_INIT_ERROR - value: "true" - resources: {} - - migManager: - enabled: true - repository: nvcr.io/nvidia/cloud-native - image: k8s-mig-manager - version: v0.6.0-ubuntu20.04 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: - - name: WITH_REBOOT - value: "false" - resources: {} - config: - name: "default-mig-parted-config" - default: "all-disabled" - gpuClientsConfig: - name: "" - - nodeStatusExporter: - enabled: false - repository: nvcr.io/nvidia/cloud-native - image: gpu-operator-validator - # If version is not specified, then default is to use chart.AppVersion - #version: "" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - resources: {} - - gds: - enabled: false - repository: nvcr.io/nvidia/cloud-native - image: nvidia-fs - version: "2.17.5" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - args: [] - - gdrcopy: - enabled: false - repository: nvcr.io/nvidia/cloud-native - image: gdrdrv - version: "v2.4.1" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - args: [] - - vgpuManager: - enabled: false - repository: "" - image: vgpu-manager - version: "" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - resources: {} - driverManager: - image: k8s-driver-manager - repository: nvcr.io/nvidia/cloud-native - # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 - # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.7 - imagePullPolicy: IfNotPresent - env: - - name: ENABLE_GPU_POD_EVICTION - value: "false" - - name: ENABLE_AUTO_DRAIN - value: "false" - - vgpuDeviceManager: - 
enabled: true - repository: nvcr.io/nvidia/cloud-native - image: vgpu-device-manager - version: "v0.2.5" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - config: - name: "" - default: "default" - - vfioManager: - enabled: true - repository: nvcr.io/nvidia - image: cuda - version: 12.3.2-base-ubi8 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - resources: {} - driverManager: - image: k8s-driver-manager - repository: nvcr.io/nvidia/cloud-native - # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 - # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.7 - imagePullPolicy: IfNotPresent - env: - - name: ENABLE_GPU_POD_EVICTION - value: "false" - - name: ENABLE_AUTO_DRAIN - value: "false" - - kataManager: - enabled: false - config: - artifactsDir: "/opt/nvidia-gpu-operator/artifacts/runtimeclasses" - runtimeClasses: - - name: kata-qemu-nvidia-gpu - nodeSelector: {} - artifacts: - url: nvcr.io/nvidia/cloud-native/kata-gpu-artifacts:ubuntu22.04-535.54.03 - pullSecret: "" - - name: kata-qemu-nvidia-gpu-snp - nodeSelector: - "nvidia.com/cc.capable": "true" - artifacts: - url: nvcr.io/nvidia/cloud-native/kata-gpu-artifacts:ubuntu22.04-535.86.10-snp - pullSecret: "" - repository: nvcr.io/nvidia/cloud-native - image: k8s-kata-manager - version: v0.1.2 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - resources: {} - - sandboxDevicePlugin: - enabled: true - repository: nvcr.io/nvidia - image: kubevirt-gpu-device-plugin - version: v1.2.6 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - args: [] - env: [] - resources: {} - - ccManager: - enabled: false - defaultMode: "off" - repository: nvcr.io/nvidia/cloud-native - image: k8s-cc-manager - version: v0.1.1 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: - - name: CC_CAPABLE_DEVICE_IDS - value: "0x2339,0x2331,0x2330,0x2324,0x2322,0x233d" - resources: {} - - node-feature-discovery: - enableNodeFeatureApi: true - gc: - enable: true - replicaCount: 1 - serviceAccount: - name: node-feature-discovery - create: false - worker: - serviceAccount: - name: node-feature-discovery - # disable creation to avoid duplicate serviceaccount creation by master spec below - create: false - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Equal" - value: "" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/control-plane" - operator: "Equal" - value: "" - effect: "NoSchedule" - - key: nvidia.com/gpu - operator: Exists - effect: NoSchedule - config: - sources: - pci: - deviceClassWhitelist: - - "02" - - "0200" - - "0207" - - "0300" - - "0302" - deviceLabelFields: - - vendor - master: - serviceAccount: - name: node-feature-discovery - create: true - config: - extraLabelNs: ["nvidia.com"] - # noPublish: false - # resourceLabels: ["nvidia.com/feature-1","nvidia.com/feature-2"] - # enableTaints: false - # labelWhiteList: "nvidia.com/gpu" diff --git a/pgadmin/helmrelease-pgadmin.yaml b/pgadmin/helmrelease-pgadmin.yaml deleted file mode 100644 index 2515e99..0000000 --- a/pgadmin/helmrelease-pgadmin.yaml +++ /dev/null @@ -1,376 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: pgadmin - namespace: postgresql-system -spec: - chart: - spec: - chart: pgadmin4 - sourceRef: - kind: HelmRepository - name: runix - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: pgadmin - values: - # Default values for pgAdmin4. 
- - replicaCount: 1 - - ## pgAdmin4 container image - ## - image: - registry: docker.io - repository: dpage/pgadmin4 - # Overrides the image tag whose default is the chart appVersion. - tag: "" - pullPolicy: IfNotPresent - - ## Deployment annotations - annotations: {} - - ## priorityClassName - priorityClassName: "" - - ## Deployment entrypoint override - ## Useful when there's a requirement to modify container's default: - ## https://www.vaultproject.io/docs/platform/k8s/injector/examples#environment-variable-example - ## ref: https://github.com/postgres/pgadmin4/blob/master/Dockerfile#L206 - # command: "['/bin/sh', '-c', 'source /vault/secrets/config && ']" - - service: - type: LoadBalancer - clusterIP: "" - loadBalancerIP: "" - port: 80 - targetPort: 80 - # targetPort: 4181 To be used with a proxy extraContainer - portName: http - - annotations: {} - ## Special annotations at the service level, e.g - ## this will set vnet internal IP's rather than public ip's - ## service.beta.kubernetes.io/azure-load-balancer-internal: "true" - - ## Specify the nodePort value for the service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Pod Service Account - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - # Specifies whether a service account should be created - create: false - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - # Opt out of API credential automounting. - # If you don't want the kubelet to automatically mount a ServiceAccount's API credentials, - # you can opt out of the default behavior - automountServiceAccountToken: false - - ## Strategy used to replace old Pods by new ones - ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy - ## - strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - - ## Server definitions will be loaded at launch time. This allows connection - ## information to be pre-loaded into the instance of pgAdmin4 in the container. - ## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/import_export_servers.html - ## - serverDefinitions: - ## If true, server definitions will be created - ## - enabled: false - - ## The resource type to use for deploying server definitions. 
- ## Can either be ConfigMap or Secret - resourceType: ConfigMap - - servers: - # firstServer: - # Name: "Minimally Defined Server" - # Group: "Servers" - # Port: 5432 - # Username: "postgres" - # Host: "localhost" - # SSLMode: "prefer" - # MaintenanceDB: "postgres" - - networkPolicy: - enabled: true - - ## Ingress - ## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ - ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # ingressClassName: "" - hosts: - - host: chart-example.local - paths: - - path: / - pathType: Prefix - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - - # Additional config maps to be mounted inside a container - # Can be used to map config maps for sidecar as well - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/ssl/certs - # subPath: ca-certificates.crt # (optional) - # configMap: certs-configmap - # readOnly: true - - extraSecretMounts: [] - # - name: pgpassfile - # secret: pgpassfile - # subPath: pgpassfile - # mountPath: "/var/lib/pgadmin/storage/pgadmin/file.pgpass" - # readOnly: true - - ## Additional volumes to be mounted inside a container - ## - extraVolumeMounts: [] - - ## Specify additional containers in extraContainers. - ## For example, to add an authentication proxy to a pgadmin4 pod. - extraContainers: | - # - name: proxy - # image: quay.io/gambol99/keycloak-proxy:latest - # args: - # - -provider=github - # - -client-id= - # - -client-secret= - # - -github-org= - # - -email-domain=* - # - -cookie-secret= - # - -http-address=http://0.0.0.0:4181 - # - -upstream-url=http://127.0.0.1:3000 - # ports: - # - name: proxy-web - # containerPort: 4181 - - ## @param existingSecret Name of existing secret to use for default pgadmin credentials. `env.password` will be ignored and picked up from this secret. - ## - existingSecret: "" - ## @param secretKeys.pgadminPasswordKey Name of key in existing secret to use for default pgadmin credentials. Only used when `existingSecret` is set. - ## - secretKeys: - pgadminPasswordKey: password - - ## pgAdmin4 startup configuration - ## Values in here get injected as environment variables - ## Needed chart reinstall for apply changes - env: - # can be email or nickname - email: tyler@clortox.com - password: defaultpassword - # pgpassfile: /var/lib/pgadmin/storage/pgadmin/file.pgpass - - # set context path for application (e.g. 
/pgadmin4/*) - # contextPath: /pgadmin4 - - ## If True, allows pgAdmin4 to create session cookies based on IP address - ## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html - ## - enhanced_cookie_protection: "False" - - ## Add custom environment variables that will be injected to deployment - ## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/container_deployment.html - ## - variables: [] - # - name: PGADMIN_LISTEN_ADDRESS - # value: "0.0.0.0" - # - name: PGADMIN_LISTEN_PORT - # value: "8080" - - ## Additional environment variables from ConfigMaps - envVarsFromConfigMaps: [] - # - array-of - # - config-map-names - - ## Additional environment variables from Secrets - envVarsFromSecrets: [] - # - array-of - # - secret-names - - persistentVolume: - ## If true, pgAdmin4 will create/use a Persistent Volume Claim - ## If false, use emptyDir - enabled: true - - ## pgAdmin4 Persistent Volume Claim annotations - ## - annotations: {} - - ## pgAdmin4 Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - accessModes: - - ReadWriteOnce - - ## pgAdmin4 Persistent Volume Size - ## - size: 1Gi - storageClass: "longhorn" - - ## pgAdmin4 Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - #existingClaim: "pgadmin-pvc" - - ## Additional volumes to be added to the deployment - ## - extraVolumes: [] - - ## Security context to be added to pgAdmin4 pods - ## - securityContext: - runAsUser: 5050 - runAsGroup: 5050 - fsGroup: 5050 - - containerSecurityContext: - enabled: false - allowPrivilegeEscalation: false - - ## pgAdmin4 readiness and liveness probe initial delay and timeout - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## - livenessProbe: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - successThreshold: 1 - failureThreshold: 3 - - readinessProbe: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - successThreshold: 1 - failureThreshold: 3 - - ## Required to be enabled pre pgAdmin4 4.16 release, to set the ACL on /var/lib/pgadmin. - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## - VolumePermissions: - ## If true, enables an InitContainer to set permissions on /var/lib/pgadmin. - ## - enabled: false - - ## @param extraDeploy list of extra manifests to deploy - ## - extraDeploy: [] - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: | - # - name: add-folder-for-pgpass - # image: "dpage/pgadmin4:latest" - # command: ["/bin/mkdir", "-p", "/var/lib/pgadmin/storage/pgadmin"] - # volumeMounts: - # - name: pgadmin-data - # mountPath: /var/lib/pgadmin - # securityContext: - # runAsUser: 5050 - - containerPorts: - http: 80 - - resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - - ## Horizontal Pod Autoscaling - ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - # - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - - ## Node labels for pgAdmin4 pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - - ## Pod affinity - ## - affinity: {} - - ## Pod annotations - ## - podAnnotations: {} - - ## Pod labels - ## - podLabels: {} - # key1: value1 - # key2: value2 - - # -- The name of the Namespace to deploy - # If not set, `.Release.Namespace` is used - namespace: null - - init: - ## Init container resources - ## - resources: {} - - ## Define values for chart tests - test: - ## Container image for test-connection.yaml - image: - registry: docker.io - repository: busybox - tag: latest - ## Resources request/limit for test-connection Pod - resources: {} - # limits: - # cpu: 50m - # memory: 32Mi - # requests: - # cpu: 25m - # memory: 16Mi - ## Security context for test-connection Pod - securityContext: - runAsUser: 5051 - runAsGroup: 5051 - fsGroup: 5051 diff --git a/plex/plex-deployment.yaml b/plex/plex-deployment.yaml deleted file mode 100644 index 71952a3..0000000 --- a/plex/plex-deployment.yaml +++ /dev/null @@ -1,100 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: plex - namespace: plex-ns - annotations: - force-recreate: true -spec: - replicas: 1 - selector: - matchLabels: - app: plex - template: - metadata: - labels: - app: plex - spec: - nodeSelector: - kubernetes.io/hostname: gluttony - containers: - - name: plex - image: plexinc/pms-docker:public - env: - - name: TZ - value: EST - - name: PLEX_UID - value: "1000" - - name: PLEX_GID - value: "1000" - - name: PLEX_CLAIM - valueFrom: - secretKeyRef: - name: plex-claim - key: PLEX_CLAIM - ports: - - containerPort: 32400 - - containerPort: 8234 - - containerPort: 32469 - - containerPort: 1900 - - containerPort: 32410 - - containerPort: 32412 - - containerPort: 32413 - - containerPort: 32414 - volumeMounts: - - name: plex-config - mountPath: /config - - name: plex-media - mountPath: /data - - # Sidecar providing access to upload/view/download raw media files - - name: filebrowswer - image: git.clortox.com/infrastructure/filebrowser:v1.0.1 - env: - - name: ADMIN_PASS - valueFrom: - secretKeyRef: - name: filebrowser-secret - key: ADMIN-PASS - - name: DEFAULT_USERNAME - value: "default" - - name: DEFAULT_PASSWORD - valueFrom: - secretKeyRef: - name: filebrowser-secret - key: DEFAULT-PASS - - name: BRANDING_NAME - value: "Media Storage" - - name: AUTH_METHOD - value: "proxy" - - name: AUTH_HEADER - value: "X-Auth-User" - - name: PERM_ADMIN - value: "false" - - name: PERM_EXECUTE - value: "false" - - name: PERM_CREATE - value: "true" - - name: PERM_RENAME - value: "true" - - name: PERM_MODIFY - value: "true" - - name: PERM_DELETE - value: "false" - - name: PERM_SHARE - value: "true" - - name: PERM_DOWNLOAD - value: "true" - volumeMounts: - - name: plex-media - mountPath: /srv - ports: - - containerPort: 80 - - volumes: - - name: plex-config - persistentVolumeClaim: - claimName: plex-pvc-config - - name: plex-media - persistentVolumeClaim: - claimName: plex-pvc-media diff --git 
a/plex/plex-pv-media.yaml b/plex/plex-pv-media.yaml deleted file mode 100644 index 90e9d38..0000000 --- a/plex/plex-pv-media.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: plex-pv-media -spec: - storageClassName: local-storage - capacity: - storage: 18000Gi - accessModes: - - ReadWriteMany - hostPath: - path: "/Main/Media" - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - gluttony diff --git a/plex/plex-pvc-config.yaml b/plex/plex-pvc-config.yaml deleted file mode 100644 index c010d33..0000000 --- a/plex/plex-pvc-config.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: plex-pvc-config - namespace: plex-ns -spec: - storageClassName: longhorn - accessModes: - - ReadWriteMany - resources: - requests: - storage: 200Gi diff --git a/plex/plex-pvc-media.yaml b/plex/plex-pvc-media.yaml deleted file mode 100644 index c5f9816..0000000 --- a/plex/plex-pvc-media.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: plex-pvc-media - namespace: plex-ns -spec: - volumeName: plex-pv-media - storageClassName: local-storage - accessModes: - - ReadWriteMany - resources: - requests: - storage: 18000Gi diff --git a/plex/plex-secret.yaml b/plex/plex-secret.yaml deleted file mode 100644 index 72147b5..0000000 --- a/plex/plex-secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: plex-claim - namespace: plex-ns -spec: - encryptedData: - PLEX_CLAIM: AgBKYZur5cfL/WOGEs6sciEoqE4Jf5+nNVXTzLYni7bGkifPed7b4PTPK0+Mdao3Z6AkGceYnNlYMOUoEJBjC+5aEWf1+enJHmlbjwqYrRAIyG90nlA79e35dRHd6XcEoxKxshOL9HNQbywXaSdk/FlXSz3y2JbiOxU+x3Tnkl4vJRVBBncL+B2EFpNwF5tK9A7s3pYXvrVSPYJuk5yUH8USyu9EOUrS3wzqggq5Y2zTXEngtXRl42qPHWHFtYqeE+2wlutPKvcqtnoIxVr88910NFvv4+h1PSrJztEPgUiStE4APpehZlF8X0UpGK4kF0ozsDHY0NJoJpa8rK69vYfj/y1EX9RBs5s076lfwHtpItaKxQ4+a/dnk0vs2qLj4AEG57iKrPbnTicreYTCFyeSUKwTKUTXc+8PALWnu8sk4JAR1c0lQhLnPbHzX/yURlYzMGTlCue03hllS4r7ZknOatW7HjAQveqn6j0s893Ntbo6vAr8Ooe1EjevSED1cA7OUaQnYLge7zQjzlL928raWhu44ltRQG0xswyQpbphBJCIcGqxEq/yVozTKb5X+J7Jir1xmE4x9NG/PAkUVtlrNhM7yjfF/2xZITyqNlAHJlU3UfQg4yI18DB6RdgvzLCXEqhvg6brtjQyhRtg4PowVvB3nCZPSH9Qjn2u1aeeSqEypCyQRe77X7awc5hLkAQRjyO+I1N5Z/S/x1FTabZfhR0n7/MYHbQTsA== - template: - metadata: - creationTimestamp: null - name: plex-claim - namespace: plex-ns ---- diff --git a/plex/plex-service.yaml b/plex/plex-service.yaml deleted file mode 100644 index e999ada..0000000 --- a/plex/plex-service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: plex-service - namespace: plex-ns -spec: - selector: - app: plex - type: LoadBalancer - ports: - - name: plex - protocol: TCP - port: 32400 - targetPort: 32400 - - name: filebrowswer - protocol: TCP - port: 80 - targetPort: 80 diff --git a/plex/sealed-secret.yaml b/plex/sealed-secret.yaml deleted file mode 100644 index d35c7b0..0000000 --- a/plex/sealed-secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: filebrowser-secret - namespace: plex-ns -spec: - encryptedData: - ADMIN-PASS: 
AgAj7jrppKzPFfnYDV+FEuXQx9lrkppWFElR3DjtR017tpBQs6/KjZYU1TX81TkNh8cONg4mGB72zvk60Yft5b5TSgZWuKA4qTXYEoFusyMR3wyOU/Ft7ZMk7IAr/7Hi9dDAh3CkmrQ2lQ3C5hRlfTljaSxqC9abmEZIeSo7OhrkX8YIvFhanBMbPenfkulSsK38dp3PfIC8kntRV1u37Z7CxovVu+Kn7IoRC4sKa3gcdJ5lIA/Aq3rln8atmzZcPGPzjIAPY4P72mjPaeIvzqzLsNMcecIIr20MyLTOG/eI8WrM+WC+dgyvj/Pjq/hzTW1QD3z4jZW224o4ghKiPr6mW0BbN6KBBqv/JFtpBqiYgGi/ADBVxTG7YUA+FcT7YA6nuxlqg+TMpwqP8ZJBmghosBeqmBndjKUjpexoihmy+XTDbEr7e8RDpOdL9jS9hGPt47cmFITSFSEQIGM6kOtdYWcMw6+aKkTt5Ul4bUfV9TXultGyNYITibATXWNqbRfZDjYVrWOIfoVJOe19N3WZg9R4UeKzow3RkoJvn3MUTYOOrzr9Csx+VxUMeGxLPFftedUIy8zzXaqL/0OFogQZ2P+mesiYxc67Z4VS2u0+iCLkJdUDYnM+2q6TRQMI2nP40ko62xDuSE2BDcufqsKfHoddswlYDyelLVqJKee+P3sUoxcblYlv6kqz1GbVhBKQrHzFphx72KG219N9zwjOI6w8V6NXHUEFblQ3gt9RPA== - DEFAULT-PASS: AgDXxxyMBUb7Q0J8LvxPXNEAz75c1JcS7xL3rN7E2Wg7MLsZHj2/0hRf9jaCCyyVnr/Pabbdmjb0nth4Dlm50tLWH+rU7KtLPwHB0pMVi9zSxKBdyvOJurVdY/nlbSuanxSL37rBOrwRQRv6t8w/IIs4R9GEaFjxKoIJTuV8JRu3r62FiL/3o2zyok9UYcLxw2h9H7B9yn9wXn5CAFk0M4jNRUns3oU7d0/hPbfwC216vU0ZIdga8yYlZw3zvVz54mX2XECnHWZT8gPK1w6v8AEca/kDFuVFBi63OdXFgjBHCa/uSs5wifzNPBzcRA+A8s/JgoSHEeMXTmBsMOlihSSz0kSGHS/rUdu6nZamVZfzCWOHskb3RVjs23yNJsSEDlYR/AMeAjnkMDvMe5b/X/eV1AOYkAQ/pACrSk2aG+4kLmLoLYXaeDVf8pTHj4yOvdffWk39ClCqIOyWF2+//N02lDepVwis498cL+7I4kEVXqy9FugUCsbtzxVXX6OHym4KpBZpAmrMqH83rC6CtU4orF6gjmTKCe1Ufq5GmsQgFFZTZYTexnbeTKXz6yw+RbHLTGdsaJnMaAQx5uB3khO9Pkge7/HLDmXEx+mtaaTvk7AF8PWjFJSQZEWxVSCr6O1Zd4LKsg0EP6Mrk+s+8OOfGb42e3wfJ6gY7KlTBBu8KmKnHRQl9uoMVO7y5PWwl+B3Wam5j78ggV4L9UmiEw6gYvrc8rmQWZqQbuw7pClQ3Q== - template: - metadata: - creationTimestamp: null - name: filebrowser-secret - namespace: plex-ns diff --git a/postgresql/helmrelease-postgresql.yaml b/postgresql/helmrelease-postgresql.yaml deleted file mode 100644 index 2d0f691..0000000 --- a/postgresql/helmrelease-postgresql.yaml +++ /dev/null @@ -1,1622 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: postgresql - namespace: postgresql-system -spec: - chart: - spec: - chart: postgresql - sourceRef: - kind: HelmRepository - name: bitnami - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: postgresql - values: - # Copyright VMware, Inc. - # SPDX-License-Identifier: APACHE-2.0 - - ## @section Global parameters - ## Please, note that this will override the parameters, including dependencies, configured to use the global value - ## - global: - ## @param global.imageRegistry Global Docker image registry - ## - imageRegistry: "" - ## @param global.imagePullSecrets Global Docker registry secret names as an array - ## e.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - ## @param global.storageClass Global StorageClass for Persistent Volume(s) - ## - storageClass: "" - postgresql: - ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) - ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) - ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) - ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) - ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). - ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). 
Only used when `global.postgresql.auth.existingSecret` is set. - ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. - ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. - ## - auth: - #postgresPassword: "" - #username: "" - #password: "" - database: "" - existingSecret: "postgresql-default-credentials" - secretKeys: - adminPasswordKey: "" - userPasswordKey: "" - replicationPasswordKey: "" - ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) - ## - service: - ports: - postgresql: "" - - ## @section Common parameters - ## - - ## @param kubeVersion Override Kubernetes version - ## - kubeVersion: "" - ## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) - ## - nameOverride: "" - ## @param fullnameOverride String to fully override common.names.fullname template - ## - fullnameOverride: "" - ## @param clusterDomain Kubernetes Cluster Domain - ## - clusterDomain: cluster.local - ## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) - ## - extraDeploy: [] - ## @param commonLabels Add labels to all the deployed resources - ## - commonLabels: {} - ## @param commonAnnotations Add annotations to all the deployed resources - ## - commonAnnotations: {} - ## Enable diagnostic mode in the statefulset - ## - diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) - ## - enabled: false - ## @param diagnosticMode.command Command to override all containers in the statefulset - ## - command: - - sleep - ## @param diagnosticMode.args Args to override all containers in the statefulset - ## - args: - - infinity - - ## @section PostgreSQL common parameters - ## - - ## Bitnami PostgreSQL image version - ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ - ## @param image.registry PostgreSQL image registry - ## @param image.repository PostgreSQL image repository - ## @param image.tag PostgreSQL image tag (immutable tags are recommended) - ## @param image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param image.pullPolicy PostgreSQL image pull policy - ## @param image.pullSecrets Specify image pull secrets - ## @param image.debug Specify if debug values should be set - ## - image: - registry: git.clortox.com - repository: infrastructure/gluttony-cluster-postgresql - tag: v1.2.0 - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
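## Illustrative sketch, not from the original manifests: `global.postgresql.auth.existingSecret`
## above points the chart at a pre-created Secret named `postgresql-default-credentials`.
## Assuming the chart's default key names (shown further down under `auth.secretKeys`), that
## Secret would look roughly like the separate manifest below; the values are placeholders,
## and in this repo it would more plausibly be delivered as a SealedSecret.
apiVersion: v1
kind: Secret
metadata:
  name: postgresql-default-credentials
  namespace: postgresql-system
type: Opaque
stringData:
  postgres-password: "<admin password>"            # adminPasswordKey
  password: "<application user password>"          # userPasswordKey
  replication-password: "<replication password>"   # replicationPasswordKey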
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Set to true if you would like to see extra information on logs - ## - debug: false - ## Authentication parameters - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run - ## - auth: - ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user - ## - enablePostgresUser: true - ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided - ## - postgresPassword: "" - ## @param auth.username Name for a custom user to create - ## - username: "" - ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided - ## - password: "" - ## @param auth.database Name for a custom database to create - ## - database: "" - ## @param auth.replicationUsername Name of the replication user - ## - replicationUsername: repl_user - ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided - ## - replicationPassword: "" - ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. - ## - existingSecret: "" - ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. - ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. - ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. - ## - secretKeys: - adminPasswordKey: postgres-password - userPasswordKey: password - replicationPasswordKey: replication-password - ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable - ## - usePasswordFiles: false - ## @param architecture PostgreSQL architecture (`standalone` or `replication`) - ## - architecture: standalone - ## Replication configuration - ## Ignored if `architecture` is `standalone` - ## - replication: - ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` - ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. - ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT - ## - synchronousCommit: "off" - numSynchronousReplicas: 0 - ## @param replication.applicationName Cluster application name. 
Useful for advanced replication settings - ## - applicationName: my_application - ## @param containerPorts.postgresql PostgreSQL container port - ## - containerPorts: - postgresql: 5432 - ## Audit settings - ## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing - ## @param audit.logHostname Log client hostnames - ## @param audit.logConnections Add client log-in operations to the log file - ## @param audit.logDisconnections Add client log-outs operations to the log file - ## @param audit.pgAuditLog Add operations to log using the pgAudit extension - ## @param audit.pgAuditLogCatalog Log catalog using pgAudit - ## @param audit.clientMinMessages Message log level to share with the user - ## @param audit.logLinePrefix Template for log line prefix (default if not set) - ## @param audit.logTimezone Timezone for the log timestamps - ## - audit: - logHostname: false - logConnections: false - logDisconnections: false - pgAuditLog: "" - pgAuditLogCatalog: "off" - clientMinMessages: error - logLinePrefix: "" - logTimezone: "" - ## LDAP configuration - ## @param ldap.enabled Enable LDAP support - ## DEPRECATED ldap.url It will removed in a future, please use 'ldap.uri' instead - ## @param ldap.server IP address or name of the LDAP server. - ## @param ldap.port Port number on the LDAP server to connect to - ## @param ldap.prefix String to prepend to the user name when forming the DN to bind - ## @param ldap.suffix String to append to the user name when forming the DN to bind - ## DEPRECATED ldap.baseDN It will removed in a future, please use 'ldap.basedn' instead - ## DEPRECATED ldap.bindDN It will removed in a future, please use 'ldap.binddn' instead - ## DEPRECATED ldap.bind_password It will removed in a future, please use 'ldap.bindpw' instead - ## @param ldap.basedn Root DN to begin the search for the user in - ## @param ldap.binddn DN of user to bind to LDAP - ## @param ldap.bindpw Password for the user to bind to LDAP - ## DEPRECATED ldap.search_attr It will removed in a future, please use 'ldap.searchAttribute' instead - ## DEPRECATED ldap.search_filter It will removed in a future, please use 'ldap.searchFilter' instead - ## @param ldap.searchAttribute Attribute to match against the user name in the search - ## @param ldap.searchFilter The search filter to use when doing search+bind authentication - ## @param ldap.scheme Set to `ldaps` to use LDAPS - ## DEPRECATED ldap.tls as string is deprecated please use 'ldap.tls.enabled' instead - ## @param ldap.tls.enabled Se to true to enable TLS encryption - ## - ldap: - enabled: false - server: "" - port: "" - prefix: "" - suffix: "" - basedn: "" - binddn: "" - bindpw: "" - searchAttribute: "" - searchFilter: "" - scheme: "" - tls: - enabled: false - ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. - ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html - ## - uri: "" - ## @param postgresqlDataDir PostgreSQL data dir folder - ## - postgresqlDataDir: /bitnami/postgresql/data - ## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) - ## - postgresqlSharedPreloadLibraries: "pgaudit" - ## Start PostgreSQL pod(s) without limitations on shm memory. 
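## Illustrative sketch, not from the original manifests: with `postgresqlSharedPreloadLibraries`
## set to `pgaudit` above, the `audit` block is where statement logging would actually be turned
## on. The pgAudit classes below are assumed examples, not values this repo configured.
audit:
  logConnections: true
  logDisconnections: true
  pgAuditLog: "write, ddl"   # log data-modifying and DDL statements via pgAudit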
- ## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` - ## ref: https://github.com/docker-library/postgres/issues/416 - ## ref: https://github.com/containerd/containerd/issues/3654 - ## - shmVolume: - ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) - ## - enabled: true - ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs - ## Note: the size of the tmpfs counts against container's memory limit - ## e.g: - ## sizeLimit: 1Gi - ## - sizeLimit: "" - ## TLS configuration - ## - tls: - ## @param tls.enabled Enable TLS traffic support - ## - enabled: false - ## @param tls.autoGenerated Generate automatically self-signed TLS certificates - ## - autoGenerated: false - ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's - ## - preferServerCiphers: true - ## @param tls.certificatesSecret Name of an existing secret that contains the certificates - ## - certificatesSecret: "" - ## @param tls.certFilename Certificate filename - ## - certFilename: "" - ## @param tls.certKeyFilename Certificate key filename - ## - certKeyFilename: "" - ## @param tls.certCAFilename CA Certificate filename - ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate - ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html - ## - certCAFilename: "" - ## @param tls.crlFilename File containing a Certificate Revocation List - ## - crlFilename: "" - - ## @section PostgreSQL Primary parameters - ## - primary: - ## @param primary.name Name of the primary database (eg primary, master, leader, ...) - ## - name: primary - ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap - ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html - ## - configuration: "" - ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration - ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html - ## e.g:# - ## pgHbaConfiguration: |- - ## local all all trust - ## host all all localhost trust - ## host mydatabase mysuser 192.168.0.0/24 md5 - ## - pgHbaConfiguration: "" - ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration - ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored - ## - existingConfigmap: "" - ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf - ## - extendedConfiguration: "" - ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration - ## NOTE: `primary.extendedConfiguration` will be ignored - ## - existingExtendedConfigmap: "" - ## Initdb configuration - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments - ## - initdb: - ## @param primary.initdb.args PostgreSQL initdb extra arguments - ## - args: "" - ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log - ## - postgresqlWalDir: "" - ## @param primary.initdb.scripts Dictionary of initdb scripts - ## Specify dictionary of scripts to be run at first boot - ## e.g: - ## scripts: - ## my_init_script.sh: | 
- ## #!/bin/sh - ## echo "Do something." - ## - scripts: {} - ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot - ## NOTE: This will override `primary.initdb.scripts` - ## - scriptsConfigMap: "" - ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) - ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` - ## - scriptsSecret: "" - ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts - ## - user: "" - ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts - ## - password: "" - ## Configure current cluster's primary server to be the standby server in other cluster. - ## This will allow cross cluster replication and provide cross cluster high availability. - ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. - ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not - ## @param primary.standby.primaryHost The Host of replication primary in the other cluster - ## @param primary.standby.primaryPort The Port of replication primary in the other cluster - ## - standby: - enabled: false - primaryHost: "" - primaryPort: "" - ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes - ## - extraEnvVarsCM: "" - ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes - ## - extraEnvVarsSecret: "" - ## @param primary.command Override default container command (useful when using custom images) - ## - command: [] - ## @param primary.args Override default container args (useful when using custom images) - ## - args: [] - ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers - ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers - ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe 
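## Illustrative sketch, not from the original manifests: `primary.initdb.scripts` above accepts a
## map of file name to script content executed on first boot. A hypothetical entry might look like
## this; the database, role, and password are placeholders.
primary:
  initdb:
    scripts:
      00-create-example-db.sql: |
        CREATE DATABASE example_db;
        CREATE ROLE example_user WITH LOGIN PASSWORD 'change-me';
        GRANT ALL PRIVILEGES ON DATABASE example_db TO example_user;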
- ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers - ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe - ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param primary.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param primary.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup - ## - lifecycleHooks: {} - ## PostgreSQL Primary resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers - ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers - ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers - ## - resources: - limits: {} - requests: - memory: 256Mi - cpu: 250m - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param primary.podSecurityContext.enabled Enable security context - ## @param primary.podSecurityContext.fsGroup Group ID for the pod - ## - podSecurityContext: - enabled: true - fsGroup: 1001 - ## Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param primary.containerSecurityContext.enabled Enable container security context - ## @param primary.containerSecurityContext.runAsUser User ID for the container - ## @param primary.containerSecurityContext.runAsGroup Group ID for the container - ## @param primary.containerSecurityContext.runAsNonRoot Set runAsNonRoot for the container - ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation for the container - ## @param primary.containerSecurityContext.seccompProfile.type Set seccompProfile.type for the container - ## @param primary.containerSecurityContext.capabilities.drop Set capabilities.drop for the container - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param primary.hostAliases PostgreSQL primary pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) - ## - hostNetwork: false - ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod 
(postgresql primary) - ## - hostIPC: false - ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) - ## - labels: {} - ## @param primary.annotations Annotations for PostgreSQL primary pods - ## - annotations: {} - ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) - ## - podLabels: {} - ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) - ## - podAnnotations: {} - ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## PostgreSQL Primary node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param primary.affinity Affinity for PostgreSQL primary pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) - ## - priorityClassName: "" - ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
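## Illustrative sketch, not from the original manifests: the scheduling knobs above are left at
## their defaults. Pinning the primary to one node, as the Plex deployment earlier in this diff
## does, would look roughly like this; the hostname is reused purely as an example.
primary:
  nodeSelector:
    kubernetes.io/hostname: gluttony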
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods - ## - terminationGracePeriodSeconds: "" - ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type - ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - rollingUpdate: {} - ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) - ## - extraVolumeMounts: [] - ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) - ## - extraVolumes: [] - ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) - ## Example - ## - ## initContainers: - ## - name: do-something - ## image: busybox - ## command: ['do', 'something'] - ## - initContainers: [] - ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) - ## - extraPodSpec: {} - ## PostgreSQL Primary service configuration - ## - service: - ## @param primary.service.type Kubernetes Service type - ## - type: ClusterIP - ## @param primary.service.ports.postgresql PostgreSQL service port - ## - ports: - postgresql: 5432 - ## Node ports to expose - ## NOTE: choose port between <30000-32767> - ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePorts: - postgresql: "" - ## @param primary.service.clusterIP Static clusterIP or None for headless services - ## e.g: - ## clusterIP: None - ## - clusterIP: "" - ## @param primary.service.annotations Annotations for PostgreSQL primary service - ## - annotations: {} - ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` - ## Set the LoadBalancer service type to internal only - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param primary.service.externalTrafficPolicy Enable client source IP preservation - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service - ## - extraPorts: [] - ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## 
If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Headless service properties - ## - headless: - ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service - ## - annotations: {} - ## PostgreSQL Primary persistence configuration - ## - persistence: - ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC - ## - enabled: true - ## @param primary.persistence.existingClaim Name of an existing PVC to use - ## - existingClaim: "data-postgresql-0" - ## @param primary.persistence.mountPath The path the volume will be mounted at - ## Note: useful when using custom PostgreSQL images - ## - mountPath: /bitnami/postgresql - ## @param primary.persistence.subPath The subdirectory of the volume to mount to - ## Useful in dev environments and one PV for multiple services - ## - subPath: "" - ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClass: "longhorn" - ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume - ## - accessModes: - - ReadWriteOnce - ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume - ## - size: 20Gi - ## @param primary.persistence.annotations Annotations for the PVC - ## - annotations: {} - ## @param primary.persistence.labels Labels for the PVC - ## - labels: {} - ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param primary.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## PostgreSQL Primary Persistent Volume Claim Retention Policy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## - persistentVolumeClaimRetentionPolicy: - ## @param primary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for Primary Statefulset - ## - enabled: true - ## @param primary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## - whenScaled: Retain - ## @param primary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - whenDeleted: Retain - - ## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`) - ## - readReplicas: - ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...) 
- ## - name: read - ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas - ## - replicaCount: 1 - ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration) - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf - ## - extendedConfiguration: "" - ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes - ## - extraEnvVarsCM: "" - ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes - ## - extraEnvVarsSecret: "" - ## @param readReplicas.command Override default container command (useful when using custom images) - ## - command: [] - ## @param readReplicas.args Override default container args (useful when using custom images) - ## - args: [] - ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers - ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers - ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers - ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe - ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## 
@param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup - ## - lifecycleHooks: {} - ## PostgreSQL read only resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers - ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers - ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers - ## - resources: - limits: {} - requests: - memory: 256Mi - cpu: 250m - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param readReplicas.podSecurityContext.enabled Enable security context - ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod - ## - podSecurityContext: - enabled: true - fsGroup: 1001 - ## Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param readReplicas.containerSecurityContext.enabled Enable container security context - ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container - ## @param readReplicas.containerSecurityContext.runAsGroup Group ID for the container - ## @param readReplicas.containerSecurityContext.runAsNonRoot Set runAsNonRoot for the container - ## @param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation for the container - ## @param readReplicas.containerSecurityContext.seccompProfile.type Set seccompProfile.type for the container - ## @param readReplicas.containerSecurityContext.capabilities.drop Set capabilities.drop for the container - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) - ## - hostNetwork: false - ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) - ## - hostIPC: false - ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) - ## - labels: {} - ## @param readReplicas.annotations Annotations for PostgreSQL read only pods - ## - annotations: {} - ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) - ## - podLabels: {} - ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) - ## - podAnnotations: {} - ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## PostgreSQL read only node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) - ## - priorityClassName: "" - ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
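## Illustrative sketch, not from the original manifests: this entire readReplicas block is inert
## while `architecture: standalone` is set above. Enabling replication would amount to roughly
## the following; the replica count is an assumed example.
architecture: replication
readReplicas:
  replicaCount: 2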
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods - ## - terminationGracePeriodSeconds: "" - ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type - ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - rollingUpdate: {} - ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) - ## - extraVolumeMounts: [] - ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) - ## - extraVolumes: [] - ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) - ## Example - ## - ## initContainers: - ## - name: do-something - ## image: busybox - ## command: ['do', 'something'] - ## - initContainers: [] - ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) - ## - extraPodSpec: {} - ## PostgreSQL read only service configuration - ## - service: - ## @param readReplicas.service.type Kubernetes Service type - ## - type: ClusterIP - ## @param readReplicas.service.ports.postgresql PostgreSQL service port - ## - ports: - postgresql: 5432 - ## Node ports to expose - ## NOTE: choose port between <30000-32767> - ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePorts: - postgresql: "" - ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services - ## e.g: - ## clusterIP: None - ## - clusterIP: "" - ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service - ## - annotations: {} - ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` - ## Set the LoadBalancer service type to internal only - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service - ## - extraPorts: [] - ## @param 
readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Headless service properties - ## - headless: - ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service - ## - annotations: {} - ## PostgreSQL read only persistence configuration - ## - persistence: - ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC - ## - enabled: true - ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use - ## - existingClaim: "" - ## @param readReplicas.persistence.mountPath The path the volume will be mounted at - ## Note: useful when using custom PostgreSQL images - ## - mountPath: /bitnami/postgresql - ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to - ## Useful in dev environments and one PV for multiple services - ## - subPath: "" - ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClass: "" - ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume - ## - accessModes: - - ReadWriteOnce - ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume - ## - size: 8Gi - ## @param readReplicas.persistence.annotations Annotations for the PVC - ## - annotations: {} - ## @param readReplicas.persistence.labels Labels for the PVC - ## - labels: {} - ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param readReplicas.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## PostgreSQL Read only Persistent Volume Claim Retention Policy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## - persistentVolumeClaimRetentionPolicy: - ## @param readReplicas.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for read only Statefulset - ## - enabled: false - ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## - whenScaled: Retain - ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - whenDeleted: Retain - - - ## @section Backup parameters - ## This section implements a trivial logical dump cronjob of the database. - ## This only comes with the consistency guarantees of the dump program. - ## This is not a snapshot based roll forward/backward recovery backup. 
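## Illustrative sketch, not from the original manifests: the backup block below ships disabled.
## Enabling the logical-dump CronJob would amount to roughly the following; the schedule and PVC
## size simply mirror the chart defaults shown below.
backup:
  enabled: true
  cronjob:
    schedule: "@daily"
    storage:
      size: 8Gi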
- ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ - backup: - ## @param backup.enabled Enable the logical dump of the database "regularly" - enabled: false - cronjob: - ## @param backup.cronjob.schedule Set the cronjob parameter schedule - schedule: "@daily" - ## @param backup.cronjob.timeZone Set the cronjob parameter timeZone - timeZone: "" - ## @param backup.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy - concurrencyPolicy: Allow - ## @param backup.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit - failedJobsHistoryLimit: 1 - ## @param backup.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit - successfulJobsHistoryLimit: 3 - ## @param backup.cronjob.startingDeadlineSeconds Set the cronjob parameter startingDeadlineSeconds - startingDeadlineSeconds: "" - ## @param backup.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished - ttlSecondsAfterFinished: "" - ## @param backup.cronjob.restartPolicy Set the cronjob parameter restartPolicy - restartPolicy: OnFailure - ## @param backup.cronjob.podSecurityContext.enabled Enable PodSecurityContext for CronJob/Backup - ## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the CronJob - podSecurityContext: - enabled: true - fsGroup: 1001 - ## backup container's Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param backup.cronjob.containerSecurityContext.enabled Enable container security context - ## @param backup.cronjob.containerSecurityContext.runAsUser User ID for the backup container - ## @param backup.cronjob.containerSecurityContext.runAsGroup Group ID for the backup container - ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set backup container's Security Context runAsNonRoot - ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Is the container itself readonly - ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate backup pod(s) privileges - ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set backup container's Security Context seccompProfile type - ## @param backup.cronjob.containerSecurityContext.capabilities.drop Set backup container's Security Context capabilities to drop - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param backup.cronjob.command Set backup container's command to run - command: - - /bin/sh - - -c - - "pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file=${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump" - - ## @param backup.cronjob.labels Set the cronjob labels - labels: {} - ## @param backup.cronjob.annotations Set the cronjob annotations - annotations: {} - ## @param backup.cronjob.nodeSelector Node labels for PostgreSQL backup CronJob pod assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - storage: - ## @param backup.cronjob.storage.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) - ## If defined, PVC must be created manually before volume will be bound - ## - existingClaim: "" - ## @param 
backup.cronjob.storage.resourcePolicy Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted - ## - resourcePolicy: "" - ## @param backup.cronjob.storage.storageClass PVC Storage Class for the backup data volume - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. - ## - storageClass: "" - ## @param backup.cronjob.storage.accessModes PV Access Mode - ## - accessModes: - - ReadWriteOnce - ## @param backup.cronjob.storage.size PVC Storage Request for the backup data volume - ## - size: 8Gi - ## @param backup.cronjob.storage.annotations PVC annotations - ## - annotations: {} - ## @param backup.cronjob.storage.mountPath Path to mount the volume at - ## - mountPath: /backup/pgdump - ## @param backup.cronjob.storage.subPath Subdirectory of the volume to mount at - ## and one PV for multiple services. - ## - subPath: "" - ## Fine tuning for volumeClaimTemplates - ## - volumeClaimTemplates: - ## @param backup.cronjob.storage.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes) - ## A label query over volumes to consider for binding (e.g. when using local volumes) - ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details - ## - selector: {} - - ## @section NetworkPolicy parameters - ## - - ## Add networkpolicies - ## - networkPolicy: - ## @param networkPolicy.enabled Enable network policies - ## - enabled: false - ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus) - ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. - ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. - ## - metrics: - enabled: false - ## e.g: - ## namespaceSelector: - ## label: monitoring - ## - namespaceSelector: {} - ## e.g: - ## podSelector: - ## label: monitoring - ## - podSelector: {} - ## Ingress Rules - ## - ingressRules: - ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. - ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed namespace(s). - ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed pod(s). - ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules Custom network policy for the PostgreSQL primary node. 
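                ## Illustrative sketch (not part of the original values) of the block that
                ## follows: restricting access to the primary to a single client namespace
                ## and app. The selector labels are hypothetical placeholders, not labels
                ## used anywhere in this repository.
                ##   networkPolicy:
                ##     enabled: true
                ##     ingressRules:
                ##       primaryAccessOnlyFrom:
                ##         enabled: true
                ##         namespaceSelector:
                ##           kubernetes.io/metadata.name: authentik-ns
                ##         podSelector:
                ##           app.kubernetes.io/name: authentik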
- ## - primaryAccessOnlyFrom: - enabled: false - ## e.g: - ## namespaceSelector: - ## label: ingress - ## - namespaceSelector: {} - ## e.g: - ## podSelector: - ## label: access - ## - podSelector: {} - ## custom ingress rules - ## e.g: - ## customRules: - ## - from: - ## - namespaceSelector: - ## matchLabels: - ## label: example - ## - customRules: [] - ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. - ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed namespace(s). - ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed pod(s). - ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules Custom network policy for the PostgreSQL read-only nodes. - ## - readReplicasAccessOnlyFrom: - enabled: false - ## e.g: - ## namespaceSelector: - ## label: ingress - ## - namespaceSelector: {} - ## e.g: - ## podSelector: - ## label: access - ## - podSelector: {} - ## custom ingress rules - ## e.g: - ## CustomRules: - ## - from: - ## - namespaceSelector: - ## matchLabels: - ## label: example - ## - customRules: [] - ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). - ## @param networkPolicy.egressRules.customRules Custom network policy rule - ## - egressRules: - # Deny connections to external. This is not compatible with an external database. - denyConnectionsToExternal: false - ## Additional custom egress rules - ## e.g: - ## customRules: - ## - to: - ## - namespaceSelector: - ## matchLabels: - ## label: example - ## - customRules: [] - - ## @section Volume Permissions parameters - ## - - ## Init containers parameters: - ## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node - ## - volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume - ## - enabled: false - ## @param volumePermissions.image.registry Init container volume-permissions image registry - ## @param volumePermissions.image.repository Init container volume-permissions image repository - ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) - ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy - ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 11-debian-11-r77 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
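                ## Illustrative sketch (not part of the original values): enabling the
                ## volume-permissions init container so the data directory is chowned to
                ## the runtime user before PostgreSQL starts. Mainly relevant for storage
                ## provisioners that do not apply fsGroup ownership themselves.
                ##   volumePermissions:
                ##     enabled: true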
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init container resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param volumePermissions.resources.limits Init container volume-permissions resource limits - ## @param volumePermissions.resources.requests Init container volume-permissions resource requests - ## - resources: - limits: {} - requests: {} - ## Init container' Security Context - ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser - ## and not the below volumePermissions.containerSecurityContext.runAsUser - ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container - ## @param volumePermissions.containerSecurityContext.runAsGroup Group ID for the init container - ## @param volumePermissions.containerSecurityContext.runAsNonRoot runAsNonRoot for the init container - ## @param volumePermissions.containerSecurityContext.seccompProfile.type seccompProfile.type for the init container - ## - containerSecurityContext: - runAsUser: 0 - runAsGroup: 0 - runAsNonRoot: false - seccompProfile: - type: RuntimeDefault - ## @section Other Parameters - ## - - ## @param serviceBindings.enabled Create secret for service binding (Experimental) - ## Ref: https://servicebinding.io/service-provider/ - ## - serviceBindings: - enabled: false - - ## Service account for PostgreSQL to use. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod - ## - create: false - ## @param serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## Can be set to false if pods using this serviceAccount do not need to use K8s API - ## - automountServiceAccountToken: true - ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## Creates role for ServiceAccount - ## @param rbac.create Create Role and RoleBinding (required for PSP to work) - ## - rbac: - create: false - ## @param rbac.rules Custom RBAC rules to set - ## e.g: - ## rules: - ## - apiGroups: - ## - "" - ## resources: - ## - pods - ## verbs: - ## - get - ## - list - ## - rules: [] - ## Pod Security Policy - ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later - ## - psp: - create: false - - ## @section Metrics Parameters - ## - - metrics: - ## @param metrics.enabled Start a prometheus exporter - ## - enabled: false - ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry - ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository - ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) - ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag - ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy - ## @param metrics.image.pullSecrets Specify image pull secrets - ## - image: - registry: docker.io - repository: bitnami/postgres-exporter - tag: 0.14.0-debian-11-r5 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## @param metrics.customMetrics Define additional custom metrics - ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file - ## customMetrics: - ## pg_database: - ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" - ## metrics: - ## - name: - ## usage: "LABEL" - ## description: "Name of the database" - ## - size_bytes: - ## usage: "GAUGE" - ## description: "Size of the database in bytes" - ## - customMetrics: {} - ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter - ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables - ## For example: - ## extraEnvVars: - ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS - ## value: "true" - ## - extraEnvVars: [] - ## PostgreSQL Prometheus exporter containers' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context - ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser - ## @param metrics.containerSecurityContext.runAsGroup Set PostgreSQL Prometheus exporter containers' Security Context runAsGroup - ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot - ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set PostgreSQL Prometheus exporter containers' Security Context allowPrivilegeEscalation - ## @param metrics.containerSecurityContext.seccompProfile.type Set PostgreSQL Prometheus exporter containers' Security Context seccompProfile.type - ## @param metrics.containerSecurityContext.capabilities.drop Set PostgreSQL Prometheus exporter containers' Security Context capabilities.drop - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers - ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param 
metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers - ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers - ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe - ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port - ## - containerPorts: - metrics: 9187 - ## PostgreSQL Prometheus exporter resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container - ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container - ## - resources: - limits: {} - requests: {} - ## Service configuration - ## - service: - ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port - ## - ports: - metrics: 9187 - ## @param metrics.service.clusterIP Static clusterIP or None for headless services - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address - ## - clusterIP: "" - ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin - ## Values: ClientIP or None - ## ref: https://kubernetes.io/docs/user-guide/services/ - ## - sessionAffinity: None - ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint - ## - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" - ## Prometheus Operator ServiceMonitor configuration - 
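                ## Illustrative sketch (not part of the original values): running the
                ## exporter with explicit resource requests and limits so it schedules
                ## predictably. The numbers are hypothetical.
                ##   metrics:
                ##     enabled: true
                ##     resources:
                ##       requests:
                ##         cpu: 50m
                ##         memory: 64Mi
                ##       limits:
                ##         memory: 128Mi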
## - serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator - ## - enabled: false - ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) - ## - namespace: "" - ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: "" - ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - scrapeTimeout: "" - ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus - ## - labels: {} - ## @param metrics.serviceMonitor.selector Prometheus instance selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## - selector: {} - ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## - relabelings: [] - ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## - metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## - honorLabels: false - ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. - ## - jobLabel: "" - ## Custom PrometheusRule to be defined - ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## - prometheusRule: - ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator - ## - enabled: false - ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) - ## - namespace: "" - ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus - ## - labels: {} - ## @param metrics.prometheusRule.rules PrometheusRule definitions - ## Make sure to constraint the rules to the current postgresql service. - ## rules: - ## - alert: HugeReplicationLag - ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1 - ## for: 1m - ## labels: - ## severity: critical - ## annotations: - ## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). - ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
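                ## Illustrative sketch (not part of the original values): with the
                ## kube-prometheus-stack release removed later in this diff, the exporter
                ## would normally be scraped through a ServiceMonitor rather than the
                ## prometheus.io annotations above, since the operator-managed Prometheus
                ## only selects ServiceMonitor/PodMonitor objects by default. The release
                ## label value is an assumption about how that Prometheus is configured.
                ##   metrics:
                ##     enabled: true
                ##     serviceMonitor:
                ##       enabled: true
                ##       interval: 30s
                ##       labels:
                ##         release: kube-prometheus-stack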
- ## - rules: [] diff --git a/postgresql/sealed-secret.yaml b/postgresql/sealed-secret.yaml deleted file mode 100644 index 3c5175b..0000000 --- a/postgresql/sealed-secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: postgresql-default-credentials - namespace: postgresql-system -spec: - encryptedData: - password: AgAN2drH19WiBU8KYZyN8N0T1o8Sh9ti1M5kroU+xDpDD+pOLlZZEw63qcUeeK2paiTm7E3hVO/EnCNyGUBvrDKQzoMNsImbsTMJMVHldiTZedZV1IQxQXIYELgUtk93I2WoOiNvpfL+ro2vomjqPExlVeI1tuqPVdL1+xZYfKfFk+pPL3kLpRuO5HDmwcjy12yYd0E3RqU0g58O7UCCCdQRMOtU8/Z/MPM0I3ZGxG5DQCN3FEra8g1wacvsOplJVGYFzIBS2T7tPyW6I5zW9mFPDozMwqINukuoDC4uSUGSP2Ka2al3VyZiZnXySV3LJ38yj9TpZpTCKY2pgDeMue1hl50xMoCGhBXGzN3lCx6M4/us42a+oss/dn8oXFLAbOVaI2B7bpWHSz8fykdpogpqGgsa23gCuR2V8crZ7xVuACXqNDyunoHLgph8McFDsBXWNcyTg7ocILIjVKFLL4LDhtFQgHZcZXiTe6kMJNdKMxnH/0z/A00JO7dhU2uub31Oa0LwiE/rWO31E39tDZj4o0bRez1jsneuvbMVvwYyyr0OmOfdznv9qvGXbFSgGYCNUuTAPzFRMU4NkIup4RL3a0s2Fg28X79JIaWGjpuXgfyUKiBqUe7f6FAKkiEFeqYCJoccVQpbiYOODjyV5+89tfopmJaj7HZ1t3HfrDeGk/Vj6w4C6e2avCl+qLWqz2nFyM526ymdfVaWV5B945pFTTFrlxh8lRH7Ej3qGJEz1Q== - postgres-password: AgAwdjZR9WDv74oup8dhkzKxYYrMOd40O9S09t8pQspuw/xiO/CaeNFkggWrVVbNaFI1nnQrd/3JFTu6/1mwinr13MqAKKmile/rXSSKnXo+f90PSEFlsanDpfMSuhZKGF5gDHp5HwWIGDl8P6uBC50/Z7u/+muPcdgnCgg7rVMo5EiipgZgQOJXuAxHN8a8w1HMQoVd4PD1SewTHfbwxKsZoBYgy+RL5vS4Gd8d4UbqkSFk8h+uZHdsJALrZ9PCsUDMInT06Ll1YgmcSigtFR0bM46kfbGr1tvXKmv2lYMBjn8gcOS3rRxKmhqT/HUxaepg6TDjoG/Hw0oNtxGHRmwwnzuDBtPtCJOCb3LEodIAXsz7U3hUrvTI0/BMsXBP/wZFB7mZ6mvy4tfz2SkHEIoqu+um2I6sC9OHPNFIQPSq59PX2t2G9RH4aTJVWKFcLv7fZ7+w+ot0D4300z0fRbKYCgUowKUtkd+H3eYmu1AzMpFkEUh+9NahHNeXCf+YsAHZb0vm4mz39UWTVuRzdwGFFG193goOFI6A2t9n2E18f/UQnhKewi9z552THTqsFO3VE/Kq2C9q+AA3BaGVCSlNw7eRXRr+lSaNiuTGgEZeQZubBLriC7rTzrfJjP2ik2vayCXb1dvGPrSGst4oo9IYnb548uFjgpd7ZQz0WGCufBhM7GNv7XaC+ZQEsCHPwJLaiD6irl8d1IKn+7g9vnOPUYRhcTiPtuEuWFneQ9tNMw== - username: AgAn4TsaveRieevxEf80zFJeKLkQoLMf+o5upoeq5YdXRnyV00xk1yL0QBYdvNdt3FnEmZR5R0oiJKimZmQqHOHH9++G1cqACsmNmEbjU+BBUNwVOhXZAkU1xHvpAACNKaqiqlhR5uGYx+rP6GbsW11UrwTu4oeYBqyGXtO5i9FaFaIbK32UPJ4e5lsml0l01reWWwZI6DH9L2O6E7wif5Pxw6wEcQphfk2YlTddXGRZA0dI1xFSSuvjxRRaASpfJqU/ztqdzF/MVCKnheZneuVYyw7w7Suv87RIx4ddrJKqDz0Fla9LWAC1xJMqGxWPE0Bgd9jWlRK4Cy6DogZ5ZoJv+pZm8zXy3N60SSM2ZM7TaP+FOz8FhgoDYXSn3lKPlLY2mlOBFJ8PYL1dcrGqDkHKiklAUelbID1Hiw7CdcavohFTi9CsaZXWpyeAEiMmiMpXY2+nnwppqv0+Oc82DvU4305q8FzFmi9N2olR4vnvzjBclHsXsSesJ5rKjNHk3aApTT2MIbOexDXTyIXIkrLRFK1/KbUx86qjlPOycZ2YlqfvbakEajFVV3TyR5Bb7ZHf5yQOOyzkmJj6Z3xaNmfvskqHyhImQHKRHEKjyvoKqWMrAQZEUm4LwLTdSRMo3o9p00Xb2YFFCOOhJF9hoxf65TvXkWNJyYMNjBW1a1H8gnrjT0zEyV4Veh75YRDmzgtl2CR/2w== - template: - metadata: - creationTimestamp: null - name: postgresql-default-credentials - namespace: postgresql-system diff --git a/prometheus/prometheous-operator-helmrelease.yaml b/prometheus/prometheous-operator-helmrelease.yaml deleted file mode 100644 index cb0f33f..0000000 --- a/prometheus/prometheous-operator-helmrelease.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: kube-prometheus-stack - namespace: monitoring -spec: - interval: 1m - chart: - spec: - chart: kube-prometheus-stack - version: "58.7.2" - sourceRef: - kind: HelmRepository - name: prometheus-community - namespace: flux-system - values: {} diff --git a/redis/helmrelease-redis.yaml b/redis/helmrelease-redis.yaml deleted file mode 100644 index 66019e5..0000000 --- a/redis/helmrelease-redis.yaml +++ /dev/null @@ -1,1916 +0,0 @@ -apiVersion: 
helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: redis - namespace: redis-system -spec: - chart: - spec: - chart: redis - sourceRef: - kind: HelmRepository - name: bitnami - namespace: flux-system - - interval: 15m0s - timeout: 5m - releaseName: redis - values: - # Copyright VMware, Inc. - # SPDX-License-Identifier: APACHE-2.0 - - ## @section Global parameters - ## Global Docker image parameters - ## Please, note that this will override the image parameters, including dependencies, configured to use the global value - ## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass - ## - - ## @param global.imageRegistry Global Docker image registry - ## @param global.imagePullSecrets Global Docker registry secret names as an array - ## @param global.storageClass Global StorageClass for Persistent Volume(s) - ## @param global.redis.password Global Redis® password (overrides `auth.password`) - ## - global: - imageRegistry: "" - ## E.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - storageClass: "longhorn" - redis: - password: "" - - ## @section Common parameters - ## - - ## @param kubeVersion Override Kubernetes version - ## - kubeVersion: "" - ## @param nameOverride String to partially override common.names.fullname - ## - nameOverride: "" - ## @param fullnameOverride String to fully override common.names.fullname - ## - fullnameOverride: "" - ## @param commonLabels Labels to add to all deployed objects - ## - commonLabels: {} - ## @param commonAnnotations Annotations to add to all deployed objects - ## - commonAnnotations: {} - ## @param secretAnnotations Annotations to add to secret - ## - secretAnnotations: {} - ## @param clusterDomain Kubernetes cluster domain name - ## - clusterDomain: cluster.local - ## @param extraDeploy Array of extra objects to deploy with the release - ## - extraDeploy: [] - ## @param useHostnames Use hostnames internally when announcing replication. If false, the hostname will be resolved to an IP address - ## - useHostnames: true - ## @param nameResolutionThreshold Failure threshold for internal hostnames resolution - ## - nameResolutionThreshold: 5 - ## @param nameResolutionTimeout Timeout seconds between probes for internal hostnames resolution - ## - nameResolutionTimeout: 5 - - ## Enable diagnostic mode in the deployment - ## - diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) - ## - enabled: false - ## @param diagnosticMode.command Command to override all containers in the deployment - ## - command: - - sleep - ## @param diagnosticMode.args Args to override all containers in the deployment - ## - args: - - infinity - - ## @section Redis® Image parameters - ## - - ## Bitnami Redis® image - ## ref: https://hub.docker.com/r/bitnami/redis/tags/ - ## @param image.registry Redis® image registry - ## @param image.repository Redis® image repository - ## @param image.tag Redis® image tag (immutable tags are recommended) - ## @param image.digest Redis® image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag - ## @param image.pullPolicy Redis® image pull policy - ## @param image.pullSecrets Redis® image pull secrets - ## @param image.debug Enable image debug mode - ## - image: - registry: docker.io - repository: bitnami/redis - tag: 7.2.4 - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - - ## @section Redis® common configuration parameters - ## https://github.com/bitnami/containers/tree/main/bitnami/redis#configuration - ## - - ## @param architecture Redis® architecture. Allowed values: `standalone` or `replication` - ## - architecture: standalone - ## Redis® Authentication parameters - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/redis#setting-the-server-password-on-first-run - ## - auth: - ## @param auth.enabled Enable password authentication - ## - enabled: true - ## @param auth.sentinel Enable password authentication on sentinels too - ## - sentinel: true - ## @param auth.password Redis® password - ## Defaults to a random 10-character alphanumeric string if not set - ## - #password: "" - ## @param auth.existingSecret The name of an existing secret with Redis® credentials - ## NOTE: When it's set, the previous `auth.password` parameter is ignored - ## - existingSecret: "redis-key" - ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret - ## NOTE: ignored unless `auth.existingSecret` parameter is set - ## - existingSecretPasswordKey: "password" - ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable - ## - usePasswordFiles: false - - ## @param commonConfiguration [string] Common configuration to be added into the ConfigMap - ## ref: https://redis.io/topics/config - ## - commonConfiguration: |- - # Enable AOF https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - ## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis® nodes - ## - existingConfigmap: "" - - ## @section Redis® master configuration parameters - ## - - master: - ## @param master.count Number of Redis® master instances to deploy (experimental, requires additional configuration) - ## - count: 1 - ## @param master.configuration Configuration for Redis® master nodes - ## ref: https://redis.io/topics/config - ## - configuration: "" - ## @param master.disableCommands Array with Redis® commands to disable on master nodes - ## Commands will be completely disabled by renaming each to an empty string. 
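            ## Illustrative sketch (not part of these values): the Secret that
            ## auth.existingSecret above points at. The chart only requires a Secret
            ## named "redis-key" in the redis-system namespace with a "password" key;
            ## in a GitOps repository like this one it would presumably be committed
            ## as a SealedSecret rather than plain text, and the value below is a
            ## placeholder.
            ##   apiVersion: v1
            ##   kind: Secret
            ##   metadata:
            ##     name: redis-key
            ##     namespace: redis-system
            ##   type: Opaque
            ##   stringData:
            ##     password: "<redis password here>"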
- ## ref: https://redis.io/topics/security#disabling-of-specific-commands - ## - disableCommands: - - FLUSHDB - ## @param master.command Override default container command (useful when using custom images) - ## - command: [] - ## @param master.args Override default container args (useful when using custom images) - ## - args: [] - ## @param master.enableServiceLinks Whether information about services should be injected into pod's environment variable - ## - enableServiceLinks: true - ## @param master.preExecCmds Additional commands to run prior to starting Redis® master - ## - preExecCmds: [] - ## @param master.extraFlags Array with additional command line flags for Redis® master - ## e.g: - ## extraFlags: - ## - "--maxmemory-policy volatile-ttl" - ## - "--repl-backlog-size 1024mb" - ## - extraFlags: [] - ## @param master.extraEnvVars Array with extra environment variables to add to Redis® master nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® master nodes - ## - extraEnvVarsCM: "" - ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® master nodes - ## - extraEnvVarsSecret: "" - ## @param master.containerPorts.redis Container port to open on Redis® master nodes - ## - containerPorts: - redis: 6379 - ## Configure extra options for Redis® containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param master.startupProbe.enabled Enable startupProbe on Redis® master nodes - ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param master.startupProbe.periodSeconds Period seconds for startupProbe - ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param master.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param master.livenessProbe.enabled Enable livenessProbe on Redis® master nodes - ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param master.readinessProbe.enabled Enable readinessProbe on Redis® master nodes - ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 1 - 
successThreshold: 1 - failureThreshold: 5 - ## @param master.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Redis® master resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param master.resources.limits The resources limits for the Redis® master containers - ## @param master.resources.requests The requested resources for the Redis® master containers - ## - resources: - limits: {} - requests: {} - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param master.podSecurityContext.enabled Enabled Redis® master pods' Security Context - ## @param master.podSecurityContext.fsGroup Set Redis® master pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroup: 1001 - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param master.containerSecurityContext.enabled Enabled Redis® master containers' Security Context - ## @param master.containerSecurityContext.runAsUser Set Redis® master containers' Security Context runAsUser - ## @param master.containerSecurityContext.runAsGroup Set Redis® master containers' Security Context runAsGroup - ## @param master.containerSecurityContext.runAsNonRoot Set Redis® master containers' Security Context runAsNonRoot - ## @param master.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate Redis® pod(s) privileges - ## @param master.containerSecurityContext.seccompProfile.type Set Redis® master containers' Security Context seccompProfile - ## @param master.containerSecurityContext.capabilities.drop Set Redis® master containers' Security Context capabilities to drop - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param master.kind Use either Deployment or StatefulSet (default) - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ - ## - kind: StatefulSet - ## @param master.schedulerName Alternate scheduler for Redis® master pods - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param master.updateStrategy.type Redis® master statefulset strategy type - ## @skip master.updateStrategy.rollingUpdate - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment) - ## - type: RollingUpdate - ## @param master.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update - ## - minReadySeconds: 0 - ## @param master.priorityClassName Redis® master pods' priorityClassName - ## - priorityClassName: "" - ## @param master.hostAliases Redis® master pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param 
master.podLabels Extra labels for Redis® master pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param master.podAnnotations Annotations for Redis® master pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis® master pods - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ - ## - shareProcessNamespace: false - ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node master.affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set - ## - key: "" - ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param master.affinity Affinity for Redis® master pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param master.nodeSelector Node labels for Redis® master pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param master.tolerations Tolerations for Redis® master pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param master.topologySpreadConstraints Spread Constraints for Redis® master pod assignment - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## E.g. - ## topologySpreadConstraints: - ## - maxSkew: 1 - ## topologyKey: node - ## whenUnsatisfiable: DoNotSchedule - ## - topologySpreadConstraints: [] - ## @param master.dnsPolicy DNS Policy for Redis® master pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ - ## E.g. - ## dnsPolicy: ClusterFirst - ## - dnsPolicy: "" - ## @param master.dnsConfig DNS Configuration for Redis® master pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ - ## E.g. 
- ## dnsConfig: - ## options: - ## - name: ndots - ## value: "4" - ## - name: single-request-reopen - ## - dnsConfig: {} - ## @param master.lifecycleHooks for the Redis® master container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis® master pod(s) - ## - extraVolumes: [] - ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® master container(s) - ## - extraVolumeMounts: [] - ## @param master.sidecars Add additional sidecar containers to the Redis® master pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param master.initContainers Add additional init containers to the Redis® master pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - ## Persistence parameters - ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## @param master.persistence.enabled Enable persistence on Redis® master nodes using Persistent Volume Claims - ## - enabled: true - ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. - ## - medium: "" - ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. - ## - sizeLimit: "" - ## @param master.persistence.path The path the volume will be mounted at on Redis® master containers - ## NOTE: Useful when using different Redis® images - ## - path: /data - ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis® master containers - ## NOTE: Useful in dev environments - ## - subPath: "" - ## @param master.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® master containers - ## - subPathExpr: "" - ## @param master.persistence.storageClass Persistent Volume storage class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner - ## - storageClass: "longhorn" - ## @param master.persistence.accessModes Persistent Volume access modes - ## - accessModes: - - ReadWriteOnce - ## @param master.persistence.size Persistent Volume size - ## - size: 8Gi - ## @param master.persistence.annotations Additional custom annotations for the PVC - ## - annotations: {} - ## @param master.persistence.labels Additional custom labels for the PVC - ## - labels: {} - ## @param master.persistence.selector Additional labels to match for the PVC - ## e.g: - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param master.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## @param master.persistence.existingClaim Use a existing PVC which must be created manually before bound - ## NOTE: requires master.persistence.enabled: true - ## - existingClaim: "" - ## persistentVolumeClaimRetentionPolicy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## @param master.persistentVolumeClaimRetentionPolicy.enabled Controls if and how 
PVCs are deleted during the lifecycle of a StatefulSet - ## @param master.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## @param master.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - persistentVolumeClaimRetentionPolicy: - enabled: false - whenScaled: Retain - whenDeleted: Retain - ## Redis® master service parameters - ## - service: - ## @param master.service.type Redis® master service type - ## - type: ClusterIP - ## @param master.service.ports.redis Redis® master service port - ## - ports: - redis: 6379 - ## @param master.service.nodePorts.redis Node port for Redis® master - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - redis: "" - ## @param master.service.externalTrafficPolicy Redis® master service external traffic policy - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param master.service.internalTrafficPolicy Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) - ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ - ## - internalTrafficPolicy: Cluster - ## @param master.service.clusterIP Redis® master service Cluster IP - ## - clusterIP: "" - ## @param master.service.loadBalancerIP Redis® master service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param master.service.loadBalancerSourceRanges Redis® master service Load Balancer sources - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g. - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param master.service.externalIPs Redis® master service External IPs - ## https://kubernetes.io/docs/concepts/services-networking/service/#external-ips - ## e.g. - ## externalIPs: - ## - 10.10.10.1 - ## - 201.22.30.1 - ## - externalIPs: [] - ## @param master.service.annotations Additional custom annotations for Redis® master service - ## - annotations: {} - ## @param master.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param master.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods - ## - terminationGracePeriodSeconds: 30 - ## ServiceAccount configuration - ## - - serviceAccount: - ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: false - ## @param master.serviceAccount.name The name of the ServiceAccount to use. 
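            ## Illustrative sketch (not part of the original values): exposing the
            ## master outside the cluster through the service block above. The address
            ## and CIDR are hypothetical; as written, this release keeps the default
            ## ClusterIP service.
            ##   master:
            ##     service:
            ##       type: LoadBalancer
            ##       loadBalancerIP: 192.168.1.240
            ##       loadBalancerSourceRanges:
            ##         - 10.0.0.0/24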
- ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param master.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server - ## - automountServiceAccountToken: true - ## @param master.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - - ## @section Redis® replicas configuration parameters - ## - - replica: - ## @param replica.replicaCount Number of Redis® replicas to deploy - ## - replicaCount: 1 - ## @param replica.configuration Configuration for Redis® replicas nodes - ## ref: https://redis.io/topics/config - ## - configuration: "" - ## @param replica.disableCommands Array with Redis® commands to disable on replicas nodes - ## Commands will be completely disabled by renaming each to an empty string. - ## ref: https://redis.io/topics/security#disabling-of-specific-commands - ## - disableCommands: - - FLUSHDB - - FLUSHALL - ## @param replica.command Override default container command (useful when using custom images) - ## - command: [] - ## @param replica.args Override default container args (useful when using custom images) - ## - args: [] - ## @param replica.enableServiceLinks Whether information about services should be injected into pod's environment variable - ## - enableServiceLinks: true - ## @param replica.preExecCmds Additional commands to run prior to starting Redis® replicas - ## - preExecCmds: [] - ## @param replica.extraFlags Array with additional command line flags for Redis® replicas - ## e.g: - ## extraFlags: - ## - "--maxmemory-policy volatile-ttl" - ## - "--repl-backlog-size 1024mb" - ## - extraFlags: [] - ## @param replica.extraEnvVars Array with extra environment variables to add to Redis® replicas nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® replicas nodes - ## - extraEnvVarsCM: "" - ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® replicas nodes - ## - extraEnvVarsSecret: "" - ## @param replica.externalMaster.enabled Use external master for bootstrapping - ## @param replica.externalMaster.host External master host to bootstrap from - ## @param replica.externalMaster.port Port for Redis service external master host - ## - externalMaster: - enabled: false - host: "" - port: 6379 - ## @param replica.containerPorts.redis Container port to open on Redis® replicas nodes - ## - containerPorts: - redis: 6379 - ## Configure extra options for Redis® containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param replica.startupProbe.enabled Enable startupProbe on Redis® replicas nodes - ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe - ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param replica.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: true - initialDelaySeconds: 10 - 
periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 22 - ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes - ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes - ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - ## @param replica.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Redis® replicas resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param replica.resources.limits The resources limits for the Redis® replicas containers - ## @param replica.resources.requests The requested resources for the Redis® replicas containers - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: {} - # cpu: 250m - # memory: 256Mi - requests: {} - # cpu: 250m - # memory: 256Mi - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param replica.podSecurityContext.enabled Enabled Redis® replicas pods' Security Context - ## @param replica.podSecurityContext.fsGroup Set Redis® replicas pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroup: 1001 - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param replica.containerSecurityContext.enabled Enabled Redis® replicas containers' Security Context - ## @param replica.containerSecurityContext.runAsUser Set Redis® replicas containers' Security Context runAsUser - ## @param replica.containerSecurityContext.runAsGroup Set Redis® replicas containers' Security Context runAsGroup - ## @param replica.containerSecurityContext.runAsNonRoot Set Redis® replicas containers' Security Context runAsNonRoot - ## @param replica.containerSecurityContext.allowPrivilegeEscalation Set Redis® replicas pod's Security Context allowPrivilegeEscalation - ## @param replica.containerSecurityContext.seccompProfile.type Set Redis® replicas containers' Security Context seccompProfile - ## @param replica.containerSecurityContext.capabilities.drop Set Redis® replicas containers' Security Context capabilities to drop - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param replica.schedulerName Alternate scheduler for Redis® replicas pods - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param replica.updateStrategy.type Redis® replicas statefulset strategy type - ## @skip replica.updateStrategy.rollingUpdate - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment) - ## - type: RollingUpdate - ## @param replica.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update - ## - minReadySeconds: 3 - ## @param replica.priorityClassName Redis® replicas pods' priorityClassName - ## - priorityClassName: "" - ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of %%MAIN_CONTAINER_NAME%% pods - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies - ## - podManagementPolicy: "" - ## @param replica.hostAliases Redis® replicas pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param replica.podLabels Extra labels for Redis® replicas pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param replica.podAnnotations Annotations for Redis® replicas pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis® replicas pods - ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ - ## - shareProcessNamespace: false - ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set - ## - key: "" - ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param replica.affinity Affinity for Redis® replicas pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param replica.nodeSelector Node labels for Redis® replicas pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param replica.tolerations Tolerations for Redis® replicas pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param replica.topologySpreadConstraints Spread Constraints for Redis® replicas pod assignment - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## E.g. - ## topologySpreadConstraints: - ## - maxSkew: 1 - ## topologyKey: node - ## whenUnsatisfiable: DoNotSchedule - ## - topologySpreadConstraints: [] - ## @param replica.dnsPolicy DNS Policy for Redis® replica pods - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ - ## E.g. - ## dnsPolicy: ClusterFirst - ## - dnsPolicy: "" - ## @param replica.dnsConfig DNS Configuration for Redis® replica pods - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ - ## E.g. 
- ## dnsConfig: - ## options: - ## - name: ndots - ## value: "4" - ## - name: single-request-reopen - ## - dnsConfig: {} - ## @param replica.lifecycleHooks for the Redis® replica container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis® replicas pod(s) - ## - extraVolumes: [] - ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s) - ## - extraVolumeMounts: [] - ## @param replica.sidecars Add additional sidecar containers to the Redis® replicas pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param replica.initContainers Add additional init containers to the Redis® replicas pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - ## Persistence Parameters - ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## @param replica.persistence.enabled Enable persistence on Redis® replicas nodes using Persistent Volume Claims - ## - enabled: true - ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes. - ## - medium: "" - ## @param replica.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. - ## - sizeLimit: "" - ## @param replica.persistence.path The path the volume will be mounted at on Redis® replicas containers - ## NOTE: Useful when using different Redis® images - ## - path: /data - ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis® replicas containers - ## NOTE: Useful in dev environments - ## - subPath: "" - ## @param replica.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers - ## - subPathExpr: "" - ## @param replica.persistence.storageClass Persistent Volume storage class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner - ## - storageClass: "" - ## @param replica.persistence.accessModes Persistent Volume access modes - ## - accessModes: - - ReadWriteOnce - ## @param replica.persistence.size Persistent Volume size - ## - size: 8Gi - ## @param replica.persistence.annotations Additional custom annotations for the PVC - ## - annotations: {} - ## @param replica.persistence.labels Additional custom labels for the PVC - ## - labels: {} - ## @param replica.persistence.selector Additional labels to match for the PVC - ## e.g: - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param replica.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## @param replica.persistence.existingClaim Use a existing PVC which must be created manually before bound - ## NOTE: requires replica.persistence.enabled: true - ## - existingClaim: "" - ## persistentVolumeClaimRetentionPolicy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## @param 
replica.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet - ## @param replica.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## @param replica.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - persistentVolumeClaimRetentionPolicy: - enabled: false - whenScaled: Retain - whenDeleted: Retain - ## Redis® replicas service parameters - ## - service: - ## @param replica.service.type Redis® replicas service type - ## - type: ClusterIP - ## @param replica.service.ports.redis Redis® replicas service port - ## - ports: - redis: 6379 - ## @param replica.service.nodePorts.redis Node port for Redis® replicas - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - redis: "" - ## @param replica.service.externalTrafficPolicy Redis® replicas service external traffic policy - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param replica.service.internalTrafficPolicy Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) - ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ - ## - internalTrafficPolicy: Cluster - ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param replica.service.clusterIP Redis® replicas service Cluster IP - ## - clusterIP: "" - ## @param replica.service.loadBalancerIP Redis® replicas service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param replica.service.loadBalancerSourceRanges Redis® replicas service Load Balancer sources - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g. 
- ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param replica.service.annotations Additional custom annotations for Redis® replicas service - ## - annotations: {} - ## @param replica.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param replica.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods - ## - terminationGracePeriodSeconds: 30 - ## Autoscaling configuration - ## - autoscaling: - - ## @param replica.autoscaling.enabled Enable replica autoscaling settings - ## - enabled: false - ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling - ## - minReplicas: 1 - ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling - ## - maxReplicas: 11 - ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling - ## - targetCPU: "" - ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling - ## - targetMemory: "" - ## ServiceAccount configuration - ## - serviceAccount: - ## @param replica.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: false - ## @param replica.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param replica.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server - ## - automountServiceAccountToken: true - ## @param replica.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## @section Redis® Sentinel configuration parameters - ## - - sentinel: - ## @param sentinel.enabled Use Redis® Sentinel on Redis® pods. - ## IMPORTANT: this will disable the master and replicas services and - ## create a single Redis® service exposing both the Redis and Sentinel ports - ## - enabled: false - ## Bitnami Redis® Sentinel image version - ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ - ## @param sentinel.image.registry Redis® Sentinel image registry - ## @param sentinel.image.repository Redis® Sentinel image repository - ## @param sentinel.image.tag Redis® Sentinel image tag (immutable tags are recommended) - ## @param sentinel.image.digest Redis® Sentinel image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag - ## @param sentinel.image.pullPolicy Redis® Sentinel image pull policy - ## @param sentinel.image.pullSecrets Redis® Sentinel image pull secrets - ## @param sentinel.image.debug Enable image debug mode - ## - image: - registry: docker.io - repository: bitnami/redis-sentinel - tag: 7.2.1-debian-11-r26 - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## @param sentinel.annotations Additional custom annotations for Redis® Sentinel resource - ## - annotations: {} - ## @param sentinel.masterSet Master set name - ## - masterSet: mymaster - ## @param sentinel.quorum Sentinel Quorum - ## - quorum: 2 - ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out. - ## - getMasterTimeout: 99 - ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. - ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data. - ## NOTE: This feature requires a "downAfterMilliseconds" value less or equal to 2000. - ## - automateClusterRecovery: false - ## @param sentinel.redisShutdownWaitFailover Whether the Redis® master container waits for the failover at shutdown (in addition to the Redis® Sentinel container). 
- ## - redisShutdownWaitFailover: true - ## Sentinel timing restrictions - ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis® node is down - ## @param sentinel.failoverTimeout Timeout for performing a election failover - ## - downAfterMilliseconds: 60000 - failoverTimeout: 180000 - ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover - ## - parallelSyncs: 1 - ## @param sentinel.configuration Configuration for Redis® Sentinel nodes - ## ref: https://redis.io/topics/sentinel - ## - configuration: "" - ## @param sentinel.command Override default container command (useful when using custom images) - ## - command: [] - ## @param sentinel.args Override default container args (useful when using custom images) - ## - args: [] - ## @param sentinel.enableServiceLinks Whether information about services should be injected into pod's environment variable - ## - enableServiceLinks: true - ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis® Sentinel - ## - preExecCmds: [] - ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis® Sentinel nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes - ## - extraEnvVarsCM: "" - ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® Sentinel nodes - ## - extraEnvVarsSecret: "" - ## @param sentinel.externalMaster.enabled Use external master for bootstrapping - ## @param sentinel.externalMaster.host External master host to bootstrap from - ## @param sentinel.externalMaster.port Port for Redis service external master host - ## - externalMaster: - enabled: false - host: "" - port: 6379 - ## @param sentinel.containerPorts.sentinel Container port to open on Redis® Sentinel nodes - ## - containerPorts: - sentinel: 26379 - ## Configure extra options for Redis® containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis® Sentinel nodes - ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe - ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 22 - ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis® Sentinel nodes - ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - 
failureThreshold: 6 - ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis® Sentinel nodes - ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 6 - ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Persistence parameters - ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## @param sentinel.persistence.enabled Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental) - ## - enabled: false - ## @param sentinel.persistence.storageClass Persistent Volume storage class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner - ## - storageClass: "" - ## @param sentinel.persistence.accessModes Persistent Volume access modes - ## - accessModes: - - ReadWriteOnce - ## @param sentinel.persistence.size Persistent Volume size - ## - size: 100Mi - ## @param sentinel.persistence.annotations Additional custom annotations for the PVC - ## - annotations: {} - ## @param sentinel.persistence.labels Additional custom labels for the PVC - ## - labels: {} - ## @param sentinel.persistence.selector Additional labels to match for the PVC - ## e.g: - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param sentinel.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## @param sentinel.persistence.medium Provide a medium for `emptyDir` volumes. - ## - medium: "" - ## @param sentinel.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 
- ## - sizeLimit: "" - ## persistentVolumeClaimRetentionPolicy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## @param sentinel.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet - ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - persistentVolumeClaimRetentionPolicy: - enabled: false - whenScaled: Retain - whenDeleted: Retain - ## Redis® Sentinel resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param sentinel.resources.limits The resources limits for the Redis® Sentinel containers - ## @param sentinel.resources.requests The requested resources for the Redis® Sentinel containers - ## - resources: - limits: {} - requests: {} - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param sentinel.containerSecurityContext.enabled Enabled Redis® Sentinel containers' Security Context - ## @param sentinel.containerSecurityContext.runAsUser Set Redis® Sentinel containers' Security Context runAsUser - ## @param sentinel.containerSecurityContext.runAsGroup Set Redis® Sentinel containers' Security Context runAsGroup - ## @param sentinel.containerSecurityContext.runAsNonRoot Set Redis® Sentinel containers' Security Context runAsNonRoot - ## @param sentinel.containerSecurityContext.allowPrivilegeEscalation Set Redis® Sentinel containers' Security Context allowPrivilegeEscalation - ## @param sentinel.containerSecurityContext.seccompProfile.type Set Redis® Sentinel containers' Security Context seccompProfile - ## @param sentinel.containerSecurityContext.capabilities.drop Set Redis® Sentinel containers' Security Context capabilities to drop - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param sentinel.lifecycleHooks for the Redis® sentinel container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis® Sentinel - ## - extraVolumes: [] - ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s) - ## - extraVolumeMounts: [] - ## Redis® Sentinel service parameters - ## - service: - ## @param sentinel.service.type Redis® Sentinel service type - ## - type: ClusterIP - ## @param sentinel.service.ports.redis Redis® service port for Redis® - ## @param sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel - ## - ports: - redis: 6379 - sentinel: 26379 - ## @param sentinel.service.nodePorts.redis Node port for Redis® - ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## NOTE: choose port between <30000-32767> - ## NOTE: By leaving these values blank, they will be generated by ports-configmap - ## If setting manually, please leave at least replica.replicaCount + 1 in between 
sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port - ## - nodePorts: - redis: "" - sentinel: "" - ## @param sentinel.service.externalTrafficPolicy Redis® Sentinel service external traffic policy - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param sentinel.service.clusterIP Redis® Sentinel service Cluster IP - ## - clusterIP: "" - ## @param sentinel.service.loadBalancerIP Redis® Sentinel service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param sentinel.service.loadBalancerSourceRanges Redis® Sentinel service Load Balancer sources - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g. - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param sentinel.service.annotations Additional custom annotations for Redis® Sentinel service - ## - annotations: {} - ## @param sentinel.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param sentinel.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Headless service properties - ## - headless: - ## @param sentinel.service.headless.annotations Annotations for the headless service. - ## - annotations: {} - ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods - ## - terminationGracePeriodSeconds: 30 - - ## @section Other Parameters - ## - - ## @param serviceBindings.enabled Create secret for service binding (Experimental) - ## Ref: https://servicebinding.io/service-provider/ - ## - serviceBindings: - enabled: false - - ## Network Policy configuration - ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources - ## - enabled: false - ## @param networkPolicy.allowExternal Don't require client label for connections - ## When set to false, only pods with the correct client label will have network access to the ports - ## Redis® is listening on. When true, Redis® will accept connections from any source - ## (with the correct destination port). 
- ## - allowExternal: true - ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraIngress: [] - ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces - ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - - metrics: - ## @param networkPolicy.metrics.allowExternal Don't require client label for connections for metrics endpoint - ## When set to false, only pods with the correct client label will have network access to the metrics port - ## - allowExternal: true - ## @param networkPolicy.metrics.ingressNSMatchLabels Labels to match to allow traffic from other namespaces to metrics endpoint - ## @param networkPolicy.metrics.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces to metrics endpoint - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - - ## PodSecurityPolicy configuration - ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later - ## - create: false - ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules - ## - enabled: false - ## RBAC configuration - ## - rbac: - ## @param rbac.create Specifies whether RBAC resources should be created - ## - create: false - ## @param rbac.rules Custom RBAC rules to set - ## e.g: - ## rules: - ## - apiGroups: - ## - "" - ## resources: - ## - pods - ## verbs: - ## - get - ## - list - ## - rules: [] - ## ServiceAccount configuration - ## - serviceAccount: - ## @param serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param serviceAccount.name The name of the ServiceAccount to use. 
- ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server - ## - automountServiceAccountToken: true - ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## Redis® Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - ## - pdb: - ## @param pdb.create Specifies whether a PodDisruptionBudget should be created - ## - create: false - ## @param pdb.minAvailable Min number of pods that must still be available after the eviction - ## - minAvailable: 1 - ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction - ## - maxUnavailable: "" - ## TLS configuration - ## - tls: - ## @param tls.enabled Enable TLS traffic - ## - enabled: false - ## @param tls.authClients Require clients to authenticate - ## - authClients: true - ## @param tls.autoGenerated Enable autogenerated certificates - ## - autoGenerated: false - ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates - ## - existingSecret: "" - ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead. - ## - certificatesSecret: "" - ## @param tls.certFilename Certificate filename - ## - certFilename: "" - ## @param tls.certKeyFilename Certificate Key filename - ## - certKeyFilename: "" - ## @param tls.certCAFilename CA Certificate filename - ## - certCAFilename: "" - ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers) - ## - dhParamsFilename: "" - - ## @section Metrics Parameters - ## - - metrics: - ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis® metrics - ## - enabled: false - ## Bitnami Redis® Exporter image - ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/ - ## @param metrics.image.registry Redis® Exporter image registry - ## @param metrics.image.repository Redis® Exporter image repository - ## @param metrics.image.tag Redis® Exporter image tag (immutable tags are recommended) - ## @param metrics.image.digest Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param metrics.image.pullPolicy Redis® Exporter image pull policy - ## @param metrics.image.pullSecrets Redis® Exporter image pull secrets - ## - image: - registry: docker.io - repository: bitnami/redis-exporter - tag: 1.55.0-debian-11-r0 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Configure extra options for Redis® containers' liveness, readiness & startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## @param metrics.startupProbe.enabled Enable startupProbe on Redis® replicas nodes - ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe - ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param metrics.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes - ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param metrics.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes - ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param metrics.command Override default metrics container init command (useful when using custom images) - ## - command: [] - ## @param metrics.redisTargetHost A way to specify an alternative Redis® hostname - ## Useful for certificate CN/SAN matching - ## - redisTargetHost: "localhost" - ## @param metrics.extraArgs Extra arguments for Redis® exporter, for example: - ## e.g.: - ## extraArgs: - ## check-keys: myKey,myOtherKey - ## - extraArgs: {} - ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis® exporter - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 
- ## @param metrics.containerSecurityContext.enabled Enabled Redis® exporter containers' Security Context - ## @param metrics.containerSecurityContext.runAsUser Set Redis® exporter containers' Security Context runAsUser - ## @param metrics.containerSecurityContext.runAsGroup Set Redis® exporter containers' Security Context runAsGroup - ## @param metrics.containerSecurityContext.runAsNonRoot Set Redis® exporter containers' Security Context runAsNonRoot - ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set Redis® exporter containers' Security Context allowPrivilegeEscalation - ## @param metrics.containerSecurityContext.seccompProfile.type Set Redis® exporter containers' Security Context seccompProfile - ## @param metrics.containerSecurityContext.capabilities.drop Set Redis® exporter containers' Security Context capabilities to drop - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis® metrics sidecar - ## - extraVolumes: [] - ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar - ## - extraVolumeMounts: [] - ## Redis® exporter resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param metrics.resources.limits The resources limits for the Redis® exporter container - ## @param metrics.resources.requests The requested resources for the Redis® exporter container - ## - resources: - limits: {} - requests: {} - ## @param metrics.podLabels Extra labels for Redis® exporter pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param metrics.podAnnotations [object] Annotations for Redis® exporter pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9121" - ## Redis® exporter service parameters - ## - service: - ## @param metrics.service.type Redis® exporter service type - ## - type: ClusterIP - ## @param metrics.service.port Redis® exporter service port - ## - port: 9121 - ## @param metrics.service.externalTrafficPolicy Redis® exporter service external traffic policy - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param metrics.service.loadBalancerIP Redis® exporter service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param metrics.service.loadBalancerSourceRanges Redis® exporter service Load Balancer sources - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g. 
- ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param metrics.service.annotations Additional custom annotations for Redis® exporter service - ## - annotations: {} - ## @param metrics.service.clusterIP Redis® exporter service Cluster IP - ## - clusterIP: "" - ## Prometheus Service Monitor - ## ref: https://github.com/coreos/prometheus-operator - ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created - ## - namespace: "" - ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped - ## - interval: 30s - ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended - ## - scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. - ## - relabellings: [] - ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. - ## - metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## - honorLabels: false - ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus - ## - additionalLabels: {} - ## @param metrics.serviceMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics - ## - podTargetLabels: [] - ## @param metrics.serviceMonitor.sampleLimit Limit of how many samples should be scraped from every Pod - ## - sampleLimit: false - ## @param metrics.serviceMonitor.targetLimit Limit of how many targets should be scraped - ## - targetLimit: false - - ## Custom PrometheusRule to be defined - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## - prometheusRule: - ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created - ## - namespace: "" - ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule - ## - additionalLabels: {} - ## @param metrics.prometheusRule.rules Custom Prometheus rules - ## e.g: - ## rules: - ## - alert: RedisDown - ## expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0 - ## for: 2m - ## labels: - ## severity: error - ## annotations: - ## summary: Redis® instance {{ "{{ $labels.instance }}" }} down - ## description: Redis® instance {{ "{{ $labels.instance }}" }} is down - ## - alert: RedisMemoryHigh - ## expr: > - ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 - ## / - ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"} - ## > 90 - ## for: 2m - ## labels: - ## severity: error - ## annotations: - ## summary: Redis® instance {{ "{{ $labels.instance }}" }} is using too much memory - ## description: | - ## Redis® instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. 
- ## - alert: RedisKeyEviction - ## expr: | - ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0 - ## for: 1s - ## labels: - ## severity: error - ## annotations: - ## summary: Redis® instance {{ "{{ $labels.instance }}" }} has evicted keys - ## description: | - ## Redis® instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. - ## - rules: [] - - ## @section Init Container Parameters - ## - - ## 'volumePermissions' init container parameters - ## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values - ## based on the *podSecurityContext/*containerSecurityContext parameters - ## - volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` - ## - enabled: false - ## OS Shell + Utility image - ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ - ## @param volumePermissions.image.registry OS Shell + Utility image registry - ## @param volumePermissions.image.repository OS Shell + Utility image repository - ## @param volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended) - ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy - ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 11-debian-11-r90 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init container's resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param volumePermissions.resources.limits The resources limits for the init container - ## @param volumePermissions.resources.requests The requested resources for the init container - ## - resources: - limits: {} - requests: {} - ## Init container Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser - ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the - ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` - ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) - ## - containerSecurityContext: - runAsUser: 0 - - ## init-sysctl container parameters - ## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) - ## - sysctl: - ## @param sysctl.enabled Enable init container to modify Kernel settings - ## - enabled: false - ## OS Shell + Utility image - ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ - ## @param sysctl.image.registry OS Shell + Utility image registry - ## @param sysctl.image.repository OS Shell + Utility image repository - ## @param sysctl.image.tag OS Shell + Utility image tag (immutable tags are recommended) - ## @param sysctl.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param sysctl.image.pullPolicy OS Shell + Utility image pull policy - ## @param sysctl.image.pullSecrets OS Shell + Utility image pull secrets - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 11-debian-11-r90 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## @param sysctl.command Override default init-sysctl container command (useful when using custom images) - ## - command: [] - ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys` - ## - mountHostSys: false - ## Init container's resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param sysctl.resources.limits The resources limits for the init container - ## @param sysctl.resources.requests The requested resources for the init container - ## - resources: - limits: {} - requests: {} - - ## @section useExternalDNS Parameters - ## - ## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. - ## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled. - ## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations. 
- ## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. - ## - useExternalDNS: - enabled: false - suffix: "" - annotationKey: external-dns.alpha.kubernetes.io/ - additionalAnnotations: {} diff --git a/redis/redis-service-account.yaml b/redis/redis-service-account.yaml deleted file mode 100644 index 9c7dca7..0000000 --- a/redis/redis-service-account.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: redis - namespace: redis-system diff --git a/redis/sealed-secret.yaml b/redis/sealed-secret.yaml deleted file mode 100644 index 3a0e364..0000000 --- a/redis/sealed-secret.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: redis-key - namespace: redis-system -spec: - encryptedData: - password: AgAQ9PHv4fJez1wAJtcvWSOMFEMOOHULZhX1wFzoO9JTm4WDeK9GaWbT4tSM3fXsd+9GfhggnsFHeP4t5G/4BlvQ8lNs0bXfUZiSomUL69zhH2YEg9EhJVm9eJWvvJ75m1HnfIL2yFMm9jsxgzajg+fn5a6h4od0gjPAah9+uiVYi4xdIAv8SJK+CEXKKLhuwzV+MkQ0XdiISdanHjrPvYKA5FGRLqjmJePfSTtea5xGhx4DkHzkQ2KwzKIM/v4JOhA3JnwXebZh+GrUv6cg/fh9xnBUxeFvoimAt0gzOD0ajUIWTqTEHCqmPfumNo4w2paG+s+0vAL2gercxeyamOhkRZuWfOLwnQ/eoAm+gQGItn7UhL0yjaFDpkdICTrIXOEebScv27aHKe+4Cdw1BcAS8lIrE9JelVVgOqxBCaIvIBBPVyaFFVXF/YmMK6VAYTO1c3MDPpJEeFyNGoMo82lzL3IwRRFrPYoDrKbfsrWfZUQRYKOxVWihgWYFYx/asceJxegPAdCLq7avQ7tCoIodm9qgZ4F7F0x+N38oFLLCCe3tAhorInC/sWjkrsLpDBtAkWEsJnN865a+yRpN2YHFz+NKf2rugGDre0jA7GgisPwukmY4sC6r8MSjxumkaBo22hMoyRXBpsEBzLTsWMDjI6155J60iamBIUUORYpEVOHVFmY4iDSY9mBbp/ZzIvOa+mJCcvI5U5apJBALOUrGY3hSXHm+am7FWZtM6U0rmw== - template: - metadata: - creationTimestamp: null - name: redis-key - namespace: redis-system