diff --git a/authentik/helmrelease-authentik.yaml b/authentik/helmrelease-authentik.yaml deleted file mode 100644 index 72f624e..0000000 --- a/authentik/helmrelease-authentik.yaml +++ /dev/null @@ -1,1028 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: authentik - namespace: authentik-ns - annotations: - force-recreate: true -spec: - chart: - spec: - chart: authentik - sourceRef: - kind: HelmRepository - name: authentik - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: authentik - values: - # -- Provide a name in place of `authentik`. Prefer using global.nameOverride if possible - nameOverride: "" - # -- String to fully override `"authentik.fullname"`. Prefer using global.fullnameOverride if possible - fullnameOverride: "" - # -- Override the Kubernetes version, which is used to evaluate certain manifests - kubeVersionOverride: "" - - - ## Globally shared configuration for authentik components. - global: - # -- Provide a name in place of `authentik` - nameOverride: "" - # -- String to fully override `"authentik.fullname"` - fullnameOverride: "" - # -- Common labels for all resources. - additionalLabels: {} - # app: authentik - - # Number of old deployment ReplicaSets to retain. The rest will be garbage collected. - revisionHistoryLimit: 3 - - # Default image used by all authentik components. For GeoIP configuration, see the geoip values below. - image: - # -- If defined, a repository applied to all authentik deployments - repository: ghcr.io/goauthentik/server - # -- Overrides the global authentik whose default is the chart appVersion - tag: "" - # -- If defined, an image digest applied to all authentik deployments - digest: "" - # -- If defined, an imagePullPolicy applied to all authentik deployments - pullPolicy: IfNotPresent - - # -- Secrets with credentials to pull images from a private registry - imagePullSecrets: [] - - # -- Annotations for all deployed Deployments - deploymentAnnotations: {} - - # -- Annotations for all deployed pods - podAnnotations: {} - - # -- Labels for all deployed pods - podLabels: {} - - # -- Add Prometheus scrape annotations to all metrics services. This can be used as an alternative to the ServiceMonitors. - addPrometheusAnnotations: false - - # -- Toggle and define pod-level security context. - # @default -- `{}` (See [values.yaml]) - securityContext: {} - # runAsUser: 1000 - # runAsGroup: 1000 - # fsGroup: 1000 - - # -- Mapping between IP and hostnames that will be injected as entries in the pod's hosts files - hostAliases: [] - # - ip: 10.20.30.40 - # hostnames: - # - my.hostname - - # -- Default priority class for all components - priorityClassName: "" - - # -- Default node selector for all components - nodeSelector: {} - - # -- Default tolerations for all components - tolerations: [] - - # Default affinity preset for all components - affinity: - # -- Default pod anti-affinity rules. Either: `none`, `soft` or `hard` - podAntiAffinity: soft - # Node affinity rules - nodeAffinity: - # -- Default node affinity rules. 
Either `none`, `soft` or `hard` - type: hard - # -- Default match expressions for node affinity - matchExpressions: [] - # - key: topology.kubernetes.io/zone - # operator: In - # values: - # - zonea - # - zoneb - - # -- Default [TopologySpreadConstraints] rules for all components - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - - # -- Deployment strategy for all deployed Deployments - deploymentStrategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 25% - # maxUnavailable: 25% - - # -- Environment variables to pass to all deployed Deployments. Does not apply to GeoIP - # See configuration options at https://goauthentik.io/docs/installation/configuration/ - # @default -- `[]` (See [values.yaml]) - env: - - name: AUTHENTIK_SECRET_KEY - valueFrom: - secretKeyRef: - name: authentik-secret - key: secret-key - - name: AUTHENTIK_POSTGRESQL__PASSWORD - valueFrom: - secretKeyRef: - name: authentik-secret - key: postgres-password - - name: AUTHENTIK_REDIS__PASSWORD - valueFrom: - secretKeyRef: - name: authentik-secret - key: redis-password - # - name: AUTHENTIK_VAR_NAME - # value: VALUE - # - name: AUTHENTIK_VAR_OTHER - # valueFrom: - # secretKeyRef: - # name: secret-name - # key: secret-key - # - name: AUTHENTIK_VAR_ANOTHER - # valueFrom: - # configMapKeyRef: - # name: config-map-name - # key: config-map-key - - # -- envFrom to pass to all deployed Deployments. Does not apply to GeoIP - # @default -- `[]` (See [values.yaml]) - envFrom: [] - # - configMapRef: - # name: config-map-name - # - secretRef: - # name: secret-name - - # -- Additional volumeMounts to all deployed Deployments. Does not apply to GeoIP - # @default -- `[]` (See [values.yaml]) - volumeMounts: [] - # - name: custom - # mountPath: /custom - - # -- Additional volumes to all deployed Deployments. - # @default -- `[]` (See [values.yaml]) - volumes: [] - # - name: custom - # emptyDir: {} - - - ## Authentik configuration - authentik: - # -- Log level for server and worker - log_level: info - # -- Secret key used for cookie singing and unique user IDs, - # don't change this after the first install - secret_key: "" - events: - context_processors: - # -- Path for the GeoIP City database. If the file doesn't exist, GeoIP features are disabled. - geoip: /geoip/GeoLite2-City.mmdb - # -- Path for the GeoIP ASN database. If the file doesn't exist, GeoIP features are disabled. - asn: /geoip/GeoLite2-ASN.mmdb - email: - # -- SMTP Server emails are sent from, fully optional - host: "" - # -- SMTP server port - port: 587 - # -- SMTP credentials, when left empty, no authentication will be done - username: "" - # -- SMTP credentials, when left empty, no authentication will be done - password: "" - # -- Enable either use_tls or use_ssl, they can't be enabled at the same time. - use_tls: false - # -- Enable either use_tls or use_ssl, they can't be enabled at the same time. - use_ssl: false - # -- Connection timeout - timeout: 30 - # -- Email from address, can either be in the format "foo@bar.baz" or "authentik " - from: "" - outposts: - # -- Template used for managed outposts. 
The following placeholders can be used - # %(type)s - the type of the outpost - # %(version)s - version of your authentik install - # %(build_hash)s - only for beta versions, the build hash of the image - container_image_base: ghcr.io/goauthentik/%(type)s:%(version)s - error_reporting: - # -- This sends anonymous usage-data, stack traces on errors and - # performance data to sentry.beryju.org, and is fully opt-in - enabled: false - # -- This is a string that is sent to sentry with your error reports - environment: "k8s" - # -- Send PII (Personally identifiable information) data to sentry - send_pii: false - postgresql: - # -- set the postgresql hostname to talk to - # if unset and .Values.postgresql.enabled == true, will generate the default - # @default -- `{{ .Release.Name }}-postgresql` - host: "postgresql.postgresql-system.svc.cluster.local" - # -- postgresql Database name - # @default -- `authentik` - name: "authentik" - # -- postgresql Username - # @default -- `authentik` - user: "authentik" - #password: "" - port: 5432 - redis: - # -- set the redis hostname to talk to - # @default -- `{{ .Release.Name }}-redis-master` - host: "redis-master.redis-system.svc.cluster.local" - #host: "{{ .Release.Name }}-redis-master" - #password: "" - - - blueprints: - # -- List of config maps to mount blueprints from. - # Only keys in the configMap ending with `.yaml` will be discovered and applied. - configMaps: [] - # -- List of secrets to mount blueprints from. - # Only keys in the secret ending with `.yaml` will be discovered and applied. - secrets: [] - - - ## authentik server - server: - # -- authentik server name - name: server - - # -- The number of server pods to run - replicas: 1 - - ## authentik server Horizontal Pod Autoscaler - autoscaling: - # -- Enable Horizontal Pod Autoscaler ([HPA]) for the authentik server - enabled: false - # -- Minimum number of replicas for the authentik server [HPA] - minReplicas: 1 - # -- Maximum number of replicas for the authentik server [HPA] - maxReplicas: 5 - # -- Average CPU utilization percentage for the authentik server [HPA] - targetCPUUtilizationPercentage: 50 - # -- Average memory utilization percentage for the authentik server [HPA] - targetMemoryUtilizationPercentage: ~ - # -- Configures the scaling behavior of the target in both Up and Down directions. - behavior: {} - # scaleDown: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 180 - # scaleUp: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 2 - # periodSeconds: 60 - # -- Configures custom HPA metrics for the authentik server - # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - metrics: [] - - ## authentik server Pod Disruption Budget - ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - pdb: - # -- Deploy a [PodDistrubtionBudget] for the authentik server - enabled: false - # -- Labels to be added to the authentik server pdb - labels: {} - # -- Annotations to be added to the authentik server pdb - annotations: {} - # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) - # @default -- `""` (defaults to 0 if not specified) - minAvailable: "" - # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%) - ## Has higher precedence over `server.pdb.minAvailable` - maxUnavailable: "" - - ## authentik server image - ## This should match what is deployed in the worker. 
Prefer using global.image - image: - # -- Repository to use to the authentik server - # @default -- `""` (defaults to global.image.repository) - repository: "" # defaults to global.image.repository - # -- Tag to use to the authentik server - # @default -- `""` (defaults to global.image.tag) - tag: "" # defaults to global.image.tag - # -- Digest to use to the authentik server - # @default -- `""` (defaults to global.image.digest) - digest: "" # defaults to global.image.digest - # -- Image pull policy to use to the authentik server - # @default -- `""` (defaults to global.image.pullPolicy) - pullPolicy: "" # defaults to global.image.pullPolicy - - # -- Secrets with credentials to pull images from a private registry - # @default -- `[]` (defaults to global.imagePullSecrets) - imagePullSecrets: [] - - # -- Environment variables to pass to the authentik server. Does not apply to GeoIP - # See configuration options at https://goauthentik.io/docs/installation/configuration/ - # @default -- `[]` (See [values.yaml]) - env: [] - # - name: AUTHENTIK_VAR_NAME - # value: VALUE - # - name: AUTHENTIK_VAR_OTHER - # valueFrom: - # secretKeyRef: - # name: secret-name - # key: secret-key - # - name: AUTHENTIK_VAR_ANOTHER - # valueFrom: - # configMapKeyRef: - # name: config-map-name - # key: config-map-key - - # -- envFrom to pass to the authentik server. Does not apply to GeoIP - # @default -- `[]` (See [values.yaml]) - envFrom: [] - # - configMapRef: - # name: config-map-name - # - secretRef: - # name: secret-name - - # -- Specify postStart and preStop lifecycle hooks for you authentik server container - lifecycle: {} - - # -- Additional containers to be added to the authentik server pod - ## Note: Supports use of custom Helm templates - extraContainers: [] - # - name: my-sidecar - # image: nginx:latest - - # -- Init containers to add to the authentik server pod - ## Note: Supports use of custom Helm templates - initContainers: [] - # - name: download-tools - # image: alpine:3 - # command: [sh, -c] - # args: - # - echo init - - # -- Additional volumeMounts to the authentik server main container - volumeMounts: [] - # - name: custom - # mountPath: /custom - - # -- Additional volumes to the authentik server pod - volumes: [] - # - name: custom - # emptyDir: {} - - # -- Annotations to be added to the authentik server Deployment - deploymentAnnotations: {} - - # -- Annotations to be added to the authentik server pods - podAnnotations: {} - - # -- Labels to be added to the authentik server pods - podLabels: {} - - # -- Resource limits and requests for the authentik server - resources: {} - # requests: - # cpu: 100m - # memory: 512Mi - # limits: - # memory: 512Mi - - # authentik server container ports - containerPorts: - # -- http container port - http: 9000 - # -- https container port - https: 9443 - # -- metrics container port - metrics: 9300 - - # -- Host Network for authentik server pods - hostNetwork: false - - # -- [DNS configuration] - dnsConfig: {} - # -- Alternative DNS policy for authentik server pods - dnsPolicy: "" - - # -- authentik server pod-level security context - # @default -- `{}` (See [values.yaml]) - securityContext: {} - # runAsUser: 1000 - # runAsGroup: 1000 - # fsGroup: 1000 - - # -- authentik server container-level security context - # @default -- See [values.yaml] - containerSecurityContext: {} - # Not all of the following has been tested. Use at your own risk. 
- # runAsNonRoot: true - # readOnlyRootFilesystem: true - # allowPrivilegeEscalation: false - # seccomProfile: - # type: RuntimeDefault - # capabilities: - # drop: - # - ALL - - ## Liveness, readiness and startup probes for authentik server - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ - livenessProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 3 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - httpGet: - path: /-/health/live/ - port: http - - readinessProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 3 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - httpGet: - path: /-/health/ready/ - port: http - - startupProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 60 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - httpGet: - path: /-/health/live/ - port: http - - # -- terminationGracePeriodSeconds for container lifecycle hook - terminationGracePeriodSeconds: 30 - - # -- Prority class for the authentik server pods - # @default -- `""` (defaults to global.priorityClassName) - priorityClassName: "" - - # -- [Node selector] - # @default -- `{}` (defaults to global.nodeSelector) - nodeSelector: {} - - # -- [Tolerations] for use with node taints - # @default -- `[]` (defaults to global.tolerations) - tolerations: [] - - # -- Assign custom [affinity] rules to the deployment - # @default -- `{}` (defaults to the global.affinity preset) - affinity: {} - - # -- Assign custom [TopologySpreadConstraints] rules to the authentik server - # @default -- `[]` (defaults to global.topologySpreadConstraints) - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - - # -- Deployment strategy to be added to the authentik server Deployment - # @default -- `{}` (defaults to global.deploymentStrategy) - deploymentStrategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 25% - # maxUnavailable: 25% - - ## authentik server service configuration - service: - # -- authentik server service 
annotations - annotations: {} - # -- authentik server service labels - labels: {} - # -- authentik server service type - type: LoadBalancer - # -- authentik server service http port for NodePort service type (only if `server.service.type` is set to `NodePort`) - nodePortHttp: 30080 - # -- authentik server service https port for NodePort service type (only if `server.service.type` is set to `NodePort`) - nodePortHttps: 30443 - # -- authentik server service http port - servicePortHttp: 80 - # -- authentik server service https port - servicePortHttps: 443 - # -- authentik server service http port name - servicePortHttpName: http - # -- authentik server service https port name - servicePortHttpsName: https - # -- authentik server service http port appProtocol - # servicePortHttpAppProtocol: HTTP - # -- authentik server service https port appProtocol - # servicePortHttpsAppProtocol: HTTPS - # -- LoadBalancer will get created with the IP specified in this field - loadBalancerIP: "" - # -- Source IP ranges to allow access to service from - loadBalancerSourceRanges: [] - # -- authentik server service external IPs - externalIPs: [] - # -- Denotes if this service desires to route external traffic to node-local or cluster-wide endpoints - externalTrafficPolicy: "" - # -- Used to maintain session affinity. Supports `ClientIP` and `None` - sessionAffinity: "" - # -- Session affinity configuration - sessionAffinityConfig: {} - - ## authentik server metrics service configuration - metrics: - # -- deploy metrics service - enabled: true - service: - # -- metrics service type - type: ClusterIP - # -- metrics service clusterIP. `None` makes a "headless service" (no virtual IP) - clusterIP: "" - # -- metrics service annotations - annotations: {} - # -- metrics service labels - labels: {} - # -- metrics service port - servicePort: 9300 - # -- metrics service port name - portName: metrics - serviceMonitor: - # -- enable a prometheus ServiceMonitor - enabled: false - # -- Prometheus ServiceMonitor interval - interval: 30s - # -- Prometheus ServiceMonitor scrape timeout - scrapeTimeout: 3s - # -- Prometheus [RelabelConfigs] to apply to samples before scraping - relabelings: [] - # -- Prometheus [MetricsRelabelConfigs] to apply to samples before ingestion - metricRelabelings: [] - # -- Prometheus ServiceMonitor selector - selector: {} - # prometheus: kube-prometheus - - # -- Prometheus ServiceMonitor scheme - scheme: "" - # -- Prometheus ServiceMonitor tlsConfig - tlsConfig: {} - # -- Prometheus ServiceMonitor namespace - namespace: "" - # -- Prometheus ServiceMonitor labels - labels: {} - # -- Prometheus ServiceMonitor annotations - annotations: {} - - ingress: - # -- enable an ingress resource for the authentik server - enabled: false - # -- additional ingress annotations - annotations: {} - # -- additional ingress labels - labels: {} - # -- defines which ingress controller will implement the resource - ingressClassName: "" - # -- List of ingress hosts - hosts: [] - # - authentik.domain.tld - - # -- List of ingress paths - paths: - - / - # -- Ingress path type. 
One of `Exact`, `Prefix` or `ImplementationSpecific` - pathType: Prefix - # -- additional ingress paths - extraPaths: [] - # - path: /* - # pathType: Prefix - # backend: - # service: - # name: ssl-redirect - # port: - # name: use-annotation - - # -- ingress TLS configuration - tls: [] - # - secretName: authentik-tls - # hosts: - # - authentik.domain.tld - - # -- uses `server.service.servicePortHttps` instead of `server.service.servicePortHttp` - https: false - - - ## authentik worker - worker: - # -- authentik worker name - name: worker - - # -- The number of worker pods to run - replicas: 1 - - ## authentik worker Horizontal Pod Autoscaler - autoscaling: - # -- Enable Horizontal Pod Autoscaler ([HPA]) for the authentik worker - enabled: true - # -- Minimum number of replicas for the authentik worker [HPA] - minReplicas: 1 - # -- Maximum number of replicas for the authentik worker [HPA] - maxReplicas: 5 - # -- Average CPU utilization percentage for the authentik worker [HPA] - targetCPUUtilizationPercentage: 50 - # -- Average memory utilization percentage for the authentik worker [HPA] - targetMemoryUtilizationPercentage: ~ - # -- Configures the scaling behavior of the target in both Up and Down directions. - behavior: {} - # scaleDown: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 1 - # periodSeconds: 180 - # scaleUp: - # stabilizationWindowSeconds: 300 - # policies: - # - type: Pods - # value: 2 - # periodSeconds: 60 - # -- Configures custom HPA metrics for the authentik worker - # Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - metrics: [] - - ## authentik worker Pod Disruption Budget - ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - pdb: - # -- Deploy a [PodDistrubtionBudget] for the authentik worker - enabled: false - # -- Labels to be added to the authentik worker pdb - labels: {} - # -- Annotations to be added to the authentik worker pdb - annotations: {} - # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) - # @default -- `""` (defaults to 0 if not specified) - minAvailable: "" - # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%) - ## Has higher precedence over `worker.pdb.minAvailable` - maxUnavailable: "" - - ## authentik worker image - ## This should match what is deployed in the server. Prefer using global.image - image: - # -- Repository to use to the authentik worker - # @default -- `""` (defaults to global.image.repository) - repository: "" # defaults to global.image.repository - # -- Tag to use to the authentik worker - # @default -- `""` (defaults to global.image.tag) - tag: "" # defaults to global.image.tag - # -- Digest to use to the authentik worker - # @default -- `""` (defaults to global.image.digest) - digest: "" # defaults to global.image.digest - # -- Image pull policy to use to the authentik worker - # @default -- `""` (defaults to global.image.pullPolicy) - pullPolicy: "" # defaults to global.image.pullPolicy - - # -- Secrets with credentials to pull images from a private registry - # @default -- `[]` (defaults to global.imagePullSecrets) - imagePullSecrets: [] - - # -- Environment variables to pass to the authentik worker. 
Does not apply to GeoIP - # See configuration options at https://goauthentik.io/docs/installation/configuration/ - # @default -- `[]` (See [values.yaml]) - env: - - name: AUTHENTIK_REDIS__DB - value: "1" - # - name: AUTHENTIK_VAR_NAME - # value: VALUE - # - name: AUTHENTIK_VAR_OTHER - # valueFrom: - # secretKeyRef: - # name: secret-name - # key: secret-key - # - name: AUTHENTIK_VAR_ANOTHER - # valueFrom: - # configMapKeyRef: - # name: config-map-name - # key: config-map-key - - # -- envFrom to pass to the authentik worker. Does not apply to GeoIP - # @default -- `[]` (See [values.yaml]) - envFrom: [] - # - configMapRef: - # name: config-map-name - # - secretRef: - # name: secret-name - - # -- Specify postStart and preStop lifecycle hooks for you authentik worker container - lifecycle: {} - - # -- Additional containers to be added to the authentik worker pod - ## Note: Supports use of custom Helm templates - extraContainers: [] - # - name: my-sidecar - # image: nginx:latest - - # -- Init containers to add to the authentik worker pod - ## Note: Supports use of custom Helm templates - initContainers: [] - # - name: download-tools - # image: alpine:3 - # command: [sh, -c] - # args: - # - echo init - - # -- Additional volumeMounts to the authentik worker main container - volumeMounts: [] - # - name: custom - # mountPath: /custom - - # -- Additional volumes to the authentik worker pod - volumes: [] - # - name: custom - # emptyDir: {} - - # -- Annotations to be added to the authentik worker Deployment - deploymentAnnotations: {} - - # -- Annotations to be added to the authentik worker pods - podAnnotations: {} - - # -- Labels to be added to the authentik worker pods - podLabels: {} - - # -- Resource limits and requests for the authentik worker - resources: {} - # requests: - # cpu: 100m - # memory: 512Mi - # limits: - # memory: 512Mi - - # -- Host Network for authentik worker pods - hostNetwork: false - - # -- [DNS configuration] - dnsConfig: {} - # -- Alternative DNS policy for authentik worker pods - dnsPolicy: "" - - # -- authentik worker pod-level security context - # @default -- `{}` (See [values.yaml]) - securityContext: {} - # runAsUser: 1000 - # runAsGroup: 1000 - # fsGroup: 1000 - - # -- authentik worker container-level security context - # @default -- See [values.yaml] - containerSecurityContext: {} - # Not all of the following has been tested. Use at your own risk. 
- # runAsNonRoot: true - # readOnlyRootFilesystem: true - # allowPrivilegeEscalation: false - # seccomProfile: - # type: RuntimeDefault - # capabilities: - # drop: - # - ALL - - livenessProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 3 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - exec: - command: - - ak - - healthcheck - - readinessProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 3 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 5 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - exec: - command: - - ak - - healthcheck - - startupProbe: - # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded - failureThreshold: 60 - # -- Number of seconds after the container has started before [probe] is initiated - initialDelaySeconds: 30 - # -- How often (in seconds) to perform the [probe] - periodSeconds: 10 - # -- Minimum consecutive successes for the [probe] to be considered successful after having failed - successThreshold: 1 - # -- Number of seconds after which the [probe] times out - timeoutSeconds: 1 - ## Probe configuration - exec: - command: - - ak - - healthcheck - - # -- terminationGracePeriodSeconds for container lifecycle hook - terminationGracePeriodSeconds: 30 - - # -- Prority class for the authentik worker pods - # @default -- `""` (defaults to global.priorityClassName) - priorityClassName: "" - - # -- [Node selector] - # @default -- `{}` (defaults to global.nodeSelector) - nodeSelector: {} - - # -- [Tolerations] for use with node taints - # @default -- `[]` (defaults to global.tolerations) - tolerations: [] - - # -- Assign custom [affinity] rules to the deployment - # @default -- `{}` (defaults to the global.affinity preset) - affinity: {} - - # -- Assign custom [TopologySpreadConstraints] rules to the authentik worker - # @default -- `[]` (defaults to global.topologySpreadConstraints) - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment - topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: topology.kubernetes.io/zone - # whenUnsatisfiable: DoNotSchedule - - # -- Deployment strategy to be added to the authentik worker Deployment - # @default -- `{}` (defaults to global.deploymentStrategy) - deploymentStrategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 25% - # maxUnavailable: 25% - - - serviceAccount: - # -- Create service account. 
Needed for managed outposts - create: true - # -- additional service account annotations - annotations: {} - serviceAccountSecret: - # As we use the authentik-remote-cluster chart as subchart, and that chart - # creates a service account secret by default which we don't need here, - # disable its creation - enabled: false - fullnameOverride: authentik - - - geoip: - # -- enable GeoIP sidecars for the authentik server and worker pods - enabled: false - - editionIds: "GeoLite2-City GeoLite2-ASN" - # -- GeoIP update frequency, in hours - updateInterval: 8 - # -- sign up under https://www.maxmind.com/en/geolite2/signup - accountId: "" - # -- sign up under https://www.maxmind.com/en/geolite2/signup - licenseKey: "" - ## use existing secret instead of values above - existingSecret: - # -- name of an existing secret to use instead of values above - secretName: "" - # -- key in the secret containing the account ID - accountId: "account_id" - # -- key in the secret containing the license key - licenseKey: "license_key" - - image: - # -- If defined, a repository for GeoIP images - repository: ghcr.io/maxmind/geoipupdate - # -- If defined, a tag for GeoIP images - tag: v6.0.0 - # -- If defined, an image digest for GeoIP images - digest: "" - # -- If defined, an imagePullPolicy for GeoIP images - pullPolicy: IfNotPresent - - # -- Environment variables to pass to the GeoIP containers - # @default -- `[]` (See [values.yaml]) - env: [] - # - name: GEOIPUPDATE_VAR_NAME - # value: VALUE - # - name: GEOIPUPDATE_VAR_OTHER - # valueFrom: - # secretKeyRef: - # name: secret-name - # key: secret-key - # - name: GEOIPUPDATE_VAR_ANOTHER - # valueFrom: - # configMapKeyRef: - # name: config-map-name - # key: config-map-key - - # -- envFrom to pass to the GeoIP containers - # @default -- `[]` (See [values.yaml]) - envFrom: [] - # - configMapRef: - # name: config-map-name - # - secretRef: - # name: secret-name - - # -- Additional volumeMounts to the GeoIP containers. Make sure the volumes exists for the server and the worker. - volumeMounts: [] - # - name: custom - # mountPath: /custom - - # -- Resource limits and requests for GeoIP containers - resources: {} - # requests: - # cpu: 100m - # memory: 128Mi - # limits: - # memory: 128Mi - - # -- GeoIP container-level security context - # @default -- See [values.yaml] - containerSecurityContext: {} - # Not all of the following has been tested. Use at your own risk. - # runAsNonRoot: true - # readOnlyRootFilesystem: true - # allowPrivilegeEscalation: false - # seccomProfile: - # type: RuntimeDefault - # capabilities: - # drop: - # - ALL - - - prometheus: - rules: - enabled: false - # -- PrometheusRule namespace - namespace: "" - # -- PrometheusRule selector - selector: {} - # prometheus: kube-prometheus - - # -- PrometheusRule labels - labels: {} - # -- PrometheusRule annotations - annotations: {} - - - postgresql: - # -- enable the Bitnami PostgreSQL chart. Refer to https://github.com/bitnami/charts/blob/main/bitnami/postgresql/ for possible values. - enabled: false - auth: - username: authentik - database: authentik - # password: "" - primary: - extendedConfiguration: | - max_connections = 500 - # persistence: - # enabled: true - # storageClass: - # accessModes: - # - ReadWriteOnce - - - redis: - # -- enable the Bitnami Redis chart. Refer to https://github.com/bitnami/charts/blob/main/bitnami/redis/ for possible values. - enabled: false - architecture: standalone - auth: - enabled: false - - - # -- additional resources to deploy. Those objects are templated. 
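The HelmRelease at the top of this change references a chart source of kind HelmRepository named authentik in the flux-system namespace, which is not part of this diff. A minimal sketch of what that source object could look like, assuming the upstream goauthentik chart repository URL (https://charts.goauthentik.io) and a one-hour refresh interval — both assumptions, neither appears in this diff:

apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
  name: authentik
  namespace: flux-system
spec:
  # Assumed upstream chart repository; adjust if charts are mirrored elsewhere
  url: https://charts.goauthentik.io
  interval: 1h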
- additionalObjects: [] diff --git a/authentik/sealed-secret.yaml b/authentik/sealed-secret.yaml deleted file mode 100644 index 812e47b..0000000 --- a/authentik/sealed-secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: authentik-secret - namespace: authentik-ns -spec: - encryptedData: - postgres-password: AgBigFPSosBY6PGUxR4zdIntM+oGMyaDY9mHZBwL5xbjEEvmzNKCuCfQFuiE07WqV3fjWEp6D3o23fIMomPC3SNLWySfti8o5pyBrPGDZLR1dVYWLmkyMCj0pzbDmPgAArBuzGmQG6P+Kn4lqlkSU6F50ev/W8yHUPkrlp+iJsGM9wYNlboaZmDMowIK5ny8sQ5vIb+QakS3ybRa3DfX/T3yNvuhOeCt+367/3oV0yNmCEBK4qKpTsAkWctxXooX1wcAkOwMesqfE42I5Mt+s/UnbU5fXJdzM0YI7WZreEy5oaG1shDxp1PhXoc12yCt5KobTj0xlttUVFVb8IaOY7r4oSI74vrL8KGuZdny0oeWvVbiwA/SvOt7S05RdryYWf66jN71/Aku5LnKJwRoa7veGeX9S5pUe1wZyVSDN6trkJcG5ZJRmEerr4MOZ4YX9cB2FktEmd+estjIlm/UhEIRN8Qv4qd54t6j2Ajhk6EJ3Ky6mI9xiun+0ti9880rIHQiW5MpiZVB+nQlAosTVQu4wRjdnP6Z0ndP83e2rPkHJ/jF2iawXOBoS0Eh11UaXvRQyNQOt3ReIba7E0aSbynpULViOg/lVNLA2qgyp+37Veb44Mi2k7sHg7I8e6MOMVjBhfmv3HvMpdHHBIHSq2vaDlF/0i5o5OT0F1O+06OngfQAaQQc1SdpLeoPKget5fbNF9zgmfKxPodjayq+h6n3vm5QOc4TagtcG1PV38LsiQ== - redis-password: AgCWDT6n+wmF9+Qk4+bu1byc7TFmRwPGqrhBIdVvZrTMRh6jt43E8urutTAlqKO6JPbRw+gw7zA40uOOHYzU3UaIXdAueQtCRMhHAzKWMwvTuzKGqLmmKcxVF452wilyhMjLBgRuBvX43VK4kynIthM3LZmw9a/HAlbQqn624N3wvdOYXyrWG0YKisXJunEFPgQyygWozdFD/N+b2loBq5YvH3mLuOuJDcuAC+Ti7URRbHigZXOhpZK6ilycAcJxJlOE9FVDRXMYSophjDWtD/Wb7WNLU7iakdXjNMFNVlE89mzrLxOskI918l6hrMG+Tk9FrhwKZx9ZuVwoUOdLBhF7I0jjYWKnJ1gEIMKXNBcrQWcnqX392VTu4RG0YNIIzasYkJ4/i3bjDnIH9zpSnRn6VSL2ZRhikJBOGJRXlXamd93XcCC+wg7gLu9XGi6g7ddC9UksxFzfIoMvj6aZ5EzERwJ7Td/qH1mWcfm5iesXKP1Y7PUSElIXIVmx9ifLgzIfbreb5VJDj2v+gTD44zxy+zHhSgdyefR2FcXT2eZv9CFO/VS8WB/F8+edJai0wHmJv0ooYVNS0PtIkyD8DEUC3Egt97SmWlQlEn1rfX1hj7jpN7HTpW19l9kV3r9n84ZzVJf62qybHElKOQWoqdz2Xxv6gPannZ8XQbk3nR0dG99jrUhvTpqjLFaWV+27PE0bRuV6w1G5Zm7X6Jdr/y3p8UvH2UonA2/8xjPANci/tA== - secret-key: AgBGLb8gPEET4udFwIMlgqWz5nIvu0/Tq6AhkCvxYTF4z2Gl4I7uOA4QtsnqDfOeQXJStpJ02ndc+q5l1uoP+hVgwhX1yWdeAtlQgubCpGraCQqofqVrwQwt9DoZqre+8rCp3llEugTP72Vekx9s9/8nDs+JqfBtfgLSdYqaJDO7fd3P4DDvA+DPhRTuT8j1YkX9mejxaWxd9lDss2OXWgZ/HDvGrm61FS3ByVqAo0uuayBcC8TtVrcjA6o2bfCFzz7g1uwzDC10bE7RNuJzpErulrOv/QzgxB/yTmQ4JlJmbgonAC3ZUBBc5hAl7m7hKuq6CFyHD1kZCWJ/cZkg9AagI0u9f96+y5kYh+KZK8/WuPHF3LhM9dam9KYKVJRqWE4nq5/QYcpbkQtKBqKlGPZZCyEmH/ylL6r3djMHNjKTpdCwlMqNFetDPLDMNFB1i2Nqg7PAzqOE3Dq5AHShSBG//losKiTfoNF3uYwbrA3cQhxCOAM/1EiLEvz1KerHaJrlcV5Y32ZaOj6P4aQeBAzEpmS8sRr0yooYmA1iJce+wYMsvI1VlNKP4HU+wLm5xKNca1SRvZaOmz1RUp3l+Q+jckhHmRFubLOR6RpmdiGtTAyvjfMRkRtzDfnyu+xGvCqlontPIPWh7yl8jsqrjhr5/tXVtSs+yZhdfn1M7oiDbv7xa4o2jAxt+MpP1XtMaoH/Rnt3x2JprDrSU+1YICE9Ibzo6xjJYFs5I/fM7auUvF3cmX40zafRHw5DYehWCBU3mA== - template: - metadata: - creationTimestamp: null - name: authentik-secret - namespace: authentik-ns diff --git a/grocy/grocy-deployment.yaml b/grocy/grocy-deployment.yaml deleted file mode 100644 index 09d7f34..0000000 --- a/grocy/grocy-deployment.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: grocy - namespace: grocy-ns -spec: - replicas: 1 - selector: - matchLabels: - app: grocy - template: - metadata: - labels: - app: grocy - spec: - containers: - - name: grocy - image: lscr.io/linuxserver/grocy:4.2.0 - ports: - - containerPort: 80 - env: - #- name: GROCY_AUTH_CLASS - # value: "Grocy/Middleware/ReverseProxyAuthMiddleware" - #- name: GROCY_REVERSE_PROXY_AUTH_HEADER - # value: "X-authentik-name" - - name: PUID - value: "1000" - - name: PGID - value: "1000" - - name: TZ - value: 
"Etc/UTC" - volumeMounts: - - mountPath: "/config" - name: grocy-config - volumes: - - name: grocy-config - persistentVolumeClaim: - claimName: grocy-pvc diff --git a/grocy/grocy-pvc.yaml b/grocy/grocy-pvc.yaml deleted file mode 100644 index 7236259..0000000 --- a/grocy/grocy-pvc.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: grocy-pvc - namespace: grocy-ns -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi diff --git a/grocy/grocy-service.yaml b/grocy/grocy-service.yaml deleted file mode 100644 index cf685f0..0000000 --- a/grocy/grocy-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: grocy - namespace: grocy-ns -spec: - type: LoadBalancer - selector: - app: grocy - ports: - - port: 80 - targetPort: 80 - protocol: TCP diff --git a/homarr/homarr-deployment.yaml b/homarr/homarr-deployment.yaml deleted file mode 100644 index 1f76994..0000000 --- a/homarr/homarr-deployment.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: homarr - namespace: homarr-ns -spec: - replicas: 1 - selector: - matchLabels: - app: homarr - template: - metadata: - labels: - app: homarr - spec: - containers: - - name: homarr - image: ghcr.io/ajnart/homarr:latest - ports: - - containerPort: 7575 - env: - - name: EDIT_MODE_PASSWORD - valueFrom: - secretKeyRef: - name: homarr-edit-key - key: edit-key - volumeMounts: - - name: homarr-config - mountPath: /app/data/configs - - name: homarr-icons - mountPath: /app/public/icons - - name: homarr-data - mountPath: /data - volumes: - - name: homarr-config - persistentVolumeClaim: - claimName: homarr-config-pvc - - name: homarr-icons - persistentVolumeClaim: - claimName: homarr-icons-pvc - - name: homarr-data - persistentVolumeClaim: - claimName: homarr-data-pvc diff --git a/homarr/homarr-pvc-config.yaml b/homarr/homarr-pvc-config.yaml deleted file mode 100644 index 9690f57..0000000 --- a/homarr/homarr-pvc-config.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: homarr-config-pvc - namespace: homarr-ns -spec: - storageClassName: longhorn - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 512Mi diff --git a/homarr/homarr-pvc-data.yaml b/homarr/homarr-pvc-data.yaml deleted file mode 100644 index e77d554..0000000 --- a/homarr/homarr-pvc-data.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: homarr-data-pvc - namespace: homarr-ns -spec: - storageClassName: longhorn - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/homarr/homarr-pvc-icons.yaml b/homarr/homarr-pvc-icons.yaml deleted file mode 100644 index f6cb304..0000000 --- a/homarr/homarr-pvc-icons.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: homarr-icons-pvc - namespace: homarr-ns -spec: - storageClassName: longhorn - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi diff --git a/homarr/homarr-service.yaml b/homarr/homarr-service.yaml deleted file mode 100644 index 86b3b74..0000000 --- a/homarr/homarr-service.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: homarr-service - namespace: homarr-ns -spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 7575 - selector: - app: homarr diff --git a/homarr/sealed-secret.yaml b/homarr/sealed-secret.yaml deleted file mode 100644 index 27f3df2..0000000 --- 
a/homarr/sealed-secret.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: homarr-edit-key - namespace: homarr-ns -spec: - encryptedData: - edit-key: AgBnP6HGyQv63BuvrbO9JWdDu/aS7GadN+6dJ/4uBziMT6HxvBBbunrO5ZROHD1Hl9F3BSQs1GECkzYTQDVd5Hji93L39InCpo3+G0GGg0m6BH8j5WarheWS4837WynOUOfHncCCtXzG9iRqFZAUKE3xYtbNMULXXBiMyY625aonF3Agqz9MAtz4Dv56N5cPE4C4Ck0VPi4POQCP6RezHteCktlBBwpbPAem61mUUx+P+V7hEX3/TItl0j4HOvC6ttbHtVLPUwvHHdBcH/0stKhPben4Hnp7qLZe1A16+RCAbaAYF2TS9JbrQsCwtDq8mkQeAQg1sU0S1092b9OZKk9s1QpGGlKuH7G1iwQcaTpdVIj57QVIOPNoGWuuOiVzWe8hf+b1jITEJNT7VYWmBYcIZjLakYFr8zbkWPlvinkTv0GHo8uBOWsqLF+w3ekYk9HNSJ6dFEBpeMpvllXcbKnggb222otyqJ2Z9Kh2svIBqq2+0VulhFtEfjXFYLOMHqi+ZUz/MkPuREevDQXjwJTBoHD5OaB1OFRo6Kp1jyLogkTnUO/j2qv5DZDkofE0ha4PR9/9olqoYzTfs0IOa2+yUQZJ0OJ5dQbrnxNqbUWjCrVn6xVeCqKrZzsK+96wJVBgiPBzruO0y5ZYreNyW0GdBDS1ubvkkv8eMKbVOM+GTEtC1AburtCwuVYwOxgOJ31zudWmDzqEnrDK1Qp91eyzk4W2J+TRd52fxLQUukq9SA== - template: - metadata: - creationTimestamp: null - name: homarr-edit-key - namespace: homarr-ns diff --git a/immich/immich-machine-learning-deployment.yaml b/immich/immich-machine-learning-deployment.yaml deleted file mode 100644 index 43c7e8f..0000000 --- a/immich/immich-machine-learning-deployment.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: immich-machine-learning - namespace: immich-ns -spec: - replicas: 1 - selector: - matchLabels: - app: immich-machine-learning - template: - metadata: - labels: - app: immich-machine-learning - spec: - containers: - - name: immich-machine-learning - image: ghcr.io/immich-app/immich-machine-learning:release - env: - - name: UPLOAD_LOCATION - value: /usr/src/app/upload - - name: DB_HOSTNAME - value: postgresql.postgresql-system.svc.cluster.local - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: immich-secret - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: immich-secret - key: password - - name: DB_DATABASE_NAME - valueFrom: - secretKeyRef: - name: immich-secret - key: database - - name: REDIS_HOSTNAME - value: redis-master.redis-system.svc.cluster.local - volumeMounts: - - name: model-cache - mountPath: /cache - volumes: - - name: model-cache - emptyDir: {} diff --git a/immich/immich-microservices-deployment.yaml b/immich/immich-microservices-deployment.yaml deleted file mode 100644 index aeccd13..0000000 --- a/immich/immich-microservices-deployment.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: immich-microservices - namespace: immich-ns -spec: - replicas: 1 - selector: - matchLabels: - app: immich-microservices - template: - metadata: - labels: - app: immich-microservices - spec: - containers: - - name: immich-microservices - image: ghcr.io/immich-app/immich-server:release - args: ["start.sh", "microservices"] - env: - - name: UPLOAD_LOCATION - value: /usr/src/app/upload - - name: DB_VECTOR_EXTENSION - value: pgvector - - name: DB_HOSTNAME - value: postgresql.postgresql-system.svc.cluster.local - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: immich-secret - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: immich-secret - key: password - - name: DB_DATABASE_NAME - valueFrom: - secretKeyRef: - name: immich-secret - key: database - - name: REDIS_HOSTNAME - value: redis-master.redis-system.svc.cluster.local - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-immich-secret - key: REDIS_PASS - volumeMounts: - 
- name: upload-volume - mountPath: /usr/src/app/upload - volumes: - - name: upload-volume - persistentVolumeClaim: - claimName: immich-library-pvc diff --git a/immich/immich-microservices-service.yaml b/immich/immich-microservices-service.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/immich/immich-pvc-library.yaml b/immich/immich-pvc-library.yaml deleted file mode 100644 index 9954244..0000000 --- a/immich/immich-pvc-library.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: immich-library-pvc - namespace: immich-ns -spec: - accessModes: - - ReadWriteMany - storageClassName: longhorn - resources: - requests: - storage: 100Gi diff --git a/immich/immich-server-deployment.yaml b/immich/immich-server-deployment.yaml deleted file mode 100644 index a69bbca..0000000 --- a/immich/immich-server-deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: immich-server - namespace: immich-ns -spec: - replicas: 1 - selector: - matchLabels: - app: immich-server - template: - metadata: - labels: - app: immich-server - spec: - containers: - - name: immich-server - image: ghcr.io/immich-app/immich-server:release - args: ["start.sh", "immich"] - ports: - - containerPort: 3001 - env: - - name: UPLOAD_LOCATION - value: /usr/src/app/upload - - name: DB_VECTOR_EXTENSION - value: pgvector - - name: DB_HOSTNAME - value: postgresql.postgresql-system.svc.cluster.local - - name: DB_USERNAME - valueFrom: - secretKeyRef: - name: immich-secret - key: username - - name: DB_PASSWORD - valueFrom: - secretKeyRef: - name: immich-secret - key: password - - name: DB_DATABASE_NAME - valueFrom: - secretKeyRef: - name: immich-secret - key: database - - name: REDIS_HOSTNAME - value: redis-master.redis-system.svc.cluster.local - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-immich-secret - key: REDIS_PASS - volumeMounts: - - name: upload-volume - mountPath: /usr/src/app/upload - volumes: - - name: upload-volume - persistentVolumeClaim: - claimName: immich-library-pvc diff --git a/immich/immich-server-service.yaml b/immich/immich-server-service.yaml deleted file mode 100644 index f1acc8a..0000000 --- a/immich/immich-server-service.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: immich-server-service - namespace: immich-ns -spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 3001 - selector: - app: immich-server diff --git a/immich/redis-secret.yaml b/immich/redis-secret.yaml deleted file mode 100644 index 90421ef..0000000 --- a/immich/redis-secret.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: redis-immich-secret - namespace: immich-ns -spec: - encryptedData: - REDIS_PASS: 
AgA87rwcuMmDmgvDRl6pcObFFNBPKSH1qCkXUFgIqB/jX/ursxPgP+5f9ANY7PZjZTJ3QSAenJKPKUu9ER5B04o9b09EIcSpTQ0eVQRl6jwMRRzCbFWedb1bsNPuNyQBaf7IhaLshfQPSsjamp4oAaczLjbQPs/musn/3TUYVThIdgWBltv9i/12+BkbA98sS3gsMVWyP+cCcVQ+mMTGNsLZbxP1XC50yAAWifqJk6NbT+m9CA1wnesgegyr1W7KUGxudKnRA7iaGiP+fC+LbLIbD63tkme6/65b9x5qXZLM9qpiBEX+Yrv7YTn+ZJ94KwMnDjV8Y3Izom4etOawnLaRIIal/PGJPjSLE+PqtVRKXpTO8I3ExKSHb3MfLpfqTQ24N1yoNOnYu6dv2Rhd0Q9lMA6RBX4XUfsjYxHwIWyN1HhdAkbAS+ZqIlcnzT/rVIkkLcU/3/2Ptjj1IRDHFZplibUTbmkiKBvSDeOWDDRXC0FPvMegcfv2mYXY03W70N1uW39JVd0hcDhMxVaaW7yB7rmNEdOpFmpSPBScNtJj7bjEkAQCqXfqogclPs7FJOkrEJKK92Mon8ZMRdeD7GAbh4UqiRIe/SnjD2PsxWKDIMX3uqHN4PpxtsI5F3cY8mQNLG9nP4QzS5b8uU3vfJ4aSX2WpY7UhCXZ1ZuZDMNUDyQ9ULNcFh0FAkB3KzFi35Kqlxf6CsiY2pkxmtHm4w1WJkq09n2iNlsORJayzwDu6Q== - template: - metadata: - creationTimestamp: null - name: redis-immich-secret - namespace: immich-ns diff --git a/immich/sealed-secret.yaml b/immich/sealed-secret.yaml deleted file mode 100644 index b8d3ad1..0000000 --- a/immich/sealed-secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: immich-secret - namespace: immich-ns -spec: - encryptedData: - database: AgA+Vgab29fZ+NPF1PxzvcT3StAlEiOOKO77tYH+IgfKhdK7wTP4q+OVdV6gWPahK1ssZ8lPISml1HDMPx/IIlCYHmp1xi+wtoOgvyOGq5/8czupMQ4dLwiMVWFyRnCUm94119dCA9KImIqyhrNZ/FebqrcqvykI3h8/XDGCZujjMlHhnhRSUF3AohL3cW72tnZkDeSKebp1Mkmi0LEij2v0/+dZXuIEsfLPVHgxJKvCfPX7ND3TigBlFsa1VQOSZY19MI283rS9keqX0pFP+h0LAT6iGw/4p9fOjVYPNZySVn/z/XXcxnKjO477edJp9TGb+xd1m/kSmUhKF2w58jkKoZMlUwwCxteh9H1zj9rHMQfSmVG+tg9j5WoSsfIaWbDIFIf91l07XSwa8MGJ91NE6nvHEgf7C/OtZ52SjHTKEielLHsvTPRn2lIi14P9tMadI1z11POTf416CIcB2fXzuu619FHARSJseBpBLYwPM5pSpF0XKqTl7mW0kypa46kikjGou6CuJWhrFkh8Yqpth6hfsIV0BkLxXUpoWW9/dMQztfnuB7OvogNUJRTn+g9tzGLyY5bWddokV9s6uxyyaDAi9wPe48HRhX6bGwOgEPdprV5VRSuXu7A2g2YGYsxvvsEr9dXZA3rY9dW63wAzIhydxO8i0+9JHd9CMKohj60S5Llh402p4fDm3JIXchpeNwJzyo4= - password: AgC68pWUzY4eghLcYSxEkwVtBL7BlQ8ytG11hk8NuGcPK+B9kA0VFtw5gFTYMIb0UL95O0BN/L7A6O7oXZm6skWlwOaYmUUOhdCnws1vRA7RamA+gWiT6qV+aFVdeiWLm2pgTdwquqB/Ky2/K3FF2tLoA2Gmp+uGGbet8txMb5RlCWA5jdb6xqsszCFu8NKpcb85kaRtBAP1AzXwWWnP1E+ITM8FjsL1QXlwkxra/uChN99w6Sc66GR8VUb3M3lmtv26AX2hHhqOeWNNJzIbWpmThS+DuluopF4UF+rEixTnR5jBtl8Let6ZA/UwgZ0sfBOijFLyoSFK0ly0f1p3bDH7jtgL2f7OQNPv/VkY6RKi5LViE20m2fYKmt2Fx+FdrIAw64jK3fhLuWF9MKuHOLhgbcrpCvuIMcR1P+/TEPoOrwLy8qzSyGlHZlYHo2m16FqdHqvwHF2vd3A2OnblBx8RN51Hxr0PaRb11FxGQSdQgVU4IoQp0GlvDrhzRXHU1g4G7BnG7+fQpFHujw5QB0rrSLP8WgfWkdYOo6E7xF5EXZ+E2vWsRPRJ2bkVH0mywIo8BC1e7WCR28uLK29e2kBMxiwzDxu+7x/g8rbXxLZGVakEhvZMlWPUSBpcU6rEdW1x7+TEJCGxxBUf3/e6K60MqvOQIe3gRrevY8DddkCFbi6+ZIPmTpd95K9MwnwDDWub+CzKZaWBn6+23NMiBkMa2mgFIWn0QMxEtazTWwJITw== - username: AgA/sz7ukcLAtrSfiGncgMC/VkekQYAYhUmsVTR/sS9di8gv98+pBZbC2i1CC+Qy0yagVEmpstqD46AlkI4d/38S1YLoEolJomEn8KUcdvle7RXK5d+HXSDQCbWdhdhJsbw094rLd2pPzJ1ykVpJglbg+Ec9pzydorjS5LA8vXyujmH3YXW3OU2GCI+B8rgiedetlP6zyZciKuSNd/yDPB7cYzch0lmheGHREulvAzXE6xPv4hiyZtY0FA26zjixtQjW/CJnmwzD6/F1MBZWXtColxZob6I9I5DY4zGawNgS8n4qF/bRoIr75LYkD77KEfBWba5QkQcfnvsEmJWKFmMBchdrM8+wHulgElzTRn8HIfaslk6Aq9RBasXEBDtumBgLiOVCr4TNNX6RHNooyF6uc+Ms4zTdTsibBmMs3X0W8ON1qZx+oXf5M7QW3x+rz+cl7o1TQUsGaHeAcLjh1xGJWddSo1gRL8kqX7wlVucm2LZwIwdWnGT+Bp97FJmJ+R+xgjrmzy9lhboSK58LnpHk65psIngp0XCZ6b3pNrKbDc7H/v8EAjElSAhTGwX7nIwZ4jGCdgPICcX0FtWW17nlJIXJoHmQL08fPa7dqqkpx2JgLQ2E19TywfItxxRApYtRP2AXuf53XLiyQjDgo6STldASysj4MgpJti0lKZNUQkK2QedaXKhyLO3/n53SADSac+P8s0E= - template: - metadata: - creationTimestamp: null - name: immich-secret - namespace: immich-ns diff --git 
a/invidious/invidious-deployment.yaml b/invidious/invidious-deployment.yaml deleted file mode 100644 index 8575553..0000000 --- a/invidious/invidious-deployment.yaml +++ /dev/null @@ -1,61 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: invidious - namespace: invidious-ns -spec: - replicas: 1 - selector: - matchLabels: - app: invidious - template: - metadata: - labels: - app: invidious - spec: - containers: - - name: wait-and-die - image: alpine:latest - command: ["/bin/sh", "-c"] - args: ["sleep 21600; exit 0"] - - name: invidious - image: quay.io/invidious/invidious:2024.04.26-eda7444 - env: - - name: INVIDIOUS_PORT - value: "3000" - - name: INVIDIOUS_DB_PASSWORD - valueFrom: - secretKeyRef: - name: invidious-secret - key: invidious-postgres-password - - name: INVIDIOUS_HMAC_KEY - valueFrom: - secretKeyRef: - name: invidious-secret - key: hmac - - name: INVIDIOUS_CONFIG - value: | - db: - dbname: Invidious - user: invidious - password: $(INVIDIOUS_DB_PASSWORD) - host: postgresql.postgresql-system.svc.cluster.local - port: 5432 - check_tables: true - hmac_key: "$(INVIDIOUS_HMAC_KEY)" - pool_size: 100 - statistics_enabled: true - admins: ["tyler"] - channel_threads: 2 - channel_refresh_interval: 15m - feed_threads: 2 - banner: "Lol. Lmao even." - default_user_preferences: - default_home: "Subscriptions" - quality: dash - save_player_pos: true - port: 3000 - #external_port: 443 - #domain: watch.clortox.com - ports: - - containerPort: 3000 diff --git a/invidious/invidious-service.yaml b/invidious/invidious-service.yaml deleted file mode 100644 index 0f6065d..0000000 --- a/invidious/invidious-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: invidious - namespace: invidious-ns -spec: - type: LoadBalancer - ports: - - protocol: TCP - port: 80 - targetPort: 3000 - selector: - app: invidious diff --git a/invidious/sealed-secret.yaml b/invidious/sealed-secret.yaml deleted file mode 100644 index 153cb5d..0000000 --- a/invidious/sealed-secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: invidious-secret - namespace: invidious-ns -spec: - encryptedData: - hmac: AgBnXw0QxXIHdSyv1jruFE7gKlnWZwHjIF6yqpx/VwXdV1G6WWCfvv+ZMA9RNvnFGP3QmNttNpErFXgpGJKP6a9gr7nIK9ilPgm9oZZP0gt8MDnNSm/17sLeMv0X84uT5SfKCbzukTPKQj2NICWLYO9M3XV5x4CXNi+1E7r+F5qtAYV/V0ZPdo35QHALKjDYv5hofsvJNaUXxamMGzMjrOBtMZKDAGx4K0ftOVr348IbKb8R3WgSrJDN2YQdk+8U1lyRZoK2yBsMYEx1/z3/YsYF/ZvE8Z6tPnRCImJSr+jkEDde0So0DkXTESdBKVnkRQ2e31pyRHGu7+z3dqZlNITFbVt3YN54+P7jDMGEEbPEgVfjJTk/MhqsfaY2WrqONXJvBFcsfVooDXG3rQinG5UkPUBLWPCnInD1mvbSyN5whC7oVh5+qwCrEN3WSsEpMUig8re10sVDwmwXehf0TqWwsIPdT/4OxYnBjzjqJ5HYopBHqCcHxeHD6o+6fNjZPSofNo2YkIX1yI+9laSjEHBmIwdFBCty10yaDsF625X07zlqFBMzSaPRcK3MVReFfUrI5w7mZuM+bzT4OG3Zf4bolQp18glzltSPxWPOsc7RRRImkcjf+PkyXmGVwZ2oPXISX+8xuOIuxhMMGAke0a7b8R7hNb/vvZ6dbtStMwZWUd0IB3Rnmb8rWmdy5qHoANYbmVmwTfcDSKxp0hqfoPNYBG7xJKAg3FjdoYjcmVmbAQ== - invidious-postgres-password: 
AgDCqXfmNpRx1XQeKqVrXw7u9BXLvoyWiy16S3H5MgGf7SkBffIM9fbE3bFsOI8ow0obxd1vJRw/7XZtFoGYwumoGvFLU/5N1AeluHLD8c6muBNEH7hBQmXj7rGlZ2PGKIZ+C0iqMLrt0xWpiPsPKuSxeXBwyTuZpdcw5PpTQ9N6pWhLyAM5Aw7BHXzWN3PiH4dplWnYcilj0MkNAueTwQtwksHrmPrA7ezE965adfhWzn+IWS0Rco5/QqNMArmFQqYKNkfh0mkCKz258TOLGGbznNbvWU5PQklElBUTqB2r1nJc5nYdAN0cOYYRbXhql5s61Q0S4REXG0gZVfqZMxGFpomeVx09tQRbYHKW/ptp4HKb0x2GbA/Wk1qcvvHAOqhU9f1/+MhIeyUShNeQdTthbm2hnS3Z46KPw0EEdLuSo9xG8hu+saak/xIs4bOaKbtkjSqdeTH3UzEKCjK0bQDoB6JvS6tq+CVzxoUGVYYDzbS0ADDKgdVGkOsGzVswtUOo7yYzOY9jLHanbMCZjvDfOByyYdTnegtS/iIExCPhM0V/9WzY1Y1/crX2RIgdWzTsV2djG24/tZvIggMTZE3PZH83pEduWzcMyi4JED/OYCaWlJRWFqhq+3g/K/0DgM3YPDRwul3yGhoKiWr3bRDC2RPMRTlINd10ctocnDupV1yxFzgLPimrG0LLxcmk2foRkTeJ2d/3LtjN0HfvmLSvVKrAOUDOTVcOsenoyVauNg== - template: - metadata: - creationTimestamp: null - name: invidious-secret - namespace: invidious-ns diff --git a/longhorn/longhorn.yaml b/longhorn/longhorn.yaml deleted file mode 100644 index f5fb8c6..0000000 --- a/longhorn/longhorn.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: longhorn - namespace: longhorn-system -spec: - interval: 5m - chart: - spec: - chart: longhorn - version: "1.5.1" - sourceRef: - kind: HelmRepository - name: longhorn - namespace: flux-system diff --git a/longhorn/sealed-secret.yaml b/longhorn/sealed-secret.yaml deleted file mode 100644 index a4c8751..0000000 --- a/longhorn/sealed-secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: longhorn-backup - namespace: longhorn-system -spec: - encryptedData: - AWS_ACCESS_KEY_ID: AgAaiekhcrroR4st3tc4Bk1GQQYa+1thkHqYz7gANqClaHCbwWWDKi312YN32xGdlxtCLWeqp+oU1yJjXa71Ha1U6R3IT4oLqh+3CrmE0HvqWEVWbaiInB/J8S91ejFKhSniL4W45667ntqByIh7yG/sR2ROpW4NGy1iDOH+B/D3j47XJFPEbLQVci1fWN9inLATNZlHOWvml5xjNaQCQP9xCTdBWBCPvqby9oQZGp3/I2z75/2bAu9UCV9U7ARY550v30EwrBpfROIRUGVw7TCPtiNabhmcy2PU1QWleFTZNf2wAvIMoFxH6DgfmaiRY39JFuGFypRviAf9t+OyP9YYplkrk8pdXGImTTsLl+y6PSM+ibdJyOH4g/cyWA/+882aPpg0f8u1fmfOjonmYPSGi9UqKEJoJsWUnRBW+INZoxnG3vQvYF0hoaMgqi6QnLcMlOP+zjd/2sLlPRqlwPUUF35Q67RmAahAHUA2P1a25ojcBVGFBMQ8voKy3QtMyOJkrsS8Uq0iX1gVNLZuZRZsCoxJDxU13043XF7OD/lSggzG7gElAsuGHZ0/3ltF17TuPDx0/GawW+fIzU9yQBnLFvYn1UJwC1m9lQvRmlg5yQm1uFO1oBP4rX6N7GWttn8zmLzyUeqnuOvUFI0kZHrUsfTYEM+FqUAIPJFKFO2OZGRagVn59Vr3/aEgn83CODveD8g00oq2PU5rqQ61zWYZNoQOGA== - AWS_ENDPOINTS: AgBytK+0Q+PqVpzW1thju/ntgZmMm8n5V3ZaoJ8ZmkEm+9USijqrpx0N4aGdrm1Z4a/OYKhBm0Xtwm4s3MSIsipAkB5cireUNrd0ggeCAqG9nhqIqZhqsbkbll3g1QNFxuprE4196Byo9nCdcxkR7OMHJ86FMraPkjxqLzHNb8XuNpQn9CinFaUdXcbhO03ZKZFCtxwlnRYtr2Anx2EBT38tLcg/dIt25+4QY95djZdw8NwFP1xlslwE+rbwMQHAxpM6bZLqGtqWNDTdJ0NummoJXnWiOwVPEpjiPM6vdXiBkjRlGpldXuF2JK/c3t4G1CA5URck8jcFaVNfO+wUsG45hp+EkE5XwXMXdY1KB8oveYU7ehCrI8wCY2obhYsbQVve34S2Mv/C47uCtFYHSE7lZsHttabWbYS4QSmSe+BM8uCFoUkJErBPsIZ73TzRgjhSR1Y/qtoyNDlYSH8jZEm8OlIsk5fdOWMN87e/l4TIqSVd02kJAKvn/o564BjrcdqB8gU6adV0vh4q1GBtwwF8nquQP3mL9zxDiljCikDwDD7DNVbzSUwA7tg4YYmQAwpRDfhEZkr5uPaQJ8pyhVp3ZyjACfHB0i2zF/x/UnUogs42OunchrPlkBAPUvY1vbM8uRcy+E0UO6h5gTOf9O1cB0pnIxz//MIbg4ve2SYftLstdtlJr4xL/QhqPWb7VZDB0QZUceFvKpbourbE1M6htGr8jGxR/UI= - AWS_SECRET_ACCESS_KEY: 
AgC33pdA8wIcOtyYeh0+rsrBtw5VsT8r9z35ovM8za2hcUnEg8ON8SMlGjdPSUSUVQPvT3NoXCZhzNvIwvdBPDsqSGdIeEb1zQ1hGz4jycd5OYPCNOA8yiTv9UfzYaO/YthyoeYOufHHSfz4o7uZFAqr7xIYX+1tOobtJjiQQopKxSbtlbkwNUAH86TSwJp66jhXsy35aAWVcfycAhwtVzc1TLyTJ6EB43BT/0f+qiLxiAqRWnfMQ063swnAoQ1RDAto3LMBMsJOCnYk2sxIXrlg+l2vdAH5OSRHxLTKAK7i2z7h74NhlXhJmWMNm+M3rz8pDIUfpYlgmDW96B/JRkq/xVgbUXdNuSE1E6w04QVVcgqeh9xgYuCVwxaIpFKY27cnX5Z3SG/WcELHw4QUzoPwaNmOKho0cSFzseI8r1inIj7OTafP4/j8/3gunlvTilUI6O/Nz7n0gt/ZJwzhX6Un9ETstPYsCFFGCpYgi0Mpr4bVOKiRvdR/00r29lYmoTs49U2FPdoqZWo9h+m2VBl0WLnMqSHvrCR4y6Gz8iQQAlSxIVEaQ+5i6N0K51Ba+9g7PLKljpCIhAL356MT1595FfQrhg6GozUuv184paa4SR/sqX2k70RrdAULAEFoGvkv9dIYJRFPL7RPxXKhr1UO1/HgJbCGZvCpa+VbgzB9Fztf3w4gwY2rxHhPf+kRC788xsUEY1n6L1qOUgQaQJtxlLf53RHVNsj8eEWC3gPcer0qyIH0lGQE - template: - metadata: - creationTimestamp: null - name: longhorn-backup - namespace: longhorn-system diff --git a/metallb-config/ipaddresspool.yaml b/metallb-config/ipaddresspool.yaml deleted file mode 100644 index 9877ef3..0000000 --- a/metallb-config/ipaddresspool.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - name: metallb-pool-addresses - namespace: metallb-system -spec: - addresses: - - 10.0.3.64-10.0.3.253 diff --git a/metallb-config/l2-advertisement.yaml b/metallb-config/l2-advertisement.yaml deleted file mode 100644 index 9c5351d..0000000 --- a/metallb-config/l2-advertisement.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: metallb.io/v1beta1 -kind: L2Advertisement -metadata: - name: my-l2-advertisment - namespace: metallb-system -spec: - ipAddressPools: - - metallb-pool-addresses diff --git a/metallb/config-map-overrides.yaml b/metallb/config-map-overrides.yaml deleted file mode 100644 index bc18d29..0000000 --- a/metallb/config-map-overrides.yaml +++ /dev/null @@ -1,349 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: metallb-helm-chart-value-overrides - namespace: metallb-system -data: - values.yaml: |- - # Default values for metallb. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. - - imagePullSecrets: [] - nameOverride: "" - fullnameOverride: "" - loadBalancerClass: "" - - # To configure MetalLB, you must specify ONE of the following two - # options. - - rbac: - # create specifies whether to install and use RBAC rules. - create: true - - prometheus: - # scrape annotations specifies whether to add Prometheus metric - # auto-collection annotations to pods. See - # https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml - # for a corresponding Prometheus configuration. Alternatively, you - # may want to use the Prometheus Operator - # (https://github.com/coreos/prometheus-operator) for more powerful - # monitoring configuration. If you use the Prometheus operator, this - # can be left at false. - scrapeAnnotations: false - - # port both controller and speaker will listen on for metrics - metricsPort: 7472 - - # if set, enables rbac proxy on the controller and speaker to expose - # the metrics via tls. - # secureMetricsPort: 9120 - - # the name of the secret to be mounted in the speaker pod - # to expose the metrics securely. If not present, a self signed - # certificate to be used. - speakerMetricsTLSSecret: "" - - # the name of the secret to be mounted in the controller pod - # to expose the metrics securely. If not present, a self signed - # certificate to be used. 
- controllerMetricsTLSSecret: "" - - # prometheus doesn't have the permission to scrape all namespaces so we give it permission to scrape metallb's one - rbacPrometheus: true - - # the service account used by prometheus - # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true " - serviceAccount: "" - - # the namespace where prometheus is deployed - # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true " - namespace: "" - - # the image to be used for the kube-rbac-proxy container - rbacProxy: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.12.0 - pullPolicy: - - # Prometheus Operator PodMonitors - podMonitor: - # enable support for Prometheus Operator - enabled: false - - # optional additional labels for podMonitors - additionalLabels: {} - - # optional annotations for podMonitors - annotations: {} - - # Job label for scrape target - jobLabel: "app.kubernetes.io/name" - - # Scrape interval. If not set, the Prometheus default scrape interval is used. - interval: - - # metric relabel configs to apply to samples before ingestion. - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - - # Prometheus Operator ServiceMonitors. To be used as an alternative - # to podMonitor, supports secure metrics. - serviceMonitor: - # enable support for Prometheus Operator - enabled: false - - speaker: - # optional additional labels for the speaker serviceMonitor - additionalLabels: {} - # optional additional annotations for the speaker serviceMonitor - annotations: {} - # optional tls configuration for the speaker serviceMonitor, in case - # secure metrics are enabled. - tlsConfig: - insecureSkipVerify: true - - controller: - # optional additional labels for the controller serviceMonitor - additionalLabels: {} - # optional additional annotations for the controller serviceMonitor - annotations: {} - # optional tls configuration for the controller serviceMonitor, in case - # secure metrics are enabled. - tlsConfig: - insecureSkipVerify: true - - # Job label for scrape target - jobLabel: "app.kubernetes.io/name" - - # Scrape interval. If not set, the Prometheus default scrape interval is used. - interval: - - # metric relabel configs to apply to samples before ingestion. - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion.
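Editor's aside on the MetalLB resources removed above: with the IPAddressPool and L2Advertisement shown earlier in this diff, any Service of type LoadBalancer (the MinIO, Navidrome, pgAdmin and Plex services later in the diff all use that type) gets an address from 10.0.3.64-10.0.3.253. A minimal sketch of pinning a specific address with the MetalLB 0.13+ annotation follows; the Service name, namespace, selector, ports and the chosen IP are illustrative only, not taken from the repository.

apiVersion: v1
kind: Service
metadata:
  name: example-lb          # hypothetical Service, not from this repo
  namespace: default        # hypothetical namespace
  annotations:
    # request a fixed address from metallb-pool-addresses;
    # omit the annotation to let MetalLB pick any free address
    metallb.universe.tf/loadBalancerIPs: 10.0.3.70
spec:
  type: LoadBalancer
  selector:
    app: example
  ports:
    - port: 80
      targetPort: 8080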
- relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - - # Prometheus Operator alertmanager alerts - prometheusRule: - # enable alertmanager alerts - enabled: false - - # optional additionnal labels for prometheusRules - additionalLabels: {} - - # optional annotations for prometheusRules - annotations: {} - - # MetalLBStaleConfig - staleConfig: - enabled: true - labels: - severity: warning - - # MetalLBConfigNotLoaded - configNotLoaded: - enabled: true - labels: - severity: warning - - # MetalLBAddressPoolExhausted - addressPoolExhausted: - enabled: true - labels: - severity: alert - - addressPoolUsage: - enabled: true - thresholds: - - percent: 75 - labels: - severity: warning - - percent: 85 - labels: - severity: warning - - percent: 95 - labels: - severity: alert - - # MetalLBBGPSessionDown - bgpSessionDown: - enabled: true - labels: - severity: alert - - extraAlerts: [] - - # controller contains configuration specific to the MetalLB cluster - # controller. - controller: - enabled: true - # -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` - logLevel: info - # command: /controller - # webhookMode: enabled - image: - repository: quay.io/metallb/controller - tag: - pullPolicy: - ## @param controller.updateStrategy.type Metallb controller deployment strategy type. - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy - ## e.g: - ## strategy: - ## type: RollingUpdate - ## rollingUpdate: - ## maxSurge: 25% - ## maxUnavailable: 25% - ## - strategy: - type: RollingUpdate - serviceAccount: - # Specifies whether a ServiceAccount should be created - create: true - # The name of the ServiceAccount to use. If not set and create is - # true, a name is generated using the fullname template - name: "" - annotations: {} - securityContext: - runAsNonRoot: true - # nobody - runAsUser: 65534 - fsGroup: 65534 - resources: {} - # limits: - # cpu: 100m - # memory: 100Mi - nodeSelector: {} - tolerations: [] - priorityClassName: "" - runtimeClassName: "" - affinity: {} - podAnnotations: {} - labels: {} - livenessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - - # speaker contains configuration specific to the MetalLB speaker - # daemonset. - speaker: - enabled: true - # command: /speaker - # -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` - logLevel: info - tolerateMaster: true - memberlist: - enabled: true - mlBindPort: 7946 - mlSecretKeyPath: "/etc/ml_secret_key" - excludeInterfaces: - enabled: true - image: - repository: quay.io/metallb/speaker - tag: - pullPolicy: - ## @param speaker.updateStrategy.type Speaker daemonset strategy type - ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate or OnDelete - ## - type: RollingUpdate - serviceAccount: - # Specifies whether a ServiceAccount should be created - create: true - # The name of the ServiceAccount to use. 
If not set and create is - # true, a name is generated using the fullname template - name: "" - annotations: {} - ## Defines a secret name for the controller to generate a memberlist encryption secret - ## By default secretName: {{ "metallb.fullname" }}-memberlist - ## - # secretName: - resources: {} - # limits: - # cpu: 100m - # memory: 100Mi - nodeSelector: {} - tolerations: [] - priorityClassName: "" - affinity: {} - ## Selects which runtime class will be used by the pod. - runtimeClassName: "" - podAnnotations: {} - labels: {} - livenessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - startupProbe: - enabled: true - failureThreshold: 30 - periodSeconds: 5 - # frr contains configuration specific to the MetalLB FRR container, - # for speaker running alongside FRR. - frr: - enabled: true - image: - repository: quay.io/frrouting/frr - tag: 8.5.2 - pullPolicy: - metricsPort: 7473 - resources: {} - - # if set, enables a rbac proxy sidecar container on the speaker to - # expose the frr metrics via tls. - # secureMetricsPort: 9121 - - reloader: - resources: {} - - frrMetrics: - resources: {} - - crds: - enabled: true - validationFailurePolicy: Fail diff --git a/metallb/helmrelease-metallb.yaml b/metallb/helmrelease-metallb.yaml deleted file mode 100644 index 53a667d..0000000 --- a/metallb/helmrelease-metallb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: metallb - namespace: metallb-system -spec: - chart: - spec: - chart: metallb - version: 0.13.11 - sourceRef: - kind: HelmRepository - name: metallb - namespace: flux-system - interval: 15m - timeout: 5m - releaseName: metallb - valuesFrom: - - kind: ConfigMap - name: metallb-helm-chart-value-overrides - valuesKey: values.yaml diff --git a/minio/helmrelease-minio.yaml b/minio/helmrelease-minio.yaml deleted file mode 100644 index a92942e..0000000 --- a/minio/helmrelease-minio.yaml +++ /dev/null @@ -1,564 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: minio - namespace: minio-ns -spec: - chart: - spec: - chart: minio - sourceRef: - kind: HelmRepository - name: minio - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: minio - values: - ## Provide a name in place of minio for `app:` labels - ## - nameOverride: "" - - ## Provide a name to substitute for the full names of resources - ## - fullnameOverride: "" - - ## set kubernetes cluster domain where minio is running - ## - clusterDomain: cluster.local - - ## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the - ## - image: - repository: quay.io/minio/minio - tag: RELEASE.2023-09-30T07-02-29Z - pullPolicy: IfNotPresent - - imagePullSecrets: [] - # - name: "image-pull-secret" - - ## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio - ## client used to create a default bucket). - ## - mcImage: - repository: quay.io/minio/mc - tag: RELEASE.2023-09-29T16-41-22Z - pullPolicy: IfNotPresent - - ## minio mode, i.e. 
standalone or distributed - mode: standalone ## the other supported value is "distributed" - - ## Additional labels to include with deployment or statefulset - additionalLabels: {} - - ## Additional annotations to include with deployment or statefulset - additionalAnnotations: {} - - ## Typically the deployment/statefulset includes checksums of secrets/config, - ## So that when these change on a subsequent helm install, the deployment/statefulset - ## is restarted. This can result in unnecessary restarts under GitOps tooling such as - ## flux, so set to "true" to disable this behaviour. - ignoreChartChecksums: false - - ## Additional arguments to pass to minio binary - extraArgs: [] - - ## Additional volumes to minio container - extraVolumes: [] - - ## Additional volumeMounts to minio container - extraVolumeMounts: [] - - ## Additional sidecar containers - extraContainers: [] - - ## Internal port number for MinIO S3 API container - ## Change service.port to change external port number - minioAPIPort: "9000" - - ## Internal port number for MinIO Browser Console container - ## Change consoleService.port to change external port number - minioConsolePort: "9001" - - ## Update strategy for Deployments - deploymentUpdate: - type: RollingUpdate - maxUnavailable: 0 - maxSurge: 100% - - ## Update strategy for StatefulSets - statefulSetUpdate: - updateStrategy: RollingUpdate - - ## Pod priority settings - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ - ## - priorityClassName: "" - - ## Pod runtime class name - ## ref https://kubernetes.io/docs/concepts/containers/runtime-class/ - ## - runtimeClassName: "" - - ## Set default rootUser, rootPassword - ## AccessKey and secretKey are generated when not set - ## Distributed MinIO ref: https://min.io/docs/minio/linux/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.html - ## - #rootUser: "" - #rootPassword: "" - # - - ## Use an existing Secret that stores the following variables: - ## - ## | Chart var | .data. in Secret | - ## |:----------------------|:-------------------------| - ## | rootUser | rootUser | - ## | rootPassword | rootPassword | - ## - ## All mentioned variables will be ignored in values file. - ## .data.rootUser and .data.rootPassword are mandatory, - ## others depend on enabled status of corresponding sections. - existingSecret: "minio-default-credentials" - - ## Directory on the MinIO pod - certsPath: "/etc/minio/certs/" - configPathmc: "/etc/minio/mc/" - - ## Path where PV would be mounted on the MinIO Pod - mountPath: "/export" - ## Override the root directory which the minio server should serve from. - ## If left empty, it defaults to the value of {{ .Values.mountPath }} - ## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }} - ## - bucketRoot: "" - - # Number of drives attached to a node - drivesPerNode: 1 - # Number of MinIO containers running - replicas: 1 - # Number of expanded MinIO clusters - pools: 1 - - ## TLS Settings for MinIO - tls: - enabled: false - ## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret - certSecret: "" - publicCrt: public.crt - privateKey: private.key - - ## Trusted Certificates Settings for MinIO. Ref: https://min.io/docs/minio/linux/operations/network-encryption.html#third-party-certificate-authorities - ## Bundle multiple trusted certificates into one secret and pass that here.
Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret - ## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt. - ## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret. - trustedCertsSecret: "" - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - #annotations: {} - - ## A manually managed Persistent Volume and Claim - ## Requires persistence.enabled: true - ## If defined, PVC must be created manually before volume will be bound - #existingClaim: "" - - ## minio data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - ## Storage class of PV to bind. By default it looks for standard storage class. - ## If the PV uses a different storage class, specify that here. - storageClass: "longhorn" - #volumeName: "" - accessMode: ReadWriteOnce - size: 30Gi - - ## If subPath is set mount a sub folder of a volume instead of the root of the volume. - ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs). - ## - subPath: "" - - ## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service). - ## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. - ## ref: http://kubernetes.io/docs/user-guide/services/ - ## - service: - type: LoadBalancer - clusterIP: ~ - port: "9000" - nodePort: 9000 - loadBalancerIP: ~ - externalIPs: [] - annotations: {} - - ## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ - ## - - ingress: - enabled: false - ingressClassName: ~ - labels: {} - # node-role.kubernetes.io/ingress: platform - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # kubernetes.io/ingress.allow-http: "false" - # kubernetes.io/ingress.global-static-ip-name: "" - # nginx.ingress.kubernetes.io/secure-backends: "true" - # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" - # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 - path: / - hosts: - - minio-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - - consoleService: - type: LoadBalancer - clusterIP: ~ - port: "9001" - nodePort: 80 - loadBalancerIP: ~ - externalIPs: [] - annotations: {} - - consoleIngress: - enabled: false - ingressClassName: ~ - labels: {} - # node-role.kubernetes.io/ingress: platform - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # kubernetes.io/ingress.allow-http: "false" - # kubernetes.io/ingress.global-static-ip-name: "" - # nginx.ingress.kubernetes.io/secure-backends: "true" - # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" - # nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0 - path: / - hosts: - - console.minio-example.local - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - - ## Node labels for pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} 
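Editor's aside on existingSecret above: the chart expects a Secret named minio-default-credentials in minio-ns that carries rootUser and rootPassword keys, and the SealedSecret further down in this diff provides exactly those keys in encrypted form. A rough sketch of the plaintext Secret one would feed to kubeseal is shown below; the values are placeholders (never committed in plaintext), and the kubeseal invocation assumes the cluster's default sealed-secrets controller discovery.

apiVersion: v1
kind: Secret
metadata:
  name: minio-default-credentials
  namespace: minio-ns
type: Opaque
stringData:
  rootUser: REPLACE_WITH_ACCESS_KEY      # placeholder value
  rootPassword: REPLACE_WITH_SECRET_KEY  # placeholder value

Sealed locally with, for example, kubeseal --format yaml < minio-secret.yaml > sealed-secret.yaml, so only the encrypted manifest ends up in the repository.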
- tolerations: [] - affinity: {} - topologySpreadConstraints: [] - - ## Add stateful containers to have security context, if enabled MinIO will run as this - ## user and group NOTE: securityContext is only enabled if persistence.enabled=true - securityContext: - enabled: true - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - fsGroupChangePolicy: "OnRootMismatch" - - # Additational pod annotations - podAnnotations: {} - - # Additional pod labels - podLabels: {} - - ## Configure resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - requests: - memory: 16Gi - - ## List of policies to be created after minio install - ## - ## In addition to default policies [readonly|readwrite|writeonly|consoleAdmin|diagnostics] - ## you can define additional policies with custom supported actions and resources - policies: [] - ## writeexamplepolicy policy grants creation or deletion of buckets with name - ## starting with example. In addition, grants objects write permissions on buckets starting with - ## example. - # - name: writeexamplepolicy - # statements: - # - effect: Allow # this is the default - # resources: - # - 'arn:aws:s3:::example*/*' - # actions: - # - "s3:AbortMultipartUpload" - # - "s3:GetObject" - # - "s3:DeleteObject" - # - "s3:PutObject" - # - "s3:ListMultipartUploadParts" - # - resources: - # - 'arn:aws:s3:::example*' - # actions: - # - "s3:CreateBucket" - # - "s3:DeleteBucket" - # - "s3:GetBucketLocation" - # - "s3:ListBucket" - # - "s3:ListBucketMultipartUploads" - ## readonlyexamplepolicy policy grants access to buckets with name starting with example. - ## In addition, grants objects read permissions on buckets starting with example. - # - name: readonlyexamplepolicy - # statements: - # - resources: - # - 'arn:aws:s3:::example*/*' - # actions: - # - "s3:GetObject" - # - resources: - # - 'arn:aws:s3:::example*' - # actions: - # - "s3:GetBucketLocation" - # - "s3:ListBucket" - # - "s3:ListBucketMultipartUploads" - ## conditionsexample policy creates all access to example bucket with aws:username="johndoe" and source ip range 10.0.0.0/8 and 192.168.0.0/24 only - # - name: conditionsexample - # statements: - # - resources: - # - 'arn:aws:s3:::example/*' - # actions: - # - 's3:*' - # conditions: - # - StringEquals: '"aws:username": "johndoe"' - # - IpAddress: | - # "aws:SourceIp": [ - # "10.0.0.0/8", - # "192.168.0.0/24" - # ] - # - ## Additional Annotations for the Kubernetes Job makePolicyJob - makePolicyJob: - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - resources: - requests: - memory: 128Mi - # Command to run after the main command on exit - exitCommand: "" - - ## List of users to be created after minio install - ## - users: - ## Username, password and policy to be assigned to the user - ## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics] - ## Add new policies as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management.html#access-management - ## NOTE: this will fail if LDAP is enabled in your MinIO deployment - ## make sure to disable this if you are using LDAP. 
- - accessKey: console - secretKey: console123 - policy: consoleAdmin - # Or you can refer to specific secret - #- accessKey: externalSecret - # existingSecret: my-secret - # existingSecretKey: password - # policy: readonly - - ## Additional Annotations for the Kubernetes Job makeUserJob - makeUserJob: - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - resources: - requests: - memory: 128Mi - # Command to run after the main command on exit - exitCommand: "" - - ## List of service accounts to be created after minio install - ## - svcaccts: [] - ## accessKey, secretKey and parent user to be assigned to the service accounts - ## Add new service accounts as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management/minio-user-management.html#service-accounts - # - accessKey: console-svcacct - # secretKey: console123 - # user: console - ## Or you can refer to specific secret - # - accessKey: externalSecret - # existingSecret: my-secret - # existingSecretKey: password - # user: console - ## You also can pass custom policy - # - accessKey: console-svcacct - # secretKey: console123 - # user: console - # policy: - # statements: - # - resources: - # - 'arn:aws:s3:::example*/*' - # actions: - # - "s3:AbortMultipartUpload" - # - "s3:GetObject" - # - "s3:DeleteObject" - # - "s3:PutObject" - # - "s3:ListMultipartUploadParts" - - makeServiceAccountJob: - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - resources: - requests: - memory: 128Mi - # Command to run after the main command on exit - exitCommand: "" - - ## List of buckets to be created after minio install - ## - buckets: [] - # # Name of the bucket - # - name: bucket1 - # # Policy to be set on the - # # bucket [none|download|upload|public] - # policy: none - # # Purge if bucket exists already - # purge: false - # # set versioning for - # # bucket [true|false] - # versioning: false - # # set objectlocking for - # # bucket [true|false] NOTE: versioning is enabled by default if you use locking - # objectlocking: false - # - name: bucket2 - # policy: none - # purge: false - # versioning: true - # # set objectlocking for - # # bucket [true|false] NOTE: versioning is enabled by default if you use locking - # objectlocking: false - - ## Additional Annotations for the Kubernetes Job makeBucketJob - makeBucketJob: - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - resources: - requests: - memory: 128Mi - # Command to run after the main command on exit - exitCommand: "" - - ## List of command to run after minio install - ## NOTE: the mc command TARGET is always "myminio" - customCommands: - # - command: "admin policy attach myminio consoleAdmin --group='cn=ops,cn=groups,dc=example,dc=com'" - - ## Additional Annotations for the Kubernetes Job customCommandJob - customCommandJob: - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - resources: - requests: - memory: 128Mi - # Command to run after the main command on exit - exitCommand: "" - - ## Merge jobs - postJob: - podAnnotations: {} - annotations: {} - securityContext: - enabled: false - runAsUser: 1000 - runAsGroup: 1000 - fsGroup: 1000 - nodeSelector: {} - tolerations: [] - affinity: {} - - ## Use this field to add environment variables relevant to MinIO server. 
These fields will be passed on to MinIO container(s) - ## when Chart is deployed - environment: - ## Please refer for comprehensive list https://min.io/docs/minio/linux/reference/minio-server/minio-server.html - ## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io" - ## MINIO_BROWSER: "off" - - ## The name of a secret in the same kubernetes namespace which contain secret values - ## This can be useful for LDAP password, etc - ## The key in the secret must be 'config.env' - ## - extraSecret: ~ - - ## OpenID Identity Management - ## The following section documents environment variables for enabling external identity management using an OpenID Connect (OIDC)-compatible provider. - ## See https://min.io/docs/minio/linux/operations/external-iam/configure-openid-external-identity-management.html for a tutorial on using these variables. - oidc: - enabled: false - configUrl: "https://identity-provider-url/.well-known/openid-configuration" - clientId: "minio" - clientSecret: "" - # Provide existing client secret from the Kubernetes Secret resource, existing secret will have priority over `clientSecret` - existingClientSecretName: "" - existingClientSecretKey: "" - claimName: "policy" - scopes: "openid,profile,email" - redirectUri: "https://console-endpoint-url/oauth_callback" - # Can leave empty - claimPrefix: "" - comment: "" - - networkPolicy: - enabled: false - allowExternal: true - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Specify the service account to use for the MinIO pods. If 'create' is set to 'false' - ## and 'name' is left unspecified, the account 'default' will be used. - serviceAccount: - create: true - ## The name of the service account to use. If 'create' is 'true', a service account with that name - ## will be created. - name: "minio-sa" - - metrics: - serviceMonitor: - enabled: false - # scrape each node/pod individually for additional metrics - includeNode: false - public: true - additionalLabels: {} - annotations: {} - # for node metrics - relabelConfigs: {} - # for cluster metrics - relabelConfigsCluster: {} - # metricRelabelings: - # - regex: (server|pod) - # action: labeldrop - namespace: ~ - # Scrape interval, for example `interval: 30s` - interval: ~ - # Scrape timeout, for example `scrapeTimeout: 10s` - scrapeTimeout: ~ - - ## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md - ## Define endpoints to enable this section. 
- etcd: - endpoints: [] - pathPrefix: "" - corednsPathPrefix: "" - clientCert: "" - clientCertKey: "" diff --git a/minio/sealed-secret.yaml b/minio/sealed-secret.yaml deleted file mode 100644 index 4869434..0000000 --- a/minio/sealed-secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: minio-default-credentials - namespace: minio-ns -spec: - encryptedData: - rootPassword: AgASkMrZq0TO6z/oeMyzGjDmSbJLBQCYW/7IQHdRS8M8vZkioEujShT/8IE6etxTOzGLwOkmpO8PyExTgMD3atyRRdiyBs2jaT0SIOyEZUA0PjiAgmYTWx9cAXBROOYzkT7u8IvMomEjiKx/EZG2XPhxgg0/Z9tBCVkstuEYyUfRokSco4icJ/JyHAz1Gg2F9w/KMiQJProcoAV5ajRdI4Bfb9e6E5GIW2Z0WKSH4fcCyM07nW+QnwlNGZNaAgLmSZygnUeF2PN/qD5aSj5YSjK5Va7KQRIlYszmzJcFg70yeustMIcE2nD2YVFFHb0CXKqEgnF9QrieBagorwoRvqU5XtXoXiBmzgvXtDQTJJ7ODT4XAB0oVF0QOdreBuVZ34D+Fb61O5HtFvSHRN3HsGXdvkKKgywJbjL+LaQBcEgztq0xjiGj/tjf3UDZOdOeHPZYJno9gdJX5eCTTjWVnaPxMyfwl3y4YmmHKVenCS6tsBkABk2/+lEthGUBRY9CyKl/ugwDQCJviX4tf7ZvMGGuPAxqIlZuM69jU53Zgp9Vq/8+UuTlksJzwQlH/VoyZsQl+/vSekyjDyPR3g3AunjoLsQDNnBwcghMzBFgeJzB/dSiyg0dQpiMUCcwe8i/20N/ER4pIC+ag1IyBAoKMQpWWJWyPU7IQ+JbYPdCI9Q1bMhQIpBNLkJsaiaRCvwrWaK07Ml9T4i+wMat8z8v0gIbnK+2JZ7FIeA66uuhxXhMi2Coqs5L0/vk - rootUser: AgDUG6LKdvzJorlYnxlW6cnJaqrhQkumFheLwZTD3aRf8ufFqQaGM/IPyNXwhKj4YAlr17gSR9kzIhYnkrKwVq9rtzo/arm2hF4cDWwQEZlrfmkqZfAec4p81KvyYgL19fuhDOeiZQfuCHl0MvDw+j6AzAk6Q6bbNdjWElaRzNLzjRAM892OCS2pubzRPLJl2+/9Ny/lZ2ePmZHHdin7ca73aCrcO1PryrhqQxZRMM0iaNKjUGsY8WMeoNnayhJ34KbsEMDTtPkWXlZb2FGtJDafw0A0fNn19PlU7wN7HeMK05SPgp4Sjs9LFrHNBanjF/rKqInCSg2lN57bUcJcVotpXEt6rmTEySo2QhnfFAXafX6hfl/HHT9GSrya+vFLKNXVf8hxVZMRjXmNIi0N3obvHOqGIJFDiy4iWEwOdrn/yetHs8ctS+DrO4pNY1cz/6SzaBayqaPqcxIAWhCKxXtNWb6sHBpTRsXpwUFq2Hoc9idB1uTGOpmpSWl8awUUsanXv4Kb2sZkXNc3iCCwx6TBDLQ1fukISj4n30RcTFDqa++3Nxq1n1immNerX30PjMWewxlUvAm5O9kwcIplfk8iW9ii3gRlth0Qs8FGhbfrghz5xs8CIgxEhnrCRphNeIow3JT1wxGU0r/QKoQu8zgEz+TsNdCXmB8bnauYyrW6ANhZaWx/wGoB29j7mHWfvLsTIwB2Q8HeV4agwKXoGSsp - template: - metadata: - creationTimestamp: null - name: minio-default-credentials - namespace: minio-ns diff --git a/navidrome/navidrome-data.yaml b/navidrome/navidrome-data.yaml deleted file mode 100644 index cc2b2a3..0000000 --- a/navidrome/navidrome-data.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: navidrome-pvc-data - namespace: navidrome-ns -spec: - accessModes: - - ReadWriteOnce - storageClassName: longhorn - resources: - requests: - storage: 16Gi diff --git a/navidrome/navidrome-deployment.yaml b/navidrome/navidrome-deployment.yaml deleted file mode 100644 index 1d4f289..0000000 --- a/navidrome/navidrome-deployment.yaml +++ /dev/null @@ -1,81 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: navidrome - namespace: navidrome-ns -spec: - selector: - matchLabels: - app: navidrome - template: - metadata: - labels: - app: navidrome - spec: - nodeSelector: - kubernetes.io/hostname: gluttony - securityContext: - fsGroup: 1000 - containers: - - name: navidrome - image: deluan/navidrome:latest - securityContext: - runAsUser: 1000 - runAsGroup: 1000 - ports: - - containerPort: 4533 - env: - - name: ND_BASEURL - value: "https://music.clortox.com" - - name: ND_CACHEFOLDER - value: "/cache" - - name: ND_MUSICFOLDER - value: "/music" - - name: ND_DATAFOLDER - value: "/data" - - - name: ND_SCANSCHEDULE - value: "1h" - - name: ND_LOGLEVEL - value: "info" - - name: ND_SESSIONTIMEOUT - value: "24h" - - - name: ND_ENABLESHARING - value: "true" - - name: ND_UILOGINBACKGROUNDURL - value: 
"https://general.api.clortox.com/images/background/today" - - name: ND_UIWELCOMEMESSAGE - value: "Lol. Lmao even" - - - name: ND_REVERSEPROXYUSERHEADER - value: "X-Authentik-Username" - - name: ND_REVERSEPROXYWHITELIST - value: "0.0.0.0/0" - #- name: ND_SPOTIFY_ID - # valueFrom: - # secretKeyRef: - # name: spotify-creds - # key: CLIENT_ID - #- name: ND_SPOTIFY_SECRET - # valueFrom: - # secretKeyRef: - # name: spotify-creds - # key: CLIENT_ID - volumeMounts: - - name: data - mountPath: "/data" - - name: music - mountPath: "/music" - readOnly: true - - name: cache - mountPath: "/cache" - volumes: - - name: data - persistentVolumeClaim: - claimName: navidrome-pvc-data - - name: music - persistentVolumeClaim: - claimName: navidrome-pvc-music - - name: cache - emptyDir: {} diff --git a/navidrome/navidrome-pv-music.yaml b/navidrome/navidrome-pv-music.yaml deleted file mode 100644 index 16a7294..0000000 --- a/navidrome/navidrome-pv-music.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: navidrome-pv-music - namespace: navidrome-ns -spec: - storageClassName: local-storage - capacity: - storage: 18000Gi - accessModes: - - ReadWriteMany - hostPath: - path: "/Main/Media" - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - gluttony diff --git a/navidrome/navidrome-pvc-music.yaml b/navidrome/navidrome-pvc-music.yaml deleted file mode 100644 index d6dc488..0000000 --- a/navidrome/navidrome-pvc-music.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: navidrome-pvc-music - namespace: navidrome-ns -spec: - volumeName: navidrome-pv-music - storageClassName: local-storage - accessModes: - - ReadWriteMany - resources: - requests: - storage: 18000Gi diff --git a/navidrome/navidrome-service.yaml b/navidrome/navidrome-service.yaml deleted file mode 100644 index 2f7f829..0000000 --- a/navidrome/navidrome-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: navidrome-services - namespace: navidrome-ns -spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 4533 - protocol: TCP - selector: - app: navidrome diff --git a/nvidia/helmrelease-nvidia-operator.yaml b/nvidia/helmrelease-nvidia-operator.yaml deleted file mode 100644 index 424eace..0000000 --- a/nvidia/helmrelease-nvidia-operator.yaml +++ /dev/null @@ -1,556 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: gpu-operator - namespace: nvidia-system -spec: - chart: - spec: - chart: gpu-operator - sourceRef: - kind: HelmRepository - name: nvidia-operator - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: gpu-operator - values: - # Default values for gpu-operator. - # This is a YAML-formatted file. - # Declare variables to be passed into your templates. 
- - platform: - openshift: false - - nfd: - enabled: true - nodefeaturerules: false - - psa: - enabled: false - - cdi: - enabled: false - default: false - - sandboxWorkloads: - enabled: false - defaultWorkload: "container" - - daemonsets: - labels: {} - annotations: {} - priorityClassName: system-node-critical - tolerations: - - key: nvidia.com/gpu - operator: Exists - effect: NoSchedule - # configuration for controlling update strategy("OnDelete" or "RollingUpdate") of GPU Operands - # note that driver Daemonset is always set with OnDelete to avoid unintended disruptions - updateStrategy: "RollingUpdate" - # configuration for controlling rolling update of GPU Operands - rollingUpdate: - # maximum number of nodes to simultaneously apply pod updates on. - # can be specified either as number or percentage of nodes. Default 1. - maxUnavailable: "1" - - validator: - repository: nvcr.io/nvidia/cloud-native - image: gpu-operator-validator - # If version is not specified, then default is to use chart.AppVersion - #version: "" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - args: [] - resources: {} - plugin: - env: - - name: WITH_WORKLOAD - value: "false" - - operator: - repository: nvcr.io/nvidia - image: gpu-operator - # If version is not specified, then default is to use chart.AppVersion - #version: "" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - priorityClassName: system-node-critical - defaultRuntime: docker - runtimeClass: nvidia - use_ocp_driver_toolkit: false - # cleanup CRD on chart un-install - cleanupCRD: false - # upgrade CRD on chart upgrade, requires --disable-openapi-validation flag - # to be passed during helm upgrade. - upgradeCRD: false - initContainer: - image: cuda - repository: nvcr.io/nvidia - version: 12.3.2-base-ubi8 - imagePullPolicy: IfNotPresent - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Equal" - value: "" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/control-plane" - operator: "Equal" - value: "" - effect: "NoSchedule" - annotations: - openshift.io/scc: restricted-readonly - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: "node-role.kubernetes.io/master" - operator: In - values: [""] - - weight: 1 - preference: - matchExpressions: - - key: "node-role.kubernetes.io/control-plane" - operator: In - values: [""] - logging: - # Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano') - timeEncoding: epoch - # Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity - level: info - # Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn) - # Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) - develMode: false - resources: - limits: - cpu: 500m - memory: 350Mi - requests: - cpu: 200m - memory: 100Mi - - mig: - strategy: single - - driver: - enabled: true - nvidiaDriverCRD: - enabled: false - deployDefaultCR: true - driverType: gpu - nodeSelector: {} - useOpenKernelModules: false - # use pre-compiled packages for NVIDIA driver installation. - # only supported for as a tech-preview feature on ubuntu22.04 kernels. 
- usePrecompiled: false - repository: nvcr.io/nvidia - image: driver - version: "550.54.15" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - startupProbe: - initialDelaySeconds: 60 - periodSeconds: 10 - # nvidia-smi can take longer than 30s in some cases - # ensure enough timeout is set - timeoutSeconds: 60 - failureThreshold: 120 - rdma: - enabled: false - useHostMofed: false - upgradePolicy: - # global switch for automatic upgrade feature - # if set to false all other options are ignored - autoUpgrade: true - # how many nodes can be upgraded in parallel - # 0 means no limit, all nodes will be upgraded in parallel - maxParallelUpgrades: 1 - # maximum number of nodes with the driver installed, that can be unavailable during - # the upgrade. Value can be an absolute number (ex: 5) or - # a percentage of total nodes at the start of upgrade (ex: - # 10%). Absolute number is calculated from percentage by rounding - # up. By default, a fixed value of 25% is used.' - maxUnavailable: 25% - # options for waiting on pod(job) completions - waitForCompletion: - timeoutSeconds: 0 - podSelector: "" - # options for gpu pod deletion - gpuPodDeletion: - force: false - timeoutSeconds: 300 - deleteEmptyDir: false - # options for node drain (`kubectl drain`) before the driver reload - # this is required only if default GPU pod deletions done by the operator - # are not sufficient to re-install the driver - drain: - enable: false - force: false - podSelector: "" - # It's recommended to set a timeout to avoid infinite drain in case non-fatal error keeps happening on retries - timeoutSeconds: 300 - deleteEmptyDir: false - manager: - image: k8s-driver-manager - repository: nvcr.io/nvidia/cloud-native - # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 - # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.7 - imagePullPolicy: IfNotPresent - env: - - name: ENABLE_GPU_POD_EVICTION - value: "true" - - name: ENABLE_AUTO_DRAIN - value: "false" - - name: DRAIN_USE_FORCE - value: "false" - - name: DRAIN_POD_SELECTOR_LABEL - value: "" - - name: DRAIN_TIMEOUT_SECONDS - value: "0s" - - name: DRAIN_DELETE_EMPTYDIR_DATA - value: "false" - env: [] - resources: {} - # Private mirror repository configuration - repoConfig: - configMapName: "" - # custom ssl key/certificate configuration - certConfig: - name: "" - # vGPU licensing configuration - licensingConfig: - configMapName: "" - nlsEnabled: true - # vGPU topology daemon configuration - virtualTopology: - config: "" - # kernel module configuration for NVIDIA driver - kernelModuleConfig: - name: "" - - toolkit: - enabled: true - repository: nvcr.io/nvidia/k8s - image: container-toolkit - version: v1.15.0-rc.4-ubuntu20.04 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: - - name: CONTAINERD_CONFIG - value: /var/lib/rancher/k3s/agent/etc/containerd/config.toml - - name: CONTAINERD_SOCKET - value: /run/k3s/containerd/containerd.sock - resources: {} - installDir: "/usr/local/nvidia" - - devicePlugin: - enabled: true - repository: nvcr.io/nvidia - image: k8s-device-plugin - version: v0.15.0-rc.2-ubi8 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - args: [] - env: - - name: PASS_DEVICE_SPECS - value: "true" - - name: FAIL_ON_INIT_ERROR - value: "true" - - name: DEVICE_LIST_STRATEGY - value: envvar - - name: DEVICE_ID_STRATEGY - value: uuid - - name: NVIDIA_VISIBLE_DEVICES - value: all - - name: NVIDIA_DRIVER_CAPABILITIES - value: all - 
resources: {} - # Plugin configuration - # Use "name" to either point to an existing ConfigMap or to create a new one with a list of configurations(i.e with create=true). - # Use "data" to build an integrated ConfigMap from a set of configurations as - # part of this helm chart. An example of setting "data" might be: - # config: - # name: device-plugin-config - # create: true - # data: - # default: |- - # version: v1 - # flags: - # migStrategy: none - # mig-single: |- - # version: v1 - # flags: - # migStrategy: single - # mig-mixed: |- - # version: v1 - # flags: - # migStrategy: mixed - config: - # Create a ConfigMap (default: false) - create: false - # ConfigMap name (either exiting or to create a new one with create=true above) - name: "" - # Default config name within the ConfigMap - default: "" - # Data section for the ConfigMap to create (i.e only applies when create=true) - data: {} - # MPS related configuration for the plugin - mps: - # MPS root path on the host - root: "/run/nvidia/mps" - - # standalone dcgm hostengine - dcgm: - # disabled by default to use embedded nv-hostengine by exporter - enabled: false - repository: nvcr.io/nvidia/cloud-native - image: dcgm - version: 3.3.3-1-ubuntu22.04 - imagePullPolicy: IfNotPresent - hostPort: 5555 - args: [] - env: [] - resources: {} - - dcgmExporter: - enabled: true - repository: nvcr.io/nvidia/k8s - image: dcgm-exporter - version: 3.3.5-3.4.0-ubuntu22.04 - imagePullPolicy: IfNotPresent - env: - - name: DCGM_EXPORTER_LISTEN - value: ":9400" - - name: DCGM_EXPORTER_KUBERNETES - value: "true" - - name: DCGM_EXPORTER_COLLECTORS - value: "/etc/dcgm-exporter/dcp-metrics-included.csv" - resources: {} - serviceMonitor: - enabled: false - interval: 15s - honorLabels: false - additionalLabels: {} - relabelings: [] - # - source_labels: - # - __meta_kubernetes_pod_node_name - # regex: (.*) - # target_label: instance - # replacement: $1 - # action: replace - - gfd: - enabled: true - repository: nvcr.io/nvidia - image: k8s-device-plugin - version: v0.15.0-rc.2-ubi8 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: - - name: GFD_SLEEP_INTERVAL - value: 60s - - name: GFD_FAIL_ON_INIT_ERROR - value: "true" - resources: {} - - migManager: - enabled: true - repository: nvcr.io/nvidia/cloud-native - image: k8s-mig-manager - version: v0.6.0-ubuntu20.04 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: - - name: WITH_REBOOT - value: "false" - resources: {} - config: - name: "default-mig-parted-config" - default: "all-disabled" - gpuClientsConfig: - name: "" - - nodeStatusExporter: - enabled: false - repository: nvcr.io/nvidia/cloud-native - image: gpu-operator-validator - # If version is not specified, then default is to use chart.AppVersion - #version: "" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - resources: {} - - gds: - enabled: false - repository: nvcr.io/nvidia/cloud-native - image: nvidia-fs - version: "2.17.5" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - args: [] - - gdrcopy: - enabled: false - repository: nvcr.io/nvidia/cloud-native - image: gdrdrv - version: "v2.4.1" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - args: [] - - vgpuManager: - enabled: false - repository: "" - image: vgpu-manager - version: "" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - resources: {} - driverManager: - image: k8s-driver-manager - repository: nvcr.io/nvidia/cloud-native - # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower 
than v0.6.4 - # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.7 - imagePullPolicy: IfNotPresent - env: - - name: ENABLE_GPU_POD_EVICTION - value: "false" - - name: ENABLE_AUTO_DRAIN - value: "false" - - vgpuDeviceManager: - enabled: true - repository: nvcr.io/nvidia/cloud-native - image: vgpu-device-manager - version: "v0.2.5" - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - config: - name: "" - default: "default" - - vfioManager: - enabled: true - repository: nvcr.io/nvidia - image: cuda - version: 12.3.2-base-ubi8 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - resources: {} - driverManager: - image: k8s-driver-manager - repository: nvcr.io/nvidia/cloud-native - # When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4 - # to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0 - version: v0.6.7 - imagePullPolicy: IfNotPresent - env: - - name: ENABLE_GPU_POD_EVICTION - value: "false" - - name: ENABLE_AUTO_DRAIN - value: "false" - - kataManager: - enabled: false - config: - artifactsDir: "/opt/nvidia-gpu-operator/artifacts/runtimeclasses" - runtimeClasses: - - name: kata-qemu-nvidia-gpu - nodeSelector: {} - artifacts: - url: nvcr.io/nvidia/cloud-native/kata-gpu-artifacts:ubuntu22.04-535.54.03 - pullSecret: "" - - name: kata-qemu-nvidia-gpu-snp - nodeSelector: - "nvidia.com/cc.capable": "true" - artifacts: - url: nvcr.io/nvidia/cloud-native/kata-gpu-artifacts:ubuntu22.04-535.86.10-snp - pullSecret: "" - repository: nvcr.io/nvidia/cloud-native - image: k8s-kata-manager - version: v0.1.2 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - resources: {} - - sandboxDevicePlugin: - enabled: true - repository: nvcr.io/nvidia - image: kubevirt-gpu-device-plugin - version: v1.2.6 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - args: [] - env: [] - resources: {} - - ccManager: - enabled: false - defaultMode: "off" - repository: nvcr.io/nvidia/cloud-native - image: k8s-cc-manager - version: v0.1.1 - imagePullPolicy: IfNotPresent - imagePullSecrets: [] - env: - - name: CC_CAPABLE_DEVICE_IDS - value: "0x2339,0x2331,0x2330,0x2324,0x2322,0x233d" - resources: {} - - node-feature-discovery: - enableNodeFeatureApi: true - gc: - enable: true - replicaCount: 1 - serviceAccount: - name: node-feature-discovery - create: false - worker: - serviceAccount: - name: node-feature-discovery - # disable creation to avoid duplicate serviceaccount creation by master spec below - create: false - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Equal" - value: "" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/control-plane" - operator: "Equal" - value: "" - effect: "NoSchedule" - - key: nvidia.com/gpu - operator: Exists - effect: NoSchedule - config: - sources: - pci: - deviceClassWhitelist: - - "02" - - "0200" - - "0207" - - "0300" - - "0302" - deviceLabelFields: - - vendor - master: - serviceAccount: - name: node-feature-discovery - create: true - config: - extraLabelNs: ["nvidia.com"] - # noPublish: false - # resourceLabels: ["nvidia.com/feature-1","nvidia.com/feature-2"] - # enableTaints: false - # labelWhiteList: "nvidia.com/gpu" diff --git a/pgadmin/helmrelease-pgadmin.yaml b/pgadmin/helmrelease-pgadmin.yaml deleted file mode 100644 index ccb871b..0000000 --- a/pgadmin/helmrelease-pgadmin.yaml +++ /dev/null @@ -1,376 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease 
-metadata: - name: pgadmin - namespace: postgresql-system -spec: - chart: - spec: - chart: pgadmin4 - sourceRef: - kind: HelmRepository - name: runix - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: pgadmin - values: - # Default values for pgAdmin4. - - replicaCount: 1 - - ## pgAdmin4 container image - ## - image: - registry: docker.io - repository: dpage/pgadmin4 - # Overrides the image tag whose default is the chart appVersion. - tag: "" - pullPolicy: IfNotPresent - - ## Deployment annotations - annotations: {} - - ## priorityClassName - priorityClassName: "" - - ## Deployment entrypoint override - ## Useful when there's a requirement to modify container's default: - ## https://www.vaultproject.io/docs/platform/k8s/injector/examples#environment-variable-example - ## ref: https://github.com/postgres/pgadmin4/blob/master/Dockerfile#L206 - # command: "['/bin/sh', '-c', 'source /vault/secrets/config && ']" - - service: - type: LoadBalancer - clusterIP: "" - loadBalancerIP: "" - port: 80 - targetPort: 80 - # targetPort: 4181 To be used with a proxy extraContainer - portName: http - - annotations: {} - ## Special annotations at the service level, e.g - ## this will set vnet internal IP's rather than public ip's - ## service.beta.kubernetes.io/azure-load-balancer-internal: "true" - - ## Specify the nodePort value for the service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Pod Service Account - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - # Specifies whether a service account should be created - create: false - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - # Opt out of API credential automounting. - # If you don't want the kubelet to automatically mount a ServiceAccount's API credentials, - # you can opt out of the default behavior - automountServiceAccountToken: false - - ## Strategy used to replace old Pods by new ones - ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy - ## - strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - - ## Server definitions will be loaded at launch time. This allows connection - ## information to be pre-loaded into the instance of pgAdmin4 in the container. - ## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/import_export_servers.html - ## - serverDefinitions: - ## If true, server definitions will be created - ## - enabled: false - - ## The resource type to use for deploying server definitions. 
- ## Can either be ConfigMap or Secret - resourceType: ConfigMap - - servers: - # firstServer: - # Name: "Minimally Defined Server" - # Group: "Servers" - # Port: 5432 - # Username: "postgres" - # Host: "localhost" - # SSLMode: "prefer" - # MaintenanceDB: "postgres" - - networkPolicy: - enabled: true - - ## Ingress - ## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ - ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # ingressClassName: "" - hosts: - - host: chart-example.local - paths: - - path: / - pathType: Prefix - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - - # Additional config maps to be mounted inside a container - # Can be used to map config maps for sidecar as well - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/ssl/certs - # subPath: ca-certificates.crt # (optional) - # configMap: certs-configmap - # readOnly: true - - extraSecretMounts: [] - # - name: pgpassfile - # secret: pgpassfile - # subPath: pgpassfile - # mountPath: "/var/lib/pgadmin/storage/pgadmin/file.pgpass" - # readOnly: true - - ## Additional volumes to be mounted inside a container - ## - extraVolumeMounts: [] - - ## Specify additional containers in extraContainers. - ## For example, to add an authentication proxy to a pgadmin4 pod. - extraContainers: | - # - name: proxy - # image: quay.io/gambol99/keycloak-proxy:latest - # args: - # - -provider=github - # - -client-id= - # - -client-secret= - # - -github-org= - # - -email-domain=* - # - -cookie-secret= - # - -http-address=http://0.0.0.0:4181 - # - -upstream-url=http://127.0.0.1:3000 - # ports: - # - name: proxy-web - # containerPort: 4181 - - ## @param existingSecret Name of existing secret to use for default pgadmin credentials. `env.password` will be ignored and picked up from this secret. - ## - existingSecret: "" - ## @param secretKeys.pgadminPasswordKey Name of key in existing secret to use for default pgadmin credentials. Only used when `existingSecret` is set. - ## - secretKeys: - pgadminPasswordKey: password - - ## pgAdmin4 startup configuration - ## Values in here get injected as environment variables - ## Needed chart reinstall for apply changes - env: - # can be email or nickname - email: tyler@clortox.com - password: defaultpassword - # pgpassfile: /var/lib/pgadmin/storage/pgadmin/file.pgpass - - # set context path for application (e.g. 
/pgadmin4/*) - # contextPath: /pgadmin4 - - ## If True, allows pgAdmin4 to create session cookies based on IP address - ## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html - ## - enhanced_cookie_protection: "False" - - ## Add custom environment variables that will be injected to deployment - ## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/container_deployment.html - ## - variables: [] - # - name: PGADMIN_LISTEN_ADDRESS - # value: "0.0.0.0" - # - name: PGADMIN_LISTEN_PORT - # value: "8080" - - ## Additional environment variables from ConfigMaps - envVarsFromConfigMaps: [] - # - array-of - # - config-map-names - - ## Additional environment variables from Secrets - envVarsFromSecrets: [] - # - array-of - # - secret-names - - persistentVolume: - ## If true, pgAdmin4 will create/use a Persistent Volume Claim - ## If false, use emptyDir - enabled: true - - ## pgAdmin4 Persistent Volume Claim annotations - ## - annotations: {} - - ## pgAdmin4 Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - accessModes: - - ReadWriteMany - - ## pgAdmin4 Persistent Volume Size - ## - size: 2Gi - storageClass: "longhorn" - - ## pgAdmin4 Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - existingClaim: "pgadmin-pvc" - - ## Additional volumes to be added to the deployment - ## - extraVolumes: [] - - ## Security context to be added to pgAdmin4 pods - ## - securityContext: - runAsUser: 5050 - runAsGroup: 5050 - fsGroup: 5050 - - containerSecurityContext: - enabled: false - allowPrivilegeEscalation: false - - ## pgAdmin4 readiness and liveness probe initial delay and timeout - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## - livenessProbe: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - successThreshold: 1 - failureThreshold: 3 - - readinessProbe: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - successThreshold: 1 - failureThreshold: 3 - - ## Required to be enabled pre pgAdmin4 4.16 release, to set the ACL on /var/lib/pgadmin. - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## - VolumePermissions: - ## If true, enables an InitContainer to set permissions on /var/lib/pgadmin. - ## - enabled: false - - ## @param extraDeploy list of extra manifests to deploy - ## - extraDeploy: [] - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: | - # - name: add-folder-for-pgpass - # image: "dpage/pgadmin4:latest" - # command: ["/bin/mkdir", "-p", "/var/lib/pgadmin/storage/pgadmin"] - # volumeMounts: - # - name: pgadmin-data - # mountPath: /var/lib/pgadmin - # securityContext: - # runAsUser: 5050 - - containerPorts: - http: 80 - - resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - - ## Horizontal Pod Autoscaling - ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ - # - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - - ## Node labels for pgAdmin4 pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - - ## Pod affinity - ## - affinity: {} - - ## Pod annotations - ## - podAnnotations: {} - - ## Pod labels - ## - podLabels: {} - # key1: value1 - # key2: value2 - - # -- The name of the Namespace to deploy - # If not set, `.Release.Namespace` is used - namespace: null - - init: - ## Init container resources - ## - resources: {} - - ## Define values for chart tests - test: - ## Container image for test-connection.yaml - image: - registry: docker.io - repository: busybox - tag: latest - ## Resources request/limit for test-connection Pod - resources: {} - # limits: - # cpu: 50m - # memory: 32Mi - # requests: - # cpu: 25m - # memory: 16Mi - ## Security context for test-connection Pod - securityContext: - runAsUser: 5051 - runAsGroup: 5051 - fsGroup: 5051 diff --git a/plex/plex-deployment.yaml b/plex/plex-deployment.yaml deleted file mode 100644 index c19d12d..0000000 --- a/plex/plex-deployment.yaml +++ /dev/null @@ -1,100 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: plex - namespace: plex-ns - annotations: - force-recreate: true -spec: - replicas: 1 - selector: - matchLabels: - app: plex - template: - metadata: - labels: - app: plex - spec: - nodeSelector: - kubernetes.io/hostname: gluttony - containers: - - name: plex - image: plexinc/pms-docker:public - env: - - name: TZ - value: EST - - name: PLEX_UID - value: "1000" - - name: PLEX_GID - value: "1000" - - name: PLEX_CLAIM - valueFrom: - secretKeyRef: - name: plex-claim - key: PLEXCLAIM - ports: - - containerPort: 32400 - - containerPort: 8234 - - containerPort: 32469 - - containerPort: 1900 - - containerPort: 32410 - - containerPort: 32412 - - containerPort: 32413 - - containerPort: 32414 - volumeMounts: - - name: plex-config - mountPath: /config - - name: plex-media - mountPath: /data - - # Sidecar providing access to upload/view/download raw media files - - name: filebrowswer - image: git.clortox.com/infrastructure/filebrowser:v1.0.1 - env: - - name: ADMIN_PASS - valueFrom: - secretKeyRef: - name: filebrowser-secret - key: ADMIN-PASS - - name: DEFAULT_USERNAME - value: "default" - - name: DEFAULT_PASSWORD - valueFrom: - secretKeyRef: - name: filebrowser-secret - key: DEFAULT-PASS - - name: BRANDING_NAME - value: "Media Storage" - - name: AUTH_METHOD - value: "proxy" - - name: AUTH_HEADER - value: "X-Auth-User" - - name: PERM_ADMIN - value: "false" - - name: PERM_EXECUTE - value: "false" - - name: PERM_CREATE - value: "true" - - name: PERM_RENAME - value: "true" - - name: PERM_MODIFY - value: "true" - - name: PERM_DELETE - value: "false" - - name: PERM_SHARE - value: "true" - - name: PERM_DOWNLOAD - value: "true" - volumeMounts: - - name: plex-media - mountPath: /srv - ports: - - containerPort: 80 - - volumes: - - name: plex-config - persistentVolumeClaim: - claimName: plex-pvc-config - - name: plex-media - persistentVolumeClaim: - claimName: plex-pvc-media diff --git 
a/plex/plex-pv-media.yaml b/plex/plex-pv-media.yaml deleted file mode 100644 index 90e9d38..0000000 --- a/plex/plex-pv-media.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: plex-pv-media -spec: - storageClassName: local-storage - capacity: - storage: 18000Gi - accessModes: - - ReadWriteMany - hostPath: - path: "/Main/Media" - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/hostname - operator: In - values: - - gluttony diff --git a/plex/plex-pvc-config.yaml b/plex/plex-pvc-config.yaml deleted file mode 100644 index c010d33..0000000 --- a/plex/plex-pvc-config.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: plex-pvc-config - namespace: plex-ns -spec: - storageClassName: longhorn - accessModes: - - ReadWriteMany - resources: - requests: - storage: 200Gi diff --git a/plex/plex-pvc-media.yaml b/plex/plex-pvc-media.yaml deleted file mode 100644 index c5f9816..0000000 --- a/plex/plex-pvc-media.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: plex-pvc-media - namespace: plex-ns -spec: - volumeName: plex-pv-media - storageClassName: local-storage - accessModes: - - ReadWriteMany - resources: - requests: - storage: 18000Gi diff --git a/plex/plex-secret.yaml b/plex/plex-secret.yaml deleted file mode 100644 index 8864319..0000000 --- a/plex/plex-secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: plex-claim - namespace: plex-ns -spec: - encryptedData: - PLEXCLAIM: AgB4+SIsvYCxwg5cAzJOYHCIu52+uAhOalhNf4GlHMMCJfgw46FH3AXVpikDFKtZ0gBMR6aAeJ6lqP6b1TuaOo4rs+LI8NkCOLxMq4S8zHH5y1K9T0CzxcQBWRVLI+WGVb3TX7H0riixV5WeCbI3h0wkSith4pRM/hUAOY4Rez7RlUOt6Krkn5uMwzpDTscXFev+oF70PhwQSNqKPGwY3DiOZa2Vt3maMOopevD8VuYzsRDMTJTGtUchjs1SPk8iTsu2a8O8zFmtx/GCWDDrUmKhohMOspg8xhCp+n4KD3n+IYVCJRa/N2c2cs3fmFtnh5u+RjMDwPAl9XmhfGSsDkQ0oURszAy0OjuRu1govB31UHgaYFioD3KwjCl1JJ1e1F6bJ5qbF+jzeZUPg9WZfzUKygbULrNLkB7VMsCSzkNjj11ho6NIffjOY5Q0CCOOgb8PjilLNsaSDTABMJ41flrG2hjVrzIEy1dsnH8HIuR2YYUgaic5NJoD4lpH/GJqFqsp1R7BC8YDn/5sHYENhQ4FrEZpi3pFD5yp4biq2Va7/ZmxFs4VHOZJSgBUlk0lNsdHdIjM7ZyUsDTlKd2W3W/U7jnab33IBKij5gJzv2mZ9Poly4BJODUJWcS5qJD4WWbnzpJBLhNRa5nMJYXYGy4bcgvq1wo3d6GHeO6ogjELgkw3l9BCxeBtgOpu23TinehUWNIZZjvXYAteRA6354Se6lfk+aCeiof7ig== - template: - metadata: - creationTimestamp: null - name: plex-claim - namespace: plex-ns ---- diff --git a/plex/plex-service.yaml b/plex/plex-service.yaml deleted file mode 100644 index e999ada..0000000 --- a/plex/plex-service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: plex-service - namespace: plex-ns -spec: - selector: - app: plex - type: LoadBalancer - ports: - - name: plex - protocol: TCP - port: 32400 - targetPort: 32400 - - name: filebrowswer - protocol: TCP - port: 80 - targetPort: 80 diff --git a/plex/sealed-secret.yaml b/plex/sealed-secret.yaml deleted file mode 100644 index d35c7b0..0000000 --- a/plex/sealed-secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: filebrowser-secret - namespace: plex-ns -spec: - encryptedData: - ADMIN-PASS: 
AgAj7jrppKzPFfnYDV+FEuXQx9lrkppWFElR3DjtR017tpBQs6/KjZYU1TX81TkNh8cONg4mGB72zvk60Yft5b5TSgZWuKA4qTXYEoFusyMR3wyOU/Ft7ZMk7IAr/7Hi9dDAh3CkmrQ2lQ3C5hRlfTljaSxqC9abmEZIeSo7OhrkX8YIvFhanBMbPenfkulSsK38dp3PfIC8kntRV1u37Z7CxovVu+Kn7IoRC4sKa3gcdJ5lIA/Aq3rln8atmzZcPGPzjIAPY4P72mjPaeIvzqzLsNMcecIIr20MyLTOG/eI8WrM+WC+dgyvj/Pjq/hzTW1QD3z4jZW224o4ghKiPr6mW0BbN6KBBqv/JFtpBqiYgGi/ADBVxTG7YUA+FcT7YA6nuxlqg+TMpwqP8ZJBmghosBeqmBndjKUjpexoihmy+XTDbEr7e8RDpOdL9jS9hGPt47cmFITSFSEQIGM6kOtdYWcMw6+aKkTt5Ul4bUfV9TXultGyNYITibATXWNqbRfZDjYVrWOIfoVJOe19N3WZg9R4UeKzow3RkoJvn3MUTYOOrzr9Csx+VxUMeGxLPFftedUIy8zzXaqL/0OFogQZ2P+mesiYxc67Z4VS2u0+iCLkJdUDYnM+2q6TRQMI2nP40ko62xDuSE2BDcufqsKfHoddswlYDyelLVqJKee+P3sUoxcblYlv6kqz1GbVhBKQrHzFphx72KG219N9zwjOI6w8V6NXHUEFblQ3gt9RPA== - DEFAULT-PASS: AgDXxxyMBUb7Q0J8LvxPXNEAz75c1JcS7xL3rN7E2Wg7MLsZHj2/0hRf9jaCCyyVnr/Pabbdmjb0nth4Dlm50tLWH+rU7KtLPwHB0pMVi9zSxKBdyvOJurVdY/nlbSuanxSL37rBOrwRQRv6t8w/IIs4R9GEaFjxKoIJTuV8JRu3r62FiL/3o2zyok9UYcLxw2h9H7B9yn9wXn5CAFk0M4jNRUns3oU7d0/hPbfwC216vU0ZIdga8yYlZw3zvVz54mX2XECnHWZT8gPK1w6v8AEca/kDFuVFBi63OdXFgjBHCa/uSs5wifzNPBzcRA+A8s/JgoSHEeMXTmBsMOlihSSz0kSGHS/rUdu6nZamVZfzCWOHskb3RVjs23yNJsSEDlYR/AMeAjnkMDvMe5b/X/eV1AOYkAQ/pACrSk2aG+4kLmLoLYXaeDVf8pTHj4yOvdffWk39ClCqIOyWF2+//N02lDepVwis498cL+7I4kEVXqy9FugUCsbtzxVXX6OHym4KpBZpAmrMqH83rC6CtU4orF6gjmTKCe1Ufq5GmsQgFFZTZYTexnbeTKXz6yw+RbHLTGdsaJnMaAQx5uB3khO9Pkge7/HLDmXEx+mtaaTvk7AF8PWjFJSQZEWxVSCr6O1Zd4LKsg0EP6Mrk+s+8OOfGb42e3wfJ6gY7KlTBBu8KmKnHRQl9uoMVO7y5PWwl+B3Wam5j78ggV4L9UmiEw6gYvrc8rmQWZqQbuw7pClQ3Q== - template: - metadata: - creationTimestamp: null - name: filebrowser-secret - namespace: plex-ns diff --git a/postgresql/helmrelease-postgresql.yaml b/postgresql/helmrelease-postgresql.yaml deleted file mode 100644 index d1c361c..0000000 --- a/postgresql/helmrelease-postgresql.yaml +++ /dev/null @@ -1,1622 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: postgresql - namespace: postgresql-system -spec: - chart: - spec: - chart: postgresql - sourceRef: - kind: HelmRepository - name: bitnami - namespace: flux-system - interval: 15m0s - timeout: 5m - releaseName: postgresql - values: - # Copyright VMware, Inc. - # SPDX-License-Identifier: APACHE-2.0 - - ## @section Global parameters - ## Please, note that this will override the parameters, including dependencies, configured to use the global value - ## - global: - ## @param global.imageRegistry Global Docker image registry - ## - imageRegistry: "" - ## @param global.imagePullSecrets Global Docker registry secret names as an array - ## e.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - ## @param global.storageClass Global StorageClass for Persistent Volume(s) - ## - storageClass: "" - postgresql: - ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) - ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) - ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) - ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) - ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). - ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). 
Only used when `global.postgresql.auth.existingSecret` is set. - ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. - ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. - ## - auth: - #postgresPassword: "" - #username: "" - #password: "" - database: "" - existingSecret: "postgresql-default-credentials" - secretKeys: - adminPasswordKey: "" - userPasswordKey: "" - replicationPasswordKey: "" - ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) - ## - service: - ports: - postgresql: "" - - ## @section Common parameters - ## - - ## @param kubeVersion Override Kubernetes version - ## - kubeVersion: "" - ## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) - ## - nameOverride: "" - ## @param fullnameOverride String to fully override common.names.fullname template - ## - fullnameOverride: "" - ## @param clusterDomain Kubernetes Cluster Domain - ## - clusterDomain: cluster.local - ## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) - ## - extraDeploy: [] - ## @param commonLabels Add labels to all the deployed resources - ## - commonLabels: {} - ## @param commonAnnotations Add annotations to all the deployed resources - ## - commonAnnotations: {} - ## Enable diagnostic mode in the statefulset - ## - diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) - ## - enabled: false - ## @param diagnosticMode.command Command to override all containers in the statefulset - ## - command: - - sleep - ## @param diagnosticMode.args Args to override all containers in the statefulset - ## - args: - - infinity - - ## @section PostgreSQL common parameters - ## - - ## Bitnami PostgreSQL image version - ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ - ## @param image.registry PostgreSQL image registry - ## @param image.repository PostgreSQL image repository - ## @param image.tag PostgreSQL image tag (immutable tags are recommended) - ## @param image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param image.pullPolicy PostgreSQL image pull policy - ## @param image.pullSecrets Specify image pull secrets - ## @param image.debug Specify if debug values should be set - ## - image: - registry: git.clortox.com - repository: infrastructure/gluttony-cluster-postgresql - tag: v1.2.0 - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Set to true if you would like to see extra information on logs - ## - debug: false - ## Authentication parameters - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run - ## - auth: - ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user - ## - enablePostgresUser: true - ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided - ## - postgresPassword: "" - ## @param auth.username Name for a custom user to create - ## - username: "" - ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided - ## - password: "" - ## @param auth.database Name for a custom database to create - ## - database: "" - ## @param auth.replicationUsername Name of the replication user - ## - replicationUsername: repl_user - ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided - ## - replicationPassword: "" - ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. - ## - existingSecret: "" - ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. - ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. - ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. - ## - secretKeys: - adminPasswordKey: postgres-password - userPasswordKey: password - replicationPasswordKey: replication-password - ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable - ## - usePasswordFiles: false - ## @param architecture PostgreSQL architecture (`standalone` or `replication`) - ## - architecture: standalone - ## Replication configuration - ## Ignored if `architecture` is `standalone` - ## - replication: - ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` - ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. - ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT - ## - synchronousCommit: "off" - numSynchronousReplicas: 0 - ## @param replication.applicationName Cluster application name. 
Useful for advanced replication settings - ## - applicationName: my_application - ## @param containerPorts.postgresql PostgreSQL container port - ## - containerPorts: - postgresql: 5432 - ## Audit settings - ## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing - ## @param audit.logHostname Log client hostnames - ## @param audit.logConnections Add client log-in operations to the log file - ## @param audit.logDisconnections Add client log-outs operations to the log file - ## @param audit.pgAuditLog Add operations to log using the pgAudit extension - ## @param audit.pgAuditLogCatalog Log catalog using pgAudit - ## @param audit.clientMinMessages Message log level to share with the user - ## @param audit.logLinePrefix Template for log line prefix (default if not set) - ## @param audit.logTimezone Timezone for the log timestamps - ## - audit: - logHostname: false - logConnections: false - logDisconnections: false - pgAuditLog: "" - pgAuditLogCatalog: "off" - clientMinMessages: error - logLinePrefix: "" - logTimezone: "" - ## LDAP configuration - ## @param ldap.enabled Enable LDAP support - ## DEPRECATED ldap.url It will removed in a future, please use 'ldap.uri' instead - ## @param ldap.server IP address or name of the LDAP server. - ## @param ldap.port Port number on the LDAP server to connect to - ## @param ldap.prefix String to prepend to the user name when forming the DN to bind - ## @param ldap.suffix String to append to the user name when forming the DN to bind - ## DEPRECATED ldap.baseDN It will removed in a future, please use 'ldap.basedn' instead - ## DEPRECATED ldap.bindDN It will removed in a future, please use 'ldap.binddn' instead - ## DEPRECATED ldap.bind_password It will removed in a future, please use 'ldap.bindpw' instead - ## @param ldap.basedn Root DN to begin the search for the user in - ## @param ldap.binddn DN of user to bind to LDAP - ## @param ldap.bindpw Password for the user to bind to LDAP - ## DEPRECATED ldap.search_attr It will removed in a future, please use 'ldap.searchAttribute' instead - ## DEPRECATED ldap.search_filter It will removed in a future, please use 'ldap.searchFilter' instead - ## @param ldap.searchAttribute Attribute to match against the user name in the search - ## @param ldap.searchFilter The search filter to use when doing search+bind authentication - ## @param ldap.scheme Set to `ldaps` to use LDAPS - ## DEPRECATED ldap.tls as string is deprecated please use 'ldap.tls.enabled' instead - ## @param ldap.tls.enabled Se to true to enable TLS encryption - ## - ldap: - enabled: false - server: "" - port: "" - prefix: "" - suffix: "" - basedn: "" - binddn: "" - bindpw: "" - searchAttribute: "" - searchFilter: "" - scheme: "" - tls: - enabled: false - ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. - ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html - ## - uri: "" - ## @param postgresqlDataDir PostgreSQL data dir folder - ## - postgresqlDataDir: /bitnami/postgresql/data - ## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) - ## - postgresqlSharedPreloadLibraries: "pgaudit" - ## Start PostgreSQL pod(s) without limitations on shm memory. 
- ## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` - ## ref: https://github.com/docker-library/postgres/issues/416 - ## ref: https://github.com/containerd/containerd/issues/3654 - ## - shmVolume: - ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) - ## - enabled: true - ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs - ## Note: the size of the tmpfs counts against container's memory limit - ## e.g: - ## sizeLimit: 1Gi - ## - sizeLimit: "" - ## TLS configuration - ## - tls: - ## @param tls.enabled Enable TLS traffic support - ## - enabled: false - ## @param tls.autoGenerated Generate automatically self-signed TLS certificates - ## - autoGenerated: false - ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's - ## - preferServerCiphers: true - ## @param tls.certificatesSecret Name of an existing secret that contains the certificates - ## - certificatesSecret: "" - ## @param tls.certFilename Certificate filename - ## - certFilename: "" - ## @param tls.certKeyFilename Certificate key filename - ## - certKeyFilename: "" - ## @param tls.certCAFilename CA Certificate filename - ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate - ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html - ## - certCAFilename: "" - ## @param tls.crlFilename File containing a Certificate Revocation List - ## - crlFilename: "" - - ## @section PostgreSQL Primary parameters - ## - primary: - ## @param primary.name Name of the primary database (eg primary, master, leader, ...) - ## - name: primary - ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap - ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html - ## - configuration: "" - ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration - ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html - ## e.g:# - ## pgHbaConfiguration: |- - ## local all all trust - ## host all all localhost trust - ## host mydatabase mysuser 192.168.0.0/24 md5 - ## - pgHbaConfiguration: "" - ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration - ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored - ## - existingConfigmap: "" - ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf - ## - extendedConfiguration: "" - ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration - ## NOTE: `primary.extendedConfiguration` will be ignored - ## - existingExtendedConfigmap: "" - ## Initdb configuration - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments - ## - initdb: - ## @param primary.initdb.args PostgreSQL initdb extra arguments - ## - args: "" - ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log - ## - postgresqlWalDir: "" - ## @param primary.initdb.scripts Dictionary of initdb scripts - ## Specify dictionary of scripts to be run at first boot - ## e.g: - ## scripts: - ## my_init_script.sh: | 
- ## #!/bin/sh - ## echo "Do something." - ## - scripts: {} - ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot - ## NOTE: This will override `primary.initdb.scripts` - ## - scriptsConfigMap: "" - ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) - ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` - ## - scriptsSecret: "" - ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts - ## - user: "" - ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts - ## - password: "" - ## Configure current cluster's primary server to be the standby server in other cluster. - ## This will allow cross cluster replication and provide cross cluster high availability. - ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. - ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not - ## @param primary.standby.primaryHost The Host of replication primary in the other cluster - ## @param primary.standby.primaryPort The Port of replication primary in the other cluster - ## - standby: - enabled: false - primaryHost: "" - primaryPort: "" - ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes - ## - extraEnvVarsCM: "" - ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes - ## - extraEnvVarsSecret: "" - ## @param primary.command Override default container command (useful when using custom images) - ## - command: [] - ## @param primary.args Override default container args (useful when using custom images) - ## - args: [] - ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers - ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers - ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe 
- ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers - ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe - ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param primary.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param primary.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup - ## - lifecycleHooks: {} - ## PostgreSQL Primary resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers - ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers - ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers - ## - resources: - limits: {} - requests: - memory: 256Mi - cpu: 250m - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param primary.podSecurityContext.enabled Enable security context - ## @param primary.podSecurityContext.fsGroup Group ID for the pod - ## - podSecurityContext: - enabled: true - fsGroup: 1001 - ## Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param primary.containerSecurityContext.enabled Enable container security context - ## @param primary.containerSecurityContext.runAsUser User ID for the container - ## @param primary.containerSecurityContext.runAsGroup Group ID for the container - ## @param primary.containerSecurityContext.runAsNonRoot Set runAsNonRoot for the container - ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation for the container - ## @param primary.containerSecurityContext.seccompProfile.type Set seccompProfile.type for the container - ## @param primary.containerSecurityContext.capabilities.drop Set capabilities.drop for the container - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param primary.hostAliases PostgreSQL primary pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) - ## - hostNetwork: false - ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod 
(postgresql primary) - ## - hostIPC: false - ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) - ## - labels: {} - ## @param primary.annotations Annotations for PostgreSQL primary pods - ## - annotations: {} - ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) - ## - podLabels: {} - ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) - ## - podAnnotations: {} - ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## PostgreSQL Primary node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param primary.affinity Affinity for PostgreSQL primary pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) - ## - priorityClassName: "" - ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods - ## - terminationGracePeriodSeconds: "" - ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type - ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - rollingUpdate: {} - ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) - ## - extraVolumeMounts: [] - ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) - ## - extraVolumes: [] - ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) - ## Example - ## - ## initContainers: - ## - name: do-something - ## image: busybox - ## command: ['do', 'something'] - ## - initContainers: [] - ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) - ## - extraPodSpec: {} - ## PostgreSQL Primary service configuration - ## - service: - ## @param primary.service.type Kubernetes Service type - ## - type: ClusterIP - ## @param primary.service.ports.postgresql PostgreSQL service port - ## - ports: - postgresql: 5432 - ## Node ports to expose - ## NOTE: choose port between <30000-32767> - ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePorts: - postgresql: "" - ## @param primary.service.clusterIP Static clusterIP or None for headless services - ## e.g: - ## clusterIP: None - ## - clusterIP: "" - ## @param primary.service.annotations Annotations for PostgreSQL primary service - ## - annotations: {} - ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` - ## Set the LoadBalancer service type to internal only - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param primary.service.externalTrafficPolicy Enable client source IP preservation - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service - ## - extraPorts: [] - ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## 
If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Headless service properties - ## - headless: - ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service - ## - annotations: {} - ## PostgreSQL Primary persistence configuration - ## - persistence: - ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC - ## - enabled: true - ## @param primary.persistence.existingClaim Name of an existing PVC to use - ## - existingClaim: "data-postgresql-pvc" - ## @param primary.persistence.mountPath The path the volume will be mounted at - ## Note: useful when using custom PostgreSQL images - ## - mountPath: /bitnami/postgresql - ## @param primary.persistence.subPath The subdirectory of the volume to mount to - ## Useful in dev environments and one PV for multiple services - ## - subPath: "" - ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClass: "longhorn" - ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume - ## - accessModes: - - ReadWriteOnce - ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume - ## - size: 20Gi - ## @param primary.persistence.annotations Annotations for the PVC - ## - annotations: {} - ## @param primary.persistence.labels Labels for the PVC - ## - labels: {} - ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param primary.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## PostgreSQL Primary Persistent Volume Claim Retention Policy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## - persistentVolumeClaimRetentionPolicy: - ## @param primary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for Primary Statefulset - ## - enabled: true - ## @param primary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## - whenScaled: Retain - ## @param primary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - whenDeleted: Retain - - ## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`) - ## - readReplicas: - ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...) 
- ## - name: read - ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas - ## - replicaCount: 1 - ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration) - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf - ## - extendedConfiguration: "" - ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes - ## - extraEnvVarsCM: "" - ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes - ## - extraEnvVarsSecret: "" - ## @param readReplicas.command Override default container command (useful when using custom images) - ## - command: [] - ## @param readReplicas.args Override default container args (useful when using custom images) - ## - args: [] - ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers - ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers - ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers - ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe - ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## 
@param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup - ## - lifecycleHooks: {} - ## PostgreSQL read only resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers - ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers - ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers - ## - resources: - limits: {} - requests: - memory: 256Mi - cpu: 250m - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param readReplicas.podSecurityContext.enabled Enable security context - ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod - ## - podSecurityContext: - enabled: true - fsGroup: 1001 - ## Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param readReplicas.containerSecurityContext.enabled Enable container security context - ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container - ## @param readReplicas.containerSecurityContext.runAsGroup Group ID for the container - ## @param readReplicas.containerSecurityContext.runAsNonRoot Set runAsNonRoot for the container - ## @param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation for the container - ## @param readReplicas.containerSecurityContext.seccompProfile.type Set seccompProfile.type for the container - ## @param readReplicas.containerSecurityContext.capabilities.drop Set capabilities.drop for the container - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) - ## - hostNetwork: false - ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) - ## - hostIPC: false - ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) - ## - labels: {} - ## @param readReplicas.annotations Annotations for PostgreSQL read only pods - ## - annotations: {} - ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) - ## - podLabels: {} - ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) - ## - podAnnotations: {} - ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## PostgreSQL read only node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) - ## - priorityClassName: "" - ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods - ## - terminationGracePeriodSeconds: "" - ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type - ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - rollingUpdate: {} - ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) - ## - extraVolumeMounts: [] - ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) - ## - extraVolumes: [] - ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) - ## Example - ## - ## initContainers: - ## - name: do-something - ## image: busybox - ## command: ['do', 'something'] - ## - initContainers: [] - ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) - ## - extraPodSpec: {} - ## PostgreSQL read only service configuration - ## - service: - ## @param readReplicas.service.type Kubernetes Service type - ## - type: ClusterIP - ## @param readReplicas.service.ports.postgresql PostgreSQL service port - ## - ports: - postgresql: 5432 - ## Node ports to expose - ## NOTE: choose port between <30000-32767> - ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePorts: - postgresql: "" - ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services - ## e.g: - ## clusterIP: None - ## - clusterIP: "" - ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service - ## - annotations: {} - ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` - ## Set the LoadBalancer service type to internal only - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service - ## - extraPorts: [] - ## @param 
readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Headless service properties - ## - headless: - ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service - ## - annotations: {} - ## PostgreSQL read only persistence configuration - ## - persistence: - ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC - ## - enabled: true - ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use - ## - existingClaim: "" - ## @param readReplicas.persistence.mountPath The path the volume will be mounted at - ## Note: useful when using custom PostgreSQL images - ## - mountPath: /bitnami/postgresql - ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to - ## Useful in dev environments and one PV for multiple services - ## - subPath: "" - ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClass: "" - ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume - ## - accessModes: - - ReadWriteOnce - ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume - ## - size: 8Gi - ## @param readReplicas.persistence.annotations Annotations for the PVC - ## - annotations: {} - ## @param readReplicas.persistence.labels Labels for the PVC - ## - labels: {} - ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param readReplicas.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## PostgreSQL Read only Persistent Volume Claim Retention Policy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## - persistentVolumeClaimRetentionPolicy: - ## @param readReplicas.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for read only Statefulset - ## - enabled: false - ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## - whenScaled: Retain - ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - whenDeleted: Retain - - - ## @section Backup parameters - ## This section implements a trivial logical dump cronjob of the database. - ## This only comes with the consistency guarantees of the dump program. - ## This is not a snapshot based roll forward/backward recovery backup. 
- ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ - backup: - ## @param backup.enabled Enable the logical dump of the database "regularly" - enabled: false - cronjob: - ## @param backup.cronjob.schedule Set the cronjob parameter schedule - schedule: "@daily" - ## @param backup.cronjob.timeZone Set the cronjob parameter timeZone - timeZone: "" - ## @param backup.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy - concurrencyPolicy: Allow - ## @param backup.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit - failedJobsHistoryLimit: 1 - ## @param backup.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit - successfulJobsHistoryLimit: 3 - ## @param backup.cronjob.startingDeadlineSeconds Set the cronjob parameter startingDeadlineSeconds - startingDeadlineSeconds: "" - ## @param backup.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished - ttlSecondsAfterFinished: "" - ## @param backup.cronjob.restartPolicy Set the cronjob parameter restartPolicy - restartPolicy: OnFailure - ## @param backup.cronjob.podSecurityContext.enabled Enable PodSecurityContext for CronJob/Backup - ## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the CronJob - podSecurityContext: - enabled: true - fsGroup: 1001 - ## backup container's Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param backup.cronjob.containerSecurityContext.enabled Enable container security context - ## @param backup.cronjob.containerSecurityContext.runAsUser User ID for the backup container - ## @param backup.cronjob.containerSecurityContext.runAsGroup Group ID for the backup container - ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set backup container's Security Context runAsNonRoot - ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Is the container itself readonly - ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate backup pod(s) privileges - ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set backup container's Security Context seccompProfile type - ## @param backup.cronjob.containerSecurityContext.capabilities.drop Set backup container's Security Context capabilities to drop - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param backup.cronjob.command Set backup container's command to run - command: - - /bin/sh - - -c - - "pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file=${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump" - - ## @param backup.cronjob.labels Set the cronjob labels - labels: {} - ## @param backup.cronjob.annotations Set the cronjob annotations - annotations: {} - ## @param backup.cronjob.nodeSelector Node labels for PostgreSQL backup CronJob pod assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - storage: - ## @param backup.cronjob.storage.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) - ## If defined, PVC must be created manually before volume will be bound - ## - existingClaim: "" - ## @param 
backup.cronjob.storage.resourcePolicy Set it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart is deleted - ## - resourcePolicy: "" - ## @param backup.cronjob.storage.storageClass PVC Storage Class for the backup data volume - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. - ## - storageClass: "" - ## @param backup.cronjob.storage.accessModes PV Access Mode - ## - accessModes: - - ReadWriteOnce - ## @param backup.cronjob.storage.size PVC Storage Request for the backup data volume - ## - size: 8Gi - ## @param backup.cronjob.storage.annotations PVC annotations - ## - annotations: {} - ## @param backup.cronjob.storage.mountPath Path to mount the volume at - ## - mountPath: /backup/pgdump - ## @param backup.cronjob.storage.subPath Subdirectory of the volume to mount at - ## Useful in dev environments and when using one PV for multiple services. - ## - subPath: "" - ## Fine tuning for volumeClaimTemplates - ## - volumeClaimTemplates: - ## @param backup.cronjob.storage.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes) - ## A label query over volumes to consider for binding (e.g. when using local volumes) - ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details - ## - selector: {} - - ## @section NetworkPolicy parameters - ## - - ## Add network policies - ## - networkPolicy: - ## @param networkPolicy.enabled Enable network policies - ## - enabled: false - ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus) - ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the Prometheus namespace. - ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. - ## - metrics: - enabled: false - ## e.g: - ## namespaceSelector: - ## label: monitoring - ## - namespaceSelector: {} - ## e.g: - ## podSelector: - ## label: monitoring - ## - podSelector: {} - ## Ingress Rules - ## - ingressRules: - ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes the PostgreSQL primary node only accessible from a particular origin. - ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s). - ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s). - ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules Custom network policy for the PostgreSQL primary node. 
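A hedged sketch of turning on the logical-dump CronJob described above; the schedule, size, and storage class are assumptions, and pg_dumpall remains subject to the consistency caveats noted in the section comment:

backup:
  enabled: true
  cronjob:
    schedule: "0 3 * * *"        # illustrative: nightly at 03:00 instead of "@daily"
    concurrencyPolicy: Forbid    # don't start a new dump while the previous one is still running
    storage:
      storageClass: "longhorn"   # assumption: same class used elsewhere in this repo
      size: 20Gi
      resourcePolicy: "keep"     # keep the backup PVC across a helm delete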
- ## - primaryAccessOnlyFrom: - enabled: false - ## e.g: - ## namespaceSelector: - ## label: ingress - ## - namespaceSelector: {} - ## e.g: - ## podSelector: - ## label: access - ## - podSelector: {} - ## custom ingress rules - ## e.g: - ## customRules: - ## - from: - ## - namespaceSelector: - ## matchLabels: - ## label: example - ## - customRules: [] - ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. - ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed namespace(s). - ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed pod(s). - ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules Custom network policy for the PostgreSQL read-only nodes. - ## - readReplicasAccessOnlyFrom: - enabled: false - ## e.g: - ## namespaceSelector: - ## label: ingress - ## - namespaceSelector: {} - ## e.g: - ## podSelector: - ## label: access - ## - podSelector: {} - ## custom ingress rules - ## e.g: - ## CustomRules: - ## - from: - ## - namespaceSelector: - ## matchLabels: - ## label: example - ## - customRules: [] - ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). - ## @param networkPolicy.egressRules.customRules Custom network policy rule - ## - egressRules: - # Deny connections to external. This is not compatible with an external database. - denyConnectionsToExternal: false - ## Additional custom egress rules - ## e.g: - ## customRules: - ## - to: - ## - namespaceSelector: - ## matchLabels: - ## label: example - ## - customRules: [] - - ## @section Volume Permissions parameters - ## - - ## Init containers parameters: - ## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node - ## - volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume - ## - enabled: false - ## @param volumePermissions.image.registry Init container volume-permissions image registry - ## @param volumePermissions.image.repository Init container volume-permissions image repository - ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) - ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy - ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 11-debian-11-r77 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
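A sketch of the ingress-rule parameters above in use; the selector labels are hypothetical and would need to match the actual client namespace and pods:

networkPolicy:
  enabled: true
  ingressRules:
    primaryAccessOnlyFrom:
      enabled: true
      namespaceSelector:
        kubernetes.io/metadata.name: my-app-ns   # hypothetical client namespace
      podSelector:
        app.kubernetes.io/name: my-app           # hypothetical client pods
  egressRules:
    denyConnectionsToExternal: false             # leave false if the database must reach external endpoints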
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init container resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param volumePermissions.resources.limits Init container volume-permissions resource limits - ## @param volumePermissions.resources.requests Init container volume-permissions resource requests - ## - resources: - limits: {} - requests: {} - ## Init container' Security Context - ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser - ## and not the below volumePermissions.containerSecurityContext.runAsUser - ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container - ## @param volumePermissions.containerSecurityContext.runAsGroup Group ID for the init container - ## @param volumePermissions.containerSecurityContext.runAsNonRoot runAsNonRoot for the init container - ## @param volumePermissions.containerSecurityContext.seccompProfile.type seccompProfile.type for the init container - ## - containerSecurityContext: - runAsUser: 0 - runAsGroup: 0 - runAsNonRoot: false - seccompProfile: - type: RuntimeDefault - ## @section Other Parameters - ## - - ## @param serviceBindings.enabled Create secret for service binding (Experimental) - ## Ref: https://servicebinding.io/service-provider/ - ## - serviceBindings: - enabled: false - - ## Service account for PostgreSQL to use. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod - ## - create: false - ## @param serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## Can be set to false if pods using this serviceAccount do not need to use K8s API - ## - automountServiceAccountToken: true - ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## Creates role for ServiceAccount - ## @param rbac.create Create Role and RoleBinding (required for PSP to work) - ## - rbac: - create: false - ## @param rbac.rules Custom RBAC rules to set - ## e.g: - ## rules: - ## - apiGroups: - ## - "" - ## resources: - ## - pods - ## verbs: - ## - get - ## - list - ## - rules: [] - ## Pod Security Policy - ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later - ## - psp: - create: false - - ## @section Metrics Parameters - ## - - metrics: - ## @param metrics.enabled Start a prometheus exporter - ## - enabled: false - ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry - ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository - ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) - ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag - ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy - ## @param metrics.image.pullSecrets Specify image pull secrets - ## - image: - registry: docker.io - repository: bitnami/postgres-exporter - tag: 0.14.0-debian-11-r5 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## @param metrics.customMetrics Define additional custom metrics - ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file - ## customMetrics: - ## pg_database: - ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" - ## metrics: - ## - name: - ## usage: "LABEL" - ## description: "Name of the database" - ## - size_bytes: - ## usage: "GAUGE" - ## description: "Size of the database in bytes" - ## - customMetrics: {} - ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter - ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables - ## For example: - ## extraEnvVars: - ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS - ## value: "true" - ## - extraEnvVars: [] - ## PostgreSQL Prometheus exporter containers' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context - ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser - ## @param metrics.containerSecurityContext.runAsGroup Set PostgreSQL Prometheus exporter containers' Security Context runAsGroup - ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot - ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set PostgreSQL Prometheus exporter containers' Security Context allowPrivilegeEscalation - ## @param metrics.containerSecurityContext.seccompProfile.type Set PostgreSQL Prometheus exporter containers' Security Context seccompProfile.type - ## @param metrics.containerSecurityContext.capabilities.drop Set PostgreSQL Prometheus exporter containers' Security Context capabilities.drop - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers - ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param 
metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers - ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers - ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe - ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port - ## - containerPorts: - metrics: 9187 - ## PostgreSQL Prometheus exporter resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container - ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container - ## - resources: - limits: {} - requests: {} - ## Service configuration - ## - service: - ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port - ## - ports: - metrics: 9187 - ## @param metrics.service.clusterIP Static clusterIP or None for headless services - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address - ## - clusterIP: "" - ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin - ## Values: ClientIP or None - ## ref: https://kubernetes.io/docs/user-guide/services/ - ## - sessionAffinity: None - ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint - ## - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" - ## Prometheus Operator ServiceMonitor configuration - 
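A minimal sketch enabling the Prometheus exporter with modest resources, using only parameters listed above; the numbers are illustrative, not recommendations:

metrics:
  enabled: true
  resources:
    requests:
      cpu: 50m
      memory: 64Mi
    limits:
      memory: 128Mi
  readinessProbe:
    initialDelaySeconds: 10   # small extra start-up headroom; the defaults above are usually fine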
## - serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator - ## - enabled: false - ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) - ## - namespace: "" - ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: "" - ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - scrapeTimeout: "" - ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus - ## - labels: {} - ## @param metrics.serviceMonitor.selector Prometheus instance selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## - selector: {} - ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## - relabelings: [] - ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## - metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## - honorLabels: false - ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. - ## - jobLabel: "" - ## Custom PrometheusRule to be defined - ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## - prometheusRule: - ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator - ## - enabled: false - ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) - ## - namespace: "" - ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus - ## - labels: {} - ## @param metrics.prometheusRule.rules PrometheusRule definitions - ## Make sure to constraint the rules to the current postgresql service. - ## rules: - ## - alert: HugeReplicationLag - ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1 - ## for: 1m - ## labels: - ## severity: critical - ## annotations: - ## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). - ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
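Putting the monitoring pieces together, a hedged sketch that enables scraping via a ServiceMonitor and reuses the replication-lag rule from the comments above; the `release` label is an assumption about how the Prometheus Operator instance selects ServiceMonitors:

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    interval: 30s
    labels:
      release: kube-prometheus-stack   # assumption: label your Prometheus instance selects on
  prometheusRule:
    enabled: true
    rules:
      - alert: HugeReplicationLag      # adapted from the example rule in the comments above
        expr: 'pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1'
        for: 1m
        labels:
          severity: critical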
- ## - rules: [] diff --git a/postgresql/sealed-secret.yaml b/postgresql/sealed-secret.yaml deleted file mode 100644 index 3c5175b..0000000 --- a/postgresql/sealed-secret.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bitnami.com/v1alpha1 -kind: SealedSecret -metadata: - creationTimestamp: null - name: postgresql-default-credentials - namespace: postgresql-system -spec: - encryptedData: - password: AgAN2drH19WiBU8KYZyN8N0T1o8Sh9ti1M5kroU+xDpDD+pOLlZZEw63qcUeeK2paiTm7E3hVO/EnCNyGUBvrDKQzoMNsImbsTMJMVHldiTZedZV1IQxQXIYELgUtk93I2WoOiNvpfL+ro2vomjqPExlVeI1tuqPVdL1+xZYfKfFk+pPL3kLpRuO5HDmwcjy12yYd0E3RqU0g58O7UCCCdQRMOtU8/Z/MPM0I3ZGxG5DQCN3FEra8g1wacvsOplJVGYFzIBS2T7tPyW6I5zW9mFPDozMwqINukuoDC4uSUGSP2Ka2al3VyZiZnXySV3LJ38yj9TpZpTCKY2pgDeMue1hl50xMoCGhBXGzN3lCx6M4/us42a+oss/dn8oXFLAbOVaI2B7bpWHSz8fykdpogpqGgsa23gCuR2V8crZ7xVuACXqNDyunoHLgph8McFDsBXWNcyTg7ocILIjVKFLL4LDhtFQgHZcZXiTe6kMJNdKMxnH/0z/A00JO7dhU2uub31Oa0LwiE/rWO31E39tDZj4o0bRez1jsneuvbMVvwYyyr0OmOfdznv9qvGXbFSgGYCNUuTAPzFRMU4NkIup4RL3a0s2Fg28X79JIaWGjpuXgfyUKiBqUe7f6FAKkiEFeqYCJoccVQpbiYOODjyV5+89tfopmJaj7HZ1t3HfrDeGk/Vj6w4C6e2avCl+qLWqz2nFyM526ymdfVaWV5B945pFTTFrlxh8lRH7Ej3qGJEz1Q== - postgres-password: AgAwdjZR9WDv74oup8dhkzKxYYrMOd40O9S09t8pQspuw/xiO/CaeNFkggWrVVbNaFI1nnQrd/3JFTu6/1mwinr13MqAKKmile/rXSSKnXo+f90PSEFlsanDpfMSuhZKGF5gDHp5HwWIGDl8P6uBC50/Z7u/+muPcdgnCgg7rVMo5EiipgZgQOJXuAxHN8a8w1HMQoVd4PD1SewTHfbwxKsZoBYgy+RL5vS4Gd8d4UbqkSFk8h+uZHdsJALrZ9PCsUDMInT06Ll1YgmcSigtFR0bM46kfbGr1tvXKmv2lYMBjn8gcOS3rRxKmhqT/HUxaepg6TDjoG/Hw0oNtxGHRmwwnzuDBtPtCJOCb3LEodIAXsz7U3hUrvTI0/BMsXBP/wZFB7mZ6mvy4tfz2SkHEIoqu+um2I6sC9OHPNFIQPSq59PX2t2G9RH4aTJVWKFcLv7fZ7+w+ot0D4300z0fRbKYCgUowKUtkd+H3eYmu1AzMpFkEUh+9NahHNeXCf+YsAHZb0vm4mz39UWTVuRzdwGFFG193goOFI6A2t9n2E18f/UQnhKewi9z552THTqsFO3VE/Kq2C9q+AA3BaGVCSlNw7eRXRr+lSaNiuTGgEZeQZubBLriC7rTzrfJjP2ik2vayCXb1dvGPrSGst4oo9IYnb548uFjgpd7ZQz0WGCufBhM7GNv7XaC+ZQEsCHPwJLaiD6irl8d1IKn+7g9vnOPUYRhcTiPtuEuWFneQ9tNMw== - username: AgAn4TsaveRieevxEf80zFJeKLkQoLMf+o5upoeq5YdXRnyV00xk1yL0QBYdvNdt3FnEmZR5R0oiJKimZmQqHOHH9++G1cqACsmNmEbjU+BBUNwVOhXZAkU1xHvpAACNKaqiqlhR5uGYx+rP6GbsW11UrwTu4oeYBqyGXtO5i9FaFaIbK32UPJ4e5lsml0l01reWWwZI6DH9L2O6E7wif5Pxw6wEcQphfk2YlTddXGRZA0dI1xFSSuvjxRRaASpfJqU/ztqdzF/MVCKnheZneuVYyw7w7Suv87RIx4ddrJKqDz0Fla9LWAC1xJMqGxWPE0Bgd9jWlRK4Cy6DogZ5ZoJv+pZm8zXy3N60SSM2ZM7TaP+FOz8FhgoDYXSn3lKPlLY2mlOBFJ8PYL1dcrGqDkHKiklAUelbID1Hiw7CdcavohFTi9CsaZXWpyeAEiMmiMpXY2+nnwppqv0+Oc82DvU4305q8FzFmi9N2olR4vnvzjBclHsXsSesJ5rKjNHk3aApTT2MIbOexDXTyIXIkrLRFK1/KbUx86qjlPOycZ2YlqfvbakEajFVV3TyR5Bb7ZHf5yQOOyzkmJj6Z3xaNmfvskqHyhImQHKRHEKjyvoKqWMrAQZEUm4LwLTdSRMo3o9p00Xb2YFFCOOhJF9hoxf65TvXkWNJyYMNjBW1a1H8gnrjT0zEyV4Veh75YRDmzgtl2CR/2w== - template: - metadata: - creationTimestamp: null - name: postgresql-default-credentials - namespace: postgresql-system diff --git a/redis-insight/redis-insight-deployment.yaml b/redis-insight/redis-insight-deployment.yaml deleted file mode 100644 index 94bfe19..0000000 --- a/redis-insight/redis-insight-deployment.yaml +++ /dev/null @@ -1,47 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: redisinsight #deployment name - namespace: redis-system - labels: - app: redisinsight #deployment label -spec: - replicas: 1 #a single replica pod - strategy: - type: Recreate - selector: - matchLabels: - app: redisinsight #which pods is the deployment managing, as defined by the pod template - template: #pod template - metadata: - labels: - app: redisinsight #label for pod/s - spec: - volumes: - - name: db - persistentVolumeClaim: - claimName: redisinsight-pvc - initContainers: 
- - name: init - image: busybox - command: - - /bin/sh - - '-c' - - | - chown -R 1001 /db - resources: {} - volumeMounts: - - name: db - mountPath: /db - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - containers: - - name: redisinsight #Container name (DNS_LABEL, unique) - image: redislabs/redisinsight:latest #repo/image - imagePullPolicy: IfNotPresent #Always pull image - volumeMounts: - - name: db #Pod volumes to mount into the container's filesystem. Cannot be updated. - mountPath: /db - ports: - - containerPort: 8001 #exposed container port and protocol - protocol: TCP diff --git a/redis-insight/redis-insight-pvc-db.yaml b/redis-insight/redis-insight-pvc-db.yaml deleted file mode 100644 index 3bd8ba5..0000000 --- a/redis-insight/redis-insight-pvc-db.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: redisinsight-pvc - namespace: redis-system - labels: - app: redisinsight -spec: - storageClassName: longhorn - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi diff --git a/redis-insight/redis-insight-service.yaml b/redis-insight/redis-insight-service.yaml deleted file mode 100644 index f9c3ea2..0000000 --- a/redis-insight/redis-insight-service.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: redisinsight-service # name should not be 'redisinsight' - namespace: redis-system -spec: - type: LoadBalancer - ports: - - port: 80 - targetPort: 8001 - selector: - app: redisinsight diff --git a/redis/helmrelease-redis.yaml b/redis/helmrelease-redis.yaml deleted file mode 100644 index 66019e5..0000000 --- a/redis/helmrelease-redis.yaml +++ /dev/null @@ -1,1916 +0,0 @@ -apiVersion: helm.toolkit.fluxcd.io/v2beta1 -kind: HelmRelease -metadata: - name: redis - namespace: redis-system -spec: - chart: - spec: - chart: redis - sourceRef: - kind: HelmRepository - name: bitnami - namespace: flux-system - - interval: 15m0s - timeout: 5m - releaseName: redis - values: - # Copyright VMware, Inc. - # SPDX-License-Identifier: APACHE-2.0 - - ## @section Global parameters - ## Global Docker image parameters - ## Please, note that this will override the image parameters, including dependencies, configured to use the global value - ## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass - ## - - ## @param global.imageRegistry Global Docker image registry - ## @param global.imagePullSecrets Global Docker registry secret names as an array - ## @param global.storageClass Global StorageClass for Persistent Volume(s) - ## @param global.redis.password Global Redis® password (overrides `auth.password`) - ## - global: - imageRegistry: "" - ## E.g. 
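The init container above chowns /db because the RedisInsight image writes its local database as a non-root user. A hedged alternative (assuming UID/GID 1001, mirroring the init container) is to let the kubelet fix group ownership with a pod-level fsGroup in the Deployment's pod template:

# pod template fragment only; replaces the chown init container, not the whole Deployment
spec:
  securityContext:
    fsGroup: 1001              # assumption: matches the user the image writes /db with
  containers:
    - name: redisinsight
      image: redislabs/redisinsight:latest
      volumeMounts:
        - name: db
          mountPath: /db
  volumes:
    - name: db
      persistentVolumeClaim:
        claimName: redisinsight-pvc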
- ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - storageClass: "longhorn" - redis: - password: "" - - ## @section Common parameters - ## - - ## @param kubeVersion Override Kubernetes version - ## - kubeVersion: "" - ## @param nameOverride String to partially override common.names.fullname - ## - nameOverride: "" - ## @param fullnameOverride String to fully override common.names.fullname - ## - fullnameOverride: "" - ## @param commonLabels Labels to add to all deployed objects - ## - commonLabels: {} - ## @param commonAnnotations Annotations to add to all deployed objects - ## - commonAnnotations: {} - ## @param secretAnnotations Annotations to add to secret - ## - secretAnnotations: {} - ## @param clusterDomain Kubernetes cluster domain name - ## - clusterDomain: cluster.local - ## @param extraDeploy Array of extra objects to deploy with the release - ## - extraDeploy: [] - ## @param useHostnames Use hostnames internally when announcing replication. If false, the hostname will be resolved to an IP address - ## - useHostnames: true - ## @param nameResolutionThreshold Failure threshold for internal hostnames resolution - ## - nameResolutionThreshold: 5 - ## @param nameResolutionTimeout Timeout seconds between probes for internal hostnames resolution - ## - nameResolutionTimeout: 5 - - ## Enable diagnostic mode in the deployment - ## - diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) - ## - enabled: false - ## @param diagnosticMode.command Command to override all containers in the deployment - ## - command: - - sleep - ## @param diagnosticMode.args Args to override all containers in the deployment - ## - args: - - infinity - - ## @section Redis® Image parameters - ## - - ## Bitnami Redis® image - ## ref: https://hub.docker.com/r/bitnami/redis/tags/ - ## @param image.registry Redis® image registry - ## @param image.repository Redis® image repository - ## @param image.tag Redis® image tag (immutable tags are recommended) - ## @param image.digest Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param image.pullPolicy Redis® image pull policy - ## @param image.pullSecrets Redis® image pull secrets - ## @param image.debug Enable image debug mode - ## - image: - registry: docker.io - repository: bitnami/redis - tag: 7.2.4 - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - - ## @section Redis® common configuration parameters - ## https://github.com/bitnami/containers/tree/main/bitnami/redis#configuration - ## - - ## @param architecture Redis® architecture. 
Allowed values: `standalone` or `replication` - ## - architecture: standalone - ## Redis® Authentication parameters - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/redis#setting-the-server-password-on-first-run - ## - auth: - ## @param auth.enabled Enable password authentication - ## - enabled: true - ## @param auth.sentinel Enable password authentication on sentinels too - ## - sentinel: true - ## @param auth.password Redis® password - ## Defaults to a random 10-character alphanumeric string if not set - ## - #password: "" - ## @param auth.existingSecret The name of an existing secret with Redis® credentials - ## NOTE: When it's set, the previous `auth.password` parameter is ignored - ## - existingSecret: "redis-key" - ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret - ## NOTE: ignored unless `auth.existingSecret` parameter is set - ## - existingSecretPasswordKey: "password" - ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable - ## - usePasswordFiles: false - - ## @param commonConfiguration [string] Common configuration to be added into the ConfigMap - ## ref: https://redis.io/topics/config - ## - commonConfiguration: |- - # Enable AOF https://redis.io/topics/persistence#append-only-file - appendonly yes - # Disable RDB persistence, AOF persistence already enabled. - save "" - ## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis® nodes - ## - existingConfigmap: "" - - ## @section Redis® master configuration parameters - ## - - master: - ## @param master.count Number of Redis® master instances to deploy (experimental, requires additional configuration) - ## - count: 1 - ## @param master.configuration Configuration for Redis® master nodes - ## ref: https://redis.io/topics/config - ## - configuration: "" - ## @param master.disableCommands Array with Redis® commands to disable on master nodes - ## Commands will be completely disabled by renaming each to an empty string. 
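auth.existingSecret above points the chart at a Secret named redis-key holding a password key. A sketch of the Secret shape the chart expects; in practice this repo would more likely ship it as a SealedSecret, as it does for PostgreSQL, and the value below is only a placeholder:

apiVersion: v1
kind: Secret
metadata:
  name: redis-key            # matches auth.existingSecret
  namespace: redis-system    # namespace of the Redis HelmRelease
type: Opaque
stringData:
  password: "<redis-password>"   # placeholder; never commit real credentials in plain text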
- ## ref: https://redis.io/topics/security#disabling-of-specific-commands - ## - disableCommands: - - FLUSHDB - ## @param master.command Override default container command (useful when using custom images) - ## - command: [] - ## @param master.args Override default container args (useful when using custom images) - ## - args: [] - ## @param master.enableServiceLinks Whether information about services should be injected into pod's environment variable - ## - enableServiceLinks: true - ## @param master.preExecCmds Additional commands to run prior to starting Redis® master - ## - preExecCmds: [] - ## @param master.extraFlags Array with additional command line flags for Redis® master - ## e.g: - ## extraFlags: - ## - "--maxmemory-policy volatile-ttl" - ## - "--repl-backlog-size 1024mb" - ## - extraFlags: [] - ## @param master.extraEnvVars Array with extra environment variables to add to Redis® master nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® master nodes - ## - extraEnvVarsCM: "" - ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® master nodes - ## - extraEnvVarsSecret: "" - ## @param master.containerPorts.redis Container port to open on Redis® master nodes - ## - containerPorts: - redis: 6379 - ## Configure extra options for Redis® containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param master.startupProbe.enabled Enable startupProbe on Redis® master nodes - ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param master.startupProbe.periodSeconds Period seconds for startupProbe - ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param master.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param master.livenessProbe.enabled Enable livenessProbe on Redis® master nodes - ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param master.readinessProbe.enabled Enable readinessProbe on Redis® master nodes - ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 1 - 
successThreshold: 1 - failureThreshold: 5 - ## @param master.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Redis® master resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param master.resources.limits The resources limits for the Redis® master containers - ## @param master.resources.requests The requested resources for the Redis® master containers - ## - resources: - limits: {} - requests: {} - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param master.podSecurityContext.enabled Enabled Redis® master pods' Security Context - ## @param master.podSecurityContext.fsGroup Set Redis® master pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroup: 1001 - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param master.containerSecurityContext.enabled Enabled Redis® master containers' Security Context - ## @param master.containerSecurityContext.runAsUser Set Redis® master containers' Security Context runAsUser - ## @param master.containerSecurityContext.runAsGroup Set Redis® master containers' Security Context runAsGroup - ## @param master.containerSecurityContext.runAsNonRoot Set Redis® master containers' Security Context runAsNonRoot - ## @param master.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate Redis® pod(s) privileges - ## @param master.containerSecurityContext.seccompProfile.type Set Redis® master containers' Security Context seccompProfile - ## @param master.containerSecurityContext.capabilities.drop Set Redis® master containers' Security Context capabilities to drop - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param master.kind Use either Deployment or StatefulSet (default) - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ - ## - kind: StatefulSet - ## @param master.schedulerName Alternate scheduler for Redis® master pods - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param master.updateStrategy.type Redis® master statefulset strategy type - ## @skip master.updateStrategy.rollingUpdate - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment) - ## - type: RollingUpdate - ## @param master.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update - ## - minReadySeconds: 0 - ## @param master.priorityClassName Redis® master pods' priorityClassName - ## - priorityClassName: "" - ## @param master.hostAliases Redis® master pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param 
master.podLabels Extra labels for Redis® master pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param master.podAnnotations Annotations for Redis® master pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis® master pods - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ - ## - shareProcessNamespace: false - ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node master.affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set - ## - key: "" - ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param master.affinity Affinity for Redis® master pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param master.nodeSelector Node labels for Redis® master pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param master.tolerations Tolerations for Redis® master pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param master.topologySpreadConstraints Spread Constraints for Redis® master pod assignment - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## E.g. - ## topologySpreadConstraints: - ## - maxSkew: 1 - ## topologyKey: node - ## whenUnsatisfiable: DoNotSchedule - ## - topologySpreadConstraints: [] - ## @param master.dnsPolicy DNS Policy for Redis® master pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ - ## E.g. - ## dnsPolicy: ClusterFirst - ## - dnsPolicy: "" - ## @param master.dnsConfig DNS Configuration for Redis® master pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ - ## E.g. 
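A sketch of the scheduling knobs above; the zone label, values, and taint are hypothetical and only illustrate the preset format:

master:
  nodeAffinityPreset:
    type: soft                           # prefer, but do not require, the matching nodes
    key: topology.kubernetes.io/zone     # hypothetical node label
    values:
      - zone-a
  tolerations:
    - key: dedicated                     # hypothetical taint on storage nodes
      operator: Equal
      value: database
      effect: NoSchedule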
- ## dnsConfig: - ## options: - ## - name: ndots - ## value: "4" - ## - name: single-request-reopen - ## - dnsConfig: {} - ## @param master.lifecycleHooks for the Redis® master container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis® master pod(s) - ## - extraVolumes: [] - ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® master container(s) - ## - extraVolumeMounts: [] - ## @param master.sidecars Add additional sidecar containers to the Redis® master pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param master.initContainers Add additional init containers to the Redis® master pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - ## Persistence parameters - ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## @param master.persistence.enabled Enable persistence on Redis® master nodes using Persistent Volume Claims - ## - enabled: true - ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. - ## - medium: "" - ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. - ## - sizeLimit: "" - ## @param master.persistence.path The path the volume will be mounted at on Redis® master containers - ## NOTE: Useful when using different Redis® images - ## - path: /data - ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis® master containers - ## NOTE: Useful in dev environments - ## - subPath: "" - ## @param master.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® master containers - ## - subPathExpr: "" - ## @param master.persistence.storageClass Persistent Volume storage class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner - ## - storageClass: "longhorn" - ## @param master.persistence.accessModes Persistent Volume access modes - ## - accessModes: - - ReadWriteOnce - ## @param master.persistence.size Persistent Volume size - ## - size: 8Gi - ## @param master.persistence.annotations Additional custom annotations for the PVC - ## - annotations: {} - ## @param master.persistence.labels Additional custom labels for the PVC - ## - labels: {} - ## @param master.persistence.selector Additional labels to match for the PVC - ## e.g: - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param master.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## @param master.persistence.existingClaim Use a existing PVC which must be created manually before bound - ## NOTE: requires master.persistence.enabled: true - ## - existingClaim: "" - ## persistentVolumeClaimRetentionPolicy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## @param master.persistentVolumeClaimRetentionPolicy.enabled Controls if and how 
PVCs are deleted during the lifecycle of a StatefulSet - ## @param master.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## @param master.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - persistentVolumeClaimRetentionPolicy: - enabled: false - whenScaled: Retain - whenDeleted: Retain - ## Redis® master service parameters - ## - service: - ## @param master.service.type Redis® master service type - ## - type: ClusterIP - ## @param master.service.ports.redis Redis® master service port - ## - ports: - redis: 6379 - ## @param master.service.nodePorts.redis Node port for Redis® master - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - redis: "" - ## @param master.service.externalTrafficPolicy Redis® master service external traffic policy - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param master.service.internalTrafficPolicy Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) - ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ - ## - internalTrafficPolicy: Cluster - ## @param master.service.clusterIP Redis® master service Cluster IP - ## - clusterIP: "" - ## @param master.service.loadBalancerIP Redis® master service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param master.service.loadBalancerSourceRanges Redis® master service Load Balancer sources - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g. - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param master.service.externalIPs Redis® master service External IPs - ## https://kubernetes.io/docs/concepts/services-networking/service/#external-ips - ## e.g. - ## externalIPs: - ## - 10.10.10.1 - ## - 201.22.30.1 - ## - externalIPs: [] - ## @param master.service.annotations Additional custom annotations for Redis® master service - ## - annotations: {} - ## @param master.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param master.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods - ## - terminationGracePeriodSeconds: 30 - ## ServiceAccount configuration - ## - - serviceAccount: - ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: false - ## @param master.serviceAccount.name The name of the ServiceAccount to use. 
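If the master ever needed to be reachable from outside the cluster, the service parameters above could be switched as follows; the node port number is an illustrative pick inside the documented 30000-32767 range:

master:
  service:
    type: NodePort
    nodePorts:
      redis: "30079"               # illustrative port within the NodePort range noted above
    externalTrafficPolicy: Local   # preserve client source IPs, per the linked docs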
- ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param master.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server - ## - automountServiceAccountToken: true - ## @param master.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - - ## @section Redis® replicas configuration parameters - ## - - replica: - ## @param replica.replicaCount Number of Redis® replicas to deploy - ## - replicaCount: 1 - ## @param replica.configuration Configuration for Redis® replicas nodes - ## ref: https://redis.io/topics/config - ## - configuration: "" - ## @param replica.disableCommands Array with Redis® commands to disable on replicas nodes - ## Commands will be completely disabled by renaming each to an empty string. - ## ref: https://redis.io/topics/security#disabling-of-specific-commands - ## - disableCommands: - - FLUSHDB - - FLUSHALL - ## @param replica.command Override default container command (useful when using custom images) - ## - command: [] - ## @param replica.args Override default container args (useful when using custom images) - ## - args: [] - ## @param replica.enableServiceLinks Whether information about services should be injected into pod's environment variable - ## - enableServiceLinks: true - ## @param replica.preExecCmds Additional commands to run prior to starting Redis® replicas - ## - preExecCmds: [] - ## @param replica.extraFlags Array with additional command line flags for Redis® replicas - ## e.g: - ## extraFlags: - ## - "--maxmemory-policy volatile-ttl" - ## - "--repl-backlog-size 1024mb" - ## - extraFlags: [] - ## @param replica.extraEnvVars Array with extra environment variables to add to Redis® replicas nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® replicas nodes - ## - extraEnvVarsCM: "" - ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® replicas nodes - ## - extraEnvVarsSecret: "" - ## @param replica.externalMaster.enabled Use external master for bootstrapping - ## @param replica.externalMaster.host External master host to bootstrap from - ## @param replica.externalMaster.port Port for Redis service external master host - ## - externalMaster: - enabled: false - host: "" - port: 6379 - ## @param replica.containerPorts.redis Container port to open on Redis® replicas nodes - ## - containerPorts: - redis: 6379 - ## Configure extra options for Redis® containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param replica.startupProbe.enabled Enable startupProbe on Redis® replicas nodes - ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe - ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param replica.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: true - initialDelaySeconds: 10 - 
periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 22 - ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes - ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes - ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 5 - ## @param replica.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Redis® replicas resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param replica.resources.limits The resources limits for the Redis® replicas containers - ## @param replica.resources.requests The requested resources for the Redis® replicas containers - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- limits: {} - # cpu: 250m - # memory: 256Mi - requests: {} - # cpu: 250m - # memory: 256Mi - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param replica.podSecurityContext.enabled Enabled Redis® replicas pods' Security Context - ## @param replica.podSecurityContext.fsGroup Set Redis® replicas pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroup: 1001 - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param replica.containerSecurityContext.enabled Enabled Redis® replicas containers' Security Context - ## @param replica.containerSecurityContext.runAsUser Set Redis® replicas containers' Security Context runAsUser - ## @param replica.containerSecurityContext.runAsGroup Set Redis® replicas containers' Security Context runAsGroup - ## @param replica.containerSecurityContext.runAsNonRoot Set Redis® replicas containers' Security Context runAsNonRoot - ## @param replica.containerSecurityContext.allowPrivilegeEscalation Set Redis® replicas pod's Security Context allowPrivilegeEscalation - ## @param replica.containerSecurityContext.seccompProfile.type Set Redis® replicas containers' Security Context seccompProfile - ## @param replica.containerSecurityContext.capabilities.drop Set Redis® replicas containers' Security Context capabilities to drop - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param replica.schedulerName Alternate scheduler for Redis® replicas pods - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param replica.updateStrategy.type Redis® replicas statefulset strategy type - ## @skip replica.updateStrategy.rollingUpdate - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment) - ## - type: RollingUpdate - ## @param replica.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update - ## - minReadySeconds: 3 - ## @param replica.priorityClassName Redis® replicas pods' priorityClassName - ## - priorityClassName: "" - ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of %%MAIN_CONTAINER_NAME%% pods - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies - ## - podManagementPolicy: "" - ## @param replica.hostAliases Redis® replicas pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param replica.podLabels Extra labels for Redis® replicas pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param replica.podAnnotations Annotations for Redis® replicas pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis® replicas pods - ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ - ## - shareProcessNamespace: false - ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set - ## - key: "" - ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param replica.affinity Affinity for Redis® replicas pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param replica.nodeSelector Node labels for Redis® replicas pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param replica.tolerations Tolerations for Redis® replicas pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param replica.topologySpreadConstraints Spread Constraints for Redis® replicas pod assignment - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## E.g. - ## topologySpreadConstraints: - ## - maxSkew: 1 - ## topologyKey: node - ## whenUnsatisfiable: DoNotSchedule - ## - topologySpreadConstraints: [] - ## @param replica.dnsPolicy DNS Policy for Redis® replica pods - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ - ## E.g. - ## dnsPolicy: ClusterFirst - ## - dnsPolicy: "" - ## @param replica.dnsConfig DNS Configuration for Redis® replica pods - ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ - ## E.g. 
- ## dnsConfig: - ## options: - ## - name: ndots - ## value: "4" - ## - name: single-request-reopen - ## - dnsConfig: {} - ## @param replica.lifecycleHooks for the Redis® replica container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis® replicas pod(s) - ## - extraVolumes: [] - ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s) - ## - extraVolumeMounts: [] - ## @param replica.sidecars Add additional sidecar containers to the Redis® replicas pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param replica.initContainers Add additional init containers to the Redis® replicas pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - ## Persistence Parameters - ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## @param replica.persistence.enabled Enable persistence on Redis® replicas nodes using Persistent Volume Claims - ## - enabled: true - ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes. - ## - medium: "" - ## @param replica.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. - ## - sizeLimit: "" - ## @param replica.persistence.path The path the volume will be mounted at on Redis® replicas containers - ## NOTE: Useful when using different Redis® images - ## - path: /data - ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis® replicas containers - ## NOTE: Useful in dev environments - ## - subPath: "" - ## @param replica.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers - ## - subPathExpr: "" - ## @param replica.persistence.storageClass Persistent Volume storage class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner - ## - storageClass: "" - ## @param replica.persistence.accessModes Persistent Volume access modes - ## - accessModes: - - ReadWriteOnce - ## @param replica.persistence.size Persistent Volume size - ## - size: 8Gi - ## @param replica.persistence.annotations Additional custom annotations for the PVC - ## - annotations: {} - ## @param replica.persistence.labels Additional custom labels for the PVC - ## - labels: {} - ## @param replica.persistence.selector Additional labels to match for the PVC - ## e.g: - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param replica.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## @param replica.persistence.existingClaim Use a existing PVC which must be created manually before bound - ## NOTE: requires replica.persistence.enabled: true - ## - existingClaim: "" - ## persistentVolumeClaimRetentionPolicy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## @param 
replica.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet - ## @param replica.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## @param replica.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - persistentVolumeClaimRetentionPolicy: - enabled: false - whenScaled: Retain - whenDeleted: Retain - ## Redis® replicas service parameters - ## - service: - ## @param replica.service.type Redis® replicas service type - ## - type: ClusterIP - ## @param replica.service.ports.redis Redis® replicas service port - ## - ports: - redis: 6379 - ## @param replica.service.nodePorts.redis Node port for Redis® replicas - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - redis: "" - ## @param replica.service.externalTrafficPolicy Redis® replicas service external traffic policy - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param replica.service.internalTrafficPolicy Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) - ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ - ## - internalTrafficPolicy: Cluster - ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param replica.service.clusterIP Redis® replicas service Cluster IP - ## - clusterIP: "" - ## @param replica.service.loadBalancerIP Redis® replicas service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param replica.service.loadBalancerSourceRanges Redis® replicas service Load Balancer sources - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g. 
- ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param replica.service.annotations Additional custom annotations for Redis® replicas service - ## - annotations: {} - ## @param replica.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param replica.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods - ## - terminationGracePeriodSeconds: 30 - ## Autoscaling configuration - ## - autoscaling: - - ## @param replica.autoscaling.enabled Enable replica autoscaling settings - ## - enabled: false - ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling - ## - minReplicas: 1 - ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling - ## - maxReplicas: 11 - ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling - ## - targetCPU: "" - ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling - ## - targetMemory: "" - ## ServiceAccount configuration - ## - serviceAccount: - ## @param replica.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: false - ## @param replica.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param replica.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server - ## - automountServiceAccountToken: true - ## @param replica.serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## @section Redis® Sentinel configuration parameters - ## - - sentinel: - ## @param sentinel.enabled Use Redis® Sentinel on Redis® pods. - ## IMPORTANT: this will disable the master and replicas services and - ## create a single Redis® service exposing both the Redis and Sentinel ports - ## - enabled: false - ## Bitnami Redis® Sentinel image version - ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ - ## @param sentinel.image.registry Redis® Sentinel image registry - ## @param sentinel.image.repository Redis® Sentinel image repository - ## @param sentinel.image.tag Redis® Sentinel image tag (immutable tags are recommended) - ## @param sentinel.image.digest Redis® Sentinel image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag - ## @param sentinel.image.pullPolicy Redis® Sentinel image pull policy - ## @param sentinel.image.pullSecrets Redis® Sentinel image pull secrets - ## @param sentinel.image.debug Enable image debug mode - ## - image: - registry: docker.io - repository: bitnami/redis-sentinel - tag: 7.2.1-debian-11-r26 - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false - ## @param sentinel.annotations Additional custom annotations for Redis® Sentinel resource - ## - annotations: {} - ## @param sentinel.masterSet Master set name - ## - masterSet: mymaster - ## @param sentinel.quorum Sentinel Quorum - ## - quorum: 2 - ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out. - ## - getMasterTimeout: 99 - ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. - ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data. - ## NOTE: This feature requires a "downAfterMilliseconds" value less or equal to 2000. - ## - automateClusterRecovery: false - ## @param sentinel.redisShutdownWaitFailover Whether the Redis® master container waits for the failover at shutdown (in addition to the Redis® Sentinel container). 
- ## - redisShutdownWaitFailover: true - ## Sentinel timing restrictions - ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis® node is down - ## @param sentinel.failoverTimeout Timeout for performing a election failover - ## - downAfterMilliseconds: 60000 - failoverTimeout: 180000 - ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover - ## - parallelSyncs: 1 - ## @param sentinel.configuration Configuration for Redis® Sentinel nodes - ## ref: https://redis.io/topics/sentinel - ## - configuration: "" - ## @param sentinel.command Override default container command (useful when using custom images) - ## - command: [] - ## @param sentinel.args Override default container args (useful when using custom images) - ## - args: [] - ## @param sentinel.enableServiceLinks Whether information about services should be injected into pod's environment variable - ## - enableServiceLinks: true - ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis® Sentinel - ## - preExecCmds: [] - ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis® Sentinel nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes - ## - extraEnvVarsCM: "" - ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® Sentinel nodes - ## - extraEnvVarsSecret: "" - ## @param sentinel.externalMaster.enabled Use external master for bootstrapping - ## @param sentinel.externalMaster.host External master host to bootstrap from - ## @param sentinel.externalMaster.port Port for Redis service external master host - ## - externalMaster: - enabled: false - host: "" - port: 6379 - ## @param sentinel.containerPorts.sentinel Container port to open on Redis® Sentinel nodes - ## - containerPorts: - sentinel: 26379 - ## Configure extra options for Redis® containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis® Sentinel nodes - ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe - ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 22 - ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis® Sentinel nodes - ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - 
failureThreshold: 6 - ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis® Sentinel nodes - ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 20 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 6 - ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## Persistence parameters - ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - ## @param sentinel.persistence.enabled Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental) - ## - enabled: false - ## @param sentinel.persistence.storageClass Persistent Volume storage class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner - ## - storageClass: "" - ## @param sentinel.persistence.accessModes Persistent Volume access modes - ## - accessModes: - - ReadWriteOnce - ## @param sentinel.persistence.size Persistent Volume size - ## - size: 100Mi - ## @param sentinel.persistence.annotations Additional custom annotations for the PVC - ## - annotations: {} - ## @param sentinel.persistence.labels Additional custom labels for the PVC - ## - labels: {} - ## @param sentinel.persistence.selector Additional labels to match for the PVC - ## e.g: - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param sentinel.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## @param sentinel.persistence.medium Provide a medium for `emptyDir` volumes. - ## - medium: "" - ## @param sentinel.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 
- ## - sizeLimit: "" - ## persistentVolumeClaimRetentionPolicy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## @param sentinel.persistentVolumeClaimRetentionPolicy.enabled Controls if and how PVCs are deleted during the lifecycle of a StatefulSet - ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## @param sentinel.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - persistentVolumeClaimRetentionPolicy: - enabled: false - whenScaled: Retain - whenDeleted: Retain - ## Redis® Sentinel resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param sentinel.resources.limits The resources limits for the Redis® Sentinel containers - ## @param sentinel.resources.requests The requested resources for the Redis® Sentinel containers - ## - resources: - limits: {} - requests: {} - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param sentinel.containerSecurityContext.enabled Enabled Redis® Sentinel containers' Security Context - ## @param sentinel.containerSecurityContext.runAsUser Set Redis® Sentinel containers' Security Context runAsUser - ## @param sentinel.containerSecurityContext.runAsGroup Set Redis® Sentinel containers' Security Context runAsGroup - ## @param sentinel.containerSecurityContext.runAsNonRoot Set Redis® Sentinel containers' Security Context runAsNonRoot - ## @param sentinel.containerSecurityContext.allowPrivilegeEscalation Set Redis® Sentinel containers' Security Context allowPrivilegeEscalation - ## @param sentinel.containerSecurityContext.seccompProfile.type Set Redis® Sentinel containers' Security Context seccompProfile - ## @param sentinel.containerSecurityContext.capabilities.drop Set Redis® Sentinel containers' Security Context capabilities to drop - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param sentinel.lifecycleHooks for the Redis® sentinel container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis® Sentinel - ## - extraVolumes: [] - ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s) - ## - extraVolumeMounts: [] - ## Redis® Sentinel service parameters - ## - service: - ## @param sentinel.service.type Redis® Sentinel service type - ## - type: ClusterIP - ## @param sentinel.service.ports.redis Redis® service port for Redis® - ## @param sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel - ## - ports: - redis: 6379 - sentinel: 26379 - ## @param sentinel.service.nodePorts.redis Node port for Redis® - ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## NOTE: choose port between <30000-32767> - ## NOTE: By leaving these values blank, they will be generated by ports-configmap - ## If setting manually, please leave at least replica.replicaCount + 1 in between 
sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port - ## - nodePorts: - redis: "" - sentinel: "" - ## @param sentinel.service.externalTrafficPolicy Redis® Sentinel service external traffic policy - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param sentinel.service.clusterIP Redis® Sentinel service Cluster IP - ## - clusterIP: "" - ## @param sentinel.service.loadBalancerIP Redis® Sentinel service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param sentinel.service.loadBalancerSourceRanges Redis® Sentinel service Load Balancer sources - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g. - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param sentinel.service.annotations Additional custom annotations for Redis® Sentinel service - ## - annotations: {} - ## @param sentinel.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param sentinel.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Headless service properties - ## - headless: - ## @param sentinel.service.headless.annotations Annotations for the headless service. - ## - annotations: {} - ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods - ## - terminationGracePeriodSeconds: 30 - - ## @section Other Parameters - ## - - ## @param serviceBindings.enabled Create secret for service binding (Experimental) - ## Ref: https://servicebinding.io/service-provider/ - ## - serviceBindings: - enabled: false - - ## Network Policy configuration - ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources - ## - enabled: false - ## @param networkPolicy.allowExternal Don't require client label for connections - ## When set to false, only pods with the correct client label will have network access to the ports - ## Redis® is listening on. When true, Redis® will accept connections from any source - ## (with the correct destination port). 
- ## - allowExternal: true - ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraIngress: [] - ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces - ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - - metrics: - ## @param networkPolicy.metrics.allowExternal Don't require client label for connections for metrics endpoint - ## When set to false, only pods with the correct client label will have network access to the metrics port - ## - allowExternal: true - ## @param networkPolicy.metrics.ingressNSMatchLabels Labels to match to allow traffic from other namespaces to metrics endpoint - ## @param networkPolicy.metrics.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces to metrics endpoint - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - - ## PodSecurityPolicy configuration - ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later - ## - create: false - ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules - ## - enabled: false - ## RBAC configuration - ## - rbac: - ## @param rbac.create Specifies whether RBAC resources should be created - ## - create: false - ## @param rbac.rules Custom RBAC rules to set - ## e.g: - ## rules: - ## - apiGroups: - ## - "" - ## resources: - ## - pods - ## verbs: - ## - get - ## - list - ## - rules: [] - ## ServiceAccount configuration - ## - serviceAccount: - ## @param serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param serviceAccount.name The name of the ServiceAccount to use. 
- ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server - ## - automountServiceAccountToken: true - ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} - ## Redis® Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ - ## - pdb: - ## @param pdb.create Specifies whether a PodDisruptionBudget should be created - ## - create: false - ## @param pdb.minAvailable Min number of pods that must still be available after the eviction - ## - minAvailable: 1 - ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction - ## - maxUnavailable: "" - ## TLS configuration - ## - tls: - ## @param tls.enabled Enable TLS traffic - ## - enabled: false - ## @param tls.authClients Require clients to authenticate - ## - authClients: true - ## @param tls.autoGenerated Enable autogenerated certificates - ## - autoGenerated: false - ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates - ## - existingSecret: "" - ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead. - ## - certificatesSecret: "" - ## @param tls.certFilename Certificate filename - ## - certFilename: "" - ## @param tls.certKeyFilename Certificate Key filename - ## - certKeyFilename: "" - ## @param tls.certCAFilename CA Certificate filename - ## - certCAFilename: "" - ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers) - ## - dhParamsFilename: "" - - ## @section Metrics Parameters - ## - - metrics: - ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis® metrics - ## - enabled: false - ## Bitnami Redis® Exporter image - ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/ - ## @param metrics.image.registry Redis® Exporter image registry - ## @param metrics.image.repository Redis® Exporter image repository - ## @param metrics.image.tag Redis® Exporter image tag (immutable tags are recommended) - ## @param metrics.image.digest Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param metrics.image.pullPolicy Redis® Exporter image pull policy - ## @param metrics.image.pullSecrets Redis® Exporter image pull secrets - ## - image: - registry: docker.io - repository: bitnami/redis-exporter - tag: 1.55.0-debian-11-r0 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Configure extra options for Redis® containers' liveness, readiness & startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## @param metrics.startupProbe.enabled Enable startupProbe on Redis® replicas nodes - ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe - ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param metrics.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes - ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param metrics.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes - ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param metrics.command Override default metrics container init command (useful when using custom images) - ## - command: [] - ## @param metrics.redisTargetHost A way to specify an alternative Redis® hostname - ## Useful for certificate CN/SAN matching - ## - redisTargetHost: "localhost" - ## @param metrics.extraArgs Extra arguments for Redis® exporter, for example: - ## e.g.: - ## extraArgs: - ## check-keys: myKey,myOtherKey - ## - extraArgs: {} - ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis® exporter - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 
- ## @param metrics.containerSecurityContext.enabled Enabled Redis® exporter containers' Security Context - ## @param metrics.containerSecurityContext.runAsUser Set Redis® exporter containers' Security Context runAsUser - ## @param metrics.containerSecurityContext.runAsGroup Set Redis® exporter containers' Security Context runAsGroup - ## @param metrics.containerSecurityContext.runAsNonRoot Set Redis® exporter containers' Security Context runAsNonRoot - ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set Redis® exporter containers' Security Context allowPrivilegeEscalation - ## @param metrics.containerSecurityContext.seccompProfile.type Set Redis® exporter containers' Security Context seccompProfile - ## @param metrics.containerSecurityContext.capabilities.drop Set Redis® exporter containers' Security Context capabilities to drop - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsGroup: 0 - runAsNonRoot: true - allowPrivilegeEscalation: false - seccompProfile: - type: RuntimeDefault - capabilities: - drop: - - ALL - ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis® metrics sidecar - ## - extraVolumes: [] - ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar - ## - extraVolumeMounts: [] - ## Redis® exporter resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param metrics.resources.limits The resources limits for the Redis® exporter container - ## @param metrics.resources.requests The requested resources for the Redis® exporter container - ## - resources: - limits: {} - requests: {} - ## @param metrics.podLabels Extra labels for Redis® exporter pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param metrics.podAnnotations [object] Annotations for Redis® exporter pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9121" - ## Redis® exporter service parameters - ## - service: - ## @param metrics.service.type Redis® exporter service type - ## - type: ClusterIP - ## @param metrics.service.port Redis® exporter service port - ## - port: 9121 - ## @param metrics.service.externalTrafficPolicy Redis® exporter service external traffic policy - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param metrics.service.loadBalancerIP Redis® exporter service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param metrics.service.loadBalancerSourceRanges Redis® exporter service Load Balancer sources - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g. 
- ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param metrics.service.annotations Additional custom annotations for Redis® exporter service - ## - annotations: {} - ## @param metrics.service.clusterIP Redis® exporter service Cluster IP - ## - clusterIP: "" - ## Prometheus Service Monitor - ## ref: https://github.com/coreos/prometheus-operator - ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created - ## - namespace: "" - ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped - ## - interval: 30s - ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended - ## - scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. - ## - relabellings: [] - ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. - ## - metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## - honorLabels: false - ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus - ## - additionalLabels: {} - ## @param metrics.serviceMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics - ## - podTargetLabels: [] - ## @param metrics.serviceMonitor.sampleLimit Limit of how many samples should be scraped from every Pod - ## - sampleLimit: false - ## @param metrics.serviceMonitor.targetLimit Limit of how many targets should be scraped - ## - targetLimit: false - - ## Custom PrometheusRule to be defined - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## - prometheusRule: - ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator - ## - enabled: false - ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created - ## - namespace: "" - ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule - ## - additionalLabels: {} - ## @param metrics.prometheusRule.rules Custom Prometheus rules - ## e.g: - ## rules: - ## - alert: RedisDown - ## expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0 - ## for: 2m - ## labels: - ## severity: error - ## annotations: - ## summary: Redis® instance {{ "{{ $labels.instance }}" }} down - ## description: Redis® instance {{ "{{ $labels.instance }}" }} is down - ## - alert: RedisMemoryHigh - ## expr: > - ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 - ## / - ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"} - ## > 90 - ## for: 2m - ## labels: - ## severity: error - ## annotations: - ## summary: Redis® instance {{ "{{ $labels.instance }}" }} is using too much memory - ## description: | - ## Redis® instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. 
- ## - alert: RedisKeyEviction - ## expr: | - ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0 - ## for: 1s - ## labels: - ## severity: error - ## annotations: - ## summary: Redis® instance {{ "{{ $labels.instance }}" }} has evicted keys - ## description: | - ## Redis® instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. - ## - rules: [] - - ## @section Init Container Parameters - ## - - ## 'volumePermissions' init container parameters - ## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values - ## based on the *podSecurityContext/*containerSecurityContext parameters - ## - volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` - ## - enabled: false - ## OS Shell + Utility image - ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ - ## @param volumePermissions.image.registry OS Shell + Utility image registry - ## @param volumePermissions.image.repository OS Shell + Utility image repository - ## @param volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended) - ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy - ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 11-debian-11-r90 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init container's resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param volumePermissions.resources.limits The resources limits for the init container - ## @param volumePermissions.resources.requests The requested resources for the init container - ## - resources: - limits: {} - requests: {} - ## Init container Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser - ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the - ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` - ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) - ## - containerSecurityContext: - runAsUser: 0 - - ## init-sysctl container parameters - ## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) - ## - sysctl: - ## @param sysctl.enabled Enable init container to modify Kernel settings - ## - enabled: false - ## OS Shell + Utility image - ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ - ## @param sysctl.image.registry OS Shell + Utility image registry - ## @param sysctl.image.repository OS Shell + Utility image repository - ## @param sysctl.image.tag OS Shell + Utility image tag (immutable tags are recommended) - ## @param sysctl.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param sysctl.image.pullPolicy OS Shell + Utility image pull policy - ## @param sysctl.image.pullSecrets OS Shell + Utility image pull secrets - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 11-debian-11-r90 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## @param sysctl.command Override default init-sysctl container command (useful when using custom images) - ## - command: [] - ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys` - ## - mountHostSys: false - ## Init container's resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param sysctl.resources.limits The resources limits for the init container - ## @param sysctl.resources.requests The requested resources for the init container - ## - resources: - limits: {} - requests: {} - - ## @section useExternalDNS Parameters - ## - ## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. - ## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled. - ## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations. 
-    ## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release.
-    ##
-    useExternalDNS:
-      enabled: false
-      suffix: ""
-      annotationKey: external-dns.alpha.kubernetes.io/
-      additionalAnnotations: {}
diff --git a/redis/redis-service-account.yaml b/redis/redis-service-account.yaml
deleted file mode 100644
index 9c7dca7..0000000
--- a/redis/redis-service-account.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: redis
-  namespace: redis-system
diff --git a/redis/sealed-secret.yaml b/redis/sealed-secret.yaml
deleted file mode 100644
index 3a0e364..0000000
--- a/redis/sealed-secret.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: bitnami.com/v1alpha1
-kind: SealedSecret
-metadata:
-  creationTimestamp: null
-  name: redis-key
-  namespace: redis-system
-spec:
-  encryptedData:
-    password: AgAQ9PHv4fJez1wAJtcvWSOMFEMOOHULZhX1wFzoO9JTm4WDeK9GaWbT4tSM3fXsd+9GfhggnsFHeP4t5G/4BlvQ8lNs0bXfUZiSomUL69zhH2YEg9EhJVm9eJWvvJ75m1HnfIL2yFMm9jsxgzajg+fn5a6h4od0gjPAah9+uiVYi4xdIAv8SJK+CEXKKLhuwzV+MkQ0XdiISdanHjrPvYKA5FGRLqjmJePfSTtea5xGhx4DkHzkQ2KwzKIM/v4JOhA3JnwXebZh+GrUv6cg/fh9xnBUxeFvoimAt0gzOD0ajUIWTqTEHCqmPfumNo4w2paG+s+0vAL2gercxeyamOhkRZuWfOLwnQ/eoAm+gQGItn7UhL0yjaFDpkdICTrIXOEebScv27aHKe+4Cdw1BcAS8lIrE9JelVVgOqxBCaIvIBBPVyaFFVXF/YmMK6VAYTO1c3MDPpJEeFyNGoMo82lzL3IwRRFrPYoDrKbfsrWfZUQRYKOxVWihgWYFYx/asceJxegPAdCLq7avQ7tCoIodm9qgZ4F7F0x+N38oFLLCCe3tAhorInC/sWjkrsLpDBtAkWEsJnN865a+yRpN2YHFz+NKf2rugGDre0jA7GgisPwukmY4sC6r8MSjxumkaBo22hMoyRXBpsEBzLTsWMDjI6155J60iamBIUUORYpEVOHVFmY4iDSY9mBbp/ZzIvOa+mJCcvI5U5apJBALOUrGY3hSXHm+am7FWZtM6U0rmw==
-  template:
-    metadata:
-      creationTimestamp: null
-      name: redis-key
-      namespace: redis-system
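Note on the removed redis/sealed-secret.yaml: the password cannot be read back out of the encryptedData blob without the sealed-secrets controller's private key, so recreating the credential later means sealing a fresh plain Secret. The sketch below is illustrative only, assuming a running sealed-secrets controller in the target cluster; CHANGE_ME is a placeholder, not the original value.

# Illustrative plain Secret that would be piped through kubeseal; CHANGE_ME is a placeholder.
apiVersion: v1
kind: Secret
metadata:
  name: redis-key
  namespace: redis-system
stringData:
  password: CHANGE_ME
# Sealing step (kubeseal encrypts against the controller's public key):
#   kubectl create secret generic redis-key -n redis-system \
#     --from-literal=password='CHANGE_ME' --dry-run=client -o yaml \
#     | kubeseal --format yaml > redis/sealed-secret.yaml

The name and namespace under spec.template.metadata in the deleted manifest are what the controller copies onto the unsealed Secret, which is why they match the metadata shown in this sketch.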