Reduce to base repo
continuous-integration/drone/push: Build is passing

Tyler Perkins 2024-04-29 20:36:39 -04:00
parent 574121240b
commit bfe3c57642
Signed by: tyler
GPG Key ID: 03B27509E17EFDC8
115 changed files with 0 additions and 10231 deletions


@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: api-service
namespace: api-ns
spec:
type: LoadBalancer
ports:
- name: general-api
port: 8080
targetPort: 80
selector:
app: api-apps


@@ -1,15 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: general-api-config-map
namespace: api-ns
data:
config.yaml: |
images:
access_key: ${ACCESS_KEY}
secret_key: ${SECRET_KEY}
endpoint: s3.clortox.com
bucket: backgrounds
secure: True
weather:
period: 15


@@ -1,46 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: general-api
namespace: api-ns
labels:
app: general-api
spec:
replicas: 1
selector:
matchLabels:
app: api-apps
template:
metadata:
labels:
app: api-apps
spec:
containers:
- name: general-api
image: git.clortox.com/tyler/general-api:1.0.15
imagePullPolicy: Always
env:
- name: ACCESS_KEY
valueFrom:
secretKeyRef:
name: general-api-secret
key: access-key
- name: SECRET_KEY
valueFrom:
secretKeyRef:
name: general-api-secret
key: secret-key
- name: CONFIG
value: "config.yaml"
ports:
- containerPort: 80
volumeMounts:
- name: config-volume
mountPath: /media/config.yaml
volumes:
- name: config-volume
configMap:
name: general-api-config-map
items:
- key: config.yaml
path: config.yaml
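
Note: mounting the ConfigMap item at mountPath /media/config.yaml as above creates a directory of that name containing config.yaml. If the intent was a single file at exactly /media/config.yaml, the usual pattern is a subPath mount. A minimal sketch, assuming that intent:

volumeMounts:
- name: config-volume
  mountPath: /media/config.yaml
  subPath: config.yaml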


@@ -1,16 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: general-api-secret
namespace: api-ns
spec:
encryptedData:
access-key: AgAPhGl0HfCpVQEmmLhArZgJgfXRUBEvokXse1mzEbsrIiSech0beSlKgTwrDwIFt//HISitEn3qtJkU1UXtVjhv8iTco9JgFLg/xmv+24eiRwG5smrtZqPN1iE7SL5kGfxM4RDvpSMxwdNnfsF53WGjsXIYaHtIVn316e2TQf85vSyLpq+BUgbgP5GqG931ur5ErZf48LHIdZ91wvxd1Lcog+C/jVKRmq0KvDKQZPJZD5qF1uyUiMz/ttY2NDLieGzkhQRpjVJmZaqHTdb6nBGcsMhdu8rI1pCkP8PHe8LsnwooRqPdZwg63Vdna7PzztrEesy5koyQwm4IOduB8xU48wY7KGiQ7ZLk4AHoamIQ1mYwK7D/Z5yvpVHItOUPsCzqo+JYbNhTMlXWVrCTWJU5D+CIvIgRUN5d4W4mM70wb75Glo5JGZr4Yw31zbxMSqCOzGeqILRwnKXP78RtM0URFU5sVfkvqbjm/1qP70YgtlowC/gBNEgHykYJV8CjeBb8tf1vjUDLOr+NgOj0FV/SrnFwan3YyMdwMirrZSoB3irta+7AEe1211ur+13JjZWhdbuJfCkP2l3uJz7zxWdGEapf2psCmC+syzyVrkEA5p1B0/Mu8H+d3dpranRmCWNOTySa1CEIPFuJ+ipxMsbQmPi7R60nQ6ZIUAOnJh/SAae1n1ixuOdc7KfYaSR+abYXMgrTBkC9riG6ew==
secret-key: AgCcGmblfLgGakV8y2p1S3PHReMe2EuqvnfM8XHs6mK8fRGlFIzUw9Rsi9R/MeEfSx5eBTHzN+Euy/ykWJAKhbWw0cEcx+YcL8RahnGAJIqFsSw+atYmv4MJ9JjsCXX+3H4svjtV5AiE019YxwwAX27QzMcEyWE3Rg7/WPNnqyvferfdI0j5NttDiFKyKQvZSrWg2knyopbfNywMijEICBGWgZMj/nRbNm2vXdgYWhFvxkGYVCuRjnbz+iU+T0PMlqWZmj1Yxs72QOoKBYa4pJxSfDjg1erTEiPQgFJPULiSiEargIrxcCdxRdbn9Us/qO26lgvTSCdtiHTzOALmeD9no8Cr6wqZDQD616OyBaFvTTcwCTa+YxaVB5mpoLHDUPOzjVBCpB7ojRH5nXXa7x3bIt9fz9dA9KNPywySsRcQ0hR/UoeMmtJfKx0I86VvxqhhhlEHAKAnUjZyCfaRvftCOkc4JfB9XZtDJr0/I47ToWNofEU1WDJlTkvm9dOJFvRsNGzsLAHhT3I/8cP+sCAY594lmI6J+MMfOjPV5Ig0xQic2my9clrKPohUbKue0R8cSUIb42OnskLOE0bx91JYXBdDeZ6lxawrWznWwPG3j7BsIslqDYSUeKFun91c4xSp2GvdliTS3Md/O/f+yqcBSKGnRkGXZaOpPEB+9MyP3PYVd2pSFt/7fXi9gFj2CxnbClVCsDNCf+hqVH52a2UB9Q758FLO+N+iSpzD61hQZg==
template:
metadata:
creationTimestamp: null
name: general-api-secret
namespace: api-ns
---
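
For reference, once the sealed-secrets controller decrypts this object, it creates an ordinary Secret with the same name and namespace; that Secret is what the Deployment's secretKeyRef entries above resolve against. Roughly this shape (values elided):

apiVersion: v1
kind: Secret
metadata:
  name: general-api-secret
  namespace: api-ns
type: Opaque
data:
  access-key: <base64 of decrypted value>
  secret-key: <base64 of decrypted value>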


@@ -1,285 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: authentik
namespace: authentik-ns
annotations:
force-recreate: "true"
spec:
chart:
spec:
chart: authentik
sourceRef:
kind: HelmRepository
name: authentik
namespace: flux-system
interval: 15m0s
timeout: 5m
releaseName: authentik
values:
# -- Server replicas
replicas: 1
# -- Custom priority class for different treatment by the scheduler
priorityClassName:
# -- server securityContext
securityContext: {}
# -- server containerSecurityContext
containerSecurityContext: {}
worker:
# -- worker replicas
replicas: 1
# -- Custom priority class for different treatment by the scheduler
priorityClassName:
# -- worker securityContext
securityContext: {}
# -- server containerSecurityContext
containerSecurityContext: {}
env:
- name: AUTHENTIK_REDIS__DB
value: "1"
image:
repository: ghcr.io/goauthentik/server
tag: 2024.2.2
#tag: latest
# -- optional container image digest
digest: ""
pullPolicy: IfNotPresent
pullSecrets: []
# -- Specify any initContainers here as dictionary items. Each initContainer should have its own key. The dictionary item key will determine the order. Helm templates can be used
initContainers: {}
# -- Specify any additional containers here as dictionary items. Each additional container should have its own key. Helm templates can be used.
additionalContainers: {}
ingress:
enabled: false
ingressClassName: ""
annotations: {}
labels: {}
hosts:
- host: authentik.domain.tld
paths:
- path: "/"
pathType: Prefix
tls: []
# -- Annotations to add to the server and worker deployments
annotations: {}
# -- Annotations to add to the server and worker pods
podAnnotations: {}
authentik:
# -- Log level for server and worker
log_level: info
# -- Secret key used for cookie signing and unique user IDs,
# don't change this after the first install
#secret_key: ""
# -- Path for the geoip database. If the file doesn't exist, GeoIP features are disabled.
geoip: /geoip/GeoLite2-City.mmdb
email:
# -- SMTP Server emails are sent from, fully optional
host: ""
port: 587
# -- SMTP credentials; when left empty, no authentication will be done
username: ""
# -- SMTP credentials; when left empty, no authentication will be done
password: ""
# -- Enable either use_tls or use_ssl, they can't be enabled at the same time.
use_tls: false
# -- Enable either use_tls or use_ssl, they can't be enabled at the same time.
use_ssl: false
# -- Connection timeout
timeout: 30
# -- Email from address, can either be in the format "foo@bar.baz" or "authentik <foo@bar.baz>"
from: ""
outposts:
# -- Template used for managed outposts. The following placeholders can be used
# %(type)s - the type of the outpost
# %(version)s - version of your authentik install
# %(build_hash)s - only for beta versions, the build hash of the image
container_image_base: ghcr.io/goauthentik/%(type)s:%(version)s
error_reporting:
# -- This sends anonymous usage-data, stack traces on errors and
# performance data to sentry.beryju.org, and is fully opt-in
enabled: false
# -- This is a string that is sent to sentry with your error reports
environment: "k8s"
# -- Send PII (Personally identifiable information) data to sentry
send_pii: false
postgresql:
# -- set the postgresql hostname to talk to
# if unset and .Values.postgresql.enabled == true, will generate the default
# @default -- `{{ .Release.Name }}-postgresql`
host: "postgresql.postgresql-system.svc.cluster.local"
# -- postgresql Database name
# @default -- `authentik`
name: "authentik"
# -- postgresql Username
# @default -- `authentik`
user: "authentik"
#password: ""
port: 5432
redis:
# -- set the redis hostname to talk to
# @default -- `{{ .Release.Name }}-redis-master`
host: "redis-master.redis-system.svc.cluster.local"
#password: ""
# -- List of config maps to mount blueprints from. Only keys in the
# configmap ending with ".yaml" will be discovered and applied
blueprints: []
# -- see configuration options at https://goauthentik.io/docs/installation/configuration/
env:
- name: AUTHENTIK_REDIS__DB
value: "1"
# AUTHENTIK_VAR_NAME: VALUE
envFrom: []
# - configMapRef:
# name: special-config
envValueFrom:
AUTHENTIK_SECRET_KEY:
secretKeyRef:
name: authentik-secret
key: secret-key
AUTHENTIK_POSTGRESQL__PASSWORD:
secretKeyRef:
name: authentik-secret
key: postgres-password
AUTHENTIK_REDIS__PASSWORD:
secretKeyRef:
name: authentik-secret
key: redis-password
service:
# -- Service that is created to access authentik
enabled: true
type: LoadBalancer
port: 80
name: http
protocol: TCP
labels: {}
annotations: {}
volumes: []
volumeMounts: []
# -- affinity applied to the deployments
affinity: {}
# -- tolerations applied to the deployments
tolerations: []
# -- nodeSelector applied to the deployments
nodeSelector: {}
resources:
server: {}
worker: {}
autoscaling:
server:
# -- Create a HPA for the server deployment
enabled: false
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 50
worker:
# -- Create a HPA for the worker deployment
enabled: false
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
livenessProbe:
# -- enables or disables the livenessProbe
enabled: true
httpGet:
# -- liveness probe url path
path: /-/health/live/
port: http
initialDelaySeconds: 5
periodSeconds: 10
startupProbe:
# -- enables or disables the livenessProbe
enabled: true
httpGet:
# -- liveness probe url path
path: /-/health/live/
port: http
failureThreshold: 60
periodSeconds: 5
readinessProbe:
enabled: true
httpGet:
path: /-/health/ready/
port: http
periodSeconds: 10
serviceAccount:
# -- Service account is needed for managed outposts
create: true
annotations: {}
serviceAccountSecret:
# -- As we use the authentik-remote-cluster chart as subchart, and that chart
# creates a service account secret by default which we don't need here, disable its creation
enabled: false
fullnameOverride: authentik
nameOverride: authentik
prometheus:
serviceMonitor:
create: false
interval: 30s
scrapeTimeout: 3s
# -- additional labels on the ServiceMonitor
labels: {}
rules:
create: false
# -- additional labels on the PrometheusRule
labels: {}
geoip:
# -- optional GeoIP, deploys a cronjob to download the maxmind database
enabled: false
# -- sign up under https://www.maxmind.com/en/geolite2/signup
accountId: ""
# -- sign up under https://www.maxmind.com/en/geolite2/signup
licenseKey: ""
editionIds: "GeoLite2-City"
image: maxmindinc/geoipupdate:v4.8
# -- number of hours between update runs
updateInterval: 8
# -- server containerSecurityContext
containerSecurityContext: {}
postgresql:
# -- enable the bundled bitnami postgresql chart
enabled: false
postgresqlMaxConnections: 500
postgresqlUsername: "authentik"
# postgresqlPassword: ""
postgresqlDatabase: "authentik"
# persistence:
# enabled: true
# storageClass:
# accessModes:
# - ReadWriteOnce
image:
tag: 15.4.0-debian-11-r0
redis:
# -- enable the bundled bitnami redis chart
enabled: false
architecture: standalone
auth:
enabled: false
image:
tag: 6.2.10-debian-11-r13
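
The sourceRef in this HelmRelease expects a HelmRepository named authentik in the flux-system namespace, which is not part of this diff. A minimal sketch of that object, assuming authentik's public chart repository as the URL:

apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
  name: authentik
  namespace: flux-system
spec:
  interval: 1h
  url: https://charts.goauthentik.io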


@@ -1,17 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: authentik-secret
namespace: authentik-ns
spec:
encryptedData:
postgres-password: AgBigFPSosBY6PGUxR4zdIntM+oGMyaDY9mHZBwL5xbjEEvmzNKCuCfQFuiE07WqV3fjWEp6D3o23fIMomPC3SNLWySfti8o5pyBrPGDZLR1dVYWLmkyMCj0pzbDmPgAArBuzGmQG6P+Kn4lqlkSU6F50ev/W8yHUPkrlp+iJsGM9wYNlboaZmDMowIK5ny8sQ5vIb+QakS3ybRa3DfX/T3yNvuhOeCt+367/3oV0yNmCEBK4qKpTsAkWctxXooX1wcAkOwMesqfE42I5Mt+s/UnbU5fXJdzM0YI7WZreEy5oaG1shDxp1PhXoc12yCt5KobTj0xlttUVFVb8IaOY7r4oSI74vrL8KGuZdny0oeWvVbiwA/SvOt7S05RdryYWf66jN71/Aku5LnKJwRoa7veGeX9S5pUe1wZyVSDN6trkJcG5ZJRmEerr4MOZ4YX9cB2FktEmd+estjIlm/UhEIRN8Qv4qd54t6j2Ajhk6EJ3Ky6mI9xiun+0ti9880rIHQiW5MpiZVB+nQlAosTVQu4wRjdnP6Z0ndP83e2rPkHJ/jF2iawXOBoS0Eh11UaXvRQyNQOt3ReIba7E0aSbynpULViOg/lVNLA2qgyp+37Veb44Mi2k7sHg7I8e6MOMVjBhfmv3HvMpdHHBIHSq2vaDlF/0i5o5OT0F1O+06OngfQAaQQc1SdpLeoPKget5fbNF9zgmfKxPodjayq+h6n3vm5QOc4TagtcG1PV38LsiQ==
redis-password: AgCWDT6n+wmF9+Qk4+bu1byc7TFmRwPGqrhBIdVvZrTMRh6jt43E8urutTAlqKO6JPbRw+gw7zA40uOOHYzU3UaIXdAueQtCRMhHAzKWMwvTuzKGqLmmKcxVF452wilyhMjLBgRuBvX43VK4kynIthM3LZmw9a/HAlbQqn624N3wvdOYXyrWG0YKisXJunEFPgQyygWozdFD/N+b2loBq5YvH3mLuOuJDcuAC+Ti7URRbHigZXOhpZK6ilycAcJxJlOE9FVDRXMYSophjDWtD/Wb7WNLU7iakdXjNMFNVlE89mzrLxOskI918l6hrMG+Tk9FrhwKZx9ZuVwoUOdLBhF7I0jjYWKnJ1gEIMKXNBcrQWcnqX392VTu4RG0YNIIzasYkJ4/i3bjDnIH9zpSnRn6VSL2ZRhikJBOGJRXlXamd93XcCC+wg7gLu9XGi6g7ddC9UksxFzfIoMvj6aZ5EzERwJ7Td/qH1mWcfm5iesXKP1Y7PUSElIXIVmx9ifLgzIfbreb5VJDj2v+gTD44zxy+zHhSgdyefR2FcXT2eZv9CFO/VS8WB/F8+edJai0wHmJv0ooYVNS0PtIkyD8DEUC3Egt97SmWlQlEn1rfX1hj7jpN7HTpW19l9kV3r9n84ZzVJf62qybHElKOQWoqdz2Xxv6gPannZ8XQbk3nR0dG99jrUhvTpqjLFaWV+27PE0bRuV6w1G5Zm7X6Jdr/y3p8UvH2UonA2/8xjPANci/tA==
secret-key: AgBGLb8gPEET4udFwIMlgqWz5nIvu0/Tq6AhkCvxYTF4z2Gl4I7uOA4QtsnqDfOeQXJStpJ02ndc+q5l1uoP+hVgwhX1yWdeAtlQgubCpGraCQqofqVrwQwt9DoZqre+8rCp3llEugTP72Vekx9s9/8nDs+JqfBtfgLSdYqaJDO7fd3P4DDvA+DPhRTuT8j1YkX9mejxaWxd9lDss2OXWgZ/HDvGrm61FS3ByVqAo0uuayBcC8TtVrcjA6o2bfCFzz7g1uwzDC10bE7RNuJzpErulrOv/QzgxB/yTmQ4JlJmbgonAC3ZUBBc5hAl7m7hKuq6CFyHD1kZCWJ/cZkg9AagI0u9f96+y5kYh+KZK8/WuPHF3LhM9dam9KYKVJRqWE4nq5/QYcpbkQtKBqKlGPZZCyEmH/ylL6r3djMHNjKTpdCwlMqNFetDPLDMNFB1i2Nqg7PAzqOE3Dq5AHShSBG//losKiTfoNF3uYwbrA3cQhxCOAM/1EiLEvz1KerHaJrlcV5Y32ZaOj6P4aQeBAzEpmS8sRr0yooYmA1iJce+wYMsvI1VlNKP4HU+wLm5xKNca1SRvZaOmz1RUp3l+Q+jckhHmRFubLOR6RpmdiGtTAyvjfMRkRtzDfnyu+xGvCqlontPIPWh7yl8jsqrjhr5/tXVtSs+yZhdfn1M7oiDbv7xa4o2jAxt+MpP1XtMaoH/Rnt3x2JprDrSU+1YICE9Ibzo6xjJYFs5I/fM7auUvF3cmX40zafRHw5DYehWCBU3mA==
template:
metadata:
creationTimestamp: null
name: authentik-secret
namespace: authentik-ns
---


@@ -1,16 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: firefly-iii-ai
namespace: firefly-ns
spec:
encryptedData:
FIREFLY_PERSONAL_TOKEN: AgDimybAXS9nGY5qbE+YXcWklHXrl6aOTl3dC2pZGEQ5abwbE7H7vsccOMJWPhfbT/x0/gK63qEQeucu3CB8va+QKRF82DE1h9TNe++hjneKl51htDAG8wnzpyLZfQIRMrmYejjC2t1ID3ti5JXVwfzrMwge9bsx5FLSBZQJbTy74gNRBU/q+zz2bvcRXKmQS5kocUBsmJXGBKYYreVd8qAA1eEb13YQYllZ+iJXFmJqYoK/pkYuQO1ClzZHLMA1AIWVughhvQeOmvfNXxm7hMopTHMMRdeXFuGnv1J45ktE0YYInnlGrJoJY3hjRNlWy+fQgqzA0IfcIVF58w2A29pL5oXr+J5v5grAr6gm60Xm+P/c8uRMQeQ9Yv6W3L+mG9ECr9DhNCvAdadGAyK1oVYUoJ1AISMLTbGkQ98uNTG9ExG9zcoYJ4teXgTmqDN+HP5wRhDdz00ELIedxOhtXK0mhT1lEuZN3E6nyid/knRX5aGY0vts3V5odJvfur4xRQKlwegTR4lAhPdn7rW+LAeWrl+3pfyQnsR8gkc8/8kgNLcqlefX/2K/tt9vsGbb6H9ciBdrrzpLja/Ml7v/hAJSPqFEZSz1DLaI3TIEq8xUmjaJu9RlqpTh8gi9oCYDIVVdjbphpWeL+O8w5hHuUgaX0EcSDQPI9Nsmi4Z643CbqhWa6lvL07FvwQ5AyjA4xMOhEFi/2pB5lUu46NFa3ZzMw3UYynLeKLVF9Tgs8G/UcWWSJvkLu2hEc/EVavjtj6iTw93/qxPS6klDRjFX46FUr1DMJBTDcYhsEH/PEcIwJGeafgCOdVoVfAc+eJknJ2dDSBxNLkxTMgM8SenUr7kdmm844qWR9H6DzfNOtbn+uaOnpeHvhU4qxu+/UzqAT9/HCuds68XAGvhRltHrphPr6LgVWjPi+BrdIghpiAY5LtNWhgeQLyrQ2Due3Dt4tUL8wC4/xi9hYltx/AynCzNIwzs76/eQdl1VYDjToXDWye0NENqOCL8GK59V+1rXflRRX2q4iR72zttBgjpbWCNZ6wNnWWbQdH3dHxBvJ7Db736fRQtAoEC4rdKTbVw8A1W8X1Zn6Mt9SIYY+fssNF5aNb0YSNzNYrthIfYmtG/TWKkOR9Z5lvzdeTfzlS34y9xYEF1IAmjRC2bUywp7RNHRzMrgENYp3kyq5F1FruHkYwfX9pRQX1ZqyjcC10Dh/CB4pj2ivygqaBEwdqQIETgVzRQKYpDcRPJi9lyd2oaIcI4k+v47ZJX32ZJLDJBVUf3w6pwFVbiuuf5cdiWo+CvzsBmvGNtwa2Nn/K0b29piPlNBSvlFDSixSW2egmmLiSujYU5RjZy07bbPf9mFHdRNjiHB4GRUHafeL6BNJ7+4mdMIzVMPgjmQ2HfwVwaup7JpTY2D5vnd5cJwKreyKfMigg9w7l9fFx7WrDp1u2qiZtRQ07Xd6LsEaLkQDrKGgyuqu33BgOsOX82F5RPpkCYezeo0D4m3mLYMLOhp1+U3Kpt+OlvmuXPrWoJeVzwtGRoiv2FBk3Kz471wl0qwxYZxXom1I0XojVpJf8wtTtY9zI0gT7U1RZP2YIKW87dHAOWmM96miOLbsunCejRIFiRFEpJkwlnscsAn7woknl4hYj2oW7k2imO7mSwcyZiAFx+CitNqvXxoX9foMdJ3G7uXFKgqz0w1FEm0hSqDzut9DyHPunovLlMk+piBHl2eFwEtG6ODvx8SQBZ4DEJF30hmGL6NLmJiTNANDVhBgnmuiIvZRonj9gzHuCic9LUnf6DMIcMp3KLTBcpEPE/PLihmscswTuRpzF1TliCuDjhjVs2HyEx3WisY0w76w/YX/s4ipCInHmXV7HeGL0+RNAjlVGgAhRrNbUuAn0BwTDm/PMQvV/XJmwjd0964GkpK1lwBcms979WJzN+Gg6sTNa5Y9xRzm6p5QiXj8M7P8QzKHrZxSu+lMqYGtipONcBKb6pWk50Q0YGjSDkJ9lUymj3iEg5y5A==
OPENAI: AgAK3CS9m5SHyxnyk8tP7MlKu2XCLd+Qt8xBuYiFO/PtfxcE+j8MV1haSFUdEwcoySpkBYlctifqJc8uaBdmW1aewYhv6jZIXynScnEj4VZUe0sM1TGasuKduDWNgZ8jDRS+2fQmfzZh67zvtBVHHsiagAL6i+GHsjD3i9Fj2EoNbKueGgYgc7sMzAAoTcaEaP9SiTEzhujYb2HhRY7IoJkvmU2yyjOeFc7n3l6txkA4UsJ+c9wUblCTX4sXnU/pqTA2UAmWCfWPqYlXlKGsqZEPX7q+HcrR6H8TmiCBb/RIa5cZM87WdLTUxT5U85gcoBjj+Q7mAQlLAsfPTyncJgZSNBsNKm5UCrz7GyWr4NCnZonQIzE0GjlhQnP5ERq1+VjWdxTjsH1/QUEjjaJgq4JiJQV1OvRAemhiGXF7m8grqopwYMjBtrHt7tuIqDVNZhx8lWhZ/p21f8zvluR8WREEdmff/wsFNJkIbYJGwpwy8VpF4hiuqkcXSPGcz6OdBfY5sktUYwQNkRqEQV4wDLjU23hngL9P47dXY5Mx81AMvyD49V6DnRH8az1zaxcexvyC/m+4UEqm4Aw8REIf5sogH4Kpu2pKXW/ZrDMI27zyjammG9EF3AGo9Wi2ND8JJH0j4GJXu2auGDrENdJZaM6qDVauYrDFMVw9jj+ZlMLqUd797qMEYDP64ai4VjXXAj2Q4qS4F6wGu29jrHQK8tWeQx5hxl0pwlfUd5i7NyUjGczGaniXelpmQ3Lcpzp/HIXQd+4=
template:
metadata:
creationTimestamp: null
name: firefly-iii-ai
namespace: firefly-ns
---


@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: catagorize-ai-service
namespace: firefly-ns
spec:
selector:
app: firefly-iii
ports:
- protocol: TCP
port: 3000
targetPort: 3000
type: ClusterIP


@@ -1,35 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: catagorize-ai
namespace: firefly-ns
spec:
replicas: 1
selector:
matchLabels:
app: firefly-iii
template:
metadata:
labels:
app: firefly-iii
spec:
containers:
- name: catagorize-ai
image: ghcr.io/bahuma20/firefly-iii-ai-categorize
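# Note: no image tag is pinned here, so this resolves to :latest at pull time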
ports:
- containerPort: 3000
env:
- name: FIREFLY_URL
value: https://money.clortox.com
- name: ENABLE_UI
value: "true"
- name: FIREFLY_PERSONAL_TOKEN
valueFrom:
secretKeyRef:
name: firefly-iii-ai
key: FIREFLY_PERSONAL_TOKEN
- name: OPENAI_API_KEY
valueFrom:
secretKeyRef:
name: firefly-iii-ai
key: OPENAI


@@ -1,173 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: firefly
namespace: firefly-ns
spec:
chart:
spec:
chart: firefly-iii
sourceRef:
kind: HelmRepository
name: firefly-iii
namespace: flux-system
interval: 15m0s
timeout: 5m
releaseName: firefly-iii
values:
replicaCount: 1
image:
repository: "fireflyiii/core"
pullPolicy: IfNotPresent
tag: version-6.1.6
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
persistence:
# -- If you set this to false, uploaded attachments are not stored persistently and will be lost with every restart of the pod
enabled: true
storageClassName: "longhorn"
accessModes: ReadWriteOnce
storage: 20Gi
# -- If you want to use an existing claim, set it here
existingClaim: ""
# -- Environment variables for Firefly III. See docs at: https://github.com/firefly-iii/firefly-iii/blob/main/.env.example
config:
# -- Set this to the name of a secret to load environment variables from. If defined, values in the secret will override values in config.env
existingSecret: "firefly-iii-secret"
# -- Set environment variables from configMaps or Secrets
envValueFrom: {}
# -- Directly defined environment variables. Use this for non-secret configuration values.
env:
DB_HOST: postgresql.postgresql-system.svc.cluster.local
DB_CONNECTION: pgsql
DB_PORT: "5432"
DB_DATABASE: firefly
DB_USERNAME: firefly
DEFAULT_LANGUAGE: "en_US"
DEFAULT_LOCALE: "equal"
TZ: "America/New_York"
TRUSTED_PROXIES: "**"
APP_URL: "https://money.clortox.com"
AUTHENTICATION_GUARD: "remote_user_guard"
AUTHENTICATION_GUARD_HEADER: "X-authentik-email"
# -- Create a new Secret from values file to store sensitive environment variables. Make sure to keep your secrets encrypted in the repository! For example, you can use the 'helm secrets' plugin (https://github.com/jkroepke/helm-secrets) to encrypt and manage secrets. If the 'config.existingSecret' value is set, a new Secret will not be created.
secrets:
env:
APP_PASSWORD: "CHANGE_ENCRYPT_ME"
DB_PASSWORD: "CHANGE_ENCRYPT_ME"
# -- A cronjob for [recurring Firefly III tasks](https://docs.firefly-iii.org/firefly-iii/advanced-installation/cron/).
cronjob:
# -- Set to true to enable the CronJob. Note that you need to specify either cronjob.auth.existingSecret or cronjob.auth.token for it to actually be deployed.
enabled: false
# -- Authorization for the CronJob. See https://docs.firefly-iii.org/firefly-iii/advanced-installation/cron/#request-a-page-over-the-web
auth:
# -- The name of a secret containing a data.token field with the cronjob token
existingSecret: ""
# -- The name of the key in the existing secret to get the cronjob token from
secretKey: "token"
# -- The token in plain text
token: ""
# -- Annotations for the CronJob
annotations: {}
# -- When to run the CronJob. Defaults to 03:00 as this is when Firefly III executes regular tasks.
schedule: "0 3 * * *"
# -- How many pods to keep around for successful jobs
successfulJobsHistoryLimit: 3
# -- How many pods to keep around for failed jobs
failedJobsHistoryLimit: 1
# -- How to treat failed jobs
restartPolicy: OnFailure
image:
repository: curlimages/curl
pullPolicy: IfNotPresent
tag: 7.81.0
imagePullSecrets: []
podAnnotations: {}
securityContext: {}
podSecurityContext: {}
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: LoadBalancer
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}


@@ -1,18 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: firefly-iii-secret
namespace: firefly-ns
spec:
encryptedData:
APP_KEY: AgCSE+/lOiQJV3HE/UiAzeIXc2hgOMY95RSUO8Q22sK+R6WdpLcc0/gkxhOYtAdFGp1r9TIQQcWcbR2cEZ84GsnhoNJxh2vgaME+g5m0EgzQouczW/GTR56qfu/P+zp/IlIjepJVeAhVAOAInLDn/XUJf6kXyfWG7kHLhB/CHI6P0VC1RcHXAjkArDmpn5wOwDzVSMOCWszd6BXjl/LacRPkC58Oj4GCIlEqXo1meBJ7Lc6IG+x7VSjNv19xKVFqULt/Aep2YowIf3TPlQDhkv39Rro434dzm9q/M88JndE6sOqw1MTO+QqPPSUKPDSTWwD72FV4rmVkeXiTKtvMLlAWywQIOFL7ZIVJ50DjYgWV/tx3xz81lnfgwtFa6cT1OwUOfLrAEAe4iF/3hzgY1dfTMB7eKbY+XGpGvrcqAImfcBfMwc2wqSWj7FA3V5qOwrfeObDE96nvOuDyUqgPgzyyG6JZwkM1R9pgTABbG3sEkbtyxLQfJftooKtQ3obDsP56aS9uzfZ0rsZpT6Ek7fNd9dqG0XEqDOjNgOxW8aCJBq+Uy9Pbvx9e0flBwXJM0FV963ql7b4i5vrG5IuBC/CC5t1qPwaQPd/fMARjF8hIjhcfF8lnwFzT5vYRHIaky68U2u7gUx91vkiM5X3W8G3N4TX9kZI8QKr0pHcMy3zdM4ou95qUrhr6s/BYrULKjtZ4jucVRoX1PXa+D/laa6qk0Di0iw9S+KMdni5XK+If5A==
APP_PASSWORD: AgBPGz6DZ4R5L/MnGFbEu91VtBnuT4XiV1h2LGCwg5XTtlPcaB4EgNTQqFihAqt1GbHjGnJeaNMLgTVRXUuVJgklZ7Si4MGeBOhcUaIkuxa/7/p9I0n1mHYKlic+c7oRUMPYEbB6R2xjLeFNALIuCr6sQyX8JcIYlYh+arWqlWgQ7MRVSZW3VaWSPiQGMgRUIequ4CAS6exjzQUwZwwP5yGqKJcV7tBoPKDeQ5lNL2BqcG6xtk1Uin7M/dEYa9hEXBDD6eZrc9IzEfuxIOJiPvJvteXvCFeX6MltA1rEbYorub1p1u+m9ROFBx7K74iR+jJt3Fw1/LP319x3lXS3G7tg0TCwxLwytwRjLfPHlYiHRosUk9n8K+sluHqReyKnDN87DEDvGgKYiToF6oXq67rLd/fQlFYLMy86sIrAIQYEAjrDNvkGVOR557GVMR8XaEvxz2nHBLKuNIXtY4Shr7vUABHf1ove2MgUPnFzNd6UTLnud/YSpkXunlYtyu9zaf1xhJq4a0AXWK9HJpyMoZz7CG5aJTT4+PGpeq3dwMNqcjmX8WmgYIpD8JwxCu09l8cmb5HwxhhV2M0Qdn0X8fo2HyR+hhXN/5P2qWmEHSOXkz7I6nefDAco93PsDQkg+1IAc4IyrmP4HJgAVwh8TD3/OFcpwlO8rG6CTC+hgI/hcnyEZCkDfRczRLUlQ3fG5HWKnm0yX/GP4A+2j4kh8FpMTKsP9U48U6a93Mmp3trVkg==
DB_PASSWORD: AgB1oty1IbWAVgYNDjaIS+ATAccxhUEoKA0zjwwbYZyNtU+Me2Z3vRPb0n4Sqm23VglNx/AGYqIf0t9ewctlib0FbU404mX8IYKMS1/+0VhoFymrLNxlTR0CTlcatOmZBbwvbqh5esEyZ9LVglr1TQWS7p5KNiJB+6b8H9tuTxraHaMBMZhDTdoAhIxyzcSvaKTmJSCPvR38q06ggNeeFIE72hh4v97diJ50h78/P4ScoG8CYbuinQpND3Jg07GoAvhdpZk1PgAZQwSeWBBECmov6rGKmJuCAx5YzReGXQOXpUYk+K3YR5mgeEIGjvABoIoHCmYoMP8T56IIX1uZLGQFpMqIbqnJ828i6qy9gvO7Qxng8zIO4WO4pNZNo+dQw49Ri087TQpT9fq17+wykDj2zvDpvasqh3bc3K0NbaJQo3F0hFzZhtw7ZdQFQ9TrKD0oG1fNscP4jvvvXIKJ9IDEghPUmd+w3C4stIwsICgpUTGTytHQ1lUzL6OebBiQjXWablwEGbtcFWhqAx07esuFpe2hx4+6HDNpEG5MH7T2/IUrwBS1vrlD4OgzT/TKT2bwjuy62ralrr7CmPcbcqax7pfkpnjK3kDna85xz4JIC4/nguqVqztjTkk7fSxDckAFlp/WLvjwvG7byU3gbQX1Y1X9O8bELvLGA+QuaZ3mMYJNEtyi3lx/RVuiO139PQOfVwrK4jsTjhxp5xxn0dk02haVWlDBg15Mb85D8mn9sA==
STATIC_CRON_TOKEN: AgBkzS4jR9phFQ/LLfwETTQM30sbLO63nNVkdUcmoZJQb85iuNtn9Ji7ROdIFyFCgWeBygOPGtrSXYlnFf3YIfdM+tXOJ9YtYRX78pqCr4mm84ZpNAir2VTMRLQDbrtqejv6LQNPkrRPpC+Xia/nUiqoGo2RFi66ypheegH7DNW1gHHxZibpiFvrwxudYj1q5rFNvw3NMwb6sJZ6FfJyKhMoKlQIJ1P+H2YoqAWU29hFiP1ZCZJ4j8+2cfaKiImlHsmGuTkuZuUi+0F8vCSUncSOtNMXJpD3XYFqexcefalls7OZJ/U4gD8LAS6kMZwBqRsCgKFKXTSpQwFW8L01xXrb6NQyPdnItO1IRg/65BJQeFdSMzgfAGec22+MBRNbJc9y2mw3iKUxPf42y+Gij/8Th9kLA9pmX8NYcBaIBCXbv3B/9O8FEDsf62XaREG1mwU3wEP1HZ6YeWEeRCR6+0MCGUWrVQK6pAS8uWQPDz1LkoKqjnMsa5OsHnmi211KUAqOV7vDOGu0gKF3t035ojfoGiHO8XgLLd2viHH0t5iaRWtWLV46UuA4uLgIwrQtw+0IQnLfb5xaIQet9zQacBIi7t2eelJvvgkdyAJ2lqexQMrHzicKjvfJQ1BbHtK1Lsisw77jvARF71IywC18fHmr0e+zK1obSva3+dPthu36xCCuoJiZ2E9GmvMkveNA0qMQYavct497T7w0iR71p3k80hOsVGIRnqUaTGifzNJNfg==
template:
metadata:
creationTimestamp: null
name: firefly-iii-secret
namespace: firefly-ns
---


@@ -1,925 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: fission
namespace: fission-ns
spec:
chart:
spec:
chart: fission-all
sourceRef:
kind: HelmRepository
name: fission
namespace: flux-system
interval: 5m
values:
## Fission chart configuration
##
## serviceType to consider while creating Fission Controller service.
## For minikube/kind, set this to NodePort, elsewhere use LoadBalancer or ClusterIP.
##
serviceType: LoadBalancer
## routerServiceType to consider while creating Fission Router service.
## For minikube, set this to NodePort, elsewhere use LoadBalancer or ClusterIP.
##
routerServiceType: LoadBalancer
## repository represents base repository for images used in the chart.
## Keep it empty for using existing local image
##
repository: ghcr.io
## image represents the base image fission-bundle used by multiple Fission components.
## We alter arguments to the image to run a particular component.
##
image: fission/fission-bundle
## imageTag represents the tag of the base image fission-bundle used by multiple Fission components.
## It is also used by the chart to identify version of the few more images apart from fission-bundle.
## Keep it empty for using latest tag.
##
imageTag: v1.19.0
## pullPolicy represents the pull policy to use for images in the chart.
##
pullPolicy: IfNotPresent
## imagePullSecrets
imagePullSecrets: []
## priorityClassName represents the priority class name to use for Fission components.
## Refer to https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/
## executor.priorityClassName takes precedence over this value for executor.
## router.priorityClassName takes precedence over this value for router.
##
priorityClassName: ""
## terminationMessagePath is the path at which the pod termination message will be written.
## executor.terminationMessagePath takes precedence over this value for executor.
## router.terminationMessagePath takes precedence over this value for router.
##
terminationMessagePath: /dev/termination-log
## terminationMessagePolicy is the policy for the termination message.
## executor.terminationMessagePolicy takes precedence over this value for executor.
## router.terminationMessagePolicy takes precedence over this value for router.
##
terminationMessagePolicy: File
## controllerPort represents the port at which the Fission controller service should be exposed.
##
controllerPort: 31313
## routerPort represents the port at which the Fission Router service should be exposed.
##
routerPort: 31314
## defaultNamespace represents the namespace in which Fission custom resources will be created by the Fission user.
## This is different from the release namespace.
## Please consider setting `additionalFissionNamespaces` if you want more than one namespace to be used for Fission custom resources.
##
defaultNamespace: fission-service-ns
## builderNamespace represents the namespace in which Fission Builder resources will be created.
## if builderNamespace is set to empty then builder resources will be created in the same namespace as the Fission resources.
## This is different from the release namespace.
##
builderNamespace: ""
## functionNamespace represents the namespace in which Fission Function resources will be created.
## if functionNamespace is set to empty then function resources will be created in the same namespace as the Fission resources.
## This is different from the release namespace.
##
functionNamespace: ""
## Fission will watch the following namespaces along with the `defaultNamespace` for fission custom resources.
## additionalFissionNamespaces:
## - namespace1
## - namespace2
## - namespace3
additionalFissionNamespaces: []
## createNamespace decides to create namespaces by the chart.
## If set to true, functionNamespace and builderNamespace namespaces mentioned above will be created by the chart.
## Set to false if you want to create the namespaces manually.
##
createNamespace: true
## enableIstio indicates whether to enable istio integration.
##
enableIstio: false
## fetcher is a lightweight component that helps in running functions.
## fetcher helps in fetching function source code/build and uploading it when function is invoked.
##
fetcher:
## image represents the image of the fetcher component.
image: fission/fetcher
## imageTag represents the tag of the image of the fetcher component.
imageTag: v1.19.0
## Fetcher is only for downloading or uploading archives.
## Normally you don't need to change the value here.
##
resource:
## cpu represents the cpu resource required by the fetcher component.
##
cpu:
requests: "10m"
## Low CPU limits will increase the function specialization time.
limits: ""
## mem represents the memory resource required by the fetcher component.
##
mem:
requests: "16Mi"
limits: ""
## executor is responsible for providing resources to your functions.
##
executor:
## executor priorityClassName
## Ref. https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/
## Recommended to use system-cluster-critical for executor pods.
##
priorityClassName: ""
## terminationMessagePath is the path of the file to which the executor will write a message upon termination.
##
terminationMessagePath: ""
## terminationMessagePolicy is the policy for the executor termination message.
##
terminationMessagePolicy: ""
## adoptExistingResources decides whether to adopt existing resources when executor restarts or Fission is redeployed.
##
adoptExistingResources: false
## podReadyTimeout represents the timeout in seconds for waiting for pod to become ready.
## This is applicable to Pool Manager executor type only.
##
podReadyTimeout: 300s
## Pod resources as:
## resources:
## limits:
## cpu: <tbd>
## memory: <tbd>
## requests:
## cpu: <tbd>
## memory: <tbd>
##
resources: {}
## Security Context
## It holds pod-level and container level security configuration.
## This is an experimental section, please verify before enabling in production.
## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1
securityContext:
enabled: true
## Mark it false, if you want to stop the non root user validation
runAsNonRoot: true
fsGroup: 10001
runAsUser: 10001
runAsGroup: 10001
## Object Reaper
## objectReaperInterval (seconds) represents the GLOBAL interval for the process that reaps objects after a certain idle time.
## You can also set a different objectReaperInterval for a specific executor type. See the poolmgr/newdeploy/container sections
## Default: 5 (in seconds)
##
objectReaperInterval: 5
poolmgr: {}
## objectReaperInterval specific to poolmgr executor type
##
## objectReaperInterval: 5
newdeploy: {}
## objectReaperInterval specific to newdeploy executor type
##
## objectReaperInterval: 5
container: {}
## objectReaperInterval specific to container executor type
##
## objectReaperInterval: 5
serviceAccountCheck:
## enables fission to create service accounts, roles and rolebindings to fill missing permissions for builder and fetcher.
enabled: true
## indicates the time interval in minutes, after which fission will create service accounts, roles and rolebindings for builder and fetcher.
## interval is applicable only if the enabled value is set to true.
## the default is 0 minutes, which means the check will run only once.
## if you want to run the check every 30 minutes, set interval to 30.
interval: 0
## router is responsible for routing function calls to the appropriate function.
##
router:
## router priorityClassName
## Ref. https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/
## Recommended to use system-cluster-critical for router pods.
##
priorityClassName: ""
## terminationMessagePath is the path of the file to which the router will write a message upon termination.
##
terminationMessagePath: ""
## terminationMessagePolicy is the policy for the router termination message.
##
terminationMessagePolicy: ""
## deployAsDaemonSet decides whether to deploy router as a DaemonSet or a Deployment.
##
deployAsDaemonSet: false
## replicas decides how many router pods to deploy. Only used when deployAsDaemonSet is false.
##
replicas: 1
## svcAddressMaxRetries is the maximum number of times the router will retry with a specific function service address
##
svcAddressMaxRetries: 5
## svcAddressUpdateTimeout is the timeout setting for a goroutine to wait for the update of a service entry.
##
svcAddressUpdateTimeout: 30s
## unTapServiceTimeout is the timeout used in the request context of unTapService.
## unTapService is called to free up the resources once the function invocation is done.
##
unTapServiceTimeout: 3600s
## displayAccessLog displays endpoint access logs
## Be aware that enabling endpoint access logging increases
## router resource utilization under heavy workloads.
##
displayAccessLog: false
## svcAnnotations is the annotations to be added to the service resource created for router.
##
# svcAnnotations:
# cloud.google.com/load-balancer-type: Internal
## useEncodedPath decides whether to match the encoded path.
## If true, "/foo%2Fbar" will match the path "/{var}";
## Otherwise, it will match the path "/foo/bar".
## For details, see: https://github.com/fission/fission/issues/1317
##
useEncodedPath: false
roundTrip:
## If true, router will disable HTTP keep-alive, which results in performance degradation.
## But it ensures that router can redirect new coming requests to new function pods.
##
## If false, router will enable transport keep-alive feature for better performance.
## However, the drawback is it takes longer to switch to newly created function pods
## if using newdeploy as the executor type for the function. If you want to preserve the
## performance while keeping the switching time to new functions short, you can create
## an environment with a short grace period by setting the flag "--graceperiod" (default 360s),
## so that Kubernetes can reap old function pods quickly.
##
## For details, see https://github.com/fission/fission/issues/723
##
disableKeepAlive: false
## keepAliveTime is the period for an active network connection to a function pod.
##
keepAliveTime: 30s
## timeout is the HTTP transport request timeout
##
timeout: 50ms
## The request timeout is multiplied by timeoutExponent after each retry
##
timeoutExponent: 2
## maxRetries defines the number of retries for a failed request
##
maxRetries: 10
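## Worked example from the three settings above: with timeout 50ms and
## timeoutExponent 2, successive attempts use 50ms, 100ms, 200ms, ...
## timeouts, doubling after each retry up to maxRetries.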
## Extend the container specs for the core fission pods.
## Can be used to add things like affinity/tolerations/nodeSelectors/etc.
## For example:
## extraCoreComponentPodConfig:
## affinity:
## nodeAffinity:
## requiredDuringSchedulingIgnoredDuringExecution:
## nodeSelectorTerms:
## - matchExpressions:
## - key: capability
## operator: In
## values:
## - app
##
#extraCoreComponentPodConfig:
# affinity:
# tolerations:
# nodeSelector:
## Pod resources as:
## resources:
## limits:
## cpu: <tbd>
## memory: <tbd>
## requests:
## cpu: <tbd>
## memory: <tbd>
##
resources: {}
## Security Context
## It holds pod-level and container level security configuration.
## This is an experimental section, please verify before enabling in production.
## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1
securityContext:
enabled: true
## Mark it false, if you want to stop the non root user validation
runAsNonRoot: true
fsGroup: 10001
runAsUser: 10001
runAsGroup: 10001
## The builder manager watches the package & environments CRD changes and manages the builds of function source code.
##
buildermgr:
## Pod resources as:
## resources:
## limits:
## cpu: <tbd>
## memory: <tbd>
## requests:
## cpu: <tbd>
## memory: <tbd>
##
resources: {}
## Security Context
## It holds pod-level and container level security configuration.
## This is an experimental section, please verify before enabling in production.
## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1
securityContext:
enabled: true
## Mark it false, if you want to stop the non root user validation
runAsNonRoot: true
fsGroup: 10001
runAsUser: 10001
runAsGroup: 10001
## controller is the component that the client talks to.
## It contains CRUD APIs for functions, triggers, environments, Kubernetes event watches, etc. and proxy APIs to internal 3rd-party services.
##
controller:
enabled: true
## Pod resources as:
## resources:
## limits:
## cpu: <tbd>
## memory: <tbd>
## requests:
## cpu: <tbd>
## memory: <tbd>
##
resources: {}
## Security Context
## It holds pod-level and container level security configuration.
## This is an experimental section, please verify before enabling in production.
## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1
securityContext:
enabled: true
## Mark it false, if you want to stop the non root user validation
runAsNonRoot: true
fsGroup: 10001
runAsUser: 10001
runAsGroup: 10001
## webhook is the component that validates API calls.
## It contains validation and mutation for functions, triggers, environments, Kubernetes event watches, etc.
##
webhook:
## Pod resources as:
## resources:
## limits:
## cpu: <tbd>
## memory: <tbd>
## requests:
## cpu: <tbd>
## memory: <tbd>
##
resources: {}
certManager:
enabled: false
caBundlePEM: |
crtPEM: |
keyPEM: |
## Security Context
## It holds pod-level and container level security configuration.
## This is an experimental section, please verify before enabling in production.
## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1
securityContext:
enabled: true
## Mark it false, if you want to stop the non root user validation
runAsNonRoot: true
fsGroup: 10001
runAsUser: 10001
runAsGroup: 10001
## kubewatcher watches the Kubernetes API and invokes functions associated with watches, sending the watch event to the function.
##
kubewatcher:
## Pod resources as:
## resources:
## limits:
## cpu: <tbd>
## memory: <tbd>
## requests:
## cpu: <tbd>
## memory: <tbd>
##
resources: {}
## Security Context
## It holds pod-level and container level security configuration.
## This is an experimental section, please verify before enabling in production.
## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1
securityContext:
enabled: true
## Mark it false, if you want to stop the non root user validation
runAsNonRoot: true
fsGroup: 10001
runAsUser: 10001
runAsGroup: 10001
## The storage service is the home for all archives of packages with sizes larger than 256KB.
##
storagesvc:
## Pod resources as:
## resources:
## limits:
## cpu: <tbd>
## memory: <tbd>
## requests:
## cpu: <tbd>
## memory: <tbd>
##
resources: {}
## Archive pruner removes archives from storage which are not referenced by any package.
archivePruner:
enabled: true
## Run prune routine at interval (in minutes)
interval: 60
## Security Context
## It holds pod-level and container level security configuration.
## This is an experimental section, please verify before enabling in production.
## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1
securityContext:
enabled: true
## Mark it false, if you want to stop the non root user validation
runAsNonRoot: true
fsGroup: 10001
runAsUser: 10001
runAsGroup: 10001
## The timer works like a Kubernetes CronJob, but instead of creating a pod to do the task,
## it sends a request to the router to invoke the function.
##
timer:
## Pod resources as:
## resources:
## limits:
## cpu: <tbd>
## memory: <tbd>
## requests:
## cpu: <tbd>
## memory: <tbd>
##
resources: {}
## Security Context
## It holds pod-level and container level security configuration.
## This is an experimental section, please verify before enabling in production.
## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1
securityContext:
enabled: true
## Mark it false, if you want to stop the non root user validation
runAsNonRoot: true
fsGroup: 10001
runAsUser: 10001
runAsGroup: 10001
## Kafka: enable and configure the details
##
kafka:
enabled: false
## note: the broker address below is only for reference.
## Please use the brokers for your Kafka here.
##
brokers: "broker.kafka:9092" # or your-bootstrap-server.kafka:9092/9093
## Sample config for authentication
## authentication:
## tls:
## enabled: true
## caCert: 'auth/kafka/ca.crt'
## userCert: 'auth/kafka/user.crt'
## userKey: 'auth/kafka/user.key'
##
authentication:
tls:
enabled: false
## InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name.
## Warning: Setting this to true makes TLS susceptible to man-in-the-middle attacks
##
insecureSkipVerify: false
## path to certificate containing public key of CA authority
##
caCert: ""
## path to certificate containing public key of the user signed by CA authority
##
userCert: ""
## path to private key of the user
##
userKey: ""
## version of Kafka broker
## For 0.x it must be a string in the format
## "major.minor.veryMinor.patch" example: 0.8.2.0
## For 1.x it must be a string in the format
## "major.major.veryMinor" example: 2.0.1
## Should be >= 0.11.2.0 to enable Kafka record headers support
##
# version: "0.11.2.0"
# The following components expose Prometheus metrics and have servicemonitors in this chart (disabled by default)
# Controller, router, executor, storage svc
serviceMonitor:
enabled: false
##namespace in which you want to deploy servicemonitor
##
namespace: ""
## Map of additional labels to add to the ServiceMonitor resources
# to allow selecting specific ServiceMonitors
# in case of multiple prometheus deployments
additionalServiceMonitorLabels: {}
# release: "monitoring"
# key: "value"
# The following components expose Prometheus metrics and have podmonitors in this chart (disabled by default)
#
podMonitor:
enabled: false
##namespace in which you want to deploy podmonitor
##
namespace: ""
## Map of additional labels to add to the PodMonitor resources
# to allow selecting specific PodMonitor
# in case of multiple prometheus deployments
additionalPodMonitorLabels: {}
# release: "monitoring"
# key: "value"
## Persist data to a persistent volume.
##
persistence:
## If true, fission will create/use a Persistent Volume Claim unless storageType is set to s3
## If false, use emptyDir
##
enabled: false
## Must be set to either local or S3.
## If storageType is set (other than local), one of its backend configurations must be set as below.
##
#storageType: s3
## Sample configuration for AWS s3 storage backend
##
#s3:
# bucketName:
# subDir: <sub directory within a bucket>
# accessKeyId: <awsAccessKeyId>
# secretAccessKey:
# region: <awsRegion>
## For Minio and other s3-compatible storage systems, set the endPoint property
# endPoint: <s3StorageUrl>
## A manually managed Persistent Volume Claim name
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
##
# existingClaim:
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Extend the container specs for the core fission pods.
## Can be used to add things like affinity/tolerations/nodeSelectors/etc.
## For example:
## extraCoreComponentPodConfig:
## affinity:
## nodeAffinity:
## requiredDuringSchedulingIgnoredDuringExecution:
## nodeSelectorTerms:
## - matchExpressions:
## - key: capability
## operator: In
## values:
## - app
##
#extraCoreComponentPodConfig:
# affinity:
# tolerations:
# nodeSelector:
## Analytics lets us count how many people installed fission. Set to
## false to disable analytics.
##
analytics: true
## Internally used for generating an analytics job for non-helm installs
##
analyticsNonHelmInstall: false
## Google Analytics Tracking ID
##
gaTrackingID: UA-196546703-1
## Logger config
## This would be used if influxdb is enabled
##
logger:
influxdbAdmin: "admin"
fluentdImageRepository: index.docker.io
fluentdImage: fluent/fluent-bit
fluentdImageTag: 1.8.8
## Fluent-bit writes/reads its own sqlite database to record a history of tracked
## files and a state of offsets; this is very useful for resuming state if the
## service is restarted. For Kubernetes environments with constraints like OpenShift,
## containers are limited in writing to hostPath volumes. Hence, we have to enable
## the security context and set privileged to true.
##
enableSecurityContext: false
## Enable PodSecurityPolicies to allow privileged container
## Only required in some clusters and when enableSecurityContext is true
##
podSecurityPolicy:
enabled: false
## Configure additional capabilities
##
additionalCapabilities:
# example values for linkerd
#- NET_RAW
#- NET_ADMIN
## Enable InfluxDB
##
influxdb:
enabled: false
image: influxdb:1.8
## Allow user to override busybox image used in fluent-bit init container
##
busyboxImage: busybox
## Archive pruner is a garbage collector for archives on the fission storage service.
## This interval configures the frequency at which it runs inside the storagesvc pod.
## The value is in minutes.
##
preUpgradeChecks:
## Run pre-install/pre-upgrade checks if true
##
enabled: true
## pre-install/pre-upgrade checks live in this image
##
image: fission/pre-upgrade-checks
## pre-install/pre-upgrade checks image version
##
imageTag: v1.19.0
## Fission post-install/post-upgrade reporting live in this image
##
postInstallReportImage: fission/reporter
## If there are any pod specialization errors when a function is triggered, the error
## summary is returned as part of http response if this is set to true.
##
debugEnv: false
## Prometheus related configuration to query metrics
##
prometheus:
## please assign the prometheus service URL
## that is accessible by Fission components.
## This is mainly used to enable canary deployment.
##
serviceEndpoint: ""
canaryDeployment:
## set this flag to true if you need canary deployment feature
enabled: false
## Pod resources as:
## resources:
## limits:
## cpu: <tbd>
## memory: <tbd>
## requests:
## cpu: <tbd>
## memory: <tbd>
##
resources: {}
## Security Context
## It holds pod-level and container level security configuration.
## This is an experimental section, please verify before enabling in production.
## Ref: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1
securityContext:
enabled: true
## Mark it false, if you want to stop the non root user validation
runAsNonRoot: true
fsGroup: 10001
runAsUser: 10001
runAsGroup: 10001
## Enable authentication for fission function invocation via Fission router
##
authentication:
## set this flag to true if you need authentication
## for all function invocations
## default 'false'
##
enabled: false
## authUriPath defines authentication endpoint path
## via router
## default '/auth/login'
##
authUriPath:
## authUsername is used as a username for authentication
## default 'admin'
##
authUsername: admin
## jwtSigningKey is the signing key used for
## signing the JWT token
##
jwtSigningKey: serverless
## jwtExpiryTime is the JWT expiry time
## in seconds
## default '120'
##
jwtExpiryTime:
## jwtIssuer is the issuer of JWT
## default 'fission'
##
jwtIssuer: fission
## OpenTelemetry is a set of tools for collecting, analyzing, and visualizing
## distributed tracing data across function calls.
##
openTelemetry:
## Use this flag to set the collector endpoint for OpenTelemetry.
## The variable is the endpoint of the collector in the format shown below.
## otlpCollectorEndpoint: "otel-collector.observability.svc:4317"
##
otlpCollectorEndpoint: ""
## Set this flag to false if you are using a secure endpoint for the collector.
##
otlpInsecure: true
## Key-value pairs to be used as headers associated with gRPC or HTTP requests to the collector.
## Eg. otlpHeaders: "key1=value1,key2=value2"
##
otlpHeaders: ""
## Supported samplers:
## always_on - Sampler that always samples spans, regardless of the parent span's sampling decision.
## always_off - Sampler that never samples spans, regardless of the parent span's sampling decision.
## traceidratio - Sampler that samples probabilistically based on rate.
## parentbased_always_on - (default if empty) Sampler that respects its parent span's sampling decision, but otherwise always samples.
## parentbased_always_off - Sampler that respects its parent span's sampling decision, but otherwise never samples.
## parentbased_traceidratio - Sampler that respects its parent span's sampling decision, but otherwise samples probabilistically based on rate.
## See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration
##
tracesSampler: "parentbased_traceidratio"
## Each Sampler type defines its own expected input, if any.
## Currently we get trace ratio for the case of,
## 1. traceidratio
## 2. parentbased_traceidratio
## Sampling probability, a number in the [0..1] range, e.g. "0.1". Default is 0.1.
##
tracesSamplingRate: "0.1"
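## Worked example: with traceidratio (or parentbased_traceidratio) and a
## rate of "0.1", roughly 10% of new traces are sampled.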
## Supported providers:
## tracecontext - W3C Trace Context
## baggage - W3C Baggage
## b3 - B3 Single
## b3multi - B3 Multi
## jaeger - Jaeger uber-trace-id header
## xray - AWS X-Ray (third party)
## ottrace - OpenTracing Trace (third party)
## none - No tracing
## See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration
##
propagators: "tracecontext,baggage"
## Message Queue Trigger Kind, KEDA: enable and configuration
##
mqt_keda:
enabled: true
connector_images:
kafka:
image: fission/keda-kafka-http-connector
tag: v0.12
rabbitmq:
image: fission/keda-rabbitmq-http-connector
tag: v0.10
awskinesis:
image: fission/keda-aws-kinesis-http-connector
tag: v0.10
aws_sqs:
image: fission/keda-aws-sqs-http-connector
tag: v0.11
nats_steaming:
image: fission/keda-nats-streaming-http-connector
tag: v0.13
nats_jetstream:
image: fission/keda-nats-jetstream-http-connector
tag: v0.4
gcp_pubsub:
image: fission/keda-gcp-pubsub-http-connector
tag: v0.6
redis:
image: fission/keda-redis-http-connector
tag: v0.3
## Pod resources as:
## resources:
## limits:
## cpu: <tbd>
## memory: <tbd>
## requests:
## cpu: <tbd>
## memory: <tbd>
##
resources: {}
## Enable Pprof based profiling used mostly by Fission developers
##
pprof:
enabled: false
## Enable runtimePodSpec and add spec to your poolmgr or newdeploy pods
##
runtimePodSpec:
## Setting it false by default so that integration tests pass
##
enabled: false
## Checkout PodSpec in https://fission.io/docs/reference/crd-reference/#runtime
##
podSpec:
## Default podspec to improve security of the pods
##
securityContext:
fsGroup: 10001
runAsGroup: 10001
runAsNonRoot: true
runAsUser: 10001
## Enable builderPodSpec and add spec to your env builder pods
##
builderPodSpec:
## Setting it false by default so that integration tests pass
##
enabled: false
## Checkout PodSpec in https://fission.io/docs/reference/crd-reference/#builder
##
podSpec:
## Default podspec to improve security of the pods
##
securityContext:
fsGroup: 10001
runAsGroup: 10001
runAsNonRoot: true
runAsUser: 10001
## Enable Grafana Dashboard configmaps for auto dashboard provisioning
## If you use kube-prometheus stack for monitoring, these will get imported into grafana
grafana:
## The namespace in which grafana pod is present
namespace: monitoring
dashboards:
## Disabled by default. Switch to true to deploy them.
enable: false


@@ -1,58 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: freshrss
namespace: freshrss-ns
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: freshrss
template:
metadata:
labels:
app: freshrss
spec:
containers:
- name: freshrss
image: freshrss/freshrss
env:
- name: CRON_MIN
value: "*/10"
- name: OIDC_ENABLED
value: "1"
- name: OIDC_SCOPES
value: "openid profile email"
- name: OIDC_X_FORWARDED_HEADERS
value: "X-Forwarded-Host X-Forwarded-Port X-Forwarded-Proto"
- name: OIDC_PROVIDER_METADATA_URL
valueFrom:
secretKeyRef:
name: freshrss-oidc-config
key: OIDC_PROVIDER_METADATA_URL
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: freshrss-oidc-config
key: OIDC_CLIENT_ID
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: freshrss-oidc-config
key: OIDC_CLIENT_SECRET
- name: OIDC_CLIENT_CRYPTO_KEY
valueFrom:
secretKeyRef:
name: freshrss-oidc-config
key: OIDC_CLIENT_CRYPTO_KEY
ports:
- containerPort: 80
volumeMounts:
- name: freshrss-storage
mountPath: /config
volumes:
- name: freshrss-storage
persistentVolumeClaim:
claimName: freshrss-pvc


@@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: freshrss-pvc
namespace: freshrss-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 1Gi


@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: freshrss-service
namespace: freshrss-ns
spec:
selector:
app: freshrss
ports:
- protocol: TCP
port: 80
targetPort: 80
type: LoadBalancer


@@ -1,18 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: freshrss-oidc-config
namespace: freshrss-ns
spec:
encryptedData:
OIDC_CLIENT_CRYPTO_KEY: AgAXVFKET2896vplk8EB1NtSBahDBSyXjnoRhyD3ZsIKSyoO2dkxRuBA9h6lYikIDnmD31LBE8Onu1KOnsEOg0HFLYls80VEGZhn3Dq9KmLxZvDvSORLMXa0bXD1oDSN1RWeNTcGBFOswkyXgJITpvU0rt8w5g1isr6A+g0JrLV7BG/OHZxy+D7VE6QZfFu+xlz64J0TUn5z6wHxU5asHzmiBdn3mxhMvoYv8KdvNDfpm2hKPvp2sJL7TBrp/GXqm2rT7kaYjGqc7Jm48PyLif3UajUrGpPR0Hjraox7cYkAFebmtepbqFZkoRZpfwY6fxObdlsgmknam9EE4/7ZFc+QJzXm1utTWL6s8v1WidB8ily1Bh6zXFLv2eLw+7q2V/0xxW+WQCV8jNEQaeqO2kA6Ch2lh4OIK4cp5Qgq6XeX6EoccLykO0toFkBNn4UkJ72CCSi5FZJqPNZU7Gu5FihrpGfOvmbLQzVoHNTyuVF7qtOj11EvI1TDgQLc0tu78cdcB2VemgRLMpoffWCr01toW34FwYrt3gOsZUK+WRoT+TAPhzv2mfHd94BMH2DnoLHIkz+VH5qmR0CsTq8dUYCw5M8AjdaTPw38NqUfbvrbJl5+dSRMxQ4Sm/6GEQPLoM7LEAn/mTSwy27EePJKEfZbhfu9xqzKLQB3VsEF/rdl/IVdsk3KMqZZbVY/AlYJIGf1ohdv3gbSssTmPIDHWhEbXMQv+++kbK+rNqxqq2v7ig==
OIDC_CLIENT_ID: AgBs+WyhEc7xDbkrxL0s+/PbNYbA9NQ0cCgiOH3UrufaA6+0l5obAZvITlq7jaDhKdc32NtKzz9bbB/MrLK0UienxiUg28f/ta/H2jpMzljzroxDIgNylZeNG/txrh8/jGnmbs5pKxbX5GsEzXBaSgb0QJ0IdQyit1W1vv/fFFxifCoABczS7VNfiYynJazp2G+Bq1U5EiyZDrjgu37f0h2v9VwBRf25p5W+v1AjSto73jmyTHlmEVPVlaft+4iBDRdDrarHRkgFCFl8mwIcGNWZreVM1/Uzdc5EZ6AU5gVtDMAKiIUT5X2CznseIbhPDSqc9WNwLKEsKWhXlGg17CpvmjG91P6SUXQsNGUJWGfrOeQuKt2mO/C9OhF+H1HuzSMa1NjgA5VRPeT529MY9Gw4gGO5+4ykpLw1BvHisA5Ia7VqieN6RB9VKKIkKr/ITpwjtkiBmuv3oGL+QKAihUwn0Tso8BhyufMM+O6iLnOCesi9dbgI0DeVEof3U3cQNDc/iuhtaesphK6+rNEq7kEfnXAzVoIfbCTvm1d799WYJSYBB6K/OZ9SGwGIoBcxDeEHw1OvBkqE1g807sV/VW1GbRj97xSXtEh7uJcevei2mBwSIICrmI4Yz3OF8IzVzPYjra/8hiIa19WW7SMMN1/oEZXw6G39zUETgKceV2fleIsUOoNQ+UQ92a6Y2UF92UHeIZVKCxE5lT4gcD5QxAChB/XQOE4sB1LceeCb6QBrGitrdBTpU2Yd
OIDC_CLIENT_SECRET: AgBy4M5GzoYXjDQ12DCgbfJ+mLCInGl73lP+NnpzEcccgqrKbPtjL++8GK758irKhtEvi3A5K/FPEjYHltQcrNcDDTZesmXE/Taiawc/s5o1npP9dQoHdFonKJsTFQ/BeOaoRExNTLC3Zz9Ubu2uw2Dd4FU2dk6bHisjk4kDSNPmCAx2VOhxZ8ujc7dr/krTMdNFNCgAPo5v+x6JUsYBzc5s6ZwEs+NninDuXRw+x6wldvCRascdUUZoa7TutWmtUIMsPTszyHcU9inojs40MFodGhCmjufY+XzjjBk3uiVDJZAiyzDozlz5OnKzf0BKJsiMsXxFmHpeotXZwy2ubROQUdyLnKOjTDfI131lhm0FWr2K08rzjiMls1mv1Gyr/CgzJm++fssFxps0V+9IR8sVh/M47qCW9hbfQM6w9AwzpX6Nc1O9MVxSj5CaqW32YYthXGnLYXYyiizSljsSb0onnwnpub6y5v5YiFNKt0o/6MXAkJZtt5JM0adC44KHSQjh/J0XlDcSg4yI3h7ty/E/KaPSmWe6E+C9KNIn+GMoXQeX4h1IluuiemefccRfJWiAiRZO5s8h0SkMizVyrRQYuywOUFOvB4UW1kMjRY2T8P+ziejtg3OHarMY2+fHpMDE1jugym6ctiBU0NgsKhc1xBrC1dHdNVnBkzUos0bP3lanlrRKbo1wqJSsukPxH4iAvKOkSgnDQogg6RhB7/k1eLqNqCHB2iNn04e47jgSguSfgV22jk7gVNiBRIqcY/g8nIWV4LzOetUuYoXpLxPPg03kXPnEqg1OX48z2RJOKKcmv5RzoN0hEKEc4+5DIlfIUh/IMd3gCpDZHdPp59aJ72MyaOzh0Kyq1GhE9c53Jg==
OIDC_PROVIDER_METADATA_URL: AgBayKIOoT5PVqeWexksEMiiW4oytgM54qygSPkUpRbapso56pMzDwUHpdRib6WfIVUODCy3R6UjIXl47mYnSVjjgSz/boMj5aKVntp/1isP2nC+yl2vQlUWOlNAXBSbss5BAych9NiJT7JPn0SqB7id3ByBP/A9hah4bNkXxioiLGZ57h/qYI3hYLP+22ywfdB051tAZx3ytiZhNAIJUmUjjz4dMJOwxI2jCZ2A7d0liF1TpkSjafHZIOnG05S4AfIRWMeIP2+F3Ar8kdNAtH8bpfmzwWcCho4J4O4UTavR1CHaGxteyJAt56dl2+7iSvtdbMI2QKhr4v4fYKG5Kl2BJ8s0/dvrjZ6tRSengBK0ei6CLoXHo/eEc797gXNWD22Mw+OuhKaEAxjuF9VbcNJbF8kgQSniPBsAKe/NH0B66374iUzfJUa0mkoE8RuvJ/DKlREUMn4oZfxuD/lGVLzOopyQ73M+ukhLScUgqThBZITk67iSxK7HxIuV/unr4jSmtLepH1nNEZ335fqEBOpV2pw92S0Fw0YKSr3iXGy4/x8agBiXCfPOMa7/1B6ekk5gHdi3BBL/PS1NPb/q4V2yGgzPq6f8ZKZjQ3Q7VgCf3x4ybg+unAZlFoC/qV5Bfd3vUNcWXnsFLIXcfykme/NrNp+ScuXUVAmb1bWxMhV60Jb0zXcaC0gSMuSbkFvNKX8BaGr1QFUBLTyg4712vpH8N4F3mYfDTGhzD5QGF1njBkpAuZyeOFRtU/SUsfidgDMhivmgnN41hpPxBo9GmD/MXYpZunXszkAhuJit4RMOHg==
template:
metadata:
creationTimestamp: null
name: freshrss-oidc-config
namespace: freshrss-ns
---
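# How the SealedSecrets in this repo behave: the in-cluster controller decrypts
# each encryptedData entry into an ordinary Secret with the same name and
# namespace. A minimal sketch of the plaintext Secret this one unseals to
# (placeholder values, not the real ones):
apiVersion: v1
kind: Secret
metadata:
  name: freshrss-oidc-config
  namespace: freshrss-ns
stringData:
  OIDC_CLIENT_ID: <client-id>
  OIDC_CLIENT_SECRET: <client-secret>
  OIDC_CLIENT_CRYPTO_KEY: <crypto-key>
  OIDC_PROVIDER_METADATA_URL: <issuer-metadata-url>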

View File

@ -1,28 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: factorio
namespace: games-ns
spec:
replicas: 1
selector:
matchLabels:
app: games
template:
metadata:
labels:
app: games
spec:
containers:
- name: factorio
image: factoriotools/factorio:stable
ports:
        - containerPort: 34197
- containerPort: 27015
volumeMounts:
- name: factorio-save
mountPath: /factorio
volumes:
- name: factorio-save
persistentVolumeClaim:
claimName: factorio-pvc

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: factorio-pvc
namespace: games-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 8Gi

View File

@ -1,27 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: games-service
namespace: games-ns
spec:
selector:
app: games
ports:
- name: factorio
protocol: UDP
port: 34197
targetPort: 34197
- name: factorio-tui
protocol: TCP
port: 27015
targetPort: 27015
- name: minecraft
port: 25565
targetPort: 25565
- name: minecraft-rcon
port: 28016
targetPort: 28016
- name: minecraft-files
port: 80
targetPort: 80
type: LoadBalancer
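# Note: a single LoadBalancer Service mixing UDP and TCP ports relies on the
# MixedProtocolLBService feature (GA in Kubernetes 1.26). On older clusters the
# usual MetalLB workaround is two single-protocol Services sharing one address
# via the annotation below (a sketch; not used in this repo):
#
#   metadata:
#     annotations:
#       metallb.universe.tf/allow-shared-ip: "games"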

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: minecraft-data-pvc
namespace: games-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 16Gi

View File

@ -1,149 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: minecraft
namespace: games-ns
spec:
replicas: 1
selector:
matchLabels:
app: games
template:
metadata:
labels:
app: games
spec:
initContainers:
- name: modpack-downloader
image: alpine:latest
volumeMounts:
- name: minecraft-data
mountPath: /data
command: ["/bin/sh"]
args:
- "-c"
- |
chown -R 1000:1000 /data
containers:
- name: minecraft
securityContext:
runAsUser: 1000
runAsGroup: 1000
image: itzg/minecraft-server:java8-jdk
env:
- name: EULA
value: "TRUE"
- name: ENABLE_RCON
value: "TRUE"
- name: RCON_PORT
value: "28016"
- name: MEMORY
value: "8G"
- name: USE_AIKAR_FLAGS
value: "false"
- name: ENABLE_ROLLING_LOGS
value: "false"
- name: TZ
value: "America/New_York"
- name: VERSION
value: "1.12.2"
- name: REMOVE_OLD_MODS
value: "FALSE"
# Proper curseforge pack
#- name: TYPE
# value: "CURSEFORGE"
#- name: CF_SERVER_MOD
# value: "/modpacks/modpack.zip"
# Generic Forge configuration
- name: TYPE
value: "FORGE"
- name: MODPACK
value: "https://s3.clortox.com/minecraft-modpacks/modpack-latest.zip"
#- name: CUSTOM_SERVER_PROPERTIES
# value: |
# level-type=EarthCubic
# level-name=big-world
# view-distance=16
# vertical-view-distance=-1
- name: ICON
value: "https://s3.clortox.com/minecraft-modpacks/server-icon.png"
- name: OVERRIDE_ICON
value: "TRUE"
- name: MOTD
value: "Lol. Lmao Even."
- name: MAX_PLAYERS
value: "50"
- name: SPAWN_PROTECTION
value: "0"
- name: DIFFICULTY
value: "hard"
- name: OPS
value: "Gritos_Internos"
- name: ALLOW_FLIGHT
value: "TRUE"
ports:
- containerPort: 25565
- containerPort: 28016
volumeMounts:
- name: minecraft-data
mountPath: /data
- name: minecraft-modpacks
mountPath: /modpacks
# Sidecar providing access to upload/view/download raw media files
- name: filebrowser
securityContext:
runAsUser: 1000
runAsGroup: 1000
image: git.clortox.com/infrastructure/filebrowser:v1.0.1
env:
- name: ADMIN_PASS
valueFrom:
secretKeyRef:
name: filebrowser-secret
key: ADMIN-PASS
- name: DEFAULT_USERNAME
value: "default"
- name: DEFAULT_PASSWORD
valueFrom:
secretKeyRef:
name: filebrowser-secret
key: DEFAULT-PASS
- name: BRANDING_NAME
value: "Gluttony Minecraft Server Data"
- name: AUTH_METHOD
value: "proxy"
- name: AUTH_HEADER
value: "X-Auth-User"
- name: PERM_ADMIN
value: "false"
- name: PERM_EXECUTE
value: "false"
- name: PERM_CREATE
value: "true"
- name: PERM_RENAME
value: "true"
- name: PERM_MODIFY
value: "true"
- name: PERM_DELETE
value: "false"
- name: PERM_SHARE
value: "true"
- name: PERM_DOWNLOAD
value: "true"
volumeMounts:
- name: minecraft-data
mountPath: /srv
ports:
- containerPort: 80
volumes:
- name: minecraft-data
persistentVolumeClaim:
claimName: minecraft-data-pvc
- name: minecraft-modpacks
emptyDir: {}
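# The chown initContainer above exists only to fix volume ownership before the
# server starts. A declarative alternative is a pod-level fsGroup, sketched
# here (not what this manifest does):
#
#   securityContext:
#     fsGroup: 1000
#     fsGroupChangePolicy: "OnRootMismatch"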

View File

@ -1,15 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: filebrowser-secret
namespace: games-ns
spec:
encryptedData:
ADMIN-PASS: AgDIH0dJrPiz4D9zqFWoEInHR1f2BSFgmlZz+8nfBZasG0o/FKhdL9NneGedP6WTMkuULBE8hYMZpD9Hl0hKLr1pWAK9JRLGW2CFhIIhy/d9VPpqZhuD9+XFCiRkr5JVBzbvqh7D58BPxak2E7MDqxzUHR8CkNcn02nK49DJYkGOJB5apkS0ZnPeN+sk/DIWqmX8KZOuEDG1iLX8tyBuACKIHN0ziMBpRqtHrIZlzPSynGE+HLPmkUF0MRkArQxyLANAgu412f5//wWuc7Ade7pE75H4A7yd53fKK8MNUwzaRbioPf7XzQwbDXpiih2lRNMov4f6Vih4FELoFAKy/2Law/IIDmgBFEVSIJKYjAIHkPlt3IOdcd26re7U5eQf0LUWU2c2KzPpEBLrwuDj9QVIi0THwR9slaRTwltb8n1+bw67KS6/OhmuUcRmga30aQRs2ZD+oRaaa/ZBn8++dSUnTotskrpV0IS/QysVjUKF8IJoosiv/YOPV+Ucd8DJbZVdW4M+5nW36CIexyH3x00B2DQLYvcfu30ktclyDndtGi/Py9f+SXt9E3G2WylWSxyHJD9UKiDahrJg3crycbHPgafZsDAlWzgB6Lt+aycCpbQvO5XayD1EYllSFKSookpT2nxjNzLpaYADTWoVln881aBp4HlBi0HeIdtwr+uJS6lfw2rX082F/QPZ7tr1zQ0iQ2feSvl0ybmXwnttN6STdK8oYcN7Edbc+dvg7eflag==
DEFAULT-PASS: AgC2U8H0EF+W8Qt7JWDRgfdL7dLTTkeO4s9Z9HiI/xTgwZjiSl/7l2gjfXXkN77J2u+moqFV2as10aWj+FUCS5sWs8H8UNT+m4kLhrpP3gcBAbERSntpgJ2fOzcDifXswlHwQF+AS65KqHjndQP0domA6k6qhlz7VDj1LM5vT6swZwaHq96udRKUgYVpFVIEbxqt2xsSy+O8QRy0O197LG5QteLjFytrYqiQa/PVeDDbP7CWYesTWcZ3qiEvbjnLZhPQuKVqFd3I1JDYP1Z/P27dzcCXjKKVNZS41inySVkn8GjGgNeRJtD0QmL66JCWZcNVZAQ956Wy89b+WiEJQVO18tygE/XP4MAtUqUpXkskU/T2P581AcSmXoIhZoQ9TDuNSRYTxgIdA/3d6vdLK3IUsJYaQ3amPbsVcRskTWNWyKqsbqjI9CJNmi0LBoJoS8/S1SLNnHkFItMcfL3cVnO3/6umTLbcPC8cNBPaOugup6MYvjSLe1Ombl4Z3LFx91HjVY15oGp+Aw+Lkr0PpQ0Zwnmz3tkaNz+ADVnX5h9QlDhlSTicWvhXzWOsBb0+dEFrUBX6ioApPoBhlBaScwc4GitmzcTGFtwNx3AQ04S0QLLaaa0vmkKY0g3R6S4gHT+1B5xrHBQRJ+T+qq8S2th/vwsJ7kouVNojtCEGZJ2/qTrbUJM/Ez15aDhv+oW/6NaQbeHor3FFvaeSMQ3+sfgNLFvd4cK2QWWtyG97EpBoSw==
template:
metadata:
creationTimestamp: null
name: filebrowser-secret
namespace: games-ns

View File

@ -1,86 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: grafana
name: grafana
namespace: grafana-ns
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: grafana
  template:
    metadata:
      labels:
        app: grafana
    spec:
securityContext:
fsGroup: 472
supplementalGroups:
- 0
containers:
- name: grafana
image: grafana/grafana:latest
imagePullPolicy: IfNotPresent
env:
- name: GF_AUTH_GENERIC_OAUTH_ENABLED
value: "true"
- name: GF_AUTH_GENERIC_OAUTH_NAME
value: "Authentik"
- name: GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP
value: "true"
- name: GF_AUTH_GENERIC_OAUTH_CLIENT_ID
valueFrom:
secretKeyRef:
name: grafana-oauth
key: OAUTH_CLIENT_ID
- name: GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: grafana-oauth
key: OAUTH_CLIENT_SECRET
- name: GF_AUTH_GENERIC_OAUTH_AUTH_URL
value: "https://auth.clortox.com/application/o/authorize/"
- name: GF_AUTH_GENERIC_OAUTH_TOKEN_URL
value: "https://auth.clortox.com/application/o/token/"
- name: GF_AUTH_GENERIC_OAUTH_SCOPES
value: "user:email"
- name: GF_AUTH_GENERIC_OAUTH_API_URL
value: "https://auth.clortox.com/application/o/userinfo/"
- name: GF_SERVER_ROOT_URL
value: "https://grafana.clortox.com/"
ports:
- containerPort: 3000
name: http-grafana
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /robots.txt
port: 3000
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: 3000
timeoutSeconds: 1
resources:
requests:
cpu: 250m
memory: 750Mi
volumeMounts:
- mountPath: /var/lib/grafana
name: grafana-pv
volumes:
- name: grafana-pv
persistentVolumeClaim:
claimName: grafana-pvc
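# Grafana maps GF_<SECTION>_<KEY> environment variables onto grafana.ini, so
# the env block above amounts to roughly this configuration (a sketch; the
# client id/secret are injected from the grafana-oauth SealedSecret):
#
#   [auth.generic_oauth]
#   enabled = true
#   name = Authentik
#   allow_sign_up = true
#   auth_url = https://auth.clortox.com/application/o/authorize/
#   token_url = https://auth.clortox.com/application/o/token/
#   api_url = https://auth.clortox.com/application/o/userinfo/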

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grafana-pvc
namespace: grafana-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 1Gi

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: grafana
namespace: grafana-ns
spec:
ports:
- port: 80
protocol: TCP
targetPort: http-grafana
selector:
app: grafana
type: LoadBalancer

View File

@ -1,16 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: grafana-oauth
namespace: grafana-ns
spec:
encryptedData:
OAUTH_CLIENT_ID: AgC4zLWPZrbLn1o6Ehec1O6T6SlxT8OayWxwrsbGZFVBiZj2RBRNx0Rm76+MfuzUX5bgfbk8YQyy+Wi+eR/NzXmqMV+KWy/eZmkM0olGtvPefwH08WoNS5mvHu7C1RRW6CVubJTHmbyhFxiDcHVS7h2XM3STfJ7XjMZZNzYYmNh+EscDVG8IRB4JyvEcRqkIn5nfjDKk2D8Po5BEznh9XzkPAqIfBbNgi1Y/ofk3bGjJr0b12xH1diNe0RCSaz+tBBzs9viKbiOpCSYE+plmBixjBYQXHkErXWbJijWkbzik2T92epf/270jL4wYR+bw7W14VOgxy2x2w0N+rOVzm7BNwFjgbLGr3HReC2ToSx3Kw0UI56n1zb0BS/MaK4OsJa7KGReLdwkRSofqUgX9HnUfDhnJ/2aryc1+C16Vh70l6ZCZHNZr0sdvuFT47YTQ+p8hJAp8Z9addy+WmgGG2HjSBZtFEHlkEcXqpooy5TSkhq5BO8gCHKDOEGbD+i3uuTo9Y2AZK3bPqCYICsKrp51+jhV6zOeQtDpkjO3jKdvh+xZvfoHmydehTITGjRt+/ER3gijm4TcGapa4o2MN/LUZdueGQwLJr+vNwkL7PLGKJgF4azqmCiRc8E4/Em67GMnt9AHv68J0f2Q5g6v29mYnF2kn+mEARN+KVXlZcLIZkXk3Y5UACPIAW2ph9hcFqK8/OFcnH6gXGiUlLWIXWmvk9+yinrptlZ54YyXBWFI497gnHUzunJRi
OAUTH_CLIENT_SECRET: AgBnJPWxgtbgi9Zw34F/pE3Zc0pJaqUb9UG9yc2UIaV3e+g3RFctJdqRWPv0z6Z5diWYYv9ILJuQoVWHTzqglFIf5thAWqCGxs50ntmSrPpz2WzlQ+WyPehXhiTi/3+SwezFbdzuCovR2hKd8gUs4uZkbcIV7NgReI61rnvykfHKCBBSq5szuKTJVaRkDxPlxWjMAx7EBa8k3+68SkHyUBioC1cIA9vzJoLmo+Ab3g2d3Iq3NfZU2gTOfgNB0tP8L/dvr061EuNApkaIhz9YDYK2iB8wb7hHO1FgmmEobyvQXw7DNGvNmCZyCMn9RvxcPpXRfT0Sxz3H5WnKdI6oi6JskPxZsa5cQIlOmKHm2Cd4HN3mepD95PL2Pwb3al/kAqefVRlHXouEIQxoPXWA4Po09dS2D09IljtVOEDzDwk8DYi3b6hC54AnEQT0/aCUcxV55HOuKRCzCywodb2X4/S0HJT+2GfA/nefR4tuMZ+6z+uSbO+6KJNMPSUtgvAGiS5zZ3TlskG9OTeuz2+A3MU/BPP3QS3kSIPX+GaPzNFtm7HIaHDASyJmy5Un+c4PbNfX3ZLxULPDjaPkUWNtBkCBKOArcT7QnHtIS2ygjt6bFrfKKPxFFMMlIgOoRMWf5m991AUe3ZSkxMsXbYbghJMtg0OqXl0Gbm5QNqT9sCtb5F9N3Wm9MGJXmFPNoMMo7sqvuhItTeCzjHWDdjg+HYl/1ByLa9qvWtu+QYzPXlz/WRf+LhiLRtffAmG4MLJFaZiNsHSlyXV91zP1HnT/ZS5r5OlJcxpbQwUNW6fJ34gVnp2w9P6IJwrhsaODnHZDdUM1dEk9sdyxbAKKxGSvMXYwKavg/XVpOzae0BlUX2DzcA==
template:
metadata:
creationTimestamp: null
name: grafana-oauth
namespace: grafana-ns
---

View File

@ -1,38 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: grocy
namespace: grocy-ns
spec:
replicas: 1
selector:
matchLabels:
app: grocy
template:
metadata:
labels:
app: grocy
spec:
containers:
- name: grocy
image: lscr.io/linuxserver/grocy:4.2.0
ports:
- containerPort: 80
env:
#- name: GROCY_AUTH_CLASS
# value: "Grocy/Middleware/ReverseProxyAuthMiddleware"
#- name: GROCY_REVERSE_PROXY_AUTH_HEADER
# value: "X-authentik-name"
- name: PUID
value: "1000"
- name: PGID
value: "1000"
- name: TZ
value: "Etc/UTC"
volumeMounts:
- mountPath: "/config"
name: grocy-config
volumes:
- name: grocy-config
persistentVolumeClaim:
claimName: grocy-pvc

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grocy-pvc
namespace: grocy-ns
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: grocy
namespace: grocy-ns
spec:
type: LoadBalancer
selector:
app: grocy
ports:
- port: 80
targetPort: 80
protocol: TCP

View File

@ -1,43 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: homarr
namespace: homarr-ns
spec:
replicas: 1
selector:
matchLabels:
app: homarr
template:
metadata:
labels:
app: homarr
spec:
containers:
- name: homarr
image: ghcr.io/ajnart/homarr:latest
ports:
- containerPort: 7575
env:
- name: EDIT_MODE_PASSWORD
valueFrom:
secretKeyRef:
name: homarr-edit-key
key: edit-key
volumeMounts:
- name: homarr-config
mountPath: /app/data/configs
- name: homarr-icons
mountPath: /app/public/icons
- name: homarr-data
mountPath: /data
volumes:
- name: homarr-config
persistentVolumeClaim:
claimName: homarr-config-pvc
- name: homarr-icons
persistentVolumeClaim:
claimName: homarr-icons-pvc
- name: homarr-data
persistentVolumeClaim:
claimName: homarr-data-pvc

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: homarr-config-pvc
namespace: homarr-ns
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 512Mi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: homarr-data-pvc
namespace: homarr-ns
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: homarr-icons-pvc
namespace: homarr-ns
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: homarr-service
namespace: homarr-ns
spec:
type: LoadBalancer
ports:
- port: 80
targetPort: 7575
selector:
app: homarr

View File

@ -1,15 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: homarr-edit-key
namespace: homarr-ns
spec:
encryptedData:
edit-key: AgBnP6HGyQv63BuvrbO9JWdDu/aS7GadN+6dJ/4uBziMT6HxvBBbunrO5ZROHD1Hl9F3BSQs1GECkzYTQDVd5Hji93L39InCpo3+G0GGg0m6BH8j5WarheWS4837WynOUOfHncCCtXzG9iRqFZAUKE3xYtbNMULXXBiMyY625aonF3Agqz9MAtz4Dv56N5cPE4C4Ck0VPi4POQCP6RezHteCktlBBwpbPAem61mUUx+P+V7hEX3/TItl0j4HOvC6ttbHtVLPUwvHHdBcH/0stKhPben4Hnp7qLZe1A16+RCAbaAYF2TS9JbrQsCwtDq8mkQeAQg1sU0S1092b9OZKk9s1QpGGlKuH7G1iwQcaTpdVIj57QVIOPNoGWuuOiVzWe8hf+b1jITEJNT7VYWmBYcIZjLakYFr8zbkWPlvinkTv0GHo8uBOWsqLF+w3ekYk9HNSJ6dFEBpeMpvllXcbKnggb222otyqJ2Z9Kh2svIBqq2+0VulhFtEfjXFYLOMHqi+ZUz/MkPuREevDQXjwJTBoHD5OaB1OFRo6Kp1jyLogkTnUO/j2qv5DZDkofE0ha4PR9/9olqoYzTfs0IOa2+yUQZJ0OJ5dQbrnxNqbUWjCrVn6xVeCqKrZzsK+96wJVBgiPBzruO0y5ZYreNyW0GdBDS1ubvkkv8eMKbVOM+GTEtC1AburtCwuVYwOxgOJ31zudWmDzqEnrDK1Qp91eyzk4W2J+TRd52fxLQUukq9SA==
template:
metadata:
creationTimestamp: null
name: homarr-edit-key
namespace: homarr-ns
---

View File

@ -1,46 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: immich-machine-learning
namespace: immich-ns
spec:
replicas: 1
selector:
matchLabels:
app: immich-machine-learning
template:
metadata:
labels:
app: immich-machine-learning
spec:
containers:
- name: immich-machine-learning
image: ghcr.io/immich-app/immich-machine-learning:release
env:
- name: UPLOAD_LOCATION
value: /usr/src/app/upload
- name: DB_HOSTNAME
value: postgresql.postgresql-system.svc.cluster.local
- name: DB_USERNAME
valueFrom:
secretKeyRef:
name: immich-secret
key: username
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: immich-secret
key: password
- name: DB_DATABASE_NAME
valueFrom:
secretKeyRef:
name: immich-secret
key: database
- name: REDIS_HOSTNAME
value: redis-master.redis-system.svc.cluster.local
volumeMounts:
- name: model-cache
mountPath: /cache
volumes:
- name: model-cache
emptyDir: {}

View File

@ -1,55 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: immich-microservices
namespace: immich-ns
spec:
replicas: 1
selector:
matchLabels:
app: immich-microservices
template:
metadata:
labels:
app: immich-microservices
spec:
containers:
- name: immich-microservices
image: ghcr.io/immich-app/immich-server:release
args: ["start.sh", "microservices"]
env:
- name: UPLOAD_LOCATION
value: /usr/src/app/upload
- name: DB_VECTOR_EXTENSION
value: pgvector
- name: DB_HOSTNAME
value: postgresql.postgresql-system.svc.cluster.local
- name: DB_USERNAME
valueFrom:
secretKeyRef:
name: immich-secret
key: username
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: immich-secret
key: password
- name: DB_DATABASE_NAME
valueFrom:
secretKeyRef:
name: immich-secret
key: database
- name: REDIS_HOSTNAME
value: redis-master.redis-system.svc.cluster.local
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: redis-immich-secret
key: REDIS_PASS
volumeMounts:
- name: upload-volume
mountPath: /usr/src/app/upload
volumes:
- name: upload-volume
persistentVolumeClaim:
claimName: immich-library-pvc

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-library-pvc
namespace: immich-ns
spec:
accessModes:
- ReadWriteMany
storageClassName: longhorn
resources:
requests:
storage: 100Gi

View File

@ -1,57 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: immich-server
namespace: immich-ns
spec:
replicas: 1
selector:
matchLabels:
app: immich-server
template:
metadata:
labels:
app: immich-server
spec:
containers:
- name: immich-server
image: ghcr.io/immich-app/immich-server:release
args: ["start.sh", "immich"]
ports:
- containerPort: 3001
env:
- name: UPLOAD_LOCATION
value: /usr/src/app/upload
- name: DB_VECTOR_EXTENSION
value: pgvector
- name: DB_HOSTNAME
value: postgresql.postgresql-system.svc.cluster.local
- name: DB_USERNAME
valueFrom:
secretKeyRef:
name: immich-secret
key: username
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: immich-secret
key: password
- name: DB_DATABASE_NAME
valueFrom:
secretKeyRef:
name: immich-secret
key: database
- name: REDIS_HOSTNAME
value: redis-master.redis-system.svc.cluster.local
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: redis-immich-secret
key: REDIS_PASS
volumeMounts:
- name: upload-volume
mountPath: /usr/src/app/upload
volumes:
- name: upload-volume
persistentVolumeClaim:
claimName: immich-library-pvc

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: immich-server-service
namespace: immich-ns
spec:
type: LoadBalancer
ports:
- port: 80
targetPort: 3001
selector:
app: immich-server
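# The DB/Redis env block is repeated verbatim across the three immich
# Deployments above. One way to de-duplicate it is envFrom with a shared
# Secret (a sketch assuming a combined "immich-env" Secret, which this repo
# does not define):
#
#   envFrom:
#     - secretRef:
#         name: immich-env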

View File

@ -1,14 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: redis-immich-secret
namespace: immich-ns
spec:
encryptedData:
REDIS_PASS: AgA87rwcuMmDmgvDRl6pcObFFNBPKSH1qCkXUFgIqB/jX/ursxPgP+5f9ANY7PZjZTJ3QSAenJKPKUu9ER5B04o9b09EIcSpTQ0eVQRl6jwMRRzCbFWedb1bsNPuNyQBaf7IhaLshfQPSsjamp4oAaczLjbQPs/musn/3TUYVThIdgWBltv9i/12+BkbA98sS3gsMVWyP+cCcVQ+mMTGNsLZbxP1XC50yAAWifqJk6NbT+m9CA1wnesgegyr1W7KUGxudKnRA7iaGiP+fC+LbLIbD63tkme6/65b9x5qXZLM9qpiBEX+Yrv7YTn+ZJ94KwMnDjV8Y3Izom4etOawnLaRIIal/PGJPjSLE+PqtVRKXpTO8I3ExKSHb3MfLpfqTQ24N1yoNOnYu6dv2Rhd0Q9lMA6RBX4XUfsjYxHwIWyN1HhdAkbAS+ZqIlcnzT/rVIkkLcU/3/2Ptjj1IRDHFZplibUTbmkiKBvSDeOWDDRXC0FPvMegcfv2mYXY03W70N1uW39JVd0hcDhMxVaaW7yB7rmNEdOpFmpSPBScNtJj7bjEkAQCqXfqogclPs7FJOkrEJKK92Mon8ZMRdeD7GAbh4UqiRIe/SnjD2PsxWKDIMX3uqHN4PpxtsI5F3cY8mQNLG9nP4QzS5b8uU3vfJ4aSX2WpY7UhCXZ1ZuZDMNUDyQ9ULNcFh0FAkB3KzFi35Kqlxf6CsiY2pkxmtHm4w1WJkq09n2iNlsORJayzwDu6Q==
template:
metadata:
creationTimestamp: null
name: redis-immich-secret
namespace: immich-ns

View File

@ -1,16 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: immich-secret
namespace: immich-ns
spec:
encryptedData:
database: AgA+Vgab29fZ+NPF1PxzvcT3StAlEiOOKO77tYH+IgfKhdK7wTP4q+OVdV6gWPahK1ssZ8lPISml1HDMPx/IIlCYHmp1xi+wtoOgvyOGq5/8czupMQ4dLwiMVWFyRnCUm94119dCA9KImIqyhrNZ/FebqrcqvykI3h8/XDGCZujjMlHhnhRSUF3AohL3cW72tnZkDeSKebp1Mkmi0LEij2v0/+dZXuIEsfLPVHgxJKvCfPX7ND3TigBlFsa1VQOSZY19MI283rS9keqX0pFP+h0LAT6iGw/4p9fOjVYPNZySVn/z/XXcxnKjO477edJp9TGb+xd1m/kSmUhKF2w58jkKoZMlUwwCxteh9H1zj9rHMQfSmVG+tg9j5WoSsfIaWbDIFIf91l07XSwa8MGJ91NE6nvHEgf7C/OtZ52SjHTKEielLHsvTPRn2lIi14P9tMadI1z11POTf416CIcB2fXzuu619FHARSJseBpBLYwPM5pSpF0XKqTl7mW0kypa46kikjGou6CuJWhrFkh8Yqpth6hfsIV0BkLxXUpoWW9/dMQztfnuB7OvogNUJRTn+g9tzGLyY5bWddokV9s6uxyyaDAi9wPe48HRhX6bGwOgEPdprV5VRSuXu7A2g2YGYsxvvsEr9dXZA3rY9dW63wAzIhydxO8i0+9JHd9CMKohj60S5Llh402p4fDm3JIXchpeNwJzyo4=
password: AgC68pWUzY4eghLcYSxEkwVtBL7BlQ8ytG11hk8NuGcPK+B9kA0VFtw5gFTYMIb0UL95O0BN/L7A6O7oXZm6skWlwOaYmUUOhdCnws1vRA7RamA+gWiT6qV+aFVdeiWLm2pgTdwquqB/Ky2/K3FF2tLoA2Gmp+uGGbet8txMb5RlCWA5jdb6xqsszCFu8NKpcb85kaRtBAP1AzXwWWnP1E+ITM8FjsL1QXlwkxra/uChN99w6Sc66GR8VUb3M3lmtv26AX2hHhqOeWNNJzIbWpmThS+DuluopF4UF+rEixTnR5jBtl8Let6ZA/UwgZ0sfBOijFLyoSFK0ly0f1p3bDH7jtgL2f7OQNPv/VkY6RKi5LViE20m2fYKmt2Fx+FdrIAw64jK3fhLuWF9MKuHOLhgbcrpCvuIMcR1P+/TEPoOrwLy8qzSyGlHZlYHo2m16FqdHqvwHF2vd3A2OnblBx8RN51Hxr0PaRb11FxGQSdQgVU4IoQp0GlvDrhzRXHU1g4G7BnG7+fQpFHujw5QB0rrSLP8WgfWkdYOo6E7xF5EXZ+E2vWsRPRJ2bkVH0mywIo8BC1e7WCR28uLK29e2kBMxiwzDxu+7x/g8rbXxLZGVakEhvZMlWPUSBpcU6rEdW1x7+TEJCGxxBUf3/e6K60MqvOQIe3gRrevY8DddkCFbi6+ZIPmTpd95K9MwnwDDWub+CzKZaWBn6+23NMiBkMa2mgFIWn0QMxEtazTWwJITw==
username: AgA/sz7ukcLAtrSfiGncgMC/VkekQYAYhUmsVTR/sS9di8gv98+pBZbC2i1CC+Qy0yagVEmpstqD46AlkI4d/38S1YLoEolJomEn8KUcdvle7RXK5d+HXSDQCbWdhdhJsbw094rLd2pPzJ1ykVpJglbg+Ec9pzydorjS5LA8vXyujmH3YXW3OU2GCI+B8rgiedetlP6zyZciKuSNd/yDPB7cYzch0lmheGHREulvAzXE6xPv4hiyZtY0FA26zjixtQjW/CJnmwzD6/F1MBZWXtColxZob6I9I5DY4zGawNgS8n4qF/bRoIr75LYkD77KEfBWba5QkQcfnvsEmJWKFmMBchdrM8+wHulgElzTRn8HIfaslk6Aq9RBasXEBDtumBgLiOVCr4TNNX6RHNooyF6uc+Ms4zTdTsibBmMs3X0W8ON1qZx+oXf5M7QW3x+rz+cl7o1TQUsGaHeAcLjh1xGJWddSo1gRL8kqX7wlVucm2LZwIwdWnGT+Bp97FJmJ+R+xgjrmzy9lhboSK58LnpHk65psIngp0XCZ6b3pNrKbDc7H/v8EAjElSAhTGwX7nIwZ4jGCdgPICcX0FtWW17nlJIXJoHmQL08fPa7dqqkpx2JgLQ2E19TywfItxxRApYtRP2AXuf53XLiyQjDgo6STldASysj4MgpJti0lKZNUQkK2QedaXKhyLO3/n53SADSac+P8s0E=
template:
metadata:
creationTimestamp: null
name: immich-secret
namespace: immich-ns

View File

@ -1,61 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: invidious
namespace: invidious-ns
spec:
replicas: 1
selector:
matchLabels:
app: invidious
template:
metadata:
labels:
app: invidious
spec:
containers:
- name: wait-and-die
image: alpine:latest
command: ["/bin/sh", "-c"]
args: ["sleep 21600; exit 0"]
- name: invidious
image: quay.io/invidious/invidious:2024.03.31-08390ac
env:
- name: INVIDIOUS_PORT
value: "3000"
- name: INVIDIOUS_DB_PASSWORD
valueFrom:
secretKeyRef:
name: invidious-secret
key: invidious-postgres-password
- name: INVIDIOUS_HMAC_KEY
valueFrom:
secretKeyRef:
name: invidious-secret
key: hmac
- name: INVIDIOUS_CONFIG
value: |
db:
dbname: Invidious
user: invidious
password: $(INVIDIOUS_DB_PASSWORD)
host: postgresql.postgresql-system.svc.cluster.local
port: 5432
check_tables: true
hmac_key: "$(INVIDIOUS_HMAC_KEY)"
pool_size: 100
statistics_enabled: true
admins: ["tyler"]
channel_threads: 2
channel_refresh_interval: 15m
feed_threads: 2
banner: "Lol. Lmao even."
default_user_preferences:
default_home: "Subscriptions"
quality: dash
save_player_pos: true
port: 3000
#external_port: 443
#domain: watch.clortox.com
ports:
- containerPort: 3000
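# The $(INVIDIOUS_DB_PASSWORD) and $(INVIDIOUS_HMAC_KEY) references above use
# Kubernetes dependent environment variable expansion: $(VAR) in a value is
# replaced with the value of a variable defined earlier in the same env list,
# which is how the sealed secret values land inside INVIDIOUS_CONFIG. A minimal
# sketch of the mechanism (hypothetical names):
#
#   env:
#     - name: DB_PASS          # must be defined before it is referenced
#       value: "hunter2"
#     - name: DSN              # expands to postgres://invidious:hunter2@db
#       value: "postgres://invidious:$(DB_PASS)@db"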

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: invidious
namespace: invidious-ns
spec:
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 3000
selector:
app: invidious

View File

@ -1,16 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: invidious-secret
namespace: invidious-ns
spec:
encryptedData:
hmac: AgBnXw0QxXIHdSyv1jruFE7gKlnWZwHjIF6yqpx/VwXdV1G6WWCfvv+ZMA9RNvnFGP3QmNttNpErFXgpGJKP6a9gr7nIK9ilPgm9oZZP0gt8MDnNSm/17sLeMv0X84uT5SfKCbzukTPKQj2NICWLYO9M3XV5x4CXNi+1E7r+F5qtAYV/V0ZPdo35QHALKjDYv5hofsvJNaUXxamMGzMjrOBtMZKDAGx4K0ftOVr348IbKb8R3WgSrJDN2YQdk+8U1lyRZoK2yBsMYEx1/z3/YsYF/ZvE8Z6tPnRCImJSr+jkEDde0So0DkXTESdBKVnkRQ2e31pyRHGu7+z3dqZlNITFbVt3YN54+P7jDMGEEbPEgVfjJTk/MhqsfaY2WrqONXJvBFcsfVooDXG3rQinG5UkPUBLWPCnInD1mvbSyN5whC7oVh5+qwCrEN3WSsEpMUig8re10sVDwmwXehf0TqWwsIPdT/4OxYnBjzjqJ5HYopBHqCcHxeHD6o+6fNjZPSofNo2YkIX1yI+9laSjEHBmIwdFBCty10yaDsF625X07zlqFBMzSaPRcK3MVReFfUrI5w7mZuM+bzT4OG3Zf4bolQp18glzltSPxWPOsc7RRRImkcjf+PkyXmGVwZ2oPXISX+8xuOIuxhMMGAke0a7b8R7hNb/vvZ6dbtStMwZWUd0IB3Rnmb8rWmdy5qHoANYbmVmwTfcDSKxp0hqfoPNYBG7xJKAg3FjdoYjcmVmbAQ==
invidious-postgres-password: AgDCqXfmNpRx1XQeKqVrXw7u9BXLvoyWiy16S3H5MgGf7SkBffIM9fbE3bFsOI8ow0obxd1vJRw/7XZtFoGYwumoGvFLU/5N1AeluHLD8c6muBNEH7hBQmXj7rGlZ2PGKIZ+C0iqMLrt0xWpiPsPKuSxeXBwyTuZpdcw5PpTQ9N6pWhLyAM5Aw7BHXzWN3PiH4dplWnYcilj0MkNAueTwQtwksHrmPrA7ezE965adfhWzn+IWS0Rco5/QqNMArmFQqYKNkfh0mkCKz258TOLGGbznNbvWU5PQklElBUTqB2r1nJc5nYdAN0cOYYRbXhql5s61Q0S4REXG0gZVfqZMxGFpomeVx09tQRbYHKW/ptp4HKb0x2GbA/Wk1qcvvHAOqhU9f1/+MhIeyUShNeQdTthbm2hnS3Z46KPw0EEdLuSo9xG8hu+saak/xIs4bOaKbtkjSqdeTH3UzEKCjK0bQDoB6JvS6tq+CVzxoUGVYYDzbS0ADDKgdVGkOsGzVswtUOo7yYzOY9jLHanbMCZjvDfOByyYdTnegtS/iIExCPhM0V/9WzY1Y1/crX2RIgdWzTsV2djG24/tZvIggMTZE3PZH83pEduWzcMyi4JED/OYCaWlJRWFqhq+3g/K/0DgM3YPDRwul3yGhoKiWr3bRDC2RPMRTlINd10ctocnDupV1yxFzgLPimrG0LLxcmk2foRkTeJ2d/3LtjN0HfvmLSvVKrAOUDOTVcOsenoyVauNg==
template:
metadata:
creationTimestamp: null
name: invidious-secret
namespace: invidious-ns
---

View File

@ -1,387 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kubernetes-dashboard
namespace: kubernetes-system
spec:
chart:
spec:
chart: kubernetes-dashboard
sourceRef:
kind: HelmRepository
name: kubernetes-dashboard
namespace: flux-system
interval: 15m0s
timeout: 5m
releaseName: kubernetes-dashboard
values:
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default values for kubernetes-dashboard
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
image:
## Repository for container
repository: kubernetesui/dashboard
tag: "" # If not defined, uses appVersion of Chart.yaml
pullPolicy: IfNotPresent
pullSecrets: []
## Number of replicas
replicaCount: 1
## @param commonLabels Labels to add to all deployed objects
##
commonLabels: {}
## @param commonAnnotations Annotations to add to all deployed objects
##
commonAnnotations: {}
## Here annotations can be added to the kubernetes dashboard deployment
annotations: {}
## Here labels can be added to the kubernetes dashboard deployment
labels: {}
## Additional container arguments
##
extraArgs:
- --enable-skip-login
- --enable-insecure-login
- --system-banner="Welcome to Kubernetes"
## Additional container environment variables
##
extraEnv: []
# - name: SOME_VAR
# value: 'some value'
## Additional volumes to be added to kubernetes dashboard pods
##
extraVolumes: []
# - name: dashboard-kubeconfig
# secret:
# defaultMode: 420
# secretName: dashboard-kubeconfig
## Additional volumeMounts to be added to kubernetes dashboard container
##
extraVolumeMounts: []
# - mountPath: /kubeconfig
# name: dashboard-kubeconfig
# readOnly: true
## Array of extra K8s manifests to deploy
##
extraManifests: []
# - apiVersion: v1
# kind: ConfigMap
# metadata:
# name: additional-configmap
# data:
# mykey: myvalue
## Annotations to be added to kubernetes dashboard pods
# podAnnotations:
## SecurityContext to be added to kubernetes dashboard pods
## To disable set the following configuration to null:
# securityContext: null
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
## SecurityContext defaults for the kubernetes dashboard container and metrics scraper container
## To disable set the following configuration to null:
# containerSecurityContext: null
containerSecurityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
capabilities:
drop: ["ALL"]
## @param podLabels Extra labels for OAuth2 Proxy pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## @param podAnnotations Annotations for OAuth2 Proxy pods
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute"
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## Name of Priority Class of pods
# priorityClassName: ""
## Pod resource requests & limits
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: 2
memory: 200Mi
## Serve application over HTTP without TLS
##
## Note: If set to true, you may want to add --enable-insecure-login to extraArgs
protocolHttp: false
service:
type: LoadBalancer
# Dashboard service port
externalPort: 443
## LoadBalancerSourcesRange is a list of allowed CIDR values, which are combined with ServicePort to
## set allowed inbound rules on the security group assigned to the master load balancer
# loadBalancerSourceRanges: []
# clusterIP: ""
## A user-specified IP address for load balancer to use as External IP (if supported)
# loadBalancerIP:
## Additional Kubernetes Dashboard Service annotations
annotations: {}
## Here labels can be added to the Kubernetes Dashboard service
labels: {}
## Enable or disable the kubernetes.io/cluster-service label. Should be disabled for GKE clusters >=1.15.
## Otherwise, the addon manager will presume ownership of the service and try to delete it.
clusterServiceLabel:
enabled: true
key: "kubernetes.io/cluster-service"
ingress:
## If true, Kubernetes Dashboard Ingress will be created.
##
enabled: false
## Kubernetes Dashboard Ingress labels
# labels:
# key: value
## Kubernetes Dashboard Ingress annotations
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: 'true'
## If you plan to use TLS backend with enableInsecureLogin set to false
## (default), you need to uncomment the below.
## If you use ingress-nginx < 0.21.0
# nginx.ingress.kubernetes.io/secure-backends: "true"
## if you use ingress-nginx >= 0.21.0
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
## Kubernetes Dashboard Ingress Class
# className: "example-lb"
## Kubernetes Dashboard Ingress paths
## Both `/` and `/*` are required to work on gce ingress.
paths:
- /
# - /*
## Custom Kubernetes Dashboard Ingress paths. Will override default paths.
##
customPaths: []
# - pathType: ImplementationSpecific
# backend:
# service:
# name: ssl-redirect
# port:
# name: use-annotation
# - pathType: ImplementationSpecific
# backend:
# service:
# name: >-
# {{ include "kubernetes-dashboard.fullname" . }}
# port:
# # Don't use string here, use only integer value!
# number: 443
## Kubernetes Dashboard Ingress hostnames
## Must be provided if Ingress is enabled
##
# hosts:
# - kubernetes-dashboard.domain.com
## Kubernetes Dashboard Ingress TLS configuration
## Secrets must be manually created in the namespace
##
# tls:
# - secretName: kubernetes-dashboard-tls
# hosts:
# - kubernetes-dashboard.domain.com
# Global dashboard settings
settings:
{}
## Cluster name that appears in the browser window title if it is set
# clusterName: ""
## Max number of items that can be displayed on each list page
# itemsPerPage: 10
## Number of seconds between every auto-refresh of logs
# logsAutoRefreshTimeInterval: 5
## Number of seconds between every auto-refresh of every resource. Set 0 to disable
# resourceAutoRefreshTimeInterval: 5
## Hide all access denied warnings in the notification panel
# disableAccessDeniedNotifications: false
## Pinned CRDs that will be displayed in dashboard's menu
pinnedCRDs:
[]
# - kind: customresourcedefinition
## Fully qualified name of a CRD
# name: prometheuses.monitoring.coreos.com
## Display name
# displayName: Prometheus
## Is this CRD namespaced?
# namespaced: true
## Metrics Scraper
## Container to scrape, store, and retrieve a window of time from the Metrics Server.
## refs: https://github.com/kubernetes-sigs/dashboard-metrics-scraper
metricsScraper:
      ## Whether to enable dashboard-metrics-scraper
enabled: false
image:
repository: kubernetesui/metrics-scraper
tag: v1.0.9
resources: {}
## SecurityContext especially for the kubernetes dashboard metrics scraper container
## If not set, the global containterSecurityContext values will define these values
# containerSecurityContext:
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true
# runAsUser: 1001
# runAsGroup: 2001
# args:
# - --log-level=info
# - --logtostderr=true
## Optional Metrics Server sub-chart
## Enable this if you don't already have metrics-server enabled on your cluster and
## want to use it with dashboard metrics-scraper
## refs:
## - https://github.com/kubernetes-sigs/metrics-server
## - https://github.com/kubernetes-sigs/metrics-server/tree/master/charts/metrics-server
metrics-server:
enabled: false
## Example for additional args
# args:
# - --kubelet-preferred-address-types=InternalIP
# - --kubelet-insecure-tls
rbac:
# Specifies whether namespaced RBAC resources (Role, Rolebinding) should be created
create: true
# Specifies whether cluster-wide RBAC resources (ClusterRole, ClusterRolebinding) to access metrics should be created
# Independent from rbac.create parameter.
clusterRoleMetrics: true
# Start in ReadOnly mode.
# Specifies whether cluster-wide RBAC resources (ClusterRole, ClusterRolebinding) with read only permissions to all resources listed inside the cluster should be created
# Only dashboard-related Secrets and ConfigMaps will still be available for writing.
#
# The basic idea of the clusterReadOnlyRole
# is not to hide all the secrets and sensitive data but more
# to avoid accidental changes in the cluster outside the standard CI/CD.
#
# It is NOT RECOMMENDED to use this version in production.
# Instead you should review the role and remove all potentially sensitive parts such as
# access to persistentvolumes, pods/log etc.
#
# Independent from rbac.create parameter.
clusterReadOnlyRole: false
# It is possible to add additional rules if read only role is enabled.
# This can be useful, for example, to show CRD resources.
# clusterReadOnlyRoleAdditionalRules: []
# If the default role permissions are not enough, it is possible to add additional permissions.
# roleAdditionalRules: []
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
livenessProbe:
# Number of seconds to wait before sending first probe
initialDelaySeconds: 30
# Number of seconds to wait for probe response
timeoutSeconds: 30
## podDisruptionBudget
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget:
enabled: false
## Minimum available instances; ignored if there is no PodDisruptionBudget
minAvailable:
## Maximum unavailable instances; ignored if there is no PodDisruptionBudget
maxUnavailable:
## PodSecurityContext for pod level securityContext
# securityContext:
# runAsUser: 1001
# runAsGroup: 2001
networkPolicy:
# Whether to create a network policy that allows/restricts access to the service
enabled: false
# Whether to set network policy to deny all ingress traffic for the kubernetes-dashboard
ingressDenyAll: false
## podSecurityPolicy for fine-grained authorization of pod creation and updates
## Note that PSP is deprecated and has been removed from kubernetes 1.25 onwards.
## For 1.25+ consider enabling PodSecurityAdmission, refer to chart README.md.
podSecurityPolicy:
# Specifies whether a pod security policy should be created
enabled: false
serviceMonitor:
# Whether or not to create a Prometheus Operator service monitor.
enabled: false
## Here labels can be added to the serviceMonitor
labels: {}
## Here annotations can be added to the serviceMonitor
annotations: {}
    ## Optional containers, e.g. for auth addons.
optionalContainers:
enabled: false
containers: []

View File

@ -1,44 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: memerr
namespace: memerr-ns
spec:
replicas: 1
selector:
matchLabels:
app: memerr
template:
metadata:
labels:
app: memerr
spec:
containers:
- name: memerr
image: git.clortox.com/infrastructure/memerr:1.0.65
imagePullPolicy: Always
env:
- name: DISCORD_TOKEN
valueFrom:
secretKeyRef:
name: memerr-secret
key: DISCORD_TOKEN
- name: S3_URL
value: "s3.clortox.com"
- name: S3_UN
valueFrom:
secretKeyRef:
name: memerr-secret
key: S3_UN
- name: S3_PW
valueFrom:
secretKeyRef:
name: memerr-secret
key: S3_PW
- name: S3_TLS
value: "True"
- name: S3_BUCKET
valueFrom:
secretKeyRef:
name: memerr-secret
key: S3_BUCKET

View File

@ -1,18 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: memerr-secret
namespace: memerr-ns
spec:
encryptedData:
DISCORD_TOKEN: AgAF5G/5hpgBKqsWSRhOVRfy35e1PKw04N0+HaeA6LwyutMAMkB7PuV9nkja4g+2dhn4uDFZh2f0u1V5243mKWtN5aj6k2C85AKOa22f0ht58BuADOb7n/dOZzpwpt1yPhGmxsC8JMBlDzB58dfM+bAkFoSkAzoYoemyulf1NZ81aO9MIvRc7begtfYbReAa/+bQgDZn+tBloK2d5SnBwjFW41HChTSxiUYd6ZY9qTBohlSNHRpn8pb8kUIJCGxad9NJeN0IYH9CQjam9PFOWzGB3B87nnRUomFk4YvyEPshhkzVuqmyFGdkS7ZGKDDdabjdDIGBCiC+2euAlo2fXw94Iu2e3zFg78giu297UU7P3k485M8K3xqByAZ74dQyGjdiLstydooIJOVZn0FhBQ03v8gNAtjwJpO7R0yVSppMenzDC3bO3FR/bhiIeZ2XSvnLWqgf5vDCiBz8BoyUrYpeHZl1SPyQqI3Q3qkJlPZ4R7zrlrJcYE8lPNrySYUInQpJGZoOSLZAjCmE8GabRP+3MxkIZ+C+bHic43sQQIHtThq+d/UV8KCd+bUsps4T7e8GufMhOmlFvnZRQZ/l9bWKlLE9MAzfbHGYnC91oKM22QbLFcRo2va+hbb75t3s4YZkaQCR7sMLNMbbZNQAi+q7NGr9fh9JacU8ztIGJWlsZGyGCCRuu920qWTDlmvilmGdcN+6atKJobKakNWwsK6TQS9P2yXYRlW80yfPg0RQblZ38H1q7XHzdm9kRkPcAgDGxIrjglZ7jOps27VSbMBCcbvQ8uVp
S3_BUCKET: AgCZHNXZo/E1K3fgL4APSsPprz9jxoDxDJuzN26QMzaZstfdlJPnLHtjuY9BXnNS5kHp2qlgiSzzQr3iC5kSyGYyiwgp9ZJmtnRSQUDPf1l/b4LTgwuyME3UkjJyft43+qEZ6MukhkduED+sAgIQBj9e80tgECp+n9dcZSdxns4LJY+OuFHMbhMmGWR1qKSohpQeZvkQtOknKrJbdEMuYFgsi7Xj54MUgS33qzHktkZtRqSX0E1yKYRIy5NrUHilC6rJoSHcqn4GEvqGuZdoxUQhdTD95CVA4twWkoMNDZhv2/1G1l5cWlq6MdWKg+aYW92sZoLwfr2J6aO2im5S054TUunXGHIsKtrLKKACJ2+bnub3TrGwby+H3KkN5aQBvXICU0SyL81lc9JJc+88f/J1iHCfjKSFzuMmh1jiW/47b+6BzrMuquEMdkI8BXkZH7xQX3qEhnxCUqaM+A8otT+5wryBB46fmGdXudLJEMhHtr71pYWd7YuuvGoLX0Z9L6w3hIEeaM+fEaPRtiBd0Qcxyr14naqW2DidFQ4NYsQ+PuhJPnHkmgEN/B7+VOuT5CU8ckWElemj0ma/hOwF2i4ETooZd2uRgSWHF9efo70ZsKQnA7orfgEUkKKl9uqOR7UGBd/0GHKrSNL8Ks+jvu7hO3CbKpmK8ygFrNnUWPH++/EbhbCPpUhV3sTWNjDUBftITAhKlg==
S3_PW: AgAcmFhckXAKbaT2GE9GbtHftKj/t0D7sY9qZphtjT9PiMkNLA+xlqwtd8sAproOom+QOmw7ZatpGQL8iBm7MqzkMzlUpZxDKeim6tWCIjBsohpCAxJvUET+FJONWBHbGORs6d4B/hLp2mzOLnuE55hF0Ig8x6WwneSTJPVbSR1aaHMzq3JqYPOotb2n1RYiUeyA6fXfDdQEhqwGF1pLkI8DZk5Wyp55BC2hUrR53X9w/y2NKOfi4dfrzVDYgUP53mN31Q2lWRLCiXOrwbu48m7AyLA4zA6ZzeHr9GqbczJ8njhNpX0nRP9Xf1v/M93yAUMqxUU4VJUz85wa2VKzbf39S1tMi2YNNPw1X2QRjFv7wNfNLoSi3XqKx1zaLk92ZhgYj8ZCCw+8PrxBhQodPhFOLs9Yqrs9nsEekZFn+6MtCDdhU5CMyTtmAo1018WgT4jgocukclcmktwku5znpjkH9FC/0gq456fSfNt3KRF44Nny0Z9lY14YqliLbJidtQ9pl3x+UGV1ah8ssDhx1BB4mZoJSnrA54SeWQeVr+P285CQRhZkKRhdtRFCFOU7WeH5sOCwNfct9bG1O6uU5sEITxTcTrJUFcHHL5w0O3VdhhDgZYtdhDTHfxflKRxMyt1BHfUD5ezgT0387wvtPBX1Fodv+FIiTXE/43kYZSyLP/hZ7aFovpwNGW9p0y8K/ZYc23nV9qRlFGOmAaxdDonhy72ogHku9XlxyQeF5csvkYvd3y/WELly
S3_UN: AgBVevUOoVoMDxXFZxairFKCWbmB6bX4qh06Cv9Tg3bx3zP7nu9Qx33atuQ1bGU5ApQ3pbCPACmnqYRluukMcmGdI/1wwZqmGwE02W62a/re5sVeNgObwVxPDHDsmIEP+cF6rLd9wg5GKrybVjt+MpG9O8JVgnHbsin2AhExrSm1d9MLzOIYum21q4wfPqsfAfaVouCLBqQGB26Wn5D0/cUf8svARlF9DEtDcmKKfmCH0UOY653cTNNHlU7T1JLk2Nl8cT2Ev7Jvq9quyfYlcNJHFSXUTvn7WtGHl5u+id0pd0BF+7a48JqkzjGjACHh7ye0bXEJK5YgE6cLUzfh2UY9wniIHl9BtLuFWxTbBzn7XDsNf8faJ1nft9ZFUQBlIwHYaPwxJA6lfEvKoTc1JJOVdIWqpKzq+PNc8maChxbINx0ZTe2uXf8gcewkiQSZixV98iqx2a9VLZNOH/VCWbsrj5o8R5a2RxtiFWPbgHMGCTd43wUsryOsgOz0iqmny9ExQBBndIrWk7ZD7M5Oj6UG0sxJOnnYbFxlHEjpZJRTZ1o9bDqOTqXzlqS/Obu8MEXDK7fBzrMasVUHbfpTC2f9irDOlqZTL/wGlTv7RrMMD3dA82TsBej2R0KyI23w4xubJ97vgaQ/my4S4A0nR4GU93tB/Q2XrP52Qz2JYzuH5EjGyC19pTaFIKxkIPUEydl1XFFiShKd+f/Q0zhqzvE=
template:
metadata:
creationTimestamp: null
name: memerr-secret
namespace: memerr-ns
---

View File

@ -1,349 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: metallb-helm-chart-value-overrides
namespace: metallb-system
data:
values.yaml: |-
# Default values for metallb.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
loadBalancerClass: ""
# To configure MetalLB, you must specify ONE of the following two
# options.
rbac:
# create specifies whether to install and use RBAC rules.
create: true
prometheus:
# scrape annotations specifies whether to add Prometheus metric
# auto-collection annotations to pods. See
# https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml
# for a corresponding Prometheus configuration. Alternatively, you
# may want to use the Prometheus Operator
# (https://github.com/coreos/prometheus-operator) for more powerful
# monitoring configuration. If you use the Prometheus operator, this
# can be left at false.
scrapeAnnotations: false
# port both controller and speaker will listen on for metrics
metricsPort: 7472
# if set, enables rbac proxy on the controller and speaker to expose
# the metrics via tls.
# secureMetricsPort: 9120
# the name of the secret to be mounted in the speaker pod
# to expose the metrics securely. If not present, a self signed
# certificate to be used.
speakerMetricsTLSSecret: ""
# the name of the secret to be mounted in the controller pod
# to expose the metrics securely. If not present, a self signed
# certificate to be used.
controllerMetricsTLSSecret: ""
      # prometheus doesn't have permission to scrape all namespaces, so we give it permission to scrape metallb's one
rbacPrometheus: true
# the service account used by prometheus
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
serviceAccount: ""
# the namespace where prometheus is deployed
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
namespace: ""
# the image to be used for the kuberbacproxy container
rbacProxy:
repository: gcr.io/kubebuilder/kube-rbac-proxy
tag: v0.12.0
pullPolicy:
# Prometheus Operator PodMonitors
podMonitor:
# enable support for Prometheus Operator
enabled: false
        # optional additional labels for podMonitors
additionalLabels: {}
# optional annotations for podMonitors
annotations: {}
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
# Prometheus Operator ServiceMonitors. To be used as an alternative
# to podMonitor, supports secure metrics.
serviceMonitor:
# enable support for Prometheus Operator
enabled: false
speaker:
# optional additional labels for the speaker serviceMonitor
additionalLabels: {}
# optional additional annotations for the speaker serviceMonitor
annotations: {}
# optional tls configuration for the speaker serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
controller:
# optional additional labels for the controller serviceMonitor
additionalLabels: {}
# optional additional annotations for the controller serviceMonitor
annotations: {}
# optional tls configuration for the controller serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
# Prometheus Operator alertmanager alerts
prometheusRule:
# enable alertmanager alerts
enabled: false
        # optional additional labels for prometheusRules
additionalLabels: {}
# optional annotations for prometheusRules
annotations: {}
# MetalLBStaleConfig
staleConfig:
enabled: true
labels:
severity: warning
# MetalLBConfigNotLoaded
configNotLoaded:
enabled: true
labels:
severity: warning
# MetalLBAddressPoolExhausted
addressPoolExhausted:
enabled: true
labels:
severity: alert
addressPoolUsage:
enabled: true
thresholds:
- percent: 75
labels:
severity: warning
- percent: 85
labels:
severity: warning
- percent: 95
labels:
severity: alert
# MetalLBBGPSessionDown
bgpSessionDown:
enabled: true
labels:
severity: alert
extraAlerts: []
# controller contains configuration specific to the MetalLB cluster
# controller.
controller:
enabled: true
# -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
# command: /controller
# webhookMode: enabled
image:
repository: quay.io/metallb/controller
tag:
pullPolicy:
## @param controller.updateStrategy.type Metallb controller deployment strategy type.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## e.g:
## strategy:
## type: RollingUpdate
## rollingUpdate:
## maxSurge: 25%
## maxUnavailable: 25%
##
strategy:
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
securityContext:
runAsNonRoot: true
# nobody
runAsUser: 65534
fsGroup: 65534
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
runtimeClassName: ""
affinity: {}
podAnnotations: {}
labels: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
# speaker contains configuration specific to the MetalLB speaker
# daemonset.
speaker:
enabled: true
# command: /speaker
# -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
tolerateMaster: true
memberlist:
enabled: true
mlBindPort: 7946
mlSecretKeyPath: "/etc/ml_secret_key"
excludeInterfaces:
enabled: true
image:
repository: quay.io/metallb/speaker
tag:
pullPolicy:
## @param speaker.updateStrategy.type Speaker daemonset strategy type
## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
##
updateStrategy:
## StrategyType
## Can be set to RollingUpdate or OnDelete
##
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
## Defines a secret name for the controller to generate a memberlist encryption secret
## By default secretName: {{ "metallb.fullname" }}-memberlist
##
# secretName:
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
affinity: {}
## Selects which runtime class will be used by the pod.
runtimeClassName: ""
podAnnotations: {}
labels: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
startupProbe:
enabled: true
failureThreshold: 30
periodSeconds: 5
# frr contains configuration specific to the MetalLB FRR container,
# for speaker running alongside FRR.
frr:
enabled: true
image:
repository: quay.io/frrouting/frr
tag: 8.5.2
pullPolicy:
metricsPort: 7473
resources: {}
# if set, enables a rbac proxy sidecar container on the speaker to
# expose the frr metrics via tls.
# secureMetricsPort: 9121
reloader:
resources: {}
frrMetrics:
resources: {}
crds:
enabled: true
validationFailurePolicy: Fail

View File

@ -1,21 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: metallb
namespace: metallb-system
spec:
chart:
spec:
chart: metallb
version: 0.13.11
sourceRef:
kind: HelmRepository
name: metallb
namespace: flux-system
interval: 15m
timeout: 5m
releaseName: metallb
valuesFrom:
- kind: ConfigMap
name: metallb-helm-chart-value-overrides
valuesKey: values.yaml
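# Flux merges the values.yaml key of the referenced ConfigMap into the chart
# values at reconcile time, so editing metallb-helm-chart-value-overrides is
# roughly equivalent to `helm upgrade -f values.yaml`; anything set inline
# under spec.values would be layered on top of the ConfigMap values.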

View File

@ -1,8 +0,0 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: metallb-pool-addresses
namespace: metallb-system
spec:
addresses:
- 192.168.1.2-192.168.1.63

View File

@ -1,8 +0,0 @@
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: my-l2-advertisment
namespace: metallb-system
spec:
ipAddressPools:
- metallb-pool-addresses
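# With this L2Advertisement, LoadBalancer Services are assigned addresses from
# 192.168.1.2-63 automatically. A Service can also pin a specific address from
# the pool (a sketch using MetalLB's annotation; not used in this repo):
#
#   metadata:
#     annotations:
#       metallb.universe.tf/loadBalancerIPs: 192.168.1.10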

View File

@ -1,564 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: minio
namespace: minio-ns
spec:
chart:
spec:
chart: minio
sourceRef:
kind: HelmRepository
name: minio
namespace: flux-system
interval: 15m0s
timeout: 5m
releaseName: minio
values:
## Provide a name in place of minio for `app:` labels
##
nameOverride: ""
## Provide a name to substitute for the full names of resources
##
fullnameOverride: ""
## set kubernetes cluster domain where minio is running
##
clusterDomain: cluster.local
## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the
##
image:
repository: quay.io/minio/minio
tag: RELEASE.2023-09-30T07-02-29Z
pullPolicy: IfNotPresent
imagePullSecrets: []
# - name: "image-pull-secret"
## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio
## client used to create a default bucket).
##
mcImage:
repository: quay.io/minio/mc
tag: RELEASE.2023-09-29T16-41-22Z
pullPolicy: IfNotPresent
## minio mode, i.e. standalone or distributed
    mode: standalone ## the other supported value is "distributed"
## Additional labels to include with deployment or statefulset
additionalLabels: {}
## Additional annotations to include with deployment or statefulset
additionalAnnotations: {}
## Typically the deployment/statefulset includes checksums of secrets/config,
## So that when these change on a subsequent helm install, the deployment/statefulset
## is restarted. This can result in unnecessary restarts under GitOps tooling such as
## flux, so set to "true" to disable this behaviour.
ignoreChartChecksums: false
## Additional arguments to pass to minio binary
extraArgs: []
## Additional volumes to minio container
extraVolumes: []
## Additional volumeMounts to minio container
extraVolumeMounts: []
## Additional sidecar containers
extraContainers: []
## Internal port number for MinIO S3 API container
## Change service.port to change external port number
minioAPIPort: "9000"
## Internal port number for MinIO Browser Console container
## Change consoleService.port to change external port number
minioConsolePort: "9001"
## Update strategy for Deployments
deploymentUpdate:
type: RollingUpdate
maxUnavailable: 0
maxSurge: 100%
## Update strategy for StatefulSets
statefulSetUpdate:
updateStrategy: RollingUpdate
## Pod priority settings
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
##
priorityClassName: ""
## Pod runtime class name
## ref https://kubernetes.io/docs/concepts/containers/runtime-class/
##
runtimeClassName: ""
## Set default rootUser, rootPassword
## AccessKey and secretKey is generated when not set
## Distributed MinIO ref: https://min.io/docs/minio/linux/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.html
##
#rootUser: ""
#rootPassword: ""
#
## Use existing Secret that store following variables:
##
## | Chart var | .data.<key> in Secret |
## |:----------------------|:-------------------------|
## | rootUser | rootUser |
## | rootPassword | rootPassword |
##
## All mentioned variables will be ignored in values file.
## .data.rootUser and .data.rootPassword are mandatory,
## others depend on enabled status of corresponding sections.
    existingSecret: "minio-default-credentials"
    ## Directory on the MinIO pod
## Directory on the MinIO pof
certsPath: "/etc/minio/certs/"
configPathmc: "/etc/minio/mc/"
## Path where PV would be mounted on the MinIO Pod
mountPath: "/export"
## Override the root directory which the minio server should serve from.
## If left empty, it defaults to the value of {{ .Values.mountPath }}
## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }}
##
bucketRoot: ""
# Number of drives attached to a node
drivesPerNode: 1
# Number of MinIO containers running
replicas: 1
# Number of expanded MinIO clusters
pools: 1
## TLS Settings for MinIO
tls:
enabled: false
## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret
certSecret: ""
publicCrt: public.crt
privateKey: private.key
## Trusted Certificates Settings for MinIO. Ref: https://min.io/docs/minio/linux/operations/network-encryption.html#third-party-certificate-authorities
## Bundle multiple trusted certificates into one secret and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret
## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt.
    ## If trustedCertsSecret is left empty and tls is enabled, this chart uses the public certificate from .Values.tls.certSecret.
trustedCertsSecret: ""
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
#annotations: {}
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
#existingClaim: ""
## minio data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
## Storage class of PV to bind. By default it looks for standard storage class.
## If the PV uses a different storage class, specify that here.
storageClass: "longhorn"
#volumeName: ""
accessMode: ReadWriteOnce
size: 30Gi
## If subPath is set mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
##
subPath: ""
## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
type: LoadBalancer
clusterIP: ~
port: "9000"
nodePort: 9000
loadBalancerIP: ~
externalIPs: []
annotations: {}
## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
enabled: false
ingressClassName: ~
labels: {}
# node-role.kubernetes.io/ingress: platform
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# kubernetes.io/ingress.allow-http: "false"
# kubernetes.io/ingress.global-static-ip-name: ""
# nginx.ingress.kubernetes.io/secure-backends: "true"
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0
path: /
hosts:
- minio-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
consoleService:
type: LoadBalancer
clusterIP: ~
port: "9001"
nodePort: 80
loadBalancerIP: ~
externalIPs: []
annotations: {}
consoleIngress:
enabled: false
ingressClassName: ~
labels: {}
# node-role.kubernetes.io/ingress: platform
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# kubernetes.io/ingress.allow-http: "false"
# kubernetes.io/ingress.global-static-ip-name: ""
# nginx.ingress.kubernetes.io/secure-backends: "true"
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0
path: /
hosts:
- console.minio-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
tolerations: []
affinity: {}
topologySpreadConstraints: []
## Add stateful containers to have security context, if enabled MinIO will run as this
## user and group NOTE: securityContext is only enabled if persistence.enabled=true
securityContext:
enabled: true
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: "OnRootMismatch"
    # Additional pod annotations
podAnnotations: {}
# Additional pod labels
podLabels: {}
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 16Gi
## List of policies to be created after minio install
##
## In addition to default policies [readonly|readwrite|writeonly|consoleAdmin|diagnostics]
## you can define additional policies with custom supported actions and resources
policies: []
## The writeexamplepolicy policy grants creation and deletion of buckets whose names
## start with "example", plus object write permissions on those buckets.
# - name: writeexamplepolicy
# statements:
# - effect: Allow # this is the default
# resources:
# - 'arn:aws:s3:::example*/*'
# actions:
# - "s3:AbortMultipartUpload"
# - "s3:GetObject"
# - "s3:DeleteObject"
# - "s3:PutObject"
# - "s3:ListMultipartUploadParts"
# - resources:
# - 'arn:aws:s3:::example*'
# actions:
# - "s3:CreateBucket"
# - "s3:DeleteBucket"
# - "s3:GetBucketLocation"
# - "s3:ListBucket"
# - "s3:ListBucketMultipartUploads"
## The readonlyexamplepolicy policy grants read access to buckets whose names start
## with "example", plus object read permissions on those buckets.
# - name: readonlyexamplepolicy
# statements:
# - resources:
# - 'arn:aws:s3:::example*/*'
# actions:
# - "s3:GetObject"
# - resources:
# - 'arn:aws:s3:::example*'
# actions:
# - "s3:GetBucketLocation"
# - "s3:ListBucket"
# - "s3:ListBucketMultipartUploads"
## The conditionsexample policy grants full access to the example bucket only when aws:username is "johndoe" and the source IP is within 10.0.0.0/8 or 192.168.0.0/24
# - name: conditionsexample
# statements:
# - resources:
# - 'arn:aws:s3:::example/*'
# actions:
# - 's3:*'
# conditions:
# - StringEquals: '"aws:username": "johndoe"'
# - IpAddress: |
# "aws:SourceIp": [
# "10.0.0.0/8",
# "192.168.0.0/24"
# ]
#
## Additional Annotations for the Kubernetes Job makePolicyJob
makePolicyJob:
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
resources:
requests:
memory: 128Mi
# Command to run after the main command on exit
exitCommand: ""
## List of users to be created after minio install
##
users:
## Username, password and policy to be assigned to the user
## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics]
## Add new policies as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management.html#access-management
## NOTE: this will fail if LDAP is enabled in your MinIO deployment;
## make sure to disable this if you are using LDAP.
- accessKey: console
secretKey: console123
policy: consoleAdmin
# Or you can refer to specific secret
#- accessKey: externalSecret
# existingSecret: my-secret
# existingSecretKey: password
# policy: readonly
## Additional Annotations for the Kubernetes Job makeUserJob
makeUserJob:
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
resources:
requests:
memory: 128Mi
# Command to run after the main command on exit
exitCommand: ""
## List of service accounts to be created after minio install
##
svcaccts: []
## accessKey, secretKey and parent user to be assigned to the service accounts
## Add new service accounts as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management/minio-user-management.html#service-accounts
# - accessKey: console-svcacct
# secretKey: console123
# user: console
## Or you can refer to specific secret
# - accessKey: externalSecret
# existingSecret: my-secret
# existingSecretKey: password
# user: console
## You also can pass custom policy
# - accessKey: console-svcacct
# secretKey: console123
# user: console
# policy:
# statements:
# - resources:
# - 'arn:aws:s3:::example*/*'
# actions:
# - "s3:AbortMultipartUpload"
# - "s3:GetObject"
# - "s3:DeleteObject"
# - "s3:PutObject"
# - "s3:ListMultipartUploadParts"
makeServiceAccountJob:
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
resources:
requests:
memory: 128Mi
# Command to run after the main command on exit
exitCommand: ""
## List of buckets to be created after minio install
##
buckets: []
# # Name of the bucket
# - name: bucket1
# # Policy to be set on the
# # bucket [none|download|upload|public]
# policy: none
# # Purge if bucket exists already
# purge: false
# # set versioning for
# # bucket [true|false]
# versioning: false
# # set objectlocking for
# # bucket [true|false] NOTE: versioning is enabled by default if you use locking
# objectlocking: false
# - name: bucket2
# policy: none
# purge: false
# versioning: true
# # set objectlocking for
# # bucket [true|false] NOTE: versioning is enabled by default if you use locking
# objectlocking: false
## Additional Annotations for the Kubernetes Job makeBucketJob
makeBucketJob:
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
resources:
requests:
memory: 128Mi
# Command to run after the main command on exit
exitCommand: ""
## List of command to run after minio install
## NOTE: the mc command TARGET is always "myminio"
customCommands:
# - command: "admin policy attach myminio consoleAdmin --group='cn=ops,cn=groups,dc=example,dc=com'"
## Additional Annotations for the Kubernetes Job customCommandJob
customCommandJob:
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
resources:
requests:
memory: 128Mi
# Command to run after the main command on exit
exitCommand: ""
## Merge jobs
postJob:
podAnnotations: {}
annotations: {}
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
nodeSelector: {}
tolerations: []
affinity: {}
## Use this field to add environment variables relevant to the MinIO server. These will be passed on to the MinIO container(s)
## when the chart is deployed
environment:
## Please refer for comprehensive list https://min.io/docs/minio/linux/reference/minio-server/minio-server.html
## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io"
## MINIO_BROWSER: "off"
## The name of a secret in the same kubernetes namespace which contain secret values
## This can be useful for LDAP password, etc
## The key in the secret must be 'config.env'
##
extraSecret: ~
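## A minimal sketch of such a secret (the name "minio-extra-config" is a
## hypothetical placeholder, and MINIO_BROWSER is just an example variable):
# apiVersion: v1
# kind: Secret
# metadata:
#   name: minio-extra-config
#   namespace: minio-ns
# stringData:
#   config.env: |-
#     export MINIO_BROWSER="off"
## ...and then set extraSecret: minio-extra-config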
## OpenID Identity Management
## The following section documents environment variables for enabling external identity management using an OpenID Connect (OIDC)-compatible provider.
## See https://min.io/docs/minio/linux/operations/external-iam/configure-openid-external-identity-management.html for a tutorial on using these variables.
oidc:
enabled: false
configUrl: "https://identity-provider-url/.well-known/openid-configuration"
clientId: "minio"
clientSecret: ""
# Provide existing client secret from the Kubernetes Secret resource, existing secret will have priority over `clientSecret`
existingClientSecretName: ""
existingClientSecretKey: ""
claimName: "policy"
scopes: "openid,profile,email"
redirectUri: "https://console-endpoint-url/oauth_callback"
# Can leave empty
claimPrefix: ""
comment: ""
networkPolicy:
enabled: false
allowExternal: true
## PodDisruptionBudget settings
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
##
podDisruptionBudget:
enabled: false
maxUnavailable: 1
## Specify the service account to use for the MinIO pods. If 'create' is set to 'false'
## and 'name' is left unspecified, the account 'default' will be used.
serviceAccount:
create: true
## The name of the service account to use. If 'create' is 'true', a service account with that name
## will be created.
name: "minio-sa"
metrics:
serviceMonitor:
enabled: false
# scrape each node/pod individually for additional metrics
includeNode: false
public: true
additionalLabels: {}
annotations: {}
# for node metrics
relabelConfigs: {}
# for cluster metrics
relabelConfigsCluster: {}
# metricRelabelings:
# - regex: (server|pod)
# action: labeldrop
namespace: ~
# Scrape interval, for example `interval: 30s`
interval: ~
# Scrape timeout, for example `scrapeTimeout: 10s`
scrapeTimeout: ~
## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md
## Define endpoints to enable this section.
etcd:
endpoints: []
pathPrefix: ""
corednsPathPrefix: ""
clientCert: ""
clientCertKey: ""

View File

@ -1,16 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: minio-default-credentials
namespace: minio-ns
spec:
encryptedData:
rootPassword: AgASkMrZq0TO6z/oeMyzGjDmSbJLBQCYW/7IQHdRS8M8vZkioEujShT/8IE6etxTOzGLwOkmpO8PyExTgMD3atyRRdiyBs2jaT0SIOyEZUA0PjiAgmYTWx9cAXBROOYzkT7u8IvMomEjiKx/EZG2XPhxgg0/Z9tBCVkstuEYyUfRokSco4icJ/JyHAz1Gg2F9w/KMiQJProcoAV5ajRdI4Bfb9e6E5GIW2Z0WKSH4fcCyM07nW+QnwlNGZNaAgLmSZygnUeF2PN/qD5aSj5YSjK5Va7KQRIlYszmzJcFg70yeustMIcE2nD2YVFFHb0CXKqEgnF9QrieBagorwoRvqU5XtXoXiBmzgvXtDQTJJ7ODT4XAB0oVF0QOdreBuVZ34D+Fb61O5HtFvSHRN3HsGXdvkKKgywJbjL+LaQBcEgztq0xjiGj/tjf3UDZOdOeHPZYJno9gdJX5eCTTjWVnaPxMyfwl3y4YmmHKVenCS6tsBkABk2/+lEthGUBRY9CyKl/ugwDQCJviX4tf7ZvMGGuPAxqIlZuM69jU53Zgp9Vq/8+UuTlksJzwQlH/VoyZsQl+/vSekyjDyPR3g3AunjoLsQDNnBwcghMzBFgeJzB/dSiyg0dQpiMUCcwe8i/20N/ER4pIC+ag1IyBAoKMQpWWJWyPU7IQ+JbYPdCI9Q1bMhQIpBNLkJsaiaRCvwrWaK07Ml9T4i+wMat8z8v0gIbnK+2JZ7FIeA66uuhxXhMi2Coqs5L0/vk
rootUser: AgDUG6LKdvzJorlYnxlW6cnJaqrhQkumFheLwZTD3aRf8ufFqQaGM/IPyNXwhKj4YAlr17gSR9kzIhYnkrKwVq9rtzo/arm2hF4cDWwQEZlrfmkqZfAec4p81KvyYgL19fuhDOeiZQfuCHl0MvDw+j6AzAk6Q6bbNdjWElaRzNLzjRAM892OCS2pubzRPLJl2+/9Ny/lZ2ePmZHHdin7ca73aCrcO1PryrhqQxZRMM0iaNKjUGsY8WMeoNnayhJ34KbsEMDTtPkWXlZb2FGtJDafw0A0fNn19PlU7wN7HeMK05SPgp4Sjs9LFrHNBanjF/rKqInCSg2lN57bUcJcVotpXEt6rmTEySo2QhnfFAXafX6hfl/HHT9GSrya+vFLKNXVf8hxVZMRjXmNIi0N3obvHOqGIJFDiy4iWEwOdrn/yetHs8ctS+DrO4pNY1cz/6SzaBayqaPqcxIAWhCKxXtNWb6sHBpTRsXpwUFq2Hoc9idB1uTGOpmpSWl8awUUsanXv4Kb2sZkXNc3iCCwx6TBDLQ1fukISj4n30RcTFDqa++3Nxq1n1immNerX30PjMWewxlUvAm5O9kwcIplfk8iW9ii3gRlth0Qs8FGhbfrghz5xs8CIgxEhnrCRphNeIow3JT1wxGU0r/QKoQu8zgEz+TsNdCXmB8bnauYyrW6ANhZaWx/wGoB29j7mHWfvLsTIwB2Q8HeV4agwKXoGSsp
template:
metadata:
creationTimestamp: null
name: minio-default-credentials
namespace: minio-ns
---

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: navidrome-pvc-data
namespace: navidrome-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 16Gi

View File

@ -1,71 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: navidrome
namespace: navidrome-ns
spec:
selector:
matchLabels:
app: navidrome
template:
metadata:
labels:
app: navidrome
spec:
nodeSelector:
kubernetes.io/hostname: gluttony
securityContext:
fsGroup: 1000
containers:
- name: navidrome
image: deluan/navidrome:latest
securityContext:
runAsUser: 1000
runAsGroup: 1000
ports:
- containerPort: 4533
env:
- name: ND_BASEURL
value: "https://music.clortox.com"
- name: ND_CACHEFOLDER
value: "/cache"
- name: ND_MUSICFOLDER
value: "/music"
- name: ND_DATAFOLDER
value: "/data"
- name: ND_SCANSCHEDULE
value: "1h"
- name: ND_LOGLEVEL
value: "info"
- name: ND_SESSIONTIMEOUT
value: "24h"
- name: ND_ENABLESHARING
value: "true"
- name: ND_UILOGINBACKGROUNDURL
value: "https://general.api.clortox.com/images/background/today"
- name: ND_UIWELCOMEMESSAGE
value: "Lol. Lmao even"
- name: ND_REVERSEPROXYUSERHEADER
value: "X-Authentik-Username"
- name: ND_REVERSEPROXYWHITELIST
value: "0.0.0.0/0"
volumeMounts:
- name: data
mountPath: "/data"
- name: music
mountPath: "/music"
readOnly: true
- name: cache
mountPath: "/cache"
volumes:
- name: data
persistentVolumeClaim:
claimName: navidrome-pvc-data
- name: music
persistentVolumeClaim:
claimName: navidrome-pvc-music
- name: cache
emptyDir: {}

View File

@ -1,21 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: navidrome-pv-music
namespace: navidrome-ns
spec:
storageClassName: local-storage
capacity:
storage: 18000Gi
accessModes:
- ReadWriteMany
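# hostPath ties this volume to the node that physically stores /Main/Media,
# so the nodeAffinity below pins it to the "gluttony" node.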
hostPath:
path: "/Main/Media"
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- gluttony

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: navidrome-pvc-music
namespace: navidrome-ns
spec:
volumeName: navidrome-pv-music
storageClassName: local-storage
accessModes:
- ReadWriteMany
resources:
requests:
storage: 18000Gi

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: navidrome-services
namespace: navidrome-ns
spec:
type: LoadBalancer
ports:
- port: 80
targetPort: 4533
protocol: TCP
selector:
app: navidrome

View File

@ -1,567 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nodered-settings
namespace: node-red-ns
data:
settings.js: |
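// This file is shipped via the nodered-settings ConfigMap and mounted over
// /data/settings.js by the node-red Deployment later in this commit.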
/**
* This is the default settings file provided by Node-RED.
*
* It can contain any valid JavaScript code that will get run when Node-RED
* is started.
*
* Lines that start with // are commented out.
* Each entry should be separated from the entries above and below by a comma ','
*
* For more information about individual settings, refer to the documentation:
* https://nodered.org/docs/user-guide/runtime/configuration
*
* The settings are split into the following sections:
* - Flow File and User Directory Settings
* - Security
* - Server Settings
* - Runtime Settings
* - Editor Settings
* - Node Settings
**/
module.exports = {
/*******************************************************************************
* Flow File and User Directory Settings
* - flowFile
* - credentialSecret
* - flowFilePretty
* - userDir
* - nodesDir
******************************************************************************/
/** The file containing the flows. If not set, defaults to flows_<hostname>.json **/
flowFile: 'flows.json',
/** By default, credentials are encrypted in storage using a generated key. To
* specify your own secret, set the following property.
* If you want to disable encryption of credentials, set this property to false.
* Note: once you set this property, do not change it - doing so will prevent
* node-red from being able to decrypt your existing credentials and they will be
* lost.
*/
//credentialSecret: "a-secret-key",
/** By default, the flow JSON will be formatted over multiple lines making
* it easier to compare changes when using version control.
* To disable pretty-printing of the JSON set the following property to false.
*/
flowFilePretty: true,
/** By default, all user data is stored in a directory called `.node-red` under
* the user's home directory. To use a different location, the following
* property can be used
*/
//userDir: '/home/nol/.node-red/',
/** Node-RED scans the `nodes` directory in the userDir to find local node files.
* The following property can be used to specify an additional directory to scan.
*/
//nodesDir: '/home/nol/.node-red/nodes',
/*******************************************************************************
* Security
* - adminAuth
* - https
* - httpsRefreshInterval
* - requireHttps
* - httpNodeAuth
* - httpStaticAuth
******************************************************************************/
/** To password protect the Node-RED editor and admin API, the following
* property can be used. See https://nodered.org/docs/security.html for details.
*/
//adminAuth: {
// type: "credentials",
// users: [{
// username: "admin",
// password: "$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN.",
// permissions: "*"
// }]
//},
/** The following property can be used to enable HTTPS
* This property can be either an object, containing both a (private) key
* and a (public) certificate, or a function that returns such an object.
* See http://nodejs.org/api/https.html#https_https_createserver_options_requestlistener
* for details of its contents.
*/
/** Option 1: static object */
//https: {
// key: require("fs").readFileSync('privkey.pem'),
// cert: require("fs").readFileSync('cert.pem')
//},
/** Option 2: function that returns the HTTP configuration object */
// https: function() {
// // This function should return the options object, or a Promise
// // that resolves to the options object
// return {
// key: require("fs").readFileSync('privkey.pem'),
// cert: require("fs").readFileSync('cert.pem')
// }
// },
/** If the `https` setting is a function, the following setting can be used
* to set how often, in hours, the function will be called. That can be used
* to refresh any certificates.
*/
//httpsRefreshInterval : 12,
/** The following property can be used to cause insecure HTTP connections to
* be redirected to HTTPS.
*/
//requireHttps: true,
/** To password protect the node-defined HTTP endpoints (httpNodeRoot),
* including node-red-dashboard, or the static content (httpStatic), the
* following properties can be used.
* The `pass` field is a bcrypt hash of the password.
* See https://nodered.org/docs/security.html#generating-the-password-hash
*/
//httpNodeAuth: {user:"user",pass:"$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN."},
//httpStaticAuth: {user:"user",pass:"$2a$08$zZWtXTja0fB1pzD4sHCMyOCMYz2Z6dNbM6tl8sJogENOMcxWV9DN."},
/*******************************************************************************
* Server Settings
* - uiPort
* - uiHost
* - apiMaxLength
* - httpServerOptions
* - httpAdminRoot
* - httpAdminMiddleware
* - httpNodeRoot
* - httpNodeCors
* - httpNodeMiddleware
* - httpStatic
* - httpStaticRoot
******************************************************************************/
/** the tcp port that the Node-RED web server is listening on */
uiPort: process.env.PORT || 1880,
/** By default, the Node-RED UI accepts connections on all IPv4 interfaces.
* To listen on all IPv6 addresses, set uiHost to "::",
* The following property can be used to listen on a specific interface. For
* example, the following would only allow connections from the local machine.
*/
//uiHost: "127.0.0.1",
/** The maximum size of HTTP request that will be accepted by the runtime api.
* Default: 5mb
*/
//apiMaxLength: '5mb',
/** The following property can be used to pass custom options to the Express.js
* server used by Node-RED. For a full list of available options, refer
* to http://expressjs.com/en/api.html#app.settings.table
*/
//httpServerOptions: { },
/** By default, the Node-RED UI is available at http://localhost:1880/
* The following property can be used to specify a different root path.
* If set to false, this is disabled.
*/
//httpAdminRoot: '/admin',
/** The following property can be used to add a custom middleware function
* in front of all admin http routes. For example, to set custom http
* headers. It can be a single function or an array of middleware functions.
*/
// httpAdminMiddleware: function(req,res,next) {
// // Set the X-Frame-Options header to limit where the editor
// // can be embedded
// //res.set('X-Frame-Options', 'sameorigin');
// next();
// },
/** Some nodes, such as HTTP In, can be used to listen for incoming http requests.
* By default, these are served relative to '/'. The following property
* can be used to specify a different root path. If set to false, this is
* disabled.
*/
//httpNodeRoot: '/red-nodes',
/** The following property can be used to configure cross-origin resource sharing
* in the HTTP nodes.
* See https://github.com/troygoode/node-cors#configuration-options for
* details on its contents. The following is a basic permissive set of options:
*/
//httpNodeCors: {
// origin: "*",
// methods: "GET,PUT,POST,DELETE"
//},
/** If you need to set an http proxy please set an environment variable
* called http_proxy (or HTTP_PROXY) outside of Node-RED in the operating system.
* For example - http_proxy=http://myproxy.com:8080
* (Setting it here will have no effect)
* You may also specify no_proxy (or NO_PROXY) to supply a comma separated
* list of domains to not proxy, eg - no_proxy=.acme.co,.acme.co.uk
*/
/** The following property can be used to add a custom middleware function
* in front of all http in nodes. This allows custom authentication to be
* applied to all http in nodes, or any other sort of common request processing.
* It can be a single function or an array of middleware functions.
*/
//httpNodeMiddleware: function(req,res,next) {
// // Handle/reject the request, or pass it on to the http in node by calling next();
// // Optionally skip our rawBodyParser by setting this to true;
// //req.skipRawBodyParser = true;
// next();
//},
/** When httpAdminRoot is used to move the UI to a different root path, the
* following property can be used to identify a directory of static content
* that should be served at http://localhost:1880/.
* When httpStaticRoot is set differently to httpAdminRoot, there is no need
* to move httpAdminRoot
*/
//httpStatic: '/home/nol/node-red-static/', //single static source
/**
* OR multiple static sources can be created using an array of objects...
* Each object can also contain an options object for further configuration.
* See https://expressjs.com/en/api.html#express.static for available options.
*/
//httpStatic: [
// {path: '/home/nol/pics/', root: "/img/"},
// {path: '/home/nol/reports/', root: "/doc/"},
// {path: '/home/nol/videos/', root: "/vid/", options: {maxAge: '1d'}}
//],
/**
* All static routes will be appended to httpStaticRoot
* e.g. if httpStatic = "/home/nol/docs" and httpStaticRoot = "/static/"
* then "/home/nol/docs" will be served at "/static/"
* e.g. if httpStatic = [{path: '/home/nol/pics/', root: "/img/"}]
* and httpStaticRoot = "/static/"
* then "/home/nol/pics/" will be served at "/static/img/"
*/
//httpStaticRoot: '/static/',
/*******************************************************************************
* Runtime Settings
* - lang
* - runtimeState
* - diagnostics
* - logging
* - contextStorage
* - exportGlobalContextKeys
* - externalModules
******************************************************************************/
/** Uncomment the following to run node-red in your preferred language.
* Available languages include: en-US (default), ja, de, zh-CN, zh-TW, ru, ko
* Some languages are more complete than others.
*/
// lang: "de",
/** Configure diagnostics options
* - enabled: When `enabled` is `true` (or unset), diagnostics data will
* be available at http://localhost:1880/diagnostics
* - ui: When `ui` is `true` (or unset), the action `show-system-info` will
* be available to logged in users of node-red editor
*/
diagnostics: {
/** enable or disable diagnostics endpoint. Must be set to `false` to disable */
enabled: true,
/** enable or disable diagnostics display in the node-red editor. Must be set to `false` to disable */
ui: true,
},
/** Configure runtimeState options
* - enabled: When `enabled` is `true` flows runtime can be Started/Stopped
* by POSTing to available at http://localhost:1880/flows/state
* - ui: When `ui` is `true`, the action `core:start-flows` and
* `core:stop-flows` will be available to logged in users of node-red editor
* Also, the deploy menu (when set to default) will show a stop or start button
*/
runtimeState: {
/** enable or disable flows/state endpoint. Must be set to `false` to disable */
enabled: false,
/** show or hide runtime stop/start options in the node-red editor. Must be set to `false` to hide */
ui: false,
},
/** Configure the logging output */
logging: {
/** Only console logging is currently supported */
console: {
/** Level of logging to be recorded. Options are:
* fatal - only those errors which make the application unusable should be recorded
* error - record errors which are deemed fatal for a particular request + fatal errors
* warn - record problems which are non fatal + errors + fatal errors
* info - record information about the general running of the application + warn + error + fatal errors
* debug - record information which is more verbose than info + info + warn + error + fatal errors
* trace - record very detailed logging + debug + info + warn + error + fatal errors
* off - turn off all logging (doesn't affect metrics or audit)
*/
level: "info",
/** Whether or not to include metric events in the log output */
metrics: false,
/** Whether or not to include audit events in the log output */
audit: false
}
},
/** Context Storage
* The following property can be used to enable context storage. The configuration
* provided here will enable file-based context that flushes to disk every 30 seconds.
* Refer to the documentation for further options: https://nodered.org/docs/api/context/
*/
//contextStorage: {
// default: {
// module:"localfilesystem"
// },
//},
/** `global.keys()` returns a list of all properties set in global context.
* This allows them to be displayed in the Context Sidebar within the editor.
* In some circumstances it is not desirable to expose them to the editor. The
* following property can be used to hide any property set in `functionGlobalContext`
* from being listed by `global.keys()`.
* By default, the property is set to false to avoid accidental exposure of
* their values. Setting this to true will cause the keys to be listed.
*/
exportGlobalContextKeys: false,
/** Configure how the runtime will handle external npm modules.
* This covers:
* - whether the editor will allow new node modules to be installed
* - whether nodes, such as the Function node are allowed to have their
* own dynamically configured dependencies.
* The allow/denyList options can be used to limit what modules the runtime
* will install/load. It can use '*' as a wildcard that matches anything.
*/
externalModules: {
// autoInstall: false, /** Whether the runtime will attempt to automatically install missing modules */
// autoInstallRetry: 30, /** Interval, in seconds, between reinstall attempts */
// palette: { /** Configuration for the Palette Manager */
// allowInstall: true, /** Enable the Palette Manager in the editor */
// allowUpdate: true, /** Allow modules to be updated in the Palette Manager */
// allowUpload: true, /** Allow module tgz files to be uploaded and installed */
// allowList: ['*'],
// denyList: [],
// allowUpdateList: ['*'],
// denyUpdateList: []
// },
// modules: { /** Configuration for node-specified modules */
// allowInstall: true,
// allowList: [],
// denyList: []
// }
},
/*******************************************************************************
* Editor Settings
* - disableEditor
* - editorTheme
******************************************************************************/
/** The following property can be used to disable the editor. The admin API
* is not affected by this option. To disable both the editor and the admin
* API, use either the httpRoot or httpAdminRoot properties
*/
//disableEditor: false,
/** Customising the editor
* See https://nodered.org/docs/user-guide/runtime/configuration#editor-themes
* for all available options.
*/
editorTheme: {
/** The following property can be used to set a custom theme for the editor.
* See https://github.com/node-red-contrib-themes/theme-collection for
* a collection of themes to chose from.
*/
//theme: "",
/** To disable the 'Welcome to Node-RED' tour that is displayed the first
* time you access the editor for each release of Node-RED, set this to false
*/
//tours: false,
palette: {
/** The following property can be used to order the categories in the editor
* palette. If a node's category is not in the list, the category will get
* added to the end of the palette.
* If not set, the following default order is used:
*/
//categories: ['subflows', 'common', 'function', 'network', 'sequence', 'parser', 'storage'],
},
projects: {
/** To enable the Projects feature, set this value to true */
enabled: false,
workflow: {
/** Set the default projects workflow mode.
* - manual - you must manually commit changes
* - auto - changes are automatically committed
* This can be overridden per-user from the 'Git config'
* section of 'User Settings' within the editor
*/
mode: "manual"
}
},
codeEditor: {
/** Select the text editor component used by the editor.
* As of Node-RED V3, this defaults to "monaco", but can be set to "ace" if desired
*/
lib: "monaco",
options: {
/** The following options only apply if the editor is set to "monaco"
*
* theme - must match the file name of a theme in
* packages/node_modules/@node-red/editor-client/src/vendor/monaco/dist/theme
* e.g. "tomorrow-night", "upstream-sunburst", "github", "my-theme"
*/
// theme: "vs",
/** other overrides can be set e.g. fontSize, fontFamily, fontLigatures etc.
* for the full list, see https://microsoft.github.io/monaco-editor/docs.html#interfaces/editor.IStandaloneEditorConstructionOptions.html
*/
//fontSize: 14,
//fontFamily: "Cascadia Code, Fira Code, Consolas, 'Courier New', monospace",
//fontLigatures: true,
}
},
markdownEditor: {
mermaid: {
/** enable or disable mermaid diagram in markdown document
*/
enabled: true
}
},
},
/*******************************************************************************
* Node Settings
* - fileWorkingDirectory
* - functionGlobalContext
* - functionExternalModules
* - functionTimeout
* - nodeMessageBufferMaxLength
* - ui (for use with Node-RED Dashboard)
* - debugUseColors
* - debugMaxLength
* - execMaxBufferSize
* - httpRequestTimeout
* - mqttReconnectTime
* - serialReconnectTime
* - socketReconnectTime
* - socketTimeout
* - tcpMsgQueueSize
* - inboundWebSocketTimeout
* - tlsConfigDisableLocalFiles
* - webSocketNodeVerifyClient
******************************************************************************/
/** The working directory to handle relative file paths from within the File nodes
* defaults to the working directory of the Node-RED process.
*/
//fileWorkingDirectory: "",
/** Allow the Function node to load additional npm modules directly */
functionExternalModules: true,
/** Default timeout, in seconds, for the Function node. 0 means no timeout is applied */
functionTimeout: 0,
/** The following property can be used to set predefined values in Global Context.
* This allows extra node modules to be made available with in Function node.
* For example, the following:
* functionGlobalContext: { os:require('os') }
* will allow the `os` module to be accessed in a Function node using:
* global.get("os")
*/
functionGlobalContext: {
// os:require('os'),
},
/** The maximum number of messages nodes will buffer internally as part of their
* operation. This applies across a range of nodes that operate on message sequences.
* defaults to no limit. A value of 0 also means no limit is applied.
*/
//nodeMessageBufferMaxLength: 0,
/** If you installed the optional node-red-dashboard you can set its path
* relative to httpNodeRoot
* Other optional properties include
* readOnly:{boolean},
* middleware:{function or array}, (req,res,next) - http middleware
* ioMiddleware:{function or array}, (socket,next) - socket.io middleware
*/
//ui: { path: "ui" },
/** Colourise the console output of the debug node */
//debugUseColors: true,
/** The maximum length, in characters, of any message sent to the debug sidebar tab */
debugMaxLength: 1000,
/** Maximum buffer size for the exec node. Defaults to 10Mb */
//execMaxBufferSize: 10000000,
/** Timeout in milliseconds for HTTP request connections. Defaults to 120s */
//httpRequestTimeout: 120000,
/** Retry time in milliseconds for MQTT connections */
mqttReconnectTime: 15000,
/** Retry time in milliseconds for Serial port connections */
serialReconnectTime: 15000,
/** Retry time in milliseconds for TCP socket connections */
//socketReconnectTime: 10000,
/** Timeout in milliseconds for TCP server socket connections. Defaults to no timeout */
//socketTimeout: 120000,
/** Maximum number of messages to wait in queue while attempting to connect to TCP socket
* defaults to 1000
*/
//tcpMsgQueueSize: 2000,
/** Timeout in milliseconds for inbound WebSocket connections that do not
* match any configured node. Defaults to 5000
*/
//inboundWebSocketTimeout: 5000,
/** To disable the option for using local files for storing keys and
* certificates in the TLS configuration node, set this to true.
*/
//tlsConfigDisableLocalFiles: true,
/** The following property can be used to verify WebSocket connection attempts.
* This allows, for example, the HTTP request headers to be checked to ensure
* they include valid authentication information.
*/
//webSocketNodeVerifyClient: function(info) {
// /** 'info' has three properties:
// * - origin : the value in the Origin header
// * - req : the HTTP request
// * - secure : true if req.connection.authorized or req.connection.encrypted is set
// *
// * The function should return true if the connection should be accepted, false otherwise.
// *
// * Alternatively, if this function is defined to accept a second argument, callback,
// * it can be used to verify the client asynchronously.
// * The callback takes three arguments:
// * - result : boolean, whether to accept the connection or not
// * - code : if result is false, the HTTP error status to return
// * - reason: if result is false, the HTTP reason string to return
// */
//},
}

View File

@ -1,35 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: node-red
namespace: node-red-ns
spec:
replicas: 1
selector:
matchLabels:
app: node-red
template:
metadata:
labels:
app: node-red
spec:
securityContext:
fsGroup: 1000
containers:
- name: node-red
image: nodered/node-red
ports:
- containerPort: 1880
volumeMounts:
- name: node-red-data
mountPath: /data
- name: settings-file
mountPath: /data/settings.js
subPath: settings.js
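# subPath mounts only settings.js from the ConfigMap, so the rest of
# /data (flows, credentials) still lives on the PVC.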
volumes:
- name: node-red-data
persistentVolumeClaim:
claimName: node-red-pvc
- name: settings-file
configMap:
name: nodered-settings

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: node-red-pvc
namespace: node-red-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 1Gi

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: node-red-service
namespace: node-red-ns
spec:
selector:
app: node-red
ports:
- protocol: TCP
port: 80
targetPort: 1880
type: LoadBalancer

View File

@ -1,556 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: gpu-operator
namespace: nvidia-system
spec:
chart:
spec:
chart: gpu-operator
sourceRef:
kind: HelmRepository
name: nvidia-operator
namespace: flux-system
interval: 15m0s
timeout: 5m
releaseName: gpu-operator
values:
# Default values for gpu-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
platform:
openshift: false
nfd:
enabled: true
nodefeaturerules: false
psa:
enabled: false
cdi:
enabled: false
default: false
sandboxWorkloads:
enabled: false
defaultWorkload: "container"
daemonsets:
labels: {}
annotations: {}
priorityClassName: system-node-critical
tolerations:
- key: nvidia.com/gpu
operator: Exists
effect: NoSchedule
# configuration for controlling the update strategy ("OnDelete" or "RollingUpdate") of GPU Operands
# note that driver Daemonset is always set with OnDelete to avoid unintended disruptions
updateStrategy: "RollingUpdate"
# configuration for controlling rolling update of GPU Operands
rollingUpdate:
# maximum number of nodes to simultaneously apply pod updates on.
# can be specified either as number or percentage of nodes. Default 1.
maxUnavailable: "1"
validator:
repository: nvcr.io/nvidia/cloud-native
image: gpu-operator-validator
# If version is not specified, then default is to use chart.AppVersion
#version: ""
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
args: []
resources: {}
plugin:
env:
- name: WITH_WORKLOAD
value: "false"
operator:
repository: nvcr.io/nvidia
image: gpu-operator
# If version is not specified, then default is to use chart.AppVersion
#version: ""
imagePullPolicy: IfNotPresent
imagePullSecrets: []
priorityClassName: system-node-critical
defaultRuntime: docker
runtimeClass: nvidia
use_ocp_driver_toolkit: false
# cleanup CRD on chart un-install
cleanupCRD: false
# upgrade CRD on chart upgrade, requires --disable-openapi-validation flag
# to be passed during helm upgrade.
upgradeCRD: false
initContainer:
image: cuda
repository: nvcr.io/nvidia
version: 12.3.2-base-ubi8
imagePullPolicy: IfNotPresent
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Equal"
value: ""
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Equal"
value: ""
effect: "NoSchedule"
annotations:
openshift.io/scc: restricted-readonly
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: "node-role.kubernetes.io/master"
operator: In
values: [""]
- weight: 1
preference:
matchExpressions:
- key: "node-role.kubernetes.io/control-plane"
operator: In
values: [""]
logging:
# Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano')
timeEncoding: epoch
# Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
level: info
# Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn)
# Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error)
develMode: false
resources:
limits:
cpu: 500m
memory: 350Mi
requests:
cpu: 200m
memory: 100Mi
mig:
strategy: single
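# "single" advertises MIG devices (when configured) under the single
# nvidia.com/gpu resource name rather than per-profile resource names.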
driver:
enabled: true
nvidiaDriverCRD:
enabled: false
deployDefaultCR: true
driverType: gpu
nodeSelector: {}
useOpenKernelModules: false
# use pre-compiled packages for NVIDIA driver installation.
# only supported as a tech-preview feature on ubuntu22.04 kernels.
usePrecompiled: false
repository: nvcr.io/nvidia
image: driver
version: "550.54.15"
imagePullPolicy: IfNotPresent
imagePullSecrets: []
startupProbe:
initialDelaySeconds: 60
periodSeconds: 10
# nvidia-smi can take longer than 30s in some cases
# ensure enough timeout is set
timeoutSeconds: 60
failureThreshold: 120
rdma:
enabled: false
useHostMofed: false
upgradePolicy:
# global switch for automatic upgrade feature
# if set to false all other options are ignored
autoUpgrade: true
# how many nodes can be upgraded in parallel
# 0 means no limit, all nodes will be upgraded in parallel
maxParallelUpgrades: 1
# maximum number of nodes with the driver installed, that can be unavailable during
# the upgrade. Value can be an absolute number (ex: 5) or
# a percentage of total nodes at the start of upgrade (ex:
# 10%). Absolute number is calculated from percentage by rounding
# up. By default, a fixed value of 25% is used.
maxUnavailable: 25%
# options for waiting on pod(job) completions
waitForCompletion:
timeoutSeconds: 0
podSelector: ""
# options for gpu pod deletion
gpuPodDeletion:
force: false
timeoutSeconds: 300
deleteEmptyDir: false
# options for node drain (`kubectl drain`) before the driver reload
# this is required only if default GPU pod deletions done by the operator
# are not sufficient to re-install the driver
drain:
enable: false
force: false
podSelector: ""
# It's recommended to set a timeout to avoid an infinite drain in case a non-fatal error keeps recurring on retries
timeoutSeconds: 300
deleteEmptyDir: false
manager:
image: k8s-driver-manager
repository: nvcr.io/nvidia/cloud-native
# When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4
# to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0
version: v0.6.7
imagePullPolicy: IfNotPresent
env:
- name: ENABLE_GPU_POD_EVICTION
value: "true"
- name: ENABLE_AUTO_DRAIN
value: "false"
- name: DRAIN_USE_FORCE
value: "false"
- name: DRAIN_POD_SELECTOR_LABEL
value: ""
- name: DRAIN_TIMEOUT_SECONDS
value: "0s"
- name: DRAIN_DELETE_EMPTYDIR_DATA
value: "false"
env: []
resources: {}
# Private mirror repository configuration
repoConfig:
configMapName: ""
# custom ssl key/certificate configuration
certConfig:
name: ""
# vGPU licensing configuration
licensingConfig:
configMapName: ""
nlsEnabled: true
# vGPU topology daemon configuration
virtualTopology:
config: ""
# kernel module configuration for NVIDIA driver
kernelModuleConfig:
name: ""
toolkit:
enabled: true
repository: nvcr.io/nvidia/k8s
image: container-toolkit
version: v1.15.0-rc.4-ubuntu20.04
imagePullPolicy: IfNotPresent
imagePullSecrets: []
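# k3s ships its own embedded containerd, so point the toolkit at the
# k3s-managed containerd config and socket rather than the stock
# /etc/containerd/config.toml and /run/containerd/containerd.sock paths.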
env:
- name: CONTAINERD_CONFIG
value: /var/lib/rancher/k3s/agent/etc/containerd/config.toml
- name: CONTAINERD_SOCKET
value: /run/k3s/containerd/containerd.sock
resources: {}
installDir: "/usr/local/nvidia"
devicePlugin:
enabled: true
repository: nvcr.io/nvidia
image: k8s-device-plugin
version: v0.15.0-rc.2-ubi8
imagePullPolicy: IfNotPresent
imagePullSecrets: []
args: []
env:
- name: PASS_DEVICE_SPECS
value: "true"
- name: FAIL_ON_INIT_ERROR
value: "true"
- name: DEVICE_LIST_STRATEGY
value: envvar
- name: DEVICE_ID_STRATEGY
value: uuid
- name: NVIDIA_VISIBLE_DEVICES
value: all
- name: NVIDIA_DRIVER_CAPABILITIES
value: all
resources: {}
# Plugin configuration
# Use "name" to either point to an existing ConfigMap or to create a new one with a list of configurations(i.e with create=true).
# Use "data" to build an integrated ConfigMap from a set of configurations as
# part of this helm chart. An example of setting "data" might be:
# config:
# name: device-plugin-config
# create: true
# data:
# default: |-
# version: v1
# flags:
# migStrategy: none
# mig-single: |-
# version: v1
# flags:
# migStrategy: single
# mig-mixed: |-
# version: v1
# flags:
# migStrategy: mixed
config:
# Create a ConfigMap (default: false)
create: false
# ConfigMap name (either existing or to create a new one with create=true above)
name: ""
# Default config name within the ConfigMap
default: ""
# Data section for the ConfigMap to create (i.e only applies when create=true)
data: {}
# MPS related configuration for the plugin
mps:
# MPS root path on the host
root: "/run/nvidia/mps"
# standalone dcgm hostengine
dcgm:
# disabled by default; the exporter uses its embedded nv-hostengine instead
enabled: false
repository: nvcr.io/nvidia/cloud-native
image: dcgm
version: 3.3.3-1-ubuntu22.04
imagePullPolicy: IfNotPresent
hostPort: 5555
args: []
env: []
resources: {}
dcgmExporter:
enabled: true
repository: nvcr.io/nvidia/k8s
image: dcgm-exporter
version: 3.3.5-3.4.0-ubuntu22.04
imagePullPolicy: IfNotPresent
env:
- name: DCGM_EXPORTER_LISTEN
value: ":9400"
- name: DCGM_EXPORTER_KUBERNETES
value: "true"
- name: DCGM_EXPORTER_COLLECTORS
value: "/etc/dcgm-exporter/dcp-metrics-included.csv"
resources: {}
serviceMonitor:
enabled: false
interval: 15s
honorLabels: false
additionalLabels: {}
relabelings: []
# - source_labels:
# - __meta_kubernetes_pod_node_name
# regex: (.*)
# target_label: instance
# replacement: $1
# action: replace
gfd:
enabled: true
repository: nvcr.io/nvidia
image: k8s-device-plugin
version: v0.15.0-rc.2-ubi8
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env:
- name: GFD_SLEEP_INTERVAL
value: 60s
- name: GFD_FAIL_ON_INIT_ERROR
value: "true"
resources: {}
migManager:
enabled: true
repository: nvcr.io/nvidia/cloud-native
image: k8s-mig-manager
version: v0.6.0-ubuntu20.04
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env:
- name: WITH_REBOOT
value: "false"
resources: {}
config:
name: "default-mig-parted-config"
default: "all-disabled"
gpuClientsConfig:
name: ""
nodeStatusExporter:
enabled: false
repository: nvcr.io/nvidia/cloud-native
image: gpu-operator-validator
# If version is not specified, then default is to use chart.AppVersion
#version: ""
imagePullPolicy: IfNotPresent
imagePullSecrets: []
resources: {}
gds:
enabled: false
repository: nvcr.io/nvidia/cloud-native
image: nvidia-fs
version: "2.17.5"
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
args: []
gdrcopy:
enabled: false
repository: nvcr.io/nvidia/cloud-native
image: gdrdrv
version: "v2.4.1"
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
args: []
vgpuManager:
enabled: false
repository: ""
image: vgpu-manager
version: ""
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
resources: {}
driverManager:
image: k8s-driver-manager
repository: nvcr.io/nvidia/cloud-native
# When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4
# to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0
version: v0.6.7
imagePullPolicy: IfNotPresent
env:
- name: ENABLE_GPU_POD_EVICTION
value: "false"
- name: ENABLE_AUTO_DRAIN
value: "false"
vgpuDeviceManager:
enabled: true
repository: nvcr.io/nvidia/cloud-native
image: vgpu-device-manager
version: "v0.2.5"
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
config:
name: ""
default: "default"
vfioManager:
enabled: true
repository: nvcr.io/nvidia
image: cuda
version: 12.3.2-base-ubi8
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
resources: {}
driverManager:
image: k8s-driver-manager
repository: nvcr.io/nvidia/cloud-native
# When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4
# to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0
version: v0.6.7
imagePullPolicy: IfNotPresent
env:
- name: ENABLE_GPU_POD_EVICTION
value: "false"
- name: ENABLE_AUTO_DRAIN
value: "false"
kataManager:
enabled: false
config:
artifactsDir: "/opt/nvidia-gpu-operator/artifacts/runtimeclasses"
runtimeClasses:
- name: kata-qemu-nvidia-gpu
nodeSelector: {}
artifacts:
url: nvcr.io/nvidia/cloud-native/kata-gpu-artifacts:ubuntu22.04-535.54.03
pullSecret: ""
- name: kata-qemu-nvidia-gpu-snp
nodeSelector:
"nvidia.com/cc.capable": "true"
artifacts:
url: nvcr.io/nvidia/cloud-native/kata-gpu-artifacts:ubuntu22.04-535.86.10-snp
pullSecret: ""
repository: nvcr.io/nvidia/cloud-native
image: k8s-kata-manager
version: v0.1.2
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
resources: {}
sandboxDevicePlugin:
enabled: true
repository: nvcr.io/nvidia
image: kubevirt-gpu-device-plugin
version: v1.2.6
imagePullPolicy: IfNotPresent
imagePullSecrets: []
args: []
env: []
resources: {}
ccManager:
enabled: false
defaultMode: "off"
repository: nvcr.io/nvidia/cloud-native
image: k8s-cc-manager
version: v0.1.1
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env:
- name: CC_CAPABLE_DEVICE_IDS
value: "0x2339,0x2331,0x2330,0x2324,0x2322,0x233d"
resources: {}
node-feature-discovery:
enableNodeFeatureApi: true
gc:
enable: true
replicaCount: 1
serviceAccount:
name: node-feature-discovery
create: false
worker:
serviceAccount:
name: node-feature-discovery
# disable creation to avoid duplicate serviceaccount creation by master spec below
create: false
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Equal"
value: ""
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Equal"
value: ""
effect: "NoSchedule"
- key: nvidia.com/gpu
operator: Exists
effect: NoSchedule
config:
sources:
pci:
deviceClassWhitelist:
- "02"
- "0200"
- "0207"
- "0300"
- "0302"
deviceLabelFields:
- vendor
master:
serviceAccount:
name: node-feature-discovery
create: true
config:
extraLabelNs: ["nvidia.com"]
# noPublish: false
# resourceLabels: ["nvidia.com/feature-1","nvidia.com/feature-2"]
# enableTaints: false
# labelWhiteList: "nvidia.com/gpu"

View File

@ -1,36 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama-deployment
namespace: ollama-ns
spec:
replicas: 1
selector:
matchLabels:
app: ollama
template:
metadata:
labels:
app: ollama
spec:
runtimeClassName: nvidia
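# RuntimeClass provided by the gpu-operator HelmRelease earlier in this
# commit (operator.runtimeClass: nvidia).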
containers:
- name: ollama
image: ollama/ollama
env:
- name: OLLAMA_HOST
value: 0.0.0.0
- name: OLLAMA_MODELS
value: "/models"
ports:
- containerPort: 11434
resources:
limits:
nvidia.com/gpu: 2
volumeMounts:
- name: ollama-volume
mountPath: "/my-models"
volumes:
- name: ollama-volume
persistentVolumeClaim:
claimName: ollama-pvc

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ollama-pvc
namespace: ollama-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 200Gi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ollama-service
namespace: ollama-ns
spec:
type: LoadBalancer
ports:
- port: 11434
targetPort: 11434
selector:
app: ollama

View File

@ -1,30 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: open-webui-deployment
namespace: open-webui-ns
spec:
replicas: 1
selector:
matchLabels:
app: open-webui
template:
metadata:
labels:
app: open-webui
spec:
containers:
- name: open-webui
image: ghcr.io/open-webui/open-webui:main
ports:
- containerPort: 8080
env:
- name: OLLAMA_BASE_URL
value: "http://ollama-service.ollama-ns.svc.cluster.local:11434" # Assuming the internal service is named 'open-webui-service'
volumeMounts:
- name: config
mountPath: /app/backend/data
volumes:
- name: config
persistentVolumeClaim:
claimName: open-webui-pvc

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: open-webui-pvc
namespace: open-webui-ns
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: longhorn

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: open-webui-service
namespace: open-webui-ns
spec:
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 8080
selector:
app: open-webui

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: paperless-ngx-consume-pvc
namespace: paperless-ngx-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 2Gi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: paperless-ngx-data-pvc
namespace: paperless-ngx-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 10Gi

View File

@ -1,94 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: paperless-ngx
namespace: paperless-ngx-ns
spec:
replicas: 1
selector:
matchLabels:
app: paperless-ngx
template:
metadata:
labels:
app: paperless-ngx
spec:
containers:
- name: paperless-ngx
image: ghcr.io/paperless-ngx/paperless-ngx:2.3.3
env:
- name: PAPERLESS_URL
value: "https://paperless.clortox.com"
- name: PAPERLESS_TIME_ZONE
value: America/New_York
- name: PAPERLESS_ADMIN_USER
valueFrom:
secretKeyRef:
name: paperless-config
key: ADMIN_NAME
- name: PAPERLESS_ADMIN_MAIL
valueFrom:
secretKeyRef:
name: paperless-config
key: ADMIN_EMAIL
- name: PAPERLESS_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: paperless-config
key: ADMIN_PASSWORD
#- name: PAPERLESS_ENABLE_HTTP_REMOTE_USER
# value: "true"
#- name: PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME
# value: "HTTP_REMOTE_USER"
#- name: PAPERLESS_LOGOUT_REDIRECT_URL
# value: ""
- name: PAPERLESS_SECRET_KEY
valueFrom:
secretKeyRef:
name: paperless-config
key: SECRET
# Secret because the URI contains the password
- name: PAPERLESS_REDIS
valueFrom:
secretKeyRef:
name: paperless-config
key: REDIS_URL
- name: PAPERLESS_DBENGINE
value: "postgresql"
- name: PAPERLESS_DBHOST
value: postgresql.postgresql-system.svc.cluster.local
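# Shared cluster PostgreSQL in the postgresql-system namespace; assumes the
# "paperless" role and database already exist there.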
- name: PAPERLESS_DBPORT
value: "5432"
- name: PAPERLESS_DBUSER
value: "paperless"
- name: PAPERLESS_DBPASS
valueFrom:
secretKeyRef:
name: paperless-config
key: POSTGRES_PASS
ports:
- containerPort: 8000
volumeMounts:
- name: paperless-consume
mountPath: "/usr/src/paperless/consume"
- name: paperless-media
mountPath: "/usr/src/paperless/media"
- name: paperless-export
mountPath: "/usr/src/paperless/export"
- name: paperless-data
mountPath: "/usr/src/paperless/data"
volumes:
- name: paperless-consume
persistentVolumeClaim:
claimName: paperless-ngx-consume-pvc
- name: paperless-media
persistentVolumeClaim:
claimName: paperless-ngx-media-pvc
- name: paperless-export
persistentVolumeClaim:
claimName: paperless-ngx-export-pvc
- name: paperless-data
persistentVolumeClaim:
claimName: paperless-ngx-data-pvc

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: paperless-ngx-export-pvc
namespace: paperless-ngx-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 1Gi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: paperless-ngx-media-pvc
namespace: paperless-ngx-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 30Gi

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: paperless-ngx-service
namespace: paperless-ngx-ns
spec:
selector:
app: paperless-ngx
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 8000

View File

@ -1,20 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: paperless-config
namespace: paperless-ngx-ns
spec:
encryptedData:
ADMIN_EMAIL: AgDX1IzNrK1moCA/DJMU9TwKdn+vQrfK9F3lHMwisFrruPRh9yLkv8X3AWNzAL4KG+sINa4xbUaGn73Di2nto3MrOkBgE8sMDJ54sYGlmQtWpn0zNWpLcmOzpJVxtiTJXyCGMCV8S87z53w+HAsYMv7ReuKghUa9ZlLjCi243chXDV1hpZnwh5uczKsTFY+I+c5zzYh6GhykJPDlyGoa80YsWU8VwO7jIz75eUWiGlCfADtUCA1T9yfrydBCrroGVyYBxiKhcBuqyeHzKGGF9mfuIXeMhWl1Idpr4Ohp2DJLS2Nm16O0S3jh5SiWpQR6fiEHa7q9PKysnp4PGF577iSGEQFQW40TdnJ7dKUP8pgOMWBVI1C1CD+8DktoAx1KBFopfxvvlBnSf/+asPDcDlIbGc/CSkVeJVnvwhZsifXBhLeUTelAtpUnEZ3XCKEEqIiw/ICnVFG3oiuZN9bAM1FwEsRW0Vgh5VOel6q2Wi4DqF0mNyiaaKEgIvAmUUBhkpFhgvmsrDPV8UhlcCLBcxw2vfIget6jf0cK6UDIXs9/ooIZoxJ74/52DeI0FWY9Q/7iwZmza0ADsH15+hO6YrQukcuWokQNoUUmDrpUxOZuyw2kK3OXMo5Lq+gYc+ykV77OK0t5K/tDo2nv2WQ0W+Lw421mTA2S2TT2pi1SaXMDomXHTr9HRUDLJ2tTpIX+mOO2hFwIBgqVlBGZYefMqeCiLA==
ADMIN_NAME: AgDEdbEOWyZ4DMGSRFn2BduMEHbNYvXv7/2sjvTe/t87CEgj4YluZVlFAr0KQAvED7HJBl3jDZHbrkqYate/EzqkeV1lHVIaHCyLDIHrmWhE9mPRZ5cLTbPMDlxpFa4H6iq796u52KeCgLuM4cVxZTArrFsfILhgpaMmIYCStt+3DfBU+PP04PEmYytY1bsbrVL4wZNIFwJoBcJVxiCowAeCQ4I8SkJuZkKo8bCBVTTtQ5vtVXjY4qxqJ0XYwpxbyatCYAHN005EOjh/ZQUUskGDtbrbIeZkIj1MBmmb9ZlIQOfjO9+qxKkL4TqrUiWHmWaqokCG5wxo5yxZxari8B4GR2tHbF2aWgPfVlJi3+o4DzlJbGXYvHXXC4NmY3sPze4cuiSgDWiq0vbgw8V5RXPjcqjdyEUHoZAtV96XwsU0tIfSKj8uhhpcIiFEtljA3ie61nr3q3bcZB4a9KgGBlFv9O+mBzqJeq2DywIVhYnbyG8vqzGHX8kJWwXzPCH5G5BRU9GuA75FUaaHd3jxwHlP08qfntuQQZptqIh7Q75xaPaJvnTyJBHTZX1rDiOF4NrJ/MKZLE+LmI2gQ2Dso/XOlE9iGaN8xKZyTtVAS+U5J4b2rKqBCSi+d8cvpFUryW8qdCp4043+aYIznyQQP/8pwzqEfFObCnLKqjCo8lKvQSAtxx3uEvmLqrqSumK6ws/En+Py5g==
ADMIN_PASSWORD: AgBW40SS1is1/jahPEZQT9k91Sd4ahlPEW+ZM++VeIvvhOPgeB0YjchYL9TbB4IbqA2tXyP1vF6mzWTK4NjGUm/o4T+MIfEy+cwpgsKfgC5NqcAe4YBVAWeHPBIlJlc+lQYOoZbggjmEmnynQaMK7y/zwJ3K0OZiBugy4jwg2wlRd9EkEmUgt++TH6k8+4TjNX4eOJy26ucqUqTEbC9a4tMODgqasVB+zTXx3bv5Ojkpa0JyR4keCUtHfTeAy769nEgcQHCeJXW9+No95wgVCrMwgLlGOgWiuqQrBP50XVVhtaI2rDzReYRrW3ajRqVafTcrGCnKlVpOb7u34N7ikSpn0Hrn2UzhwKB+za8Bpkw8vs74bBy0IpRL352rx27hEc1mozEIr3ISxy6JKnWKawHYNgR6Hj5mg1f5Q2LyucAJDVbVPwTx/+uGquqfUrKCU2Gy3Xevp4hFMmFh5KXuewqTOo5m6lcUcuXeEdw/5Sc7RxpKLPaADZ9cvJjNqudDjI3JDt39TfqR82kOzG0o2kiSTBdkhr7ZkodOxeV+/d3KCNkFKUBQ0BJZsTwbL8FhUa0xVrSRCqW+eeKZ4uvxtVJByAkKSrGQSUHyVqQ50AQqArHI2zHL+iYsDhl3exOoCiFE0HrOaOYcw/PTiErVGL4PFnVip/R8k4ML9vtYlveKXSSHV/GgwRRRA6JZbtGjIH/3gupr0Uj+sqvTSlpVoNDrXEL0hLW+6lvtYcGbpvEcvQ==
POSTGRES_PASS: AgDWdgpJABCcxVvjoHzNTcwQDkCnLhxUFql59/pmhPOuGnpOUfU7Aebwt7ZoI5tNFlb2dJT1JVr2ghcbvy9KUsvtl7gh8ht6n+uiWwpb3cAqyHQe60bvHMVbJyVu1Z4Lam1VEhGhtCoKrdY4NL29++DHk4XFaPPUChXWXhNBW+h0+unlAGd4zB0paiRtP/J5IbAyu37ipKYs9fFWjSfDKFow1e/qbV/RHcGbpsE0Ea1KVepw05Y0w+9/8E07vJlhzqZIrNioq2L8wYe8/Caov6wx2gZo2+PqphDjEedDQgl07GndOWRYwmAlJWEzBSkKQXaChNVVV1aWN44aCya+29wmp5v5IZhEfTlqtxTF1yr5XJFNfvuNHFSED3oS2FmC51GX7ONtNBS+Psy662M1E1dO36oo08mLD0DyvrC3FhVpS0tPKy3u7CzMLxCqSCm10aK+t5st/n8o3rPLbqJT/0llCZ/cwZTuIyRwQ0yMnM6TYDSQhncP7cExqwmLjevAMyxJtJmQz9CECuhx6a2CHfaITVaEEFSCBZGpu44s3K8iHAEvsR9I+Z7BPr8E+p8gLrBmp3+diP5BKksliw4mQ9125DsEZIgmsTYoEhf0PZhWLeEfOPfTh/2SlA4TYK/5uiVgIyb+iwMcWND81ZtUdwSVQGl+CVShPffcRAxPVm9n3pjytK1Nlni6AONlCjxlcWYktdJvpUY6PAQRBX4U9PfFQdB0Kt+ire5YAwuPBq2nUw==
REDIS_URL: AgDRAas6f87ZL022J/pMvbi+V4ZMVyppl/FtZnReptBVfbLUJD7uSWG5s24paZ59nrDyp3PCVqfcSa+5+2dKRp4IN7oJnjQ1ack5gZMB2M94J1uUjJjaHlNoFb9VLbwasxtSFb/uX0vkb86GkCj3RrKeE08OVMimjR9557hJ99ZDz/LzwJRAlVeB93tB1ztfeSDFDOJhQydMwai4rgjCoQ0Z/C/NTDnRjmCro6PGMgQNALtUwtWypkoRu//SMwgFzBSF0/SbgL2G7TjDWDOuGHGsaWuVn3hQlYTT2Gb18TzB3O+6NGuWnJuCl2gAKDp/ZZF4QR1PAPV2LVxzqpBQwn7K2+dYw9gQPC6Sw6iRgC0ChnjCcr+2ryzLpAu8I9H5BJ2JUeh9dcbiUcQTJYvpa4AN84Noi7leIzzGBivYCC6Q3Z8Cd3a+I7fa4F8ZE5F4Qw1U7CNhVJQ61UUliRhlvJIXTXRfJ9/4bAoIKQfVu5UWpRZvPxdN3g64TsKFAMdE+Px0bCMpxlRxI7RLAa1VYTfVZL3g9mAIIZlL/u2mot6gmH37IsbnTkdL6FC0hWRKi8k0y20KUB66EAA9blentdhc716tU9CuI7Ouiik0/bcL2VnoQwDQCCPlr2yNSeIfLKxhdpQTnGKacUFNsQjAfF3x+E05gz7dDct8lNpjI0FVFq0M9BAV0djT3ECU8kqViy717ZrCi8J6m64RrR26tErzMouFjgXpMqw+jNttnOsz32ycXUa1DAK9Z8h7FK16L1nV+1Dbt30AcA54wEJFb4nsCeeUl0b04N+OlwNnveUI4xAmo9FmB1txTYw=
SECRET: AgB7kULin1dJwJHRnJUXX1YZHqx6Fc8JF7Rd3NWRYI5sqpRLHLVHZr19kf2p3EPFuoCBK4jcOo5+sg9L3IQgefSOtvTqjazl9Zcuv4w0LJKyX5+PJaqe2FFINoM+We+DispCNRWgnWFDokA2RH6J9htKpQvXscAlRhsEdERaRmrn24g69aryDeJ4EVXh0yOQK73kzGkurnod/caybunZUmtL3bQkMbWW5OTphxoyOMLolPV5JT4GooR0yGPgF/0tLvcph2Ys8zi2PjWVcqvdNGjMrrcT/HrA04ucFFLbhipl7e4MK5CfhzEfA435x6ujJySbIpYdgw0gTWti/WXXfr2SrZordj2bXVvZ237BpKdvyqOnaNV4QVC0yUHKfhSfEcPBJL3Q79w3HRpTmmd+TKpjfFgYGU0KPDKHmeIhiRng5OEDYiOMUrfnBBcXn+fILV0HeuQa8wHNRjIQ++GOfZ5dwbO1BtkInawNIOCzdkLa4C8c/rcLF46dzvhcHDC5t6jlA4fAGkZhlVRwJDmHwPubEuk3vKhXCbDQjlV1vKp7qnscEn4zXGUDVbGundg25ddDH3UCuMazEs7hvOut3WXh/vBXlMVG3k4hL0qubJQM0AhEs8X2vAo0X0tKEkKNqh2D1oWfjFWUT96LBlyfNa2FRlDW6rR1rfr/FKelFonpEY4afy5BIIyrj/RHWdwlliL0DO6MMTEWJxuOyOi/HXh0FAgN7mDQk4n0kttzdVdue/s6aKGltlMRUvH/fugOZNQya5uPrTj2QxB6qmU9dHalbfApdGIMHhdRFrjW/hTjQFLNO+RvMu1wgpB0/s5qDqSD1DqAkSwjkYQv3VjUbyj6Jc9RV192X4DsAJkPJxJKQQ==
template:
metadata:
creationTimestamp: null
name: paperless-config
namespace: paperless-ngx-ns
---
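When the sealed-secrets controller admits this object it decrypts each field and writes a plain Secret using the template metadata above. A minimal sketch of the unsealed result, with placeholders standing in for the decrypted values (the controller actually writes them base64-encoded under .data):

apiVersion: v1
kind: Secret
metadata:
  name: paperless-config
  namespace: paperless-ngx-ns
type: Opaque
stringData:
  ADMIN_NAME: <decrypted value>       # placeholder
  ADMIN_PASSWORD: <decrypted value>   # placeholder
  POSTGRES_PASS: <decrypted value>    # placeholder
  REDIS_URL: <decrypted value>        # placeholder
  SECRET: <decrypted value>           # placeholder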

View File

@ -1,377 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: pgadmin
namespace: postgresql-system
spec:
chart:
spec:
chart: pgadmin4
sourceRef:
kind: HelmRepository
name: runix
namespace: flux-system
interval: 15m0s
timeout: 5m
releaseName: pgadmin
values:
# Default values for pgAdmin4.
replicaCount: 1
## pgAdmin4 container image
##
image:
registry: docker.io
repository: dpage/pgadmin4
# Overrides the image tag whose default is the chart appVersion.
tag: ""
pullPolicy: IfNotPresent
## Deployment annotations
annotations: {}
## priorityClassName
priorityClassName: ""
## Deployment entrypoint override
## Useful when there's a requirement to modify container's default:
## https://www.vaultproject.io/docs/platform/k8s/injector/examples#environment-variable-example
## ref: https://github.com/postgres/pgadmin4/blob/master/Dockerfile#L206
# command: "['/bin/sh', '-c', 'source /vault/secrets/config && <entrypoint script>']"
service:
type: LoadBalancer
clusterIP: ""
loadBalancerIP: ""
port: 80
targetPort: 80
# targetPort: 4181 To be used with a proxy extraContainer
portName: http
annotations: {}
      ## Special annotations at the service level, e.g. the following
      ## sets VNet-internal IPs rather than public IPs:
## service.beta.kubernetes.io/azure-load-balancer-internal: "true"
## Specify the nodePort value for the service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Pod Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# Opt out of API credential automounting.
# If you don't want the kubelet to automatically mount a ServiceAccount's API credentials,
# you can opt out of the default behavior
automountServiceAccountToken: false
## Strategy used to replace old Pods by new ones
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
##
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
## Server definitions will be loaded at launch time. This allows connection
## information to be pre-loaded into the instance of pgAdmin4 in the container.
## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/import_export_servers.html
##
serverDefinitions:
## If true, server definitions will be created
##
enabled: false
## The resource type to use for deploying server definitions.
## Can either be ConfigMap or Secret
resourceType: ConfigMap
servers:
# firstServer:
# Name: "Minimally Defined Server"
# Group: "Servers"
# Port: 5432
# Username: "postgres"
# Host: "localhost"
# SSLMode: "prefer"
# MaintenanceDB: "postgres"
networkPolicy:
enabled: true
## Ingress
## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# ingressClassName: ""
hosts:
- host: chart-example.local
paths:
- path: /
pathType: Prefix
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# Additional config maps to be mounted inside a container
# Can be used to map config maps for sidecar as well
extraConfigmapMounts: []
# - name: certs-configmap
# mountPath: /etc/ssl/certs
# subPath: ca-certificates.crt # (optional)
# configMap: certs-configmap
# readOnly: true
extraSecretMounts: []
# - name: pgpassfile
# secret: pgpassfile
# subPath: pgpassfile
# mountPath: "/var/lib/pgadmin/storage/pgadmin/file.pgpass"
# readOnly: true
## Additional volumes to be mounted inside a container
##
extraVolumeMounts: []
## Specify additional containers in extraContainers.
## For example, to add an authentication proxy to a pgadmin4 pod.
extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - -provider=github
# - -client-id=
# - -client-secret=
# - -github-org=<ORG_NAME>
# - -email-domain=*
# - -cookie-secret=
# - -http-address=http://0.0.0.0:4181
# - -upstream-url=http://127.0.0.1:3000
# ports:
# - name: proxy-web
# containerPort: 4181
## @param existingSecret Name of existing secret to use for default pgadmin credentials. `env.password` will be ignored and picked up from this secret.
##
existingSecret: ""
## @param secretKeys.pgadminPasswordKey Name of key in existing secret to use for default pgadmin credentials. Only used when `existingSecret` is set.
##
secretKeys:
pgadminPasswordKey: password
## pgAdmin4 startup configuration
## Values in here get injected as environment variables
    ## A chart reinstall is needed for changes here to apply
env:
# can be email or nickname
email: tyler@clortox.com
password: defaultpassword
# pgpassfile: /var/lib/pgadmin/storage/pgadmin/file.pgpass
# set context path for application (e.g. /pgadmin4/*)
# contextPath: /pgadmin4
## If True, allows pgAdmin4 to create session cookies based on IP address
## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html
##
enhanced_cookie_protection: "False"
## Add custom environment variables that will be injected to deployment
## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/container_deployment.html
##
variables: []
# - name: PGADMIN_LISTEN_ADDRESS
# value: "0.0.0.0"
# - name: PGADMIN_LISTEN_PORT
# value: "8080"
## Additional environment variables from ConfigMaps
envVarsFromConfigMaps: []
# - array-of
# - config-map-names
## Additional environment variables from Secrets
envVarsFromSecrets: []
# - array-of
# - secret-names
persistentVolume:
## If true, pgAdmin4 will create/use a Persistent Volume Claim
## If false, use emptyDir
##
enabled: true
## pgAdmin4 Persistent Volume Claim annotations
##
annotations: {}
## pgAdmin4 Persistent Volume access modes
## Must match those of existing PV or dynamic provisioner
## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
accessModes:
- ReadWriteOnce
      ## pgAdmin4 Persistent Volume Size
      ##
      size: 2Gi
      ## pgAdmin4 Persistent Volume Storage Class
      ## If defined, storageClassName: <storageClass>
      ## If set to "-", storageClassName: "", which disables dynamic provisioning
      ## If undefined (the default) or set to null, no storageClassName spec is
      ## set, choosing the default provisioner. (gp2 on AWS, standard on
      ## GKE, AWS & OpenStack)
      ##
      storageClass: "longhorn"
      # existingClaim: ""
## Additional volumes to be added to the deployment
##
extraVolumes: []
## Security context to be added to pgAdmin4 pods
##
securityContext:
runAsUser: 5050
runAsGroup: 5050
fsGroup: 5050
containerSecurityContext:
enabled: false
allowPrivilegeEscalation: false
## pgAdmin4 readiness and liveness probe initial delay and timeout
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
##
livenessProbe:
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 3
readinessProbe:
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 3
    ## Required for pgAdmin4 releases prior to 4.16, to set the ACL on /var/lib/pgadmin.
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
##
VolumePermissions:
## If true, enables an InitContainer to set permissions on /var/lib/pgadmin.
##
enabled: false
## @param extraDeploy list of extra manifests to deploy
##
extraDeploy: []
## Additional InitContainers to initialize the pod
##
extraInitContainers: |
# - name: add-folder-for-pgpass
# image: "dpage/pgadmin4:latest"
# command: ["/bin/mkdir", "-p", "/var/lib/pgadmin/storage/pgadmin"]
# volumeMounts:
# - name: pgadmin-data
# mountPath: /var/lib/pgadmin
# securityContext:
# runAsUser: 5050
containerPorts:
http: 80
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Horizontal Pod Autoscaling
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
#
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
## Node labels for pgAdmin4 pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
## Pod affinity
##
affinity: {}
## Pod annotations
##
podAnnotations: {}
## Pod labels
##
podLabels: {}
# key1: value1
# key2: value2
# -- The name of the Namespace to deploy
# If not set, `.Release.Namespace` is used
namespace: null
init:
## Init container resources
##
resources: {}
## Define values for chart tests
test:
## Container image for test-connection.yaml
image:
registry: docker.io
repository: busybox
tag: latest
## Resources request/limit for test-connection Pod
resources: {}
# limits:
# cpu: 50m
# memory: 32Mi
# requests:
# cpu: 25m
# memory: 16Mi
## Security context for test-connection Pod
securityContext:
runAsUser: 5051
runAsGroup: 5051
fsGroup: 5051
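One note on the values above: env.password is a plaintext default. The chart already supports sourcing it from a Secret via existingSecret; a hedged sketch, where the Secret name pgadmin-credentials is hypothetical and the key name comes from secretKeys.pgadminPasswordKey:

apiVersion: v1
kind: Secret
metadata:
  name: pgadmin-credentials   # hypothetical name
  namespace: postgresql-system
type: Opaque
stringData:
  password: <pgadmin login password>   # key must match secretKeys.pgadminPasswordKey

With that in place, setting existingSecret: pgadmin-credentials makes the chart ignore env.password, per the comment on existingSecret above.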

View File

@ -1,95 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: plex-bundle
namespace: plex-ns
annotations:
force-recreate: true
spec:
replicas: 1
selector:
matchLabels:
app: plex
template:
metadata:
labels:
app: plex
spec:
nodeSelector:
kubernetes.io/hostname: gluttony
containers:
- name: plex
image: plexinc/pms-docker:public
env:
        - name: TZ
          value: EST # fixed-offset zone; a location name like America/New_York would also track DST
- name: PLEX_UID
value: "1000"
- name: PLEX_GID
value: "1000"
        ports:
        - containerPort: 32400 # primary Plex web/API port
        - containerPort: 8324  # Plex Companion (Roku)
        - containerPort: 32469 # DLNA server
        - containerPort: 1900  # DLNA discovery (UDP per Plex's docs)
        - containerPort: 32410 # GDM discovery (UDP per Plex's docs)
        - containerPort: 32412 # GDM discovery
        - containerPort: 32413 # GDM discovery
        - containerPort: 32414 # GDM discovery
volumeMounts:
- name: plex-config
mountPath: /config
- name: plex-media
mountPath: /data
# Sidecar providing access to upload/view/download raw media files
      - name: filebrowser
image: git.clortox.com/infrastructure/filebrowser:v1.0.1
env:
- name: ADMIN_PASS
valueFrom:
secretKeyRef:
name: filebrowser-secret
key: ADMIN-PASS
- name: DEFAULT_USERNAME
value: "default"
- name: DEFAULT_PASSWORD
valueFrom:
secretKeyRef:
name: filebrowser-secret
key: DEFAULT-PASS
- name: BRANDING_NAME
value: "Media Storage"
- name: AUTH_METHOD
value: "proxy"
- name: AUTH_HEADER
value: "X-Auth-User"
- name: PERM_ADMIN
value: "false"
- name: PERM_EXECUTE
value: "false"
- name: PERM_CREATE
value: "true"
- name: PERM_RENAME
value: "true"
- name: PERM_MODIFY
value: "true"
- name: PERM_DELETE
value: "false"
- name: PERM_SHARE
value: "true"
- name: PERM_DOWNLOAD
value: "true"
volumeMounts:
- name: plex-media
mountPath: /srv
ports:
- containerPort: 80
volumes:
- name: plex-config
persistentVolumeClaim:
claimName: plex-pvc-config
- name: plex-media
persistentVolumeClaim:
claimName: plex-pvc-media

View File

@ -1,21 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: plex-pv-config
spec:
storageClassName: local-storage
capacity:
storage: 200Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/Main/Container-Configs/Plex/"
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- gluttony

View File

@ -1,20 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: plex-pv-media
spec:
storageClassName: local-storage
capacity:
storage: 18000Gi
accessModes:
- ReadWriteMany
hostPath:
path: "/Main/Media"
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- gluttony

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: plex-pvc-config
namespace: plex-ns
spec:
volumeName: plex-pv-config
storageClassName: local-storage
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 200Gi
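The volumeName above statically binds this claim to the hostPath PV on gluttony, which is presumably deliberate: the Plex config lives on that node's local disk. For contrast, the dynamically provisioned claims later in this diff (prometheus, redisinsight) let the Longhorn provisioner create the volume, in which case this claim would reduce to a sketch like:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: plex-pvc-config
  namespace: plex-ns
spec:
  storageClassName: longhorn   # dynamic provisioner; no pre-created PV or volumeName needed
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 200Gi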

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: plex-pvc-media
namespace: plex-ns
spec:
volumeName: plex-pv-media
storageClassName: local-storage
accessModes:
- ReadWriteMany
resources:
requests:
storage: 18000Gi

View File

@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: plex-service
namespace: plex-ns
spec:
selector:
app: plex
type: LoadBalancer
  ports:
    - name: plex
      protocol: TCP
      port: 32400
      targetPort: 32400 # Plex container
    - name: filebrowser
      protocol: TCP
      port: 80
      targetPort: 80 # filebrowser sidecar

View File

@ -1,15 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: filebrowser-secret
namespace: plex-ns
spec:
encryptedData:
ADMIN-PASS: AgAj7jrppKzPFfnYDV+FEuXQx9lrkppWFElR3DjtR017tpBQs6/KjZYU1TX81TkNh8cONg4mGB72zvk60Yft5b5TSgZWuKA4qTXYEoFusyMR3wyOU/Ft7ZMk7IAr/7Hi9dDAh3CkmrQ2lQ3C5hRlfTljaSxqC9abmEZIeSo7OhrkX8YIvFhanBMbPenfkulSsK38dp3PfIC8kntRV1u37Z7CxovVu+Kn7IoRC4sKa3gcdJ5lIA/Aq3rln8atmzZcPGPzjIAPY4P72mjPaeIvzqzLsNMcecIIr20MyLTOG/eI8WrM+WC+dgyvj/Pjq/hzTW1QD3z4jZW224o4ghKiPr6mW0BbN6KBBqv/JFtpBqiYgGi/ADBVxTG7YUA+FcT7YA6nuxlqg+TMpwqP8ZJBmghosBeqmBndjKUjpexoihmy+XTDbEr7e8RDpOdL9jS9hGPt47cmFITSFSEQIGM6kOtdYWcMw6+aKkTt5Ul4bUfV9TXultGyNYITibATXWNqbRfZDjYVrWOIfoVJOe19N3WZg9R4UeKzow3RkoJvn3MUTYOOrzr9Csx+VxUMeGxLPFftedUIy8zzXaqL/0OFogQZ2P+mesiYxc67Z4VS2u0+iCLkJdUDYnM+2q6TRQMI2nP40ko62xDuSE2BDcufqsKfHoddswlYDyelLVqJKee+P3sUoxcblYlv6kqz1GbVhBKQrHzFphx72KG219N9zwjOI6w8V6NXHUEFblQ3gt9RPA==
DEFAULT-PASS: AgDXxxyMBUb7Q0J8LvxPXNEAz75c1JcS7xL3rN7E2Wg7MLsZHj2/0hRf9jaCCyyVnr/Pabbdmjb0nth4Dlm50tLWH+rU7KtLPwHB0pMVi9zSxKBdyvOJurVdY/nlbSuanxSL37rBOrwRQRv6t8w/IIs4R9GEaFjxKoIJTuV8JRu3r62FiL/3o2zyok9UYcLxw2h9H7B9yn9wXn5CAFk0M4jNRUns3oU7d0/hPbfwC216vU0ZIdga8yYlZw3zvVz54mX2XECnHWZT8gPK1w6v8AEca/kDFuVFBi63OdXFgjBHCa/uSs5wifzNPBzcRA+A8s/JgoSHEeMXTmBsMOlihSSz0kSGHS/rUdu6nZamVZfzCWOHskb3RVjs23yNJsSEDlYR/AMeAjnkMDvMe5b/X/eV1AOYkAQ/pACrSk2aG+4kLmLoLYXaeDVf8pTHj4yOvdffWk39ClCqIOyWF2+//N02lDepVwis498cL+7I4kEVXqy9FugUCsbtzxVXX6OHym4KpBZpAmrMqH83rC6CtU4orF6gjmTKCe1Ufq5GmsQgFFZTZYTexnbeTKXz6yw+RbHLTGdsaJnMaAQx5uB3khO9Pkge7/HLDmXEx+mtaaTvk7AF8PWjFJSQZEWxVSCr6O1Zd4LKsg0EP6Mrk+s+8OOfGb42e3wfJ6gY7KlTBBu8KmKnHRQl9uoMVO7y5PWwl+B3Wam5j78ggV4L9UmiEw6gYvrc8rmQWZqQbuw7pClQ3Q==
template:
metadata:
creationTimestamp: null
name: filebrowser-secret
namespace: plex-ns

File diff suppressed because it is too large

View File

@ -1,17 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: postgresql-default-credentials
namespace: postgresql-system
spec:
encryptedData:
password: AgAN2drH19WiBU8KYZyN8N0T1o8Sh9ti1M5kroU+xDpDD+pOLlZZEw63qcUeeK2paiTm7E3hVO/EnCNyGUBvrDKQzoMNsImbsTMJMVHldiTZedZV1IQxQXIYELgUtk93I2WoOiNvpfL+ro2vomjqPExlVeI1tuqPVdL1+xZYfKfFk+pPL3kLpRuO5HDmwcjy12yYd0E3RqU0g58O7UCCCdQRMOtU8/Z/MPM0I3ZGxG5DQCN3FEra8g1wacvsOplJVGYFzIBS2T7tPyW6I5zW9mFPDozMwqINukuoDC4uSUGSP2Ka2al3VyZiZnXySV3LJ38yj9TpZpTCKY2pgDeMue1hl50xMoCGhBXGzN3lCx6M4/us42a+oss/dn8oXFLAbOVaI2B7bpWHSz8fykdpogpqGgsa23gCuR2V8crZ7xVuACXqNDyunoHLgph8McFDsBXWNcyTg7ocILIjVKFLL4LDhtFQgHZcZXiTe6kMJNdKMxnH/0z/A00JO7dhU2uub31Oa0LwiE/rWO31E39tDZj4o0bRez1jsneuvbMVvwYyyr0OmOfdznv9qvGXbFSgGYCNUuTAPzFRMU4NkIup4RL3a0s2Fg28X79JIaWGjpuXgfyUKiBqUe7f6FAKkiEFeqYCJoccVQpbiYOODjyV5+89tfopmJaj7HZ1t3HfrDeGk/Vj6w4C6e2avCl+qLWqz2nFyM526ymdfVaWV5B945pFTTFrlxh8lRH7Ej3qGJEz1Q==
postgres-password: AgAwdjZR9WDv74oup8dhkzKxYYrMOd40O9S09t8pQspuw/xiO/CaeNFkggWrVVbNaFI1nnQrd/3JFTu6/1mwinr13MqAKKmile/rXSSKnXo+f90PSEFlsanDpfMSuhZKGF5gDHp5HwWIGDl8P6uBC50/Z7u/+muPcdgnCgg7rVMo5EiipgZgQOJXuAxHN8a8w1HMQoVd4PD1SewTHfbwxKsZoBYgy+RL5vS4Gd8d4UbqkSFk8h+uZHdsJALrZ9PCsUDMInT06Ll1YgmcSigtFR0bM46kfbGr1tvXKmv2lYMBjn8gcOS3rRxKmhqT/HUxaepg6TDjoG/Hw0oNtxGHRmwwnzuDBtPtCJOCb3LEodIAXsz7U3hUrvTI0/BMsXBP/wZFB7mZ6mvy4tfz2SkHEIoqu+um2I6sC9OHPNFIQPSq59PX2t2G9RH4aTJVWKFcLv7fZ7+w+ot0D4300z0fRbKYCgUowKUtkd+H3eYmu1AzMpFkEUh+9NahHNeXCf+YsAHZb0vm4mz39UWTVuRzdwGFFG193goOFI6A2t9n2E18f/UQnhKewi9z552THTqsFO3VE/Kq2C9q+AA3BaGVCSlNw7eRXRr+lSaNiuTGgEZeQZubBLriC7rTzrfJjP2ik2vayCXb1dvGPrSGst4oo9IYnb548uFjgpd7ZQz0WGCufBhM7GNv7XaC+ZQEsCHPwJLaiD6irl8d1IKn+7g9vnOPUYRhcTiPtuEuWFneQ9tNMw==
username: AgAn4TsaveRieevxEf80zFJeKLkQoLMf+o5upoeq5YdXRnyV00xk1yL0QBYdvNdt3FnEmZR5R0oiJKimZmQqHOHH9++G1cqACsmNmEbjU+BBUNwVOhXZAkU1xHvpAACNKaqiqlhR5uGYx+rP6GbsW11UrwTu4oeYBqyGXtO5i9FaFaIbK32UPJ4e5lsml0l01reWWwZI6DH9L2O6E7wif5Pxw6wEcQphfk2YlTddXGRZA0dI1xFSSuvjxRRaASpfJqU/ztqdzF/MVCKnheZneuVYyw7w7Suv87RIx4ddrJKqDz0Fla9LWAC1xJMqGxWPE0Bgd9jWlRK4Cy6DogZ5ZoJv+pZm8zXy3N60SSM2ZM7TaP+FOz8FhgoDYXSn3lKPlLY2mlOBFJ8PYL1dcrGqDkHKiklAUelbID1Hiw7CdcavohFTi9CsaZXWpyeAEiMmiMpXY2+nnwppqv0+Oc82DvU4305q8FzFmi9N2olR4vnvzjBclHsXsSesJ5rKjNHk3aApTT2MIbOexDXTyIXIkrLRFK1/KbUx86qjlPOycZ2YlqfvbakEajFVV3TyR5Bb7ZHf5yQOOyzkmJj6Z3xaNmfvskqHyhImQHKRHEKjyvoKqWMrAQZEUm4LwLTdSRMo3o9p00Xb2YFFCOOhJF9hoxf65TvXkWNJyYMNjBW1a1H8gnrjT0zEyV4Veh75YRDmzgtl2CR/2w==
template:
metadata:
creationTimestamp: null
name: postgresql-default-credentials
namespace: postgresql-system
---
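These fields were presumably produced by running a plain Secret through kubeseal (kubeseal --format yaml). A sketch of the input manifest, with placeholder values; the key names (username, password, postgres-password) match the encryptedData above:

apiVersion: v1
kind: Secret
metadata:
  name: postgresql-default-credentials
  namespace: postgresql-system
type: Opaque
stringData:
  username: <app username>                 # placeholder
  password: <app user password>            # placeholder
  postgres-password: <superuser password>  # placeholder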

View File

@ -1,36 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus
namespace: prometheus-ns
labels:
app: prometheus
spec:
replicas: 1
selector:
matchLabels:
app: prometheus
template:
metadata:
labels:
app: prometheus
spec:
initContainers:
- name: prometheus-data-permission-fix
image: busybox
command: ["/bin/chmod","-R","777", "/prometheus"]
volumeMounts:
- name: prometheus-storage
mountPath: /prometheus
containers:
- name: prometheus
image: prom/prometheus:v2.27.1
ports:
- containerPort: 9090
volumeMounts:
- name: prometheus-storage
mountPath: /prometheus
volumes:
- name: prometheus-storage
persistentVolumeClaim:
claimName: prometheus-pvc
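The init container above chmods the Longhorn volume to 777 so the non-root Prometheus process can write to it. A narrower alternative, assuming the prom/prometheus image's default nobody user (UID/GID 65534), is to let the kubelet set group ownership via fsGroup; a sketch of the same Deployment without the init container:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus
  namespace: prometheus-ns
  labels:
    app: prometheus
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      labels:
        app: prometheus
    spec:
      securityContext:
        fsGroup: 65534   # assumption: the image runs as nobody (65534); the volume is group-chowned to match
      containers:
      - name: prometheus
        image: prom/prometheus:v2.27.1
        ports:
        - containerPort: 9090
        volumeMounts:
        - name: prometheus-storage
          mountPath: /prometheus
      volumes:
      - name: prometheus-storage
        persistentVolumeClaim:
          claimName: prometheus-pvc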

View File

@ -1,14 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: prometheus-pvc
namespace: prometheus-ns
labels:
app: prometheus
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi

View File

@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: prometheus-ns
labels:
app: prometheus
spec:
type: LoadBalancer
ports:
- port: 9090
targetPort: 9090
protocol: TCP
name: http
selector:
app: prometheus

View File

@ -1,47 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: redisinsight #deployment name
namespace: redis-system
labels:
app: redisinsight #deployment label
spec:
replicas: 1 #a single replica pod
strategy:
type: Recreate
selector:
matchLabels:
      app: redisinsight #the pods this deployment manages, as defined by the pod template
template: #pod template
metadata:
labels:
app: redisinsight #label for pod/s
spec:
volumes:
- name: db
persistentVolumeClaim:
claimName: redisinsight-pvc
initContainers:
- name: init
image: busybox
command:
- /bin/sh
- '-c'
- |
chown -R 1001 /db
resources: {}
volumeMounts:
- name: db
mountPath: /db
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
containers:
- name: redisinsight #Container name (DNS_LABEL, unique)
image: redislabs/redisinsight:latest #repo/image
        imagePullPolicy: IfNotPresent #pull the image only if it is not already present locally
volumeMounts:
- name: db #Pod volumes to mount into the container's filesystem. Cannot be updated.
mountPath: /db
ports:
- containerPort: 8001 #exposed container port and protocol
protocol: TCP

View File

@ -1,14 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redisinsight-pvc
namespace: redis-system
labels:
app: redisinsight
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: redisinsight-service # a Service named 'redisinsight' would inject REDISINSIGHT_* env vars that clash with the app's settings
namespace: redis-system
spec:
type: LoadBalancer
ports:
- port: 80
targetPort: 8001
selector:
app: redisinsight

File diff suppressed because it is too large

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: redis
namespace: redis-system

View File

@ -1,15 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: redis-key
namespace: redis-system
spec:
encryptedData:
password: AgAQ9PHv4fJez1wAJtcvWSOMFEMOOHULZhX1wFzoO9JTm4WDeK9GaWbT4tSM3fXsd+9GfhggnsFHeP4t5G/4BlvQ8lNs0bXfUZiSomUL69zhH2YEg9EhJVm9eJWvvJ75m1HnfIL2yFMm9jsxgzajg+fn5a6h4od0gjPAah9+uiVYi4xdIAv8SJK+CEXKKLhuwzV+MkQ0XdiISdanHjrPvYKA5FGRLqjmJePfSTtea5xGhx4DkHzkQ2KwzKIM/v4JOhA3JnwXebZh+GrUv6cg/fh9xnBUxeFvoimAt0gzOD0ajUIWTqTEHCqmPfumNo4w2paG+s+0vAL2gercxeyamOhkRZuWfOLwnQ/eoAm+gQGItn7UhL0yjaFDpkdICTrIXOEebScv27aHKe+4Cdw1BcAS8lIrE9JelVVgOqxBCaIvIBBPVyaFFVXF/YmMK6VAYTO1c3MDPpJEeFyNGoMo82lzL3IwRRFrPYoDrKbfsrWfZUQRYKOxVWihgWYFYx/asceJxegPAdCLq7avQ7tCoIodm9qgZ4F7F0x+N38oFLLCCe3tAhorInC/sWjkrsLpDBtAkWEsJnN865a+yRpN2YHFz+NKf2rugGDre0jA7GgisPwukmY4sC6r8MSjxumkaBo22hMoyRXBpsEBzLTsWMDjI6155J60iamBIUUORYpEVOHVFmY4iDSY9mBbp/ZzIvOa+mJCcvI5U5apJBALOUrGY3hSXHm+am7FWZtM6U0rmw==
template:
metadata:
creationTimestamp: null
name: redis-key
namespace: redis-system
---

View File

@ -1,35 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: rmfakecloud
namespace: rmfakecloud-ns
spec:
replicas: 1
selector:
matchLabels:
app: rmfakecloud
template:
metadata:
labels:
app: rmfakecloud
spec:
containers:
- name: rmfakecloud
image: ddvk/rmfakecloud
env:
- name: JWT_SECRET_KEY
valueFrom:
secretKeyRef:
name: rmfakecloud-jwt
key: JWT-KEY
- name: STORAGE_URL
value: "https://remarkable.clortox.com"
ports:
- containerPort: 3000
volumeMounts:
- name: rmfakecloud-data
mountPath: "/data"
volumes:
- name: rmfakecloud-data
persistentVolumeClaim:
claimName: rmfakecloud-pvc-data
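The Deployment expects a Secret named rmfakecloud-jwt carrying the key JWT-KEY, which rmfakecloud uses to sign its auth tokens. A sketch of its plain (pre-sealing) form, with a placeholder value:

apiVersion: v1
kind: Secret
metadata:
  name: rmfakecloud-jwt
  namespace: rmfakecloud-ns
type: Opaque
stringData:
  JWT-KEY: <long random signing key>   # placeholder; generate a sufficiently random string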

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rmfakecloud-pvc-data
namespace: rmfakecloud-ns
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 4Gi

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: rmfakecloud-service
namespace: rmfakecloud-ns
spec:
selector:
app: rmfakecloud
type: LoadBalancer
ports:
- name: rmfakecloud
port: 80
targetPort: 3000

Some files were not shown because too many files have changed in this diff