Save progress

Tyler Perkins 2024-08-28 19:34:02 -04:00
parent 8da71232a9
commit 29ed8c008e
Signed by: tyler
GPG Key ID: 03B27509E17EFDC8
62 changed files with 0 additions and 10263 deletions

View File

@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: fast-whisper-service
namespace: api-ns
spec:
type: LoadBalancer
ports:
- name: faster-whisper
port: 8000
targetPort: 8000
selector:
app: fast-whisper

View File

@ -1,31 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fast-whisper
namespace: api-ns
labels:
app: fast-whisper
spec:
replicas: 1
selector:
matchLabels:
app: fast-whisper
template:
metadata:
labels:
app: fast-whisper
spec:
runtimeClassName: nvidia
containers:
- name: fast-whisper
image: fedirz/faster-whisper-server:latest-cuda
ports:
- containerPort: 8000
resources:
limits:
nvidia.com/gpu: 1
env:
- name: MAX_NO_DATA_SECONDS
value: "100.0"
- name: WHISPER_MODEL
value: "Systran/faster-distil-whisper-medium.en"

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: api-service
namespace: api-ns
spec:
type: LoadBalancer
ports:
- name: general-api
port: 8080
targetPort: 80
selector:
app: api-apps

View File

@ -1,15 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: general-api-config-map
namespace: api-ns
data:
config.yaml: |
images:
access_key: ${ACCESS_KEY}
secret_key: ${SECRET_KEY}
endpoint: s3.clortox.com
bucket: backgrounds
secure: True
weather:
period: 15
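Note: Kubernetes does not substitute shell-style ${VAR} syntax in ConfigMap data, so the ${ACCESS_KEY} and ${SECRET_KEY} placeholders above are presumably expanded by general-api itself at startup, from the environment variables the Deployment below injects.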

View File

@ -1,46 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: general-api
namespace: api-ns
labels:
app: general-api
spec:
replicas: 1
selector:
matchLabels:
app: api-apps
template:
metadata:
labels:
app: api-apps
spec:
containers:
- name: general-api
image: git.clortox.com/tyler/general-api:1.0.15
imagePullPolicy: Always
env:
- name: ACCESS_KEY
valueFrom:
secretKeyRef:
name: general-api-secret
key: access-key
- name: SECRET_KEY
valueFrom:
secretKeyRef:
name: general-api-secret
key: secret-key
- name: CONFIG
value: "config.yaml"
ports:
- containerPort: 80
volumeMounts:
- name: config-volume
mountPath: /media/config.yaml
volumes:
- name: config-volume
configMap:
name: general-api-config-map
items:
- key: config.yaml
path: config.yaml
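Aside: mounting the ConfigMap volume at /media/config.yaml as above creates a directory of that name containing config.yaml. If a single file at that exact path was the intent, a subPath mount is the usual pattern — a sketch, not what this manifest did:

volumeMounts:
- name: config-volume
  mountPath: /media/config.yaml   # mounts one file, not a directory
  subPath: config.yaml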

View File

@ -1,15 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: general-api-secret
namespace: api-ns
spec:
encryptedData:
access-key: AgAPhGl0HfCpVQEmmLhArZgJgfXRUBEvokXse1mzEbsrIiSech0beSlKgTwrDwIFt//HISitEn3qtJkU1UXtVjhv8iTco9JgFLg/xmv+24eiRwG5smrtZqPN1iE7SL5kGfxM4RDvpSMxwdNnfsF53WGjsXIYaHtIVn316e2TQf85vSyLpq+BUgbgP5GqG931ur5ErZf48LHIdZ91wvxd1Lcog+C/jVKRmq0KvDKQZPJZD5qF1uyUiMz/ttY2NDLieGzkhQRpjVJmZaqHTdb6nBGcsMhdu8rI1pCkP8PHe8LsnwooRqPdZwg63Vdna7PzztrEesy5koyQwm4IOduB8xU48wY7KGiQ7ZLk4AHoamIQ1mYwK7D/Z5yvpVHItOUPsCzqo+JYbNhTMlXWVrCTWJU5D+CIvIgRUN5d4W4mM70wb75Glo5JGZr4Yw31zbxMSqCOzGeqILRwnKXP78RtM0URFU5sVfkvqbjm/1qP70YgtlowC/gBNEgHykYJV8CjeBb8tf1vjUDLOr+NgOj0FV/SrnFwan3YyMdwMirrZSoB3irta+7AEe1211ur+13JjZWhdbuJfCkP2l3uJz7zxWdGEapf2psCmC+syzyVrkEA5p1B0/Mu8H+d3dpranRmCWNOTySa1CEIPFuJ+ipxMsbQmPi7R60nQ6ZIUAOnJh/SAae1n1ixuOdc7KfYaSR+abYXMgrTBkC9riG6ew==
secret-key: AgCcGmblfLgGakV8y2p1S3PHReMe2EuqvnfM8XHs6mK8fRGlFIzUw9Rsi9R/MeEfSx5eBTHzN+Euy/ykWJAKhbWw0cEcx+YcL8RahnGAJIqFsSw+atYmv4MJ9JjsCXX+3H4svjtV5AiE019YxwwAX27QzMcEyWE3Rg7/WPNnqyvferfdI0j5NttDiFKyKQvZSrWg2knyopbfNywMijEICBGWgZMj/nRbNm2vXdgYWhFvxkGYVCuRjnbz+iU+T0PMlqWZmj1Yxs72QOoKBYa4pJxSfDjg1erTEiPQgFJPULiSiEargIrxcCdxRdbn9Us/qO26lgvTSCdtiHTzOALmeD9no8Cr6wqZDQD616OyBaFvTTcwCTa+YxaVB5mpoLHDUPOzjVBCpB7ojRH5nXXa7x3bIt9fz9dA9KNPywySsRcQ0hR/UoeMmtJfKx0I86VvxqhhhlEHAKAnUjZyCfaRvftCOkc4JfB9XZtDJr0/I47ToWNofEU1WDJlTkvm9dOJFvRsNGzsLAHhT3I/8cP+sCAY594lmI6J+MMfOjPV5Ig0xQic2my9clrKPohUbKue0R8cSUIb42OnskLOE0bx91JYXBdDeZ6lxawrWznWwPG3j7BsIslqDYSUeKFun91c4xSp2GvdliTS3Md/O/f+yqcBSKGnRkGXZaOpPEB+9MyP3PYVd2pSFt/7fXi9gFj2CxnbClVCsDNCf+hqVH52a2UB9Q758FLO+N+iSpzD61hQZg==
template:
metadata:
creationTimestamp: null
name: general-api-secret
namespace: api-ns
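These encryptedData blobs come from kubeseal; a typical way to regenerate this secret, assuming access to the cluster's sealed-secrets controller (the key values are placeholders):

kubectl create secret generic general-api-secret -n api-ns \
  --dry-run=client \
  --from-literal=access-key=<ACCESS_KEY> \
  --from-literal=secret-key=<SECRET_KEY> \
  -o yaml | kubeseal --format yaml > general-api-secret.yaml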

File diff suppressed because it is too large

View File

@ -1,16 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: authentik-secret
namespace: authentik-ns
spec:
encryptedData:
postgres-password: AgBigFPSosBY6PGUxR4zdIntM+oGMyaDY9mHZBwL5xbjEEvmzNKCuCfQFuiE07WqV3fjWEp6D3o23fIMomPC3SNLWySfti8o5pyBrPGDZLR1dVYWLmkyMCj0pzbDmPgAArBuzGmQG6P+Kn4lqlkSU6F50ev/W8yHUPkrlp+iJsGM9wYNlboaZmDMowIK5ny8sQ5vIb+QakS3ybRa3DfX/T3yNvuhOeCt+367/3oV0yNmCEBK4qKpTsAkWctxXooX1wcAkOwMesqfE42I5Mt+s/UnbU5fXJdzM0YI7WZreEy5oaG1shDxp1PhXoc12yCt5KobTj0xlttUVFVb8IaOY7r4oSI74vrL8KGuZdny0oeWvVbiwA/SvOt7S05RdryYWf66jN71/Aku5LnKJwRoa7veGeX9S5pUe1wZyVSDN6trkJcG5ZJRmEerr4MOZ4YX9cB2FktEmd+estjIlm/UhEIRN8Qv4qd54t6j2Ajhk6EJ3Ky6mI9xiun+0ti9880rIHQiW5MpiZVB+nQlAosTVQu4wRjdnP6Z0ndP83e2rPkHJ/jF2iawXOBoS0Eh11UaXvRQyNQOt3ReIba7E0aSbynpULViOg/lVNLA2qgyp+37Veb44Mi2k7sHg7I8e6MOMVjBhfmv3HvMpdHHBIHSq2vaDlF/0i5o5OT0F1O+06OngfQAaQQc1SdpLeoPKget5fbNF9zgmfKxPodjayq+h6n3vm5QOc4TagtcG1PV38LsiQ==
redis-password: AgCWDT6n+wmF9+Qk4+bu1byc7TFmRwPGqrhBIdVvZrTMRh6jt43E8urutTAlqKO6JPbRw+gw7zA40uOOHYzU3UaIXdAueQtCRMhHAzKWMwvTuzKGqLmmKcxVF452wilyhMjLBgRuBvX43VK4kynIthM3LZmw9a/HAlbQqn624N3wvdOYXyrWG0YKisXJunEFPgQyygWozdFD/N+b2loBq5YvH3mLuOuJDcuAC+Ti7URRbHigZXOhpZK6ilycAcJxJlOE9FVDRXMYSophjDWtD/Wb7WNLU7iakdXjNMFNVlE89mzrLxOskI918l6hrMG+Tk9FrhwKZx9ZuVwoUOdLBhF7I0jjYWKnJ1gEIMKXNBcrQWcnqX392VTu4RG0YNIIzasYkJ4/i3bjDnIH9zpSnRn6VSL2ZRhikJBOGJRXlXamd93XcCC+wg7gLu9XGi6g7ddC9UksxFzfIoMvj6aZ5EzERwJ7Td/qH1mWcfm5iesXKP1Y7PUSElIXIVmx9ifLgzIfbreb5VJDj2v+gTD44zxy+zHhSgdyefR2FcXT2eZv9CFO/VS8WB/F8+edJai0wHmJv0ooYVNS0PtIkyD8DEUC3Egt97SmWlQlEn1rfX1hj7jpN7HTpW19l9kV3r9n84ZzVJf62qybHElKOQWoqdz2Xxv6gPannZ8XQbk3nR0dG99jrUhvTpqjLFaWV+27PE0bRuV6w1G5Zm7X6Jdr/y3p8UvH2UonA2/8xjPANci/tA==
secret-key: AgBGLb8gPEET4udFwIMlgqWz5nIvu0/Tq6AhkCvxYTF4z2Gl4I7uOA4QtsnqDfOeQXJStpJ02ndc+q5l1uoP+hVgwhX1yWdeAtlQgubCpGraCQqofqVrwQwt9DoZqre+8rCp3llEugTP72Vekx9s9/8nDs+JqfBtfgLSdYqaJDO7fd3P4DDvA+DPhRTuT8j1YkX9mejxaWxd9lDss2OXWgZ/HDvGrm61FS3ByVqAo0uuayBcC8TtVrcjA6o2bfCFzz7g1uwzDC10bE7RNuJzpErulrOv/QzgxB/yTmQ4JlJmbgonAC3ZUBBc5hAl7m7hKuq6CFyHD1kZCWJ/cZkg9AagI0u9f96+y5kYh+KZK8/WuPHF3LhM9dam9KYKVJRqWE4nq5/QYcpbkQtKBqKlGPZZCyEmH/ylL6r3djMHNjKTpdCwlMqNFetDPLDMNFB1i2Nqg7PAzqOE3Dq5AHShSBG//losKiTfoNF3uYwbrA3cQhxCOAM/1EiLEvz1KerHaJrlcV5Y32ZaOj6P4aQeBAzEpmS8sRr0yooYmA1iJce+wYMsvI1VlNKP4HU+wLm5xKNca1SRvZaOmz1RUp3l+Q+jckhHmRFubLOR6RpmdiGtTAyvjfMRkRtzDfnyu+xGvCqlontPIPWh7yl8jsqrjhr5/tXVtSs+yZhdfn1M7oiDbv7xa4o2jAxt+MpP1XtMaoH/Rnt3x2JprDrSU+1YICE9Ibzo6xjJYFs5I/fM7auUvF3cmX40zafRHw5DYehWCBU3mA==
template:
metadata:
creationTimestamp: null
name: authentik-secret
namespace: authentik-ns

File diff suppressed because it is too large

View File

@ -1,15 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: firefly-iii-ai
namespace: firefly-ns
spec:
encryptedData:
FIREFLY_PERSONAL_TOKEN: AgDimybAXS9nGY5qbE+YXcWklHXrl6aOTl3dC2pZGEQ5abwbE7H7vsccOMJWPhfbT/x0/gK63qEQeucu3CB8va+QKRF82DE1h9TNe++hjneKl51htDAG8wnzpyLZfQIRMrmYejjC2t1ID3ti5JXVwfzrMwge9bsx5FLSBZQJbTy74gNRBU/q+zz2bvcRXKmQS5kocUBsmJXGBKYYreVd8qAA1eEb13YQYllZ+iJXFmJqYoK/pkYuQO1ClzZHLMA1AIWVughhvQeOmvfNXxm7hMopTHMMRdeXFuGnv1J45ktE0YYInnlGrJoJY3hjRNlWy+fQgqzA0IfcIVF58w2A29pL5oXr+J5v5grAr6gm60Xm+P/c8uRMQeQ9Yv6W3L+mG9ECr9DhNCvAdadGAyK1oVYUoJ1AISMLTbGkQ98uNTG9ExG9zcoYJ4teXgTmqDN+HP5wRhDdz00ELIedxOhtXK0mhT1lEuZN3E6nyid/knRX5aGY0vts3V5odJvfur4xRQKlwegTR4lAhPdn7rW+LAeWrl+3pfyQnsR8gkc8/8kgNLcqlefX/2K/tt9vsGbb6H9ciBdrrzpLja/Ml7v/hAJSPqFEZSz1DLaI3TIEq8xUmjaJu9RlqpTh8gi9oCYDIVVdjbphpWeL+O8w5hHuUgaX0EcSDQPI9Nsmi4Z643CbqhWa6lvL07FvwQ5AyjA4xMOhEFi/2pB5lUu46NFa3ZzMw3UYynLeKLVF9Tgs8G/UcWWSJvkLu2hEc/EVavjtj6iTw93/qxPS6klDRjFX46FUr1DMJBTDcYhsEH/PEcIwJGeafgCOdVoVfAc+eJknJ2dDSBxNLkxTMgM8SenUr7kdmm844qWR9H6DzfNOtbn+uaOnpeHvhU4qxu+/UzqAT9/HCuds68XAGvhRltHrphPr6LgVWjPi+BrdIghpiAY5LtNWhgeQLyrQ2Due3Dt4tUL8wC4/xi9hYltx/AynCzNIwzs76/eQdl1VYDjToXDWye0NENqOCL8GK59V+1rXflRRX2q4iR72zttBgjpbWCNZ6wNnWWbQdH3dHxBvJ7Db736fRQtAoEC4rdKTbVw8A1W8X1Zn6Mt9SIYY+fssNF5aNb0YSNzNYrthIfYmtG/TWKkOR9Z5lvzdeTfzlS34y9xYEF1IAmjRC2bUywp7RNHRzMrgENYp3kyq5F1FruHkYwfX9pRQX1ZqyjcC10Dh/CB4pj2ivygqaBEwdqQIETgVzRQKYpDcRPJi9lyd2oaIcI4k+v47ZJX32ZJLDJBVUf3w6pwFVbiuuf5cdiWo+CvzsBmvGNtwa2Nn/K0b29piPlNBSvlFDSixSW2egmmLiSujYU5RjZy07bbPf9mFHdRNjiHB4GRUHafeL6BNJ7+4mdMIzVMPgjmQ2HfwVwaup7JpTY2D5vnd5cJwKreyKfMigg9w7l9fFx7WrDp1u2qiZtRQ07Xd6LsEaLkQDrKGgyuqu33BgOsOX82F5RPpkCYezeo0D4m3mLYMLOhp1+U3Kpt+OlvmuXPrWoJeVzwtGRoiv2FBk3Kz471wl0qwxYZxXom1I0XojVpJf8wtTtY9zI0gT7U1RZP2YIKW87dHAOWmM96miOLbsunCejRIFiRFEpJkwlnscsAn7woknl4hYj2oW7k2imO7mSwcyZiAFx+CitNqvXxoX9foMdJ3G7uXFKgqz0w1FEm0hSqDzut9DyHPunovLlMk+piBHl2eFwEtG6ODvx8SQBZ4DEJF30hmGL6NLmJiTNANDVhBgnmuiIvZRonj9gzHuCic9LUnf6DMIcMp3KLTBcpEPE/PLihmscswTuRpzF1TliCuDjhjVs2HyEx3WisY0w76w/YX/s4ipCInHmXV7HeGL0+RNAjlVGgAhRrNbUuAn0BwTDm/PMQvV/XJmwjd0964GkpK1lwBcms979WJzN+Gg6sTNa5Y9xRzm6p5QiXj8M7P8QzKHrZxSu+lMqYGtipONcBKb6pWk50Q0YGjSDkJ9lUymj3iEg5y5A==
OPENAI: AgAK3CS9m5SHyxnyk8tP7MlKu2XCLd+Qt8xBuYiFO/PtfxcE+j8MV1haSFUdEwcoySpkBYlctifqJc8uaBdmW1aewYhv6jZIXynScnEj4VZUe0sM1TGasuKduDWNgZ8jDRS+2fQmfzZh67zvtBVHHsiagAL6i+GHsjD3i9Fj2EoNbKueGgYgc7sMzAAoTcaEaP9SiTEzhujYb2HhRY7IoJkvmU2yyjOeFc7n3l6txkA4UsJ+c9wUblCTX4sXnU/pqTA2UAmWCfWPqYlXlKGsqZEPX7q+HcrR6H8TmiCBb/RIa5cZM87WdLTUxT5U85gcoBjj+Q7mAQlLAsfPTyncJgZSNBsNKm5UCrz7GyWr4NCnZonQIzE0GjlhQnP5ERq1+VjWdxTjsH1/QUEjjaJgq4JiJQV1OvRAemhiGXF7m8grqopwYMjBtrHt7tuIqDVNZhx8lWhZ/p21f8zvluR8WREEdmff/wsFNJkIbYJGwpwy8VpF4hiuqkcXSPGcz6OdBfY5sktUYwQNkRqEQV4wDLjU23hngL9P47dXY5Mx81AMvyD49V6DnRH8az1zaxcexvyC/m+4UEqm4Aw8REIf5sogH4Kpu2pKXW/ZrDMI27zyjammG9EF3AGo9Wi2ND8JJH0j4GJXu2auGDrENdJZaM6qDVauYrDFMVw9jj+ZlMLqUd797qMEYDP64ai4VjXXAj2Q4qS4F6wGu29jrHQK8tWeQx5hxl0pwlfUd5i7NyUjGczGaniXelpmQ3Lcpzp/HIXQd+4=
template:
metadata:
creationTimestamp: null
name: firefly-iii-ai
namespace: firefly-ns

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: catagorize-ai-service
namespace: firefly-ns
spec:
selector:
app: firefly-iii
ports:
- protocol: TCP
port: 3000
targetPort: 3000
type: ClusterIP

View File

@ -1,35 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: catagorize-ai
namespace: firefly-ns
spec:
replicas: 1
selector:
matchLabels:
app: firefly-iii
template:
metadata:
labels:
app: firefly-iii
spec:
containers:
- name: catagorize-ai
image: ghcr.io/bahuma20/firefly-iii-ai-categorize
ports:
- containerPort: 3000
env:
- name: FIREFLY_URL
value: https://money.clortox.com
- name: ENABLE_UI
value: "true"
- name: FIREFLY_PERSONAL_TOKEN
valueFrom:
secretKeyRef:
name: firefly-iii-ai
key: FIREFLY_PERSONAL_TOKEN
- name: OPENAI_API_KEY
valueFrom:
secretKeyRef:
name: firefly-iii-ai
key: OPENAI

View File

@ -1,173 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: firefly
namespace: firefly-ns
spec:
chart:
spec:
chart: firefly-iii
sourceRef:
kind: HelmRepository
name: firefly-iii
namespace: flux-system
interval: 15m0s
timeout: 5m
releaseName: firefly-iii
values:
replicaCount: 1
image:
repository: "fireflyiii/core"
pullPolicy: IfNotPresent
tag: version-6.1.6
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
persistence:
# -- If you set this to false, uploaded attachments are not stored persistently and will be lost with every restart of the pod
enabled: true
storageClassName: "longhorn"
accessModes: ReadWriteMany
storage: 20Gi
# -- If you want to use an existing claim, set it here
existingClaim: ""
# -- Environment variables for Firefly III. See docs at: https://github.com/firefly-iii/firefly-iii/blob/main/.env.example
config:
# -- Set this to the name of a secret to load environment variables from. If defined, values in the secret will override values in config.env
existingSecret: "firefly-iii-secret"
# -- Set environment variables from configMaps or Secrets
envValueFrom: {}
# -- Directly defined environment variables. Use this for non-secret configuration values.
env:
DB_HOST: postgresql.postgresql-system.svc.cluster.local
DB_CONNECTION: pgsql
DB_PORT: "5432"
DB_DATABASE: firefly
DB_USERNAME: firefly
DEFAULT_LANGUAGE: "en_US"
DEFAULT_LOCALE: "equal"
TZ: "America/New_York"
TRUSTED_PROXIES: "**"
APP_URL: "https://money.clortox.com"
AUTHENTICATION_GUARD: "remote_user_guard"
AUTHENTICATION_GUARD_HEADER: "X-authentik-email"
# -- Create a new Secret from values file to store sensitive environment variables. Make sure to keep your secrets encrypted in the repository! For example, you can use the 'helm secrets' plugin (https://github.com/jkroepke/helm-secrets) to encrypt and manage secrets. If the 'config.existingSecret' value is set, a new Secret will not be created.
secrets:
env:
APP_PASSWORD: "CHANGE_ENCRYPT_ME"
DB_PASSWORD: "CHANGE_ENCRYPT_ME"
# -- A cronjob for [recurring Firefly III tasks](https://docs.firefly-iii.org/firefly-iii/advanced-installation/cron/).
cronjob:
# -- Set to true to enable the CronJob. Note that you need to specify either cronjob.auth.existingSecret or cronjob.auth.token for it to actually be deployed.
enabled: false
# -- Authorization for the CronJob. See https://docs.firefly-iii.org/firefly-iii/advanced-installation/cron/#request-a-page-over-the-web
auth:
# -- The name of a secret containing a data.token field with the cronjob token
existingSecret: ""
# -- The name of the key in the existing secret to get the cronjob token from
secretKey: "token"
# -- The token in plain text
token: ""
# -- Annotations for the CronJob
annotations: {}
# -- When to run the CronJob. Defaults to 03:00 as this is when Firefly III executes regular tasks.
schedule: "0 3 * * *"
# -- How many pods to keep around for successful jobs
successfulJobsHistoryLimit: 3
# -- How many pods to keep around for failed jobs
failedJobsHistoryLimit: 1
# -- How to treat failed jobs
restartPolicy: OnFailure
image:
repository: curlimages/curl
pullPolicy: IfNotPresent
tag: 7.81.0
imagePullSecrets: []
podAnnotations: {}
securityContext: {}
podSecurityContext: {}
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: LoadBalancer
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
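Note: the CHANGE_ENCRYPT_ME values above never reach the cluster — config.existingSecret is set to firefly-iii-secret, and per the chart comment above, the secrets.env block is ignored when an existing secret is named; the real values live in the SealedSecret below.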

View File

@ -1,17 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: firefly-iii-secret
namespace: firefly-ns
spec:
encryptedData:
APP_KEY: AgCSE+/lOiQJV3HE/UiAzeIXc2hgOMY95RSUO8Q22sK+R6WdpLcc0/gkxhOYtAdFGp1r9TIQQcWcbR2cEZ84GsnhoNJxh2vgaME+g5m0EgzQouczW/GTR56qfu/P+zp/IlIjepJVeAhVAOAInLDn/XUJf6kXyfWG7kHLhB/CHI6P0VC1RcHXAjkArDmpn5wOwDzVSMOCWszd6BXjl/LacRPkC58Oj4GCIlEqXo1meBJ7Lc6IG+x7VSjNv19xKVFqULt/Aep2YowIf3TPlQDhkv39Rro434dzm9q/M88JndE6sOqw1MTO+QqPPSUKPDSTWwD72FV4rmVkeXiTKtvMLlAWywQIOFL7ZIVJ50DjYgWV/tx3xz81lnfgwtFa6cT1OwUOfLrAEAe4iF/3hzgY1dfTMB7eKbY+XGpGvrcqAImfcBfMwc2wqSWj7FA3V5qOwrfeObDE96nvOuDyUqgPgzyyG6JZwkM1R9pgTABbG3sEkbtyxLQfJftooKtQ3obDsP56aS9uzfZ0rsZpT6Ek7fNd9dqG0XEqDOjNgOxW8aCJBq+Uy9Pbvx9e0flBwXJM0FV963ql7b4i5vrG5IuBC/CC5t1qPwaQPd/fMARjF8hIjhcfF8lnwFzT5vYRHIaky68U2u7gUx91vkiM5X3W8G3N4TX9kZI8QKr0pHcMy3zdM4ou95qUrhr6s/BYrULKjtZ4jucVRoX1PXa+D/laa6qk0Di0iw9S+KMdni5XK+If5A==
APP_PASSWORD: AgBPGz6DZ4R5L/MnGFbEu91VtBnuT4XiV1h2LGCwg5XTtlPcaB4EgNTQqFihAqt1GbHjGnJeaNMLgTVRXUuVJgklZ7Si4MGeBOhcUaIkuxa/7/p9I0n1mHYKlic+c7oRUMPYEbB6R2xjLeFNALIuCr6sQyX8JcIYlYh+arWqlWgQ7MRVSZW3VaWSPiQGMgRUIequ4CAS6exjzQUwZwwP5yGqKJcV7tBoPKDeQ5lNL2BqcG6xtk1Uin7M/dEYa9hEXBDD6eZrc9IzEfuxIOJiPvJvteXvCFeX6MltA1rEbYorub1p1u+m9ROFBx7K74iR+jJt3Fw1/LP319x3lXS3G7tg0TCwxLwytwRjLfPHlYiHRosUk9n8K+sluHqReyKnDN87DEDvGgKYiToF6oXq67rLd/fQlFYLMy86sIrAIQYEAjrDNvkGVOR557GVMR8XaEvxz2nHBLKuNIXtY4Shr7vUABHf1ove2MgUPnFzNd6UTLnud/YSpkXunlYtyu9zaf1xhJq4a0AXWK9HJpyMoZz7CG5aJTT4+PGpeq3dwMNqcjmX8WmgYIpD8JwxCu09l8cmb5HwxhhV2M0Qdn0X8fo2HyR+hhXN/5P2qWmEHSOXkz7I6nefDAco93PsDQkg+1IAc4IyrmP4HJgAVwh8TD3/OFcpwlO8rG6CTC+hgI/hcnyEZCkDfRczRLUlQ3fG5HWKnm0yX/GP4A+2j4kh8FpMTKsP9U48U6a93Mmp3trVkg==
DB_PASSWORD: AgB1oty1IbWAVgYNDjaIS+ATAccxhUEoKA0zjwwbYZyNtU+Me2Z3vRPb0n4Sqm23VglNx/AGYqIf0t9ewctlib0FbU404mX8IYKMS1/+0VhoFymrLNxlTR0CTlcatOmZBbwvbqh5esEyZ9LVglr1TQWS7p5KNiJB+6b8H9tuTxraHaMBMZhDTdoAhIxyzcSvaKTmJSCPvR38q06ggNeeFIE72hh4v97diJ50h78/P4ScoG8CYbuinQpND3Jg07GoAvhdpZk1PgAZQwSeWBBECmov6rGKmJuCAx5YzReGXQOXpUYk+K3YR5mgeEIGjvABoIoHCmYoMP8T56IIX1uZLGQFpMqIbqnJ828i6qy9gvO7Qxng8zIO4WO4pNZNo+dQw49Ri087TQpT9fq17+wykDj2zvDpvasqh3bc3K0NbaJQo3F0hFzZhtw7ZdQFQ9TrKD0oG1fNscP4jvvvXIKJ9IDEghPUmd+w3C4stIwsICgpUTGTytHQ1lUzL6OebBiQjXWablwEGbtcFWhqAx07esuFpe2hx4+6HDNpEG5MH7T2/IUrwBS1vrlD4OgzT/TKT2bwjuy62ralrr7CmPcbcqax7pfkpnjK3kDna85xz4JIC4/nguqVqztjTkk7fSxDckAFlp/WLvjwvG7byU3gbQX1Y1X9O8bELvLGA+QuaZ3mMYJNEtyi3lx/RVuiO139PQOfVwrK4jsTjhxp5xxn0dk02haVWlDBg15Mb85D8mn9sA==
STATIC_CRON_TOKEN: AgBkzS4jR9phFQ/LLfwETTQM30sbLO63nNVkdUcmoZJQb85iuNtn9Ji7ROdIFyFCgWeBygOPGtrSXYlnFf3YIfdM+tXOJ9YtYRX78pqCr4mm84ZpNAir2VTMRLQDbrtqejv6LQNPkrRPpC+Xia/nUiqoGo2RFi66ypheegH7DNW1gHHxZibpiFvrwxudYj1q5rFNvw3NMwb6sJZ6FfJyKhMoKlQIJ1P+H2YoqAWU29hFiP1ZCZJ4j8+2cfaKiImlHsmGuTkuZuUi+0F8vCSUncSOtNMXJpD3XYFqexcefalls7OZJ/U4gD8LAS6kMZwBqRsCgKFKXTSpQwFW8L01xXrb6NQyPdnItO1IRg/65BJQeFdSMzgfAGec22+MBRNbJc9y2mw3iKUxPf42y+Gij/8Th9kLA9pmX8NYcBaIBCXbv3B/9O8FEDsf62XaREG1mwU3wEP1HZ6YeWEeRCR6+0MCGUWrVQK6pAS8uWQPDz1LkoKqjnMsa5OsHnmi211KUAqOV7vDOGu0gKF3t035ojfoGiHO8XgLLd2viHH0t5iaRWtWLV46UuA4uLgIwrQtw+0IQnLfb5xaIQet9zQacBIi7t2eelJvvgkdyAJ2lqexQMrHzicKjvfJQ1BbHtK1Lsisw77jvARF71IywC18fHmr0e+zK1obSva3+dPthu36xCCuoJiZ2E9GmvMkveNA0qMQYavct497T7w0iR71p3k80hOsVGIRnqUaTGifzNJNfg==
template:
metadata:
creationTimestamp: null
name: firefly-iii-secret
namespace: firefly-ns

View File

@ -1,43 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: homarr
namespace: homarr-ns
spec:
replicas: 1
selector:
matchLabels:
app: homarr
template:
metadata:
labels:
app: homarr
spec:
containers:
- name: homarr
image: ghcr.io/ajnart/homarr:latest
ports:
- containerPort: 7575
env:
- name: EDIT_MODE_PASSWORD
valueFrom:
secretKeyRef:
name: homarr-edit-key
key: edit-key
volumeMounts:
- name: homarr-config
mountPath: /app/data/configs
- name: homarr-icons
mountPath: /app/public/icons
- name: homarr-data
mountPath: /data
volumes:
- name: homarr-config
persistentVolumeClaim:
claimName: homarr-config-pvc
- name: homarr-icons
persistentVolumeClaim:
claimName: homarr-icons-pvc
- name: homarr-data
persistentVolumeClaim:
claimName: homarr-data-pvc

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: homarr-config-pvc
namespace: homarr-ns
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 512Mi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: homarr-data-pvc
namespace: homarr-ns
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: homarr-icons-pvc
namespace: homarr-ns
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: homarr-service
namespace: homarr-ns
spec:
type: LoadBalancer
ports:
- port: 80
targetPort: 7575
selector:
app: homarr

View File

@ -1,14 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: homarr-edit-key
namespace: homarr-ns
spec:
encryptedData:
edit-key: AgBnP6HGyQv63BuvrbO9JWdDu/aS7GadN+6dJ/4uBziMT6HxvBBbunrO5ZROHD1Hl9F3BSQs1GECkzYTQDVd5Hji93L39InCpo3+G0GGg0m6BH8j5WarheWS4837WynOUOfHncCCtXzG9iRqFZAUKE3xYtbNMULXXBiMyY625aonF3Agqz9MAtz4Dv56N5cPE4C4Ck0VPi4POQCP6RezHteCktlBBwpbPAem61mUUx+P+V7hEX3/TItl0j4HOvC6ttbHtVLPUwvHHdBcH/0stKhPben4Hnp7qLZe1A16+RCAbaAYF2TS9JbrQsCwtDq8mkQeAQg1sU0S1092b9OZKk9s1QpGGlKuH7G1iwQcaTpdVIj57QVIOPNoGWuuOiVzWe8hf+b1jITEJNT7VYWmBYcIZjLakYFr8zbkWPlvinkTv0GHo8uBOWsqLF+w3ekYk9HNSJ6dFEBpeMpvllXcbKnggb222otyqJ2Z9Kh2svIBqq2+0VulhFtEfjXFYLOMHqi+ZUz/MkPuREevDQXjwJTBoHD5OaB1OFRo6Kp1jyLogkTnUO/j2qv5DZDkofE0ha4PR9/9olqoYzTfs0IOa2+yUQZJ0OJ5dQbrnxNqbUWjCrVn6xVeCqKrZzsK+96wJVBgiPBzruO0y5ZYreNyW0GdBDS1ubvkkv8eMKbVOM+GTEtC1AburtCwuVYwOxgOJ31zudWmDzqEnrDK1Qp91eyzk4W2J+TRd52fxLQUukq9SA==
template:
metadata:
creationTimestamp: null
name: homarr-edit-key
namespace: homarr-ns

View File

@ -1,46 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: immich-machine-learning
namespace: immich-ns
spec:
replicas: 1
selector:
matchLabels:
app: immich-machine-learning
template:
metadata:
labels:
app: immich-machine-learning
spec:
containers:
- name: immich-machine-learning
image: ghcr.io/immich-app/immich-machine-learning:v1.109.2
env:
- name: UPLOAD_LOCATION
value: /usr/src/app/upload
- name: DB_HOSTNAME
value: postgresql.postgresql-system.svc.cluster.local
- name: DB_USERNAME
valueFrom:
secretKeyRef:
name: immich-secret
key: username
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: immich-secret
key: password
- name: DB_DATABASE_NAME
valueFrom:
secretKeyRef:
name: immich-secret
key: database
- name: REDIS_HOSTNAME
value: redis-master.redis-system.svc.cluster.local
volumeMounts:
- name: model-cache
mountPath: /cache
volumes:
- name: model-cache
emptyDir: {}
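Note: model-cache is an emptyDir, so downloaded ML models vanish whenever the pod is rescheduled and must be fetched again. A persistent cache would look like the sketch below (immich-ml-cache-pvc is hypothetical, not defined in this repo):

volumes:
- name: model-cache
  persistentVolumeClaim:
    claimName: immich-ml-cache-pvc   # hypothetical PVC, e.g. longhorn-backed like the others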

View File

@ -1,55 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: immich-microservices
namespace: immich-ns
spec:
replicas: 1
selector:
matchLabels:
app: immich-microservices
template:
metadata:
labels:
app: immich-microservices
spec:
containers:
- name: immich-microservices
image: ghcr.io/immich-app/immich-server:v1.109.2
args: ["start.sh", "microservices"]
env:
- name: UPLOAD_LOCATION
value: /usr/src/app/upload
- name: DB_VECTOR_EXTENSION
value: pgvector
- name: DB_HOSTNAME
value: postgresql.postgresql-system.svc.cluster.local
- name: DB_USERNAME
valueFrom:
secretKeyRef:
name: immich-secret
key: username
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: immich-secret
key: password
- name: DB_DATABASE_NAME
valueFrom:
secretKeyRef:
name: immich-secret
key: database
- name: REDIS_HOSTNAME
value: redis-master.redis-system.svc.cluster.local
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: redis-immich-secret
key: REDIS_PASS
volumeMounts:
- name: upload-volume
mountPath: /usr/src/app/upload
volumes:
- name: upload-volume
persistentVolumeClaim:
claimName: immich-library-pvc

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: immich-library-pvc
namespace: immich-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 100Gi

View File

@ -1,57 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: immich-server
namespace: immich-ns
spec:
replicas: 1
selector:
matchLabels:
app: immich-server
template:
metadata:
labels:
app: immich-server
spec:
containers:
- name: immich-server
image: ghcr.io/immich-app/immich-server:v1.109.2
args: ["start.sh", "immich"]
ports:
- containerPort: 3001
env:
- name: UPLOAD_LOCATION
value: /usr/src/app/upload
- name: DB_VECTOR_EXTENSION
value: pgvector
- name: DB_HOSTNAME
value: postgresql.postgresql-system.svc.cluster.local
- name: DB_USERNAME
valueFrom:
secretKeyRef:
name: immich-secret
key: username
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: immich-secret
key: password
- name: DB_DATABASE_NAME
valueFrom:
secretKeyRef:
name: immich-secret
key: database
- name: REDIS_HOSTNAME
value: redis-master.redis-system.svc.cluster.local
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: redis-immich-secret
key: REDIS_PASS
volumeMounts:
- name: upload-volume
mountPath: /usr/src/app/upload
volumes:
- name: upload-volume
persistentVolumeClaim:
claimName: immich-library-pvc

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: immich-server-service
namespace: immich-ns
spec:
type: LoadBalancer
ports:
- port: 80
targetPort: 3001
selector:
app: immich-server

View File

@ -1,14 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: redis-immich-secret
namespace: immich-ns
spec:
encryptedData:
REDIS_PASS: AgA87rwcuMmDmgvDRl6pcObFFNBPKSH1qCkXUFgIqB/jX/ursxPgP+5f9ANY7PZjZTJ3QSAenJKPKUu9ER5B04o9b09EIcSpTQ0eVQRl6jwMRRzCbFWedb1bsNPuNyQBaf7IhaLshfQPSsjamp4oAaczLjbQPs/musn/3TUYVThIdgWBltv9i/12+BkbA98sS3gsMVWyP+cCcVQ+mMTGNsLZbxP1XC50yAAWifqJk6NbT+m9CA1wnesgegyr1W7KUGxudKnRA7iaGiP+fC+LbLIbD63tkme6/65b9x5qXZLM9qpiBEX+Yrv7YTn+ZJ94KwMnDjV8Y3Izom4etOawnLaRIIal/PGJPjSLE+PqtVRKXpTO8I3ExKSHb3MfLpfqTQ24N1yoNOnYu6dv2Rhd0Q9lMA6RBX4XUfsjYxHwIWyN1HhdAkbAS+ZqIlcnzT/rVIkkLcU/3/2Ptjj1IRDHFZplibUTbmkiKBvSDeOWDDRXC0FPvMegcfv2mYXY03W70N1uW39JVd0hcDhMxVaaW7yB7rmNEdOpFmpSPBScNtJj7bjEkAQCqXfqogclPs7FJOkrEJKK92Mon8ZMRdeD7GAbh4UqiRIe/SnjD2PsxWKDIMX3uqHN4PpxtsI5F3cY8mQNLG9nP4QzS5b8uU3vfJ4aSX2WpY7UhCXZ1ZuZDMNUDyQ9ULNcFh0FAkB3KzFi35Kqlxf6CsiY2pkxmtHm4w1WJkq09n2iNlsORJayzwDu6Q==
template:
metadata:
creationTimestamp: null
name: redis-immich-secret
namespace: immich-ns

View File

@ -1,16 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: immich-secret
namespace: immich-ns
spec:
encryptedData:
database: AgA+Vgab29fZ+NPF1PxzvcT3StAlEiOOKO77tYH+IgfKhdK7wTP4q+OVdV6gWPahK1ssZ8lPISml1HDMPx/IIlCYHmp1xi+wtoOgvyOGq5/8czupMQ4dLwiMVWFyRnCUm94119dCA9KImIqyhrNZ/FebqrcqvykI3h8/XDGCZujjMlHhnhRSUF3AohL3cW72tnZkDeSKebp1Mkmi0LEij2v0/+dZXuIEsfLPVHgxJKvCfPX7ND3TigBlFsa1VQOSZY19MI283rS9keqX0pFP+h0LAT6iGw/4p9fOjVYPNZySVn/z/XXcxnKjO477edJp9TGb+xd1m/kSmUhKF2w58jkKoZMlUwwCxteh9H1zj9rHMQfSmVG+tg9j5WoSsfIaWbDIFIf91l07XSwa8MGJ91NE6nvHEgf7C/OtZ52SjHTKEielLHsvTPRn2lIi14P9tMadI1z11POTf416CIcB2fXzuu619FHARSJseBpBLYwPM5pSpF0XKqTl7mW0kypa46kikjGou6CuJWhrFkh8Yqpth6hfsIV0BkLxXUpoWW9/dMQztfnuB7OvogNUJRTn+g9tzGLyY5bWddokV9s6uxyyaDAi9wPe48HRhX6bGwOgEPdprV5VRSuXu7A2g2YGYsxvvsEr9dXZA3rY9dW63wAzIhydxO8i0+9JHd9CMKohj60S5Llh402p4fDm3JIXchpeNwJzyo4=
password: AgC68pWUzY4eghLcYSxEkwVtBL7BlQ8ytG11hk8NuGcPK+B9kA0VFtw5gFTYMIb0UL95O0BN/L7A6O7oXZm6skWlwOaYmUUOhdCnws1vRA7RamA+gWiT6qV+aFVdeiWLm2pgTdwquqB/Ky2/K3FF2tLoA2Gmp+uGGbet8txMb5RlCWA5jdb6xqsszCFu8NKpcb85kaRtBAP1AzXwWWnP1E+ITM8FjsL1QXlwkxra/uChN99w6Sc66GR8VUb3M3lmtv26AX2hHhqOeWNNJzIbWpmThS+DuluopF4UF+rEixTnR5jBtl8Let6ZA/UwgZ0sfBOijFLyoSFK0ly0f1p3bDH7jtgL2f7OQNPv/VkY6RKi5LViE20m2fYKmt2Fx+FdrIAw64jK3fhLuWF9MKuHOLhgbcrpCvuIMcR1P+/TEPoOrwLy8qzSyGlHZlYHo2m16FqdHqvwHF2vd3A2OnblBx8RN51Hxr0PaRb11FxGQSdQgVU4IoQp0GlvDrhzRXHU1g4G7BnG7+fQpFHujw5QB0rrSLP8WgfWkdYOo6E7xF5EXZ+E2vWsRPRJ2bkVH0mywIo8BC1e7WCR28uLK29e2kBMxiwzDxu+7x/g8rbXxLZGVakEhvZMlWPUSBpcU6rEdW1x7+TEJCGxxBUf3/e6K60MqvOQIe3gRrevY8DddkCFbi6+ZIPmTpd95K9MwnwDDWub+CzKZaWBn6+23NMiBkMa2mgFIWn0QMxEtazTWwJITw==
username: AgA/sz7ukcLAtrSfiGncgMC/VkekQYAYhUmsVTR/sS9di8gv98+pBZbC2i1CC+Qy0yagVEmpstqD46AlkI4d/38S1YLoEolJomEn8KUcdvle7RXK5d+HXSDQCbWdhdhJsbw094rLd2pPzJ1ykVpJglbg+Ec9pzydorjS5LA8vXyujmH3YXW3OU2GCI+B8rgiedetlP6zyZciKuSNd/yDPB7cYzch0lmheGHREulvAzXE6xPv4hiyZtY0FA26zjixtQjW/CJnmwzD6/F1MBZWXtColxZob6I9I5DY4zGawNgS8n4qF/bRoIr75LYkD77KEfBWba5QkQcfnvsEmJWKFmMBchdrM8+wHulgElzTRn8HIfaslk6Aq9RBasXEBDtumBgLiOVCr4TNNX6RHNooyF6uc+Ms4zTdTsibBmMs3X0W8ON1qZx+oXf5M7QW3x+rz+cl7o1TQUsGaHeAcLjh1xGJWddSo1gRL8kqX7wlVucm2LZwIwdWnGT+Bp97FJmJ+R+xgjrmzy9lhboSK58LnpHk65psIngp0XCZ6b3pNrKbDc7H/v8EAjElSAhTGwX7nIwZ4jGCdgPICcX0FtWW17nlJIXJoHmQL08fPa7dqqkpx2JgLQ2E19TywfItxxRApYtRP2AXuf53XLiyQjDgo6STldASysj4MgpJti0lKZNUQkK2QedaXKhyLO3/n53SADSac+P8s0E=
template:
metadata:
creationTimestamp: null
name: immich-secret
namespace: immich-ns

View File

@ -1,61 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: invidious
namespace: invidious-ns
spec:
replicas: 1
selector:
matchLabels:
app: invidious
template:
metadata:
labels:
app: invidious
spec:
containers:
- name: wait-and-die
image: alpine:latest
command: ["/bin/sh", "-c"]
args: ["sleep 21600; exit 0"]
- name: invidious
image: quay.io/invidious/invidious:2024.04.26-eda7444
env:
- name: INVIDIOUS_PORT
value: "3000"
- name: INVIDIOUS_DB_PASSWORD
valueFrom:
secretKeyRef:
name: invidious-secret
key: invidious-postgres-password
- name: INVIDIOUS_HMAC_KEY
valueFrom:
secretKeyRef:
name: invidious-secret
key: hmac
- name: INVIDIOUS_CONFIG
value: |
db:
dbname: Invidious
user: invidious
password: $(INVIDIOUS_DB_PASSWORD)
host: postgresql.postgresql-system.svc.cluster.local
port: 5432
check_tables: true
hmac_key: "$(INVIDIOUS_HMAC_KEY)"
pool_size: 100
statistics_enabled: true
admins: ["tyler"]
channel_threads: 2
channel_refresh_interval: 15m
feed_threads: 2
banner: "Lol. Lmao even."
default_user_preferences:
default_home: "Subscriptions"
quality: dash
save_player_pos: true
port: 3000
#external_port: 443
#domain: watch.clortox.com
ports:
- containerPort: 3000
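Note on $(INVIDIOUS_DB_PASSWORD) and $(INVIDIOUS_HMAC_KEY) inside INVIDIOUS_CONFIG: Kubernetes expands $(VAR) references in an env value when VAR is declared earlier in the same container's env list, which is why the two secret-backed variables precede the config blob.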

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: invidious
namespace: invidious-ns
spec:
type: LoadBalancer
ports:
- protocol: TCP
port: 80
targetPort: 3000
selector:
app: invidious

View File

@ -1,15 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: invidious-secret
namespace: invidious-ns
spec:
encryptedData:
hmac: AgBnXw0QxXIHdSyv1jruFE7gKlnWZwHjIF6yqpx/VwXdV1G6WWCfvv+ZMA9RNvnFGP3QmNttNpErFXgpGJKP6a9gr7nIK9ilPgm9oZZP0gt8MDnNSm/17sLeMv0X84uT5SfKCbzukTPKQj2NICWLYO9M3XV5x4CXNi+1E7r+F5qtAYV/V0ZPdo35QHALKjDYv5hofsvJNaUXxamMGzMjrOBtMZKDAGx4K0ftOVr348IbKb8R3WgSrJDN2YQdk+8U1lyRZoK2yBsMYEx1/z3/YsYF/ZvE8Z6tPnRCImJSr+jkEDde0So0DkXTESdBKVnkRQ2e31pyRHGu7+z3dqZlNITFbVt3YN54+P7jDMGEEbPEgVfjJTk/MhqsfaY2WrqONXJvBFcsfVooDXG3rQinG5UkPUBLWPCnInD1mvbSyN5whC7oVh5+qwCrEN3WSsEpMUig8re10sVDwmwXehf0TqWwsIPdT/4OxYnBjzjqJ5HYopBHqCcHxeHD6o+6fNjZPSofNo2YkIX1yI+9laSjEHBmIwdFBCty10yaDsF625X07zlqFBMzSaPRcK3MVReFfUrI5w7mZuM+bzT4OG3Zf4bolQp18glzltSPxWPOsc7RRRImkcjf+PkyXmGVwZ2oPXISX+8xuOIuxhMMGAke0a7b8R7hNb/vvZ6dbtStMwZWUd0IB3Rnmb8rWmdy5qHoANYbmVmwTfcDSKxp0hqfoPNYBG7xJKAg3FjdoYjcmVmbAQ==
invidious-postgres-password: AgDCqXfmNpRx1XQeKqVrXw7u9BXLvoyWiy16S3H5MgGf7SkBffIM9fbE3bFsOI8ow0obxd1vJRw/7XZtFoGYwumoGvFLU/5N1AeluHLD8c6muBNEH7hBQmXj7rGlZ2PGKIZ+C0iqMLrt0xWpiPsPKuSxeXBwyTuZpdcw5PpTQ9N6pWhLyAM5Aw7BHXzWN3PiH4dplWnYcilj0MkNAueTwQtwksHrmPrA7ezE965adfhWzn+IWS0Rco5/QqNMArmFQqYKNkfh0mkCKz258TOLGGbznNbvWU5PQklElBUTqB2r1nJc5nYdAN0cOYYRbXhql5s61Q0S4REXG0gZVfqZMxGFpomeVx09tQRbYHKW/ptp4HKb0x2GbA/Wk1qcvvHAOqhU9f1/+MhIeyUShNeQdTthbm2hnS3Z46KPw0EEdLuSo9xG8hu+saak/xIs4bOaKbtkjSqdeTH3UzEKCjK0bQDoB6JvS6tq+CVzxoUGVYYDzbS0ADDKgdVGkOsGzVswtUOo7yYzOY9jLHanbMCZjvDfOByyYdTnegtS/iIExCPhM0V/9WzY1Y1/crX2RIgdWzTsV2djG24/tZvIggMTZE3PZH83pEduWzcMyi4JED/OYCaWlJRWFqhq+3g/K/0DgM3YPDRwul3yGhoKiWr3bRDC2RPMRTlINd10ctocnDupV1yxFzgLPimrG0LLxcmk2foRkTeJ2d/3LtjN0HfvmLSvVKrAOUDOTVcOsenoyVauNg==
template:
metadata:
creationTimestamp: null
name: invidious-secret
namespace: invidious-ns

View File

@ -1,20 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: ntfy-notification-cronjob
namespace: default
spec:
schedule: "03 21 * * *"
jobTemplate:
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: ntfy-notifier
image: curlimages/curl:7.80.0
command: ["/bin/sh", "-c"]
args:
- |
curl -X POST -d "this is a test notification from a k3s cronjob!" https://ntfy.sh/test

View File

@ -1,19 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: longhorn
namespace: longhorn-system
spec:
interval: 5m
chart:
spec:
chart: longhorn
version: "1.5.1"
sourceRef:
kind: HelmRepository
name: longhorn
namespace: flux-system
values:
service:
ui:
type: LoadBalancer

View File

@ -1,16 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: longhorn-backup
namespace: longhorn-system
spec:
encryptedData:
AWS_ACCESS_KEY_ID: AgAaiekhcrroR4st3tc4Bk1GQQYa+1thkHqYz7gANqClaHCbwWWDKi312YN32xGdlxtCLWeqp+oU1yJjXa71Ha1U6R3IT4oLqh+3CrmE0HvqWEVWbaiInB/J8S91ejFKhSniL4W45667ntqByIh7yG/sR2ROpW4NGy1iDOH+B/D3j47XJFPEbLQVci1fWN9inLATNZlHOWvml5xjNaQCQP9xCTdBWBCPvqby9oQZGp3/I2z75/2bAu9UCV9U7ARY550v30EwrBpfROIRUGVw7TCPtiNabhmcy2PU1QWleFTZNf2wAvIMoFxH6DgfmaiRY39JFuGFypRviAf9t+OyP9YYplkrk8pdXGImTTsLl+y6PSM+ibdJyOH4g/cyWA/+882aPpg0f8u1fmfOjonmYPSGi9UqKEJoJsWUnRBW+INZoxnG3vQvYF0hoaMgqi6QnLcMlOP+zjd/2sLlPRqlwPUUF35Q67RmAahAHUA2P1a25ojcBVGFBMQ8voKy3QtMyOJkrsS8Uq0iX1gVNLZuZRZsCoxJDxU13043XF7OD/lSggzG7gElAsuGHZ0/3ltF17TuPDx0/GawW+fIzU9yQBnLFvYn1UJwC1m9lQvRmlg5yQm1uFO1oBP4rX6N7GWttn8zmLzyUeqnuOvUFI0kZHrUsfTYEM+FqUAIPJFKFO2OZGRagVn59Vr3/aEgn83CODveD8g00oq2PU5rqQ61zWYZNoQOGA==
AWS_ENDPOINTS: AgBytK+0Q+PqVpzW1thju/ntgZmMm8n5V3ZaoJ8ZmkEm+9USijqrpx0N4aGdrm1Z4a/OYKhBm0Xtwm4s3MSIsipAkB5cireUNrd0ggeCAqG9nhqIqZhqsbkbll3g1QNFxuprE4196Byo9nCdcxkR7OMHJ86FMraPkjxqLzHNb8XuNpQn9CinFaUdXcbhO03ZKZFCtxwlnRYtr2Anx2EBT38tLcg/dIt25+4QY95djZdw8NwFP1xlslwE+rbwMQHAxpM6bZLqGtqWNDTdJ0NummoJXnWiOwVPEpjiPM6vdXiBkjRlGpldXuF2JK/c3t4G1CA5URck8jcFaVNfO+wUsG45hp+EkE5XwXMXdY1KB8oveYU7ehCrI8wCY2obhYsbQVve34S2Mv/C47uCtFYHSE7lZsHttabWbYS4QSmSe+BM8uCFoUkJErBPsIZ73TzRgjhSR1Y/qtoyNDlYSH8jZEm8OlIsk5fdOWMN87e/l4TIqSVd02kJAKvn/o564BjrcdqB8gU6adV0vh4q1GBtwwF8nquQP3mL9zxDiljCikDwDD7DNVbzSUwA7tg4YYmQAwpRDfhEZkr5uPaQJ8pyhVp3ZyjACfHB0i2zF/x/UnUogs42OunchrPlkBAPUvY1vbM8uRcy+E0UO6h5gTOf9O1cB0pnIxz//MIbg4ve2SYftLstdtlJr4xL/QhqPWb7VZDB0QZUceFvKpbourbE1M6htGr8jGxR/UI=
AWS_SECRET_ACCESS_KEY: AgC33pdA8wIcOtyYeh0+rsrBtw5VsT8r9z35ovM8za2hcUnEg8ON8SMlGjdPSUSUVQPvT3NoXCZhzNvIwvdBPDsqSGdIeEb1zQ1hGz4jycd5OYPCNOA8yiTv9UfzYaO/YthyoeYOufHHSfz4o7uZFAqr7xIYX+1tOobtJjiQQopKxSbtlbkwNUAH86TSwJp66jhXsy35aAWVcfycAhwtVzc1TLyTJ6EB43BT/0f+qiLxiAqRWnfMQ063swnAoQ1RDAto3LMBMsJOCnYk2sxIXrlg+l2vdAH5OSRHxLTKAK7i2z7h74NhlXhJmWMNm+M3rz8pDIUfpYlgmDW96B/JRkq/xVgbUXdNuSE1E6w04QVVcgqeh9xgYuCVwxaIpFKY27cnX5Z3SG/WcELHw4QUzoPwaNmOKho0cSFzseI8r1inIj7OTafP4/j8/3gunlvTilUI6O/Nz7n0gt/ZJwzhX6Un9ETstPYsCFFGCpYgi0Mpr4bVOKiRvdR/00r29lYmoTs49U2FPdoqZWo9h+m2VBl0WLnMqSHvrCR4y6Gz8iQQAlSxIVEaQ+5i6N0K51Ba+9g7PLKljpCIhAL356MT1595FfQrhg6GozUuv184paa4SR/sqX2k70RrdAULAEFoGvkv9dIYJRFPL7RPxXKhr1UO1/HgJbCGZvCpa+VbgzB9Fztf3w4gwY2rxHhPf+kRC788xsUEY1n6L1qOUgQaQJtxlLf53RHVNsj8eEWC3gPcer0qyIH0lGQE
template:
metadata:
creationTimestamp: null
name: longhorn-backup
namespace: longhorn-system

View File

@ -1,8 +0,0 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: metallb-pool-addresses
namespace: metallb-system
spec:
addresses:
- 10.0.3.64-10.0.3.253

View File

@ -1,8 +0,0 @@
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: my-l2-advertisment
namespace: metallb-system
spec:
ipAddressPools:
- metallb-pool-addresses
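Together, the IPAddressPool and this L2Advertisement tell MetalLB to hand out addresses from 10.0.3.64-10.0.3.253 to LoadBalancer services and to answer ARP for them from whichever node currently holds each address.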

View File

@ -1,349 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: metallb-helm-chart-value-overrides
namespace: metallb-system
data:
values.yaml: |-
# Default values for metallb.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
loadBalancerClass: ""
# To configure MetalLB, you must specify ONE of the following two
# options.
rbac:
# create specifies whether to install and use RBAC rules.
create: true
prometheus:
# scrape annotations specifies whether to add Prometheus metric
# auto-collection annotations to pods. See
# https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml
# for a corresponding Prometheus configuration. Alternatively, you
# may want to use the Prometheus Operator
# (https://github.com/coreos/prometheus-operator) for more powerful
# monitoring configuration. If you use the Prometheus operator, this
# can be left at false.
scrapeAnnotations: false
# port both controller and speaker will listen on for metrics
metricsPort: 7472
# if set, enables rbac proxy on the controller and speaker to expose
# the metrics via tls.
# secureMetricsPort: 9120
# the name of the secret to be mounted in the speaker pod
# to expose the metrics securely. If not present, a self-signed
# certificate is used.
speakerMetricsTLSSecret: ""
# the name of the secret to be mounted in the controller pod
# to expose the metrics securely. If not present, a self-signed
# certificate is used.
controllerMetricsTLSSecret: ""
# prometheus doesn't have permission to scrape all namespaces, so we give it permission to scrape metallb's namespace
rbacPrometheus: true
# the service account used by prometheus
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
serviceAccount: ""
# the namespace where prometheus is deployed
# required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
namespace: ""
# the image to be used for the kuberbacproxy container
rbacProxy:
repository: gcr.io/kubebuilder/kube-rbac-proxy
tag: v0.12.0
pullPolicy:
# Prometheus Operator PodMonitors
podMonitor:
# enable support for Prometheus Operator
enabled: false
# optional additional labels for podMonitors
additionalLabels: {}
# optional annotations for podMonitors
annotations: {}
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
# Prometheus Operator ServiceMonitors. To be used as an alternative
# to podMonitor, supports secure metrics.
serviceMonitor:
# enable support for Prometheus Operator
enabled: false
speaker:
# optional additional labels for the speaker serviceMonitor
additionalLabels: {}
# optional additional annotations for the speaker serviceMonitor
annotations: {}
# optional tls configuration for the speaker serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
controller:
# optional additional labels for the controller serviceMonitor
additionalLabels: {}
# optional additional annotations for the controller serviceMonitor
annotations: {}
# optional tls configuration for the controller serviceMonitor, in case
# secure metrics are enabled.
tlsConfig:
insecureSkipVerify: true
# Job label for scrape target
jobLabel: "app.kubernetes.io/name"
# Scrape interval. If not set, the Prometheus default scrape interval is used.
interval:
# metric relabel configs to apply to samples before ingestion.
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]
# relabel configs to apply to samples before ingestion.
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# target_label: nodename
# replacement: $1
# action: replace
# Prometheus Operator alertmanager alerts
prometheusRule:
# enable alertmanager alerts
enabled: false
# optional additional labels for prometheusRules
additionalLabels: {}
# optional annotations for prometheusRules
annotations: {}
# MetalLBStaleConfig
staleConfig:
enabled: true
labels:
severity: warning
# MetalLBConfigNotLoaded
configNotLoaded:
enabled: true
labels:
severity: warning
# MetalLBAddressPoolExhausted
addressPoolExhausted:
enabled: true
labels:
severity: alert
addressPoolUsage:
enabled: true
thresholds:
- percent: 75
labels:
severity: warning
- percent: 85
labels:
severity: warning
- percent: 95
labels:
severity: alert
# MetalLBBGPSessionDown
bgpSessionDown:
enabled: true
labels:
severity: alert
extraAlerts: []
# controller contains configuration specific to the MetalLB cluster
# controller.
controller:
enabled: true
# -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
# command: /controller
# webhookMode: enabled
image:
repository: quay.io/metallb/controller
tag:
pullPolicy:
## @param controller.updateStrategy.type Metallb controller deployment strategy type.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## e.g:
## strategy:
## type: RollingUpdate
## rollingUpdate:
## maxSurge: 25%
## maxUnavailable: 25%
##
strategy:
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
securityContext:
runAsNonRoot: true
# nobody
runAsUser: 65534
fsGroup: 65534
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
runtimeClassName: ""
affinity: {}
podAnnotations: {}
labels: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
# speaker contains configuration specific to the MetalLB speaker
# daemonset.
speaker:
enabled: true
# command: /speaker
# -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
logLevel: info
tolerateMaster: true
memberlist:
enabled: true
mlBindPort: 7946
mlSecretKeyPath: "/etc/ml_secret_key"
excludeInterfaces:
enabled: true
image:
repository: quay.io/metallb/speaker
tag:
pullPolicy:
## @param speaker.updateStrategy.type Speaker daemonset strategy type
## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
##
updateStrategy:
## StrategyType
## Can be set to RollingUpdate or OnDelete
##
type: RollingUpdate
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use. If not set and create is
# true, a name is generated using the fullname template
name: ""
annotations: {}
## Defines a secret name for the controller to generate a memberlist encryption secret
## By default secretName: {{ "metallb.fullname" }}-memberlist
##
# secretName:
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
nodeSelector: {}
tolerations: []
priorityClassName: ""
affinity: {}
## Selects which runtime class will be used by the pod.
runtimeClassName: ""
podAnnotations: {}
labels: {}
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
startupProbe:
enabled: true
failureThreshold: 30
periodSeconds: 5
# frr contains configuration specific to the MetalLB FRR container,
# for speaker running alongside FRR.
frr:
enabled: true
image:
repository: quay.io/frrouting/frr
tag: 8.5.2
pullPolicy:
metricsPort: 7473
resources: {}
# if set, enables a rbac proxy sidecar container on the speaker to
# expose the frr metrics via tls.
# secureMetricsPort: 9121
reloader:
resources: {}
frrMetrics:
resources: {}
crds:
enabled: true
validationFailurePolicy: Fail

View File

@ -1,21 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: metallb
namespace: metallb-system
spec:
chart:
spec:
chart: metallb
version: 0.13.11
sourceRef:
kind: HelmRepository
name: metallb
namespace: flux-system
interval: 15m
timeout: 5m
releaseName: metallb
valuesFrom:
- kind: ConfigMap
name: metallb-helm-chart-value-overrides
valuesKey: values.yaml
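Note: valuesFrom makes Flux merge the values.yaml key of the metallb-helm-chart-value-overrides ConfigMap (previous file) over the chart's defaults, so the full override file stays diffable in Git instead of being inlined in the HelmRelease.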

View File

@ -1,564 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: minio
namespace: minio-ns
spec:
chart:
spec:
chart: minio
sourceRef:
kind: HelmRepository
name: minio
namespace: flux-system
interval: 15m0s
timeout: 5m
releaseName: minio
values:
## Provide a name in place of minio for `app:` labels
##
nameOverride: ""
## Provide a name to substitute for the full names of resources
##
fullnameOverride: ""
## set kubernetes cluster domain where minio is running
##
clusterDomain: cluster.local
## Set default image, imageTag, and imagePullPolicy. mode is used to indicate the
##
image:
repository: quay.io/minio/minio
tag: RELEASE.2023-09-30T07-02-29Z
pullPolicy: IfNotPresent
imagePullSecrets: []
# - name: "image-pull-secret"
## Set default image, imageTag, and imagePullPolicy for the `mc` (the minio
## client used to create a default bucket).
##
mcImage:
repository: quay.io/minio/mc
tag: RELEASE.2023-09-29T16-41-22Z
pullPolicy: IfNotPresent
## minio mode, i.e. standalone or distributed
mode: standalone ## the other supported value is "distributed"
## Additional labels to include with deployment or statefulset
additionalLabels: {}
## Additional annotations to include with deployment or statefulset
additionalAnnotations: {}
## Typically the deployment/statefulset includes checksums of secrets/config,
## So that when these change on a subsequent helm install, the deployment/statefulset
## is restarted. This can result in unnecessary restarts under GitOps tooling such as
## flux, so set to "true" to disable this behaviour.
ignoreChartChecksums: false
## Additional arguments to pass to minio binary
extraArgs: []
## Additional volumes to minio container
extraVolumes: []
## Additional volumeMounts to minio container
extraVolumeMounts: []
## Additional sidecar containers
extraContainers: []
## Internal port number for MinIO S3 API container
## Change service.port to change external port number
minioAPIPort: "9000"
## Internal port number for MinIO Browser Console container
## Change consoleService.port to change external port number
minioConsolePort: "9001"
## Update strategy for Deployments
deploymentUpdate:
type: RollingUpdate
maxUnavailable: 0
maxSurge: 100%
## Update strategy for StatefulSets
statefulSetUpdate:
updateStrategy: RollingUpdate
## Pod priority settings
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
##
priorityClassName: ""
## Pod runtime class name
## ref https://kubernetes.io/docs/concepts/containers/runtime-class/
##
runtimeClassName: ""
## Set default rootUser, rootPassword
## AccessKey and secretKey are generated when not set
## Distributed MinIO ref: https://min.io/docs/minio/linux/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.html
##
#rootUser: ""
#rootPassword: ""
#
## Use existing Secret that store following variables:
##
## | Chart var | .data.<key> in Secret |
## |:----------------------|:-------------------------|
## | rootUser | rootUser |
## | rootPassword | rootPassword |
##
## All mentioned variables will be ignored in values file.
## .data.rootUser and .data.rootPassword are mandatory,
## others depend on enabled status of corresponding sections.
existingSecret: "minio-default-credentials"
## Directory on the MinIO pod
certsPath: "/etc/minio/certs/"
configPathmc: "/etc/minio/mc/"
## Path where PV would be mounted on the MinIO Pod
mountPath: "/export"
## Override the root directory which the minio server should serve from.
## If left empty, it defaults to the value of {{ .Values.mountPath }}
## If defined, it must be a sub-directory of the path specified in {{ .Values.mountPath }}
##
bucketRoot: ""
# Number of drives attached to a node
drivesPerNode: 1
# Number of MinIO containers running
replicas: 1
# Number of expanded MinIO clusters
pools: 1
## TLS Settings for MinIO
tls:
enabled: false
## Create a secret with private.key and public.crt files and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret
certSecret: ""
publicCrt: public.crt
privateKey: private.key
## Trusted Certificates Settings for MinIO. Ref: https://min.io/docs/minio/linux/operations/network-encryption.html#third-party-certificate-authorities
## Bundle multiple trusted certificates into one secret and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret
## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt.
## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret.
trustedCertsSecret: ""
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
#annotations: {}
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
#existingClaim: ""
## minio data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
## Storage class of PV to bind. By default it looks for standard storage class.
## If the PV uses a different storage class, specify that here.
storageClass: "longhorn"
#volumeName: ""
accessMode: ReadWriteOnce
size: 30Gi
## If subPath is set mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
##
subPath: ""
## Expose the MinIO service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
type: LoadBalancer
clusterIP: ~
port: "9000"
nodePort: 9000
loadBalancerIP: ~
externalIPs: []
annotations: {}
## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
enabled: false
ingressClassName: ~
labels: {}
# node-role.kubernetes.io/ingress: platform
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# kubernetes.io/ingress.allow-http: "false"
# kubernetes.io/ingress.global-static-ip-name: ""
# nginx.ingress.kubernetes.io/secure-backends: "true"
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0
path: /
hosts:
- minio-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
consoleService:
type: LoadBalancer
clusterIP: ~
port: "9001"
nodePort: 80
loadBalancerIP: ~
externalIPs: []
annotations: {}
consoleIngress:
enabled: false
ingressClassName: ~
labels: {}
# node-role.kubernetes.io/ingress: platform
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# kubernetes.io/ingress.allow-http: "false"
# kubernetes.io/ingress.global-static-ip-name: ""
# nginx.ingress.kubernetes.io/secure-backends: "true"
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0
path: /
hosts:
- console.minio-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
tolerations: []
affinity: {}
topologySpreadConstraints: []
## Add stateful containers to have security context, if enabled MinIO will run as this
## user and group NOTE: securityContext is only enabled if persistence.enabled=true
securityContext:
enabled: true
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
fsGroupChangePolicy: "OnRootMismatch"
# Additional pod annotations
podAnnotations: {}
# Additional pod labels
podLabels: {}
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 16Gi
## List of policies to be created after minio install
##
## In addition to default policies [readonly|readwrite|writeonly|consoleAdmin|diagnostics]
## you can define additional policies with custom supported actions and resources
policies: []
## writeexamplepolicy policy grants creation or deletion of buckets with name
## starting with example. In addition, grants objects write permissions on buckets starting with
## example.
# - name: writeexamplepolicy
# statements:
# - effect: Allow # this is the default
# resources:
# - 'arn:aws:s3:::example*/*'
# actions:
# - "s3:AbortMultipartUpload"
# - "s3:GetObject"
# - "s3:DeleteObject"
# - "s3:PutObject"
# - "s3:ListMultipartUploadParts"
# - resources:
# - 'arn:aws:s3:::example*'
# actions:
# - "s3:CreateBucket"
# - "s3:DeleteBucket"
# - "s3:GetBucketLocation"
# - "s3:ListBucket"
# - "s3:ListBucketMultipartUploads"
## readonlyexamplepolicy policy grants access to buckets with name starting with example.
## In addition, grants objects read permissions on buckets starting with example.
# - name: readonlyexamplepolicy
# statements:
# - resources:
# - 'arn:aws:s3:::example*/*'
# actions:
# - "s3:GetObject"
# - resources:
# - 'arn:aws:s3:::example*'
# actions:
# - "s3:GetBucketLocation"
# - "s3:ListBucket"
# - "s3:ListBucketMultipartUploads"
## conditionsexample policy creates all access to example bucket with aws:username="johndoe" and source ip range 10.0.0.0/8 and 192.168.0.0/24 only
# - name: conditionsexample
# statements:
# - resources:
# - 'arn:aws:s3:::example/*'
# actions:
# - 's3:*'
# conditions:
# - StringEquals: '"aws:username": "johndoe"'
# - IpAddress: |
# "aws:SourceIp": [
# "10.0.0.0/8",
# "192.168.0.0/24"
# ]
#
## Additional Annotations for the Kubernetes Job makePolicyJob
makePolicyJob:
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
resources:
requests:
memory: 128Mi
# Command to run after the main command on exit
exitCommand: ""
## List of users to be created after minio install
##
users:
## Username, password and policy to be assigned to the user
## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics]
## Add new policies as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management.html#access-management
## NOTE: this will fail if LDAP is enabled in your MinIO deployment
## make sure to disable this if you are using LDAP.
- accessKey: console
secretKey: console123
policy: consoleAdmin
# Or you can refer to specific secret
#- accessKey: externalSecret
# existingSecret: my-secret
# existingSecretKey: password
# policy: readonly
## Additional Annotations for the Kubernetes Job makeUserJob
makeUserJob:
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
resources:
requests:
memory: 128Mi
# Command to run after the main command on exit
exitCommand: ""
## List of service accounts to be created after minio install
##
svcaccts: []
## accessKey, secretKey and parent user to be assigned to the service accounts
## Add new service accounts as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management/minio-user-management.html#service-accounts
# - accessKey: console-svcacct
# secretKey: console123
# user: console
## Or you can refer to specific secret
# - accessKey: externalSecret
# existingSecret: my-secret
# existingSecretKey: password
# user: console
## You also can pass custom policy
# - accessKey: console-svcacct
# secretKey: console123
# user: console
# policy:
# statements:
# - resources:
# - 'arn:aws:s3:::example*/*'
# actions:
# - "s3:AbortMultipartUpload"
# - "s3:GetObject"
# - "s3:DeleteObject"
# - "s3:PutObject"
# - "s3:ListMultipartUploadParts"
makeServiceAccountJob:
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
resources:
requests:
memory: 128Mi
# Command to run after the main command on exit
exitCommand: ""
## List of buckets to be created after minio install
##
buckets: []
# # Name of the bucket
# - name: bucket1
# # Policy to be set on the
# # bucket [none|download|upload|public]
# policy: none
# # Purge if bucket exists already
# purge: false
# # set versioning for
# # bucket [true|false]
# versioning: false
# # set objectlocking for
# # bucket [true|false] NOTE: versioning is enabled by default if you use locking
# objectlocking: false
# - name: bucket2
# policy: none
# purge: false
# versioning: true
# # set objectlocking for
# # bucket [true|false] NOTE: versioning is enabled by default if you use locking
# objectlocking: false
## Additional Annotations for the Kubernetes Job makeBucketJob
makeBucketJob:
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
resources:
requests:
memory: 128Mi
# Command to run after the main command on exit
exitCommand: ""
## List of commands to run after minio install
## NOTE: the mc command TARGET is always "myminio"
customCommands:
# - command: "admin policy attach myminio consoleAdmin --group='cn=ops,cn=groups,dc=example,dc=com'"
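  ## Another sketch ("bootstrap-bucket" is a hypothetical name): a bucket could also
  ## be created at install time via mc's mb subcommand:
  # - command: "mb --ignore-existing myminio/bootstrap-bucket"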
## Additional Annotations for the Kubernetes Job customCommandJob
customCommandJob:
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
resources:
requests:
memory: 128Mi
# Command to run after the main command on exit
exitCommand: ""
## Merge jobs
postJob:
podAnnotations: {}
annotations: {}
securityContext:
enabled: false
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
nodeSelector: {}
tolerations: []
affinity: {}
## Use this field to add environment variables relevant to the MinIO server. These fields will be passed on to the MinIO container(s)
## when the chart is deployed
environment:
  ## Please refer to the comprehensive list at https://min.io/docs/minio/linux/reference/minio-server/minio-server.html
## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io"
## MINIO_BROWSER: "off"
## The name of a secret in the same kubernetes namespace which contains secret values
## This can be useful for an LDAP password, etc.
## The key in the secret must be 'config.env'
##
extraSecret: ~
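## A sketch of what the referenced secret must look like (the name
## "minio-extra-config" is hypothetical); the values live under the 'config.env' key:
# extraSecret: minio-extra-config
#
# apiVersion: v1
# kind: Secret
# metadata:
#   name: minio-extra-config
#   namespace: minio-ns
# stringData:
#   config.env: |-
#     export MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD="change-me"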
## OpenID Identity Management
## The following section documents environment variables for enabling external identity management using an OpenID Connect (OIDC)-compatible provider.
## See https://min.io/docs/minio/linux/operations/external-iam/configure-openid-external-identity-management.html for a tutorial on using these variables.
oidc:
enabled: false
configUrl: "https://identity-provider-url/.well-known/openid-configuration"
clientId: "minio"
clientSecret: ""
# Provide existing client secret from the Kubernetes Secret resource, existing secret will have priority over `clientSecret`
existingClientSecretName: ""
existingClientSecretKey: ""
claimName: "policy"
scopes: "openid,profile,email"
redirectUri: "https://console-endpoint-url/oauth_callback"
# Can leave empty
claimPrefix: ""
comment: ""
networkPolicy:
enabled: false
allowExternal: true
## PodDisruptionBudget settings
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
##
podDisruptionBudget:
enabled: false
maxUnavailable: 1
## Specify the service account to use for the MinIO pods. If 'create' is set to 'false'
## and 'name' is left unspecified, the account 'default' will be used.
serviceAccount:
create: true
## The name of the service account to use. If 'create' is 'true', a service account with that name
## will be created.
name: "minio-sa"
metrics:
serviceMonitor:
enabled: false
# scrape each node/pod individually for additional metrics
includeNode: false
public: true
additionalLabels: {}
annotations: {}
# for node metrics
relabelConfigs: {}
# for cluster metrics
relabelConfigsCluster: {}
# metricRelabelings:
# - regex: (server|pod)
# action: labeldrop
namespace: ~
# Scrape interval, for example `interval: 30s`
interval: ~
# Scrape timeout, for example `scrapeTimeout: 10s`
scrapeTimeout: ~
## ETCD settings: https://github.com/minio/minio/blob/master/docs/sts/etcd.md
## Define endpoints to enable this section.
etcd:
endpoints: []
pathPrefix: ""
corednsPathPrefix: ""
clientCert: ""
clientCertKey: ""

View File

@ -1,15 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: minio-default-credentials
namespace: minio-ns
spec:
encryptedData:
rootPassword: AgASkMrZq0TO6z/oeMyzGjDmSbJLBQCYW/7IQHdRS8M8vZkioEujShT/8IE6etxTOzGLwOkmpO8PyExTgMD3atyRRdiyBs2jaT0SIOyEZUA0PjiAgmYTWx9cAXBROOYzkT7u8IvMomEjiKx/EZG2XPhxgg0/Z9tBCVkstuEYyUfRokSco4icJ/JyHAz1Gg2F9w/KMiQJProcoAV5ajRdI4Bfb9e6E5GIW2Z0WKSH4fcCyM07nW+QnwlNGZNaAgLmSZygnUeF2PN/qD5aSj5YSjK5Va7KQRIlYszmzJcFg70yeustMIcE2nD2YVFFHb0CXKqEgnF9QrieBagorwoRvqU5XtXoXiBmzgvXtDQTJJ7ODT4XAB0oVF0QOdreBuVZ34D+Fb61O5HtFvSHRN3HsGXdvkKKgywJbjL+LaQBcEgztq0xjiGj/tjf3UDZOdOeHPZYJno9gdJX5eCTTjWVnaPxMyfwl3y4YmmHKVenCS6tsBkABk2/+lEthGUBRY9CyKl/ugwDQCJviX4tf7ZvMGGuPAxqIlZuM69jU53Zgp9Vq/8+UuTlksJzwQlH/VoyZsQl+/vSekyjDyPR3g3AunjoLsQDNnBwcghMzBFgeJzB/dSiyg0dQpiMUCcwe8i/20N/ER4pIC+ag1IyBAoKMQpWWJWyPU7IQ+JbYPdCI9Q1bMhQIpBNLkJsaiaRCvwrWaK07Ml9T4i+wMat8z8v0gIbnK+2JZ7FIeA66uuhxXhMi2Coqs5L0/vk
rootUser: AgDUG6LKdvzJorlYnxlW6cnJaqrhQkumFheLwZTD3aRf8ufFqQaGM/IPyNXwhKj4YAlr17gSR9kzIhYnkrKwVq9rtzo/arm2hF4cDWwQEZlrfmkqZfAec4p81KvyYgL19fuhDOeiZQfuCHl0MvDw+j6AzAk6Q6bbNdjWElaRzNLzjRAM892OCS2pubzRPLJl2+/9Ny/lZ2ePmZHHdin7ca73aCrcO1PryrhqQxZRMM0iaNKjUGsY8WMeoNnayhJ34KbsEMDTtPkWXlZb2FGtJDafw0A0fNn19PlU7wN7HeMK05SPgp4Sjs9LFrHNBanjF/rKqInCSg2lN57bUcJcVotpXEt6rmTEySo2QhnfFAXafX6hfl/HHT9GSrya+vFLKNXVf8hxVZMRjXmNIi0N3obvHOqGIJFDiy4iWEwOdrn/yetHs8ctS+DrO4pNY1cz/6SzaBayqaPqcxIAWhCKxXtNWb6sHBpTRsXpwUFq2Hoc9idB1uTGOpmpSWl8awUUsanXv4Kb2sZkXNc3iCCwx6TBDLQ1fukISj4n30RcTFDqa++3Nxq1n1immNerX30PjMWewxlUvAm5O9kwcIplfk8iW9ii3gRlth0Qs8FGhbfrghz5xs8CIgxEhnrCRphNeIow3JT1wxGU0r/QKoQu8zgEz+TsNdCXmB8bnauYyrW6ANhZaWx/wGoB29j7mHWfvLsTIwB2Q8HeV4agwKXoGSsp
template:
metadata:
creationTimestamp: null
name: minio-default-credentials
namespace: minio-ns
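# A SealedSecret like this is regenerated with kubeseal rather than edited by hand;
# a sketch, assuming the sealed-secrets controller runs in kube-system:
#   kubectl create secret generic minio-default-credentials -n minio-ns \
#     --from-literal=rootUser=... --from-literal=rootPassword=... \
#     --dry-run=client -o yaml | kubeseal -o yaml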

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: navidrome-pvc-data
namespace: navidrome-ns
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 16Gi

View File

@ -1,81 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: navidrome
namespace: navidrome-ns
spec:
selector:
matchLabels:
app: navidrome
template:
metadata:
labels:
app: navidrome
spec:
nodeSelector:
kubernetes.io/hostname: gluttony
securityContext:
fsGroup: 1000
containers:
- name: navidrome
image: deluan/navidrome:latest
securityContext:
runAsUser: 1000
runAsGroup: 1000
ports:
- containerPort: 4533
env:
- name: ND_BASEURL
value: "https://music.clortox.com"
- name: ND_CACHEFOLDER
value: "/cache"
- name: ND_MUSICFOLDER
value: "/music"
- name: ND_DATAFOLDER
value: "/data"
- name: ND_SCANSCHEDULE
value: "1h"
- name: ND_LOGLEVEL
value: "info"
- name: ND_SESSIONTIMEOUT
value: "24h"
- name: ND_ENABLESHARING
value: "true"
- name: ND_UILOGINBACKGROUNDURL
value: "https://general.api.clortox.com/images/background/today"
- name: ND_UIWELCOMEMESSAGE
value: "Lol. Lmao even"
- name: ND_REVERSEPROXYUSERHEADER
value: "X-Authentik-Username"
- name: ND_REVERSEPROXYWHITELIST
value: "0.0.0.0/0"
#- name: ND_SPOTIFY_ID
# valueFrom:
# secretKeyRef:
# name: spotify-creds
# key: CLIENT_ID
        #- name: ND_SPOTIFY_SECRET
        #  valueFrom:
        #    secretKeyRef:
        #      name: spotify-creds
        #      key: CLIENT_SECRET
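        # If the Spotify integration is enabled, the "spotify-creds" Secret must
        # exist first; a sketch:
        #   kubectl create secret generic spotify-creds -n navidrome-ns \
        #     --from-literal=CLIENT_ID=... --from-literal=CLIENT_SECRET=...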
volumeMounts:
- name: data
mountPath: "/data"
- name: music
mountPath: "/music"
readOnly: true
- name: cache
mountPath: "/cache"
volumes:
- name: data
persistentVolumeClaim:
claimName: navidrome-pvc-data
- name: music
persistentVolumeClaim:
claimName: navidrome-pvc-music
- name: cache
emptyDir: {}

View File

@ -1,25 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: navidrome-ingress
namespace: navidrome-ns
annotations:
kubernetes.io/ingress.class: "nginx"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
tls:
- hosts:
- music.clortox.com
secretName: music-clortox-com-tls
rules:
- host: music.clortox.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: navidrome-service
port:
                number: 80 # Must match the port exposed by the navidrome Service, which maps 80 -> 4533

View File

@ -1,21 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: navidrome-pv-music
namespace: navidrome-ns
spec:
storageClassName: local-storage
capacity:
storage: 18000Gi
accessModes:
- ReadWriteMany
hostPath:
path: "/Main/Media"
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- gluttony

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: navidrome-pvc-music
namespace: navidrome-ns
spec:
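  # volumeName pins this claim to the statically provisioned navidrome-pv-music PV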
volumeName: navidrome-pv-music
storageClassName: local-storage
accessModes:
- ReadWriteMany
resources:
requests:
storage: 18000Gi

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: navidrome-service
namespace: navidrome-ns
spec:
type: LoadBalancer
ports:
- port: 80
targetPort: 4533
protocol: TCP
selector:
app: navidrome

File diff suppressed because it is too large.

View File

@ -1,556 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: gpu-operator
namespace: nvidia-system
spec:
chart:
spec:
chart: gpu-operator
sourceRef:
kind: HelmRepository
name: nvidia-operator
namespace: flux-system
interval: 15m0s
timeout: 5m
releaseName: gpu-operator
values:
# Default values for gpu-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
platform:
openshift: false
nfd:
enabled: true
nodefeaturerules: false
psa:
enabled: false
cdi:
enabled: false
default: false
sandboxWorkloads:
enabled: false
defaultWorkload: "container"
daemonsets:
labels: {}
annotations: {}
priorityClassName: system-node-critical
tolerations:
- key: nvidia.com/gpu
operator: Exists
effect: NoSchedule
      # configuration for controlling the update strategy ("OnDelete" or "RollingUpdate") of GPU Operands
# note that driver Daemonset is always set with OnDelete to avoid unintended disruptions
updateStrategy: "RollingUpdate"
# configuration for controlling rolling update of GPU Operands
rollingUpdate:
# maximum number of nodes to simultaneously apply pod updates on.
# can be specified either as number or percentage of nodes. Default 1.
maxUnavailable: "1"
validator:
repository: nvcr.io/nvidia/cloud-native
image: gpu-operator-validator
# If version is not specified, then default is to use chart.AppVersion
#version: ""
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
args: []
resources: {}
plugin:
env:
- name: WITH_WORKLOAD
value: "false"
operator:
repository: nvcr.io/nvidia
image: gpu-operator
# If version is not specified, then default is to use chart.AppVersion
#version: ""
imagePullPolicy: IfNotPresent
imagePullSecrets: []
priorityClassName: system-node-critical
defaultRuntime: docker
runtimeClass: nvidia
use_ocp_driver_toolkit: false
      # cleanup CRD on chart uninstall
cleanupCRD: false
# upgrade CRD on chart upgrade, requires --disable-openapi-validation flag
# to be passed during helm upgrade.
upgradeCRD: false
initContainer:
image: cuda
repository: nvcr.io/nvidia
version: 12.3.2-base-ubi8
imagePullPolicy: IfNotPresent
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Equal"
value: ""
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Equal"
value: ""
effect: "NoSchedule"
annotations:
openshift.io/scc: restricted-readonly
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: "node-role.kubernetes.io/master"
operator: In
values: [""]
- weight: 1
preference:
matchExpressions:
- key: "node-role.kubernetes.io/control-plane"
operator: In
values: [""]
logging:
# Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano')
timeEncoding: epoch
# Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', or any integer value > 0 which corresponds to custom debug levels of increasing verbosity
level: info
# Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn)
# Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error)
develMode: false
resources:
limits:
cpu: 500m
memory: 350Mi
requests:
cpu: 200m
memory: 100Mi
mig:
strategy: single
driver:
enabled: true
nvidiaDriverCRD:
enabled: false
deployDefaultCR: true
driverType: gpu
nodeSelector: {}
useOpenKernelModules: false
# use pre-compiled packages for NVIDIA driver installation.
      # only supported as a tech-preview feature on ubuntu22.04 kernels.
usePrecompiled: false
repository: nvcr.io/nvidia
image: driver
version: "550.54.15"
imagePullPolicy: IfNotPresent
imagePullSecrets: []
startupProbe:
initialDelaySeconds: 60
periodSeconds: 10
# nvidia-smi can take longer than 30s in some cases
# ensure enough timeout is set
timeoutSeconds: 60
failureThreshold: 120
rdma:
enabled: false
useHostMofed: false
upgradePolicy:
# global switch for automatic upgrade feature
# if set to false all other options are ignored
autoUpgrade: true
# how many nodes can be upgraded in parallel
# 0 means no limit, all nodes will be upgraded in parallel
maxParallelUpgrades: 1
# maximum number of nodes with the driver installed, that can be unavailable during
# the upgrade. Value can be an absolute number (ex: 5) or
# a percentage of total nodes at the start of upgrade (ex:
# 10%). Absolute number is calculated from percentage by rounding
        # up. By default, a fixed value of 25% is used.
maxUnavailable: 25%
# options for waiting on pod(job) completions
waitForCompletion:
timeoutSeconds: 0
podSelector: ""
# options for gpu pod deletion
gpuPodDeletion:
force: false
timeoutSeconds: 300
deleteEmptyDir: false
# options for node drain (`kubectl drain`) before the driver reload
# this is required only if default GPU pod deletions done by the operator
# are not sufficient to re-install the driver
drain:
enable: false
force: false
podSelector: ""
          # It's recommended to set a timeout to avoid an infinite drain in case a non-fatal error keeps happening on retries
timeoutSeconds: 300
deleteEmptyDir: false
manager:
image: k8s-driver-manager
repository: nvcr.io/nvidia/cloud-native
# When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4
# to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0
version: v0.6.7
imagePullPolicy: IfNotPresent
env:
- name: ENABLE_GPU_POD_EVICTION
value: "true"
- name: ENABLE_AUTO_DRAIN
value: "false"
- name: DRAIN_USE_FORCE
value: "false"
- name: DRAIN_POD_SELECTOR_LABEL
value: ""
- name: DRAIN_TIMEOUT_SECONDS
value: "0s"
- name: DRAIN_DELETE_EMPTYDIR_DATA
value: "false"
env: []
resources: {}
# Private mirror repository configuration
repoConfig:
configMapName: ""
# custom ssl key/certificate configuration
certConfig:
name: ""
# vGPU licensing configuration
licensingConfig:
configMapName: ""
nlsEnabled: true
# vGPU topology daemon configuration
virtualTopology:
config: ""
# kernel module configuration for NVIDIA driver
kernelModuleConfig:
name: ""
toolkit:
enabled: true
repository: nvcr.io/nvidia/k8s
image: container-toolkit
version: v1.15.0-rc.4-ubuntu20.04
imagePullPolicy: IfNotPresent
imagePullSecrets: []
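      # These env entries point the toolkit at k3s's bundled containerd; the paths
      # below are the stock k3s locations, not a standard containerd install's defaults.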
env:
- name: CONTAINERD_CONFIG
value: /var/lib/rancher/k3s/agent/etc/containerd/config.toml
- name: CONTAINERD_SOCKET
value: /run/k3s/containerd/containerd.sock
resources: {}
installDir: "/usr/local/nvidia"
devicePlugin:
enabled: true
repository: nvcr.io/nvidia
image: k8s-device-plugin
version: v0.15.0-rc.2-ubi8
imagePullPolicy: IfNotPresent
imagePullSecrets: []
args: []
env:
- name: PASS_DEVICE_SPECS
value: "true"
- name: FAIL_ON_INIT_ERROR
value: "true"
- name: DEVICE_LIST_STRATEGY
value: envvar
- name: DEVICE_ID_STRATEGY
value: uuid
- name: NVIDIA_VISIBLE_DEVICES
value: all
- name: NVIDIA_DRIVER_CAPABILITIES
value: all
resources: {}
# Plugin configuration
# Use "name" to either point to an existing ConfigMap or to create a new one with a list of configurations(i.e with create=true).
# Use "data" to build an integrated ConfigMap from a set of configurations as
# part of this helm chart. An example of setting "data" might be:
# config:
# name: device-plugin-config
# create: true
# data:
# default: |-
# version: v1
# flags:
# migStrategy: none
# mig-single: |-
# version: v1
# flags:
# migStrategy: single
# mig-mixed: |-
# version: v1
# flags:
# migStrategy: mixed
config:
# Create a ConfigMap (default: false)
create: false
        # ConfigMap name (either existing or to create a new one with create=true above)
name: ""
# Default config name within the ConfigMap
default: ""
        # Data section for the ConfigMap to create (i.e. only applies when create=true)
data: {}
# MPS related configuration for the plugin
mps:
# MPS root path on the host
root: "/run/nvidia/mps"
# standalone dcgm hostengine
dcgm:
# disabled by default to use embedded nv-hostengine by exporter
enabled: false
repository: nvcr.io/nvidia/cloud-native
image: dcgm
version: 3.3.3-1-ubuntu22.04
imagePullPolicy: IfNotPresent
hostPort: 5555
args: []
env: []
resources: {}
dcgmExporter:
enabled: true
repository: nvcr.io/nvidia/k8s
image: dcgm-exporter
version: 3.3.5-3.4.0-ubuntu22.04
imagePullPolicy: IfNotPresent
env:
- name: DCGM_EXPORTER_LISTEN
value: ":9400"
- name: DCGM_EXPORTER_KUBERNETES
value: "true"
- name: DCGM_EXPORTER_COLLECTORS
value: "/etc/dcgm-exporter/dcp-metrics-included.csv"
resources: {}
serviceMonitor:
enabled: false
interval: 15s
honorLabels: false
additionalLabels: {}
relabelings: []
# - source_labels:
# - __meta_kubernetes_pod_node_name
# regex: (.*)
# target_label: instance
# replacement: $1
# action: replace
gfd:
enabled: true
repository: nvcr.io/nvidia
image: k8s-device-plugin
version: v0.15.0-rc.2-ubi8
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env:
- name: GFD_SLEEP_INTERVAL
value: 60s
- name: GFD_FAIL_ON_INIT_ERROR
value: "true"
resources: {}
migManager:
enabled: true
repository: nvcr.io/nvidia/cloud-native
image: k8s-mig-manager
version: v0.6.0-ubuntu20.04
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env:
- name: WITH_REBOOT
value: "false"
resources: {}
config:
name: "default-mig-parted-config"
default: "all-disabled"
gpuClientsConfig:
name: ""
nodeStatusExporter:
enabled: false
repository: nvcr.io/nvidia/cloud-native
image: gpu-operator-validator
# If version is not specified, then default is to use chart.AppVersion
#version: ""
imagePullPolicy: IfNotPresent
imagePullSecrets: []
resources: {}
gds:
enabled: false
repository: nvcr.io/nvidia/cloud-native
image: nvidia-fs
version: "2.17.5"
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
args: []
gdrcopy:
enabled: false
repository: nvcr.io/nvidia/cloud-native
image: gdrdrv
version: "v2.4.1"
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
args: []
vgpuManager:
enabled: false
repository: ""
image: vgpu-manager
version: ""
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
resources: {}
driverManager:
image: k8s-driver-manager
repository: nvcr.io/nvidia/cloud-native
# When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4
# to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0
version: v0.6.7
imagePullPolicy: IfNotPresent
env:
- name: ENABLE_GPU_POD_EVICTION
value: "false"
- name: ENABLE_AUTO_DRAIN
value: "false"
vgpuDeviceManager:
enabled: true
repository: nvcr.io/nvidia/cloud-native
image: vgpu-device-manager
version: "v0.2.5"
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
config:
name: ""
default: "default"
vfioManager:
enabled: true
repository: nvcr.io/nvidia
image: cuda
version: 12.3.2-base-ubi8
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
resources: {}
driverManager:
image: k8s-driver-manager
repository: nvcr.io/nvidia/cloud-native
# When choosing a different version of k8s-driver-manager, DO NOT downgrade to a version lower than v0.6.4
# to ensure k8s-driver-manager stays compatible with gpu-operator starting from v24.3.0
version: v0.6.7
imagePullPolicy: IfNotPresent
env:
- name: ENABLE_GPU_POD_EVICTION
value: "false"
- name: ENABLE_AUTO_DRAIN
value: "false"
kataManager:
enabled: false
config:
artifactsDir: "/opt/nvidia-gpu-operator/artifacts/runtimeclasses"
runtimeClasses:
- name: kata-qemu-nvidia-gpu
nodeSelector: {}
artifacts:
url: nvcr.io/nvidia/cloud-native/kata-gpu-artifacts:ubuntu22.04-535.54.03
pullSecret: ""
- name: kata-qemu-nvidia-gpu-snp
nodeSelector:
"nvidia.com/cc.capable": "true"
artifacts:
url: nvcr.io/nvidia/cloud-native/kata-gpu-artifacts:ubuntu22.04-535.86.10-snp
pullSecret: ""
repository: nvcr.io/nvidia/cloud-native
image: k8s-kata-manager
version: v0.1.2
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env: []
resources: {}
sandboxDevicePlugin:
enabled: true
repository: nvcr.io/nvidia
image: kubevirt-gpu-device-plugin
version: v1.2.6
imagePullPolicy: IfNotPresent
imagePullSecrets: []
args: []
env: []
resources: {}
ccManager:
enabled: false
defaultMode: "off"
repository: nvcr.io/nvidia/cloud-native
image: k8s-cc-manager
version: v0.1.1
imagePullPolicy: IfNotPresent
imagePullSecrets: []
env:
- name: CC_CAPABLE_DEVICE_IDS
value: "0x2339,0x2331,0x2330,0x2324,0x2322,0x233d"
resources: {}
node-feature-discovery:
enableNodeFeatureApi: true
gc:
enable: true
replicaCount: 1
serviceAccount:
name: node-feature-discovery
create: false
worker:
serviceAccount:
name: node-feature-discovery
# disable creation to avoid duplicate serviceaccount creation by master spec below
create: false
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Equal"
value: ""
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Equal"
value: ""
effect: "NoSchedule"
- key: nvidia.com/gpu
operator: Exists
effect: NoSchedule
config:
sources:
pci:
deviceClassWhitelist:
- "02"
- "0200"
- "0207"
- "0300"
- "0302"
deviceLabelFields:
- vendor
master:
serviceAccount:
name: node-feature-discovery
create: true
config:
extraLabelNs: ["nvidia.com"]
# noPublish: false
# resourceLabels: ["nvidia.com/feature-1","nvidia.com/feature-2"]
# enableTaints: false
# labelWhiteList: "nvidia.com/gpu"

View File

@ -1,376 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: pgadmin
namespace: postgresql-system
spec:
chart:
spec:
chart: pgadmin4
sourceRef:
kind: HelmRepository
name: runix
namespace: flux-system
interval: 15m0s
timeout: 5m
releaseName: pgadmin
values:
# Default values for pgAdmin4.
replicaCount: 1
## pgAdmin4 container image
##
image:
registry: docker.io
repository: dpage/pgadmin4
# Overrides the image tag whose default is the chart appVersion.
tag: ""
pullPolicy: IfNotPresent
## Deployment annotations
annotations: {}
## priorityClassName
priorityClassName: ""
## Deployment entrypoint override
## Useful when there's a requirement to modify container's default:
## https://www.vaultproject.io/docs/platform/k8s/injector/examples#environment-variable-example
## ref: https://github.com/postgres/pgadmin4/blob/master/Dockerfile#L206
# command: "['/bin/sh', '-c', 'source /vault/secrets/config && <entrypoint script>']"
service:
type: LoadBalancer
clusterIP: ""
loadBalancerIP: ""
port: 80
targetPort: 80
# targetPort: 4181 To be used with a proxy extraContainer
portName: http
annotations: {}
      ## Special annotations at the service level, e.g.
## this will set vnet internal IP's rather than public ip's
## service.beta.kubernetes.io/azure-load-balancer-internal: "true"
## Specify the nodePort value for the service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Pod Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# Opt out of API credential automounting.
# If you don't want the kubelet to automatically mount a ServiceAccount's API credentials,
# you can opt out of the default behavior
automountServiceAccountToken: false
## Strategy used to replace old Pods by new ones
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
##
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
## Server definitions will be loaded at launch time. This allows connection
## information to be pre-loaded into the instance of pgAdmin4 in the container.
## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/import_export_servers.html
##
serverDefinitions:
## If true, server definitions will be created
##
enabled: false
## The resource type to use for deploying server definitions.
## Can either be ConfigMap or Secret
resourceType: ConfigMap
servers:
# firstServer:
# Name: "Minimally Defined Server"
# Group: "Servers"
# Port: 5432
# Username: "postgres"
# Host: "localhost"
# SSLMode: "prefer"
# MaintenanceDB: "postgres"
networkPolicy:
enabled: true
## Ingress
## Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# ingressClassName: ""
hosts:
- host: chart-example.local
paths:
- path: /
pathType: Prefix
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# Additional config maps to be mounted inside a container
# Can be used to map config maps for sidecar as well
extraConfigmapMounts: []
# - name: certs-configmap
# mountPath: /etc/ssl/certs
# subPath: ca-certificates.crt # (optional)
# configMap: certs-configmap
# readOnly: true
extraSecretMounts: []
# - name: pgpassfile
# secret: pgpassfile
# subPath: pgpassfile
# mountPath: "/var/lib/pgadmin/storage/pgadmin/file.pgpass"
# readOnly: true
## Additional volumes to be mounted inside a container
##
extraVolumeMounts: []
## Specify additional containers in extraContainers.
## For example, to add an authentication proxy to a pgadmin4 pod.
extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - -provider=github
# - -client-id=
# - -client-secret=
# - -github-org=<ORG_NAME>
# - -email-domain=*
# - -cookie-secret=
# - -http-address=http://0.0.0.0:4181
# - -upstream-url=http://127.0.0.1:3000
# ports:
# - name: proxy-web
# containerPort: 4181
## @param existingSecret Name of existing secret to use for default pgadmin credentials. `env.password` will be ignored and picked up from this secret.
##
existingSecret: ""
## @param secretKeys.pgadminPasswordKey Name of key in existing secret to use for default pgadmin credentials. Only used when `existingSecret` is set.
##
secretKeys:
pgadminPasswordKey: password
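    ## A sketch of wiring that up ("pgadmin-credentials" is a hypothetical name):
    # kubectl create secret generic pgadmin-credentials -n postgresql-system \
    #   --from-literal=password='...'
    # existingSecret: pgadmin-credentials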
## pgAdmin4 startup configuration
## Values in here get injected as environment variables
    ## A chart reinstall is needed to apply changes
env:
# can be email or nickname
email: tyler@clortox.com
password: defaultpassword
# pgpassfile: /var/lib/pgadmin/storage/pgadmin/file.pgpass
# set context path for application (e.g. /pgadmin4/*)
# contextPath: /pgadmin4
## If True, allows pgAdmin4 to create session cookies based on IP address
## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/config_py.html
##
enhanced_cookie_protection: "False"
## Add custom environment variables that will be injected to deployment
## Ref: https://www.pgadmin.org/docs/pgadmin4/latest/container_deployment.html
##
variables: []
# - name: PGADMIN_LISTEN_ADDRESS
# value: "0.0.0.0"
# - name: PGADMIN_LISTEN_PORT
# value: "8080"
## Additional environment variables from ConfigMaps
envVarsFromConfigMaps: []
# - array-of
# - config-map-names
## Additional environment variables from Secrets
envVarsFromSecrets: []
# - array-of
# - secret-names
persistentVolume:
## If true, pgAdmin4 will create/use a Persistent Volume Claim
## If false, use emptyDir
enabled: true
## pgAdmin4 Persistent Volume Claim annotations
##
annotations: {}
## pgAdmin4 Persistent Volume access modes
## Must match those of existing PV or dynamic provisioner
## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
accessModes:
- ReadWriteOnce
## pgAdmin4 Persistent Volume Size
##
size: 1Gi
storageClass: "longhorn"
## pgAdmin4 Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
#existingClaim: "pgadmin-pvc"
## Additional volumes to be added to the deployment
##
extraVolumes: []
## Security context to be added to pgAdmin4 pods
##
securityContext:
runAsUser: 5050
runAsGroup: 5050
fsGroup: 5050
containerSecurityContext:
enabled: false
allowPrivilegeEscalation: false
## pgAdmin4 readiness and liveness probe initial delay and timeout
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
##
livenessProbe:
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 3
readinessProbe:
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 3
    ## Required to be enabled for releases prior to pgAdmin4 4.16, to set the ACL on /var/lib/pgadmin.
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
##
VolumePermissions:
## If true, enables an InitContainer to set permissions on /var/lib/pgadmin.
##
enabled: false
## @param extraDeploy list of extra manifests to deploy
##
extraDeploy: []
## Additional InitContainers to initialize the pod
##
extraInitContainers: |
# - name: add-folder-for-pgpass
# image: "dpage/pgadmin4:latest"
# command: ["/bin/mkdir", "-p", "/var/lib/pgadmin/storage/pgadmin"]
# volumeMounts:
# - name: pgadmin-data
# mountPath: /var/lib/pgadmin
# securityContext:
# runAsUser: 5050
containerPorts:
http: 80
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Horizontal Pod Autoscaling
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
#
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
## Node labels for pgAdmin4 pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
## Pod affinity
##
affinity: {}
## Pod annotations
##
podAnnotations: {}
## Pod labels
##
podLabels: {}
# key1: value1
# key2: value2
# -- The name of the Namespace to deploy
# If not set, `.Release.Namespace` is used
namespace: null
init:
## Init container resources
##
resources: {}
## Define values for chart tests
test:
## Container image for test-connection.yaml
image:
registry: docker.io
repository: busybox
tag: latest
## Resources request/limit for test-connection Pod
resources: {}
# limits:
# cpu: 50m
# memory: 32Mi
# requests:
# cpu: 25m
# memory: 16Mi
## Security context for test-connection Pod
securityContext:
runAsUser: 5051
runAsGroup: 5051
fsGroup: 5051

View File

@ -1,100 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: plex
namespace: plex-ns
annotations:
    force-recreate: "true"
spec:
replicas: 1
selector:
matchLabels:
app: plex
template:
metadata:
labels:
app: plex
spec:
nodeSelector:
kubernetes.io/hostname: gluttony
containers:
- name: plex
image: plexinc/pms-docker:public
env:
- name: TZ
value: EST
- name: PLEX_UID
value: "1000"
- name: PLEX_GID
value: "1000"
- name: PLEX_CLAIM
valueFrom:
secretKeyRef:
name: plex-claim
key: PLEX_CLAIM
ports:
- containerPort: 32400
- containerPort: 8234
- containerPort: 32469
- containerPort: 1900
- containerPort: 32410
- containerPort: 32412
- containerPort: 32413
- containerPort: 32414
volumeMounts:
- name: plex-config
mountPath: /config
- name: plex-media
mountPath: /data
# Sidecar providing access to upload/view/download raw media files
      - name: filebrowser
image: git.clortox.com/infrastructure/filebrowser:v1.0.1
env:
- name: ADMIN_PASS
valueFrom:
secretKeyRef:
name: filebrowser-secret
key: ADMIN-PASS
- name: DEFAULT_USERNAME
value: "default"
- name: DEFAULT_PASSWORD
valueFrom:
secretKeyRef:
name: filebrowser-secret
key: DEFAULT-PASS
- name: BRANDING_NAME
value: "Media Storage"
- name: AUTH_METHOD
value: "proxy"
- name: AUTH_HEADER
value: "X-Auth-User"
- name: PERM_ADMIN
value: "false"
- name: PERM_EXECUTE
value: "false"
- name: PERM_CREATE
value: "true"
- name: PERM_RENAME
value: "true"
- name: PERM_MODIFY
value: "true"
- name: PERM_DELETE
value: "false"
- name: PERM_SHARE
value: "true"
- name: PERM_DOWNLOAD
value: "true"
volumeMounts:
- name: plex-media
mountPath: /srv
ports:
- containerPort: 80
volumes:
- name: plex-config
persistentVolumeClaim:
claimName: plex-pvc-config
- name: plex-media
persistentVolumeClaim:
claimName: plex-pvc-media

View File

@ -1,20 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: plex-pv-media
spec:
storageClassName: local-storage
capacity:
storage: 18000Gi
accessModes:
- ReadWriteMany
hostPath:
path: "/Main/Media"
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- gluttony

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: plex-pvc-config
namespace: plex-ns
spec:
storageClassName: longhorn
accessModes:
- ReadWriteMany
resources:
requests:
storage: 200Gi

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: plex-pvc-media
namespace: plex-ns
spec:
volumeName: plex-pv-media
storageClassName: local-storage
accessModes:
- ReadWriteMany
resources:
requests:
storage: 18000Gi

View File

@ -1,15 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: plex-claim
namespace: plex-ns
spec:
encryptedData:
PLEX_CLAIM: AgBKYZur5cfL/WOGEs6sciEoqE4Jf5+nNVXTzLYni7bGkifPed7b4PTPK0+Mdao3Z6AkGceYnNlYMOUoEJBjC+5aEWf1+enJHmlbjwqYrRAIyG90nlA79e35dRHd6XcEoxKxshOL9HNQbywXaSdk/FlXSz3y2JbiOxU+x3Tnkl4vJRVBBncL+B2EFpNwF5tK9A7s3pYXvrVSPYJuk5yUH8USyu9EOUrS3wzqggq5Y2zTXEngtXRl42qPHWHFtYqeE+2wlutPKvcqtnoIxVr88910NFvv4+h1PSrJztEPgUiStE4APpehZlF8X0UpGK4kF0ozsDHY0NJoJpa8rK69vYfj/y1EX9RBs5s076lfwHtpItaKxQ4+a/dnk0vs2qLj4AEG57iKrPbnTicreYTCFyeSUKwTKUTXc+8PALWnu8sk4JAR1c0lQhLnPbHzX/yURlYzMGTlCue03hllS4r7ZknOatW7HjAQveqn6j0s893Ntbo6vAr8Ooe1EjevSED1cA7OUaQnYLge7zQjzlL928raWhu44ltRQG0xswyQpbphBJCIcGqxEq/yVozTKb5X+J7Jir1xmE4x9NG/PAkUVtlrNhM7yjfF/2xZITyqNlAHJlU3UfQg4yI18DB6RdgvzLCXEqhvg6brtjQyhRtg4PowVvB3nCZPSH9Qjn2u1aeeSqEypCyQRe77X7awc5hLkAQRjyO+I1N5Z/S/x1FTabZfhR0n7/MYHbQTsA==
template:
metadata:
creationTimestamp: null
name: plex-claim
namespace: plex-ns
---

View File

@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: plex-service
namespace: plex-ns
spec:
selector:
app: plex
type: LoadBalancer
ports:
- name: plex
protocol: TCP
port: 32400
targetPort: 32400
  - name: filebrowser
protocol: TCP
port: 80
targetPort: 80

View File

@ -1,15 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: filebrowser-secret
namespace: plex-ns
spec:
encryptedData:
ADMIN-PASS: AgAj7jrppKzPFfnYDV+FEuXQx9lrkppWFElR3DjtR017tpBQs6/KjZYU1TX81TkNh8cONg4mGB72zvk60Yft5b5TSgZWuKA4qTXYEoFusyMR3wyOU/Ft7ZMk7IAr/7Hi9dDAh3CkmrQ2lQ3C5hRlfTljaSxqC9abmEZIeSo7OhrkX8YIvFhanBMbPenfkulSsK38dp3PfIC8kntRV1u37Z7CxovVu+Kn7IoRC4sKa3gcdJ5lIA/Aq3rln8atmzZcPGPzjIAPY4P72mjPaeIvzqzLsNMcecIIr20MyLTOG/eI8WrM+WC+dgyvj/Pjq/hzTW1QD3z4jZW224o4ghKiPr6mW0BbN6KBBqv/JFtpBqiYgGi/ADBVxTG7YUA+FcT7YA6nuxlqg+TMpwqP8ZJBmghosBeqmBndjKUjpexoihmy+XTDbEr7e8RDpOdL9jS9hGPt47cmFITSFSEQIGM6kOtdYWcMw6+aKkTt5Ul4bUfV9TXultGyNYITibATXWNqbRfZDjYVrWOIfoVJOe19N3WZg9R4UeKzow3RkoJvn3MUTYOOrzr9Csx+VxUMeGxLPFftedUIy8zzXaqL/0OFogQZ2P+mesiYxc67Z4VS2u0+iCLkJdUDYnM+2q6TRQMI2nP40ko62xDuSE2BDcufqsKfHoddswlYDyelLVqJKee+P3sUoxcblYlv6kqz1GbVhBKQrHzFphx72KG219N9zwjOI6w8V6NXHUEFblQ3gt9RPA==
DEFAULT-PASS: AgDXxxyMBUb7Q0J8LvxPXNEAz75c1JcS7xL3rN7E2Wg7MLsZHj2/0hRf9jaCCyyVnr/Pabbdmjb0nth4Dlm50tLWH+rU7KtLPwHB0pMVi9zSxKBdyvOJurVdY/nlbSuanxSL37rBOrwRQRv6t8w/IIs4R9GEaFjxKoIJTuV8JRu3r62FiL/3o2zyok9UYcLxw2h9H7B9yn9wXn5CAFk0M4jNRUns3oU7d0/hPbfwC216vU0ZIdga8yYlZw3zvVz54mX2XECnHWZT8gPK1w6v8AEca/kDFuVFBi63OdXFgjBHCa/uSs5wifzNPBzcRA+A8s/JgoSHEeMXTmBsMOlihSSz0kSGHS/rUdu6nZamVZfzCWOHskb3RVjs23yNJsSEDlYR/AMeAjnkMDvMe5b/X/eV1AOYkAQ/pACrSk2aG+4kLmLoLYXaeDVf8pTHj4yOvdffWk39ClCqIOyWF2+//N02lDepVwis498cL+7I4kEVXqy9FugUCsbtzxVXX6OHym4KpBZpAmrMqH83rC6CtU4orF6gjmTKCe1Ufq5GmsQgFFZTZYTexnbeTKXz6yw+RbHLTGdsaJnMaAQx5uB3khO9Pkge7/HLDmXEx+mtaaTvk7AF8PWjFJSQZEWxVSCr6O1Zd4LKsg0EP6Mrk+s+8OOfGb42e3wfJ6gY7KlTBBu8KmKnHRQl9uoMVO7y5PWwl+B3Wam5j78ggV4L9UmiEw6gYvrc8rmQWZqQbuw7pClQ3Q==
template:
metadata:
creationTimestamp: null
name: filebrowser-secret
namespace: plex-ns

File diff suppressed because it is too large.

View File

@ -1,16 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: postgresql-default-credentials
namespace: postgresql-system
spec:
encryptedData:
password: AgAN2drH19WiBU8KYZyN8N0T1o8Sh9ti1M5kroU+xDpDD+pOLlZZEw63qcUeeK2paiTm7E3hVO/EnCNyGUBvrDKQzoMNsImbsTMJMVHldiTZedZV1IQxQXIYELgUtk93I2WoOiNvpfL+ro2vomjqPExlVeI1tuqPVdL1+xZYfKfFk+pPL3kLpRuO5HDmwcjy12yYd0E3RqU0g58O7UCCCdQRMOtU8/Z/MPM0I3ZGxG5DQCN3FEra8g1wacvsOplJVGYFzIBS2T7tPyW6I5zW9mFPDozMwqINukuoDC4uSUGSP2Ka2al3VyZiZnXySV3LJ38yj9TpZpTCKY2pgDeMue1hl50xMoCGhBXGzN3lCx6M4/us42a+oss/dn8oXFLAbOVaI2B7bpWHSz8fykdpogpqGgsa23gCuR2V8crZ7xVuACXqNDyunoHLgph8McFDsBXWNcyTg7ocILIjVKFLL4LDhtFQgHZcZXiTe6kMJNdKMxnH/0z/A00JO7dhU2uub31Oa0LwiE/rWO31E39tDZj4o0bRez1jsneuvbMVvwYyyr0OmOfdznv9qvGXbFSgGYCNUuTAPzFRMU4NkIup4RL3a0s2Fg28X79JIaWGjpuXgfyUKiBqUe7f6FAKkiEFeqYCJoccVQpbiYOODjyV5+89tfopmJaj7HZ1t3HfrDeGk/Vj6w4C6e2avCl+qLWqz2nFyM526ymdfVaWV5B945pFTTFrlxh8lRH7Ej3qGJEz1Q==
postgres-password: AgAwdjZR9WDv74oup8dhkzKxYYrMOd40O9S09t8pQspuw/xiO/CaeNFkggWrVVbNaFI1nnQrd/3JFTu6/1mwinr13MqAKKmile/rXSSKnXo+f90PSEFlsanDpfMSuhZKGF5gDHp5HwWIGDl8P6uBC50/Z7u/+muPcdgnCgg7rVMo5EiipgZgQOJXuAxHN8a8w1HMQoVd4PD1SewTHfbwxKsZoBYgy+RL5vS4Gd8d4UbqkSFk8h+uZHdsJALrZ9PCsUDMInT06Ll1YgmcSigtFR0bM46kfbGr1tvXKmv2lYMBjn8gcOS3rRxKmhqT/HUxaepg6TDjoG/Hw0oNtxGHRmwwnzuDBtPtCJOCb3LEodIAXsz7U3hUrvTI0/BMsXBP/wZFB7mZ6mvy4tfz2SkHEIoqu+um2I6sC9OHPNFIQPSq59PX2t2G9RH4aTJVWKFcLv7fZ7+w+ot0D4300z0fRbKYCgUowKUtkd+H3eYmu1AzMpFkEUh+9NahHNeXCf+YsAHZb0vm4mz39UWTVuRzdwGFFG193goOFI6A2t9n2E18f/UQnhKewi9z552THTqsFO3VE/Kq2C9q+AA3BaGVCSlNw7eRXRr+lSaNiuTGgEZeQZubBLriC7rTzrfJjP2ik2vayCXb1dvGPrSGst4oo9IYnb548uFjgpd7ZQz0WGCufBhM7GNv7XaC+ZQEsCHPwJLaiD6irl8d1IKn+7g9vnOPUYRhcTiPtuEuWFneQ9tNMw==
username: AgAn4TsaveRieevxEf80zFJeKLkQoLMf+o5upoeq5YdXRnyV00xk1yL0QBYdvNdt3FnEmZR5R0oiJKimZmQqHOHH9++G1cqACsmNmEbjU+BBUNwVOhXZAkU1xHvpAACNKaqiqlhR5uGYx+rP6GbsW11UrwTu4oeYBqyGXtO5i9FaFaIbK32UPJ4e5lsml0l01reWWwZI6DH9L2O6E7wif5Pxw6wEcQphfk2YlTddXGRZA0dI1xFSSuvjxRRaASpfJqU/ztqdzF/MVCKnheZneuVYyw7w7Suv87RIx4ddrJKqDz0Fla9LWAC1xJMqGxWPE0Bgd9jWlRK4Cy6DogZ5ZoJv+pZm8zXy3N60SSM2ZM7TaP+FOz8FhgoDYXSn3lKPlLY2mlOBFJ8PYL1dcrGqDkHKiklAUelbID1Hiw7CdcavohFTi9CsaZXWpyeAEiMmiMpXY2+nnwppqv0+Oc82DvU4305q8FzFmi9N2olR4vnvzjBclHsXsSesJ5rKjNHk3aApTT2MIbOexDXTyIXIkrLRFK1/KbUx86qjlPOycZ2YlqfvbakEajFVV3TyR5Bb7ZHf5yQOOyzkmJj6Z3xaNmfvskqHyhImQHKRHEKjyvoKqWMrAQZEUm4LwLTdSRMo3o9p00Xb2YFFCOOhJF9hoxf65TvXkWNJyYMNjBW1a1H8gnrjT0zEyV4Veh75YRDmzgtl2CR/2w==
template:
metadata:
creationTimestamp: null
name: postgresql-default-credentials
namespace: postgresql-system

View File

@ -1,16 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kube-prometheus-stack
namespace: monitoring
spec:
interval: 1m
chart:
spec:
chart: kube-prometheus-stack
version: "58.7.2"
sourceRef:
kind: HelmRepository
name: prometheus-community
namespace: flux-system
values: {}
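  # No overrides yet; chart defaults apply. A sketch of a possible override:
  # values:
  #   grafana:
  #     enabled: false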

File diff suppressed because it is too large.

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: redis
namespace: redis-system

View File

@ -1,14 +0,0 @@
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
creationTimestamp: null
name: redis-key
namespace: redis-system
spec:
encryptedData:
password: AgAQ9PHv4fJez1wAJtcvWSOMFEMOOHULZhX1wFzoO9JTm4WDeK9GaWbT4tSM3fXsd+9GfhggnsFHeP4t5G/4BlvQ8lNs0bXfUZiSomUL69zhH2YEg9EhJVm9eJWvvJ75m1HnfIL2yFMm9jsxgzajg+fn5a6h4od0gjPAah9+uiVYi4xdIAv8SJK+CEXKKLhuwzV+MkQ0XdiISdanHjrPvYKA5FGRLqjmJePfSTtea5xGhx4DkHzkQ2KwzKIM/v4JOhA3JnwXebZh+GrUv6cg/fh9xnBUxeFvoimAt0gzOD0ajUIWTqTEHCqmPfumNo4w2paG+s+0vAL2gercxeyamOhkRZuWfOLwnQ/eoAm+gQGItn7UhL0yjaFDpkdICTrIXOEebScv27aHKe+4Cdw1BcAS8lIrE9JelVVgOqxBCaIvIBBPVyaFFVXF/YmMK6VAYTO1c3MDPpJEeFyNGoMo82lzL3IwRRFrPYoDrKbfsrWfZUQRYKOxVWihgWYFYx/asceJxegPAdCLq7avQ7tCoIodm9qgZ4F7F0x+N38oFLLCCe3tAhorInC/sWjkrsLpDBtAkWEsJnN865a+yRpN2YHFz+NKf2rugGDre0jA7GgisPwukmY4sC6r8MSjxumkaBo22hMoyRXBpsEBzLTsWMDjI6155J60iamBIUUORYpEVOHVFmY4iDSY9mBbp/ZzIvOa+mJCcvI5U5apJBALOUrGY3hSXHm+am7FWZtM6U0rmw==
template:
metadata:
creationTimestamp: null
name: redis-key
namespace: redis-system