Add ollama
continuous-integration/drone/push Build is passing Details

This commit is contained in:
Tyler Perkins 2024-05-05 18:59:39 -04:00
parent a25bc56303
commit 714b400ae1
Signed by: tyler
GPG Key ID: 03B27509E17EFDC8
5 changed files with 52 additions and 54 deletions

View File

@@ -1,37 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: local-ai
namespace: local-ai-ns
labels:
app: local-ai
spec:
selector:
matchLabels:
app: local-ai
replicas: 1
template:
metadata:
labels:
app: local-ai
name: local-ai
spec:
containers:
- args:
- dolphin-2.5-mixtral-8x7b
env:
- name: DEBUG
value: "true"
name: local-ai
image: quay.io/go-skynet/local-ai:v2.14.0-cublas-cuda12
imagePullPolicy: IfNotPresent
resources:
limits:
nvidia.com/gpu: 2
volumeMounts:
- name: models-volume
mountPath: /build/models
volumes:
- name: models-volume
persistentVolumeClaim:
claimName: models-pvc

View File

@@ -1,13 +0,0 @@
---
# LoadBalancer Service exposing LocalAI's HTTP API (container port 8080)
# externally on port 80.
apiVersion: v1
kind: Service
metadata:
  name: local-ai
  namespace: local-ai-ns
spec:
  type: LoadBalancer
  selector:
    app: local-ai
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8080

View File

@@ -0,0 +1,36 @@
---
# Deployment for Ollama: single replica on the "nvidia" runtime class
# with 2 GPUs, serving the Ollama API on port 11434. Model files are
# persisted on a PVC.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ollama-deployment
  namespace: ollama-ns
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ollama
  template:
    metadata:
      labels:
        app: ollama
    spec:
      runtimeClassName: nvidia
      containers:
        - name: ollama
          # NOTE(review): untagged image pulls :latest — consider
          # pinning a specific version for reproducible rollouts.
          image: ollama/ollama
          env:
            # Bind to all interfaces so the Service can reach the pod.
            - name: OLLAMA_HOST
              value: "0.0.0.0"
            # Directory Ollama reads and writes model files to.
            - name: OLLAMA_MODELS
              value: "/models"
          ports:
            - containerPort: 11434
          resources:
            limits:
              nvidia.com/gpu: 2
          volumeMounts:
            # FIX: the PVC was mounted at /my-models while OLLAMA_MODELS
            # pointed at /models, so downloaded models landed on ephemeral
            # container storage and were lost on every pod restart. Mount
            # the volume at the path Ollama actually uses.
            - name: ollama-volume
              mountPath: "/models"
      volumes:
        - name: ollama-volume
          persistentVolumeClaim:
            claimName: ollama-pvc

View File

@@ -1,12 +1,12 @@
---
# PersistentVolumeClaim backing Ollama's model storage on Longhorn.
# FIX: the pasted hunk interleaved pre- and post-change lines, leaving
# duplicate keys (name/namespace, two accessModes entries, two storage
# sizes) — invalid YAML that most parsers resolve silently as last-wins.
# This is the resulting post-commit manifest.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ollama-pvc
  namespace: ollama-ns
spec:
  accessModes:
    # ReadWriteOnce: volume attaches to a single node — sufficient for a
    # one-replica workload.
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 200Gi

View File

@@ -0,0 +1,12 @@
---
# LoadBalancer Service exposing the Ollama API on its native port 11434.
apiVersion: v1
kind: Service
metadata:
  name: ollama-service
  namespace: ollama-ns
spec:
  type: LoadBalancer
  selector:
    app: ollama
  ports:
    - port: 11434
      targetPort: 11434