version: "3"

services:
  api:
    image: quay.io/go-skynet/local-ai:v1.18.0-ffmpeg
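    # Note: the -ffmpeg image variant bundles ffmpeg, which the audio
    # transcription (whisper) endpoint relies on for audio conversion.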
    # Since LocalAI will initially download the models defined in PRELOAD_MODELS,
    # you might need to tweak the healthcheck values here according to your network connection.
    # Here we give a timespan of 20m to download all the required files.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
      interval: 1m
      timeout: 20m
      retries: 20
    ports:
      - 8080:8080
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
      - IMAGE_PATH=/tmp
      # You can preload different models here as well.
      # See: https://github.com/go-skynet/model-gallery
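      # The optional "name" field registers a model under that alias, so the
      # bot's standard gpt-3.5-turbo and whisper-1 requests are served locally.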
      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, {"url": "github:go-skynet/model-gallery/stablediffusion.yaml"}, {"url": "github:go-skynet/model-gallery/whisper-base.yaml", "name": "whisper-1"}]'
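    # Models are stored in ./models on the host, so downloads survive
    # container restarts and re-creations.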
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]
  chatgpt_telegram_bot:
    container_name: chatgpt_telegram_bot
    command: python3 bot/bot.py
    restart: always
    environment:
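      # LocalAI does not check the API key by default, so any placeholder
      # value works; the base URL points the bot at the api service over the
      # internal compose network.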
      - OPENAI_API_KEY=sk---anystringhere
      - OPENAI_API_BASE=http://api:8080/v1
    build:
      context: "."
      dockerfile: Dockerfile
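    # Start the bot only after the api healthcheck passes, i.e. once the
    # preloaded models have finished downloading.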
    depends_on:
      api:
        condition: service_healthy