LocalAI/examples/functions/.env.example

# CPU .env docs: https://localai.io/howtos/easy-setup-docker-cpu/
# GPU .env docs: https://localai.io/howtos/easy-setup-docker-gpu/
OPENAI_API_KEY=sk---anystringhere
OPENAI_API_BASE=http://api:8080/v1
# Models to preload at start
# Here we configure openllama-7b-open-instruct as gpt-3.5-turbo,
# see other options in the model gallery at https://github.com/go-skynet/model-gallery
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/openllama-7b-open-instruct.yaml", "name": "gpt-3.5-turbo"}]
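## Optional: also preload an embeddings model (e.g. bert) alongside the chat model
## by adding a second entry to PRELOAD_MODELS. This is a sketch: the
## bert-embeddings.yaml file name and the text-embedding-ada-002 alias are
## assumptions, check the model gallery for the exact URL and name.
#PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/openllama-7b-open-instruct.yaml", "name": "gpt-3.5-turbo"}, {"url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}]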
## Change the default number of threads
#THREADS=14