backend: llama-cpp
context_size: 4096
f16: true
gpu_layers: 90
mmap: true
name: llava

roles:
  user: "USER:"
  assistant: "ASSISTANT:"
  system: "SYSTEM:"

mmproj: bakllava-mmproj.gguf
parameters:
  model: bakllava.gguf
  temperature: 0.2
  top_k: 40
  top_p: 0.95

template:
  chat: |
    A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
    {{.Input}}
    ASSISTANT:

download_files:
  - filename: bakllava.gguf
    uri: huggingface://mys/ggml_bakllava-1/ggml-model-q4_k.gguf
  - filename: bakllava-mmproj.gguf
    uri: huggingface://mys/ggml_bakllava-1/mmproj-model-f16.gguf
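
As a minimal sketch of how this config is exercised, the snippet below posts a vision request to LocalAI's OpenAI-compatible chat endpoint. The host/port (localhost:8080) and the image URL are assumptions for illustration; the model name "llava" comes from the name field above.

import requests

# A sketch, assuming LocalAI is serving this config on http://localhost:8080.
response = requests.post(
    "http://localhost:8080/v1/chat/completions",
    json={
        "model": "llava",  # matches the "name" field in the config
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What is in this image?"},
                    {
                        "type": "image_url",
                        # Hypothetical image URL for illustration only.
                        "image_url": {"url": "https://example.com/cat.png"},
                    },
                ],
            }
        ],
        "temperature": 0.2,
    },
    timeout=120,
)
print(response.json()["choices"][0]["message"]["content"])

The request body follows the OpenAI chat-completions schema; at inference time the mmproj file handles the image projection while bakllava.gguf generates the text, which is why both files appear under download_files.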