From fa7b2aee9cc1ffa84f2cb03a50180bf4b7a24c93 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Mon, 13 May 2024 18:44:25 +0200
Subject: [PATCH] models(gallery): add Bunny-llama (#2311)

Signed-off-by: Ettore Di Giacinto
---
 gallery/index.yaml | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/gallery/index.yaml b/gallery/index.yaml
index d50885d7..93cb489b 100644
--- a/gallery/index.yaml
+++ b/gallery/index.yaml
@@ -852,6 +852,33 @@
     - filename: Llava_1.5_Llama3_mmproj_updated.gguf
       sha256: 4f2bb77ca60f2c932d1c6647d334f5d2cd71966c19e850081030c9883ef1906c
       uri: https://huggingface.co/ChaoticNeutrals/LLaVA-Llama-3-8B-mmproj-Updated/resolve/main/llava-v1.5-8B-Updated-Stop-Token/mmproj-model-f16.gguf
+- !!merge <<: *llama3
+  name: "bunny-llama-3-8b-v"
+  urls:
+    - https://huggingface.co/BAAI/Bunny-Llama-3-8B-V-gguf
+  description: |
+    Bunny is a family of lightweight but powerful multimodal models. It offers multiple plug-and-play vision encoders, like EVA-CLIP and SigLIP, and language backbones, including Llama-3-8B, Phi-1.5, StableLM-2, Qwen1.5, MiniCPM and Phi-2. To compensate for the decrease in model size, we construct more informative training data by curated selection from a broader data source.
+
+    We provide Bunny-Llama-3-8B-V, which is built upon SigLIP and Llama-3-8B-Instruct. More details about this model can be found on GitHub.
+  icon: https://huggingface.co/BAAI/Bunny-Llama-3-8B-V-gguf/resolve/main/icon.png
+  tags:
+    - llm
+    - multimodal
+    - gguf
+    - gpu
+    - llama3
+    - cpu
+  overrides:
+    mmproj: Bunny-Llama-3-8B-Q4_K_M-mmproj.gguf
+    parameters:
+      model: Bunny-Llama-3-8B-Q4_K_M.gguf
+  files:
+    - filename: Bunny-Llama-3-8B-Q4_K_M-mmproj.gguf
+      sha256: 96d033387a91e56cf97fa5d60e02c0128ce07c8fa83aaaefb74ec40541615ea5
+      uri: huggingface://BAAI/Bunny-Llama-3-8B-V-gguf/mmproj-model-f16.gguf
+    - filename: Bunny-Llama-3-8B-Q4_K_M.gguf
+      sha256: 88f0a61f947dbf129943328be7262ae82e3a582a0c75e53544b07f70355a7c30
+      uri: huggingface://BAAI/Bunny-Llama-3-8B-V-gguf/ggml-model-Q4_K_M.gguf
 - !!merge <<: *llama3
   name: "llava-llama-3-8b-v1_1"
   description: |
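
For context, a minimal sketch of how the new gallery entry could be exercised once it has been installed into a running LocalAI instance: it sends an OpenAI-style vision request to the chat-completions endpoint, pairing a text prompt with an inline image so the mmproj projector configured above is used. The base URL, port, image path, and the assumption that the model is served under the gallery name "bunny-llama-3-8b-v" are illustrative and not part of this patch.

```python
# Sketch only: query the bunny-llama-3-8b-v model through a LocalAI
# OpenAI-compatible endpoint. URL, port, and image path are assumptions.
import base64
import requests

LOCALAI_URL = "http://localhost:8080/v1/chat/completions"  # assumed local instance

# Encode a local image as a data URI so it can travel inline with the prompt.
with open("example.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "model": "bunny-llama-3-8b-v",  # name defined by the gallery entry above
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image."},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"},
                },
            ],
        }
    ],
}

response = requests.post(LOCALAI_URL, json=payload, timeout=300)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])
```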