From 8417fd781be57c6bdb1d4961c7f5fe07d8d102df Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Wed, 25 Dec 2024 11:45:32 +0100
Subject: [PATCH] chore(model gallery): add fastllama-3.2-1b-instruct

Signed-off-by: Ettore Di Giacinto
---
 gallery/index.yaml | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/gallery/index.yaml b/gallery/index.yaml
index d1a055b36db2..78df4904a862 100644
--- a/gallery/index.yaml
+++ b/gallery/index.yaml
@@ -964,6 +964,21 @@
     - filename: Llama-Chat-Summary-3.2-3B-Q4_K_M.gguf
       sha256: ed1be20d2374aa6db9940923f41fa229bd7ebe13d41b1ff1ff18a6f87e99df79
       uri: huggingface://bartowski/Llama-Chat-Summary-3.2-3B-GGUF/Llama-Chat-Summary-3.2-3B-Q4_K_M.gguf
+- !!merge <<: *llama32
+  name: "fastllama-3.2-1b-instruct"
+  icon: https://huggingface.co/suayptalha/FastLlama-3.2-1B-Instruct/resolve/main/FastLlama.png
+  urls:
+    - https://huggingface.co/suayptalha/FastLlama-3.2-1B-Instruct
+    - https://huggingface.co/bartowski/FastLlama-3.2-1B-Instruct-GGUF
+  description: |
+    FastLlama is a highly optimized version of the Llama-3.2-1B-Instruct model. Designed for superior performance in constrained environments, it combines speed, compactness, and high accuracy. This version has been fine-tuned using the MetaMathQA-50k section of the HuggingFaceTB/smoltalk dataset to enhance its mathematical reasoning and problem-solving abilities.
+  overrides:
+    parameters:
+      model: FastLlama-3.2-1B-Instruct-Q4_K_M.gguf
+  files:
+    - filename: FastLlama-3.2-1B-Instruct-Q4_K_M.gguf
+      sha256: 3c0303e9560c441a9abdcd0e4c04c47e7f6b21277c1e8c00eed94fc656da0be9
+      uri: huggingface://bartowski/FastLlama-3.2-1B-Instruct-GGUF/FastLlama-3.2-1B-Instruct-Q4_K_M.gguf
 - &qwen25
   ## Qwen2.5
   name: "qwen2.5-14b-instruct"