From 6169b4542a7b1c0dd449de27cc75e6e1f385a795 Mon Sep 17 00:00:00 2001
From: BBC-Esq
Date: Thu, 5 Sep 2024 11:20:26 -0400
Subject: [PATCH] add deepseek and yi coder models

---
 src/constants.py   | 24 ++++++++++++++++++++++++
 src/module_chat.py | 28 +++++++++++++++++++++++++++-
 2 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/src/constants.py b/src/constants.py
index c84e2737..5e4e7119 100644
--- a/src/constants.py
+++ b/src/constants.py
@@ -9,7 +9,9 @@
     'Dolphin-Llama 3.1 - 8b': 8192,
     'Hermes-3-Llama-3.1 - 8b': 8192,
     'Dolphin-Qwen 2 - 7b': 8192,
+    'Yi Coder - 9b': 8192,
     'Dolphin-Mistral-Nemo - 12b': 8192,
+    'DeepSeek Coder v2 - 16b': 8192,
     'Internlm2_5 - 20b': 8192,
 }
 
@@ -341,6 +343,18 @@
         'function': 'Dolphin_Yi_1_5_9b',
         'precision': 'bfloat16'
     },
+
+    'Yi Coder - 9b': {
+        'model': 'Yi Coder - 9b',
+        'repo_id': '01-ai/Yi-Coder-9B-Chat',
+        'cache_dir': '01-ai--Yi-Coder-9B-Chat',
+        'tokens_per_second': 30.85,
+        'context_length': 8192,
+        'avg_vram_usage': '7.2 GB',
+        'function': 'Yi_Coder_9b',
+        'precision': 'bfloat16'
+    },
+
     'Orca 2 - 13b': {
         'model': 'Orca 2 - 13b',
         'repo_id': 'microsoft/Orca-2-13b',
@@ -411,6 +425,16 @@
         'function': 'Dolphin_Mistral_Nemo',
         'precision': 'bfloat16'
     },
+    'DeepSeek Coder v2 - 16b': {
+        'model': 'DeepSeek Coder v2 - 16b',
+        'repo_id': 'deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct',
+        'cache_dir': 'deepseek-ai--DeepSeek-Coder-V2-Lite-Instruct',
+        'tokens_per_second': 35.86,
+        'context_length': 8192,
+        'avg_vram_usage': '10.0 GB',
+        'function': 'DeepSeek_Coder_v2_lite',
+        'precision': 'bfloat16'
+    },
     'Internlm2_5 - 20b': {
         'model': 'Internlm2_5 - 20b',
         'repo_id': 'internlm/internlm2_5-20b-chat',
diff --git a/src/module_chat.py b/src/module_chat.py
index ed37440a..68e05b71 100644
--- a/src/module_chat.py
+++ b/src/module_chat.py
@@ -16,7 +16,7 @@ def get_max_length(model_name):
 def get_generation_settings(max_length):
     return {
         'max_length': max_length,
-        'max_new_tokens': 1024,
+        'max_new_tokens': 2048,
         'do_sample': False,
         'num_beams': 1,
         'use_cache': True,
@@ -313,6 +313,19 @@ def create_prompt(self, augmented_query):
     <|im_start|>assistant
     """
 
+class Yi_Coder_9b(BaseModel):
+    def __init__(self, generation_settings):
+        model_info = CHAT_MODELS['Yi Coder - 9b']
+        super().__init__(model_info, bnb_bfloat16_settings, generation_settings)
+
+    def create_prompt(self, augmented_query):
+        return f"""<|endoftext|><|im_start|>system
+    {system_message}<|im_end|>
+    <|im_start|>user
+    {augmented_query}<|im_end|>
+    <|im_start|>assistant
+    """
+
 class InternLM2_5_7b(BaseModel):
     def __init__(self, generation_settings):
         model_info = CHAT_MODELS['Internlm2_5 - 7b']
@@ -353,6 +366,19 @@ def create_prompt(self, augmented_query):
     {augmented_query}[/INST]"""
 
 
+
+class DeepSeek_Coder_v2_lite(BaseModel):
+    def __init__(self, generation_settings):
+        model_info = CHAT_MODELS['DeepSeek Coder v2 - 16b']
+        super().__init__(model_info, bnb_float16_settings, generation_settings)
+
+    def create_prompt(self, augmented_query):
+        return f"""<|begin▁of▁sentence|>{system_message}
+    User: {augmented_query}
+
+    Assistant:"""
+
+
 class Neural_Chat_7b(BaseModel):
     def __init__(self, generation_settings):
         model_info = CHAT_MODELS['Neural-Chat - 7b']
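
For reference, a minimal sketch (not part of the patch) of how the two new registry entries get consumed. The CHAT_MODELS entry shapes, the generation-settings defaults, the prompt templates, and the dispatch-by-'function'-name pattern are taken from the patch above; BaseModel, system_message, and the bitsandbytes settings objects belong to the surrounding repo, so they are replaced with simplified stand-ins here to keep the sketch self-contained and runnable.

# Self-contained sketch of the dispatch pattern assumed by the patch.
# CHAT_MODELS entries and get_generation_settings mirror the patched files;
# SYSTEM_MESSAGE and the model classes are stand-ins for the repo's
# system_message / BaseModel machinery (no weights are loaded here).

SYSTEM_MESSAGE = "You are a helpful assistant."  # stand-in for the repo's system_message

# Trimmed copies of the two entries added to CHAT_MODELS in src/constants.py.
CHAT_MODELS = {
    'Yi Coder - 9b': {
        'repo_id': '01-ai/Yi-Coder-9B-Chat',
        'context_length': 8192,
        'function': 'Yi_Coder_9b',  # names the class module_chat.py instantiates
    },
    'DeepSeek Coder v2 - 16b': {
        'repo_id': 'deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct',
        'context_length': 8192,
        'function': 'DeepSeek_Coder_v2_lite',
    },
}

def get_generation_settings(max_length):
    # Same defaults as the patched module_chat.py, including the
    # max_new_tokens bump from 1024 to 2048.
    return {
        'max_length': max_length,
        'max_new_tokens': 2048,
        'do_sample': False,
        'num_beams': 1,
        'use_cache': True,
    }

class Yi_Coder_9b:
    # Prompt-only stand-in; the real class also loads weights via BaseModel.
    def __init__(self, generation_settings):
        self.generation_settings = generation_settings

    def create_prompt(self, augmented_query):
        # ChatML template with Yi's <|endoftext|> prefix, as in the patch.
        return (f"<|endoftext|><|im_start|>system\n{SYSTEM_MESSAGE}<|im_end|>\n"
                f"<|im_start|>user\n{augmented_query}<|im_end|>\n"
                f"<|im_start|>assistant\n")

class DeepSeek_Coder_v2_lite:
    def __init__(self, generation_settings):
        self.generation_settings = generation_settings

    def create_prompt(self, augmented_query):
        # DeepSeek's plain User/Assistant template, as in the patch.
        return (f"<|begin▁of▁sentence|>{SYSTEM_MESSAGE}\n"
                f"User: {augmented_query}\n\nAssistant:")

def load_model(display_name):
    # Resolve the entry's 'function' field to a class, akin to the lookup
    # the repo performs when the user selects a model by display name.
    info = CHAT_MODELS[display_name]
    settings = get_generation_settings(info['context_length'])
    cls = globals()[info['function']]
    return cls(settings)

if __name__ == '__main__':
    model = load_model('Yi Coder - 9b')
    print(model.create_prompt('Write a binary search in Python.'))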