Add integration with DeepSeek API #1

Status: Open. Wants to merge 1 commit into base: main.
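In brief: this pull request wires the DeepSeek API into the RAG playground example. It registers deepseek-chat and deepseek-coder as supported remote models, adds a DeepSeek model family with its own stored API key and key-signup link, and introduces examples/rag-playground/src/llms/deepseek.ts, a small client for the DeepSeek chat completions endpoint.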
@@ -19,6 +19,7 @@ import {
 } from '../playground/user-config';
 import { textGenGpt } from '../../llms/gpt';
 import { textGenGemini } from '../../llms/gemini';
+import { textGenDeepSeek } from '../../llms/deepseek';
 import { tooltipMouseEnter, tooltipMouseLeave } from '@xiaohk/utils';
 import { hasLocalModelInCache, detectGPUDevice } from '../../llms/web-llm';

@@ -39,7 +40,9 @@ const apiKeyMap: Record<SupportedRemoteModel, string> = {
   [SupportedRemoteModel['gpt-3.5']]: 'Open AI',
   [SupportedRemoteModel['gpt-3.5-free']]: 'Open AI',
   [SupportedRemoteModel['gpt-4']]: 'Open AI',
-  [SupportedRemoteModel['gemini-pro']]: 'Gemini'
+  [SupportedRemoteModel['gemini-pro']]: 'Gemini',
+  [SupportedRemoteModel['deepseek-chat']]: 'DeepSeek',
+  [SupportedRemoteModel['deepseek-coder']]: 'DeepSeek'
 };

 const apiKeyDescriptionMap: Record<ModelFamily, TemplateResult> = {
@@ -51,6 +54,10 @@ const apiKeyDescriptionMap: Record<ModelFamily, TemplateResult> = {
     <a href="https://makersuite.google.com/" target="_blank"
       >Google AI Studio</a
     >`,
+  [ModelFamily.deepseek]: html`Get the key at
+    <a href="https://platform.deepseek.com/api_keys" target="_blank"
+      >DeepSeek Platform</a
+    >`,
   [ModelFamily.local]: html``
 };

@@ -410,6 +417,30 @@ export class MememoPanelSetting extends LitElement {
         break;
       }

+      case ModelFamily.deepseek: {
+        textGenDeepSeek(
+          apiKey,
+          requestID,
+          prompt,
+          temperature,
+          'deepseek-chat',
+          false
+        )
+          .then(value => {
+            this.showModelLoader = false;
+            this.textGenMessageHandler(
+              this.selectedModel as SupportedRemoteModel,
+              apiKey,
+              value
+            );
+          })
+          .then(
+            () => {},
+            () => {}
+          );
+        break;
+      }
+
       case ModelFamily.local: {
         break;
       }
examples/rag-playground/src/components/playground/playground.ts
@@ -15,6 +15,7 @@ import {
 import { textGenGpt } from '../../llms/gpt';
 import { textGenMememo } from '../../llms/mememo-gen';
 import { textGenGemini } from '../../llms/gemini';
+import { textGenDeepSeek } from '../../llms/deepseek';
 import { promptTemplates } from '../../config/promptTemplates';

 import type { TextGenMessage } from '../../llms/gpt';
@@ -574,6 +575,30 @@ export class MememoPlayground extends LitElement {
         break;
       }

+      case SupportedRemoteModel['deepseek-chat']: {
+        runRequest = textGenDeepSeek(
+          this.userConfig.llmAPIKeys[ModelFamily.deepseek],
+          'text-gen',
+          curPrompt,
+          temperature,
+          'deepseek-chat',
+          USE_CACHE
+        );
+        break;
+      }
+
+      case SupportedRemoteModel['deepseek-coder']: {
+        runRequest = textGenDeepSeek(
+          this.userConfig.llmAPIKeys[ModelFamily.deepseek],
+          'text-gen',
+          curPrompt,
+          temperature,
+          'deepseek-coder',
+          USE_CACHE
+        );
+        break;
+      }
+
       case SupportedRemoteModel['gemini-pro']: {
         runRequest = textGenGemini(
           this.userConfig.llmAPIKeys[ModelFamily.google],
examples/rag-playground/src/components/playground/user-config.ts
@@ -14,7 +14,9 @@ export enum SupportedRemoteModel {
   'gpt-3.5-free' = 'GPT 3.5',
   'gpt-3.5' = 'GPT 3.5 (Token)',
   'gpt-4' = 'GPT 4',
-  'gemini-pro' = 'Gemini Pro'
+  'gemini-pro' = 'Gemini Pro',
+  'deepseek-chat' = 'DeepSeek Chat',
+  'deepseek-coder' = 'DeepSeek Coder'
 }

 export const supportedModelReverseLookup: Record<
@@ -25,6 +27,8 @@
   [SupportedRemoteModel['gpt-3.5']]: 'gpt-3.5',
   [SupportedRemoteModel['gpt-4']]: 'gpt-4',
   [SupportedRemoteModel['gemini-pro']]: 'gemini-pro',
+  [SupportedRemoteModel['deepseek-chat']]: 'deepseek-chat',
+  [SupportedRemoteModel['deepseek-coder']]: 'deepseek-coder',
   [SupportedLocalModel['tinyllama-1.1b']]: 'tinyllama-1.1b',
   [SupportedLocalModel['llama-2-7b']]: 'llama-2-7b',
   [SupportedLocalModel['phi-2']]: 'phi-2',
@@ -35,6 +39,7 @@
 export enum ModelFamily {
   google = 'Google',
   openAI = 'Open AI',
+  deepseek = 'DeepSeek',
   local = 'Local'
 }

@@ -46,6 +51,8 @@
   [SupportedRemoteModel['gpt-3.5-free']]: ModelFamily.openAI,
   [SupportedRemoteModel['gpt-4']]: ModelFamily.openAI,
   [SupportedRemoteModel['gemini-pro']]: ModelFamily.google,
+  [SupportedRemoteModel['deepseek-chat']]: ModelFamily.deepseek,
+  [SupportedRemoteModel['deepseek-coder']]: ModelFamily.deepseek,
   [SupportedLocalModel['tinyllama-1.1b']]: ModelFamily.local,
   [SupportedLocalModel['llama-2-7b']]: ModelFamily.local,
   [SupportedLocalModel['gemma-2b']]: ModelFamily.local,
@@ -71,6 +78,7 @@ export class UserConfigManager {
     this.#llmAPIKeys = {
       [ModelFamily.openAI]: '',
       [ModelFamily.google]: '',
+      [ModelFamily.deepseek]: '',
       [ModelFamily.local]: ''
     };
     this.#preferredLLM = SupportedRemoteModel['gpt-3.5-free'];
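Taken together, the user-config changes let the playground treat DeepSeek like any other remote family. A minimal sketch of how the new tables are meant to be consumed (a hypothetical helper, not part of this PR; only the imported names come from user-config.ts):

import {
  SupportedRemoteModel,
  ModelFamily,
  modelFamilyMap
} from './user-config';

// Hypothetical helper: map a selected model to its family, then to the
// API key stored for that family. Both DeepSeek models resolve to the
// single key kept under ModelFamily.deepseek.
const resolveAPIKey = (
  model: SupportedRemoteModel,
  llmAPIKeys: Record<ModelFamily, string>
): string => llmAPIKeys[modelFamilyMap[model]];

// resolveAPIKey(SupportedRemoteModel['deepseek-coder'], keys) returns
// keys[ModelFamily.deepseek].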
examples/rag-playground/src/llms/deepseek.ts (new file)
import type {
  ChatCompletionRequest,
  ChatCompletion,
  ChatMessage
} from '../types/gpt-types';

export type TextGenMessage =
  | {
      command: 'finishTextGen';
      payload: {
        requestID: string;
        apiKey: string;
        result: string;
        prompt: string;
        detail: string;
      };
    }
  | {
      command: 'error';
      payload: {
        requestID: string;
        originalCommand: string;
        message: string;
      };
    };

/**
 * Use the DeepSeek API to generate text based on a given prompt
 * @param apiKey DeepSeek API key
 * @param requestID Worker request ID
 * @param prompt Prompt to send to the model
 * @param temperature Model temperature
 * @param model 'deepseek-chat' or 'deepseek-coder'
 * @param useCache Whether to reuse a response cached in localStorage
 * @param stopSequences Strings to stop the generation
 * @param detail Extra string information to include (will be returned)
 */
export const textGenDeepSeek = async (
  apiKey: string,
  requestID: string,
  prompt: string,
  temperature: number,
  model: 'deepseek-chat' | 'deepseek-coder',
  useCache: boolean = false,
  stopSequences: string[] = [],
  detail: string = ''
) => {
  // Compile the prompt into a chat format
  const message: ChatMessage = {
    role: 'user',
    content: prompt
  };

  const body: ChatCompletionRequest = {
    model,
    messages: [message],
    temperature,
    stop: stopSequences
  };

  // Check if the model output is cached
  const cachedValue = localStorage.getItem('[deepseek]' + prompt);
  if (useCache && cachedValue !== null) {
    console.log('Use cached output (text gen)');
    // Simulate a short delay so the UI loader is visible on cache hits
    await new Promise(resolve => setTimeout(resolve, 1000));
    const message: TextGenMessage = {
      command: 'finishTextGen',
      payload: {
        requestID,
        apiKey,
        result: cachedValue,
        prompt: prompt,
        detail: detail
      }
    };
    return message;
  }

  const url = 'https://api.deepseek.com/chat/completions';

  const requestOptions: RequestInit = {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Accept: 'application/json',
      Authorization: `Bearer ${apiKey}`
    },
    body: JSON.stringify(body)
  };

  try {
    const response = await fetch(url, requestOptions);
    const data = (await response.json()) as ChatCompletion;
    if (response.status !== 200) {
      throw Error('DeepSeek API error: ' + JSON.stringify(data));
    }

    if (data.choices.length < 1) {
      throw Error('DeepSeek error: ' + JSON.stringify(data));
    }

    // Send back the data to the main thread
    const message: TextGenMessage = {
      command: 'finishTextGen',
      payload: {
        requestID,
        apiKey,
        result: data.choices[0].message.content,
        prompt: prompt,
        detail: detail
      }
    };

    // Also cache the model output
    if (useCache && localStorage.getItem('[deepseek]' + prompt) === null) {
      localStorage.setItem(
        '[deepseek]' + prompt,
        data.choices[0].message.content
      );
    }
    return message;
  } catch (error) {
    // Return the error to the main thread as a message
    const message: TextGenMessage = {
      command: 'error',
      payload: {
        requestID,
        originalCommand: 'startTextGen',
        message: error as string
      }
    };
    return message;
  }
};
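For reference, a minimal usage sketch of the new module (a hypothetical caller; the request ID, prompt, and temperature values are illustrative):

import { textGenDeepSeek } from './deepseek';

// textGenDeepSeek resolves with a TextGenMessage union rather than
// rejecting, so callers branch on `command` instead of using try/catch.
const runOnce = async (apiKey: string) => {
  const message = await textGenDeepSeek(
    apiKey,
    'demo-request', // requestID
    'Write a haiku about retrieval-augmented generation.',
    0.7, // temperature
    'deepseek-chat',
    true // useCache: reuse a localStorage hit for an identical prompt
  );

  if (message.command === 'finishTextGen') {
    console.log(message.payload.result);
  } else {
    console.error(message.payload.message);
  }
};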