diff --git a/src/baileys/env.ts b/src/baileys/env.ts index b2b6552..dafa384 100644 --- a/src/baileys/env.ts +++ b/src/baileys/env.ts @@ -21,6 +21,7 @@ interface EnvInterface { // Services // // OpenAI + OPENAI_MODEL: string; OPENAI_PREFIX?: string; OPENAI_ENABLED: boolean; OPENAI_ICON_PREFIX?: string; @@ -59,7 +60,7 @@ export const ENV: EnvInterface = { API_KEY_STABILITY: process.env.API_KEY_STABILITY, MONGO_ENABLED: process.env.MONGO_ENABLED === 'True', MONGO_URL: process.env.MONGO_URL, - + OPENAI_MODEL: process.env.OPENAI_MODEL || 'gpt-3.5-turbo', OPENAI_PREFIX: process.env.OPENAI_PREFIX, OPENAI_ENABLED: process.env.OPENAI_ENABLED === 'True', OPENAI_ICON_PREFIX: process.env.OPENAI_ICON_PREFIX, diff --git a/src/baileys/handlers/message.ts b/src/baileys/handlers/message.ts index 850201b..4ab4e6c 100644 --- a/src/baileys/handlers/message.ts +++ b/src/baileys/handlers/message.ts @@ -10,27 +10,44 @@ import { GeminiModel } from './../../models/GeminiModel'; import { FluxModel } from './../../models/FluxModel'; import { ENV } from '../env'; +interface ModelByPrefix { + modelName: AIModels; + prefix: string; +} + /* Declare models */ const modelTable: Record = { ChatGPT: ENV.OPENAI_ENABLED ? new ChatGPTModel() : null, Gemini: ENV.GEMINI_ENABLED ? new GeminiModel() : null, FLUX: ENV.HF_ENABLED ? new FluxModel() : null, - Stability: ENV.STABILITY_ENABLED ? new StabilityModel() : null + Stability: ENV.STABILITY_ENABLED ? 
new StabilityModel() : null, + Dalle: null }; +if (ENV.DALLE_ENABLED && ENV.OPENAI_ENABLED) { + modelTable.Dalle = modelTable.ChatGPT; +} else if (ENV.DALLE_ENABLED && !ENV.OPENAI_ENABLED) { + modelTable.Dalle = new ChatGPTModel(); +} + // handles message export async function handleMessage({ client, msg, metadata }: MessageHandlerParams) { - const modelToUse = Util.getModelByPrefix(metadata.text) as AIModels; - if (!modelToUse) { + const modelInfo: ModelByPrefix | undefined = Util.getModelByPrefix(metadata.text); + if (!modelInfo) { if (ENV.Debug) { - console.log("[Debug] Model '" + (modelToUse as string) + "' not found"); + console.log("[Debug] Model '" + modelInfo + "' not found"); } return; } - const model = modelTable[modelToUse]; - if (!model) return; + const model = modelTable[modelInfo.modelName]; + if (!model) { + if (ENV.Debug) { + console.log("[Debug] Model '" + modelInfo.modelName + "' is disabled or not found"); + } + return; + } const prompt: string = metadata.text.split(' ').slice(1).join(' '); const messageResponse = await client.sendMessage( @@ -40,7 +56,7 @@ export async function handleMessage({ client, msg, metadata }: MessageHandlerPar ); model.sendMessage( - { sender: metadata.sender, prompt: prompt, metadata: metadata }, + { sender: metadata.sender, prompt: prompt, metadata: metadata, prefix: modelInfo.prefix }, async (res: any, err: any) => { if (err) { client.sendMessage(metadata.remoteJid, { diff --git a/src/models/OpenAIModel.ts b/src/models/OpenAIModel.ts index 070cdb2..8d99e11 100644 --- a/src/models/OpenAIModel.ts +++ b/src/models/OpenAIModel.ts @@ -28,17 +28,17 @@ class ChatGPTModel extends AIModel { }); this.Dalle = new OpenAI({ - apiKey: ENV.API_KEY_OPENAI_DALLE + apiKey: ENV.API_KEY_OPENAI_DALLE || ENV.API_KEY_OPENAI }); - this.Dalle3 = config.models.ChatGPT?.settings.dalle_use_3; + this.Dalle3 = ENV.DALLE_USE_3; } /* Methods */ public async generateCompletion(user: string): Promise { const completion = await 
this.OpenAI.chat.completions.create({ messages: this.history[user], - model: config.models.ChatGPT?.modelToUse?.toString() || 'gpt-3.5-turbo' + model: ENV.OPENAI_MODEL }); const message = completion.choices[0].message; @@ -60,18 +60,29 @@ class ChatGPTModel extends AIModel { return { url: resInfo.url as string, caption: resInfo.revised_prompt as string }; } - public async sendMessage({ sender, prompt }: AIArguments, handle: AIHandle): Promise { + public async sendMessage( + { sender, prompt, prefix }: AIArguments, + handle: AIHandle + ): Promise { try { - if (!this.sessionExists(sender)) { - this.sessionCreate(sender); + // Use Dalle + if (ENV.DALLE_ENABLED && prefix === ENV.DALLE_PREFIX) { + const res = await this.generateImage(prompt); + await handle(res); + + // Use ChatGPT + } else { + if (!this.sessionExists(sender)) { + this.sessionCreate(sender); + } + this.sessionAddMessage(sender, { role: 'user', content: prompt }); + + const completion = await this.generateCompletion(sender); + const res = completion.content || ''; + await handle({ text: res }); } - this.sessionAddMessage(sender, { role: 'user', content: prompt }); - - const completion = await this.generateCompletion(sender); - const res = completion.content || ''; - await handle({ text: res }); } catch (err) { - await handle('', 'An error occur please see console for more information.'); + await handle('', String(err)); } } } diff --git a/src/types/AiModels.d.ts b/src/types/AiModels.d.ts index 938cc0e..550e0a7 100644 --- a/src/types/AiModels.d.ts +++ b/src/types/AiModels.d.ts @@ -1,2 +1,2 @@ -export type AIModels = 'ChatGPT' | 'Gemini' | 'FLUX' | 'Stability'; +export type AIModels = 'ChatGPT' | 'Gemini' | 'FLUX' | 'Stability' | 'Dalle'; export type AIModelsName = Exclude; diff --git a/src/types/Config.d.ts b/src/types/Config.d.ts index 7965bf8..6c356dc 100644 --- a/src/types/Config.d.ts +++ b/src/types/Config.d.ts @@ -3,15 +3,12 @@ import { AIModels, AIModelsName } from './AiModels'; export interface 
IModelConfig { prefix: string | undefined; enable: boolean; - modelToUse?: string; - settings?: any; } export interface IModelType extends IModelConfig { modelName: string; prefix: string; context: string; - modelToUse?: AIModelsName; includeSender?: boolean; } diff --git a/src/util/Util.ts b/src/util/Util.ts index c937ea0..7ddfd44 100644 --- a/src/util/Util.ts +++ b/src/util/Util.ts @@ -4,7 +4,9 @@ import { AIModels } from '../types/AiModels'; import config from '../whatsapp-ai.config'; export class Util { - public static getModelByPrefix(message: string): AIModels | undefined { + public static getModelByPrefix( + message: string + ): { modelName: AIModels; prefix: string } | undefined { for (let [modelName, model] of Object.entries(config.models)) { const currentModel = model as IModelConfig; if (!currentModel.enable) continue; @@ -12,7 +14,7 @@ export class Util { if ( message.toLocaleLowerCase().startsWith((currentModel.prefix as string).toLocaleLowerCase()) ) { - return modelName as AIModels; + return { modelName: modelName as AIModels, prefix: currentModel.prefix as string }; } } diff --git a/src/whatsapp-ai.config.ts b/src/whatsapp-ai.config.ts index e8f74ee..1331c7f 100644 --- a/src/whatsapp-ai.config.ts +++ b/src/whatsapp-ai.config.ts @@ -8,12 +8,10 @@ const config: Config = { ChatGPT: { prefix: ENV.OPENAI_PREFIX, enable: ENV.OPENAI_ENABLED, - modelToUse: 'gpt-3.5-turbo', // See all models here https://platform.openai.com/docs/models - settings: { - dalle_enabled: ENV.DALLE_ENABLED, - dalle_prefix: ENV.DALLE_PREFIX, - dalle_use_3: ENV.DALLE_USE_3 - } + }, + Dalle: { + prefix: ENV.DALLE_PREFIX, + enable: ENV.DALLE_ENABLED, }, Gemini: { prefix: ENV.GEMINI_PREFIX,