Merge pull request #63 from vicentefelipechile/master
Added support for Dalle and some changes
Zain-ul-din authored Oct 24, 2024
2 parents 8c265d4 + 2b9ffb3 commit 0fb8b2a
Showing 7 changed files with 58 additions and 33 deletions.
3 changes: 2 additions & 1 deletion src/baileys/env.ts
@@ -21,6 +21,7 @@ interface EnvInterface {

// Services
// // OpenAI
OPENAI_MODEL: string;
OPENAI_PREFIX?: string;
OPENAI_ENABLED: boolean;
OPENAI_ICON_PREFIX?: string;
@@ -59,7 +60,7 @@ export const ENV: EnvInterface = {
API_KEY_STABILITY: process.env.API_KEY_STABILITY,
MONGO_ENABLED: process.env.MONGO_ENABLED === 'True',
MONGO_URL: process.env.MONGO_URL,

OPENAI_MODEL: process.env.OPENAI_MODEL || 'gpt-3.5-turbo',
OPENAI_PREFIX: process.env.OPENAI_PREFIX,
OPENAI_ENABLED: process.env.OPENAI_ENABLED === 'True',
OPENAI_ICON_PREFIX: process.env.OPENAI_ICON_PREFIX,
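
The handler and model changes below also read ENV.DALLE_ENABLED, ENV.DALLE_PREFIX, ENV.DALLE_USE_3 and ENV.API_KEY_OPENAI_DALLE, which are presumably declared elsewhere in env.ts and are not part of the visible hunks. As a hedged sketch only, following the same parsing conventions as the lines above, those entries would look roughly like this (the field names come from the diff; the optionality and 'True' parsing are assumptions):

// Hypothetical sketch -- not part of this commit's visible hunks.
interface DalleEnv {
  DALLE_ENABLED: boolean;
  DALLE_PREFIX?: string;
  DALLE_USE_3: boolean;
  API_KEY_OPENAI_DALLE?: string;
}

const dalleEnv: DalleEnv = {
  DALLE_ENABLED: process.env.DALLE_ENABLED === 'True',
  DALLE_PREFIX: process.env.DALLE_PREFIX,
  DALLE_USE_3: process.env.DALLE_USE_3 === 'True',
  API_KEY_OPENAI_DALLE: process.env.API_KEY_OPENAI_DALLE
};
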
32 changes: 24 additions & 8 deletions src/baileys/handlers/message.ts
@@ -10,27 +10,43 @@ import { GeminiModel } from './../../models/GeminiModel';
import { FluxModel } from './../../models/FluxModel';
import { ENV } from '../env';

interface ModelByPrefix {
modelName: AIModels;
prefix: string;
}

/* Declare models */
const modelTable: Record<AIModels, any> = {
ChatGPT: ENV.OPENAI_ENABLED ? new ChatGPTModel() : null,
Gemini: ENV.GEMINI_ENABLED ? new GeminiModel() : null,
FLUX: ENV.HF_ENABLED ? new FluxModel() : null,
Stability: ENV.STABILITY_ENABLED ? new StabilityModel() : null
Stability: ENV.STABILITY_ENABLED ? new StabilityModel() : null,
Dalle: null
};

if (ENV.DALLE_ENABLED && ENV.OPENAI_ENABLED) {
modelTable.Dalle = modelTable.ChatGPT;
} else if (ENV.DALLE_ENABLED && !ENV.OPENAI_ENABLED) {
modelTable.Dalle = new ChatGPTModel();
}

// handles message
export async function handleMessage({ client, msg, metadata }: MessageHandlerParams) {
const modelToUse = Util.getModelByPrefix(metadata.text) as AIModels;
if (!modelToUse) {
const modelInfo: ModelByPrefix | undefined = Util.getModelByPrefix(metadata.text);
if (!modelInfo) {
if (ENV.Debug) {
console.log("[Debug] Model '" + (modelToUse as string) + "' not found");
console.log("[Debug] Model '" + modelInfo + "' not found");
}

return;
}

const model = modelTable[modelToUse];
if (!model) return;
const model = modelTable[modelInfo.modelName];
if (!model) {
if (ENV.Debug) {
console.log("[Debug] Model '" + modelInfo.modelName + "' is disabled or not found");
}
return;
}

const prompt: string = metadata.text.split(' ').slice(1).join(' ');
const messageResponse = await client.sendMessage(
@@ -40,7 +56,7 @@ export async function handleMessage({ client, msg, metadata }: MessageHandlerPar
);

model.sendMessage(
{ sender: metadata.sender, prompt: prompt, metadata: metadata },
{ sender: metadata.sender, prompt: prompt, metadata: metadata, prefix: modelInfo.prefix },
async (res: any, err: any) => {
if (err) {
client.sendMessage(metadata.remoteJid, {
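
Two notes on the new wiring. First, modelTable.Dalle reuses the already-constructed ChatGPTModel when both services are enabled, so DALL·E requests and chat requests share one OpenAI client (and the model's session bookkeeping); a dedicated ChatGPTModel is only created when DALL·E is enabled without the chat model, and the entry stays null when DALL·E is disabled. The two-branch if above is equivalent to this sketch, shown only to make the decision explicit (same names as the file above, not the committed code):

// Equivalent sketch of the Dalle wiring, not the committed code.
modelTable.Dalle = ENV.DALLE_ENABLED
  ? (ENV.OPENAI_ENABLED ? modelTable.ChatGPT : new ChatGPTModel())
  : null; // stays null when DALL·E is disabled

Second, handleMessage now forwards modelInfo.prefix into sendMessage, which is what lets the shared ChatGPTModel tell an image request apart from a chat request (see OpenAIModel.ts below).
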
35 changes: 23 additions & 12 deletions src/models/OpenAIModel.ts
@@ -28,17 +28,17 @@ class ChatGPTModel extends AIModel<AIArguments, AIHandle> {
});

this.Dalle = new OpenAI({
apiKey: ENV.API_KEY_OPENAI_DALLE
apiKey: ENV.API_KEY_OPENAI_DALLE || ENV.API_KEY_OPENAI
});

this.Dalle3 = config.models.ChatGPT?.settings.dalle_use_3;
this.Dalle3 = ENV.DALLE_USE_3;
}

/* Methods */
public async generateCompletion(user: string): Promise<ChatCompletionMessage> {
const completion = await this.OpenAI.chat.completions.create({
messages: this.history[user],
model: config.models.ChatGPT?.modelToUse?.toString() || 'gpt-3.5-turbo'
model: ENV.OPENAI_MODEL
});

const message = completion.choices[0].message;
@@ -60,18 +60,29 @@ class ChatGPTModel extends AIModel<AIArguments, AIHandle> {
return { url: resInfo.url as string, caption: resInfo.revised_prompt as string };
}

public async sendMessage({ sender, prompt }: AIArguments, handle: AIHandle): Promise<any> {
public async sendMessage(
{ sender, prompt, prefix }: AIArguments,
handle: AIHandle
): Promise<any> {
try {
if (!this.sessionExists(sender)) {
this.sessionCreate(sender);
// Use Dalle
if (ENV.DALLE_ENABLED && prefix === ENV.DALLE_PREFIX) {
const res = await this.generateImage(prompt);
await handle(res);

// Use ChatGPT
} else {
if (!this.sessionExists(sender)) {
this.sessionCreate(sender);
}
this.sessionAddMessage(sender, { role: 'user', content: prompt });

const completion = await this.generateCompletion(sender);
const res = completion.content || '';
await handle({ text: res });
}
this.sessionAddMessage(sender, { role: 'user', content: prompt });

const completion = await this.generateCompletion(sender);
const res = completion.content || '';
await handle({ text: res });
} catch (err) {
await handle('', 'An error occur please see console for more information.');
await handle('', err as string);
}
}
}
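
sendMessage now branches on the prefix that matched: when DALL·E is enabled and the message used DALLE_PREFIX, the prompt goes to generateImage and the handler receives a url/caption payload; otherwise the existing session-based chat completion path runs unchanged. generateImage itself is outside the visible hunks; based on the fields this diff does show (this.Dalle, this.Dalle3, resInfo.url, resInfo.revised_prompt) and the OpenAI Node SDK v4 images API, a hedged, self-contained sketch of that image path looks like this -- the committed method may differ in details such as image size or error handling:

import OpenAI from 'openai';

// Sketch only: mirrors what generateImage appears to do, not the committed code.
async function generateImageSketch(
  dalle: OpenAI,        // corresponds to this.Dalle in ChatGPTModel
  useDalle3: boolean,   // corresponds to this.Dalle3 (ENV.DALLE_USE_3)
  prompt: string
): Promise<{ url: string; caption: string }> {
  const response = await dalle.images.generate({
    model: useDalle3 ? 'dall-e-3' : 'dall-e-2', // which DALL·E version is used when Dalle3 is false is an assumption
    prompt: prompt,
    n: 1
  });

  const resInfo = response.data[0];
  // dall-e-3 also returns a revised_prompt, which sendMessage uses as the image caption.
  return { url: resInfo.url as string, caption: resInfo.revised_prompt as string };
}

The other behavioural change in this hunk is error reporting: instead of a fixed "see console" string, the caught error itself is passed to the handler (err as string), so the WhatsApp reply now surfaces the underlying error.
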
2 changes: 1 addition & 1 deletion src/types/AiModels.d.ts
@@ -1,2 +1,2 @@
export type AIModels = 'ChatGPT' | 'Gemini' | 'FLUX' | 'Stability';
export type AIModels = 'ChatGPT' | 'Gemini' | 'FLUX' | 'Stability' | 'Dalle' ;
export type AIModelsName = Exclude<AIModels, 'Custom'>;
3 changes: 0 additions & 3 deletions src/types/Config.d.ts
@@ -3,15 +3,12 @@ import { AIModels, AIModelsName } from './AiModels';
export interface IModelConfig {
prefix: string | undefined;
enable: boolean;
modelToUse?: string;
settings?: any;
}

export interface IModelType extends IModelConfig {
modelName: string;
prefix: string;
context: string;
modelToUse?: AIModelsName;
includeSender?: boolean;
}

6 changes: 4 additions & 2 deletions src/util/Util.ts
@@ -4,15 +4,17 @@ import { AIModels } from '../types/AiModels';
import config from '../whatsapp-ai.config';

export class Util {
public static getModelByPrefix(message: string): AIModels | undefined {
public static getModelByPrefix(
message: string
): { modelName: AIModels; prefix: string } | undefined {
for (let [modelName, model] of Object.entries(config.models)) {
const currentModel = model as IModelConfig;
if (!currentModel.enable) continue;

if (
message.toLocaleLowerCase().startsWith((currentModel.prefix as string).toLocaleLowerCase())
) {
return modelName as AIModels;
return { modelName: modelName as AIModels, prefix: currentModel.prefix as string };
}
}

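
getModelByPrefix now returns both the matched model name and the prefix that matched it, which is what handleMessage forwards so that OpenAIModel can distinguish a DALL·E request from a chat request. A quick usage sketch -- the '!chatgpt' and '!dalle' prefixes are placeholders, since the real values come from OPENAI_PREFIX and DALLE_PREFIX:

import { Util } from './Util'; // path assumes a caller inside src/util; adjust as needed

// Assuming config.models has ChatGPT enabled with prefix '!chatgpt'
// and Dalle enabled with prefix '!dalle' (illustrative values only).
Util.getModelByPrefix('!dalle a watercolor of a lighthouse');
// => { modelName: 'Dalle', prefix: '!dalle' }

Util.getModelByPrefix('!chatgpt summarize this article');
// => { modelName: 'ChatGPT', prefix: '!chatgpt' }

Util.getModelByPrefix('hello, no prefix here');
// => undefined, so handleMessage returns early
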
10 changes: 4 additions & 6 deletions src/whatsapp-ai.config.ts
@@ -8,12 +8,10 @@ const config: Config = {
ChatGPT: {
prefix: ENV.OPENAI_PREFIX,
enable: ENV.OPENAI_ENABLED,
modelToUse: 'gpt-3.5-turbo', // See all models here https://platform.openai.com/docs/models
settings: {
dalle_enabled: ENV.DALLE_ENABLED,
dalle_prefix: ENV.DALLE_PREFIX,
dalle_use_3: ENV.DALLE_USE_3
}
},
Dalle: {
prefix: ENV.DALLE_PREFIX,
enable: ENV.DALLE_ENABLED,
},
Gemini: {
prefix: ENV.GEMINI_PREFIX,
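
With the dalle_* settings removed, Dalle is now a top-level model entry and is matched by Util.getModelByPrefix like any other model. Roughly, the relevant slice of the resulting config looks like this at runtime (values depend entirely on the environment; the prefixes shown are placeholders):

// Illustrative shape only -- real values are read from ENV at startup.
const models = {
  ChatGPT: { prefix: '!chatgpt', enable: true },
  Dalle:   { prefix: '!dalle',   enable: true },
  Gemini:  { prefix: '!gemini',  enable: false }
  // FLUX, Stability, ... follow the same pattern
};
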
