From de7f72c144768117bf6cef6da4323bbd492555a7 Mon Sep 17 00:00:00 2001 From: patcher99 Date: Sat, 16 Mar 2024 17:32:02 +0530 Subject: [PATCH] add azure openai --- package-lock.json | 174 ++++++++++++++++++ package.json | 3 +- src/azure_openai.js | 343 ++++++++++++++++++++++++++++++++++++ src/index.js | 5 +- tests/azure-openai.test.mjs | 92 ++++++++++ 5 files changed, 615 insertions(+), 2 deletions(-) create mode 100644 src/azure_openai.js create mode 100644 tests/azure-openai.test.mjs diff --git a/package-lock.json b/package-lock.json index 08203b3..b8ab140 100644 --- a/package-lock.json +++ b/package-lock.json @@ -13,6 +13,7 @@ }, "devDependencies": { "@anthropic-ai/sdk": "^0.17.1", + "@azure/openai": "^1.0.0-beta.11", "@mistralai/mistralai": "^0.1.3", "chai": "^5.0.3", "cohere-ai": "^7.7.3", @@ -48,6 +49,135 @@ "web-streams-polyfill": "^3.2.1" } }, + "node_modules/@azure-rest/core-client": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@azure-rest/core-client/-/core-client-1.3.0.tgz", + "integrity": "sha512-OmAB+qbWZJk4p9+aqF3zM3J3J371RTdz1gRvz4uxl/+MGLKfKBMzZqVkAUIY8h1qzux4ypozCiRPJ3wdWyPDUg==", + "dev": true, + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.3.0", + "@azure/core-rest-pipeline": "^1.5.0", + "@azure/core-tracing": "^1.0.1", + "@azure/core-util": "^1.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/abort-controller": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@azure/abort-controller/-/abort-controller-2.1.0.tgz", + "integrity": "sha512-SYtcG13aiV7znycu6plCClWUzD9BBtfnsbIxT89nkkRvQRB4n0kuZyJJvJ7hqdKOn7x7YoGKZ9lVStLJpLnOFw==", + "dev": true, + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-auth": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@azure/core-auth/-/core-auth-1.7.0.tgz", + "integrity": 
"sha512-OuDVn9z2LjyYbpu6e7crEwSipa62jX7/ObV/pmXQfnOG8cHwm363jYtg3FSX3GB1V7jsIKri1zgq7mfXkFk/qw==", + "dev": true, + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-util": "^1.1.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-rest-pipeline": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/@azure/core-rest-pipeline/-/core-rest-pipeline-1.15.0.tgz", + "integrity": "sha512-6kBQwE75ZVlOjBbp0/PX0fgNLHxoMDxHe3aIPV/RLVwrIDidxTbsHtkSbPNTkheMset3v9s1Z08XuMNpWRK/7w==", + "dev": true, + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.4.0", + "@azure/core-tracing": "^1.0.1", + "@azure/core-util": "^1.3.0", + "@azure/logger": "^1.0.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-sse": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@azure/core-sse/-/core-sse-2.1.0.tgz", + "integrity": "sha512-wH5FEaaAYxfCtkbJQ07BjLhIpjyuVutaDbMvtsFmOfkQHZGNf+3VMMaFAkQG2S2qGaIahkLxOzp/zJ3KEqATYw==", + "dev": true, + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-tracing": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@azure/core-tracing/-/core-tracing-1.1.0.tgz", + "integrity": "sha512-MVeJvGHB4jmF7PeHhyr72vYJsBJ3ff1piHikMgRaabPAC4P3rxhf9fm42I+DixLysBunskJWhsDQD2A+O+plkQ==", + "dev": true, + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/core-util": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@azure/core-util/-/core-util-1.8.0.tgz", + "integrity": "sha512-w8NrGnrlGDF7fj36PBnJhGXDK2Y3kpTOgL7Ksb5snEHXq/3EAbKYOp1yqme0yWCUlSDq5rjqvxSBAJmsqYac3w==", + "dev": true, + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "tslib": "^2.6.2" + }, + "engines": { + 
"node": ">=18.0.0" + } + }, + "node_modules/@azure/logger": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@azure/logger/-/logger-1.1.0.tgz", + "integrity": "sha512-BnfkfzVEsrgbVCtqq0RYRMePSH2lL/cgUUR5sYRF4yNN10zJZq/cODz0r89k3ykY83MqeM3twR292a3YBNgC3w==", + "dev": true, + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@azure/openai": { + "version": "1.0.0-beta.11", + "resolved": "https://registry.npmjs.org/@azure/openai/-/openai-1.0.0-beta.11.tgz", + "integrity": "sha512-OXS27xkG1abiGf5VZUKnkJKr1VCo8+6EUrTGW5aSVjc5COqX8jAUqVAOZsQVCHBdtWYSBULlZkc0ncKMTRQAiQ==", + "dev": true, + "dependencies": { + "@azure-rest/core-client": "^1.1.7", + "@azure/core-auth": "^1.4.0", + "@azure/core-rest-pipeline": "^1.13.0", + "@azure/core-sse": "^2.0.0", + "@azure/core-util": "^1.4.0", + "@azure/logger": "^1.0.3", + "tslib": "^2.4.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, "node_modules/@eslint-community/eslint-utils": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", @@ -239,6 +369,18 @@ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, + "node_modules/agent-base": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.0.tgz", + "integrity": "sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==", + "dev": true, + "dependencies": { + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/agentkeepalive": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", @@ -1255,6 +1397,32 @@ "he": "bin/he" } }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + 
"dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.4.tgz", + "integrity": "sha512-wlwpilI7YdjSkWaQ/7omYBMTliDcmCN8OLihO6I9B86g06lMyAoqgoDpV0XqoaPOKj+0DIdAvnsWfyAAhmimcg==", + "dev": true, + "dependencies": { + "agent-base": "^7.0.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/humanize-ms": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", @@ -2164,6 +2332,12 @@ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", "dev": true }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==", + "dev": true + }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", diff --git a/package.json b/package.json index 2d18201..d61135f 100644 --- a/package.json +++ b/package.json @@ -27,12 +27,13 @@ "stream": "^0.0.2" }, "devDependencies": { + "@azure/openai": "^1.0.0-beta.11", "@anthropic-ai/sdk": "^0.17.1", + "@mistralai/mistralai": "^0.1.3", "chai": "^5.0.3", "cohere-ai": "^7.7.3", "eslint": "^8.56.0", "eslint-config-google": "^0.14.0", - "@mistralai/mistralai": "^0.1.3", "mocha": "^10.2.0", "openai": "^4.26.0" } diff --git a/src/azure_openai.js b/src/azure_openai.js new file mode 100644 index 0000000..5963946 --- /dev/null +++ b/src/azure_openai.js @@ -0,0 +1,343 @@ +import {sendData} from './helpers.js'; +import { Readable } from 'stream'; + +/** + * Initializes Azure OpenAI functionality with performance tracking and data logging. + * + * @param {Object} llm - The Azure OpenAI function object. 
+ * @param {string} dokuUrl - The URL for logging data.
+ * @param {string} apiKey - The authentication apiKey.
+ * @param {string} environment - The environment.
+ * @param {string} applicationName - The application name.
+ * @param {boolean} skipResp - To skip waiting for API response.
+ * @return {void}
+ *
+ * @jsondoc
+ * {
+ * "description": "Performance tracking for Azure OpenAI APIs",
+ * "params": [
+ * {"name": "llm", "type": "Object", "description": "Azure OpenAI function."},
+ * {"name": "dokuUrl", "type": "string", "description": "The URL"},
+ * {"name": "apiKey", "type": "string", "description": "The auth apiKey."},
+ * {"name": "environment", "type": "string", "description": "The environment."},
+ * {"name": "applicationName", "type": "string", "description": "The application name."},
+ * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API response."}
+ * ],
+ * "returns": {"type": "void"},
+ * "example": {
+ * "description": "Example usage of init function.",
+ * "code": "init(azureOpenAIFunc, 'https://example.com/log', 'authToken');"
+ * }
+ * }
+ */
+export default function initAzureOpenAI({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }) {
+ // Save original method
+ const originalChatCreate = llm.chat.completions.create;
+ const originalCompletionsCreate = llm.completions.create;
+ const originalEmbeddingsCreate = llm.embeddings.create;
+ const originalImagesCreate = llm.images.generate;
+
+ // Define wrapped method
+ llm.chat.completions.create = async function(params) {
+ const start = performance.now();
+ let streaming = params.stream || false;
+ if (streaming) {
+ // Call original method
+ const originalResponseStream = await originalChatCreate.call(this, params);
+
+ // Create a pass-through stream
+ const passThroughStream = new Readable({
+ read() {},
+ objectMode: true // Set to true because the chunks are objects
+ });
+
+ let dataResponse = '';
+ let chatModel = '';
+
+ // Immediately-invoked
async function to handle streaming + (async () => { + for await (const chunk of originalResponseStream) { + var content = chunk.choices[0]?.delta?.content; + if (content) { + dataResponse += content; + passThroughStream.push(chunk); // Push chunk to the pass-through stream + } + var responseId = chunk.id; + chatModel = chunk.model; + } + passThroughStream.push(null); // Signal end of the pass-through stream + + // Process response data after stream has ended + const end = performance.now(); + const duration = (end - start) / 1000; + + let formattedMessages = []; + for (let message of params.messages) { + let role = message.role; + let content = message.content; + + if (Array.isArray(content)) { + let contentStr = content.map(item => { + if (item.type) { + return `${item.type}: ${item.text || item.image_url}`; + } else { + return `text: ${item.text}`; + } + }).join(", "); + formattedMessages.push(`${role}: ${contentStr}`); + } else { + formattedMessages.push(`${role}: ${content}`); + } + } + let prompt = formattedMessages.join("\n"); + + // Prepare the data object for Doku + const data = { + llmReqId: responseId, + environment: environment, + applicationName: applicationName, + sourceLanguage: 'Javascript', + endpoint: 'azure.chat.completions', + skipResp: skipResp, + requestDuration: duration, + model: chatModel, + prompt: prompt, + response: dataResponse + }; + + await sendData(data, dokuUrl, apiKey); + })(); + + // Return the pass-through stream to the original caller + return passThroughStream; + } + else { + // Call original method + const response = await originalChatCreate.call(this, params); + const end = performance.now(); + const duration = (end - start) / 1000; + + let formattedMessages = []; + for (let message of params.messages) { + let role = message.role; + let content = message.content; + + if (Array.isArray(content)) { + let contentStr = content.map(item => { + if (item.type) { + return `${item.type}: ${item.text || item.image_url}`; + } else { + 
return `text: ${item.text}`;
+ }
+ }).join(", ");
+ formattedMessages.push(`${role}: ${contentStr}`);
+ } else {
+ formattedMessages.push(`${role}: ${content}`);
+ }
+ }
+ let prompt = formattedMessages.join("\n");
+ const data = {
+ llmReqId: response.id,
+ environment: environment,
+ applicationName: applicationName,
+ sourceLanguage: 'Javascript',
+ endpoint: 'azure.chat.completions',
+ skipResp: skipResp,
+ requestDuration: duration,
+ model: "azure_" + response.model,
+ prompt: prompt,
+ };
+
+ if (!params.hasOwnProperty('tools')) {
+ data.completionTokens = response.usage.completion_tokens;
+ data.promptTokens = response.usage.prompt_tokens;
+ data.totalTokens = response.usage.total_tokens;
+ data.finishReason = response.choices[0].finish_reason;
+
+ if (!params.hasOwnProperty('n') || params.n === 1) {
+ data.response = response.choices[0].message.content;
+ } else {
+ let i = 0;
+ while (i < params.n && i < response.choices.length) {
+ data.response = response.choices[i].message.content;
+ i++;
+ await sendData(data, dokuUrl, apiKey);
+ }
+ return response;
+ }
+ } else if (params.hasOwnProperty('tools')) {
+ data.response = "Function called with tools";
+ data.completionTokens = response.usage.completion_tokens;
+ data.promptTokens = response.usage.prompt_tokens;
+ data.totalTokens = response.usage.total_tokens;
+ }
+
+ await sendData(data, dokuUrl, apiKey);
+
+ return response;
+ }
+ };
+
+ llm.completions.create = async function(params) {
+ const start = performance.now();
+ let streaming = params.stream || false;
+ if (streaming) {
+ // Call original method
+ const originalResponseStream = await originalCompletionsCreate.call(this, params);
+
+ // Create a pass-through stream
+ const passThroughStream = new Readable({
+ read() {},
+ objectMode: true // Set to true because the chunks are objects
+ });
+
+ let dataResponse = '';
+ let chatModel = '';
+
+ // Immediately-invoked async function to handle streaming
+ (async () => {
+ for await (const chunk of
originalResponseStream) {
+ var content = chunk.choices[0]?.text;
+ if (content) {
+ dataResponse += content;
+ passThroughStream.push(chunk); // Push chunk to the pass-through stream
+ }
+ var responseId = chunk.id;
+ chatModel = chunk.model;
+ }
+ passThroughStream.push(null); // Signal end of the pass-through stream
+
+ // Process response data after stream has ended
+ const end = performance.now();
+ const duration = (end - start) / 1000;
+ // Prepare the data object for Doku
+ const data = {
+ llmReqId: responseId,
+ environment: environment,
+ applicationName: applicationName,
+ sourceLanguage: 'Javascript',
+ endpoint: 'azure.completions',
+ skipResp: skipResp,
+ requestDuration: duration,
+ model: "azure_" + chatModel,
+ prompt: params.prompt,
+ response: dataResponse
+ };
+
+ await sendData(data, dokuUrl, apiKey);
+ })();
+
+ // Return the pass-through stream to the original caller
+ return passThroughStream;
+ }
+ else {
+ const response = await originalCompletionsCreate.call(this, params);
+ const end = performance.now();
+ const duration = (end - start) / 1000;
+
+ const data = {
+ llmReqId: response.id,
+ environment: environment,
+ applicationName: applicationName,
+ sourceLanguage: 'Javascript',
+ endpoint: 'azure.completions',
+ skipResp: skipResp,
+ requestDuration: duration,
+ model: "azure_" + response.model,
+ prompt: params.prompt,
+ };
+
+ if (!params.hasOwnProperty('tools')) {
+ data.completionTokens = response.usage.completion_tokens;
+ data.promptTokens = response.usage.prompt_tokens;
+ data.totalTokens = response.usage.total_tokens;
+ data.finishReason = response.choices[0].finish_reason;
+
+ if (!params.hasOwnProperty('n') || params.n === 1) {
+ data.response = response.choices[0].text;
+ } else {
+ let i = 0;
+ while (i < params.n && i < response.choices.length) {
+ data.response = response.choices[i].text;
+ i++;
+
+ await sendData(data, dokuUrl, apiKey);
+ }
+ return response;
+ }
+ } else if (params.hasOwnProperty('tools')) {
+ data.response =
"Function called with tools"; + data.completionTokens = response.usage.completion_tokens; + data.promptTokens = response.usage.prompt_tokens; + data.totalTokens = response.usage.total_tokens; + } + + await sendData(data, dokuUrl, apiKey); + + return response; + } + }; + + llm.embeddings.create = async function(params) { + const start = performance.now(); + const response = await originalEmbeddingsCreate.call(this, params); + const end = performance.now(); + const duration = (end - start) / 1000; + + const data = { + environment: environment, + applicationName: applicationName, + sourceLanguage: 'Javascript', + endpoint: 'azure.embeddings', + skipResp: skipResp, + requestDuration: duration, + model: "azure_" + response.model, + prompt: params.input, + promptTokens: response.usage.prompt_tokens, + totalTokens: response.usage.total_tokens, + }; + + await sendData(data, dokuUrl, apiKey); + + return response; + }; + + llm.images.generate = async function(params) { + const start = performance.now(); + const response = await originalImagesCreate.call(this, params); + const end = performance.now(); + const duration = (end - start) / 1000; + const size = params.size || '1024x1024'; + const model = 'azure_dall-e-3'; + let imageFormat = 'url'; + + if (params.response_format && params.response_format === 'b64_json') { + imageFormat = 'b64_json'; + } + + const quality = params.quality ?? 
'standard'; + var responseId = response.created; + for (const item of response.data) { + const data = { + llmReqId: responseId, + environment: environment, + applicationName: applicationName, + sourceLanguage: 'Javascript', + endpoint: 'azure.images.create', + skipResp: skipResp, + requestDuration: duration, + model: model, + prompt: params.prompt, + imageSize: size, + imageQuality: quality, + revisedPrompt: item.revised_prompt || null, + image: item[imageFormat], + }; + + await sendData(data, dokuUrl, apiKey); + } + + return response; + }; + +} diff --git a/src/index.js b/src/index.js index 9231808..f26ebe4 100644 --- a/src/index.js +++ b/src/index.js @@ -2,6 +2,7 @@ import initOpenAI from './openai.js'; import initCohere from './cohere.js'; import initAnthropic from './anthropic.js'; import initMistral from './mistral.js'; +import initAzureOpenAI from './azure_openai.js'; /** * Represents the configuration for Doku. @@ -53,8 +54,10 @@ function init({ llm, dokuUrl, apiKey, environment="default", applicationName="de DokuConfig.applicationName = applicationName; DokuConfig.skipResp = skipResp; - if (llm.fineTuning && typeof llm.completions.create === 'function') { + if (llm.fineTuning && typeof llm.completions.create === 'function' && !(llm.baseURL.includes('azure.com'))) { initOpenAI({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }); + } else if (llm.fineTuning && typeof llm.completions.create === 'function' && llm.baseURL.includes('azure.com')) { + initAzureOpenAI({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }); } else if (llm.generate && typeof llm.rerank === 'function') { initCohere({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }); } else if (llm.messages && typeof llm.messages.create === 'function') { diff --git a/tests/azure-openai.test.mjs b/tests/azure-openai.test.mjs new file mode 100644 index 0000000..d5f2d40 --- /dev/null +++ b/tests/azure-openai.test.mjs @@ -0,0 +1,92 @@ +import OpenAI from 'openai'; 
+import {expect} from 'chai'; +import DokuMetry from '../src/index.js'; +import fs from "fs"; + +describe('OpenAI Test', () => { + let openai; + + before(async () => { + openai = new OpenAI({ + apiKey: process.env.OPENAI_API_KEY, + }); + }); + + it('should return a response with object as "chat.completion"', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); + const chatCompletion = await openai.chat.completions.create({ + messages: [{role: 'user', content: 'What is LLM Monitoring?'}], + model: 'gpt-3.5-turbo', + }); + + expect(chatCompletion.object).to.equal('chat.completion'); + }).timeout(30000);; + + it('should return a response with object as "text_completion"', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); + const completion = await openai.completions.create({ + model: 'gpt-3.5-turbo-instruct', + prompt: 'What is LLM Observability?', + max_tokens: 7, + }); + + expect(completion.object).to.equal('text_completion'); + }).timeout(30000);; + + it('should return a response with object as "embedding"', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); + const embeddings = await openai.embeddings.create({ + model: 'text-embedding-ada-002', + input: 'The quick brown fox jumped over the lazy dog', + encoding_format: 'float', + }); + + expect(embeddings.data[0].object).to.equal('embedding'); + }).timeout(30000);; + + it('should return a response with object as "fine_tuning.job"', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: 
"dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); + try { + const fineTuningJob = await openai.fineTuning.jobs.create({ + training_file: 'file-m36cc45komO83VJKAY1qVgeP', + model: 'gpt-3.5-turbo', + }); + + expect(fineTuningJob.object).to.equal('fine_tuning.job'); + } catch (error) { + // Check if it's a rate limit error + if (error.code == "daily_rate_limit_exceeded") { + console.error(`Daily Rate limit Reached`); + } + } + }).timeout(10000); + + it('should return a response with "created" field', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); + const imageGeneration = await openai.images.generate({ + model: 'dall-e-2', + prompt: 'Generate an image of a cat.', + }); + + expect(imageGeneration.created).to.exist; + }).timeout(30000); + + it('should return a response with "created" field', async () => { + await DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); + const imageVariation = await openai.images.createVariation({ + image: fs.createReadStream('tests/test-image-for-openai.png'), + }); + + expect(imageVariation.created).to.exist; + }).timeout(30000); + + it('should return a response with url as "https://api.openai.com/v1/audio/speech"', async () => { + DokuMetry.init({llm: openai, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); + const audioSpeech = await openai.audio.speech.create({ + model: 'tts-1', + voice: 'alloy', + input: 'Today is a wonderful day to build something people love!', + }); + expect(audioSpeech.url).to.equal('https://api.openai.com/v1/audio/speech'); + }).timeout(30000); +});