diff --git a/README.md b/README.md index d2b8f9f..9d3f2d6 100644 --- a/README.md +++ b/README.md @@ -123,6 +123,9 @@ DokuMetry.init({llm: cohere, dokuUrl: "YOUR_DOKU_INGESTER_URL", apiKey: "YOUR_DO | skipResp | Skip response from the Doku Ingester for faster execution | Optional | +## Error Logging +To make server errors raised by dokumetry easier to follow, they are now logged in the form `Dokumetry: Error sending Data: HTTP status 401`, where the `Dokumetry` application name is printed in blue and the error message in red. + ## Semantic Versioning This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: diff --git a/package-lock.json b/package-lock.json index 482b614..111a7d0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "dokumetry", - "version": "0.1.1", + "version": "0.1.2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "dokumetry", - "version": "0.1.1", + "version": "0.1.2", "license": "Apache-2.0", "dependencies": { "stream": "^0.0.2" diff --git a/package.json b/package.json index b67bf43..b44eeff 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "dokumetry", - "version": "0.1.1", + "version": "0.1.2", "description": "An NPM Package for tracking OpenAI API calls and sending usage metrics to Doku", "main": "src/index.js", "scripts": { @@ -27,8 +27,8 @@ "stream": "^0.0.2" }, "devDependencies": { - "@azure/openai": "^1.0.0-beta.11", "@anthropic-ai/sdk": "^0.18.0", + "@azure/openai": "^1.0.0-beta.11", "@mistralai/mistralai": "^0.1.3", "chai": "^5.0.3", "cohere-ai": "^7.7.3", diff --git a/src/anthropic.js b/src/anthropic.js deleted file mode 100644 index 51208ea..0000000 --- a/src/anthropic.js +++ /dev/null @@ -1,168 +0,0 @@ -import {sendData} from './helpers.js'; -import { Readable } from 'stream'; - -/** - * Initializes Anthropic functionality with performance tracking. - * - * @param {Object} llm - The Anthropic function object. - * @param {string} dokuUrl - The URL for logging data. - * @param {string} apiKey - The authentication apiKey. - * @param {string} environment - The environment. - * @param {string} applicationName - The application name. - * @param {boolean} skipResp - To skip waiting for API resopnse.
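The colored log line described in the README section above is produced by the `logError` helper this PR adds in `src/helpers/logger.js` (shown later in this diff). A minimal self-contained sketch of the same two-tone output, using standard ANSI escape codes:

```js
// Blue application name, red error message, reset after each segment.
// \x1b[34m = blue, \x1b[31m = red, \x1b[0m = reset.
const APPLICATION_NAME = "Dokumetry";

function logError(error) {
  console.log(`\x1b[34m${APPLICATION_NAME}\x1b[0m: \x1b[31m${error}\x1b[0m`);
}

logError("Error sending Data: HTTP status 401");
// Prints: "Dokumetry: Error sending Data: HTTP status 401" (name blue, rest red)
```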
- * @return {void} - * - * @jsondoc - * { - * "description": "Initializes Anthropic function and performance tracking", - * "params": [ - * {"name": "dokuUrl", "type": "string", "description": "Doku URL"}, - * {"name": "apiKey", "type": "string", "description": "Auth apiKey"}, - * {"name": "llm", "type": "Object", "description": "The Anthropic object"}, - * {"name": "environment", "type": "string", "description": "Environment"}, - * {"name": "applicationName", "type": "string", "description": "Application Name"}, - * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API resopnse."} - * ], - * "returns": {"type": "void"}, - * "example": { - * "description": "Example usage of init function.", - * "code": "init('https://example.com/log', 'authToken', anthropicFunc);" - * } - * } - */ -export default function initAnthropic({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }) { - const originalMessagesCreate = llm.messages.create; - - // Define wrapped method - llm.messages.create = async function(params) { - let streaming = params.stream || false; - if (streaming) { - // Call original method - const start = performance.now(); - const originalResponseStream = await originalMessagesCreate.call(this, params); - - // Create a pass-through stream - const passThroughStream = new Readable({ - read() {}, - objectMode: true // Set to true because the chunks are objects - }); - - let dataResponse = ''; - var responseId = ''; - var promptTokens = 0; - var completionTokens = 0; - - // Immediately-invoked async function to handle streaming - (async () => { - for await (const chunk of originalResponseStream) { - if (chunk.type === 'message_start') { - responseId = chunk.message.id; - promptTokens = chunk.message.usage.input_tokens; - passThroughStream.push(chunk); // Push chunk to the pass-through stream - } - else if (chunk.type === 'content_block_delta') { - dataResponse += chunk.delta.text; - passThroughStream.push(chunk); // Push chunk to the pass-through stream - } - else if (chunk.type === 'message_delta') { - completionTokens = chunk.usage.output_tokens; - passThroughStream.push(chunk); // Push chunk to the pass-through stream - } - } - passThroughStream.push(null); // Signal end of the pass-through stream - - // Process response data after stream has ended - const end = performance.now(); - const duration = (end - start) / 1000; - - let formattedMessages = []; - for (let message of params.messages) { - let role = message.role; - let content = message.content; - - if (Array.isArray(content)) { - let contentStr = content.map(item => { - if (item.type) { - return `${item.type}: ${item.text || item.image_url}`; - } else { - return `text: ${item.text}`; - } - }).join(", "); - formattedMessages.push(`${role}: ${contentStr}`); - } else { - formattedMessages.push(`${role}: ${content}`); - } - } - let prompt = formattedMessages.join("\n"); - - // Prepare the data object for Doku - const data = { - llmReqId: responseId, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'anthropic.messages', - skipResp: skipResp, - requestDuration: duration, - model: params.model, - prompt: prompt, - response: dataResponse, - promptTokens: promptTokens, - completionTokens: completionTokens, - }; - data.totalTokens = data.promptTokens + data.completionTokens; - - await sendData(data, dokuUrl, apiKey); - })(); - - // Return the pass-through stream to the original caller - return passThroughStream; - } - else{ - const start = 
performance.now(); - const response = await originalMessagesCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - let formattedMessages = []; - for (let message of params.messages) { - let role = message.role; - let content = message.content; - - if (Array.isArray(content)) { - let contentStr = content.map(item => { - if (item.type) { - return `${item.type}: ${item.text || item.image_url}`; - } else { - return `text: ${item.text}`; - } - }).join(", "); - formattedMessages.push(`${role}: ${contentStr}`); - } else { - formattedMessages.push(`${role}: ${content}`); - } - } - let prompt = formattedMessages.join("\n"); - - const data = { - llmReqId: response.id, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'anthropic.messages', - skipResp: skipResp, - completionTokens: response.usage.input_tokens, - promptTokens: response.usage.output_tokens, - requestDuration: duration, - model: params.model, - prompt: prompt, - finishReason: response.stop_reason, - response: response.content[0].text, - }; - data.totalTokens = data.promptTokens + data.completionTokens; - - await sendData(data, dokuUrl, apiKey); - - return response; - } - }; -} diff --git a/src/azure_openai.js b/src/azure_openai.js deleted file mode 100644 index 78776ae..0000000 --- a/src/azure_openai.js +++ /dev/null @@ -1,342 +0,0 @@ -import {sendData} from './helpers.js'; -import { Readable } from 'stream'; - -/** - * Initializes Azure OpenAI functionality with performance tracking and data logging. - * - * @param {Object} llm - The Azure OpenAI function object. - * @param {string} dokuUrl - The URL for logging data. - * @param {string} apiKey - The authentication apiKey. - * @param {string} environment - The environment. - * @param {string} applicationName - The application name. - * @param {boolean} skipResp - To skip waiting for API resopnse. 
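Throughout these provider modules the instrumentation follows one pattern: capture a reference to the SDK method, overwrite it with an async wrapper that times the call, and delegate via `.call(this, params)` so the SDK's `this` binding survives. A stripped-down sketch of that pattern (the `client` and `report` names here are illustrative, not part of the package):

```js
// Generic monkey-patch: time the original call, report, return its result.
function instrument(client, report) {
  const original = client.messages.create; // keep a reference to the real method
  client.messages.create = async function (params) {
    const start = performance.now();
    const response = await original.call(this, params); // preserve `this`
    const duration = (performance.now() - start) / 1000; // seconds
    await report({ model: params.model, requestDuration: duration });
    return response; // caller sees the unmodified SDK response
  };
}

// Usage with a stand-in client:
const client = { messages: { create: async () => ({ id: "msg_1" }) } };
instrument(client, (metrics) => console.log(metrics));
await client.messages.create({ model: "demo" });
```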
- * @return {void} - * - * @jsondoc - * { - * "description": "Performance tracking for Azure OpenAI APIs", - * "params": [ - * {"name": "llm", "type": "Object", "description": "Azure OpenAI function."}, - * {"name": "dokuUrl", "type": "string", "description": "The URL"}, - * {"name": "apiKey", "type": "string", "description": "The auth apiKey."}, - * {"name": "environment", "type": "string", "description": "The environment."}, - * {"name": "applicationName", "type": "string", "description": "The application name."}, - * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API resopnse."} - * ], - * "returns": {"type": "void"}, - * "example": { - * "description": "Example usage of init function.", - * "code": "init(azureOenaiFunc, 'https://example.com/log', 'authToken');" - * } - * } - */ -export default function initAzureOpenAI({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }) { - // Save original method - const originalChatCreate = llm.chat.completions.create; - const originalCompletionsCreate = llm.completions.create; - const originalEmbeddingsCreate = llm.embeddings.create; - const originalImagesCreate = llm.images.generate; - - // Define wrapped method - llm.chat.completions.create = async function(params) { - const start = performance.now(); - let streaming = params.stream || false; - if (streaming) { - // Call original method - const originalResponseStream = await originalChatCreate.call(this, params); - - // Create a pass-through stream - const passThroughStream = new Readable({ - read() {}, - objectMode: true // Set to true because the chunks are objects - }); - - let dataResponse = ''; - let chatModel = ''; - - // Immediately-invoked async function to handle streaming - (async () => { - for await (const chunk of originalResponseStream) { - var content = chunk.choices[0]?.delta?.content; - if (content) { - dataResponse += content; - passThroughStream.push(chunk); // Push chunk to the pass-through stream - } - var responseId = chunk.id; - chatModel = chunk.model; - } - passThroughStream.push(null); // Signal end of the pass-through stream - - // Process response data after stream has ended - const end = performance.now(); - const duration = (end - start) / 1000; - - let formattedMessages = []; - for (let message of params.messages) { - let role = message.role; - let content = message.content; - - if (Array.isArray(content)) { - let contentStr = content.map(item => { - if (item.type) { - return `${item.type}: ${item.text || item.image_url}`; - } else { - return `text: ${item.text}`; - } - }).join(", "); - formattedMessages.push(`${role}: ${contentStr}`); - } else { - formattedMessages.push(`${role}: ${content}`); - } - } - let prompt = formattedMessages.join("\n"); - - // Prepare the data object for Doku - const data = { - llmReqId: responseId, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'azure.chat.completions', - skipResp: skipResp, - requestDuration: duration, - model: "azure_" + chatModel, - prompt: prompt, - response: dataResponse - }; - - await sendData(data, dokuUrl, apiKey); - })(); - - // Return the pass-through stream to the original caller - return passThroughStream; - } - else { - // Call original method - const response = await originalChatCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - let formattedMessages = []; - for (let message of params.messages) { - let role = message.role; - let content = message.content; 
- - if (Array.isArray(content)) { - let contentStr = content.map(item => { - if (item.type) { - return `${item.type}: ${item.text || item.image_url}`; - } else { - return `text: ${item.text}`; - } - }).join(", "); - formattedMessages.push(`${role}: ${contentStr}`); - } else { - formattedMessages.push(`${role}: ${content}`); - } - } - let prompt = formattedMessages.join("\n"); - const data = { - llmReqId: response.id, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'azure.chat.completions', - skipResp: skipResp, - requestDuration: duration, - model: "azure_" + response.model, - prompt: prompt, - }; - - if (!params.hasOwnProperty('tools')) { - data.completionTokens = response.usage.completion_tokens; - data.promptTokens = response.usage.prompt_tokens; - data.totalTokens = response.usage.total_tokens; - data.finishReason = response.choices[0].finish_reason; - - if (!params.hasOwnProperty('n') || params.n === 1) { - data.response = response.choices[0].message.content; - } else { - let i = 0; - while (i < params.n && i < response.choices.length) { - data.response = response.choices[i].message.content; - i++; - await sendData(data, dokuUrl, apiKey); - } - return response; - } - } else if (params.hasOwnProperty('tools')) { - data.response = "Function called with tools"; - data.completionTokens = response.usage.completion_tokens; - data.promptTokens = response.usage.prompt_tokens; - data.totalTokens = response.usage.total_tokens; - } - - await sendData(data, dokuUrl, apiKey); - - return response; - } - }; - - llm.completions.create = async function(params) { - const start = performance.now(); - let streaming = params.stream || false; - if (streaming) { - // Call original method - const originalResponseStream = await originalCompletionsCreate.call(this, params); - - // Create a pass-through stream - const passThroughStream = new Readable({ - read() {}, - objectMode: true // Set to true because the chunks are objects - }); - - let dataResponse = ''; - let chatModel = ''; - - // Immediately-invoked async function to handle streaming - (async () => { - for await (const chunk of originalResponseStream) { - if (chunk.choices.length > 0) { - dataResponse += chunk.choices[0].text; - passThroughStream.push(chunk); // Push chunk to the pass-through stream - } - var responseId = chunk.id; - chatModel = chunk.model; - } - passThroughStream.push(null); // Signal end of the pass-through stream - - // Process response data after stream has ended - const end = performance.now(); - const duration = (end - start) / 1000; - // Prepare the data object for Doku - const data = { - llmReqId: responseId, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'azure.completions', - skipResp: skipResp, - requestDuration: duration, - model: "azure_" + chatModel, - prompt: params.prompt, - response: dataResponse - }; - - await sendData(data, dokuUrl, apiKey); - })(); - - // Return the pass-through stream to the original caller - return passThroughStream; - } - else { - const response = await originalCompletionsCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const data = { - llmReqId: response.id, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'azure.completions', - skipResp: skipResp, - requestDuration: duration, - model: "azure_" + response.model, - prompt: params.prompt, - }; - - if 
(!params.hasOwnProperty('tools')) { - data.completionTokens = response.usage.completion_tokens; - data.promptTokens = response.usage.prompt_tokens; - data.totalTokens = response.usage.total_tokens; - data.finishReason = response.choices[0].finish_reason; - - if (!params.hasOwnProperty('n') || params.n === 1) { - data.response = response.choices[0].text; - } else { - let i = 0; - while (i < params.n && i < response.choices.length) { - data.response = response.choices[i].text; - i++; - - await sendData(data, dokuUrl, apiKey); - } - return response; - } - } else if (params.hasOwnProperty('tools')) { - data.response = "Function called with tools"; - data.completionTokens = response.usage.completion_tokens; - data.promptTokens = response.usage.prompt_tokens; - data.totalTokens = response.usage.total_tokens; - } - - await sendData(data, dokuUrl, apiKey); - - return response; - } - }; - - llm.embeddings.create = async function(params) { - const start = performance.now(); - const response = await originalEmbeddingsCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const data = { - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'azure.embeddings', - skipResp: skipResp, - requestDuration: duration, - model: "azure_" + response.model, - prompt: params.input, - promptTokens: response.usage.prompt_tokens, - totalTokens: response.usage.total_tokens, - }; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; - - llm.images.generate = async function(params) { - const start = performance.now(); - const response = await originalImagesCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - const size = params.size || '1024x1024'; - const model = 'azure_dall-e-3'; - let imageFormat = 'url'; - - if (params.response_format && params.response_format === 'b64_json') { - imageFormat = 'b64_json'; - } - - const quality = params.quality ?? 'standard'; - var responseId = response.created; - for (const item of response.data) { - const data = { - llmReqId: responseId, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'azure.images.create', - skipResp: skipResp, - requestDuration: duration, - model: model, - prompt: params.prompt, - imageSize: size, - imageQuality: quality, - revisedPrompt: item.revised_prompt || null, - image: item[imageFormat], - }; - - await sendData(data, dokuUrl, apiKey); - } - - return response; - }; - -} diff --git a/src/cohere.js b/src/cohere.js deleted file mode 100644 index 014dbe4..0000000 --- a/src/cohere.js +++ /dev/null @@ -1,200 +0,0 @@ -import {sendData} from './helpers.js'; - -/** - * Initializes Cohere functionality with performance tracking and data logging. - * - * @param {Object} llm - The Cohere function object. - * @param {string} dokuUrl - The URL for logging data. - * @param {string} apiKey - The authentication apiKey. - * @param {string} environment - The environment. - * @param {string} applicationName - The application name. - * @param {boolean} skipResp - To skip waiting for API resopnse. 
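The streaming branches in these wrappers all rely on the same relay trick: return an object-mode `Readable` to the caller immediately, then copy chunks into it from an immediately-invoked async function while accumulating the response text for Doku. A self-contained sketch (the toy generator stands in for an SDK response stream):

```js
import { Readable } from "stream";

// Stand-in for an SDK response stream.
async function* sdkStream() {
  yield { text: "Hello" };
  yield { text: ", world" };
}

function relay(source) {
  const pass = new Readable({ read() {}, objectMode: true }); // chunks are objects
  let collected = "";
  (async () => {
    for await (const chunk of source) {
      collected += chunk.text;
      pass.push(chunk); // forward each chunk to the caller untouched
    }
    pass.push(null); // signal end of stream
    console.log("captured for metrics:", collected); // sendData would go here
  })();
  return pass; // caller starts consuming before the copy loop finishes
}

for await (const chunk of relay(sdkStream())) process.stdout.write(chunk.text);
```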
- * @return {void} - * - * @jsondoc - * { - * "description": "Initializes Cohere functionality and performance tracking", - * "params": [ - * {"name": "llm", "type": "Object", "description": "Cohere object"}, - * {"name": "dokuUrl", "type": "string", "description": "URL for Doku"}, - * {"name": "apiKey", "type": "string", "description": "Auth apiKey."}, - * {"name": "environment", "type": "string", "description": "Environment."}, - * {"name": "applicationName", "type": "string", "description": "Application name."}, - * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API resopnse."} - * ], - * "returns": {"type": "void"}, - * "example": { - * "description": "Example usage of init function.", - * "code": "init(cohereFunc, 'https://example.com/log', 'authToken');" - * } - * } - */ -export default function initCohere({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }) { - const originalGenerate = llm.generate; - const originalEmbed = llm.embed; - const originalChat = llm.chat; - const originalChatStream = llm.chatStream; - const originalSummarize = llm.summarize; - - // Define wrapped methods - llm.generate = async function(params) { - const start = performance.now(); - const response = await originalGenerate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const model = params.model || 'command'; - const prompt = params.prompt; - - for (const generation of response.generations) { - const data = { - llmReqId: generation.id, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'cohere.generate', - skipResp: skipResp, - completionTokens: response.meta["billedUnits"]["outputTokens"], - promptTokens: response.meta["billedUnits"]["inputTokens"], - requestDuration: duration, - model: model, - prompt: prompt, - response: generation.text, - }; - data.totalTokens = data.promptTokens + data.completionTokens; - - if (!params.hasOwnProperty('stream') || params.stream !== true) { - data.finishReason = generation.finish_reason; - } - - await sendData(data, dokuUrl, apiKey); - } - - return response; - }; - - llm.embed = async function(params) { - const start = performance.now(); - const response = await originalEmbed.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const model = params.model || 'embed-english-v2.0'; - const prompt = params.texts.toString(); - - const data = { - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'cohere.embed', - skipResp: skipResp, - requestDuration: duration, - model: model, - prompt: prompt, - promptTokens: response.meta["billedUnits"]["inputTokens"], - totalTokens: response.meta["billedUnits"]["inputTokens"], - }; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; - - llm.chat = async function(params) { - const start = performance.now(); - const response = await originalChat.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const model = params.model || 'command'; - const prompt = params.message; - - const data = { - llmReqId: response.response_id, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'cohere.chat', - skipResp: skipResp, - requestDuration: duration, - model: model, - prompt: prompt, - promptTokens: response.meta["billed_units"]["output_tokens"], - completionTokens: 
response.meta["billed_units"]["input_tokens"], - totalTokens: response.token_count["billed_units"], - response: response.text, - }; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; - - llm.chatStream = async function* (params) { - const start = performance.now(); - const response = await originalChatStream.call(this, params); - - const model = params.model || 'command'; - const prompt = params.message; - - const data = { - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'cohere.chat', - skipResp: skipResp, - model: model, - prompt: prompt, - }; - - data.response = "" - for await (const message of response) { - if (message.eventType === "stream-end") { - data.llmReqId = message.response.response_id; - data.promptTokens = message.response.meta.billed_units["input_tokens"]; - data.completionTokens = message.response.meta.billed_units["output_tokens"]; - } - data.response += message.eventType === "text-generation" ? message.text : ""; - // Pass the message along so it's not consumed - yield message; // this allows the message to flow back to the original caller - } - data.totalTokens = data.promptTokens + data.completionTokens - - const end = performance.now(); - data.requestDuration = (end - start) / 1000; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; - - llm.summarize = async function(params) { - const start = performance.now(); - const response = await originalSummarize.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const model = params.model || 'command'; - const prompt = params.text; - - const data = { - llmReqId: response.id, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'cohere.summarize', - skipResp: skipResp, - requestDuration: duration, - completionTokens: response.meta["billedUnits"]["outputTokens"], - promptTokens: response.meta["billedUnits"]["inputTokens"], - model: model, - prompt: prompt, - response: response.summary, - }; - data.totalTokens = data.promptTokens + data.completionTokens; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; -} diff --git a/src/helpers.js b/src/helpers/api.js similarity index 65% rename from src/helpers.js rename to src/helpers/api.js index 03927c5..f905cdf 100644 --- a/src/helpers.js +++ b/src/helpers/api.js @@ -1,3 +1,5 @@ +import { logError } from "./logger.js"; + /** * Sends data to the specified Doku URL using the provided authentication apiKey. * @@ -25,24 +27,23 @@ * } */ export async function sendData(data, dokuUrl, authToken) { - // Remove trailing slash if present - const url = dokuUrl.endsWith('/') ? dokuUrl.slice(0, -1) : dokuUrl; - try { - const response = await fetch(`${url}/api/push`, { - method: 'post', - headers: { - 'Content-Type': 'application/json', - 'Authorization': authToken, - }, - body: JSON.stringify(data), - }); - - if (!response.ok) { - throw new Error(`Error sending Data: HTTP status ${response.status}`); - } - return response; - - } catch (err) { - throw new Error(`Error sending Metrics: ${err}`); - } + // Remove trailing slash if present + const url = dokuUrl.endsWith("/") ? 
dokuUrl.slice(0, -1) : dokuUrl; + try { + const response = await fetch(`${url}/api/push`, { + method: "post", + headers: { + "Content-Type": "application/json", + Authorization: authToken, + }, + body: JSON.stringify(data), + }); + + if (!response.ok) { + logError(`Error sending Data: HTTP status ${response.status}`); + } + return response; + } catch (err) { + logError(`Error sending Metrics: ${err}`); + } } diff --git a/src/helpers/logger.js b/src/helpers/logger.js new file mode 100644 index 0000000..12a01bc --- /dev/null +++ b/src/helpers/logger.js @@ -0,0 +1,12 @@ +/** + * Logs data on console with prefix `Dokumetry:` + * + * @param {Error} error - Error to be logged. + */ +export async function logError(error) { + const APPLICATION_NAME = "Dokumetry"; + const formattedError = "\x1b[31m" + error + "\x1b[0m"; // Red color for error + const formattedAppName = "\x1b[34m" + APPLICATION_NAME + "\x1b[0m"; // Blue color for application name + + console.log(formattedAppName + ": " + formattedError); +} diff --git a/src/index.js b/src/index.js index f26ebe4..92aa73b 100644 --- a/src/index.js +++ b/src/index.js @@ -1,20 +1,20 @@ -import initOpenAI from './openai.js'; -import initCohere from './cohere.js'; -import initAnthropic from './anthropic.js'; -import initMistral from './mistral.js'; -import initAzureOpenAI from './azure_openai.js'; +import initOpenAI from "./providers/openai.js"; +import initCohere from "./providers/cohere.js"; +import initAnthropic from "./providers/anthropic.js"; +import initMistral from "./providers/mistral.js"; +import initAzureOpenAI from "./providers/azure_openai.js"; /** * Represents the configuration for Doku. * @class DokuConfig */ class DokuConfig { - static dokuUrl = null; - static apiKey = null; - static llm = null; - static environment = null; - static applicationName = null; - static skipResp = null; + static dokuUrl = null; + static apiKey = null; + static llm = null; + static environment = null; + static applicationName = null; + static skipResp = null; } /** @@ -46,30 +46,80 @@ class DokuConfig { * } * } */ -function init({ llm, dokuUrl, apiKey, environment="default", applicationName="default", skipResp=false }) { - DokuConfig.dokuUrl = dokuUrl; - DokuConfig.apiKey = apiKey; - DokuConfig.llm = llm; - DokuConfig.environment = environment; - DokuConfig.applicationName = applicationName; - DokuConfig.skipResp = skipResp; +function init({ + llm, + dokuUrl, + apiKey, + environment = "default", + applicationName = "default", + skipResp = false, +}) { + DokuConfig.dokuUrl = dokuUrl; + DokuConfig.apiKey = apiKey; + DokuConfig.llm = llm; + DokuConfig.environment = environment; + DokuConfig.applicationName = applicationName; + DokuConfig.skipResp = skipResp; - if (llm.fineTuning && typeof llm.completions.create === 'function' && !(llm.baseURL.includes('azure.com'))) { - initOpenAI({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }); - } else if (llm.fineTuning && typeof llm.completions.create === 'function' && llm.baseURL.includes('azure.com')) { - initAzureOpenAI({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }); - } else if (llm.generate && typeof llm.rerank === 'function') { - initCohere({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }); - } else if (llm.messages && typeof llm.messages.create === 'function') { - initAnthropic({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }); - } else if (llm.listModels && typeof llm.chatStream === 'function') { - initMistral({ llm, dokuUrl, apiKey, environment, 
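A behavioral change worth noting in `src/helpers/api.js`: transport failures used to `throw`, but are now routed through `logError` and swallowed, so a Doku outage can no longer crash the instrumented application. A sketch of that non-throwing shape (import path as in this PR's layout; relies on Node 18+'s global `fetch`):

```js
import { logError } from "./logger.js"; // src/helpers/logger.js in this PR

// Failures are logged, never thrown: telemetry must not break the app.
export async function sendData(data, dokuUrl, authToken) {
  const url = dokuUrl.endsWith("/") ? dokuUrl.slice(0, -1) : dokuUrl;
  try {
    const response = await fetch(`${url}/api/push`, {
      method: "post",
      headers: { "Content-Type": "application/json", Authorization: authToken },
      body: JSON.stringify(data),
    });
    if (!response.ok) logError(`Error sending Data: HTTP status ${response.status}`);
    return response;
  } catch (err) {
    logError(`Error sending Metrics: ${err}`);
  }
}
```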
applicationName, skipResp }); - } + if ( + llm.fineTuning && + typeof llm.completions.create === "function" && + !llm.baseURL.includes("azure.com") + ) { + initOpenAI({ + llm, + dokuUrl, + apiKey, + environment, + applicationName, + skipResp, + }); + } else if ( + llm.fineTuning && + typeof llm.completions.create === "function" && + llm.baseURL.includes("azure.com") + ) { + initAzureOpenAI({ + llm, + dokuUrl, + apiKey, + environment, + applicationName, + skipResp, + }); + } else if (llm.generate && typeof llm.rerank === "function") { + initCohere({ + llm, + dokuUrl, + apiKey, + environment, + applicationName, + skipResp, + }); + } else if (llm.messages && typeof llm.messages.create === "function") { + initAnthropic({ + llm, + dokuUrl, + apiKey, + environment, + applicationName, + skipResp, + }); + } else if (llm.listModels && typeof llm.chatStream === "function") { + initMistral({ + llm, + dokuUrl, + apiKey, + environment, + applicationName, + skipResp, + }); + } } // Setting up the dokumetry namespace object const DokuMetry = { - init: init, + init: init, }; -export default DokuMetry; \ No newline at end of file +export default DokuMetry; diff --git a/src/mistral.js b/src/mistral.js deleted file mode 100644 index aed7be0..0000000 --- a/src/mistral.js +++ /dev/null @@ -1,171 +0,0 @@ -import {sendData} from './helpers.js'; - -/** - * Initializes Mistral functionality with performance tracking. - * - * @param {Object} llm - The Mistral function object. - * @param {string} dokuUrl - The URL for logging data. - * @param {string} apiKey - The authentication apiKey. - * @param {string} environment - The environment. - * @param {string} applicationName - The application name. - * @param {boolean} skipResp - To skip waiting for API resopnse. - * @return {void} - * - * @jsondoc - * { - * "description": "Initializes Mistral function and performance tracking", - * "params": [ - * {"name": "dokuUrl", "type": "string", "description": "Doku URL"}, - * {"name": "apiKey", "type": "string", "description": "Auth apiKey"}, - * {"name": "llm", "type": "Object", "description": "The Mistral object"}, - * {"name": "environment", "type": "string", "description": "Environment"}, - * {"name": "applicationName", "type": "string", "description": "Application Name"}, - * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API resopnse."} - * ], - * "returns": {"type": "void"}, - * "example": { - * "description": "Example usage of init function.", - * "code": "init('https://example.com/log', 'authToken', mistralFunc);" - * } - * } - */ -export default function initMistral({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }) { - const origianlMistralChat = llm.chat; - const origianlMistralChatStream = llm.chatStream; - const originalMistralEmbedding = llm.embeddings; - - // Define wrapped method - llm.chat = async function(params) { - const start = performance.now(); - const response = await origianlMistralChat.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - let formattedMessages = []; - for (let message of params.messages) { - let role = message.role; - let content = message.content; - - if (Array.isArray(content)) { - let contentStr = content.map(item => { - if (item.type) { - return `${item.type}: ${item.text || item.image_url}`; - } else { - return `text: ${item.text}`; - } - }).join(", "); - formattedMessages.push(`${role}: ${contentStr}`); - } else { - formattedMessages.push(`${role}: ${content}`); - } - } - let prompt = 
formattedMessages.join("\n"); - - const data = { - llmReqId: response.id, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'mistral.chat', - skipResp: skipResp, - completionTokens: response.usage.prompt_tokens, - promptTokens: response.usage.completion_tokens, - totalTokens: response.usage.total_tokens, - requestDuration: duration, - model: params.model, - prompt: String(prompt), - finishReason: response.choices[0].finish_reason, - response: String(response.choices[0].message.content), - }; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; - - llm.chatStream = async function* (params) { - const start = performance.now(); - const response = await origianlMistralChatStream.call(this, params); - - const model = params.model || 'mistral-large-latest'; - let formattedMessages = []; - for (let message of params.messages) { - let role = message.role; - let content = message.content; - - if (Array.isArray(content)) { - let contentStr = content.map(item => { - if (item.type) { - return `${item.type}: ${item.text || item.image_url}`; - } else { - return `text: ${item.text}`; - } - }).join(", "); - formattedMessages.push(`${role}: ${contentStr}`); - } else { - formattedMessages.push(`${role}: ${content}`); - } - } - let prompt = formattedMessages.join("\n"); - - const data = { - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'mistral.chat', - skipResp: skipResp, - model: model, - prompt: prompt, - }; - - data.response = "" - for await (const message of response) { - data.llmReqId = message.id; - data.response += message.choices[0].delta.content - if (message.choices[0].finish_reason != null) { - data.promptTokens = message.usage.prompt_tokens; - data.completionTokens = message.usage.completion_tokens; - data.totalTokens = message.usage.total_tokens; - data.finishReason = message.choices[0].finish_reason; - } - // Pass the message along so it's not consumed - yield message; // this allows the message to flow back to the original caller - } - - const end = performance.now(); - data.requestDuration = (end - start) / 1000; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; - - llm.embeddings = async function(params) { - const start = performance.now(); - const response = await originalMistralEmbedding.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const model = params.model || 'mistral-embed'; - const prompt = params.input.toString(); - - const data = { - llmReqId: response.id, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'mistral.embeddings', - skipResp: skipResp, - requestDuration: duration, - model: model, - prompt: prompt, - promptTokens: response.usage.prompt_tokens, - completionTokens: response.usage.completion_tokens, - totalTokens: response.usage.total_tokens, - }; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; - -} diff --git a/src/openai.js b/src/openai.js deleted file mode 100644 index 534d579..0000000 --- a/src/openai.js +++ /dev/null @@ -1,420 +0,0 @@ -import {sendData} from './helpers.js'; -import { Readable } from 'stream'; - -/** - * Initializes OpenAI functionality with performance tracking and data logging. - * - * @param {Object} llm - The OpenAI function object. - * @param {string} dokuUrl - The URL for logging data. - * @param {string} apiKey - The authentication apiKey. 
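Mistral's `chatStream` above is wrapped differently from the `Readable`-based providers: the replacement is itself an async generator that re-yields every chunk to the caller while accumulating the response, so the instrumentation never consumes the stream. A minimal sketch (the toy stream and `report` callback are illustrative):

```js
// Re-yield each chunk unchanged; report accumulated metrics at the end.
async function* withMetrics(stream, report) {
  const start = performance.now();
  let text = "";
  for await (const chunk of stream) {
    text += chunk.text ?? "";
    yield chunk; // the caller still receives every chunk
  }
  report({ response: text, requestDuration: (performance.now() - start) / 1000 });
}

async function* toyStream() { yield { text: "Bon" }; yield { text: "jour" }; }

for await (const chunk of withMetrics(toyStream(), (m) => console.log(m))) {
  process.stdout.write(chunk.text);
}
```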
- * @param {string} environment - The environment. - * @param {string} applicationName - The application name. - * @param {boolean} skipResp - To skip waiting for API resopnse. - * @return {void} - * - * @jsondoc - * { - * "description": "Performance tracking for OpenAI APIs", - * "params": [ - * {"name": "llm", "type": "Object", "description": "OpenAI function."}, - * {"name": "dokuUrl", "type": "string", "description": "The URL"}, - * {"name": "apiKey", "type": "string", "description": "The auth apiKey."}, - * {"name": "environment", "type": "string", "description": "The environment."}, - * {"name": "applicationName", "type": "string", "description": "The application name."}, - * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API resopnse."} - * ], - * "returns": {"type": "void"}, - * "example": { - * "description": "Example usage of init function.", - * "code": "init(openaiFunc, 'https://example.com/log', 'authToken');" - * } - * } - */ -export default function initOpenAI({ llm, dokuUrl, apiKey, environment, applicationName, skipResp }) { - // Save original method - const originalChatCreate = llm.chat.completions.create; - const originalCompletionsCreate = llm.completions.create; - const originalEmbeddingsCreate = llm.embeddings.create; - const originalFineTuningJobsCreate = llm.fineTuning.jobs.create; - const originalImagesCreate = llm.images.generate; - const originalImagesCreateVariation = llm.images.createVariation; - const originalAudioSpeechCreate = llm.audio.speech.create; - - // Define wrapped method - llm.chat.completions.create = async function(params) { - const start = performance.now(); - let streaming = params.stream || false; - if (streaming) { - // Call original method - const originalResponseStream = await originalChatCreate.call(this, params); - - // Create a pass-through stream - const passThroughStream = new Readable({ - read() {}, - objectMode: true // Set to true because the chunks are objects - }); - - let dataResponse = ''; - - // Immediately-invoked async function to handle streaming - (async () => { - for await (const chunk of originalResponseStream) { - var content = chunk.choices[0]?.delta?.content; - if (content) { - dataResponse += content; - passThroughStream.push(chunk); // Push chunk to the pass-through stream - } - var responseId = chunk.id; - } - passThroughStream.push(null); // Signal end of the pass-through stream - - // Process response data after stream has ended - const end = performance.now(); - const duration = (end - start) / 1000; - - let formattedMessages = []; - for (let message of params.messages) { - let role = message.role; - let content = message.content; - - if (Array.isArray(content)) { - let contentStr = content.map(item => { - if (item.type) { - return `${item.type}: ${item.text || item.image_url}`; - } else { - return `text: ${item.text}`; - } - }).join(", "); - formattedMessages.push(`${role}: ${contentStr}`); - } else { - formattedMessages.push(`${role}: ${content}`); - } - } - let prompt = formattedMessages.join("\n"); - - // Prepare the data object for Doku - const data = { - llmReqId: responseId, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'openai.chat.completions', - skipResp: skipResp, - requestDuration: duration, - model: params.model, - prompt: prompt, - response: dataResponse - }; - - await sendData(data, dokuUrl, apiKey); - })(); - - // Return the pass-through stream to the original caller - return passThroughStream; - } - else { - 
// Call original method - const response = await originalChatCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - let formattedMessages = []; - for (let message of params.messages) { - let role = message.role; - let content = message.content; - - if (Array.isArray(content)) { - let contentStr = content.map(item => { - if (item.type) { - return `${item.type}: ${item.text || item.image_url}`; - } else { - return `text: ${item.text}`; - } - }).join(", "); - formattedMessages.push(`${role}: ${contentStr}`); - } else { - formattedMessages.push(`${role}: ${content}`); - } - } - let prompt = formattedMessages.join("\n"); - const data = { - llmReqId: response.id, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'openai.chat.completions', - skipResp: skipResp, - requestDuration: duration, - model: params.model, - prompt: prompt, - }; - - if (!params.hasOwnProperty('tools')) { - data.completionTokens = response.usage.completion_tokens; - data.promptTokens = response.usage.prompt_tokens; - data.totalTokens = response.usage.total_tokens; - data.finishReason = response.choices[0].finish_reason; - - if (!params.hasOwnProperty('n') || params.n === 1) { - data.response = response.choices[0].message.content; - } else { - let i = 0; - while (i < params.n && i < response.choices.length) { - data.response = response.choices[i].message.content; - i++; - await sendData(data, dokuUrl, apiKey); - } - return response; - } - } else if (params.hasOwnProperty('tools')) { - data.response = "Function called with tools"; - data.completionTokens = response.usage.completion_tokens; - data.promptTokens = response.usage.prompt_tokens; - data.totalTokens = response.usage.total_tokens; - } - - await sendData(data, dokuUrl, apiKey); - - return response; - } - }; - - llm.completions.create = async function(params) { - const start = performance.now(); - let streaming = params.stream || false; - if (streaming) { - // Call original method - const originalResponseStream = await originalCompletionsCreate.call(this, params); - - // Create a pass-through stream - const passThroughStream = new Readable({ - read() {}, - objectMode: true // Set to true because the chunks are objects - }); - - let dataResponse = ''; - - // Immediately-invoked async function to handle streaming - (async () => { - for await (const chunk of originalResponseStream) { - var content = chunk.choices[0].text; - if (content) { - dataResponse += content; - passThroughStream.push(chunk); // Push chunk to the pass-through stream - } - var responseId = chunk.id; - } - passThroughStream.push(null); // Signal end of the pass-through stream - - // Process response data after stream has ended - const end = performance.now(); - const duration = (end - start) / 1000; - // Prepare the data object for Doku - const data = { - llmReqId: responseId, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'openai.completions', - skipResp: skipResp, - requestDuration: duration, - model: params.model, - prompt: params.prompt, - response: dataResponse - }; - - await sendData(data, dokuUrl, apiKey); - })(); - - // Return the pass-through stream to the original caller - return passThroughStream; - } - else { - const response = await originalCompletionsCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const data = { - llmReqId: response.id, - environment: environment, - 
applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'openai.completions', - skipResp: skipResp, - requestDuration: duration, - model: params.model, - prompt: params.prompt, - }; - - if (!params.hasOwnProperty('tools')) { - data.completionTokens = response.usage.completion_tokens; - data.promptTokens = response.usage.prompt_tokens; - data.totalTokens = response.usage.total_tokens; - data.finishReason = response.choices[0].finish_reason; - - if (!params.hasOwnProperty('n') || params.n === 1) { - data.response = response.choices[0].text; - } else { - let i = 0; - while (i < params.n && i < response.choices.length) { - data.response = response.choices[i].text; - i++; - - await sendData(data, dokuUrl, apiKey); - } - return response; - } - } else if (params.hasOwnProperty('tools')) { - data.response = "Function called with tools"; - data.completionTokens = response.usage.completion_tokens; - data.promptTokens = response.usage.prompt_tokens; - data.totalTokens = response.usage.total_tokens; - } - - await sendData(data, dokuUrl, apiKey); - - return response; - } - }; - - llm.embeddings.create = async function(params) { - const start = performance.now(); - const response = await originalEmbeddingsCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const data = { - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'openai.embeddings', - skipResp: skipResp, - requestDuration: duration, - model: params.model, - prompt: params.input, - promptTokens: response.usage.prompt_tokens, - totalTokens: response.usage.total_tokens, - }; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; - - llm.fineTuning.jobs.create = async function(params) { - const start = performance.now(); - const response = await originalFineTuningJobsCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const data = { - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'openai.fine_tuning', - skipResp: skipResp, - requestDuration: duration, - model: params.model, - llmReqId: response.id, - finetuneJobStatus: response.status, - }; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; - - llm.images.generate = async function(params) { - const start = performance.now(); - const response = await originalImagesCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - const size = params.size || '1024x1024'; - const model = params.model || 'dall-e-2'; - let imageFormat = 'url'; - - if (params.response_format && params.response_format === 'b64_json') { - imageFormat = 'b64_json'; - } - - const quality = params.quality ?? 
'standard'; - var responseId = response.created; - for (const item of response.data) { - const data = { - llmReqId: responseId, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'openai.images.create', - skipResp: skipResp, - requestDuration: duration, - model: model, - prompt: params.prompt, - imageSize: size, - imageQuality: quality, - revisedPrompt: item.revised_prompt || null, - image: item[imageFormat], - }; - - await sendData(data, dokuUrl, apiKey); - } - - return response; - }; - - llm.images.createVariation = async function(params) { - const start = performance.now(); - const response = await originalImagesCreateVariation.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - const size = params.size || '1024x1024'; // Default size if not provided - const model = params.model || 'dall-e-2'; - let imageFormat = 'url'; - if (params.response_format && params.response_format === 'b64_json') { - imageFormat = 'b64_json'; - } - var responseId = response.created; - for (const item of response.data) { - const data = { - llmReqId: responseId, - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'openai.images.create.variations', - skipResp: skipResp, - requestDuration: duration, - model: model, - imageSize: size, - imageQuality: "standard", - image: item[imageFormat], - }; - - await sendData(data, dokuUrl, apiKey); - } - - return response; - }; - - llm.audio.speech.create = async function(params) { - const start = performance.now(); - const response = await originalAudioSpeechCreate.call(this, params); - const end = performance.now(); - const duration = (end - start) / 1000; - - const data = { - environment: environment, - applicationName: applicationName, - sourceLanguage: 'Javascript', - endpoint: 'openai.audio.speech.create', - skipResp: skipResp, - requestDuration: duration, - model: params.model, - prompt: params.input, - audioVoice: params.voice, - }; - - await sendData(data, dokuUrl, apiKey); - - return response; - }; -} diff --git a/src/providers/anthropic.js b/src/providers/anthropic.js new file mode 100644 index 0000000..cb18cab --- /dev/null +++ b/src/providers/anthropic.js @@ -0,0 +1,179 @@ +import { sendData } from "../helpers/api.js"; +import { Readable } from "stream"; + +/** + * Initializes Anthropic functionality with performance tracking. + * + * @param {Object} llm - The Anthropic function object. + * @param {string} dokuUrl - The URL for logging data. + * @param {string} apiKey - The authentication apiKey. + * @param {string} environment - The environment. + * @param {string} applicationName - The application name. + * @param {boolean} skipResp - To skip waiting for API resopnse. 
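The provider modules being relocated under `src/providers/` below are still activated through the single `DokuMetry.init` entry point. A hedged usage sketch (client construction, model name, and the placeholder URL/key values are illustrative; the option names match the README table above):

```js
import Anthropic from "@anthropic-ai/sdk";
import DokuMetry from "dokumetry";

const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

// init() detects the SDK by shape (llm.messages.create) and patches it in place.
DokuMetry.init({
  llm: anthropic,
  dokuUrl: "YOUR_DOKU_INGESTER_URL", // placeholder, as in the README
  apiKey: "YOUR_DOKU_API_KEY",       // placeholder
  environment: "production",         // optional, defaults to "default"
  applicationName: "chat-service",   // optional, defaults to "default"
  skipResp: false,                   // optional: skip ingester response for speed
});

// Subsequent calls are timed and reported to Doku transparently.
const message = await anthropic.messages.create({
  model: "claude-3-sonnet-20240229",
  max_tokens: 64,
  messages: [{ role: "user", content: "Hello" }],
});
```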
+ * @return {void} + * + * @jsondoc + * { + * "description": "Initializes Anthropic function and performance tracking", + * "params": [ + * {"name": "dokuUrl", "type": "string", "description": "Doku URL"}, + * {"name": "apiKey", "type": "string", "description": "Auth apiKey"}, + * {"name": "llm", "type": "Object", "description": "The Anthropic object"}, + * {"name": "environment", "type": "string", "description": "Environment"}, + * {"name": "applicationName", "type": "string", "description": "Application Name"}, + * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API resopnse."} + * ], + * "returns": {"type": "void"}, + * "example": { + * "description": "Example usage of init function.", + * "code": "init('https://example.com/log', 'authToken', anthropicFunc);" + * } + * } + */ +export default function initAnthropic({ + llm, + dokuUrl, + apiKey, + environment, + applicationName, + skipResp, +}) { + const originalMessagesCreate = llm.messages.create; + + // Define wrapped method + llm.messages.create = async function (params) { + let streaming = params.stream || false; + if (streaming) { + // Call original method + const start = performance.now(); + const originalResponseStream = await originalMessagesCreate.call( + this, + params + ); + + // Create a pass-through stream + const passThroughStream = new Readable({ + read() {}, + objectMode: true, // Set to true because the chunks are objects + }); + + let dataResponse = ""; + var responseId = ""; + var promptTokens = 0; + var completionTokens = 0; + + // Immediately-invoked async function to handle streaming + (async () => { + for await (const chunk of originalResponseStream) { + if (chunk.type === "message_start") { + responseId = chunk.message.id; + promptTokens = chunk.message.usage.input_tokens; + passThroughStream.push(chunk); // Push chunk to the pass-through stream + } else if (chunk.type === "content_block_delta") { + dataResponse += chunk.delta.text; + passThroughStream.push(chunk); // Push chunk to the pass-through stream + } else if (chunk.type === "message_delta") { + completionTokens = chunk.usage.output_tokens; + passThroughStream.push(chunk); // Push chunk to the pass-through stream + } + } + passThroughStream.push(null); // Signal end of the pass-through stream + + // Process response data after stream has ended + const end = performance.now(); + const duration = (end - start) / 1000; + + let formattedMessages = []; + for (let message of params.messages) { + let role = message.role; + let content = message.content; + + if (Array.isArray(content)) { + let contentStr = content + .map((item) => { + if (item.type) { + return `${item.type}: ${item.text || item.image_url}`; + } else { + return `text: ${item.text}`; + } + }) + .join(", "); + formattedMessages.push(`${role}: ${contentStr}`); + } else { + formattedMessages.push(`${role}: ${content}`); + } + } + let prompt = formattedMessages.join("\n"); + + // Prepare the data object for Doku + const data = { + llmReqId: responseId, + environment: environment, + applicationName: applicationName, + sourceLanguage: "Javascript", + endpoint: "anthropic.messages", + skipResp: skipResp, + requestDuration: duration, + model: params.model, + prompt: prompt, + response: dataResponse, + promptTokens: promptTokens, + completionTokens: completionTokens, + }; + data.totalTokens = data.promptTokens + data.completionTokens; + + await sendData(data, dokuUrl, apiKey); + })(); + + // Return the pass-through stream to the original caller + return passThroughStream; + } else 
{ + const start = performance.now(); + const response = await originalMessagesCreate.call(this, params); + const end = performance.now(); + const duration = (end - start) / 1000; + let formattedMessages = []; + for (let message of params.messages) { + let role = message.role; + let content = message.content; + + if (Array.isArray(content)) { + let contentStr = content + .map((item) => { + if (item.type) { + return `${item.type}: ${item.text || item.image_url}`; + } else { + return `text: ${item.text}`; + } + }) + .join(", "); + formattedMessages.push(`${role}: ${contentStr}`); + } else { + formattedMessages.push(`${role}: ${content}`); + } + } + let prompt = formattedMessages.join("\n"); + + const data = { + llmReqId: response.id, + environment: environment, + applicationName: applicationName, + sourceLanguage: "Javascript", + endpoint: "anthropic.messages", + skipResp: skipResp, + promptTokens: response.usage.input_tokens, + completionTokens: response.usage.output_tokens, + requestDuration: duration, + model: params.model, + prompt: prompt, + finishReason: response.stop_reason, + response: response.content[0].text, + }; + data.totalTokens = data.promptTokens + data.completionTokens; + + await sendData(data, dokuUrl, apiKey); + + return response; + } + }; +} diff --git a/src/providers/azure_openai.js b/src/providers/azure_openai.js new file mode 100644 index 0000000..09331d8 --- /dev/null +++ b/src/providers/azure_openai.js @@ -0,0 +1,356 @@ +import { sendData } from "../helpers/api.js"; +import { Readable } from "stream"; + +/** + * Initializes Azure OpenAI functionality with performance tracking and data logging. + * + * @param {Object} llm - The Azure OpenAI function object. + * @param {string} dokuUrl - The URL for logging data. + * @param {string} apiKey - The authentication apiKey. + * @param {string} environment - The environment. + * @param {string} applicationName - The application name. + * @param {boolean} skipResp - To skip waiting for API response.
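One detail in the non-streaming branch above: Anthropic reports usage as `input_tokens`/`output_tokens`, which map to Doku's `promptTokens`/`completionTokens` respectively (the streaming branch reads the same fields from the `message_start` and `message_delta` events), with `totalTokens` derived as their sum:

```js
// Mapping Anthropic usage fields to Doku token metrics.
function tokenMetrics(usage) {
  const promptTokens = usage.input_tokens;      // tokens in the request
  const completionTokens = usage.output_tokens; // tokens generated
  return { promptTokens, completionTokens, totalTokens: promptTokens + completionTokens };
}

console.log(tokenMetrics({ input_tokens: 12, output_tokens: 34 }));
// -> { promptTokens: 12, completionTokens: 34, totalTokens: 46 }
```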
+ * @return {void} + * + * @jsondoc + * { + * "description": "Performance tracking for Azure OpenAI APIs", + * "params": [ + * {"name": "llm", "type": "Object", "description": "Azure OpenAI function."}, + * {"name": "dokuUrl", "type": "string", "description": "The URL"}, + * {"name": "apiKey", "type": "string", "description": "The auth apiKey."}, + * {"name": "environment", "type": "string", "description": "The environment."}, + * {"name": "applicationName", "type": "string", "description": "The application name."}, + * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API resopnse."} + * ], + * "returns": {"type": "void"}, + * "example": { + * "description": "Example usage of init function.", + * "code": "init(azureOenaiFunc, 'https://example.com/log', 'authToken');" + * } + * } + */ +export default function initAzureOpenAI({ + llm, + dokuUrl, + apiKey, + environment, + applicationName, + skipResp, +}) { + // Save original method + const originalChatCreate = llm.chat.completions.create; + const originalCompletionsCreate = llm.completions.create; + const originalEmbeddingsCreate = llm.embeddings.create; + const originalImagesCreate = llm.images.generate; + + // Define wrapped method + llm.chat.completions.create = async function (params) { + const start = performance.now(); + let streaming = params.stream || false; + if (streaming) { + // Call original method + const originalResponseStream = await originalChatCreate.call( + this, + params + ); + + // Create a pass-through stream + const passThroughStream = new Readable({ + read() {}, + objectMode: true, // Set to true because the chunks are objects + }); + + let dataResponse = ""; + let chatModel = ""; + + // Immediately-invoked async function to handle streaming + (async () => { + for await (const chunk of originalResponseStream) { + var content = chunk.choices[0]?.delta?.content; + if (content) { + dataResponse += content; + passThroughStream.push(chunk); // Push chunk to the pass-through stream + } + var responseId = chunk.id; + chatModel = chunk.model; + } + passThroughStream.push(null); // Signal end of the pass-through stream + + // Process response data after stream has ended + const end = performance.now(); + const duration = (end - start) / 1000; + + let formattedMessages = []; + for (let message of params.messages) { + let role = message.role; + let content = message.content; + + if (Array.isArray(content)) { + let contentStr = content + .map((item) => { + if (item.type) { + return `${item.type}: ${item.text || item.image_url}`; + } else { + return `text: ${item.text}`; + } + }) + .join(", "); + formattedMessages.push(`${role}: ${contentStr}`); + } else { + formattedMessages.push(`${role}: ${content}`); + } + } + let prompt = formattedMessages.join("\n"); + + // Prepare the data object for Doku + const data = { + llmReqId: responseId, + environment: environment, + applicationName: applicationName, + sourceLanguage: "Javascript", + endpoint: "azure.chat.completions", + skipResp: skipResp, + requestDuration: duration, + model: "azure_" + chatModel, + prompt: prompt, + response: dataResponse, + }; + + await sendData(data, dokuUrl, apiKey); + })(); + + // Return the pass-through stream to the original caller + return passThroughStream; + } else { + // Call original method + const response = await originalChatCreate.call(this, params); + const end = performance.now(); + const duration = (end - start) / 1000; + + let formattedMessages = []; + for (let message of params.messages) { + let role = message.role; + 
+        let content = message.content;
+
+        if (Array.isArray(content)) {
+          let contentStr = content
+            .map((item) => {
+              if (item.type) {
+                return `${item.type}: ${item.text || item.image_url}`;
+              } else {
+                return `text: ${item.text}`;
+              }
+            })
+            .join(", ");
+          formattedMessages.push(`${role}: ${contentStr}`);
+        } else {
+          formattedMessages.push(`${role}: ${content}`);
+        }
+      }
+      let prompt = formattedMessages.join("\n");
+      const data = {
+        llmReqId: response.id,
+        environment: environment,
+        applicationName: applicationName,
+        sourceLanguage: "Javascript",
+        endpoint: "azure.chat.completions",
+        skipResp: skipResp,
+        requestDuration: duration,
+        model: "azure_" + response.model,
+        prompt: prompt,
+      };
+
+      if (!params.hasOwnProperty("tools")) {
+        data.completionTokens = response.usage.completion_tokens;
+        data.promptTokens = response.usage.prompt_tokens;
+        data.totalTokens = response.usage.total_tokens;
+        data.finishReason = response.choices[0].finish_reason;
+
+        if (!params.hasOwnProperty("n") || params.n === 1) {
+          data.response = response.choices[0].message.content;
+        } else {
+          // Send one record per returned choice when n > 1
+          let i = 0;
+          while (i < params.n && i < response.choices.length) {
+            data.response = response.choices[i].message.content;
+            i++;
+            await sendData(data, dokuUrl, apiKey);
+          }
+          return response;
+        }
+      } else if (params.hasOwnProperty("tools")) {
+        data.response = "Function called with tools";
+        data.completionTokens = response.usage.completion_tokens;
+        data.promptTokens = response.usage.prompt_tokens;
+        data.totalTokens = response.usage.total_tokens;
+      }
+
+      await sendData(data, dokuUrl, apiKey);
+
+      return response;
+    }
+  };
+
+  llm.completions.create = async function (params) {
+    const start = performance.now();
+    let streaming = params.stream || false;
+    if (streaming) {
+      // Call original method
+      const originalResponseStream = await originalCompletionsCreate.call(
+        this,
+        params
+      );
+
+      // Create a pass-through stream
+      const passThroughStream = new Readable({
+        read() {},
+        objectMode: true, // Set to true because the chunks are objects
+      });
+
+      let dataResponse = "";
+      let chatModel = "";
+      let responseId = "";
+
+      // Immediately-invoked async function to handle streaming
+      (async () => {
+        for await (const chunk of originalResponseStream) {
+          if (chunk.choices.length > 0) {
+            dataResponse += chunk.choices[0].text;
+          }
+          responseId = chunk.id;
+          chatModel = chunk.model;
+          // Forward every chunk so the caller sees the complete stream
+          passThroughStream.push(chunk);
+        }
+        passThroughStream.push(null); // Signal end of the pass-through stream
+
+        // Process response data after stream has ended
+        const end = performance.now();
+        const duration = (end - start) / 1000;
+        // Prepare the data object for Doku
+        const data = {
+          llmReqId: responseId,
+          environment: environment,
+          applicationName: applicationName,
+          sourceLanguage: "Javascript",
+          endpoint: "azure.completions",
+          skipResp: skipResp,
+          requestDuration: duration,
+          model: "azure_" + chatModel,
+          prompt: params.prompt,
+          response: dataResponse,
+        };
+
+        await sendData(data, dokuUrl, apiKey);
+      })();
+
+      // Return the pass-through stream to the original caller
+      return passThroughStream;
+    } else {
+      const response = await originalCompletionsCreate.call(this, params);
+      const end = performance.now();
+      const duration = (end - start) / 1000;
+
+      const data = {
+        llmReqId: response.id,
+        environment: environment,
+        applicationName: applicationName,
+        sourceLanguage: "Javascript",
+        endpoint: "azure.completions",
+        skipResp: skipResp,
+        requestDuration: duration,
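+        // The "azure_" prefix keeps Azure-hosted deployments distinguishable
+        // from openai.com models of the same name in Doku.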
+        model: "azure_" + response.model,
+        prompt: params.prompt,
+      };
+
+      if (!params.hasOwnProperty("tools")) {
+        data.completionTokens = response.usage.completion_tokens;
+        data.promptTokens = response.usage.prompt_tokens;
+        data.totalTokens = response.usage.total_tokens;
+        data.finishReason = response.choices[0].finish_reason;
+
+        if (!params.hasOwnProperty("n") || params.n === 1) {
+          data.response = response.choices[0].text;
+        } else {
+          // Send one record per returned choice when n > 1
+          let i = 0;
+          while (i < params.n && i < response.choices.length) {
+            data.response = response.choices[i].text;
+            i++;
+
+            await sendData(data, dokuUrl, apiKey);
+          }
+          return response;
+        }
+      } else if (params.hasOwnProperty("tools")) {
+        data.response = "Function called with tools";
+        data.completionTokens = response.usage.completion_tokens;
+        data.promptTokens = response.usage.prompt_tokens;
+        data.totalTokens = response.usage.total_tokens;
+      }
+
+      await sendData(data, dokuUrl, apiKey);
+
+      return response;
+    }
+  };
+
+  llm.embeddings.create = async function (params) {
+    const start = performance.now();
+    const response = await originalEmbeddingsCreate.call(this, params);
+    const end = performance.now();
+    const duration = (end - start) / 1000;
+
+    const data = {
+      environment: environment,
+      applicationName: applicationName,
+      sourceLanguage: "Javascript",
+      endpoint: "azure.embeddings",
+      skipResp: skipResp,
+      requestDuration: duration,
+      model: "azure_" + response.model,
+      prompt: params.input,
+      promptTokens: response.usage.prompt_tokens,
+      totalTokens: response.usage.total_tokens,
+    };
+
+    await sendData(data, dokuUrl, apiKey);
+
+    return response;
+  };
+
+  llm.images.generate = async function (params) {
+    const start = performance.now();
+    const response = await originalImagesCreate.call(this, params);
+    const end = performance.now();
+    const duration = (end - start) / 1000;
+    const size = params.size || "1024x1024";
+    const model = "azure_dall-e-3";
+    let imageFormat = "url";
+
+    if (params.response_format && params.response_format === "b64_json") {
+      imageFormat = "b64_json";
+    }
+
+    const quality = params.quality ?? "standard";
+    const responseId = response.created;
+    for (const item of response.data) {
+      const data = {
+        llmReqId: responseId,
+        environment: environment,
+        applicationName: applicationName,
+        sourceLanguage: "Javascript",
+        endpoint: "azure.images.create",
+        skipResp: skipResp,
+        requestDuration: duration,
+        model: model,
+        prompt: params.prompt,
+        imageSize: size,
+        imageQuality: quality,
+        revisedPrompt: item.revised_prompt || null,
+        image: item[imageFormat],
+      };
+
+      await sendData(data, dokuUrl, apiKey);
+    }
+
+    return response;
+  };
+}
diff --git a/src/providers/cohere.js b/src/providers/cohere.js
new file mode 100644
index 0000000..851027b
--- /dev/null
+++ b/src/providers/cohere.js
@@ -0,0 +1,209 @@
+import { sendData } from "../helpers/api.js";
+
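+// Each wrapper below follows the same pattern: call the original SDK
+// method, measure wall-clock duration, then forward a usage record
+// (model, prompt, response, token counts) to the Doku ingester.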
+/**
+ * Initializes Cohere functionality with performance tracking and data logging.
+ *
+ * @param {Object} llm - The Cohere function object.
+ * @param {string} dokuUrl - The URL for logging data.
+ * @param {string} apiKey - The authentication apiKey.
+ * @param {string} environment - The environment.
+ * @param {string} applicationName - The application name.
+ * @param {boolean} skipResp - To skip waiting for API response.
+ * @return {void}
+ *
+ * @jsondoc
+ * {
+ * "description": "Initializes Cohere functionality and performance tracking",
+ * "params": [
+ * {"name": "llm", "type": "Object", "description": "Cohere object"},
+ * {"name": "dokuUrl", "type": "string", "description": "URL for Doku"},
+ * {"name": "apiKey", "type": "string", "description": "Auth apiKey."},
+ * {"name": "environment", "type": "string", "description": "Environment."},
+ * {"name": "applicationName", "type": "string", "description": "Application name."},
+ * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API response."}
+ * ],
+ * "returns": {"type": "void"},
+ * "example": {
+ * "description": "Example usage of init function.",
+ * "code": "init(cohereFunc, 'https://example.com/log', 'authToken');"
+ * }
+ * }
+ */
+export default function initCohere({
+  llm,
+  dokuUrl,
+  apiKey,
+  environment,
+  applicationName,
+  skipResp,
+}) {
+  const originalGenerate = llm.generate;
+  const originalEmbed = llm.embed;
+  const originalChat = llm.chat;
+  const originalChatStream = llm.chatStream;
+  const originalSummarize = llm.summarize;
+
+  // Define wrapped methods
+  llm.generate = async function (params) {
+    const start = performance.now();
+    const response = await originalGenerate.call(this, params);
+    const end = performance.now();
+    const duration = (end - start) / 1000;
+
+    const model = params.model || "command";
+    const prompt = params.prompt;
+
+    for (const generation of response.generations) {
+      const data = {
+        llmReqId: generation.id,
+        environment: environment,
+        applicationName: applicationName,
+        sourceLanguage: "Javascript",
+        endpoint: "cohere.generate",
+        skipResp: skipResp,
+        completionTokens: response.meta["billedUnits"]["outputTokens"],
+        promptTokens: response.meta["billedUnits"]["inputTokens"],
+        requestDuration: duration,
+        model: model,
+        prompt: prompt,
+        response: generation.text,
+      };
+      data.totalTokens = data.promptTokens + data.completionTokens;
+
+      if (!params.hasOwnProperty("stream") || params.stream !== true) {
+        data.finishReason = generation.finish_reason;
+      }
+
+      await sendData(data, dokuUrl, apiKey);
+    }
+
+    return response;
+  };
+
+  llm.embed = async function (params) {
+    const start = performance.now();
+    const response = await originalEmbed.call(this, params);
+    const end = performance.now();
+    const duration = (end - start) / 1000;
+
+    const model = params.model || "embed-english-v2.0";
+    const prompt = params.texts.toString();
+
+    const data = {
+      environment: environment,
+      applicationName: applicationName,
+      sourceLanguage: "Javascript",
+      endpoint: "cohere.embed",
+      skipResp: skipResp,
+      requestDuration: duration,
+      model: model,
+      prompt: prompt,
+      promptTokens: response.meta["billedUnits"]["inputTokens"],
+      totalTokens: response.meta["billedUnits"]["inputTokens"],
+    };
+
+    await sendData(data, dokuUrl, apiKey);
+
+    return response;
+  };
+
+  llm.chat = async function (params) {
+    const start = performance.now();
+    const response = await originalChat.call(this, params);
+    const end = performance.now();
+    const duration = (end - start) / 1000;
+
+    const model = params.model || "command";
+    const prompt = params.message;
+
+    const data = {
+      llmReqId: response.response_id,
+      environment: environment,
+      applicationName: applicationName,
+      sourceLanguage: "Javascript",
+      endpoint: "cohere.chat",
+      skipResp: skipResp,
+      requestDuration: duration,
+      model: model,
+      prompt: prompt,
+      promptTokens: response.meta["billed_units"]["input_tokens"],
+      completionTokens: response.meta["billed_units"]["output_tokens"],
+      totalTokens: response.token_count["billed_tokens"],
+      response: response.text,
+    };
+
+    await sendData(data, dokuUrl, apiKey);
+
+    return response;
+  };
+
+  llm.chatStream = async function* (params) {
+    const start = performance.now();
+    const response = await originalChatStream.call(this, params);
+
+    const model = params.model || "command";
+    const prompt = params.message;
+
+    const data = {
+      environment: environment,
+      applicationName: applicationName,
+      sourceLanguage: "Javascript",
+      endpoint: "cohere.chat",
+      skipResp: skipResp,
+      model: model,
+      prompt: prompt,
+    };
+
+    data.response = "";
+    for await (const message of response) {
+      if (message.eventType === "stream-end") {
+        data.llmReqId = message.response.response_id;
+        data.promptTokens = message.response.meta.billed_units["input_tokens"];
+        data.completionTokens =
+          message.response.meta.billed_units["output_tokens"];
+      }
+      data.response +=
+        message.eventType === "text-generation" ? message.text : "";
+      // Pass the message along so it's not consumed
+      yield message; // this allows the message to flow back to the original caller
+    }
+    data.totalTokens = data.promptTokens + data.completionTokens;
+
+    const end = performance.now();
+    data.requestDuration = (end - start) / 1000;
+
+    await sendData(data, dokuUrl, apiKey);
+
+    return response;
+  };
+
+  llm.summarize = async function (params) {
+    const start = performance.now();
+    const response = await originalSummarize.call(this, params);
+    const end = performance.now();
+    const duration = (end - start) / 1000;
+
+    const model = params.model || "command";
+    const prompt = params.text;
+
+    const data = {
+      llmReqId: response.id,
+      environment: environment,
+      applicationName: applicationName,
+      sourceLanguage: "Javascript",
+      endpoint: "cohere.summarize",
+      skipResp: skipResp,
+      requestDuration: duration,
+      completionTokens: response.meta["billedUnits"]["outputTokens"],
+      promptTokens: response.meta["billedUnits"]["inputTokens"],
+      model: model,
+      prompt: prompt,
+      response: response.summary,
+    };
+    data.totalTokens = data.promptTokens + data.completionTokens;
+
+    await sendData(data, dokuUrl, apiKey);
+
+    return response;
+  };
+}
diff --git a/src/providers/mistral.js b/src/providers/mistral.js
new file mode 100644
index 0000000..69fe9a9
--- /dev/null
+++ b/src/providers/mistral.js
@@ -0,0 +1,181 @@
+import { sendData } from "../helpers/api.js";
+
+/**
+ * Initializes Mistral functionality with performance tracking.
+ *
+ * @param {Object} llm - The Mistral function object.
+ * @param {string} dokuUrl - The URL for logging data.
+ * @param {string} apiKey - The authentication apiKey.
+ * @param {string} environment - The environment.
+ * @param {string} applicationName - The application name.
+ * @param {boolean} skipResp - To skip waiting for API response.
+ * @return {void}
+ *
+ * @jsondoc
+ * {
+ * "description": "Initializes Mistral function and performance tracking",
+ * "params": [
+ * {"name": "dokuUrl", "type": "string", "description": "Doku URL"},
+ * {"name": "apiKey", "type": "string", "description": "Auth apiKey"},
+ * {"name": "llm", "type": "Object", "description": "The Mistral object"},
+ * {"name": "environment", "type": "string", "description": "Environment"},
+ * {"name": "applicationName", "type": "string", "description": "Application Name"},
+ * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API response."}
+ * ],
+ * "returns": {"type": "void"},
+ * "example": {
+ * "description": "Example usage of init function.",
+ * "code": "init('https://example.com/log', 'authToken', mistralFunc);"
+ * }
+ * }
+ */
+export default function initMistral({
+  llm,
+  dokuUrl,
+  apiKey,
+  environment,
+  applicationName,
+  skipResp,
+}) {
+  const originalMistralChat = llm.chat;
+  const originalMistralChatStream = llm.chatStream;
+  const originalMistralEmbedding = llm.embeddings;
+
+  // Define wrapped method
+  llm.chat = async function (params) {
+    const start = performance.now();
+    const response = await originalMistralChat.call(this, params);
+    const end = performance.now();
+    const duration = (end - start) / 1000;
+    let formattedMessages = [];
+    for (let message of params.messages) {
+      let role = message.role;
+      let content = message.content;
+
+      if (Array.isArray(content)) {
+        let contentStr = content
+          .map((item) => {
+            if (item.type) {
+              return `${item.type}: ${item.text || item.image_url}`;
+            } else {
+              return `text: ${item.text}`;
+            }
+          })
+          .join(", ");
+        formattedMessages.push(`${role}: ${contentStr}`);
+      } else {
+        formattedMessages.push(`${role}: ${content}`);
+      }
+    }
+    let prompt = formattedMessages.join("\n");
+
+    const data = {
+      llmReqId: response.id,
+      environment: environment,
+      applicationName: applicationName,
+      sourceLanguage: "Javascript",
+      endpoint: "mistral.chat",
+      skipResp: skipResp,
+      completionTokens: response.usage.completion_tokens,
+      promptTokens: response.usage.prompt_tokens,
+      totalTokens: response.usage.total_tokens,
+      requestDuration: duration,
+      model: params.model,
+      prompt: String(prompt),
+      finishReason: response.choices[0].finish_reason,
+      response: String(response.choices[0].message.content),
+    };
+
+    await sendData(data, dokuUrl, apiKey);
+
+    return response;
+  };
+
+  llm.chatStream = async function* (params) {
+    const start = performance.now();
+    const response = await originalMistralChatStream.call(this, params);
+
+    const model = params.model || "mistral-large-latest";
+    let formattedMessages = [];
+    for (let message of params.messages) {
+      let role = message.role;
+      let content = message.content;
+
+      if (Array.isArray(content)) {
+        let contentStr = content
+          .map((item) => {
+            if (item.type) {
+              return `${item.type}: ${item.text || item.image_url}`;
+            } else {
+              return `text: ${item.text}`;
+            }
+          })
+          .join(", ");
+        formattedMessages.push(`${role}: ${contentStr}`);
+      } else {
+        formattedMessages.push(`${role}: ${content}`);
+      }
+    }
+    let prompt = formattedMessages.join("\n");
+
+    const data = {
+      environment: environment,
+      applicationName: applicationName,
+      sourceLanguage: "Javascript",
+      endpoint: "mistral.chat",
+      skipResp: skipResp,
+      model: model,
+      prompt: prompt,
+    };
+
+    data.response = "";
+    for await (const message of response) {
+      data.llmReqId = message.id;
+      data.response += message.choices[0].delta.content ?? "";
+      if (message.choices[0].finish_reason != null) {
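+        // Mistral reports token usage only on the final chunk, alongside
+        // finish_reason, so usage is captured in this branch.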
+        data.promptTokens = message.usage.prompt_tokens;
+        data.completionTokens = message.usage.completion_tokens;
+        data.totalTokens = message.usage.total_tokens;
+        data.finishReason = message.choices[0].finish_reason;
+      }
+      // Pass the message along so it's not consumed
+      yield message; // this allows the message to flow back to the original caller
+    }
+
+    const end = performance.now();
+    data.requestDuration = (end - start) / 1000;
+
+    await sendData(data, dokuUrl, apiKey);
+
+    return response;
+  };
+
+  llm.embeddings = async function (params) {
+    const start = performance.now();
+    const response = await originalMistralEmbedding.call(this, params);
+    const end = performance.now();
+    const duration = (end - start) / 1000;
+
+    const model = params.model || "mistral-embed";
+    const prompt = params.input.toString();
+
+    const data = {
+      llmReqId: response.id,
+      environment: environment,
+      applicationName: applicationName,
+      sourceLanguage: "Javascript",
+      endpoint: "mistral.embeddings",
+      skipResp: skipResp,
+      requestDuration: duration,
+      model: model,
+      prompt: prompt,
+      promptTokens: response.usage.prompt_tokens,
+      completionTokens: response.usage.completion_tokens,
+      totalTokens: response.usage.total_tokens,
+    };
+
+    await sendData(data, dokuUrl, apiKey);
+
+    return response;
+  };
+}
diff --git a/src/providers/openai.js b/src/providers/openai.js
new file mode 100644
index 0000000..1179577
--- /dev/null
+++ b/src/providers/openai.js
@@ -0,0 +1,435 @@
+import { sendData } from "../helpers/api.js";
+import { Readable } from "stream";
+
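+// For streaming calls the wrapper cannot consume the SDK stream itself,
+// so chunks are copied through a Readable pass-through: the caller reads
+// the copy while an async IIFE accumulates the response for Doku.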
+/**
+ * Initializes OpenAI functionality with performance tracking and data logging.
+ *
+ * @param {Object} llm - The OpenAI function object.
+ * @param {string} dokuUrl - The URL for logging data.
+ * @param {string} apiKey - The authentication apiKey.
+ * @param {string} environment - The environment.
+ * @param {string} applicationName - The application name.
+ * @param {boolean} skipResp - To skip waiting for API response.
+ * @return {void}
+ *
+ * @jsondoc
+ * {
+ * "description": "Performance tracking for OpenAI APIs",
+ * "params": [
+ * {"name": "llm", "type": "Object", "description": "OpenAI function."},
+ * {"name": "dokuUrl", "type": "string", "description": "The URL"},
+ * {"name": "apiKey", "type": "string", "description": "The auth apiKey."},
+ * {"name": "environment", "type": "string", "description": "The environment."},
+ * {"name": "applicationName", "type": "string", "description": "The application name."},
+ * {"name": "skipResp", "type": "boolean", "description": "To skip waiting for API response."}
+ * ],
+ * "returns": {"type": "void"},
+ * "example": {
+ * "description": "Example usage of init function.",
+ * "code": "init(openaiFunc, 'https://example.com/log', 'authToken');"
+ * }
+ * }
+ */
+export default function initOpenAI({
+  llm,
+  dokuUrl,
+  apiKey,
+  environment,
+  applicationName,
+  skipResp,
+}) {
+  // Save original methods
+  const originalChatCreate = llm.chat.completions.create;
+  const originalCompletionsCreate = llm.completions.create;
+  const originalEmbeddingsCreate = llm.embeddings.create;
+  const originalFineTuningJobsCreate = llm.fineTuning.jobs.create;
+  const originalImagesCreate = llm.images.generate;
+  const originalImagesCreateVariation = llm.images.createVariation;
+  const originalAudioSpeechCreate = llm.audio.speech.create;
+
+  // Define wrapped method
+  llm.chat.completions.create = async function (params) {
+    const start = performance.now();
+    let streaming = params.stream || false;
+    if (streaming) {
+      // Call original method
+      const originalResponseStream = await originalChatCreate.call(
+        this,
+        params
+      );
+
+      // Create a pass-through stream
+      const passThroughStream = new Readable({
+        read() {},
+        objectMode: true, // Set to true because the chunks are objects
+      });
+
+      let dataResponse = "";
+      let responseId = "";
+
+      // Immediately-invoked async function to handle streaming
+      (async () => {
+        for await (const chunk of originalResponseStream) {
+          const content = chunk.choices[0]?.delta?.content;
+          if (content) {
+            dataResponse += content;
+          }
+          responseId = chunk.id;
+          // Forward every chunk, including role and finish_reason deltas
+          passThroughStream.push(chunk);
+        }
+        passThroughStream.push(null); // Signal end of the pass-through stream
+
+        // Process response data after stream has ended
+        const end = performance.now();
+        const duration = (end - start) / 1000;
+
+        let formattedMessages = [];
+        for (let message of params.messages) {
+          let role = message.role;
+          let content = message.content;
+
+          if (Array.isArray(content)) {
+            let contentStr = content
+              .map((item) => {
+                if (item.type) {
+                  return `${item.type}: ${item.text || item.image_url}`;
+                } else {
+                  return `text: ${item.text}`;
+                }
+              })
+              .join(", ");
+            formattedMessages.push(`${role}: ${contentStr}`);
+          } else {
+            formattedMessages.push(`${role}: ${content}`);
+          }
+        }
+        let prompt = formattedMessages.join("\n");
+
+        // Prepare the data object for Doku
+        const data = {
+          llmReqId: responseId,
+          environment: environment,
+          applicationName: applicationName,
+          sourceLanguage: "Javascript",
+          endpoint: "openai.chat.completions",
+          skipResp: skipResp,
+          requestDuration: duration,
+          model: params.model,
+          prompt: prompt,
+          response: dataResponse,
+        };
+
+        await sendData(data, dokuUrl, apiKey);
+      })();
+
+      // Return the pass-through stream to the original caller
+      return passThroughStream;
+    } else {
+      // Call original method
+      const response = await originalChatCreate.call(this, params);
+      const end = performance.now();
+      const duration = (end - start) / 1000;
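+      // performance.now() is in milliseconds; Doku durations are seconds.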
+
+      let formattedMessages = [];
+      for (let message of params.messages) {
+        let role = message.role;
+        let content = message.content;
+
+        if (Array.isArray(content)) {
+          let contentStr = content
+            .map((item) => {
+              if (item.type) {
+                return `${item.type}: ${item.text || item.image_url}`;
+              } else {
+                return `text: ${item.text}`;
+              }
+            })
+            .join(", ");
+          formattedMessages.push(`${role}: ${contentStr}`);
+        } else {
+          formattedMessages.push(`${role}: ${content}`);
+        }
+      }
+      let prompt = formattedMessages.join("\n");
+      const data = {
+        llmReqId: response.id,
+        environment: environment,
+        applicationName: applicationName,
+        sourceLanguage: "Javascript",
+        endpoint: "openai.chat.completions",
+        skipResp: skipResp,
+        requestDuration: duration,
+        model: params.model,
+        prompt: prompt,
+      };
+
+      if (!params.hasOwnProperty("tools")) {
+        data.completionTokens = response.usage.completion_tokens;
+        data.promptTokens = response.usage.prompt_tokens;
+        data.totalTokens = response.usage.total_tokens;
+        data.finishReason = response.choices[0].finish_reason;
+
+        if (!params.hasOwnProperty("n") || params.n === 1) {
+          data.response = response.choices[0].message.content;
+        } else {
+          // Send one record per returned choice when n > 1
+          let i = 0;
+          while (i < params.n && i < response.choices.length) {
+            data.response = response.choices[i].message.content;
+            i++;
+            await sendData(data, dokuUrl, apiKey);
+          }
+          return response;
+        }
+      } else if (params.hasOwnProperty("tools")) {
+        data.response = "Function called with tools";
+        data.completionTokens = response.usage.completion_tokens;
+        data.promptTokens = response.usage.prompt_tokens;
+        data.totalTokens = response.usage.total_tokens;
+      }
+
+      await sendData(data, dokuUrl, apiKey);
+
+      return response;
+    }
+  };
+
+  llm.completions.create = async function (params) {
+    const start = performance.now();
+    let streaming = params.stream || false;
+    if (streaming) {
+      // Call original method
+      const originalResponseStream = await originalCompletionsCreate.call(
+        this,
+        params
+      );
+
+      // Create a pass-through stream
+      const passThroughStream = new Readable({
+        read() {},
+        objectMode: true, // Set to true because the chunks are objects
+      });
+
+      let dataResponse = "";
+      let responseId = "";
+
+      // Immediately-invoked async function to handle streaming
+      (async () => {
+        for await (const chunk of originalResponseStream) {
+          const content = chunk.choices[0]?.text;
+          if (content) {
+            dataResponse += content;
+          }
+          responseId = chunk.id;
+          // Forward every chunk so the caller sees the complete stream
+          passThroughStream.push(chunk);
+        }
+        passThroughStream.push(null); // Signal end of the pass-through stream
+
+        // Process response data after stream has ended
+        const end = performance.now();
+        const duration = (end - start) / 1000;
+        // Prepare the data object for Doku
+        const data = {
+          llmReqId: responseId,
+          environment: environment,
+          applicationName: applicationName,
+          sourceLanguage: "Javascript",
+          endpoint: "openai.completions",
+          skipResp: skipResp,
+          requestDuration: duration,
+          model: params.model,
+          prompt: params.prompt,
+          response: dataResponse,
+        };
+
+        await sendData(data, dokuUrl, apiKey);
+      })();
+
+      // Return the pass-through stream to the original caller
+      return passThroughStream;
+    } else {
+      const response = await originalCompletionsCreate.call(this, params);
+      const end = performance.now();
+      const duration = (end - start) / 1000;
+
+      const data = {
+        llmReqId: response.id,
+        environment: environment,
+        applicationName: applicationName,
+        sourceLanguage: "Javascript",
+        endpoint: "openai.completions",
+        skipResp: skipResp,
requestDuration: duration, + model: params.model, + prompt: params.prompt, + }; + + if (!params.hasOwnProperty("tools")) { + data.completionTokens = response.usage.completion_tokens; + data.promptTokens = response.usage.prompt_tokens; + data.totalTokens = response.usage.total_tokens; + data.finishReason = response.choices[0].finish_reason; + + if (!params.hasOwnProperty("n") || params.n === 1) { + data.response = response.choices[0].text; + } else { + let i = 0; + while (i < params.n && i < response.choices.length) { + data.response = response.choices[i].text; + i++; + + await sendData(data, dokuUrl, apiKey); + } + return response; + } + } else if (params.hasOwnProperty("tools")) { + data.response = "Function called with tools"; + data.completionTokens = response.usage.completion_tokens; + data.promptTokens = response.usage.prompt_tokens; + data.totalTokens = response.usage.total_tokens; + } + + await sendData(data, dokuUrl, apiKey); + + return response; + } + }; + + llm.embeddings.create = async function (params) { + const start = performance.now(); + const response = await originalEmbeddingsCreate.call(this, params); + const end = performance.now(); + const duration = (end - start) / 1000; + + const data = { + environment: environment, + applicationName: applicationName, + sourceLanguage: "Javascript", + endpoint: "openai.embeddings", + skipResp: skipResp, + requestDuration: duration, + model: params.model, + prompt: params.input, + promptTokens: response.usage.prompt_tokens, + totalTokens: response.usage.total_tokens, + }; + + await sendData(data, dokuUrl, apiKey); + + return response; + }; + + llm.fineTuning.jobs.create = async function (params) { + const start = performance.now(); + const response = await originalFineTuningJobsCreate.call(this, params); + const end = performance.now(); + const duration = (end - start) / 1000; + + const data = { + environment: environment, + applicationName: applicationName, + sourceLanguage: "Javascript", + endpoint: "openai.fine_tuning", + skipResp: skipResp, + requestDuration: duration, + model: params.model, + llmReqId: response.id, + finetuneJobStatus: response.status, + }; + + await sendData(data, dokuUrl, apiKey); + + return response; + }; + + llm.images.generate = async function (params) { + const start = performance.now(); + const response = await originalImagesCreate.call(this, params); + const end = performance.now(); + const duration = (end - start) / 1000; + const size = params.size || "1024x1024"; + const model = params.model || "dall-e-2"; + let imageFormat = "url"; + + if (params.response_format && params.response_format === "b64_json") { + imageFormat = "b64_json"; + } + + const quality = params.quality ?? 
"standard"; + var responseId = response.created; + for (const item of response.data) { + const data = { + llmReqId: responseId, + environment: environment, + applicationName: applicationName, + sourceLanguage: "Javascript", + endpoint: "openai.images.create", + skipResp: skipResp, + requestDuration: duration, + model: model, + prompt: params.prompt, + imageSize: size, + imageQuality: quality, + revisedPrompt: item.revised_prompt || null, + image: item[imageFormat], + }; + + await sendData(data, dokuUrl, apiKey); + } + + return response; + }; + + llm.images.createVariation = async function (params) { + const start = performance.now(); + const response = await originalImagesCreateVariation.call(this, params); + const end = performance.now(); + const duration = (end - start) / 1000; + const size = params.size || "1024x1024"; // Default size if not provided + const model = params.model || "dall-e-2"; + let imageFormat = "url"; + if (params.response_format && params.response_format === "b64_json") { + imageFormat = "b64_json"; + } + var responseId = response.created; + for (const item of response.data) { + const data = { + llmReqId: responseId, + environment: environment, + applicationName: applicationName, + sourceLanguage: "Javascript", + endpoint: "openai.images.create.variations", + skipResp: skipResp, + requestDuration: duration, + model: model, + imageSize: size, + imageQuality: "standard", + image: item[imageFormat], + }; + + await sendData(data, dokuUrl, apiKey); + } + + return response; + }; + + llm.audio.speech.create = async function (params) { + const start = performance.now(); + const response = await originalAudioSpeechCreate.call(this, params); + const end = performance.now(); + const duration = (end - start) / 1000; + + const data = { + environment: environment, + applicationName: applicationName, + sourceLanguage: "Javascript", + endpoint: "openai.audio.speech.create", + skipResp: skipResp, + requestDuration: duration, + model: params.model, + prompt: params.input, + audioVoice: params.voice, + }; + + await sendData(data, dokuUrl, apiKey); + + return response; + }; +} diff --git a/tests/anthropic.test.mjs b/tests/anthropic.test.mjs new file mode 100644 index 0000000..6d45d55 --- /dev/null +++ b/tests/anthropic.test.mjs @@ -0,0 +1,50 @@ +import Anthropic from '@anthropic-ai/sdk'; +import {expect} from 'chai'; +import DokuMetry from '../src/index.js'; + +describe('Anthropic Test', () => { + const anthropic = new Anthropic({ + apiKey: process.env.ANTHROPIC_API_TOKEN, + }); + + // Non-streaming messages + it('should return a response with type as "message"', async () => { + DokuMetry.init({llm: anthropic, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); + try { + const message = await anthropic.messages.create({ + model: "claude-3-haiku-20240307", + max_tokens: 1, + messages: [{ role: "user", content: "How to monitor LLM Applications in one sentence?" 
}],
+      });
+      expect(message.type).to.equal('message');
+    } catch (error) {
+      if (typeof error.message === 'string' && error.message.includes('rate limit')) {
+        console.error("Daily rate limit reached");
+      }
+    }
+  }).timeout(30000);
+
+  // Streaming messages
+  it('should stream events for a streaming message request', async () => {
+    DokuMetry.init({llm: anthropic, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false});
+    try {
+      const stream = await anthropic.messages.create({
+        max_tokens: 1,
+        messages: [{ role: 'user', content: 'How to monitor LLM Applications in one sentence?' }],
+        model: 'claude-3-haiku-20240307',
+        stream: true,
+      });
+      for await (const messageStreamEvent of stream) {
+        if (messageStreamEvent.type === 'message_start') {
+          expect(messageStreamEvent.type).to.equal('message_start');
+        }
+        if (messageStreamEvent.type === 'content_block_delta') {
+          expect(messageStreamEvent.type).to.equal('content_block_delta');
+        }
+      }
+    } catch (error) {
+      if (typeof error.message === 'string' && error.message.includes('rate limit')) {
+        console.error("Daily rate limit reached");
+      }
+    }
+  }).timeout(30000);
+});
\ No newline at end of file
diff --git a/tests/anthropic.test.mjs.hold b/tests/anthropic.test.mjs.hold
deleted file mode 100644
index f09a3c7..0000000
--- a/tests/anthropic.test.mjs.hold
+++ /dev/null
@@ -1,39 +0,0 @@
-import Anthropic from '@anthropic-ai/sdk';
-import {expect} from 'chai';
-import DokuMetry from '../src/index.js';
-
-describe('Anthropic Test', () => {
-  const anthropic = new Anthropic({
-    apiKey: process.env.ANTHROPIC_API_TOKEN,
-  });
-
-  // Non-streaming messages
-  it('should return a response with type as "message"', async () => {
-    DokuMetry.init({llm: anthropic, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false});
-    const message = await anthropic.messages.create({
-      model: "claude-3-opus-20240229",
-      max_tokens: 100,
-      messages: [{ role: "user", content: "How to monitor LLM Applications in one sentence?"
}], - model: 'claude-3-opus-20240229', - stream: true, - }); - for await (const messageStreamEvent of stream) { - if (messageStreamEvent.type === 'message_start') { - expect(messageStreamEvent.type).to.equal('message_start'); - } - if (messageStreamEvent.type === 'content_block_delta') { - expect(messageStreamEvent.type).to.equal('content_block_delta'); - } - } - }).timeout(30000); -}); \ No newline at end of file diff --git a/tests/mistral.test.mjs.hold b/tests/mistral.test.mjs similarity index 95% rename from tests/mistral.test.mjs.hold rename to tests/mistral.test.mjs index 669da28..1f51204 100644 --- a/tests/mistral.test.mjs.hold +++ b/tests/mistral.test.mjs @@ -23,8 +23,9 @@ describe('Mistral Test', () => { it('should return a response with object as "chat.completion"', async () => { DokuMetry.init({llm: client, dokuUrl: process.env.DOKU_URL, apiKey: process.env.DOKU_TOKEN, environment: "dokumetry-testing", applicationName: "dokumetry-node-test", skipResp: false}); const message = await client.chat({ - model: 'mistral-large-latest', + model: 'open-mistral-7b', messages: [{role: 'user', content: 'What is LLM Observability?'}], + maxTokens: 1, }); expect(message.object).to.equal('chat.completion');