diff --git a/packages/cannoli-core/package.json b/packages/cannoli-core/package.json index 88d0cea..217b9c0 100644 --- a/packages/cannoli-core/package.json +++ b/packages/cannoli-core/package.json @@ -36,12 +36,18 @@ "typescript": "^5.3.3" }, "dependencies": { + "@arizeai/openinference-instrumentation-langchain": "0.2.0", + "@arizeai/openinference-semantic-conventions": "0.10.0", "@langchain/anthropic": "0.2.1", "@langchain/community": "0.2.12", "@langchain/core": "0.2.7", "@langchain/google-genai": "0.0.19", "@langchain/groq": "0.0.12", "@langchain/openai": "0.1.3", + "@opentelemetry/exporter-trace-otlp-proto": "0.53.0", + "@opentelemetry/instrumentation": "0.53.0", + "@opentelemetry/resources": "1.26.0", + "@opentelemetry/sdk-trace-web": "1.26.0", "js-yaml": "^4.1.0", "langchain": "0.2.5", "nanoid": "5.0.7", @@ -51,6 +57,7 @@ "tiny-invariant": "^1.3.1", "tslib": "2.4.0", "tsup": "^8.0.2", + "web-instrumentation-langchain": "workspace:*", "zod": "3.23.8" } } diff --git a/packages/cannoli-core/src/cannoli.ts b/packages/cannoli-core/src/cannoli.ts index 4ddc052..55f371f 100644 --- a/packages/cannoli-core/src/cannoli.ts +++ b/packages/cannoli-core/src/cannoli.ts @@ -1,87 +1,32 @@ import { Run, RunArgs, Stoppage } from "./run"; -export function run({ - cannoli, - llmConfigs, - args, - fileManager, - persistor, - actions, - httpTemplates, - replacers, - fetcher, - config, - secrets, - isMock, - resume, - onFinish, -}: RunArgs): [Promise, () => void] { - let resolver: (stoppage: Stoppage) => void; - const done = new Promise((resolve) => { - resolver = resolve; - }); - - const run = new Run({ - llmConfigs, - cannoli, - args, - persistor, - onFinish: (stoppage: Stoppage) => { - resolver(stoppage); - if (onFinish) onFinish(stoppage); - }, - fileManager, - actions, - httpTemplates, - replacers, - isMock, - fetcher, - config, - secrets, - resume, - }); - - run.start(); - - return [done, () => run.stop()]; +export function run({ onFinish, ...args }: RunArgs): [Promise, () => 
void] { + let resolver: (stoppage: Stoppage) => void; + const done = new Promise((resolve) => { + resolver = resolve; + }); + + const run = new Run({ + ...args, + onFinish: (stoppage: Stoppage) => { + resolver(stoppage); + if (onFinish) onFinish(stoppage); + }, + }); + + run.start(); + + return [done, () => run.stop()]; } -export async function resultsRun({ - cannoli, - llmConfigs, - args, - fileManager, - persistor, - actions, - httpTemplates, - replacers, - fetcher, - config, - secrets, - isMock, - resume, -}: RunArgs): Promise> { - const [done] = run({ - cannoli, - llmConfigs, - args, - fileManager, - persistor, - actions, - httpTemplates, - replacers, - fetcher, - config, - secrets, - isMock, - resume, - }); +export async function resultsRun(args: RunArgs): Promise> { + const [done] = run({ ...args }); - const stoppage = await done; + const stoppage = await done; - if (stoppage.reason === "error") { - throw new Error(`Error occurred during the run: ${stoppage.message}`); - } + if (stoppage.reason === "error") { + throw new Error(`Error occurred during the run: ${stoppage.message}`); + } - return stoppage.results; + return stoppage.results; } diff --git a/packages/cannoli-core/src/instrumentation.ts b/packages/cannoli-core/src/instrumentation.ts new file mode 100644 index 0000000..0878957 --- /dev/null +++ b/packages/cannoli-core/src/instrumentation.ts @@ -0,0 +1,54 @@ +import { WebTracerProvider, SimpleSpanProcessor } from "@opentelemetry/sdk-trace-web" +import { SEMRESATTRS_PROJECT_NAME } from "@arizeai/openinference-semantic-conventions"; +import { Resource } from "@opentelemetry/resources" +import * as lcCallbackManager from "@langchain/core/callbacks/manager"; +import { LangChainInstrumentation } from "web-instrumentation-langchain"; + +import { TracingConfig } from "src/run" +import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-proto"; + +const instrumentPhoenixLangchain = () => { + const lcInstrumentation = new LangChainInstrumentation(); 
+ lcInstrumentation.manuallyInstrument(lcCallbackManager); + + console.log("🔎 Phoenix Langchain instrumentation enabled 🔎") +} + +export const createPhoenixWebTracerProvider = ({ tracingConfig }: { tracingConfig: TracingConfig }) => { + if (!tracingConfig.phoenix?.enabled) { + return + } + + try { + + const provider = new WebTracerProvider({ + resource: new Resource({ + [SEMRESATTRS_PROJECT_NAME]: tracingConfig.phoenix.projectName, + }), + }) + + const traceUrl = `${tracingConfig.phoenix.baseUrl.endsWith("/") ? tracingConfig.phoenix.baseUrl : `${tracingConfig.phoenix.baseUrl}/`}v1/traces` + // provider.addSpanProcessor(new SimpleSpanProcessor(new ConsoleSpanExporter())) + provider.addSpanProcessor(new SimpleSpanProcessor(new OTLPTraceExporter({ + url: traceUrl, + headers: { + // allow cross-origin requests + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, OPTIONS", + "Access-Control-Allow-Headers": "Content-Type, Authorization, Content-Length, X-Requested-With, Accept, Origin", + "Access-Control-Allow-Credentials": "true", + } + }))) + + provider.register() + + console.log("🔎 Phoenix tracing enabled 🔎") + + instrumentPhoenixLangchain() + + return provider + } catch (error) { + console.error("Error enabling Phoenix tracing", error) + } +} + diff --git a/packages/cannoli-core/src/lcInstrumentation.ts b/packages/cannoli-core/src/lcInstrumentation.ts new file mode 100644 index 0000000..e69de29 diff --git a/packages/cannoli-core/src/providers.ts b/packages/cannoli-core/src/providers.ts index 3a95c88..6343ce3 100644 --- a/packages/cannoli-core/src/providers.ts +++ b/packages/cannoli-core/src/providers.ts @@ -20,6 +20,7 @@ export type SupportedProviders = "openai" | "ollama" | "gemini" | "anthropic" | import { z } from "zod"; import invariant from "tiny-invariant"; +import { TracingConfig } from "src/run"; export const GenericFunctionCallSchema = z.object({ name: z.string(), @@ -69,7 +70,11 @@ export type LLMConfig = (Omit & 
{ provider: Supp type ConstructorArgs = { configs: LLMConfig[]; + tracingConfig?: TracingConfig | null; valtownApiKey?: string; + runId?: string; + runName?: string; + runDateEpochMs?: number; }; export type GenericCompletionParams = { @@ -105,6 +110,10 @@ export class LLMProvider { getDefaultConfigByProvider?: GetDefaultsByProvider; initialized = false; valtownApiKey?: string; + tracingConfig?: TracingConfig | null; + runId?: string; + runName?: string; + runDateEpochMs?: number; constructor(initArgs: ConstructorArgs) { this.init(initArgs); @@ -115,6 +124,10 @@ export class LLMProvider { this.provider = initArgs.configs[0].provider as SupportedProviders; this.baseConfig = initArgs.configs[0]; this.valtownApiKey = initArgs.valtownApiKey; + this.tracingConfig = initArgs.tracingConfig; + this.runId = initArgs.runId; + this.runDateEpochMs = initArgs.runDateEpochMs; + this.runName = initArgs.runName; this.getDefaultConfigByProvider = (provider: SupportedProviders) => { return initArgs.configs.find((config) => config.provider === provider); } @@ -166,9 +179,10 @@ export class LLMProvider { const url = urlString || undefined; const query = queryString ? 
Object.fromEntries(new URLSearchParams(queryString).entries()) : undefined + let client: BaseChatModel; switch (provider) { case "openai": - return new ChatOpenAI({ + client = new ChatOpenAI({ apiKey: config.apiKey, model: config.model, temperature: config.temperature, @@ -187,8 +201,9 @@ export class LLMProvider { defaultQuery: query } }); + break; case "azure_openai": - return new AzureChatOpenAI({ + client = new AzureChatOpenAI({ temperature: config.temperature, model: config.model, apiKey: config.apiKey, @@ -212,9 +227,10 @@ export class LLMProvider { defaultQuery: query, } }); + break; case "ollama": if (args?.hasFunctionCall) { - return new OllamaFunctions({ + client = new OllamaFunctions({ baseUrl: url, model: config.model, temperature: config.temperature, @@ -223,9 +239,10 @@ export class LLMProvider { presencePenalty: config.presence_penalty, stop: config.stop?.split(","), }); + break; } - return new ChatOllama({ + client = new ChatOllama({ baseUrl: url, model: config.model, temperature: config.temperature, @@ -234,8 +251,9 @@ export class LLMProvider { presencePenalty: config.presence_penalty, stop: config.stop?.split(","), }); + break; case "gemini": - return new ChatGoogleGenerativeAI({ + client = new ChatGoogleGenerativeAI({ maxRetries: 3, model: config.model, apiKey: config.apiKey, @@ -244,8 +262,9 @@ export class LLMProvider { topP: config.top_p, stopSequences: config.stop?.split(","), }); + break; case "anthropic": - return new ChatAnthropic({ + client = new ChatAnthropic({ apiKey: config.apiKey, model: config.model, temperature: config.temperature, @@ -261,17 +280,21 @@ export class LLMProvider { }, } }); + break; case "groq": - return new ChatGroq({ + client = new ChatGroq({ apiKey: config.apiKey, model: config.model, temperature: config.temperature, stopSequences: config.stop?.split(","), maxRetries: 3, }); + break; default: throw new Error("Unsupported provider"); } + + return client.withConfig({ metadata: { runId: this.runId, runName: 
this.runName, runDateEpochMs: this.runDateEpochMs } }) as unknown as BaseChatModel; }; static convertMessages = ( diff --git a/packages/cannoli-core/src/run.ts b/packages/cannoli-core/src/run.ts index d8e7292..db42c83 100644 --- a/packages/cannoli-core/src/run.ts +++ b/packages/cannoli-core/src/run.ts @@ -19,6 +19,9 @@ import { CannoliGroup } from "./graph/objects/vertices/CannoliGroup"; import { CannoliNode } from "./graph/objects/vertices/CannoliNode"; import { parseNamedNode } from "./utility"; import { resultsRun } from "./cannoli"; +import { z } from "zod"; +import { createPhoenixWebTracerProvider } from "src/instrumentation"; +import { nanoid } from "nanoid"; export interface HttpTemplate { id: string; @@ -100,6 +103,17 @@ export interface ModelUsage { export type ChatRole = "user" | "assistant" | "system"; +const tracingConfigSchema = z.object({ + phoenix: z.object({ + enabled: z.boolean().default(false), + apiKey: z.string().optional(), + baseUrl: z.string(), + projectName: z.string().default("cannoli"), + }).nullish(), +}); + +export type TracingConfig = z.infer; + enum DagCheckState { UNVISITED, VISITING, @@ -117,7 +131,7 @@ export interface RunArgs { cannoli: unknown; llmConfigs?: LLMConfig[]; fetcher?: ResponseTextFetcher; - config?: Record; + config?: Record; secrets?: Record; args?: Record; onFinish?: (stoppage: Stoppage) => void; @@ -128,6 +142,7 @@ export interface RunArgs { httpTemplates?: HttpTemplate[]; replacers?: Replacer[]; resume?: boolean; + runName?: string; } export class Run { @@ -150,6 +165,17 @@ export class Run { stopTime: number | null = null; currentNote: string | null = null; selection: string | null = null; + // tracing fields + /** The tracing configuration for this run */ + tracingConfig: TracingConfig | null = null; + /** The run ID for this run. Used to identify the run in your telemetry backend. */ + runId: string; + /** The run name for this run. Used to identify all runs from all executions of the same canvas. 
*/ + runName: string; + /** The run date for this run. The date in which this run was started, in epoch milliseconds. */ + runDateEpochMs: number; + /** The filter to apply to the spans produced by this run. */ + postTraceFilter: string | undefined; subcannoliCallback: (cannoli: unknown, inputVariables: Record, scIsMock: boolean) => Promise>; @@ -171,12 +197,17 @@ export class Run { config, secrets, args, + runName, resume }: RunArgs) { this.onFinish = onFinish ?? ((stoppage: Stoppage) => { }); this.isMock = isMock ?? false; this.persistor = persistor ?? null; this.usage = {}; + this.runId = `${nanoid(16)}${isMock ? "-mock" : ""}`; + this.runDateEpochMs = Date.now(); + this.postTraceFilter = undefined; + this.runName = runName || "Unnamed Cannoli Run"; const defaultFetcher: ResponseTextFetcher = async (url, options) => { const res = await fetch(url, options); @@ -185,13 +216,30 @@ export class Run { this.fetcher = fetcher ?? defaultFetcher; - this.llm = llmConfigs ? new LLMProvider({ configs: llmConfigs, valtownApiKey: secrets?.["VALTOWN_API_KEY"] }) : null; + this.llm = llmConfigs ? new LLMProvider({ + configs: llmConfigs, + valtownApiKey: secrets?.["VALTOWN_API_KEY"], + runId: this.runId, + runDateEpochMs: this.runDateEpochMs, + runName: this.runName + }) : null; this.secrets = secrets ?? {}; this.config = { ...config ?? {}, ...this.secrets }; this.args = args ?? 
null; + const tracingConfig = tracingConfigSchema.safeParse(config?.tracingConfig); + + if (tracingConfig.success) { + this.tracingConfig = tracingConfig.data; + } + + if (this.tracingConfig && !this.isMock) { + createPhoenixWebTracerProvider({ tracingConfig: this.tracingConfig }) + this.postTraceFilter = `metadata['runId'] == '${this.runId}'\nmetadata['runName'] == '${this.runName}'\nmetadata['runDateEpochMs'] == '${this.runDateEpochMs}'` + } + let parsedCannoliJSON: CanvasData; try { @@ -427,6 +475,11 @@ export class Run { private handleFinish(reason: StoppageReason, message?: string) { this.stopTime = Date.now(); + + if (this.tracingConfig && !this.isMock && this.postTraceFilter) { + console.log(`To view spans for this run in Arize Phoenix, filter your spans with:\n\n${this.postTraceFilter}`) + } + this.onFinish({ reason, message, @@ -1132,4 +1185,4 @@ export class Run { } } } -} \ No newline at end of file +} diff --git a/packages/cannoli-plugin/src/main.ts b/packages/cannoli-plugin/src/main.ts index 652231e..ef7d143 100644 --- a/packages/cannoli-plugin/src/main.ts +++ b/packages/cannoli-plugin/src/main.ts @@ -357,6 +357,9 @@ export default class Cannoli extends Plugin { // Get the content of the file const content = JSON.parse(await this.app.vault.read(file)); + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const { tracingConfig, ...config } = this.getConfig(true); + const bakeResult = await bake({ language: "typescript", runtime: "deno", @@ -365,7 +368,7 @@ export default class Cannoli extends Plugin { llmConfigs: this.getLLMConfigs(), fileManager: new VaultInterface(this), actions: this.getActions(), - config: this.getConfig(true), + config, secrets: this.getSecrets(), httpTemplates: this.settings.httpTemplates, includeTypes: false, @@ -516,6 +519,8 @@ export default class Cannoli extends Plugin { // Get the content of the file const content = JSON.parse(await this.app.vault.read(activeFile)); + // eslint-disable-next-line 
@typescript-eslint/no-unused-vars + const { tracingConfig, ...config } = this.getConfig(true); const bakeResult = await bake({ language: this.settings.bakeLanguage, @@ -526,7 +531,7 @@ export default class Cannoli extends Plugin { llmConfigs: this.getLLMConfigs(), fileManager: new VaultInterface(this), actions: this.getActions(), - config: this.getConfig(true), + config, secrets: this.getSecrets(), httpTemplates: this.settings.httpTemplates, includeTypes: true @@ -929,13 +934,14 @@ export default class Cannoli extends Plugin { return secrets; } - getConfig = (forBake = false) => { + getConfig = (forBake?: boolean) => { const chatFormatStringIsDefault = this.settings.chatFormatString === DEFAULT_SETTINGS.chatFormatString || forBake; return { ...(this.settings.contentIsColorless ? { contentIsColorless: this.settings.contentIsColorless } : {}), ...(!chatFormatStringIsDefault ? { chatFormatString: this.settings.chatFormatString } : {}), ...(this.settings.enableVision !== undefined ? { enableVision: this.settings.enableVision } : {}), + tracingConfig: forBake ? undefined : this.settings.tracingConfig, }; } @@ -1104,7 +1110,8 @@ export default class Cannoli extends Plugin { args: cannoliArgs, persistor: noCanvas ? undefined : canvas, fileManager: vaultInterface, - isMock: true + isMock: true, + runName: name, }); const validationStoppage = await validationStoppagePromise; @@ -1147,6 +1154,7 @@ export default class Cannoli extends Plugin { persistor: noCanvas ? 
undefined : canvas, fileManager: vaultInterface, isMock: false, + runName: name, }); // add to running cannolis diff --git a/packages/cannoli-plugin/src/settings/sections/tracingSettings.ts b/packages/cannoli-plugin/src/settings/sections/tracingSettings.ts new file mode 100644 index 0000000..c8da884 --- /dev/null +++ b/packages/cannoli-plugin/src/settings/sections/tracingSettings.ts @@ -0,0 +1,84 @@ +import { Setting } from "obsidian"; +import Cannoli from "src/main"; +import { TracingConfig } from "@deablabs/cannoli-core"; +import { DEFAULT_SETTINGS } from "src/settings/settings"; + +const defaultPhoenixTracingConfig: NonNullable = DEFAULT_SETTINGS.tracingConfig.phoenix! + +export function createTracingSettings(containerEl: HTMLElement, plugin: Cannoli, display: () => void): void { + // heading + containerEl.createEl("h1", { text: "Tracing" }); + + new Setting(containerEl) + .setName("Phoenix Tracing") + .setDesc("Enable Phoenix tracing for your Cannoli runs. Phoenix is a data tracing system that allows you to observe the history of your runs, and optimize your prompts over time.") + .addToggle((toggle) => { + toggle.setValue(plugin.settings.tracingConfig.phoenix?.enabled ?? false); + toggle.onChange(async (value) => { + if (plugin.settings.tracingConfig.phoenix) { + plugin.settings.tracingConfig.phoenix.enabled = value; + } else { + plugin.settings.tracingConfig.phoenix = { + ...defaultPhoenixTracingConfig, + enabled: value, + } + } + await plugin.saveSettings(); + display(); + }); + }); + + new Setting(containerEl) + .setName("Phoenix Project Name") + .setDesc("The name of the project to use for your Phoenix tracing. This is used to identify the project in the Phoenix console.") + .addText((text) => { + text.setValue(plugin.settings.tracingConfig.phoenix?.projectName ?? 
defaultPhoenixTracingConfig.projectName); + text.onChange(async (value) => { + if (plugin.settings.tracingConfig.phoenix) { + plugin.settings.tracingConfig.phoenix.projectName = value; + } else { + plugin.settings.tracingConfig.phoenix = { + ...defaultPhoenixTracingConfig, + projectName: value, + } + } + await plugin.saveSettings(); + }); + }); + + new Setting(containerEl) + .setName("Phoenix Base URL") + .setDesc("The base URL for your Phoenix tracing. This is used to send your tracing data to the Phoenix server.") + .addText((text) => { + text.setValue(plugin.settings.tracingConfig.phoenix?.baseUrl ?? defaultPhoenixTracingConfig.baseUrl); + text.onChange(async (value) => { + if (plugin.settings.tracingConfig.phoenix) { + plugin.settings.tracingConfig.phoenix.baseUrl = value; + } else { + plugin.settings.tracingConfig.phoenix = { + ...defaultPhoenixTracingConfig, + baseUrl: value, + } + } + await plugin.saveSettings(); + }); + }); + + new Setting(containerEl) + .setName("Phoenix API Key") + .setDesc("The API key to use for your Phoenix tracing. This is used to authenticate your tracing data to the Phoenix server.") + .addText((text) => { + text.setValue(plugin.settings.tracingConfig.phoenix?.apiKey ?? defaultPhoenixTracingConfig.apiKey ?? 
""); + text.onChange(async (value) => { + if (plugin.settings.tracingConfig.phoenix) { + plugin.settings.tracingConfig.phoenix.apiKey = value; + } else { + plugin.settings.tracingConfig.phoenix = { + ...defaultPhoenixTracingConfig, + apiKey: value, + } + } + await plugin.saveSettings(); + }); + }); +} diff --git a/packages/cannoli-plugin/src/settings/settings.ts b/packages/cannoli-plugin/src/settings/settings.ts index 830b4f4..97ebd01 100644 --- a/packages/cannoli-plugin/src/settings/settings.ts +++ b/packages/cannoli-plugin/src/settings/settings.ts @@ -1,100 +1,109 @@ -import { BakeLanguage, BakeRuntime, HttpTemplate, SupportedProviders } from "@deablabs/cannoli-core"; +import { BakeLanguage, BakeRuntime, HttpTemplate, SupportedProviders, TracingConfig } from "@deablabs/cannoli-core"; export interface CannoliSettings { - llmProvider: SupportedProviders; - ollamaBaseUrl: string; - ollamaModel: string; - ollamaTemperature: number; - azureAPIKey: string; - azureModel: string; - azureTemperature: number; - azureOpenAIApiDeploymentName: string; - azureOpenAIApiInstanceName: string; - azureOpenAIApiVersion: string; - azureBaseURL: string; - geminiModel: string; - geminiAPIKey: string; - geminiTemperature: number; - anthropicModel: string; - anthropicAPIKey: string; - anthropicTemperature: number; - anthropicBaseURL: string; - groqModel: string; - groqAPIKey: string; - groqTemperature: number; - openaiAPIKey: string; - openaiBaseURL: string; - requestThreshold: number; - defaultModel: string; - defaultTemperature: number; - httpTemplates: HttpTemplate[]; - includeFilenameAsHeader: boolean; - includePropertiesInExtractedNotes: boolean; - includeLinkInExtractedNotes: boolean; - chatFormatString: string; - enableAudioTriggeredCannolis?: boolean; - deleteAudioFilesAfterAudioTriggeredCannolis?: boolean; - transcriptionPrompt?: string; - autoScrollWithTokenStream: boolean; - pLimit: number; - contentIsColorless: boolean; - valTownAPIKey: string; - exaAPIKey: string; - 
bakedCannoliFolder: string; - bakeLanguage: BakeLanguage; - bakeRuntime: BakeRuntime; - bakeIndent: "2" | "4"; - seenVersion2Modal: boolean; - enableVision: boolean; - secrets: { name: string; value: string; visibility: string }[]; - onlyRunCannoliGroups: boolean; + llmProvider: SupportedProviders; + ollamaBaseUrl: string; + ollamaModel: string; + ollamaTemperature: number; + azureAPIKey: string; + azureModel: string; + azureTemperature: number; + azureOpenAIApiDeploymentName: string; + azureOpenAIApiInstanceName: string; + azureOpenAIApiVersion: string; + azureBaseURL: string; + geminiModel: string; + geminiAPIKey: string; + geminiTemperature: number; + anthropicModel: string; + anthropicAPIKey: string; + anthropicTemperature: number; + anthropicBaseURL: string; + groqModel: string; + groqAPIKey: string; + groqTemperature: number; + openaiAPIKey: string; + openaiBaseURL: string; + requestThreshold: number; + defaultModel: string; + defaultTemperature: number; + httpTemplates: HttpTemplate[]; + includeFilenameAsHeader: boolean; + includePropertiesInExtractedNotes: boolean; + includeLinkInExtractedNotes: boolean; + chatFormatString: string; + enableAudioTriggeredCannolis?: boolean; + deleteAudioFilesAfterAudioTriggeredCannolis?: boolean; + transcriptionPrompt?: string; + autoScrollWithTokenStream: boolean; + pLimit: number; + contentIsColorless: boolean; + valTownAPIKey: string; + exaAPIKey: string; + bakedCannoliFolder: string; + bakeLanguage: BakeLanguage; + bakeRuntime: BakeRuntime; + bakeIndent: "2" | "4"; + seenVersion2Modal: boolean; + enableVision: boolean; + secrets: { name: string; value: string; visibility: string }[]; + onlyRunCannoliGroups: boolean; + tracingConfig: NonNullable; } export const DEFAULT_SETTINGS: CannoliSettings = { - llmProvider: "openai", - ollamaBaseUrl: "http://127.0.0.1:11434", - ollamaModel: "llama2", - ollamaTemperature: 1, - azureModel: "", - azureAPIKey: "", - azureTemperature: 1, - azureOpenAIApiDeploymentName: "", - 
azureOpenAIApiInstanceName: "", - azureOpenAIApiVersion: "", - azureBaseURL: "", - geminiModel: "gemini-1.0-pro-latest", - geminiAPIKey: "", - geminiTemperature: 1, - anthropicModel: "claude-3-5-sonnet-20240620", - anthropicAPIKey: "", - anthropicTemperature: 1, - anthropicBaseURL: "", - groqModel: "llama3-70b-8192", - groqAPIKey: "", - groqTemperature: 1, - openaiAPIKey: "", - openaiBaseURL: "", - requestThreshold: 20, - defaultModel: "gpt-4o", - defaultTemperature: 1, - httpTemplates: [], - includeFilenameAsHeader: false, - includePropertiesInExtractedNotes: false, - includeLinkInExtractedNotes: false, - chatFormatString: `---\n# {{role}}\n\n{{content}}`, - enableAudioTriggeredCannolis: false, - deleteAudioFilesAfterAudioTriggeredCannolis: false, - autoScrollWithTokenStream: false, - pLimit: 50, - contentIsColorless: false, - valTownAPIKey: "", - exaAPIKey: "", - bakedCannoliFolder: "Baked Cannoli", - bakeLanguage: "typescript", - bakeRuntime: "node", - bakeIndent: "2", - seenVersion2Modal: false, - secrets: [], - enableVision: true, - onlyRunCannoliGroups: false, + llmProvider: "openai", + ollamaBaseUrl: "http://127.0.0.1:11434", + ollamaModel: "llama2", + ollamaTemperature: 1, + azureModel: "", + azureAPIKey: "", + azureTemperature: 1, + azureOpenAIApiDeploymentName: "", + azureOpenAIApiInstanceName: "", + azureOpenAIApiVersion: "", + azureBaseURL: "", + geminiModel: "gemini-1.0-pro-latest", + geminiAPIKey: "", + geminiTemperature: 1, + anthropicModel: "claude-3-5-sonnet-20240620", + anthropicAPIKey: "", + anthropicTemperature: 1, + anthropicBaseURL: "", + groqModel: "llama3-70b-8192", + groqAPIKey: "", + groqTemperature: 1, + openaiAPIKey: "", + openaiBaseURL: "", + requestThreshold: 20, + defaultModel: "gpt-4o", + defaultTemperature: 1, + httpTemplates: [], + includeFilenameAsHeader: false, + includePropertiesInExtractedNotes: false, + includeLinkInExtractedNotes: false, + chatFormatString: `---\n# {{role}}\n\n{{content}}`, + enableAudioTriggeredCannolis: 
false, + deleteAudioFilesAfterAudioTriggeredCannolis: false, + autoScrollWithTokenStream: false, + pLimit: 50, + contentIsColorless: false, + valTownAPIKey: "", + exaAPIKey: "", + bakedCannoliFolder: "Baked Cannoli", + bakeLanguage: "typescript", + bakeRuntime: "node", + bakeIndent: "2", + seenVersion2Modal: false, + secrets: [], + enableVision: true, + onlyRunCannoliGroups: false, + tracingConfig: { + phoenix: { + enabled: false, + projectName: "cannoli", + baseUrl: "http://localhost:6006/", + apiKey: "", + } + } }; diff --git a/packages/cannoli-plugin/src/settings/settingsTab.ts b/packages/cannoli-plugin/src/settings/settingsTab.ts index d335005..7e962da 100644 --- a/packages/cannoli-plugin/src/settings/settingsTab.ts +++ b/packages/cannoli-plugin/src/settings/settingsTab.ts @@ -9,6 +9,7 @@ import { createSecretsSettings } from "./sections/secretsSettings"; import { createBakingSettings } from "./sections/bakingSettings"; import { createValTownSettings } from "./sections/valtownSettings"; import { createActionSettings } from "./sections/actionSettings"; +import { createTracingSettings } from "src/settings/sections/tracingSettings"; export class CannoliSettingTab extends PluginSettingTab { plugin: Cannoli; @@ -44,6 +45,8 @@ export class CannoliSettingTab extends PluginSettingTab { createLLMSettings(containerEl, this.plugin, this.display); + createTracingSettings(containerEl, this.plugin, this.display); + createCanvasSettings(containerEl, this.plugin); createNoteExtractionSettings(containerEl, this.plugin); diff --git a/packages/web-instrumentation-langchain/package.json b/packages/web-instrumentation-langchain/package.json new file mode 100644 index 0000000..f7f10e0 --- /dev/null +++ b/packages/web-instrumentation-langchain/package.json @@ -0,0 +1,45 @@ +{ + "name": "web-instrumentation-langchain", + "version": "0.1.0", + "description": "Opentelemetry Web instrumentation for Langchain", + "main": "dist/index.js", + "type": "module", + "types": "dist/index.d.ts", + 
"files": [ + "dist/**/*", + "package.json" + ], + "exports": { + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js", + "require": "./dist/index.cjs" + }, + "./package.json": "./package.json" + }, + "scripts": { + "build": "tsup src/index.ts --format cjs,esm --dts --treeshake --clean", + "dev": "tsup src/index.ts --format cjs,esm --dts --watch", + "typecheck": "tsc --noEmit" + }, + "keywords": [], + "author": "", + "license": "MIT", + "devDependencies": { + "@typescript-eslint/eslint-plugin": "^7.7.1", + "@typescript-eslint/parser": "^7.7.1", + "eslint": "^8.57.0", + "tsup": "^8.0.2", + "typescript": "^5.3.3" + }, + "dependencies": { + "@arizeai/openinference-core": "0.2.0", + "@arizeai/openinference-semantic-conventions": "0.10.0", + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/core": "^1.25.1", + "@opentelemetry/instrumentation": "^0.46.0" + }, + "peerDependencies": { + "@langchain/core": "^0.2.0" + } +} diff --git a/packages/web-instrumentation-langchain/src/index.ts b/packages/web-instrumentation-langchain/src/index.ts new file mode 100644 index 0000000..d2e29c7 --- /dev/null +++ b/packages/web-instrumentation-langchain/src/index.ts @@ -0,0 +1 @@ +export * from "./instrumentation"; diff --git a/packages/web-instrumentation-langchain/src/instrumentation.ts b/packages/web-instrumentation-langchain/src/instrumentation.ts new file mode 100644 index 0000000..2d45ed6 --- /dev/null +++ b/packages/web-instrumentation-langchain/src/instrumentation.ts @@ -0,0 +1,113 @@ +import * as CallbackManagerModule from "@langchain/core/callbacks/manager"; +import { + InstrumentationBase, + InstrumentationConfig, + isWrapped +} from "@opentelemetry/instrumentation"; +import { diag } from "@opentelemetry/api"; +import { addTracerToHandlers } from "./instrumentationUtils"; + +const MODULE_NAME = "@langchain/core/callbacks"; + +/** + * Flag to check if the openai module has been patched + * Note: This is a fallback in case the module is made immutable (e.x. 
Deno, webpack, etc.) + */ +let _isOpenInferencePatched = false; + +/** + * function to check if instrumentation is enabled / disabled + */ +export function isPatched() { + return _isOpenInferencePatched; +} + +export class LangChainInstrumentation extends InstrumentationBase { + constructor(config?: InstrumentationConfig) { + super( + "@arizeai/openinference-instrumentation-langchain", + "1.0.0", + Object.assign({}, config), + ); + } + + manuallyInstrument(module: typeof CallbackManagerModule) { + diag.debug(`Manually instrumenting ${MODULE_NAME}`); + this.patch(module); + } + + protected init(): void { + } + + enable() { + // this.manuallyInstrument(CallbackManagerModule); + } + + disable() { + // this.unpatch(CallbackManagerModule); + } + + private patch( + module: typeof CallbackManagerModule & { + openInferencePatched?: boolean; + }, + moduleVersion?: string, + ) { + diag.debug( + `Applying patch for ${MODULE_NAME}${moduleVersion != null ? `@${moduleVersion}` : "" + }`, + ); + if (module?.openInferencePatched || _isOpenInferencePatched) { + return module; + } + // eslint-disable-next-line @typescript-eslint/no-this-alias + const instrumentation = this; + + this._wrap(module.CallbackManager, "configure", (original) => { + return function ( + this: typeof module.CallbackManager, + ...args: Parameters< + (typeof module.CallbackManager)["configure"] + > + ) { + const handlers = args[0]; + const newHandlers = addTracerToHandlers( + instrumentation.tracer, + handlers, + ); + args[0] = newHandlers; + + return original.apply(this, args); + }; + }); + _isOpenInferencePatched = true; + try { + // This can fail if the module is made immutable via the runtime or bundler + module.openInferencePatched = true; + } catch (e) { + diag.warn(`Failed to set ${MODULE_NAME} patched flag on the module`, e); + } + + return module; + } + + private unpatch( + module?: typeof CallbackManagerModule & { + openInferencePatched?: boolean; + }, + moduleVersion?: string, + ) { + if (module == 
null) { + return; + } + diag.debug( + `Removing patch for ${MODULE_NAME}${moduleVersion != null ? `@${moduleVersion}` : "" + }`, + ); + if (isWrapped(module.CallbackManager.configure)) { + this._unwrap(module.CallbackManager, "configure"); + } + + return module; + } +} diff --git a/packages/web-instrumentation-langchain/src/instrumentationUtils.ts b/packages/web-instrumentation-langchain/src/instrumentationUtils.ts new file mode 100644 index 0000000..f6fab0f --- /dev/null +++ b/packages/web-instrumentation-langchain/src/instrumentationUtils.ts @@ -0,0 +1,56 @@ +import type * as CallbackManagerModuleV02 from "@langchain/core/callbacks/manager"; +import type * as CallbackManagerModuleV01 from "@langchain/coreV0.1/callbacks/manager"; +import { Tracer } from "@opentelemetry/api"; +import { LangChainTracer } from "./tracer"; + +/** + * Adds the {@link LangChainTracer} to the callback handlers if it is not already present + * @param tracer the {@link tracer} to pass into the {@link LangChainTracer} when added to handlers + * @param handlers the LangChain callback handlers which may be an array of handlers or a CallbackManager + * @returns the callback handlers with the {@link LangChainTracer} added + * + * If the handlers are an array, we add the tracer to the array if it is not already present + * + * There are some slight differences in the CallbackHandler interface between V0.1 and v0.2 + * So we have to cast our tracer to any to avoid type errors + * We support both versions and our tracer is compatible with either as it will extend the BaseTracer from the installed version which will be the same as the version of handlers passed in here + */ +export function addTracerToHandlers( + tracer: Tracer, + handlers?: CallbackManagerModuleV01.Callbacks, +): CallbackManagerModuleV01.Callbacks; +export function addTracerToHandlers( + tracer: Tracer, + handlers?: CallbackManagerModuleV02.Callbacks, +): CallbackManagerModuleV02.Callbacks; +export function addTracerToHandlers( + 
tracer: Tracer, + handlers?: + | CallbackManagerModuleV01.Callbacks + | CallbackManagerModuleV02.Callbacks, +): CallbackManagerModuleV01.Callbacks | CallbackManagerModuleV02.Callbacks { + if (handlers == null) { + return [new LangChainTracer(tracer)]; + } + if (Array.isArray(handlers)) { + const tracerAlreadyRegistered = handlers.some( + (handler) => handler instanceof LangChainTracer, + ); + if (!tracerAlreadyRegistered) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + handlers.push(new LangChainTracer(tracer) as any); + } + return handlers; + } + const tracerAlreadyRegistered = + handlers.inheritableHandlers.some( + (handler) => handler instanceof LangChainTracer, + ) || + handlers.handlers.some((handler) => handler instanceof LangChainTracer); + if (tracerAlreadyRegistered) { + return handlers; + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any + handlers.addHandler(new LangChainTracer(tracer) as any, true); + return handlers; +} diff --git a/packages/web-instrumentation-langchain/src/tracer.ts b/packages/web-instrumentation-langchain/src/tracer.ts new file mode 100644 index 0000000..2f42fee --- /dev/null +++ b/packages/web-instrumentation-langchain/src/tracer.ts @@ -0,0 +1,157 @@ +import { BaseTracer, Run } from "@langchain/core/tracers/base"; +import { + Tracer, + SpanKind, + Span, + context, + trace, + SpanStatusCode, +} from "@opentelemetry/api"; +import { isTracingSuppressed } from "@opentelemetry/core"; +import { SemanticConventions } from "@arizeai/openinference-semantic-conventions"; +import { + safelyFlattenAttributes, + safelyFormatFunctionCalls, + safelyFormatIO, + safelyFormatInputMessages, + safelyFormatLLMParams, + safelyFormatMetadata, + safelyFormatOutputMessages, + safelyFormatPromptTemplate, + safelyFormatRetrievalDocuments, + safelyFormatTokenCounts, + safelyFormatToolCalls, + safelyGetOpenInferenceSpanKindFromRunType, +} from "./utils"; + +type RunWithSpan = { + run: Run; + span: Span; +}; + +export class 
LangChainTracer extends BaseTracer { + private tracer: Tracer; + private runs: Record = {}; + constructor(tracer: Tracer) { + super(); + this.tracer = tracer; + } + name: string = "OpenInferenceLangChainTracer"; + protected persistRun(_run: Run): Promise { + return Promise.resolve(); + } + + /** + * Called when a new run is created on v0.1.0 of langchain see {@link BaseTracer} + * @param run the langchain {@link Run} object + * + * This method is only available on langchain ^0.1.0 BaseTracer and has been replaced in 0.2 by onRunCreate + * we support both 0.1 and 0.2 so we need to check if the method exists on the super class before calling it + */ + protected async _startTrace(run: Run) { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + if (typeof super._startTrace === "function") { + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + await super._startTrace(run); + } + await this.startTracing(run); + } + + /** + * Called when a new run is created on v0.2.0 of langchain see {@link BaseTracer} + * @param run the langchain {@link Run} object + * + * This method is only available on the langchain ^0.2.0 {@link BaseTracer} + */ + async onRunCreate(run: Run) { + if (typeof super.onRunCreate === "function") { + await super.onRunCreate(run); + } + await this.startTracing(run); + } + + async startTracing(run: Run) { + if (isTracingSuppressed(context.active())) { + return; + } + + /** + * If the parent span context is available, use it as the active context for the new span. + * This will allow the new span to be a child of the parent span. 
+ */ + let activeContext = context.active(); + const parentCtx = this.getParentSpanContext(run); + if (parentCtx != null) { + activeContext = trace.setSpanContext(context.active(), parentCtx); + } + + const span = this.tracer.startSpan( + run.name, + { + kind: SpanKind.INTERNAL, + attributes: { + [SemanticConventions.OPENINFERENCE_SPAN_KIND]: + safelyGetOpenInferenceSpanKindFromRunType(run.run_type) ?? + undefined, + }, + }, + activeContext, + ); + + this.runs[run.id] = { run, span }; + } + + protected async _endTrace(run: Run) { + await super._endTrace(run); + if (isTracingSuppressed(context.active())) { + return; + } + const runWithSpan = this.runs[run.id]; + if (!runWithSpan) { + return; + } + const { span } = runWithSpan; + if (run.error != null) { + span.setStatus({ + code: SpanStatusCode.ERROR, + message: run.error, + }); + } else { + span.setStatus({ code: SpanStatusCode.OK }); + } + + const attributes = safelyFlattenAttributes({ + ...safelyFormatIO({ io: run.inputs, ioType: "input" }), + ...safelyFormatIO({ io: run.outputs, ioType: "output" }), + ...safelyFormatInputMessages(run.inputs), + ...safelyFormatOutputMessages(run.outputs), + ...safelyFormatRetrievalDocuments(run), + ...safelyFormatLLMParams(run.extra), + ...safelyFormatPromptTemplate(run), + ...safelyFormatTokenCounts(run.outputs), + ...safelyFormatFunctionCalls(run.outputs), + ...safelyFormatToolCalls(run), + ...safelyFormatMetadata(run), + }); + if (attributes != null) { + span.setAttributes(attributes); + } + + runWithSpan.span.end(); + delete this.runs[run.id]; + } + + private getParentSpanContext(run: Run) { + if (run.parent_run_id == null) { + return; + } + const maybeParent = this.runs[run.parent_run_id]; + if (maybeParent == null) { + return; + } + + return maybeParent.span.spanContext(); + } +} diff --git a/packages/web-instrumentation-langchain/src/typeUtils.ts b/packages/web-instrumentation-langchain/src/typeUtils.ts new file mode 100644 index 0000000..d45f033 --- /dev/null +++ 
b/packages/web-instrumentation-langchain/src/typeUtils.ts @@ -0,0 +1,30 @@ +/** + * Utility function that uses the type system to check if a switch statement is exhaustive. + * If the switch statement is not exhaustive, there will be a type error caught in typescript + * + * See https://stackoverflow.com/questions/39419170/how-do-i-check-that-a-switch-block-is-exhaustive-in-typescript for more details. + */ +export function assertUnreachable(_: never): never { + throw new Error("Unreachable"); +} + +/** + * A type-guard function for checking if a value is an object + */ +export function isObject( + value: unknown, +): value is Record { + return typeof value === "object" && value != null && !Array.isArray(value); +} + +export function isString(value: unknown): value is string { + return typeof value === "string"; +} + +export function isNumber(value: unknown): value is number { + return typeof value === "number"; +} + +export function isNonEmptyArray(value: unknown): value is unknown[] { + return value != null && Array.isArray(value) && value.length > 0; +} diff --git a/packages/web-instrumentation-langchain/src/types.ts b/packages/web-instrumentation-langchain/src/types.ts new file mode 100644 index 0000000..a63110c --- /dev/null +++ b/packages/web-instrumentation-langchain/src/types.ts @@ -0,0 +1,54 @@ +import { SemanticConventions } from "@arizeai/openinference-semantic-conventions"; + +type LLMMessageToolCall = { + [SemanticConventions.TOOL_CALL_FUNCTION_NAME]?: string; + [SemanticConventions.TOOL_CALL_FUNCTION_ARGUMENTS_JSON]?: string; +}; + +export type LLMMessageToolCalls = { + [SemanticConventions.MESSAGE_TOOL_CALLS]?: LLMMessageToolCall[]; +}; + +export type LLMMessageFunctionCall = { + [SemanticConventions.MESSAGE_FUNCTION_CALL_NAME]?: string; + [SemanticConventions.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON]?: string; +}; + +export type LLMMessage = LLMMessageToolCalls & + LLMMessageFunctionCall & { + [SemanticConventions.MESSAGE_ROLE]?: string; + 
[SemanticConventions.MESSAGE_CONTENT]?: string; + }; + +export type LLMMessagesAttributes = + | { + [SemanticConventions.LLM_INPUT_MESSAGES]: LLMMessage[]; + } + | { + [SemanticConventions.LLM_OUTPUT_MESSAGES]: LLMMessage[]; + }; + +export type RetrievalDocument = { + [SemanticConventions.DOCUMENT_CONTENT]?: string; + [SemanticConventions.DOCUMENT_METADATA]?: string; +}; + +export type LLMParameterAttributes = { + [SemanticConventions.LLM_MODEL_NAME]?: string; + [SemanticConventions.LLM_INVOCATION_PARAMETERS]?: string; +}; + +export type PromptTemplateAttributes = { + [SemanticConventions.PROMPT_TEMPLATE_TEMPLATE]?: string; + [SemanticConventions.PROMPT_TEMPLATE_VARIABLES]?: string; +}; +export type TokenCountAttributes = { + [SemanticConventions.LLM_TOKEN_COUNT_COMPLETION]?: number; + [SemanticConventions.LLM_TOKEN_COUNT_PROMPT]?: number; + [SemanticConventions.LLM_TOKEN_COUNT_TOTAL]?: number; +}; + +export type ToolAttributes = { + [SemanticConventions.TOOL_NAME]?: string; + [SemanticConventions.TOOL_DESCRIPTION]?: string; +}; diff --git a/packages/web-instrumentation-langchain/src/utils.ts b/packages/web-instrumentation-langchain/src/utils.ts new file mode 100644 index 0000000..d3f7abd --- /dev/null +++ b/packages/web-instrumentation-langchain/src/utils.ts @@ -0,0 +1,643 @@ +import { Attributes, diag } from "@opentelemetry/api"; +import { + assertUnreachable, + isNonEmptyArray, + isNumber, + isObject, + isString, +} from "./typeUtils"; +import { isAttributeValue } from "@opentelemetry/core"; +import { + MimeType, + OpenInferenceSpanKind, + RetrievalAttributePostfixes, + SemanticAttributePrefixes, + SemanticConventions, +} from "@arizeai/openinference-semantic-conventions"; +import { Run } from "@langchain/core/tracers/base"; +import { + LLMMessage, + LLMMessageFunctionCall, + LLMMessageToolCalls, + LLMMessagesAttributes, + LLMParameterAttributes, + PromptTemplateAttributes, + RetrievalDocument, + TokenCountAttributes, + ToolAttributes, +} from "./types"; +import 
{ withSafety } from "@arizeai/openinference-core"; + +export const RETRIEVAL_DOCUMENTS = + `${SemanticAttributePrefixes.retrieval}.${RetrievalAttributePostfixes.documents}` as const; + +/** + * Handler for any unexpected errors that occur during processing. + */ +const onError = (message: string) => (error: unknown) => { + diag.warn( + `OpenInference-LangChain: error processing langchain run, falling back to null. ${message}. ${error}`, + ); +}; + +const safelyJSONStringify = withSafety({ + fn: JSON.stringify, + onError: onError("Error stringifying JSON"), +}); + +/** + * Flattens a nested object into a single level object with keys as dot-separated paths. + * Specifies elements in arrays with their index as part of the path. + * @param attributes - Nested attributes to flatten. + * @param baseKey - Base key to prepend to all keys. + * @returns Flattened attributes + */ +function flattenAttributes( + attributes: Record, + baseKey: string = "", +): Attributes { + const result: Attributes = {}; + for (const key in attributes) { + const newKey = baseKey ? `${baseKey}.${key}` : key; + const value = attributes[key]; + + if (value == null) { + continue; + } + + if (isObject(value)) { + Object.assign(result, flattenAttributes(value, newKey)); + } else if (Array.isArray(value)) { + value.forEach((item, index) => { + if (isObject(item)) { + Object.assign(result, flattenAttributes(item, `${newKey}.${index}`)); + } else { + result[`${newKey}.${index}`] = item; + } + }); + } else if (isAttributeValue(value)) { + result[newKey] = value; + } + } + return result; +} + +/** + * Gets the OpenInferenceSpanKind based on the langchain run type. + * @param runType - The langchain run type + * @returns The OpenInferenceSpanKind based on the langchain run type or "UNKNOWN". 
+ */ +function getOpenInferenceSpanKindFromRunType(runType: string) { + const normalizedRunType = runType.toUpperCase(); + if (normalizedRunType.includes("AGENT")) { + return OpenInferenceSpanKind.AGENT; + } + + if (normalizedRunType in OpenInferenceSpanKind) { + return OpenInferenceSpanKind[ + normalizedRunType as keyof typeof OpenInferenceSpanKind + ]; + } + return OpenInferenceSpanKind.CHAIN; +} + +/** + * Formats the input or output of a langchain run into OpenInference attributes for a span. + * @param ioConfig - The input or output of a langchain run and the type of IO + * @param ioConfig.io - The input or output of a langchain run + * @param ioConfig.ioType - The type of IO + * @returns The formatted input or output attributes for the span + */ +function formatIO({ + io, + ioType, +}: { + io: Run["inputs"] | Run["outputs"]; + ioType: "input" | "output"; +}) { + let valueAttribute: string; + let mimeTypeAttribute: string; + switch (ioType) { + case "input": { + valueAttribute = SemanticConventions.INPUT_VALUE; + mimeTypeAttribute = SemanticConventions.INPUT_MIME_TYPE; + break; + } + case "output": { + valueAttribute = SemanticConventions.OUTPUT_VALUE; + mimeTypeAttribute = SemanticConventions.OUTPUT_MIME_TYPE; + break; + } + default: + assertUnreachable(ioType); + } + if (io == null) { + return {}; + } + const values = Object.values(io); + if (values.length === 1 && typeof values[0] === "string") { + return { + [valueAttribute]: values[0], + [mimeTypeAttribute]: MimeType.TEXT, + }; + } + + return { + [valueAttribute]: safelyJSONStringify(io), + [mimeTypeAttribute]: MimeType.JSON, + }; +} + +/** + * Gets the role of a message from the langchain message data. 
+ * @param messageData - The langchain message data to extract the role from + * @returns The role of the message or null + */ +function getRoleFromMessageData( + messageData: Record, +): string | null { + const messageIds = messageData.lc_id; + if (!isNonEmptyArray(messageIds)) { + return null; + } + const langchainMessageClass = messageIds[messageIds.length - 1]; + const normalizedLangchainMessageClass = isString(langchainMessageClass) + ? langchainMessageClass.toLowerCase() + : ""; + if (normalizedLangchainMessageClass.includes("human")) { + return "user"; + } + if (normalizedLangchainMessageClass.includes("ai")) { + return "assistant"; + } + if (normalizedLangchainMessageClass.includes("system")) { + return "system"; + } + if (normalizedLangchainMessageClass.includes("function")) { + return "function"; + } + if ( + normalizedLangchainMessageClass.includes("chat") && + isObject(messageData.kwargs) && + isString(messageData.kwargs.role) + ) { + return messageData.kwargs.role; + } + return null; +} + +/** + * Gets the content of a message from the langchain message kwargs. + * @param messageKwargs - The langchain message kwargs to extract the content from + * @returns The content of the message or null + */ +function getContentFromMessageData( + messageKwargs: Record, +): string | null { + return isString(messageKwargs.content) ? messageKwargs.content : null; +} + +function getFunctionCallDataFromAdditionalKwargs( + additionalKwargs: Record, +): LLMMessageFunctionCall { + const functionCall = additionalKwargs.function_call; + if (!isObject(functionCall)) { + return {}; + } + const functionCallName = isString(functionCall.name) + ? functionCall.name + : undefined; + const functionCallArgs = isString(functionCall.args) + ? 
functionCall.args + : undefined; + return { + [SemanticConventions.MESSAGE_FUNCTION_CALL_NAME]: functionCallName, + [SemanticConventions.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON]: + functionCallArgs, + }; +} + +/** + * Gets the tool calls from the langchain message additional kwargs and formats them into OpenInference attributes. + * @param additionalKwargs - The langchain message additional kwargs to extract the tool calls from + * @returns the OpenInference attributes for the tool calls + */ +function getToolCallDataFromAdditionalKwargs( + additionalKwargs: Record, +): LLMMessageToolCalls { + const toolCalls = additionalKwargs.tool_calls; + if (!Array.isArray(toolCalls)) { + return {}; + } + const formattedToolCalls = toolCalls.map((toolCall) => { + if (!isObject(toolCall) || !isObject(toolCall.function)) { + return {}; + } + const toolCallName = isString(toolCall.function.name) + ? toolCall.function.name + : undefined; + const toolCallArgs = isString(toolCall.function.arguments) + ? toolCall.function.arguments + : undefined; + return { + [SemanticConventions.TOOL_CALL_FUNCTION_NAME]: toolCallName, + [SemanticConventions.TOOL_CALL_FUNCTION_ARGUMENTS_JSON]: toolCallArgs, + }; + }); + return { + [SemanticConventions.MESSAGE_TOOL_CALLS]: formattedToolCalls, + }; +} + +/** + * Parses a langchain message into OpenInference attributes. 
+ * @param messageData - The langchain message data to parse + * @returns The OpenInference attributes for the message + */ +function parseMessage(messageData: Record): LLMMessage { + const message: LLMMessage = {}; + + const maybeRole = getRoleFromMessageData(messageData); + if (maybeRole != null) { + message[SemanticConventions.MESSAGE_ROLE] = maybeRole; + } + + const messageKwargs = messageData.lc_kwargs; + if (!isObject(messageKwargs)) { + return message; + } + const maybeContent = getContentFromMessageData(messageKwargs); + if (maybeContent != null) { + message[SemanticConventions.MESSAGE_CONTENT] = maybeContent; + } + + const additionalKwargs = messageKwargs.additional_kwargs; + if (!isObject(additionalKwargs)) { + return message; + } + return { + ...message, + ...getFunctionCallDataFromAdditionalKwargs(additionalKwargs), + ...getToolCallDataFromAdditionalKwargs(additionalKwargs), + }; +} + +/** + * Formats the input messages of a langchain run into OpenInference attributes. + * @param input - The input of a langchain run. + * @returns The OpenInference attributes for the input messages. + */ +function formatInputMessages( + input: Run["inputs"], +): LLMMessagesAttributes | null { + const maybeMessages = input.messages; + if (!isNonEmptyArray(maybeMessages)) { + return null; + } + + // Only support the first 'set' of messages + const firstMessages = maybeMessages[0]; + if (!isNonEmptyArray(firstMessages)) { + return null; + } + + const parsedMessages: LLMMessage[] = []; + firstMessages.forEach((messageData) => { + if (!isObject(messageData)) { + return; + } + parsedMessages.push(parseMessage(messageData)); + }); + + if (parsedMessages.length > 0) { + return { [SemanticConventions.LLM_INPUT_MESSAGES]: parsedMessages }; + } + + return null; +} + +/** + * Gets the first generation of the output of a langchain run. + * @param output - The output of a langchain run. + * @returns The first generation of the output or null. 
+ */ +function getFirstOutputGeneration(output: Run["outputs"]) { + if (!isObject(output)) { + return null; + } + const maybeGenerations = output.generations; + if (!isNonEmptyArray(maybeGenerations)) { + return null; + } + // Only support the first 'set' of generations + const firstGeneration = maybeGenerations[0]; + if (!isNonEmptyArray(firstGeneration)) { + return null; + } + return firstGeneration; +} + +/** + * Formats the output messages of a langchain run into OpenInference attributes. + * @param output - The output of a langchain run. + * @returns The OpenInference attributes for the output messages. + */ +function formatOutputMessages( + output: Run["outputs"], +): LLMMessagesAttributes | null { + const firstGeneration = getFirstOutputGeneration(output); + if (firstGeneration == null) { + return null; + } + const parsedMessages: LLMMessage[] = []; + firstGeneration.forEach((generation) => { + if (!isObject(generation) || !isObject(generation.message)) { + return; + } + parsedMessages.push(parseMessage(generation.message)); + }); + + if (parsedMessages.length > 0) { + return { [SemanticConventions.LLM_OUTPUT_MESSAGES]: parsedMessages }; + } + + return null; +} + +/** + * Parses a langchain retrieval document into OpenInference attributes. + * @param document - The langchain retrieval document to parse + * @returns The OpenInference attributes for the retrieval document + */ +function parseRetrievalDocument(document: unknown) { + if (!isObject(document)) { + return null; + } + const parsedDocument: RetrievalDocument = {}; + if (isString(document.pageContent)) { + parsedDocument["document.content"] = document.pageContent; + } + if (isObject(document.metadata)) { + parsedDocument["document.metadata"] = + safelyJSONStringify(document.metadata) ?? undefined; + } + return parsedDocument; +} + +/** + * Formats the retrieval documents of a langchain run into OpenInference attributes. 
+ * @param run - The langchain run to extract the retrieval documents from + * @returns The OpenInference attributes for the retrieval documents. + */ +function formatRetrievalDocuments(run: Run) { + const normalizedRunType = run.run_type.toLowerCase(); + if (normalizedRunType !== "retriever") { + return null; + } + if (!isObject(run.outputs) || !Array.isArray(run.outputs.documents)) { + return null; + } + return { + [RETRIEVAL_DOCUMENTS]: run.outputs.documents + .map(parseRetrievalDocument) + .filter((doc) => doc != null), + }; +} + +/** + * Gets the model name from the langchain run extra data. + * @param runExtra - The extra data from a langchain run + * @returns The OpenInference attributes for the model name + */ +function formatLLMParams( + runExtra: Run["extra"], +): LLMParameterAttributes | null { + if (!isObject(runExtra) || !isObject(runExtra.invocation_params)) { + return null; + } + const openInferenceParams: LLMParameterAttributes = {}; + + openInferenceParams[SemanticConventions.LLM_INVOCATION_PARAMETERS] = + safelyJSONStringify(runExtra.invocation_params) ?? 
undefined; + + if (isString(runExtra.invocation_params.model_name)) { + openInferenceParams[SemanticConventions.LLM_MODEL_NAME] = + runExtra.invocation_params.model_name; + } else if (isString(runExtra.invocation_params.model)) { + openInferenceParams[SemanticConventions.LLM_MODEL_NAME] = + runExtra.invocation_params.model; + } + return openInferenceParams; +} + +function getTemplateFromSerialized(serialized: Run["serialized"]) { + if (!isObject(serialized) || !isObject(serialized.kwargs)) { + return null; + } + const messages = serialized.kwargs.messages; + if (!isNonEmptyArray(messages)) { + return null; + } + const firstMessage = messages[0]; + if (!isObject(firstMessage) || !isObject(firstMessage.prompt)) { + return null; + } + const template = firstMessage.prompt.template; + if (!isString(template)) { + return null; + } + return template; +} + +const safelyGetTemplateFromSerialized = withSafety({ + fn: getTemplateFromSerialized, +}); + +/** + * A best effort function to extract the prompt template from a langchain run. + * @param run - The langchain run to extract the prompt template from + * @returns The OpenInference attributes for the prompt template + */ +function formatPromptTemplate(run: Run): PromptTemplateAttributes | null { + if (run.run_type.toLowerCase() !== "prompt") { + return null; + } + return { + [SemanticConventions.PROMPT_TEMPLATE_VARIABLES]: + safelyJSONStringify(run.inputs) ?? undefined, + [SemanticConventions.PROMPT_TEMPLATE_TEMPLATE]: + safelyGetTemplateFromSerialized(run.serialized) ?? undefined, + }; +} + +function getTokenCount(maybeCount: unknown) { + return isNumber(maybeCount) ? maybeCount : undefined; +} + +/** + * Formats the token counts of a langchain run into OpenInference attributes. 
+ * @param outputs - The outputs of a langchain run + * @returns The OpenInference attributes for the token counts + */ +function formatTokenCounts( + outputs: Run["outputs"], +): TokenCountAttributes | null { + if (!isObject(outputs)) { + return null; + } + const llmOutput = outputs.llmOutput; + if (!isObject(llmOutput)) { + return null; + } + if (isObject(llmOutput.tokenUsage)) { + return { + [SemanticConventions.LLM_TOKEN_COUNT_COMPLETION]: getTokenCount( + llmOutput.tokenUsage.completionTokens, + ), + [SemanticConventions.LLM_TOKEN_COUNT_PROMPT]: getTokenCount( + llmOutput.tokenUsage.promptTokens, + ), + [SemanticConventions.LLM_TOKEN_COUNT_TOTAL]: getTokenCount( + llmOutput.tokenUsage.totalTokens, + ), + }; + } + /** + * In the case of streamed outputs, the token counts are not available + * only estimated counts provided by langchain (not the model provider) are available + */ + if (isObject(llmOutput.estimatedTokenUsage)) { + return { + [SemanticConventions.LLM_TOKEN_COUNT_COMPLETION]: getTokenCount( + llmOutput.estimatedTokenUsage.completionTokens, + ), + [SemanticConventions.LLM_TOKEN_COUNT_PROMPT]: getTokenCount( + llmOutput.estimatedTokenUsage.promptTokens, + ), + [SemanticConventions.LLM_TOKEN_COUNT_TOTAL]: getTokenCount( + llmOutput.estimatedTokenUsage.totalTokens, + ), + }; + } + return null; +} + +/** + * Formats the function calls of a langchain run into OpenInference attributes. 
+ * @param outputs - The outputs of a langchain run + * @returns The OpenInference attributes for the function calls + */ +function formatFunctionCalls(outputs: Run["outputs"]) { + const firstGeneration = getFirstOutputGeneration(outputs); + if (firstGeneration == null) { + return null; + } + const maybeGeneration = firstGeneration[0]; + if (!isObject(maybeGeneration) || !isObject(maybeGeneration.message)) { + return null; + } + + const additionalKwargs = maybeGeneration.message.additional_kwargs; + + if ( + !isObject(additionalKwargs) || + !isObject(additionalKwargs.function_call) + ) { + return null; + } + + return { + [SemanticConventions.LLM_FUNCTION_CALL]: safelyJSONStringify( + additionalKwargs.function_call, + ), + }; +} + +/** + * Formats the tool calls of a langchain run into OpenInference attributes. + * @param run - The langchain run to extract the tool calls from + * @returns The OpenInference attributes for the tool calls + */ +function formatToolCalls(run: Run) { + const normalizedRunType = run.run_type.toLowerCase(); + if (normalizedRunType !== "tool") { + return null; + } + const toolAttributes: ToolAttributes = { + [SemanticConventions.TOOL_NAME]: run.name, + }; + if (!isObject(run.serialized)) { + return toolAttributes; + } + if (isString(run.serialized.name)) { + toolAttributes[SemanticConventions.TOOL_NAME] = run.serialized.name; + } + if (isString(run.serialized.description)) { + toolAttributes[SemanticConventions.TOOL_DESCRIPTION] = + run.serialized.description; + } + return toolAttributes; +} + +/** + * Formats the metadata of a langchain run into OpenInference attributes. 
+ * @param run - The langchain run to extract the metadata from + * @returns The OpenInference attributes for the metadata + */ +function formatMetadata(run: Run) { + if (!isObject(run.extra) || !isObject(run.extra.metadata)) { + return null; + } + return { + metadata: safelyJSONStringify(run.extra.metadata), + }; +} + +export const safelyFlattenAttributes = withSafety({ + fn: flattenAttributes, + onError: onError("Error flattening attributes"), +}); +export const safelyFormatIO = withSafety({ + fn: formatIO, + onError: onError("Error formatting IO"), +}); +export const safelyFormatInputMessages = withSafety({ + fn: formatInputMessages, + onError: onError("Error formatting input messages"), +}); +export const safelyFormatOutputMessages = withSafety({ + fn: formatOutputMessages, + onError: onError("Error formatting output messages"), +}); +export const safelyGetOpenInferenceSpanKindFromRunType = withSafety({ + fn: getOpenInferenceSpanKindFromRunType, + onError: onError("Error getting OpenInference span kind from run type"), +}); +export const safelyFormatRetrievalDocuments = withSafety({ + fn: formatRetrievalDocuments, + onError: onError("Error formatting retrieval documents"), +}); +export const safelyFormatLLMParams = withSafety({ + fn: formatLLMParams, + onError: onError("Error formatting LLM params"), +}); +export const safelyFormatPromptTemplate = withSafety({ + fn: formatPromptTemplate, + onError: onError("Error formatting prompt template"), +}); +export const safelyFormatTokenCounts = withSafety({ + fn: formatTokenCounts, + onError: onError("Error formatting token counts"), +}); +export const safelyFormatFunctionCalls = withSafety({ + fn: formatFunctionCalls, + onError: onError("Error formatting function calls"), +}); +export const safelyFormatToolCalls = withSafety({ + fn: formatToolCalls, + onError: onError("Error formatting tool calls"), +}); +export const safelyFormatMetadata = withSafety({ + fn: formatMetadata, + onError: onError("Error formatting 
metadata"), +}); diff --git a/packages/web-instrumentation-langchain/tsconfig.json b/packages/web-instrumentation-langchain/tsconfig.json new file mode 100644 index 0000000..376e9c0 --- /dev/null +++ b/packages/web-instrumentation-langchain/tsconfig.json @@ -0,0 +1,26 @@ +// extend from the root tsconfig.json +{ + "compilerOptions": { + "baseUrl": ".", + "inlineSourceMap": true, + "inlineSources": true, + "module": "ESNext", + "target": "ES2017", + "allowJs": true, + "noImplicitAny": true, + "moduleResolution": "node", + "importHelpers": true, + "isolatedModules": true, + "strictNullChecks": true, + "lib": [ + "DOM", + "ES5", + "ES6", + "ES7" + ], + "skipLibCheck": true, + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} + diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c7ef98c..fd95fbe 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -17,6 +17,12 @@ importers: packages/cannoli-core: dependencies: + '@arizeai/openinference-instrumentation-langchain': + specifier: 0.2.0 + version: 0.2.0(@langchain/core@0.2.7(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0)) + '@arizeai/openinference-semantic-conventions': + specifier: 0.10.0 + version: 0.10.0 '@langchain/anthropic': specifier: 0.2.1 version: 0.2.1(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) @@ -35,6 +41,18 @@ importers: '@langchain/openai': specifier: 0.1.3 version: 0.1.3(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0)) + '@opentelemetry/exporter-trace-otlp-proto': + specifier: 0.53.0 + version: 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': + specifier: 0.53.0 + version: 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': + specifier: 1.26.0 + version: 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-web': + specifier: 1.26.0 + version: 1.26.0(@opentelemetry/api@1.9.0) js-yaml: specifier: ^4.1.0 version: 4.1.0 @@ 
-62,6 +80,9 @@ importers: tsup: specifier: ^8.0.2 version: 8.0.2(ts-node@10.9.2(@types/node@16.18.97)(typescript@5.4.5))(typescript@5.4.5) + web-instrumentation-langchain: + specifier: workspace:* + version: link:../web-instrumentation-langchain zod: specifier: 3.23.8 version: 3.23.8 @@ -153,7 +174,7 @@ importers: version: 8.57.0 obsidian: specifier: latest - version: 1.5.7-1(@codemirror/state@6.4.1)(@codemirror/view@6.26.3) + version: 1.7.2(@codemirror/state@6.4.1)(@codemirror/view@6.26.3) tslib: specifier: 2.4.0 version: 2.4.0 @@ -161,11 +182,61 @@ importers: specifier: ^5.3.3 version: 5.4.5 + packages/web-instrumentation-langchain: + dependencies: + '@arizeai/openinference-core': + specifier: 0.2.0 + version: 0.2.0(@opentelemetry/api@1.9.0) + '@arizeai/openinference-semantic-conventions': + specifier: 0.10.0 + version: 0.10.0 + '@langchain/core': + specifier: ^0.2.0 + version: 0.2.7(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) + '@opentelemetry/api': + specifier: ^1.9.0 + version: 1.9.0 + '@opentelemetry/core': + specifier: ^1.25.1 + version: 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': + specifier: ^0.46.0 + version: 0.46.0(@opentelemetry/api@1.9.0) + devDependencies: + '@typescript-eslint/eslint-plugin': + specifier: ^7.7.1 + version: 7.10.0(@typescript-eslint/parser@7.10.0(eslint@8.57.0)(typescript@5.4.5))(eslint@8.57.0)(typescript@5.4.5) + '@typescript-eslint/parser': + specifier: ^7.7.1 + version: 7.10.0(eslint@8.57.0)(typescript@5.4.5) + eslint: + specifier: ^8.57.0 + version: 8.57.0 + tsup: + specifier: ^8.0.2 + version: 8.0.2(ts-node@10.9.2(@types/node@18.19.33)(typescript@5.4.5))(typescript@5.4.5) + typescript: + specifier: ^5.3.3 + version: 5.4.5 + packages: '@anthropic-ai/sdk@0.21.1': resolution: {integrity: sha512-fqdt74RTdplnaFOYhwNjjK/Ec09Dqv9ekYr7PuC6GdhV1RWkziqbpJBewn42CYYqCr92JeX6g+IXVgXmq9l7XQ==} + '@arizeai/openinference-core@0.2.0': + resolution: {integrity: 
sha512-ukRuOw8rVHDdlZn1Zq60399qsq51KOniwZ2UbSGEoVWfm/FFuHC2LiJYQ5pv+SJMaQ0SAVQBw85o3gC/SkeC/g==} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.9.0' + + '@arizeai/openinference-instrumentation-langchain@0.2.0': + resolution: {integrity: sha512-pBAy2w2KXB/AvyDGoniFSYqqVaxgNEi/VAUeidQCnkq6k+ybd88eyP9Pf6mGAjYNst8XPsf8CR/7e7b/y7Gecg==} + peerDependencies: + '@langchain/core': ^0.1.0 || ^0.2.0 + + '@arizeai/openinference-semantic-conventions@0.10.0': + resolution: {integrity: sha512-1HC3YEEQpDOp2ZYe2V3zdPhamTdZ1DZ9Km5iskKWLRMVZkswPty8GkOHYaSMUVd57UtFQt9WtF0FTH6moyyU4Q==} + '@codemirror/language@https://codeload.github.com/lishid/cm-language/tar.gz/2644bfc27afda707a7e1f3aedaf3ca7120f63cd9': resolution: {tarball: https://codeload.github.com/lishid/cm-language/tar.gz/2644bfc27afda707a7e1f3aedaf3ca7120f63cd9} version: 6.10.1 @@ -503,6 +574,7 @@ packages: '@humanwhocodes/config-array@0.11.14': resolution: {integrity: sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==} engines: {node: '>=10.10.0'} + deprecated: Use @eslint/config-array instead '@humanwhocodes/module-importer@1.0.1': resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} @@ -510,6 +582,7 @@ packages: '@humanwhocodes/object-schema@2.0.3': resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==} + deprecated: Use @eslint/object-schema instead '@isaacs/cliui@8.0.2': resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} @@ -955,10 +1028,118 @@ packages: resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} engines: {node: '>= 8'} + '@opentelemetry/api-logs@0.53.0': + resolution: {integrity: sha512-8HArjKx+RaAI8uEIgcORbZIPklyh1YLjPSBus8hjRmvLi6DeFzgOcdZ7KwPabKj8mXF8dX0hyfAyGfycz0DbFw==} + engines: 
{node: '>=14'} + + '@opentelemetry/api@1.9.0': + resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} + engines: {node: '>=8.0.0'} + + '@opentelemetry/core@1.26.0': + resolution: {integrity: sha512-1iKxXXE8415Cdv0yjG3G6hQnB5eVEsJce3QaawX8SjDn0mAS0ZM8fAbZZJD4ajvhC15cePvosSCut404KrIIvQ==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/exporter-trace-otlp-proto@0.53.0': + resolution: {integrity: sha512-T/bdXslwRKj23S96qbvGtaYOdfyew3TjPEKOk5mHjkCmkVl1O9C/YMdejwSsdLdOq2YW30KjR9kVi0YMxZushQ==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + + '@opentelemetry/instrumentation@0.46.0': + resolution: {integrity: sha512-a9TijXZZbk0vI5TGLZl+0kxyFfrXHhX6Svtz7Pp2/VBlCSKrazuULEyoJQrOknJyFWNMEmbbJgOciHCCpQcisw==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/instrumentation@0.53.0': + resolution: {integrity: sha512-DMwg0hy4wzf7K73JJtl95m/e0boSoWhH07rfvHvYzQtBD3Bmv0Wc1x733vyZBqmFm8OjJD0/pfiUg1W3JjFX0A==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/otlp-exporter-base@0.53.0': + resolution: {integrity: sha512-UCWPreGQEhD6FjBaeDuXhiMf6kkBODF0ZQzrk/tuQcaVDJ+dDQ/xhJp192H9yWnKxVpEjFrSSLnpqmX4VwX+eA==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + + '@opentelemetry/otlp-transformer@0.53.0': + resolution: {integrity: sha512-rM0sDA9HD8dluwuBxLetUmoqGJKSAbWenwD65KY9iZhUxdBHRLrIdrABfNDP7aiTjcgK8XFyTn5fhDz7N+W6DA==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': ^1.3.0 + + '@opentelemetry/resources@1.26.0': + resolution: {integrity: sha512-CPNYchBE7MBecCSVy0HKpUISEeJOniWqcHaAHpmasZ3j9o6V3AyBzhRc90jdmemq0HOxDr6ylhUbDhBqqPpeNw==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/sdk-logs@0.53.0': + resolution: {integrity: 
sha512-dhSisnEgIj/vJZXZV6f6KcTnyLDx/VuQ6l3ejuZpMpPlh9S1qMHiZU9NMmOkVkwwHkMy3G6mEBwdP23vUZVr4g==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.4.0 <1.10.0' + + '@opentelemetry/sdk-metrics@1.26.0': + resolution: {integrity: sha512-0SvDXmou/JjzSDOjUmetAAvcKQW6ZrvosU0rkbDGpXvvZN+pQF6JbK/Kd4hNdK4q/22yeruqvukXEJyySTzyTQ==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-trace-base@1.26.0': + resolution: {integrity: sha512-olWQldtvbK4v22ymrKLbIcBi9L2SpMO84sCPY54IVsJhP9fRsxJT194C/AVaAuJzLE30EdhhM1VmvVYR7az+cw==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/sdk-trace-web@1.26.0': + resolution: {integrity: sha512-sxeKPcG/gUyxZ8iB8X1MI8/grfSCGgo1n2kxOE73zjVaO9yW/7JuVC3gqUaWRjtZ6VD/V3lo2/ZSwMlm6n2mdg==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/semantic-conventions@1.27.0': + resolution: {integrity: sha512-sAay1RrB+ONOem0OZanAR1ZI/k7yDpnOQSQmTMuGImUQb2y8EbSaCJ94FQluM74xoU03vlb2d2U90hZluL6nQg==} + engines: {node: '>=14'} + '@pkgjs/parseargs@0.11.0': resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} engines: {node: '>=14'} + '@protobufjs/aspromise@1.1.2': + resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} + + '@protobufjs/base64@1.1.2': + resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} + + '@protobufjs/codegen@2.0.4': + resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} + + '@protobufjs/eventemitter@1.1.0': + resolution: {integrity: sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} + + '@protobufjs/fetch@1.1.0': + resolution: 
{integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} + + '@protobufjs/float@1.0.2': + resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} + + '@protobufjs/inquire@1.1.0': + resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} + + '@protobufjs/path@1.1.2': + resolution: {integrity: sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} + + '@protobufjs/pool@1.1.0': + resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} + + '@protobufjs/utf8@1.1.0': + resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} + '@rollup/rollup-android-arm-eabi@4.18.0': resolution: {integrity: sha512-Tya6xypR10giZV1XzxmH5wr25VcZSncG0pZIjfePT0OVBvqNEurzValetGNarVrGiq66EBVAFn15iYX4w6FKgQ==} cpu: [arm] @@ -1087,6 +1268,9 @@ packages: '@types/retry@0.12.0': resolution: {integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==} + '@types/shimmer@1.2.0': + resolution: {integrity: sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==} + '@types/tern@0.23.9': resolution: {integrity: sha512-ypzHFE/wBzh+BlH6rrBgS5I/Z7RD21pGhZ2rltb/+ZrVM1awdZwjx7hE5XfuYgHWk9uvV5HLZN3SloevCAp3Bw==} @@ -1234,6 +1418,16 @@ packages: resolution: {integrity: sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==} engines: {node: '>=6.5'} + acorn-import-assertions@1.9.0: + resolution: {integrity: sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==} + peerDependencies: + acorn: ^8 + + acorn-import-attributes@1.9.5: + resolution: {integrity: 
sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==} + peerDependencies: + acorn: ^8 + acorn-jsx@5.3.2: resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} peerDependencies: @@ -1314,8 +1508,8 @@ packages: binary-search@1.3.6: resolution: {integrity: sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA==} - binaryen@118.0.0: - resolution: {integrity: sha512-KzekjPjpLE1zk29BKQSHNWLSHPYAfa80lcsIi5bDnev8vyfDyiMCVFPjaplhfXIKs7LI3r1RPyhoAj4qsRQwwg==} + binaryen@119.0.0: + resolution: {integrity: sha512-DTdcs8ijrj2OIEftWVPVkYsgJ8MzlYH+uSsC8156g88E7CNaG8kEfWNGSXxb3tPlzadrm6sD3mgSEKKZJu4Q3g==} hasBin: true brace-expansion@1.1.11: @@ -1361,6 +1555,9 @@ packages: resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} engines: {node: '>= 8.10.0'} + cjs-module-lexer@1.4.1: + resolution: {integrity: sha512-cuSVIHi9/9E/+821Qjdvngor+xpnlwnuwIyZOaLmHBVdXL+gP+I6QQB9VkO7RI77YIcTV+S1W9AreJ5eN63JBA==} + color-convert@2.0.1: resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} engines: {node: '>=7.0.0'} @@ -1405,6 +1602,15 @@ packages: supports-color: optional: true + debug@4.3.7: + resolution: {integrity: sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + decamelize@1.2.0: resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} engines: {node: '>=0.10.0'} @@ -1592,6 +1798,9 @@ packages: engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} os: [darwin] + function-bind@1.1.2: + resolution: {integrity: 
sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + get-stream@6.0.1: resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} engines: {node: '>=10'} @@ -1611,6 +1820,7 @@ packages: glob@7.2.3: resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + deprecated: Glob versions prior to v9 are no longer supported globals@13.24.0: resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} @@ -1630,6 +1840,10 @@ packages: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} engines: {node: '>=8'} + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + human-signals@2.1.0: resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} engines: {node: '>=10.17.0'} @@ -1648,6 +1862,12 @@ packages: resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} engines: {node: '>=6'} + import-in-the-middle@1.11.2: + resolution: {integrity: sha512-gK6Rr6EykBcc6cVWRSBR5TWf8nn6hZMYSRYqCcHa0l0d1fPK7JSYo6+Mlmck76jIX9aL/IZ71c06U2VpFwl1zA==} + + import-in-the-middle@1.7.1: + resolution: {integrity: sha512-1LrZPDtW+atAxH42S6288qyDFNQ2YCty+2mxEPRtfazH6Z5QwkaBSTS2ods7hnVJioF6rkRfNoA6A/MstpFXLg==} + imurmurhash@0.1.4: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} @@ -1669,6 +1889,10 @@ packages: is-buffer@1.1.6: resolution: {integrity: sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==} + is-core-module@2.15.1: + 
resolution: {integrity: sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==} + engines: {node: '>= 0.4'} + is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} @@ -2105,6 +2329,9 @@ packages: lodash@4.17.21: resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + long@5.2.3: + resolution: {integrity: sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==} + lru-cache@10.2.2: resolution: {integrity: sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==} engines: {node: 14 || >=16.14} @@ -2178,6 +2405,9 @@ packages: ml-tree-similarity@1.0.0: resolution: {integrity: sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg==} + module-details-from-path@1.0.3: + resolution: {integrity: sha512-ySViT69/76t8VhE1xXHK6Ch4NcDd26gx0MzKXLO+F7NOtnqH68d9zF94nT8ZWSxXh8ELOERsnJO/sWt1xZYw5A==} + moment@2.29.4: resolution: {integrity: sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==} @@ -2247,15 +2477,15 @@ packages: obsidian-dataview@0.5.66: resolution: {integrity: sha512-5hYF1p4RnBpKS0PqxNJjZDoqAk2XrDZ6H3EQ+JOwcQwkoTMbMPMOH1sw9zg1hV5ZWbh+/uAPCRAI/nmvr68yxw==} - obsidian@1.5.7-1: - resolution: {integrity: sha512-T5ZRuQ1FnfXqEoakTTHVDYvzUEEoT8zSPnQCW31PVgYwG4D4tZCQfKHN2hTz1ifnCe8upvwa6mBTAP2WUA5Vng==} + obsidian@1.7.2: + resolution: {integrity: sha512-k9hN9brdknJC+afKr5FQzDRuEFGDKbDjfCazJwpgibwCAoZNYHYV8p/s3mM8I6AsnKrPKNXf8xGuMZ4enWelZQ==} peerDependencies: '@codemirror/state': ^6.0.0 '@codemirror/view': ^6.0.0 - obsidian@https://codeload.github.com/obsidianmd/obsidian-api/tar.gz/8be9ff1e1b708b9451f35ce2df33e3983e88565b: - resolution: {tarball: 
https://codeload.github.com/obsidianmd/obsidian-api/tar.gz/8be9ff1e1b708b9451f35ce2df33e3983e88565b} - version: 1.6.6 + obsidian@https://codeload.github.com/obsidianmd/obsidian-api/tar.gz/23947b58d372ea02225324308e31d36b4aa95869: + resolution: {tarball: https://codeload.github.com/obsidianmd/obsidian-api/tar.gz/23947b58d372ea02225324308e31d36b4aa95869} + version: 1.7.2 peerDependencies: '@codemirror/state': ^6.0.0 '@codemirror/view': ^6.0.0 @@ -2344,6 +2574,9 @@ packages: resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} engines: {node: '>=8'} + path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + path-scurry@1.11.1: resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==} engines: {node: '>=16 || 14 >=14.18'} @@ -2383,6 +2616,10 @@ packages: resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} engines: {node: '>= 0.8.0'} + protobufjs@7.4.0: + resolution: {integrity: sha512-mRUWCc3KUU4w1jU8sGxICXH/gNS94DvI1gxqDvBzhj1JpcsimQkYiOJfwsPUykUI5ZaspFbSgmBLER8IrQ3tqw==} + engines: {node: '>=12.0.0'} + punycode@2.3.1: resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} engines: {node: '>=6'} @@ -2397,6 +2634,10 @@ packages: remeda@1.61.0: resolution: {integrity: sha512-caKfSz9rDeSKBQQnlJnVW3mbVdFgxgGWQKq1XlFokqjf+hQD5gxutLGTTY2A/x24UxVyJe9gH5fAkFI63ULw4A==} + require-in-the-middle@7.4.0: + resolution: {integrity: sha512-X34iHADNbNDfr6OTStIAHWSAvvKQRYgLO6duASaVf7J2VA3lvmNYboAHOuLC2huav1IwgZJtyEcJCKVzFxOSMQ==} + engines: {node: '>=8.6.0'} + resolve-from@4.0.0: resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} engines: {node: '>=4'} @@ -2405,6 +2646,10 
@@ packages: resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==} engines: {node: '>=8'} + resolve@1.22.8: + resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} + hasBin: true + retry@0.13.1: resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} engines: {node: '>= 4'} @@ -2415,6 +2660,7 @@ packages: rimraf@3.0.2: resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + deprecated: Rimraf versions prior to v4 are no longer supported hasBin: true rimraf@5.0.7: @@ -2450,6 +2696,9 @@ packages: resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} engines: {node: '>=8'} + shimmer@1.2.1: + resolution: {integrity: sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==} + signal-exit@3.0.7: resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} @@ -2507,6 +2756,10 @@ packages: resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} engines: {node: '>=8'} + supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + svelte@3.35.0: resolution: {integrity: sha512-gknlZkR2sXheu/X+B7dDImwANVvK1R0QGQLd8CNIfxxGPeXBmePnxfzb6fWwTQRsYQG7lYkZXvpXJvxvpsoB7g==} engines: {node: '>= 8'} @@ -2755,6 +3008,25 @@ snapshots: transitivePeerDependencies: - encoding + '@arizeai/openinference-core@0.2.0(@opentelemetry/api@1.9.0)': + dependencies: + '@arizeai/openinference-semantic-conventions': 0.10.0 + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 
1.26.0(@opentelemetry/api@1.9.0) + + '@arizeai/openinference-instrumentation-langchain@0.2.0(@langchain/core@0.2.7(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0))': + dependencies: + '@arizeai/openinference-core': 0.2.0(@opentelemetry/api@1.9.0) + '@arizeai/openinference-semantic-conventions': 0.10.0 + '@langchain/core': 0.2.7(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/instrumentation': 0.46.0(@opentelemetry/api@1.9.0) + transitivePeerDependencies: + - supports-color + + '@arizeai/openinference-semantic-conventions@0.10.0': {} + '@codemirror/language@https://codeload.github.com/lishid/cm-language/tar.gz/2644bfc27afda707a7e1f3aedaf3ca7120f63cd9': dependencies: '@codemirror/state': 6.4.1 @@ -3056,7 +3328,7 @@ snapshots: flat: 5.0.2 js-yaml: 4.1.0 langchain: 0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0) - langsmith: 0.1.32(@langchain/core@0.2.7(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0))(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) + langsmith: 0.1.32(@langchain/core@0.2.7(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0))(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) uuid: 9.0.1 zod: 3.23.8 zod-to-json-schema: 3.23.0(zod@3.23.8) @@ -3098,7 +3370,7 @@ snapshots: camelcase: 6.3.0 decamelize: 1.2.0 js-tiktoken: 1.0.12 - langsmith: 0.1.32(@langchain/core@0.2.7(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0))(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) + langsmith: 
0.1.32(@langchain/core@0.2.7(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0))(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) ml-distance: 4.0.1 mustache: 4.2.0 p-queue: 6.6.2 @@ -3220,9 +3492,127 @@ snapshots: '@nodelib/fs.scandir': 2.1.5 fastq: 1.17.1 + '@opentelemetry/api-logs@0.53.0': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/api@1.9.0': {} + + '@opentelemetry/core@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/semantic-conventions': 1.27.0 + + '@opentelemetry/exporter-trace-otlp-proto@0.53.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.26.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/instrumentation@0.46.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@types/shimmer': 1.2.0 + import-in-the-middle: 1.7.1 + require-in-the-middle: 7.4.0 + semver: 7.6.2 + shimmer: 1.2.1 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/instrumentation@0.53.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.53.0 + '@types/shimmer': 1.2.0 + import-in-the-middle: 1.11.2 + require-in-the-middle: 7.4.0 + semver: 7.6.2 + shimmer: 1.2.1 + transitivePeerDependencies: + - supports-color + + '@opentelemetry/otlp-exporter-base@0.53.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.53.0(@opentelemetry/api@1.9.0) + + 
'@opentelemetry/otlp-transformer@0.53.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.53.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.53.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.26.0(@opentelemetry/api@1.9.0) + protobufjs: 7.4.0 + + '@opentelemetry/resources@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.27.0 + + '@opentelemetry/sdk-logs@0.53.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.53.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.26.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.26.0(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-trace-base@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.27.0 + + '@opentelemetry/sdk-trace-web@1.26.0(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.26.0(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.27.0 + + '@opentelemetry/semantic-conventions@1.27.0': {} + '@pkgjs/parseargs@0.11.0': optional: true + '@protobufjs/aspromise@1.1.2': {} + + '@protobufjs/base64@1.1.2': {} + + '@protobufjs/codegen@2.0.4': {} + + 
'@protobufjs/eventemitter@1.1.0': {} + + '@protobufjs/fetch@1.1.0': + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/inquire': 1.1.0 + + '@protobufjs/float@1.0.2': {} + + '@protobufjs/inquire@1.1.0': {} + + '@protobufjs/path@1.1.2': {} + + '@protobufjs/pool@1.1.0': {} + + '@protobufjs/utf8@1.1.0': {} + '@rollup/rollup-android-arm-eabi@4.18.0': optional: true @@ -3320,6 +3710,8 @@ snapshots: '@types/retry@0.12.0': {} + '@types/shimmer@1.2.0': {} + '@types/tern@0.23.9': dependencies: '@types/estree': 1.0.5 @@ -3504,6 +3896,14 @@ snapshots: dependencies: event-target-shim: 5.0.1 + acorn-import-assertions@1.9.0(acorn@8.11.3): + dependencies: + acorn: 8.11.3 + + acorn-import-attributes@1.9.5(acorn@8.11.3): + dependencies: + acorn: 8.11.3 + acorn-jsx@5.3.2(acorn@8.11.3): dependencies: acorn: 8.11.3 @@ -3566,7 +3966,7 @@ snapshots: binary-search@1.3.6: {} - binaryen@118.0.0: {} + binaryen@119.0.0: {} brace-expansion@1.1.11: dependencies: @@ -3613,6 +4013,8 @@ snapshots: optionalDependencies: fsevents: 2.3.3 + cjs-module-lexer@1.4.1: {} + color-convert@2.0.1: dependencies: color-name: 1.1.4 @@ -3646,6 +4048,10 @@ snapshots: dependencies: ms: 2.1.2 + debug@4.3.7: + dependencies: + ms: 2.1.3 + decamelize@1.2.0: {} deep-is@0.1.4: {} @@ -3706,7 +4112,7 @@ snapshots: '@webassemblyjs/wasm-parser': https://github.com/mitschabaude/webassemblyjs/releases/latest/download/webassemblyjs-wasm-parser.tgz '@webassemblyjs/wast-parser': https://github.com/mitschabaude/webassemblyjs/releases/latest/download/webassemblyjs-wast-parser.tgz '@webassemblyjs/wast-printer': https://github.com/mitschabaude/webassemblyjs/releases/latest/download/webassemblyjs-wast-printer.tgz - binaryen: 118.0.0 + binaryen: 119.0.0 find-cache-dir: 3.3.2 minimist: 1.2.8 parse-imports: 1.2.0 @@ -3930,6 +4336,8 @@ snapshots: fsevents@2.3.3: optional: true + function-bind@1.1.2: {} + get-stream@6.0.1: {} glob-parent@5.1.2: @@ -3988,6 +4396,10 @@ snapshots: has-flag@4.0.0: {} + hasown@2.0.2: + 
dependencies: + function-bind: 1.1.2 + human-signals@2.1.0: {} humanize-ms@1.2.1: @@ -4003,6 +4415,20 @@ snapshots: parent-module: 1.0.1 resolve-from: 4.0.0 + import-in-the-middle@1.11.2: + dependencies: + acorn: 8.11.3 + acorn-import-attributes: 1.9.5(acorn@8.11.3) + cjs-module-lexer: 1.4.1 + module-details-from-path: 1.0.3 + + import-in-the-middle@1.7.1: + dependencies: + acorn: 8.11.3 + acorn-import-assertions: 1.9.0(acorn@8.11.3) + cjs-module-lexer: 1.4.1 + module-details-from-path: 1.0.3 + imurmurhash@0.1.4: {} inflight@1.0.6: @@ -4020,6 +4446,10 @@ snapshots: is-buffer@1.1.6: {} + is-core-module@2.15.1: + dependencies: + hasown: 2.0.2 + is-extglob@2.1.1: {} is-fullwidth-code-point@3.0.0: {} @@ -4074,7 +4504,7 @@ snapshots: js-yaml: 4.1.0 jsonpointer: 5.0.1 langchainhub: 0.0.10 - langsmith: 0.1.32(@langchain/core@0.2.7(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0))(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) + langsmith: 0.1.32(@langchain/core@0.2.7(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0))(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) ml-distance: 4.0.1 openapi-types: 12.1.3 p-retry: 4.6.2 @@ -4128,7 +4558,7 @@ snapshots: optionalDependencies: openai: 4.52.0 - langsmith@0.1.32(@langchain/core@0.2.7(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0))(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0): + langsmith@0.1.32(@langchain/core@0.2.7(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0))(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0): dependencies: '@types/uuid': 9.0.8 commander: 10.0.1 @@ -4136,7 +4566,7 @@ snapshots: p-retry: 4.6.2 uuid: 9.0.1 optionalDependencies: - 
'@langchain/core': 0.2.7(langchain@0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) + '@langchain/core': 0.2.7(langchain@0.2.5(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0))(openai@4.52.0) langchain: 0.2.3(fast-xml-parser@4.4.0)(ignore@5.3.1)(openai@4.52.0)(ws@8.17.0) openai: 4.52.0 @@ -4189,6 +4619,8 @@ snapshots: lodash@4.17.21: optional: true + long@5.2.3: {} + lru-cache@10.2.2: {} luxon@3.4.4: {} @@ -4259,6 +4691,8 @@ snapshots: binary-search: 1.3.6 num-sort: 2.1.0 + module-details-from-path@1.0.3: {} + moment@2.29.4: {} moment@2.30.1: {} @@ -4310,7 +4744,7 @@ snapshots: obsidian-daily-notes-interface@0.8.4(@codemirror/state@6.4.1)(@codemirror/view@6.26.3): dependencies: - obsidian: https://codeload.github.com/obsidianmd/obsidian-api/tar.gz/8be9ff1e1b708b9451f35ce2df33e3983e88565b(@codemirror/state@6.4.1)(@codemirror/view@6.26.3) + obsidian: https://codeload.github.com/obsidianmd/obsidian-api/tar.gz/23947b58d372ea02225324308e31d36b4aa95869(@codemirror/state@6.4.1)(@codemirror/view@6.26.3) tslib: 2.1.0 transitivePeerDependencies: - '@codemirror/state' @@ -4329,14 +4763,14 @@ snapshots: parsimmon: 1.18.1 preact: 10.22.0 - obsidian@1.5.7-1(@codemirror/state@6.4.1)(@codemirror/view@6.26.3): + obsidian@1.7.2(@codemirror/state@6.4.1)(@codemirror/view@6.26.3): dependencies: '@codemirror/state': 6.4.1 '@codemirror/view': 6.26.3 '@types/codemirror': 5.60.8 moment: 2.29.4 - obsidian@https://codeload.github.com/obsidianmd/obsidian-api/tar.gz/8be9ff1e1b708b9451f35ce2df33e3983e88565b(@codemirror/state@6.4.1)(@codemirror/view@6.26.3): + obsidian@https://codeload.github.com/obsidianmd/obsidian-api/tar.gz/23947b58d372ea02225324308e31d36b4aa95869(@codemirror/state@6.4.1)(@codemirror/view@6.26.3): dependencies: '@codemirror/state': 6.4.1 '@codemirror/view': 6.26.3 @@ -4432,6 +4866,8 @@ snapshots: path-key@3.1.1: {} + path-parse@1.0.7: {} + path-scurry@1.11.1: dependencies: lru-cache: 10.2.2 @@ -4454,10 +4890,32 @@ snapshots: 
optionalDependencies: ts-node: 10.9.2(@types/node@16.18.97)(typescript@5.4.5) + postcss-load-config@4.0.2(ts-node@10.9.2(@types/node@18.19.33)(typescript@5.4.5)): + dependencies: + lilconfig: 3.1.1 + yaml: 2.4.2 + optionalDependencies: + ts-node: 10.9.2(@types/node@18.19.33)(typescript@5.4.5) + preact@10.22.0: {} prelude-ls@1.2.1: {} + protobufjs@7.4.0: + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + '@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 16.18.97 + long: 5.2.3 + punycode@2.3.1: {} queue-microtask@1.2.3: {} @@ -4468,10 +4926,24 @@ snapshots: remeda@1.61.0: {} + require-in-the-middle@7.4.0: + dependencies: + debug: 4.3.7 + module-details-from-path: 1.0.3 + resolve: 1.22.8 + transitivePeerDependencies: + - supports-color + resolve-from@4.0.0: {} resolve-from@5.0.0: {} + resolve@1.22.8: + dependencies: + is-core-module: 2.15.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + retry@0.13.1: {} reusify@1.0.4: {} @@ -4522,6 +4994,8 @@ snapshots: shebang-regex@3.0.0: {} + shimmer@1.2.1: {} + signal-exit@3.0.7: {} signal-exit@4.1.0: {} @@ -4576,6 +5050,8 @@ snapshots: dependencies: has-flag: 4.0.0 + supports-preserve-symlinks-flag@1.0.0: {} + svelte@3.35.0: {} text-table@0.2.0: {} @@ -4630,6 +5106,25 @@ snapshots: yn: 3.1.1 optional: true + ts-node@10.9.2(@types/node@18.19.33)(typescript@5.4.5): + dependencies: + '@cspotcode/source-map-support': 0.8.1 + '@tsconfig/node10': 1.0.11 + '@tsconfig/node12': 1.0.11 + '@tsconfig/node14': 1.0.3 + '@tsconfig/node16': 1.0.4 + '@types/node': 18.19.33 + acorn: 8.11.3 + acorn-walk: 8.3.2 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + typescript: 5.4.5 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + optional: true + tslib@2.1.0: {} tslib@2.4.0: {} @@ -4659,6 
+5154,28 @@ snapshots: - supports-color - ts-node + tsup@8.0.2(ts-node@10.9.2(@types/node@18.19.33)(typescript@5.4.5))(typescript@5.4.5): + dependencies: + bundle-require: 4.1.0(esbuild@0.19.12) + cac: 6.7.14 + chokidar: 3.6.0 + debug: 4.3.4 + esbuild: 0.19.12 + execa: 5.1.1 + globby: 11.1.0 + joycon: 3.1.1 + postcss-load-config: 4.0.2(ts-node@10.9.2(@types/node@18.19.33)(typescript@5.4.5)) + resolve-from: 5.0.0 + rollup: 4.18.0 + source-map: 0.8.0-beta.0 + sucrase: 3.35.0 + tree-kill: 1.2.2 + optionalDependencies: + typescript: 5.4.5 + transitivePeerDependencies: + - supports-color + - ts-node + turbo-darwin-64@1.13.3: optional: true