diff --git a/js/.changeset/tasty-masks-own.md b/js/.changeset/tasty-masks-own.md
new file mode 100644
index 000000000..519d184b4
--- /dev/null
+++ b/js/.changeset/tasty-masks-own.md
@@ -0,0 +1,5 @@
+---
+"@arizeai/openinference-instrumentation-langchain": minor
+---
+
+add support for @langchain/core version ^0.3.0
diff --git a/js/package.json b/js/package.json
index b269cf32c..e690c1d80 100644
--- a/js/package.json
+++ b/js/package.json
@@ -43,6 +43,7 @@
   ],
   "pnpm": {
     "overrides": {
+      "@langchain/openai@^0.2>@langchain/core": "^0.2.36",
       "@langchain/openai@^0.1>@langchain/core": "^0.1.63"
     }
   }
diff --git a/js/packages/openinference-instrumentation-langchain/examples/instrumentation.ts b/js/packages/openinference-instrumentation-langchain/examples/instrumentation.ts
index 56e4c2937..2ae423647 100644
--- a/js/packages/openinference-instrumentation-langchain/examples/instrumentation.ts
+++ b/js/packages/openinference-instrumentation-langchain/examples/instrumentation.ts
@@ -6,15 +6,17 @@
 } from "@opentelemetry/sdk-trace-node";
 import { Resource } from "@opentelemetry/resources";
 import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-proto";
-import { SEMRESATTRS_SERVICE_NAME } from "@opentelemetry/semantic-conventions";
+import { ATTR_SERVICE_NAME } from "@opentelemetry/semantic-conventions";
 import { diag, DiagConsoleLogger, DiagLogLevel } from "@opentelemetry/api";
 import * as CallbackManagerModule from "@langchain/core/callbacks/manager";
+import { SEMRESATTRS_PROJECT_NAME } from "@arizeai/openinference-semantic-conventions";
 
 // For troubleshooting, set the log level to DiagLogLevel.DEBUG
 diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG);
 
 const provider = new NodeTracerProvider({
   resource: new Resource({
-    [SEMRESATTRS_SERVICE_NAME]: "langchain-service",
+    [ATTR_SERVICE_NAME]: "langchain-service",
+    [SEMRESATTRS_PROJECT_NAME]: "langchain-project",
   }),
 });
diff --git a/js/packages/openinference-instrumentation-langchain/package.json b/js/packages/openinference-instrumentation-langchain/package.json
index 5d04e3611..0b007610f 100644
--- a/js/packages/openinference-instrumentation-langchain/package.json
+++ b/js/packages/openinference-instrumentation-langchain/package.json
@@ -33,12 +33,14 @@
     "@opentelemetry/instrumentation": "^0.46.0"
   },
   "peerDependencies": {
-    "@langchain/core": "^0.1.0 || ^0.2.0"
+    "@langchain/core": "^0.1.0 || ^0.2.0 || ^0.3.0"
   },
   "devDependencies": {
-    "@langchain/core": "^0.2.30",
+    "@langchain/core": "^0.3.13",
+    "@langchain/coreV0.2": "npm:@langchain/core@^0.2.0",
     "@langchain/coreV0.1": "npm:@langchain/core@^0.1.0",
-    "@langchain/openai": "^0.2.8",
+    "@langchain/openai": "^0.3.11",
+    "@langchain/openaiV0.2": "npm:@langchain/openai@^0.2.0",
     "@langchain/openaiV0.1": "npm:@langchain/openai@^0.1.0",
     "@opentelemetry/exporter-trace-otlp-proto": "^0.50.0",
     "@opentelemetry/resources": "^1.25.1",
@@ -49,8 +51,9 @@
     "@types/node": "^20.14.11",
     "dotenv": "^16.4.5",
     "jest": "^29.7.0",
-    "langchain": "^0.2.17",
+    "langchain": "^0.3.3",
     "langchainV0.1": "npm:langchain@^0.1.0",
+    "langchainV0.2": "npm:langchain@^0.2.0",
     "openai": "^4.52.7"
   }
 }
diff --git a/js/packages/openinference-instrumentation-langchain/test/langchainV2.test.ts b/js/packages/openinference-instrumentation-langchain/test/langchainV2.test.ts
index 1297a2461..3cfddc87f 100644
--- a/js/packages/openinference-instrumentation-langchain/test/langchainV2.test.ts
+++ b/js/packages/openinference-instrumentation-langchain/test/langchainV2.test.ts
@@ -4,14 +4,14 @@ import {
 } from "@opentelemetry/sdk-trace-base";
 import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
 import { LangChainInstrumentation } from "../src";
-import * as CallbackManager from "@langchain/core/callbacks/manager";
-import { ChatPromptTemplate } from "@langchain/core/prompts";
-import { MemoryVectorStore } from "langchain/vectorstores/memory";
-import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
-import { createRetrievalChain } from "langchain/chains/retrieval";
-import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
+import * as CallbackManager from "@langchain/coreV0.2/callbacks/manager";
+import { ChatPromptTemplate } from "@langchain/coreV0.2/prompts";
+import { MemoryVectorStore } from "langchainV0.2/vectorstores/memory";
+import { createStuffDocumentsChain } from "langchainV0.2/chains/combine_documents";
+import { createRetrievalChain } from "langchainV0.2/chains/retrieval";
+import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openaiV0.2";
 import { Stream } from "openai/streaming";
-import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
+import { RecursiveCharacterTextSplitter } from "langchainV0.2/text_splitter";
 import "dotenv/config";
 import {
   MESSAGE_FUNCTION_CALL_NAME,
@@ -22,7 +22,7 @@ import {
 import { LangChainTracer } from "../src/tracer";
 import { trace } from "@opentelemetry/api";
 import { completionsResponse, functionCallResponse } from "./fixtures";
-import { DynamicTool } from "@langchain/core/tools";
+import { DynamicTool } from "@langchain/coreV0.2/tools";
 import {
   OITracer,
   setAttributes,
@@ -57,8 +57,8 @@ const {
   RETRIEVAL_DOCUMENTS,
 } = SemanticConventions;
 
-jest.mock("@langchain/openai", () => {
-  const originalModule = jest.requireActual("@langchain/openai");
+jest.mock("@langchain/openaiV0.2", () => {
+  const originalModule = jest.requireActual("@langchain/openaiV0.2");
   class MockChatOpenAI extends originalModule.ChatOpenAI {
     constructor(...args: Parameters<typeof originalModule.ChatOpenAI>) {
       super(...args);
@@ -287,7 +287,7 @@ describe("LangChainInstrumentation", () => {
   it("should add attributes to llm spans when streaming", async () => {
     // Do this to update the mock to return a streaming response
     // eslint-disable-next-line @typescript-eslint/no-var-requires
-    const { ChatOpenAI } = jest.requireMock("@langchain/openai");
+    const { ChatOpenAI } = jest.requireMock("@langchain/openaiV0.2");
 
     const chatModel = new ChatOpenAI({
       openAIApiKey: "my-api-key",
@@ -325,7 +325,14 @@ describe("LangChainInstrumentation", () => {
       `${LLM_OUTPUT_MESSAGES}.0.${MESSAGE_ROLE}`
     ];
 
-    expect(span.attributes).toStrictEqual(expectedStreamingAttributes);
+    // Remove the id since it is randomly generated and inherited from the run
+    const actualAttributes = { ...span.attributes };
+    const output = JSON.parse(String(actualAttributes[OUTPUT_VALUE]));
+    delete output.generations[0][0].message.kwargs.id;
+    const newOutputValue = JSON.stringify(output);
+    actualAttributes[OUTPUT_VALUE] = newOutputValue;
+
+    expect(actualAttributes).toStrictEqual(expectedStreamingAttributes);
   });
 
   it("should add documents to retriever spans", async () => {
@@ -441,7 +448,7 @@ describe("LangChainInstrumentation", () => {
   it("should add function calls to spans", async () => {
     // Do this to update the mock to return a function call response
     // eslint-disable-next-line @typescript-eslint/no-var-requires
-    const { ChatOpenAI } = jest.requireMock("@langchain/openai");
+    const { ChatOpenAI } = jest.requireMock("@langchain/openaiV0.2");
 
     const chatModel = new ChatOpenAI({
       openAIApiKey: "my-api-key",
diff --git a/js/packages/openinference-instrumentation-langchain/test/langchainV3.test.ts b/js/packages/openinference-instrumentation-langchain/test/langchainV3.test.ts
new file mode 100644
index 000000000..05009182d
--- /dev/null
+++ b/js/packages/openinference-instrumentation-langchain/test/langchainV3.test.ts
@@ -0,0 +1,669 @@
+import {
+  InMemorySpanExporter,
+  SimpleSpanProcessor,
+} from "@opentelemetry/sdk-trace-base";
+import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
+import { LangChainInstrumentation } from "../src";
+import * as CallbackManager from "@langchain/core/callbacks/manager";
+import { ChatPromptTemplate } from "@langchain/core/prompts";
+import { MemoryVectorStore } from "langchain/vectorstores/memory";
+import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
+import { createRetrievalChain } from "langchain/chains/retrieval";
+import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
+import { Stream } from "openai/streaming";
+import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
+import "dotenv/config";
+import {
+  MESSAGE_FUNCTION_CALL_NAME,
+  METADATA,
+  OpenInferenceSpanKind,
+  SemanticConventions,
+} from "@arizeai/openinference-semantic-conventions";
+import { LangChainTracer } from "../src/tracer";
+import { trace } from "@opentelemetry/api";
+import { completionsResponse, functionCallResponse } from "./fixtures";
+import { DynamicTool } from "@langchain/core/tools";
+import {
+  OITracer,
+  setAttributes,
+  setSession,
+} from "@arizeai/openinference-core";
+import { context } from "@opentelemetry/api";
+jest.useFakeTimers();
+
+const memoryExporter = new InMemorySpanExporter();
+
+const {
+  INPUT_VALUE,
+  LLM_INPUT_MESSAGES,
+  OUTPUT_VALUE,
+  LLM_OUTPUT_MESSAGES,
+  INPUT_MIME_TYPE,
+  OUTPUT_MIME_TYPE,
+  MESSAGE_ROLE,
+  MESSAGE_CONTENT,
+  DOCUMENT_CONTENT,
+  DOCUMENT_METADATA,
+  OPENINFERENCE_SPAN_KIND,
+  LLM_MODEL_NAME,
+  LLM_INVOCATION_PARAMETERS,
+  LLM_TOKEN_COUNT_COMPLETION,
+  LLM_TOKEN_COUNT_PROMPT,
+  LLM_TOKEN_COUNT_TOTAL,
+  TOOL_NAME,
+  LLM_FUNCTION_CALL,
+  PROMPT_TEMPLATE_TEMPLATE,
+  PROMPT_TEMPLATE_VARIABLES,
+  RETRIEVAL_DOCUMENTS,
+} = SemanticConventions;
+
+jest.mock("@langchain/openai", () => {
+  const originalModule = jest.requireActual("@langchain/openai");
+  class MockChatOpenAI extends originalModule.ChatOpenAI {
+    constructor(...args: Parameters<typeof originalModule.ChatOpenAI>) {
+      super(...args);
+      this.client = {
+        chat: {
+          completions: {
+            create: jest.fn().mockResolvedValue(completionsResponse),
+          },
+        },
+      };
+    }
+  }
+  return {
+    ...originalModule,
+    ChatOpenAI: MockChatOpenAI,
+    OpenAIEmbeddings: class extends originalModule.OpenAIEmbeddings {
+      embedDocuments = async () => {
+        return Promise.resolve([
+          [1, 2, 3],
+          [4, 5, 6],
+          [7, 8, 9],
+        ]);
+      };
+      embedQuery = async () => {
+        return Promise.resolve([1, 2, 4]);
+      };
+    },
+  };
+});
+
+const expectedSpanAttributes = {
+  [OPENINFERENCE_SPAN_KIND]: OpenInferenceSpanKind.LLM,
+  [INPUT_VALUE]: JSON.stringify({
+    messages: [
+      [
+        {
+          lc: 1,
+          type: "constructor",
+          id: ["langchain_core", "messages", "HumanMessage"],
+          kwargs: {
+            content: "hello, this is a test",
+            additional_kwargs: {},
+            response_metadata: {},
+          },
+        },
+      ],
+    ],
+  }),
+  [INPUT_MIME_TYPE]: "application/json",
+  [OUTPUT_VALUE]:
+    '{"generations":[[{"text":"This is a test.","message":{"lc":1,"type":"constructor","id":["langchain_core","messages","AIMessage"],"kwargs":{"lc_serializable":true,"lc_kwargs":{"lc_serializable":true,"lc_kwargs":{"content":"This is a test.","tool_calls":[],"invalid_tool_calls":[],"additional_kwargs":{},"response_metadata":{},"id":"chatcmpl-8adq9JloOzNZ9TyuzrKyLpGXexh6p"},"lc_namespace":["langchain_core","messages"],"content":"This is a test.","additional_kwargs":{},"response_metadata":{},"id":"chatcmpl-8adq9JloOzNZ9TyuzrKyLpGXexh6p","tool_calls":[],"invalid_tool_calls":[],"usage_metadata":{"output_tokens":5,"input_tokens":12,"total_tokens":17,"input_token_details":{},"output_token_details":{}}},"lc_namespace":["langchain_core","messages"],"content":"This is a test.","additional_kwargs":{},"response_metadata":{"tokenUsage":{"promptTokens":12,"completionTokens":5,"totalTokens":17},"finish_reason":"stop"},"id":"chatcmpl-8adq9JloOzNZ9TyuzrKyLpGXexh6p","tool_calls":[],"invalid_tool_calls":[],"usage_metadata":{"output_tokens":5,"input_tokens":12,"total_tokens":17,"input_token_details":{},"output_token_details":{}}}},"generationInfo":{"finish_reason":"stop"}}]],"llmOutput":{"tokenUsage":{"promptTokens":12,"completionTokens":5,"totalTokens":17}}}',
+  [LLM_TOKEN_COUNT_COMPLETION]: 5,
+  [LLM_TOKEN_COUNT_PROMPT]: 12,
+  [LLM_TOKEN_COUNT_TOTAL]: 17,
+  [OUTPUT_MIME_TYPE]: "application/json",
+  [`${LLM_INPUT_MESSAGES}.0.${MESSAGE_ROLE}`]: "user",
+  [`${LLM_INPUT_MESSAGES}.0.${MESSAGE_CONTENT}`]: "hello, this is a test",
+  [`${LLM_OUTPUT_MESSAGES}.0.${MESSAGE_ROLE}`]: "assistant",
+  [`${LLM_OUTPUT_MESSAGES}.0.${MESSAGE_CONTENT}`]: "This is a test.",
+  [LLM_MODEL_NAME]: "gpt-3.5-turbo",
+  [LLM_INVOCATION_PARAMETERS]:
+    '{"model":"gpt-3.5-turbo","temperature":0,"top_p":1,"frequency_penalty":0,"presence_penalty":0,"n":1,"stream":true,"stream_options":{"include_usage":true}}',
+  metadata:
+    '{"ls_provider":"openai","ls_model_name":"gpt-3.5-turbo","ls_model_type":"chat","ls_temperature":0}',
+};
+
+describe("LangChainInstrumentation", () => {
+  const tracerProvider = new NodeTracerProvider();
+  tracerProvider.register();
+  const instrumentation = new LangChainInstrumentation();
+  instrumentation.disable();
+
+  const provider = new NodeTracerProvider();
+  provider.getTracer("default");
+
+  instrumentation.setTracerProvider(tracerProvider);
+  tracerProvider.addSpanProcessor(new SimpleSpanProcessor(memoryExporter));
+
+  const PROMPT_TEMPLATE = `Use the context below to answer the question.
+ ----------------
+ {context}
+ 
+ Question:
+ {input}
+ `;
+  const prompt = ChatPromptTemplate.fromTemplate(PROMPT_TEMPLATE);
+
+  // @ts-expect-error the moduleExports property is private. This is needed to make the test work with auto-mocking
+  instrumentation._modules[0].moduleExports = CallbackManager;
+  beforeAll(() => {
+    instrumentation.enable();
+  });
+  afterAll(() => {
+    instrumentation.disable();
+  });
+  beforeEach(() => {
+    memoryExporter.reset();
+  });
+  afterEach(() => {
+    jest.resetAllMocks();
+    jest.clearAllMocks();
+  });
+  it("should patch the callback manager module", async () => {
+    expect(
+      (CallbackManager as { openInferencePatched?: boolean })
+        .openInferencePatched,
+    ).toBe(true);
+  });
+
+  const testDocuments = [
+    "dogs are cute",
+    "rainbows are colorful",
+    "water is wet",
+  ];
+
+  it("should properly nest spans", async () => {
+    const chatModel = new ChatOpenAI({
+      openAIApiKey: "my-api-key",
+      modelName: "gpt-3.5-turbo",
+    });
+    const textSplitter = new RecursiveCharacterTextSplitter({
+      chunkSize: 1000,
+    });
+    const docs = await textSplitter.createDocuments(testDocuments);
+    const vectorStore = await MemoryVectorStore.fromDocuments(
+      docs,
+      new OpenAIEmbeddings({
+        openAIApiKey: "my-api-key",
+      }),
+    );
+    const combineDocsChain = await createStuffDocumentsChain({
+      llm: chatModel,
+      prompt,
+    });
+    const chain = await createRetrievalChain({
+      combineDocsChain: combineDocsChain,
+      retriever: vectorStore.asRetriever(),
+    });
+
+    await chain.invoke({
+      input: "What are cats?",
+    });
+
+    const spans = memoryExporter.getFinishedSpans();
+    const rootSpan = spans.find((span) => span.parentSpanId == null);
+    const llmSpan = spans.find(
+      (span) =>
+        span.attributes[SemanticConventions.OPENINFERENCE_SPAN_KIND] ===
+        OpenInferenceSpanKind.LLM,
+    );
+    const retrieverSpan = spans.find(
+      (span) =>
+        span.attributes[SemanticConventions.OPENINFERENCE_SPAN_KIND] ===
+        OpenInferenceSpanKind.RETRIEVER,
+    );
+
+    const retrievalChainSpan = spans.find(
+      (span) => span.name === "retrieval_chain",
+    );
+
+    const retrieveDocumentsSpan = spans.find(
+      (span) => span.name === "retrieve_documents",
+    );
+
+    // Langchain creates a ton of generic spans that are deeply nested. This is a simple test to ensure we have the spans we care about and they are at least nested under something. It is not possible to test the exact nesting structure because it is too complex and generic.
+    expect(rootSpan).toBe(retrievalChainSpan);
+    expect(retrieverSpan).toBeDefined();
+    expect(llmSpan).toBeDefined();
+
+    expect(retrieverSpan?.parentSpanId).toBe(
+      retrieveDocumentsSpan?.spanContext().spanId,
+    );
+    expect(llmSpan?.parentSpanId).toBeDefined();
+  });
+
+  it("should add attributes to llm spans", async () => {
+    const chatModel = new ChatOpenAI({
+      openAIApiKey: "my-api-key",
+      modelName: "gpt-3.5-turbo",
+      temperature: 0,
+    });
+
+    await chatModel.invoke("hello, this is a test");
+
+    const span = memoryExporter.getFinishedSpans()[0];
+    expect(span).toBeDefined();
+
+    expect(span.attributes).toStrictEqual({
+      ...expectedSpanAttributes,
+      [LLM_INVOCATION_PARAMETERS]:
+        '{"model":"gpt-3.5-turbo","temperature":0,"top_p":1,"frequency_penalty":0,"presence_penalty":0,"n":1,"stream":false}',
+    });
+  });
+
+  it("should add attributes to llm spans when streaming", async () => {
+    // Do this to update the mock to return a streaming response
+    // eslint-disable-next-line @typescript-eslint/no-var-requires
+    const { ChatOpenAI } = jest.requireMock("@langchain/openai");
+
+    const chatModel = new ChatOpenAI({
+      openAIApiKey: "my-api-key",
+      modelName: "gpt-3.5-turbo",
+      streaming: true,
+    });
+
+    chatModel.client.chat.completions.create.mockResolvedValue(
+      new Stream(async function* iterator() {
+        yield { choices: [{ delta: { content: "This is " } }] };
+        yield { choices: [{ delta: { content: "a test stream." } }] };
+        yield { choices: [{ delta: { finish_reason: "stop" } }] };
+      }, new AbortController()),
+    );
+
+    await chatModel.invoke("hello, this is a test");
+
+    const span = memoryExporter.getFinishedSpans()[0];
+    expect(span).toBeDefined();
+
+    const expectedStreamingAttributes = {
+      ...expectedSpanAttributes,
+      [`${LLM_OUTPUT_MESSAGES}.0.${MESSAGE_CONTENT}`]: "This is a test stream.",
+      [LLM_INVOCATION_PARAMETERS]:
+        '{"model":"gpt-3.5-turbo","temperature":1,"top_p":1,"frequency_penalty":0,"presence_penalty":0,"n":1,"stream":true,"stream_options":{"include_usage":true}}',
+      [LLM_TOKEN_COUNT_PROMPT]: 13,
+      [LLM_TOKEN_COUNT_COMPLETION]: 6,
+      [LLM_TOKEN_COUNT_TOTAL]: 19,
+      [OUTPUT_VALUE]:
+        '{"generations":[[{"text":"This is a test stream.","generationInfo":{"prompt":0,"completion":0},"message":{"lc":1,"type":"constructor","id":["langchain_core","messages","ChatMessageChunk"],"kwargs":{"content":"This is a test stream.","additional_kwargs":{},"response_metadata":{"estimatedTokenUsage":{"promptTokens":13,"completionTokens":6,"totalTokens":19},"prompt":0,"completion":0,"usage":{}}}}}]],"llmOutput":{"estimatedTokenUsage":{"promptTokens":13,"completionTokens":6,"totalTokens":19}}}',
+      [METADATA]:
+        '{"ls_provider":"openai","ls_model_name":"gpt-3.5-turbo","ls_model_type":"chat","ls_temperature":1}',
+    };
+    delete expectedStreamingAttributes[
+      `${LLM_OUTPUT_MESSAGES}.0.${MESSAGE_ROLE}`
+    ];
+
+    // Remove the id since it is randomly generated and inherited from the run
+    const actualAttributes = { ...span.attributes };
+    const output = JSON.parse(String(actualAttributes[OUTPUT_VALUE]));
+    delete output.generations[0][0].message.kwargs.id;
+    const newOutputValue = JSON.stringify(output);
+    actualAttributes[OUTPUT_VALUE] = newOutputValue;
+
+    expect(actualAttributes).toStrictEqual(expectedStreamingAttributes);
+  });
+
+  it("should add documents to retriever spans", async () => {
+    const chatModel = new ChatOpenAI({
+      openAIApiKey: "my-api-key",
+      modelName: "gpt-3.5-turbo",
+    });
+
+    const textSplitter = new RecursiveCharacterTextSplitter({
+      chunkSize: 1000,
+    });
+    const docs = await textSplitter.createDocuments(testDocuments);
+    const vectorStore = await MemoryVectorStore.fromDocuments(
+      docs,
+      new OpenAIEmbeddings({
+        openAIApiKey: "my-api-key",
+      }),
+    );
+    const combineDocsChain = await createStuffDocumentsChain({
+      llm: chatModel,
+      prompt,
+    });
+    const chain = await createRetrievalChain({
+      combineDocsChain: combineDocsChain,
+      retriever: vectorStore.asRetriever(),
+    });
+
+    await chain.invoke({
+      input: "What are cats?",
+    });
+
+    const spans = memoryExporter.getFinishedSpans();
+    const retrieverSpan = spans.find(
+      (span) =>
+        span.attributes[SemanticConventions.OPENINFERENCE_SPAN_KIND] ===
+        OpenInferenceSpanKind.RETRIEVER,
+    );
+
+    expect(retrieverSpan).toBeDefined();
+
+    expect(retrieverSpan?.attributes).toStrictEqual({
+      [OPENINFERENCE_SPAN_KIND]: OpenInferenceSpanKind.RETRIEVER,
+      [OUTPUT_MIME_TYPE]: "application/json",
+      [OUTPUT_VALUE]:
+        '{"documents":[{"pageContent":"dogs are cute","metadata":{"loc":{"lines":{"from":1,"to":1}}}},{"pageContent":"rainbows are colorful","metadata":{"loc":{"lines":{"from":1,"to":1}}}},{"pageContent":"water is wet","metadata":{"loc":{"lines":{"from":1,"to":1}}}}]}',
+      [INPUT_MIME_TYPE]: "text/plain",
+      [INPUT_VALUE]: "What are cats?",
+      [`${RETRIEVAL_DOCUMENTS}.0.${DOCUMENT_CONTENT}`]: "dogs are cute",
+      [`${RETRIEVAL_DOCUMENTS}.0.${DOCUMENT_METADATA}`]: JSON.stringify({
+        loc: {
+          lines: {
+            from: 1,
+            to: 1,
+          },
+        },
+      }),
+      [`${RETRIEVAL_DOCUMENTS}.1.${DOCUMENT_CONTENT}`]: "rainbows are colorful",
+      [`${RETRIEVAL_DOCUMENTS}.1.${DOCUMENT_METADATA}`]: JSON.stringify({
+        loc: {
+          lines: {
+            from: 1,
+            to: 1,
+          },
+        },
+      }),
+      [`${RETRIEVAL_DOCUMENTS}.2.${DOCUMENT_CONTENT}`]: "water is wet",
+      [`${RETRIEVAL_DOCUMENTS}.2.${DOCUMENT_METADATA}`]: JSON.stringify({
+        loc: {
+          lines: {
+            from: 1,
+            to: 1,
+          },
+        },
+      }),
+      metadata: "{}",
+    });
+  });
+
+  it("should add a prompt template to a span if found ", async () => {
+    const chatModel = new ChatOpenAI({
+      openAIApiKey: "my-api-key",
+      modelName: "gpt-3.5-turbo",
+    });
+    const chain = prompt.pipe(chatModel);
+    await chain.invoke({
+      context: "This is a test.",
+      input: "What is this?",
+    });
+
+    const spans = memoryExporter.getFinishedSpans();
+    expect(spans).toBeDefined();
+
+    const promptSpan = spans.find((span) => span.name === "ChatPromptTemplate");
+
+    expect(promptSpan).toBeDefined();
+    expect(promptSpan?.attributes).toStrictEqual({
+      [OPENINFERENCE_SPAN_KIND]: OpenInferenceSpanKind.CHAIN,
+      [PROMPT_TEMPLATE_TEMPLATE]: PROMPT_TEMPLATE,
+      [PROMPT_TEMPLATE_VARIABLES]: JSON.stringify({
+        context: "This is a test.",
+        input: "What is this?",
+      }),
+      [INPUT_VALUE]: '{"context":"This is a test.","input":"What is this?"}',
+      [INPUT_MIME_TYPE]: "application/json",
+      [OUTPUT_VALUE]:
+        '{"lc":1,"type":"constructor","id":["langchain_core","prompt_values","ChatPromptValue"],"kwargs":{"messages":[{"lc":1,"type":"constructor","id":["langchain_core","messages","HumanMessage"],"kwargs":{"content":"Use the context below to answer the question.\\n ----------------\\n This is a test.\\n \\n Question:\\n What is this?\\n ","additional_kwargs":{},"response_metadata":{}}}]}}',
+      [OUTPUT_MIME_TYPE]: "application/json",
+      metadata: "{}",
+    });
+    setTimeout(() => {}, 10000);
+  });
+
+  it("should add function calls to spans", async () => {
+    // Do this to update the mock to return a function call response
+    // eslint-disable-next-line @typescript-eslint/no-var-requires
+    const { ChatOpenAI } = jest.requireMock("@langchain/openai");
+
+    const chatModel = new ChatOpenAI({
+      openAIApiKey: "my-api-key",
+      modelName: "gpt-3.5-turbo",
+      temperature: 1,
+    });
+
+    chatModel.client.chat.completions.create.mockResolvedValue(
+      functionCallResponse,
+    );
+
+    const weatherFunction = {
+      name: "get_current_weather",
+      description: "Get the current weather in a given location",
+      parameters: {
+        type: "object",
+        properties: {
+          location: {
+            type: "string",
+            description: "The city and state, e.g. San Francisco, CA",
+          },
+          unit: { type: "string", enum: ["celsius", "fahrenheit"] },
+        },
+        required: ["location"],
+      },
+    };
+
+    await chatModel.invoke(
+      "whats the weather like in seattle, wa in fahrenheit?",
+      {
+        functions: [weatherFunction],
+      },
+    );
+
+    const spans = memoryExporter.getFinishedSpans();
+    expect(spans).toBeDefined();
+
+    const llmSpan = spans.find(
+      (span) =>
+        span.attributes[OPENINFERENCE_SPAN_KIND] === OpenInferenceSpanKind.LLM,
+    );
+    expect(llmSpan).toBeDefined();
+    expect(llmSpan?.attributes).toStrictEqual({
+      [OPENINFERENCE_SPAN_KIND]: OpenInferenceSpanKind.LLM,
+      [LLM_MODEL_NAME]: "gpt-3.5-turbo",
+      [LLM_FUNCTION_CALL]:
+        '{"name":"get_current_weather","arguments":"{\\"location\\":\\"Seattle, WA\\",\\"unit\\":\\"fahrenheit\\"}"}',
+      [`${LLM_INPUT_MESSAGES}.0.${MESSAGE_ROLE}`]: "user",
+      [`${LLM_INPUT_MESSAGES}.0.${MESSAGE_CONTENT}`]:
+        "whats the weather like in seattle, wa in fahrenheit?",
+      [`${LLM_OUTPUT_MESSAGES}.0.${MESSAGE_FUNCTION_CALL_NAME}`]:
+        "get_current_weather",
+      [`${LLM_OUTPUT_MESSAGES}.0.${MESSAGE_CONTENT}`]: "",
+      [`${LLM_OUTPUT_MESSAGES}.0.${MESSAGE_ROLE}`]: "assistant",
+      [LLM_TOKEN_COUNT_COMPLETION]: 22,
+      [LLM_TOKEN_COUNT_PROMPT]: 88,
+      [LLM_TOKEN_COUNT_TOTAL]: 110,
+      [LLM_INVOCATION_PARAMETERS]:
+        '{"model":"gpt-3.5-turbo","temperature":1,"top_p":1,"frequency_penalty":0,"presence_penalty":0,"n":1,"stream":false,"functions":[{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}]}',
+      [INPUT_VALUE]:
+        '{"messages":[[{"lc":1,"type":"constructor","id":["langchain_core","messages","HumanMessage"],"kwargs":{"content":"whats the weather like in seattle, wa in fahrenheit?","additional_kwargs":{},"response_metadata":{}}}]]}',
+      [INPUT_MIME_TYPE]: "application/json",
+      [OUTPUT_VALUE]:
+        '{"generations":[[{"text":"","message":{"lc":1,"type":"constructor","id":["langchain_core","messages","AIMessage"],"kwargs":{"lc_serializable":true,"lc_kwargs":{"lc_serializable":true,"lc_kwargs":{"content":"","tool_calls":[],"invalid_tool_calls":[],"additional_kwargs":{"function_call":{"name":"get_current_weather","arguments":"{\\"location\\":\\"Seattle, WA\\",\\"unit\\":\\"fahrenheit\\"}"}},"response_metadata":{},"id":"chatcmpl-9D6ZQKSVCtEeMT272J8h6xydy1jE2"},"lc_namespace":["langchain_core","messages"],"content":"","additional_kwargs":{"function_call":{"name":"get_current_weather","arguments":"{\\"location\\":\\"Seattle, WA\\",\\"unit\\":\\"fahrenheit\\"}"}},"response_metadata":{},"id":"chatcmpl-9D6ZQKSVCtEeMT272J8h6xydy1jE2","tool_calls":[],"invalid_tool_calls":[],"usage_metadata":{"output_tokens":22,"input_tokens":88,"total_tokens":110,"input_token_details":{},"output_token_details":{}}},"lc_namespace":["langchain_core","messages"],"content":"","additional_kwargs":{"function_call":{"name":"get_current_weather","arguments":"{\\"location\\":\\"Seattle, WA\\",\\"unit\\":\\"fahrenheit\\"}"}},"response_metadata":{"tokenUsage":{"promptTokens":88,"completionTokens":22,"totalTokens":110},"finish_reason":"function_call"},"id":"chatcmpl-9D6ZQKSVCtEeMT272J8h6xydy1jE2","tool_calls":[],"invalid_tool_calls":[],"usage_metadata":{"output_tokens":22,"input_tokens":88,"total_tokens":110,"input_token_details":{},"output_token_details":{}}}},"generationInfo":{"finish_reason":"function_call"}}]],"llmOutput":{"tokenUsage":{"promptTokens":88,"completionTokens":22,"totalTokens":110}}}',
+      [OUTPUT_MIME_TYPE]: "application/json",
+      metadata:
+        '{"ls_provider":"openai","ls_model_name":"gpt-3.5-turbo","ls_model_type":"chat","ls_temperature":1}',
+    });
+  });
+
+  it("should add tool information to tool spans", async () => {
+    const simpleTool = new DynamicTool({
+      name: "test_tool",
+      description:
+        "call this to get the value of a test, input should be an empty string",
+      func: async () => Promise.resolve("this is a test tool"),
+    });
+
+    await simpleTool.invoke("hello");
+
+    const spans = memoryExporter.getFinishedSpans();
+    expect(spans).toBeDefined();
+
+    const toolSpan = spans.find(
+      (span) =>
+        span.attributes[OPENINFERENCE_SPAN_KIND] === OpenInferenceSpanKind.TOOL,
+    );
+    expect(toolSpan).toBeDefined();
+    expect(toolSpan?.attributes).toStrictEqual({
+      [OPENINFERENCE_SPAN_KIND]: OpenInferenceSpanKind.TOOL,
+      [TOOL_NAME]: "test_tool",
+      [INPUT_VALUE]: "hello",
+      [INPUT_MIME_TYPE]: "text/plain",
+      [OUTPUT_VALUE]: "this is a test tool",
+      [OUTPUT_MIME_TYPE]: "text/plain",
+      metadata: "{}",
+    });
+  });
+
+  it("should capture context attributes and add them to spans", async () => {
+    await context.with(
+      setSession(
+        setAttributes(context.active(), {
+          "test-attribute": "test-value",
+        }),
+        { sessionId: "session-id" },
+      ),
+      async () => {
+        const chatModel = new ChatOpenAI({
+          openAIApiKey: "my-api-key",
+          modelName: "gpt-3.5-turbo",
+          temperature: 0,
+        });
+        await chatModel.invoke("hello, this is a test");
+      },
+    );
+
+    const spans = memoryExporter.getFinishedSpans();
+    expect(spans.length).toBe(1);
+    const span = spans[0];
+    expect(span.attributes).toMatchInlineSnapshot(`
+{
+  "input.mime_type": "application/json",
+  "input.value": "{"messages":[[{"lc":1,"type":"constructor","id":["langchain_core","messages","HumanMessage"],"kwargs":{"content":"hello, this is a test","additional_kwargs":{},"response_metadata":{}}}]]}",
+  "llm.input_messages.0.message.content": "hello, this is a test",
+  "llm.input_messages.0.message.role": "user",
+  "llm.invocation_parameters": "{"model":"gpt-3.5-turbo","temperature":0,"top_p":1,"frequency_penalty":0,"presence_penalty":0,"n":1,"stream":false}",
+  "llm.model_name": "gpt-3.5-turbo",
+  "llm.output_messages.0.message.content": "This is a test.",
+  "llm.output_messages.0.message.role": "assistant",
+  "llm.token_count.completion": 5,
+  "llm.token_count.prompt": 12,
+  "llm.token_count.total": 17,
+  "metadata": "{"ls_provider":"openai","ls_model_name":"gpt-3.5-turbo","ls_model_type":"chat","ls_temperature":0}",
+  "openinference.span.kind": "LLM",
+  "output.mime_type": "application/json",
+  "output.value": "{"generations":[[{"text":"This is a test.","message":{"lc":1,"type":"constructor","id":["langchain_core","messages","AIMessage"],"kwargs":{"lc_serializable":true,"lc_kwargs":{"lc_serializable":true,"lc_kwargs":{"content":"This is a test.","tool_calls":[],"invalid_tool_calls":[],"additional_kwargs":{},"response_metadata":{},"id":"chatcmpl-8adq9JloOzNZ9TyuzrKyLpGXexh6p"},"lc_namespace":["langchain_core","messages"],"content":"This is a test.","additional_kwargs":{},"response_metadata":{},"id":"chatcmpl-8adq9JloOzNZ9TyuzrKyLpGXexh6p","tool_calls":[],"invalid_tool_calls":[],"usage_metadata":{"output_tokens":5,"input_tokens":12,"total_tokens":17,"input_token_details":{},"output_token_details":{}}},"lc_namespace":["langchain_core","messages"],"content":"This is a test.","additional_kwargs":{},"response_metadata":{"tokenUsage":{"promptTokens":12,"completionTokens":5,"totalTokens":17},"finish_reason":"stop"},"id":"chatcmpl-8adq9JloOzNZ9TyuzrKyLpGXexh6p","tool_calls":[],"invalid_tool_calls":[],"usage_metadata":{"output_tokens":5,"input_tokens":12,"total_tokens":17,"input_token_details":{},"output_token_details":{}}}},"generationInfo":{"finish_reason":"stop"}}]],"llmOutput":{"tokenUsage":{"promptTokens":12,"completionTokens":5,"totalTokens":17}}}",
+  "session.id": "session-id",
+  "test-attribute": "test-value",
+}
+`);
+  });
+});
+
+describe("LangChainInstrumentation with TraceConfigOptions", () => {
+  const tracerProvider = new NodeTracerProvider();
+  tracerProvider.register();
+  const instrumentation = new LangChainInstrumentation({
+    traceConfig: {
+      hideInputs: true,
+    },
+  });
+  instrumentation.disable();
+  const provider = new NodeTracerProvider();
+  provider.getTracer("default");
+  instrumentation.setTracerProvider(tracerProvider);
+  tracerProvider.addSpanProcessor(new SimpleSpanProcessor(memoryExporter));
+
+  // @ts-expect-error the moduleExports property is private. This is needed to make the test work with auto-mocking
+  instrumentation._modules[0].moduleExports = CallbackManager;
+  beforeAll(() => {
+    instrumentation.enable();
+  });
+  afterAll(() => {
+    instrumentation.disable();
+  });
+  beforeEach(() => {
+    memoryExporter.reset();
+  });
+  afterEach(() => {
+    jest.resetAllMocks();
+    jest.clearAllMocks();
+  });
+  it("should patch the callback manager module", async () => {
+    expect(
+      (CallbackManager as { openInferencePatched?: boolean })
+        .openInferencePatched,
+    ).toBe(true);
+  });
+
+  it("should respect trace config options", async () => {
+    await context.with(
+      setSession(
+        setAttributes(context.active(), {
+          "test-attribute": "test-value",
+        }),
+        { sessionId: "session-id" },
+      ),
+      async () => {
+        const chatModel = new ChatOpenAI({
+          openAIApiKey: "my-api-key",
+          modelName: "gpt-3.5-turbo",
+          temperature: 0,
+        });
+        await chatModel.invoke("hello, this is a test");
+      },
+    );
+
+    const spans = memoryExporter.getFinishedSpans();
+    expect(spans.length).toBe(1);
+    const span = spans[0];
+    expect(span.attributes).toMatchInlineSnapshot(`
+{
+  "input.value": "__REDACTED__",
+  "llm.invocation_parameters": "{"model":"gpt-3.5-turbo","temperature":0,"top_p":1,"frequency_penalty":0,"presence_penalty":0,"n":1,"stream":false}",
+  "llm.model_name": "gpt-3.5-turbo",
+  "llm.output_messages.0.message.content": "This is a test.",
+  "llm.output_messages.0.message.role": "assistant",
+  "llm.token_count.completion": 5,
+  "llm.token_count.prompt": 12,
+  "llm.token_count.total": 17,
+  "metadata": "{"ls_provider":"openai","ls_model_name":"gpt-3.5-turbo","ls_model_type":"chat","ls_temperature":0}",
+  "openinference.span.kind": "LLM",
+  "output.mime_type": "application/json",
+  "output.value": "{"generations":[[{"text":"This is a test.","message":{"lc":1,"type":"constructor","id":["langchain_core","messages","AIMessage"],"kwargs":{"lc_serializable":true,"lc_kwargs":{"lc_serializable":true,"lc_kwargs":{"content":"This is a test.","tool_calls":[],"invalid_tool_calls":[],"additional_kwargs":{},"response_metadata":{},"id":"chatcmpl-8adq9JloOzNZ9TyuzrKyLpGXexh6p"},"lc_namespace":["langchain_core","messages"],"content":"This is a test.","additional_kwargs":{},"response_metadata":{},"id":"chatcmpl-8adq9JloOzNZ9TyuzrKyLpGXexh6p","tool_calls":[],"invalid_tool_calls":[],"usage_metadata":{"output_tokens":5,"input_tokens":12,"total_tokens":17,"input_token_details":{},"output_token_details":{}}},"lc_namespace":["langchain_core","messages"],"content":"This is a test.","additional_kwargs":{},"response_metadata":{"tokenUsage":{"promptTokens":12,"completionTokens":5,"totalTokens":17},"finish_reason":"stop"},"id":"chatcmpl-8adq9JloOzNZ9TyuzrKyLpGXexh6p","tool_calls":[],"invalid_tool_calls":[],"usage_metadata":{"output_tokens":5,"input_tokens":12,"total_tokens":17,"input_token_details":{},"output_token_details":{}}}},"generationInfo":{"finish_reason":"stop"}}]],"llmOutput":{"tokenUsage":{"promptTokens":12,"completionTokens":5,"totalTokens":17}}}",
+  "session.id": "session-id",
+  "test-attribute": "test-value",
+}
+`);
+  });
+});
+
+describe("LangChainTracer", () => {
+  const testSerialized = {
+    lc: 1,
+    type: "not_implemented" as const,
+    id: [],
+  };
+  it("should delete runs after they are ended", async () => {
+    const oiTracer = new OITracer({ tracer: trace.getTracer("default") });
+    const langChainTracer = new LangChainTracer(oiTracer);
+    for (let i = 0; i < 10; i++) {
+      await langChainTracer.handleLLMStart(testSerialized, [], "runId");
+      expect(Object.keys(langChainTracer["runs"]).length).toBe(1);
+
+      await langChainTracer.handleRetrieverStart(testSerialized, "", "runId2");
+      expect(Object.keys(langChainTracer["runs"]).length).toBe(2);
+
+      await langChainTracer.handleLLMEnd({ generations: [] }, "runId");
+      expect(Object.keys(langChainTracer["runs"]).length).toBe(1);
+
+      await langChainTracer.handleRetrieverEnd([], "runId2");
+      expect(Object.keys(langChainTracer["runs"]).length).toBe(0);
+    }
+
+    expect(langChainTracer["runs"]).toBeDefined();
+    expect(Object.keys(langChainTracer["runs"]).length).toBe(0);
+  });
+});
diff --git a/js/pnpm-lock.yaml b/js/pnpm-lock.yaml
index 435a8855c..dec394831 100644
--- a/js/pnpm-lock.yaml
+++ b/js/pnpm-lock.yaml
@@ -5,6 +5,7 @@ settings:
   excludeLinksFromLockfile: false
 
 overrides:
+  '@langchain/openai@^0.2>@langchain/core': ^0.2.36
   '@langchain/openai@^0.1>@langchain/core': ^0.1.63
 
 importers:
@@ -37,7 +38,7 @@
         version: 5.0.10
       ts-jest:
        specifier: ^29.2.2
-        version: 29.2.4(@babel/core@7.24.9)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.24.9))(jest@29.7.0(@types/node@20.14.11))(typescript@5.5.4)
+        version: 29.2.4(@babel/core@7.24.9)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.24.9))(jest@29.7.0)(typescript@5.5.4)
       typescript:
         specifier: ^5.5.4
         version: 5.5.4
@@ -98,17 +99,23 @@
         version: 0.46.0(@opentelemetry/api@1.9.0)
     devDependencies:
       '@langchain/core':
-        specifier: ^0.2.30
-        version: 0.2.30(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+        specifier: ^0.3.13
+        version: 0.3.13(openai@4.56.0(zod@3.23.8))
       '@langchain/coreV0.1':
         specifier: npm:@langchain/core@^0.1.0
-        version: '@langchain/core@0.1.63(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))'
+        version: '@langchain/core@0.1.63(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))'
+      '@langchain/coreV0.2':
+        specifier: npm:@langchain/core@^0.2.0
+        version: '@langchain/core@0.2.36(openai@4.56.0(zod@3.23.8))'
       '@langchain/openai':
-        specifier: ^0.2.8
-        version: 0.2.8(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))
+        specifier: ^0.3.11
+        version: 0.3.11(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))
      '@langchain/openaiV0.1':
         specifier: npm:@langchain/openai@^0.1.0
-        version: '@langchain/openai@0.1.3(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))'
+        version: '@langchain/openai@0.1.3(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))'
+      '@langchain/openaiV0.2':
+        specifier: npm:@langchain/openai@^0.2.0
+        version: '@langchain/openai@0.2.8'
       '@opentelemetry/exporter-trace-otlp-proto':
         specifier: ^0.50.0
         version: 0.50.0(@opentelemetry/api@1.9.0)
@@ -137,11 +144,14 @@
         specifier: ^29.7.0
         version: 29.7.0(@types/node@20.14.11)
       langchain:
-        specifier: ^0.2.17
-        version: 0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8))
+        specifier: ^0.3.3
+        version: 0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       langchainV0.1:
         specifier: npm:langchain@^0.1.0
         version: langchain@0.1.37(ignore@5.3.1)(openai@4.56.0(zod@3.23.8))
+      langchainV0.2:
+        specifier: npm:langchain@^0.2.0
+        version: langchain@0.2.17(@langchain/community@0.0.57(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(ignore@5.3.1)(openai@4.56.0(zod@3.23.8))
       openai:
         specifier: ^4.52.7
         version: 4.56.0(zod@3.23.8)
@@ -880,6 +890,14 @@
     resolution: {integrity: sha512-jeLmLTxnEq9zSq0J/fMlBCMT5Ix8tbZriqNYTm3oS7CPM2uHBcRQhV3fpsh4G8FnE7Pxa4sWfrFzc2jykhlk7A==}
     engines: {node: '>=18'}
 
+  '@langchain/core@0.2.36':
+    resolution: {integrity: sha512-qHLvScqERDeH7y2cLuJaSAlMwg3f/3Oc9nayRSXRU2UuaK/SOhI42cxiPLj1FnuHJSmN0rBQFkrLx02gI4mcVg==}
+    engines: {node: '>=18'}
+
+  '@langchain/core@0.3.13':
+    resolution: {integrity: sha512-sHDlwyHhgeaYC+wfORrWO7sXxD6/GDtZZ5mqjY48YMwB58cVv8hTs8goR/9EwXapYt8fQi2uXTGUV87bHzvdZQ==}
+    engines: {node: '>=18'}
+
   '@langchain/openai@0.0.34':
     resolution: {integrity: sha512-M+CW4oXle5fdoz2T2SwdOef8pl3/1XmUx1vjn2mXUVM/128aO0l23FMF0SNBsAbRV6P+p/TuzjodchJbi0Ht/A==}
     engines: {node: '>=18'}
@@ -892,6 +910,12 @@
     resolution: {integrity: sha512-p5fxEAKuR8UV9jWIxkZ6AY/vAPSYxJI0Pf/UM4T3FKk/dn99G/mAEDLhfI4pBf7B8o8TudSVyBW2hRjZqlQu7g==}
     engines: {node: '>=18'}
 
+  '@langchain/openai@0.3.11':
+    resolution: {integrity: sha512-mEFbpJ8w8NPArsquUlCwxvZTKNkXxqwzvTEYzv6Jb7gUoBDOZtwLg6AdcngTJ+w5VFh3wxgPy0g3zb9Aw0Qbpw==}
+    engines: {node: '>=18'}
+    peerDependencies:
+      '@langchain/core': '>=0.2.26 <0.4.0'
+
   '@langchain/textsplitters@0.0.3':
     resolution: {integrity: sha512-cXWgKE3sdWLSqAa8ykbCcUsUF1Kyr5J3HOWYGuobhPEycXW4WI++d5DhzdpL238mzoEXTi90VqfSCra37l5YqA==}
     engines: {node: '>=18'}
@@ -1138,6 +1162,9 @@
   '@types/stack-utils@2.0.3':
     resolution: {integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==}
 
+  '@types/uuid@10.0.0':
+    resolution: {integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==}
+
   '@types/uuid@9.0.8':
     resolution: {integrity: sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==}
 
@@ -2422,6 +2449,52 @@
       youtubei.js:
         optional: true
 
+  langchain@0.3.3:
+    resolution: {integrity: sha512-xy63PAh1PUuF2VdjLxacP8SeUQKF++ixvAhMhl/+3GkzloEKce41xlbQC3xNGVToYaqzIsDrueps/JU0zYYXHw==}
+    engines: {node: '>=18'}
+    peerDependencies:
+      '@langchain/anthropic': '*'
+      '@langchain/aws': '*'
+      '@langchain/cohere': '*'
+      '@langchain/core': '>=0.2.21 <0.4.0'
+      '@langchain/google-genai': '*'
+      '@langchain/google-vertexai': '*'
+      '@langchain/groq': '*'
+      '@langchain/mistralai': '*'
+      '@langchain/ollama': '*'
+      axios: '*'
+      cheerio: '*'
+      handlebars: ^4.7.8
+      peggy: ^3.0.2
+      typeorm: '*'
+    peerDependenciesMeta:
+      '@langchain/anthropic':
+        optional: true
+      '@langchain/aws':
+        optional: true
+      '@langchain/cohere':
+        optional: true
+      '@langchain/google-genai':
+        optional: true
+      '@langchain/google-vertexai':
+        optional: true
+      '@langchain/groq':
+        optional: true
+      '@langchain/mistralai':
+        optional: true
+      '@langchain/ollama':
+        optional: true
+      axios:
+        optional: true
+      cheerio:
+        optional: true
+      handlebars:
+        optional: true
+      peggy:
+        optional: true
+      typeorm:
+        optional: true
+
   langchainhub@0.0.11:
     resolution: {integrity: sha512-WnKI4g9kU2bHQP136orXr2bcRdgz9iiTBpTN0jWt9IlScUKnJBoD0aa2HOzHURQKeQDnt2JwqVmQ6Depf5uDLQ==}
 
@@ -2439,6 +2512,14 @@
       openai:
         optional: true
 
+  langsmith@0.1.66:
+    resolution: {integrity: sha512-ZhZ9g8t/qjj0oUWpvKLtUe3qxDL/N0wG0m+Ctkxf0keopYJkcMJg4/71jl6ZYyiSU8xlC27aixXOT0uvLhqcFA==}
+    peerDependencies:
+      openai: '*'
+    peerDependenciesMeta:
+      openai:
+        optional: true
+
   leven@3.1.0:
     resolution: {integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==}
     engines: {node: '>=6'}
@@ -2617,6 +2698,15 @@
       zod:
         optional: true
 
+  openai@4.68.3:
+    resolution: {integrity: sha512-KfnhZ7mR9rK/f0O1vJGRnB3aYuDGgVkNIegJFxGviV0SGDVGlTha7FR8UV9P0NbO6l/podml0E/rk2R1egY94w==}
+    hasBin: true
+    peerDependencies:
+      zod: ^3.23.8
+    peerDependenciesMeta:
+      zod:
+        optional: true
+
   openapi-types@12.1.3:
     resolution: {integrity: sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==}
 
@@ -3756,13 +3846,13 @@
       '@jridgewell/resolve-uri': 3.1.2
       '@jridgewell/sourcemap-codec': 1.5.0
 
-  '@langchain/community@0.0.57(langchain@0.2.17)(openai@4.56.0(zod@3.23.8))':
+  '@langchain/community@0.0.57(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))':
     dependencies:
-      '@langchain/core': 0.1.63(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
-      '@langchain/openai': 0.0.34(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))
+      '@langchain/core': 0.1.63(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      '@langchain/openai': 0.0.34(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))
       expr-eval: 2.0.2
       flat: 5.0.2
-      langsmith: 0.1.45(@langchain/core@0.1.63(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      langsmith: 0.1.45(@langchain/core@0.1.63(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       uuid: 9.0.1
       zod: 3.23.8
       zod-to-json-schema: 3.23.2(zod@3.23.8)
     transitivePeerDependencies:
       - encoding
       - langchain
       - openai
 
-  '@langchain/core@0.1.63(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))':
+  '@langchain/core@0.1.63(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))':
     dependencies:
       ansi-styles: 5.2.0
       camelcase: 6.3.0
       decamelize: 1.2.0
       js-tiktoken: 1.0.14
-      langsmith: 0.1.45(@langchain/core@0.1.63(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      langsmith: 0.1.45(@langchain/core@0.1.63(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       ml-distance: 4.0.1
       mustache: 4.2.0
       p-queue: 6.6.2
@@ -3789,13 +3879,13 @@
-  '@langchain/core@0.2.30(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))':
+  '@langchain/core@0.2.30(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))':
     dependencies:
       ansi-styles: 5.2.0
       camelcase: 6.3.0
       decamelize: 1.2.0
       js-tiktoken: 1.0.14
-      langsmith: 0.1.45(@langchain/core@0.2.30(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      langsmith: 0.1.45(@langchain/core@0.2.30(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       mustache: 4.2.0
       p-queue: 6.6.2
       p-retry: 4.6.2
@@ -3806,9 +3896,41 @@
-  '@langchain/openai@0.0.34(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))':
+  '@langchain/core@0.2.36(openai@4.56.0(zod@3.23.8))':
+    dependencies:
+      ansi-styles: 5.2.0
+      camelcase: 6.3.0
+      decamelize: 1.2.0
+      js-tiktoken: 1.0.14
+      langsmith: 0.1.66(openai@4.56.0(zod@3.23.8))
+      mustache: 4.2.0
+      p-queue: 6.6.2
+      p-retry: 4.6.2
+      uuid: 10.0.0
+      zod: 3.23.8
+      zod-to-json-schema: 3.23.2(zod@3.23.8)
+    transitivePeerDependencies:
+      - openai
+
+  '@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8))':
     dependencies:
-      '@langchain/core': 0.2.30(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      ansi-styles: 5.2.0
+      camelcase: 6.3.0
+      decamelize: 1.2.0
+      js-tiktoken: 1.0.14
+      langsmith: 0.1.66(openai@4.56.0(zod@3.23.8))
+      mustache: 4.2.0
+      p-queue: 6.6.2
+      p-retry: 4.6.2
+      uuid: 10.0.0
+      zod: 3.23.8
+      zod-to-json-schema: 3.23.2(zod@3.23.8)
+    transitivePeerDependencies:
+      - openai
+
+  '@langchain/openai@0.0.34(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))':
+    dependencies:
+      '@langchain/core': 0.2.30(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       js-tiktoken: 1.0.14
       openai: 4.56.0(zod@3.23.8)
       zod: 3.23.8
@@ -3817,9 +3939,9 @@
-  '@langchain/openai@0.1.3(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))':
+  '@langchain/openai@0.1.3(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))':
     dependencies:
-      '@langchain/core': 0.1.63(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      '@langchain/core': 0.1.63(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       js-tiktoken: 1.0.14
       openai: 4.56.0(zod@3.23.8)
       zod: 3.23.8
@@ -3828,20 +3950,29 @@
-  '@langchain/openai@0.2.8(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))':
+  '@langchain/openai@0.2.8':
     dependencies:
-      '@langchain/core': 0.2.30(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      '@langchain/core': 0.2.36(openai@4.56.0(zod@3.23.8))
       js-tiktoken: 1.0.14
       openai: 4.56.0(zod@3.23.8)
       zod: 3.23.8
       zod-to-json-schema: 3.23.2(zod@3.23.8)
     transitivePeerDependencies:
       - encoding
-      - langchain
 
-  '@langchain/textsplitters@0.0.3(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))':
+  '@langchain/openai@0.3.11(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))':
     dependencies:
-      '@langchain/core': 0.2.30(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      '@langchain/core': 0.3.13(openai@4.56.0(zod@3.23.8))
+      js-tiktoken: 1.0.14
+      openai: 4.68.3(zod@3.23.8)
+      zod: 3.23.8
+      zod-to-json-schema: 3.23.2(zod@3.23.8)
+    transitivePeerDependencies:
+      - encoding
+
+  '@langchain/textsplitters@0.0.3(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))':
+    dependencies:
+      '@langchain/core': 0.2.30(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       js-tiktoken: 1.0.14
     transitivePeerDependencies:
       - langchain
@@ -4125,6 +4256,8 @@
 
   '@types/stack-utils@2.0.3': {}
 
+  '@types/uuid@10.0.0': {}
+
   '@types/uuid@9.0.8': {}
 
   '@types/yargs-parser@21.0.3': {}
@@ -5280,16 +5413,16 @@
   langchain@0.1.37(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)):
     dependencies:
       '@anthropic-ai/sdk': 0.9.1
-      '@langchain/community': 0.0.57(langchain@0.2.17)(openai@4.56.0(zod@3.23.8))
-      '@langchain/core': 0.1.63(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
-      '@langchain/openai': 0.0.34(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))
-      '@langchain/textsplitters': 0.0.3(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      '@langchain/community': 0.0.57(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      '@langchain/core': 0.1.63(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      '@langchain/openai': 0.0.34(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))
+      '@langchain/textsplitters': 0.0.3(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       binary-extensions: 2.3.0
       js-tiktoken: 1.0.14
       js-yaml: 4.1.0
       jsonpointer: 5.0.1
       langchainhub: 0.0.11
-      langsmith: 0.1.45(@langchain/core@0.1.63(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      langsmith: 0.1.45(@langchain/core@0.1.63(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       ml-distance: 4.0.1
       openapi-types: 12.1.3
       p-retry: 4.6.2
@@ -5371,16 +5504,16 @@
       - vectordb
       - voy-search
 
-  langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)):
+  langchain@0.2.17(@langchain/community@0.0.57(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)):
     dependencies:
-      '@langchain/core': 0.2.30(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
-      '@langchain/openai': 0.2.8(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))
-      '@langchain/textsplitters': 0.0.3(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      '@langchain/core': 0.2.36(openai@4.56.0(zod@3.23.8))
+      '@langchain/openai': 0.2.8
+      '@langchain/textsplitters': 0.0.3(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       binary-extensions: 2.3.0
       js-tiktoken: 1.0.14
       js-yaml: 4.1.0
       jsonpointer: 5.0.1
-      langsmith: 0.1.45(@langchain/core@0.2.30(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      langsmith: 0.1.45(@langchain/core@0.2.36(openai@4.56.0(zod@3.23.8)))(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       openapi-types: 12.1.3
       p-retry: 4.6.2
       uuid: 10.0.0
@@ -5388,15 +5521,34 @@
       zod: 3.23.8
       zod-to-json-schema: 3.23.2(zod@3.23.8)
     optionalDependencies:
-      '@langchain/community': 0.0.57(langchain@0.2.17)(openai@4.56.0(zod@3.23.8))
+      '@langchain/community': 0.0.57(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       ignore: 5.3.1
     transitivePeerDependencies:
       - encoding
       - openai
 
+  langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)):
+    dependencies:
+      '@langchain/core': 0.3.13(openai@4.56.0(zod@3.23.8))
+      '@langchain/openai': 0.3.11(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))
+      '@langchain/textsplitters': 0.0.3(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      js-tiktoken: 1.0.14
+      js-yaml: 4.1.0
+      jsonpointer: 5.0.1
+      langsmith: 0.1.66(openai@4.56.0(zod@3.23.8))
+      openapi-types: 12.1.3
+      p-retry: 4.6.2
+      uuid: 10.0.0
+      yaml: 2.5.0
+      zod: 3.23.8
+      zod-to-json-schema: 3.23.2(zod@3.23.8)
+    transitivePeerDependencies:
+      - encoding
+      - openai
+
   langchainhub@0.0.11: {}
 
-  langsmith@0.1.45(@langchain/core@0.1.63(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)):
+  langsmith@0.1.45(@langchain/core@0.1.63(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)):
     dependencies:
       '@types/uuid': 9.0.8
       commander: 10.0.1
@@ -5405,11 +5557,11 @@
       semver: 7.6.3
       uuid: 9.0.1
     optionalDependencies:
-      '@langchain/core': 0.1.63(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
-      langchain: 0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8))
+      '@langchain/core': 0.1.63(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      langchain: 0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
       openai: 4.56.0(zod@3.23.8)
 
-  langsmith@0.1.45(@langchain/core@0.2.30(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)):
+  langsmith@0.1.45(@langchain/core@0.2.30(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)):
     dependencies:
       '@types/uuid': 9.0.8
       commander: 10.0.1
@@ -5418,8 +5570,32 @@
       semver: 7.6.3
       uuid: 9.0.1
     optionalDependencies:
-      '@langchain/core': 0.2.30(langchain@0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
-      langchain: 0.2.17(@langchain/community@0.0.57)(ignore@5.3.1)(openai@4.56.0(zod@3.23.8))
+      '@langchain/core': 0.2.30(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      langchain: 0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      openai: 4.56.0(zod@3.23.8)
+
+  langsmith@0.1.45(@langchain/core@0.2.36(openai@4.56.0(zod@3.23.8)))(langchain@0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8)):
+    dependencies:
+      '@types/uuid': 9.0.8
+      commander: 10.0.1
+      p-queue: 6.6.2
+      p-retry: 4.6.2
+      semver: 7.6.3
+      uuid: 9.0.1
+    optionalDependencies:
+      '@langchain/core': 0.2.36(openai@4.56.0(zod@3.23.8))
+      langchain: 0.3.3(@langchain/core@0.3.13(openai@4.56.0(zod@3.23.8)))(openai@4.56.0(zod@3.23.8))
+      openai: 4.56.0(zod@3.23.8)
+
+  langsmith@0.1.66(openai@4.56.0(zod@3.23.8)):
+    dependencies:
+      '@types/uuid': 10.0.0
+      commander: 10.0.1
+      p-queue: 6.6.2
+      p-retry: 4.6.2
+      semver: 7.6.3
+      uuid: 10.0.0
+    optionalDependencies:
       openai: 4.56.0(zod@3.23.8)
 
   leven@3.1.0: {}
@@ -5587,6 +5763,20 @@
     transitivePeerDependencies:
       - encoding
 
+  openai@4.68.3(zod@3.23.8):
+    dependencies:
+      '@types/node': 18.19.45
+      '@types/node-fetch': 2.6.11
+      abort-controller: 3.0.0
+      agentkeepalive: 4.5.0
+      form-data-encoder: 1.7.2
+      formdata-node: 4.4.1
+      node-fetch: 2.7.0
+    optionalDependencies:
+      zod: 3.23.8
+    transitivePeerDependencies:
+      - encoding
+
   openapi-types@12.1.3: {}
 
   optionator@0.9.4:
@@ -5903,7 +6093,7 @@
     dependencies:
       typescript: 5.5.4
 
-  ts-jest@29.2.4(@babel/core@7.24.9)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.24.9))(jest@29.7.0(@types/node@20.14.11))(typescript@5.5.4):
+  ts-jest@29.2.4(@babel/core@7.24.9)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.24.9))(jest@29.7.0)(typescript@5.5.4):
    dependencies:
       bs-logger: 0.2.6
       ejs: 3.1.10