feat(langchain): support version 0.3 (#1080)
Co-authored-by: Parker Stafford <[email protected]>
cephalization and Parker-Stafford authored Oct 24, 2024
1 parent c03a5b6 commit 518f298
Showing 7 changed files with 940 additions and 63 deletions.
5 changes: 5 additions & 0 deletions js/.changeset/tasty-masks-own.md
@@ -0,0 +1,5 @@
+---
+"@arizeai/openinference-instrumentation-langchain": minor
+---
+
+add support for @langchain/core version ^0.3.0
1 change: 1 addition & 0 deletions js/package.json
@@ -43,6 +43,7 @@
   ],
   "pnpm": {
     "overrides": {
+      "@langchain/openai@^0.2>@langchain/core": "^0.2.36",
       "@langchain/openai@^0.1>@langchain/core": "^0.1.63"
     }
   }
@@ -6,15 +6,17 @@ import {
 } from "@opentelemetry/sdk-trace-node";
 import { Resource } from "@opentelemetry/resources";
 import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-proto";
-import { SEMRESATTRS_SERVICE_NAME } from "@opentelemetry/semantic-conventions";
+import { ATTR_SERVICE_NAME } from "@opentelemetry/semantic-conventions";
 import { diag, DiagConsoleLogger, DiagLogLevel } from "@opentelemetry/api";
 import * as CallbackManagerModule from "@langchain/core/callbacks/manager";
+import { SEMRESATTRS_PROJECT_NAME } from "@arizeai/openinference-semantic-conventions";
 // For troubleshooting, set the log level to DiagLogLevel.DEBUG
 diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG);
 
 const provider = new NodeTracerProvider({
   resource: new Resource({
-    [SEMRESATTRS_SERVICE_NAME]: "langchain-service",
+    [ATTR_SERVICE_NAME]: "langchain-service",
+    [SEMRESATTRS_PROJECT_NAME]: "langchain-project",
   }),
 });
 
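This pair of changes tracks the @opentelemetry/semantic-conventions package, which deprecated its SEMRESATTRS_* exports in favor of ATTR_* constants; ATTR_SERVICE_NAME is the same "service.name" resource key under the new naming scheme. Consolidated, the updated example setup reads as follows (a sketch assuming the package versions pinned in this commit):

import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
import { Resource } from "@opentelemetry/resources";
import { ATTR_SERVICE_NAME } from "@opentelemetry/semantic-conventions";
import { SEMRESATTRS_PROJECT_NAME } from "@arizeai/openinference-semantic-conventions";

// Both constants are plain attribute-key strings, so they can be used as
// computed property names on the Resource.
const provider = new NodeTracerProvider({
  resource: new Resource({
    [ATTR_SERVICE_NAME]: "langchain-service",
    [SEMRESATTRS_PROJECT_NAME]: "langchain-project",
  }),
});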
11 changes: 7 additions & 4 deletions js/packages/openinference-instrumentation-langchain/package.json
@@ -33,12 +33,14 @@
     "@opentelemetry/instrumentation": "^0.46.0"
   },
   "peerDependencies": {
-    "@langchain/core": "^0.1.0 || ^0.2.0"
+    "@langchain/core": "^0.1.0 || ^0.2.0 || ^0.3.0"
   },
   "devDependencies": {
-    "@langchain/core": "^0.2.30",
+    "@langchain/core": "^0.3.13",
+    "@langchain/coreV0.2": "npm:@langchain/core@^0.2.0",
     "@langchain/coreV0.1": "npm:@langchain/core@^0.1.0",
-    "@langchain/openai": "^0.2.8",
+    "@langchain/openai": "^0.3.11",
+    "@langchain/openaiV0.2": "npm:@langchain/openai@^0.2.0",
     "@langchain/openaiV0.1": "npm:@langchain/openai@^0.1.0",
     "@opentelemetry/exporter-trace-otlp-proto": "^0.50.0",
     "@opentelemetry/resources": "^1.25.1",
@@ -49,8 +51,9 @@
     "@types/node": "^20.14.11",
     "dotenv": "^16.4.5",
     "jest": "^29.7.0",
-    "langchain": "^0.2.17",
+    "langchain": "^0.3.3",
     "langchainV0.1": "npm:langchain@^0.1.0",
+    "langchainV0.2": "npm:langchain@^0.2.0",
     "openai": "^4.52.7"
   }
 }
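The npm: aliases above are what let a single test suite exercise three majors of @langchain/core side by side: each alias installs its own copy of the package under a distinct name, while the pnpm overrides in js/package.json pin the transitive @langchain/core that each aliased @langchain/openai resolves to. A minimal sketch of consuming such an alias (specifiers taken from the devDependencies above):

// An alias resolves to its own copy of the package, so two majors can be
// imported in the same file under different bindings.
import { ChatOpenAI } from "@langchain/openai"; // the primary dependency, ^0.3.11
import { ChatOpenAI as ChatOpenAIV02 } from "@langchain/openaiV0.2"; // aliased ^0.2.x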
@@ -4,14 +4,14 @@ import {
 } from "@opentelemetry/sdk-trace-base";
 import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
 import { LangChainInstrumentation } from "../src";
-import * as CallbackManager from "@langchain/core/callbacks/manager";
-import { ChatPromptTemplate } from "@langchain/core/prompts";
-import { MemoryVectorStore } from "langchain/vectorstores/memory";
-import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
-import { createRetrievalChain } from "langchain/chains/retrieval";
-import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
+import * as CallbackManager from "@langchain/coreV0.2/callbacks/manager";
+import { ChatPromptTemplate } from "@langchain/coreV0.2/prompts";
+import { MemoryVectorStore } from "langchainV0.2/vectorstores/memory";
+import { createStuffDocumentsChain } from "langchainV0.2/chains/combine_documents";
+import { createRetrievalChain } from "langchainV0.2/chains/retrieval";
+import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openaiV0.2";
 import { Stream } from "openai/streaming";
-import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
+import { RecursiveCharacterTextSplitter } from "langchainV0.2/text_splitter";
 import "dotenv/config";
 import {
   MESSAGE_FUNCTION_CALL_NAME,
@@ -22,7 +22,7 @@ import {
 import { LangChainTracer } from "../src/tracer";
 import { trace } from "@opentelemetry/api";
 import { completionsResponse, functionCallResponse } from "./fixtures";
-import { DynamicTool } from "@langchain/core/tools";
+import { DynamicTool } from "@langchain/coreV0.2/tools";
 import {
   OITracer,
   setAttributes,
@@ -57,8 +57,8 @@ const {
   RETRIEVAL_DOCUMENTS,
 } = SemanticConventions;
 
-jest.mock("@langchain/openai", () => {
-  const originalModule = jest.requireActual("@langchain/openai");
+jest.mock("@langchain/openaiV0.2", () => {
+  const originalModule = jest.requireActual("@langchain/openaiV0.2");
   class MockChatOpenAI extends originalModule.ChatOpenAI {
     constructor(...args: Parameters<typeof originalModule.ChatOpenAI>) {
       super(...args);
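jest.mock is keyed by module specifier, so once the test imports move to the alias, the mock registration and jest.requireActual must name the alias as well; mocking "@langchain/openai" directly would leave the aliased 0.2 copy unpatched. The general pattern, sketched with the alias from package.json (mock body elided):

// The mock must target the exact specifier the test file imports.
jest.mock("@langchain/openaiV0.2", () => {
  const actual = jest.requireActual("@langchain/openaiV0.2");
  return { ...actual }; // replace members with mocks here as needed
});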
@@ -287,7 +287,7 @@ describe("LangChainInstrumentation", () => {
   it("should add attributes to llm spans when streaming", async () => {
     // Do this to update the mock to return a streaming response
     // eslint-disable-next-line @typescript-eslint/no-var-requires
-    const { ChatOpenAI } = jest.requireMock("@langchain/openai");
+    const { ChatOpenAI } = jest.requireMock("@langchain/openaiV0.2");
 
     const chatModel = new ChatOpenAI({
       openAIApiKey: "my-api-key",
@@ -325,7 +325,14 @@
       `${LLM_OUTPUT_MESSAGES}.0.${MESSAGE_ROLE}`
     ];
 
-    expect(span.attributes).toStrictEqual(expectedStreamingAttributes);
+    // Remove the id since it is randomly generated and inherited from the run
+    const actualAttributes = { ...span.attributes };
+    const output = JSON.parse(String(actualAttributes[OUTPUT_VALUE]));
+    delete output.generations[0][0].message.kwargs.id;
+    const newOutputValue = JSON.stringify(output);
+    actualAttributes[OUTPUT_VALUE] = newOutputValue;
+
+    expect(actualAttributes).toStrictEqual(expectedStreamingAttributes);
   });
 
   it("should add documents to retriever spans", async () => {
@@ -441,7 +448,7 @@
   it("should add function calls to spans", async () => {
     // Do this to update the mock to return a function call response
     // eslint-disable-next-line @typescript-eslint/no-var-requires
-    const { ChatOpenAI } = jest.requireMock("@langchain/openai");
+    const { ChatOpenAI } = jest.requireMock("@langchain/openaiV0.2");
 
     const chatModel = new ChatOpenAI({
       openAIApiKey: "my-api-key",