diff --git a/langchain/package.json b/langchain/package.json
index 92b3cdfc23ae..4fa0f5f67f78 100644
--- a/langchain/package.json
+++ b/langchain/package.json
@@ -1420,7 +1420,7 @@
   },
   "dependencies": {
     "@anthropic-ai/sdk": "^0.9.1",
-    "@langchain/core": "~0.0.5",
+    "@langchain/core": "~0.0.6",
     "binary-extensions": "^2.2.0",
     "expr-eval": "^2.0.2",
     "flat": "^5.0.2",
diff --git a/langchain/src/callbacks/tests/langchain_tracer.int.test.ts b/langchain/src/callbacks/tests/langchain_tracer.int.test.ts
index b27f01c88338..2f651f2d77f6 100644
--- a/langchain/src/callbacks/tests/langchain_tracer.int.test.ts
+++ b/langchain/src/callbacks/tests/langchain_tracer.int.test.ts
@@ -87,7 +87,7 @@ test("Test traced chain with tags", async () => {
 
 test("Test Traced Agent with concurrency", async () => {
   process.env.LANGCHAIN_TRACING_V2 = "true";
-  const model = new OpenAI({ temperature: 0 });
+  const model = new ChatOpenAI({ temperature: 0 });
   const tools = [
     new SerpAPI(process.env.SERPAPI_API_KEY, {
       location: "Austin,Texas,United States",
@@ -98,7 +98,7 @@ test("Test Traced Agent with concurrency", async () => {
   ];
 
   const executor = await initializeAgentExecutorWithOptions(tools, model, {
-    agentType: "zero-shot-react-description",
+    agentType: "openai-functions",
     verbose: true,
   });
 
@@ -130,7 +130,7 @@ test("Test Traced Agent with chat model", async () => {
   ];
 
   const executor = await initializeAgentExecutorWithOptions(tools, model, {
-    agentType: "chat-zero-shot-react-description",
+    agentType: "openai-functions",
     verbose: true,
     metadata: { c: "d" },
   });
diff --git a/langchain/src/util/tiktoken.ts b/langchain/src/util/tiktoken.ts
index a823b5b18637..5876107bcfab 100644
--- a/langchain/src/util/tiktoken.ts
+++ b/langchain/src/util/tiktoken.ts
@@ -1,44 +1 @@
-import {
-  Tiktoken,
-  TiktokenBPE,
-  TiktokenEncoding,
-  TiktokenModel,
-  getEncodingNameForModel,
-} from "js-tiktoken/lite";
-import { AsyncCaller } from "./async_caller.js";
-
-const cache: Record<TiktokenEncoding, Promise<TiktokenBPE>> = {};
-
-const caller = /* #__PURE__ */ new AsyncCaller({});
-
-export async function getEncoding(
-  encoding: TiktokenEncoding,
-  options?: {
-    signal?: AbortSignal;
-    extendedSpecialTokens?: Record<string, number>;
-  }
-) {
-  if (!(encoding in cache)) {
-    cache[encoding] = caller
-      .fetch(`https://tiktoken.pages.dev/js/${encoding}.json`, {
-        signal: options?.signal,
-      })
-      .then((res) => res.json())
-      .catch((e) => {
-        delete cache[encoding];
-        throw e;
-      });
-  }
-
-  return new Tiktoken(await cache[encoding], options?.extendedSpecialTokens);
-}
-
-export async function encodingForModel(
-  model: TiktokenModel,
-  options?: {
-    signal?: AbortSignal;
-    extendedSpecialTokens?: Record<string, number>;
-  }
-) {
-  return getEncoding(getEncodingNameForModel(model), options);
-}
+export * from "@langchain/core/utils/tiktoken";
diff --git a/yarn.lock b/yarn.lock
index e634a96ae3a0..4a4b0fe8b086 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -7997,7 +7997,7 @@ __metadata:
   languageName: unknown
   linkType: soft
 
-"@langchain/core@workspace:*, @langchain/core@workspace:langchain-core, @langchain/core@~0.0.5":
+"@langchain/core@workspace:*, @langchain/core@workspace:langchain-core, @langchain/core@~0.0.6":
   version: 0.0.0-use.local
   resolution: "@langchain/core@workspace:langchain-core"
   dependencies:
@@ -22607,7 +22607,7 @@ __metadata:
     "@gradientai/nodejs-sdk": ^1.2.0
     "@huggingface/inference": ^2.6.4
     "@jest/globals": ^29.5.0
-    "@langchain/core": ~0.0.5
+    "@langchain/core": ~0.0.6
    "@mozilla/readability": ^0.4.4
    "@notionhq/client": ^2.2.10
    "@opensearch-project/opensearch": ^2.2.0