fix(AI Agent Node): Fix tools agent when using memory and Anthropic models #10513

Merged
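For context on the diff below (as I read the change): Anthropic chat models can return the agent's final answer as an array of content blocks of the form `{ index, type, text }` rather than a single string, which breaks downstream handling when memory is attached. The new `handleAgentFinishOutput` helper merges text-only blocks back into one string. A minimal standalone sketch of that merge, with the block shape and sample data assumed for illustration:

```ts
// Sketch only: mirrors the merge logic added in this PR, with assumed shapes.
type ContentBlock = { index: number; type: string; text: string };

function mergeTextBlocks(output: string | ContentBlock[]): string | ContentBlock[] {
	// Plain string answers pass through untouched
	if (!Array.isArray(output)) return output;
	// Only merge when every block is a text block; anything else is left as-is
	if (!output.every((block) => block.type === 'text')) return output;
	return output
		.map((block) => block.text)
		.join('\n')
		.trim();
}

// Example: an Anthropic-style multi-block answer collapses to one string.
const merged = mergeTextBlocks([
	{ index: 0, type: 'text', text: 'Hello' },
	{ index: 1, type: 'text', text: 'world' },
]);
console.log(merged); // "Hello\nworld"
```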
@@ -1,20 +1,20 @@
import { BINARY_ENCODING, NodeConnectionType, NodeOperationError } from 'n8n-workflow';
import type { IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';

import type { AgentAction, AgentFinish, AgentStep } from 'langchain/agents';
import type { AgentAction, AgentFinish } from 'langchain/agents';
import { AgentExecutor, createToolCallingAgent } from 'langchain/agents';
import type { BaseChatMemory } from '@langchain/community/memory/chat_memory';
import type { BaseMessagePromptTemplateLike } from '@langchain/core/prompts';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { omit } from 'lodash';
import type { Tool } from '@langchain/core/tools';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { RunnableSequence } from '@langchain/core/runnables';
import type { ZodObject } from 'zod';
import { z } from 'zod';
import type { BaseOutputParser, StructuredOutputParser } from '@langchain/core/output_parsers';
import { OutputFixingParser } from 'langchain/output_parsers';
import { HumanMessage } from '@langchain/core/messages';
import { RunnableSequence } from '@langchain/core/runnables';
import {
isChatInstance,
getPromptInputByType,
@@ -93,7 +93,43 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeE
const tools = (await getConnectedTools(this, true, false)) as Array<DynamicStructuredTool | Tool>;
const outputParser = (await getOptionalOutputParsers(this))?.[0];
let structuredOutputParserTool: DynamicStructuredTool | undefined;
/**
* Handles the agent text output and transforms it in case of multi-output.
*
* @param steps - The agent finish or agent action steps.
* @returns The modified agent finish steps or the original steps.
*/
function handleAgentFinishOutput(steps: AgentFinish | AgentAction[]) {
// Check if the steps contain multiple outputs
type AgentMultiOutputFinish = AgentFinish & {
returnValues: { output: Array<{ text: string; type: string; index: number }> };
};
const agentFinishSteps = steps as AgentMultiOutputFinish | AgentFinish;

if (agentFinishSteps.returnValues) {
const isMultiOutput = Array.isArray(agentFinishSteps.returnValues?.output);
if (isMultiOutput) {
// Define the type for each item in the multi-output array
type MultiOutputItem = { index: number; type: string; text: string };
const multiOutputSteps = agentFinishSteps.returnValues.output as MultiOutputItem[];

// Check if all items in the multi-output array are of type 'text'
const isTextOnly = (multiOutputSteps ?? []).every((output) => output.type === 'text');

if (isTextOnly) {
// If all items are of type 'text', merge them into a single string
agentFinishSteps.returnValues.output = multiOutputSteps
.map((output) => output.text)
.join('\n')
.trim();
}
return agentFinishSteps;
}
}

// If the steps do not contain multiple outputs, return them as is
return steps;
}
async function agentStepsParser(
steps: AgentFinish | AgentAction[],
): Promise<AgentFinish | AgentAction[]> {
@@ -113,20 +149,7 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeE
}
}

// If the steps are an AgentFinish and the outputParser is defined it must mean that the LLM didn't use `format_final_response` tool so we will parse the output manually
if (outputParser && typeof steps === 'object' && (steps as AgentFinish).returnValues) {
const finalResponse = (steps as AgentFinish).returnValues;
const returnValues = (await outputParser.parse(finalResponse as unknown as string)) as Record<
string,
unknown
>;

return {
returnValues,
log: 'Final response formatted',
};
}
return steps;
return handleAgentFinishOutput(steps);
}

if (outputParser) {
@@ -172,9 +195,7 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeE
});
agent.streamRunnable = false;

const runnableAgent = RunnableSequence.from<{
steps: AgentStep[];
}>([agent, agentStepsParser]);
const runnableAgent = RunnableSequence.from([agent, agentStepsParser]);

const executor = AgentExecutor.fromAgentAndTools({
agent: runnableAgent,
@@ -196,7 +217,7 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeE
});

if (input === undefined) {
throw new NodeOperationError(this.getNode(), 'The ‘text parameter is empty.');
throw new NodeOperationError(this.getNode(), 'The ‘text parameter is empty.');
}

// OpenAI doesn't allow empty tools array so we will provide a more user-friendly error message
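Putting the change together: `agentStepsParser` now delegates the final `AgentFinish` to `handleAgentFinishOutput`, so by the time the executor hands the result to an attached memory, `returnValues.output` should be a plain string rather than an array of blocks. A small before/after sketch (the block shape is assumed from the diff; the memory interaction itself is not shown):

```ts
import type { AgentFinish } from 'langchain/agents';

// Before: Anthropic-style AgentFinish with the answer split across text blocks.
const before: AgentFinish = {
	returnValues: {
		output: [
			{ index: 0, type: 'text', text: 'The capital of France' },
			{ index: 1, type: 'text', text: 'is Paris.' },
		],
	},
	log: '',
};

// After handleAgentFinishOutput: the text blocks are merged into one string,
// which is the shape a chat-memory implementation expects to store.
const after: AgentFinish = {
	returnValues: { output: 'The capital of France\nis Paris.' },
	log: '',
};
```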
@@ -6,7 +6,7 @@ import {
type INodeTypeDescription,
type SupplyData,
} from 'n8n-workflow';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { OllamaEmbeddings } from '@langchain/ollama';
import { logWrapper } from '../../../utils/logWrapper';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { ollamaDescription, ollamaModel } from '../../llms/LMOllama/description';
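The embeddings import above moves from `@langchain/community` to the dedicated `@langchain/ollama` package (added to package.json below). Usage should be unchanged; a minimal sketch, assuming a locally running Ollama instance, a pulled embedding model, and the default base URL:

```ts
import { OllamaEmbeddings } from '@langchain/ollama';

// Model name and base URL are assumptions for illustration.
const embeddings = new OllamaEmbeddings({
	model: 'nomic-embed-text',
	baseUrl: 'http://localhost:11434',
});

const vector = await embeddings.embedQuery('hello world');
console.log(vector.length); // embedding dimensionality reported by the model
```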
26 changes: 13 additions & 13 deletions packages/@n8n/nodes-langchain/package.json
@@ -137,17 +137,17 @@
"@google-cloud/resource-manager": "5.3.0",
"@google/generative-ai": "0.11.4",
"@huggingface/inference": "2.7.0",
"@langchain/anthropic": "0.2.9",
"@langchain/anthropic": "0.2.15",
"@langchain/cohere": "0.0.10",
"@langchain/community": "0.2.20",
"@langchain/core": "0.2.18",
"@langchain/google-genai": "0.0.23",
"@langchain/google-vertexai": "0.0.21",
"@langchain/groq": "0.0.15",
"@langchain/mistralai": "0.0.27",
"@langchain/ollama": "^0.0.2",
"@langchain/openai": "0.2.5",
"@langchain/pinecone": "0.0.8",
"@langchain/groq": "0.0.17",
"@langchain/mistralai": "0.0.28",
"@langchain/community": "0.2.28",
"@langchain/core": "0.2.27",
"@langchain/google-genai": "0.0.26",
"@langchain/google-vertexai": "0.0.26",
"@langchain/ollama": "^0.0.4",
"@langchain/openai": "0.2.7",
"@langchain/pinecone": "0.0.9",
"@langchain/qdrant": "^0.0.5",
"@langchain/redis": "0.0.5",
"@langchain/textsplitters": "0.0.3",
@@ -156,7 +156,7 @@
"@n8n/vm2": "3.9.25",
"@pinecone-database/pinecone": "3.0.0",
"@qdrant/js-client-rest": "1.9.0",
"@supabase/supabase-js": "2.43.4",
"@supabase/supabase-js": "2.45.1",
"@types/pg": "^8.11.6",
"@xata.io/client": "0.28.4",
"basic-auth": "catalog:",
@@ -169,12 +169,12 @@
"html-to-text": "9.0.5",
"jsdom": "^23.0.1",
"json-schema-to-zod": "2.1.0",
"langchain": "0.2.11",
"langchain": "0.2.16",
"lodash": "catalog:",
"mammoth": "1.7.2",
"n8n-nodes-base": "workspace:*",
"n8n-workflow": "workspace:*",
"openai": "4.53.0",
"openai": "4.56.0",
"pdf-parse": "1.1.1",
"pg": "8.12.0",
"redis": "4.6.12",
2 changes: 1 addition & 1 deletion packages/workflow/package.json
@@ -29,7 +29,7 @@
"dist/**/*"
],
"devDependencies": {
"@langchain/core": "^0.2.18",
"@langchain/core": "^0.2.27",
"@types/deep-equal": "^1.0.1",
"@types/express": "catalog:",
"@types/jmespath": "^0.15.0",