diff --git a/packages/@n8n/nodes-langchain/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.ts b/packages/@n8n/nodes-langchain/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.ts
new file mode 100644
index 0000000000000..824ea03d35eaf
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.ts
@@ -0,0 +1,352 @@
+import { AgentExecutor } from 'langchain/agents';
+import { OpenAI as OpenAIClient } from 'openai';
+import { OpenAIAssistantRunnable } from 'langchain/experimental/openai_assistant';
+import { type Tool } from 'langchain/tools';
+import { NodeConnectionType, NodeOperationError } from 'n8n-workflow';
+import type {
+ IExecuteFunctions,
+ INodeExecutionData,
+ INodeType,
+ INodeTypeDescription,
+} from 'n8n-workflow';
+import type { OpenAIToolType } from 'langchain/dist/experimental/openai_assistant/schema';
+import { formatToOpenAIAssistantTool } from './utils';
+
+export class OpenAiAssistant implements INodeType {
+ description: INodeTypeDescription = {
+ displayName: 'OpenAI Assistant',
+ name: 'openAiAssistant',
+ icon: 'fa:robot',
+ group: ['transform'],
+ version: 1,
+ description: 'Utilizes the Assistant API from OpenAI.',
+ subtitle: 'OpenAI Assistant',
+ defaults: {
+ name: 'OpenAI Assistant',
+ color: '#404040',
+ },
+ codex: {
+ alias: ['LangChain'],
+ categories: ['AI'],
+ subcategories: {
+ AI: ['Agents'],
+ },
+ resources: {
+ primaryDocumentation: [
+ {
+ url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.openaiassistant/',
+ },
+ ],
+ },
+ },
+ inputs: [
+ { type: NodeConnectionType.Main },
+ { type: NodeConnectionType.AiTool, displayName: 'Tools' },
+ ],
+ outputs: [NodeConnectionType.Main],
+ credentials: [
+ {
+ name: 'openAiApi',
+ required: true,
+ },
+ ],
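+ // Strip the trailing path segment (e.g. "/v1") from a custom base URL; the load-options requests below re-append the version segment themselves.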
+ requestDefaults: {
+ ignoreHttpStatusErrors: true,
+ baseURL:
+ '={{ $parameter.options?.baseURL?.split("/").slice(0,-1).join("/") || "https://api.openai.com" }}',
+ },
+ properties: [
+ {
+ displayName: 'Operation',
+ name: 'mode',
+ type: 'options',
+ noDataExpression: true,
+ default: 'existing',
+ options: [
+ {
+ name: 'Create New Assistant',
+ value: 'new',
+ },
+ {
+ name: 'Use Existing Assistant',
+ value: 'existing',
+ },
+ ],
+ },
+ {
+ displayName: 'Name',
+ name: 'name',
+ type: 'string',
+ default: '',
+ required: true,
+ displayOptions: {
+ show: {
+ '/mode': ['new'],
+ },
+ },
+ },
+ {
+ displayName: 'Instructions',
+ name: 'instructions',
+ type: 'string',
+ description: 'How the Assistant and model should behave or respond',
+ default: '',
+ typeOptions: {
+ rows: 5,
+ },
+ required: true,
+ displayOptions: {
+ show: {
+ '/mode': ['new'],
+ },
+ },
+ },
+ {
+ displayName: 'Model',
+ name: 'model',
+ type: 'options',
+ description:
+ 'The model which will be used to power the assistant. The Retrieval tool requires the gpt-3.5-turbo-1106 or gpt-4-1106-preview model.',
+ required: true,
+ displayOptions: {
+ show: {
+ '/mode': ['new'],
+ },
+ },
+ typeOptions: {
+ loadOptions: {
+ routing: {
+ request: {
+ method: 'GET',
+ url: '={{ $parameter.options?.baseURL?.split("/").slice(-1).pop() || "v1" }}/models',
+ },
+ output: {
+ postReceive: [
+ {
+ type: 'rootProperty',
+ properties: {
+ property: 'data',
+ },
+ },
+ {
+ type: 'filter',
+ properties: {
+ pass: "={{ $responseItem.id.startsWith('gpt-') && !$responseItem.id.includes('instruct') }}",
+ },
+ },
+ {
+ type: 'setKeyValue',
+ properties: {
+ name: '={{$responseItem.id}}',
+ value: '={{$responseItem.id}}',
+ },
+ },
+ {
+ type: 'sort',
+ properties: {
+ key: 'name',
+ },
+ },
+ ],
+ },
+ },
+ },
+ },
+ routing: {
+ send: {
+ type: 'body',
+ property: 'model',
+ },
+ },
+ default: 'gpt-3.5-turbo-1106',
+ },
+ {
+ displayName: 'Assistant',
+ name: 'assistantId',
+ type: 'options',
+ noDataExpression: true,
+ displayOptions: {
+ show: {
+ '/mode': ['existing'],
+ },
+ },
+ description: 'The assistant to use',
+ typeOptions: {
+ loadOptions: {
+ routing: {
+ request: {
+ method: 'GET',
+ headers: {
+ 'OpenAI-Beta': 'assistants=v1',
+ },
+ url: '={{ $parameter.options?.baseURL?.split("/").slice(-1).pop() || "v1" }}/assistants',
+ },
+ output: {
+ postReceive: [
+ {
+ type: 'rootProperty',
+ properties: {
+ property: 'data',
+ },
+ },
+ {
+ type: 'setKeyValue',
+ properties: {
+ name: '={{$responseItem.name}}',
+ value: '={{$responseItem.id}}',
+ // eslint-disable-next-line n8n-local-rules/no-interpolation-in-regular-string
+ description: '={{$responseItem.model}}',
+ },
+ },
+ {
+ type: 'sort',
+ properties: {
+ key: 'name',
+ },
+ },
+ ],
+ },
+ },
+ },
+ },
+ routing: {
+ send: {
+ type: 'body',
+ property: 'assistant',
+ },
+ },
+ required: true,
+ default: '',
+ },
+ {
+ displayName: 'Text',
+ name: 'text',
+ type: 'string',
+ required: true,
+ default: '={{ $json.chat_input }}',
+ },
+ {
+ displayName: 'OpenAI Tools',
+ name: 'nativeTools',
+ type: 'multiOptions',
+ default: [],
+ options: [
+ {
+ name: 'Code Interpreter',
+ value: 'code_interpreter',
+ },
+ {
+ name: 'Retrieval',
+ value: 'retrieval',
+ },
+ ],
+ },
+ {
+ displayName: 'Options',
+ name: 'options',
+ placeholder: 'Add Option',
+ description: 'Additional options to add',
+ type: 'collection',
+ default: {},
+ options: [
+ {
+ displayName: 'Base URL',
+ name: 'baseURL',
+ default: 'https://api.openai.com/v1',
+ description: 'Override the default base URL for the API',
+ type: 'string',
+ },
+ {
+ displayName: 'Max Retries',
+ name: 'maxRetries',
+ default: 2,
+ description: 'Maximum number of retries to attempt',
+ type: 'number',
+ },
+ {
+ displayName: 'Timeout',
+ name: 'timeout',
+ default: 10000,
+ description: 'Maximum amount of time a request is allowed to take in milliseconds',
+ type: 'number',
+ },
+ ],
+ },
+ ],
+ };
+
+ async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
+ const tools = (await this.getInputConnectionData(NodeConnectionType.AiTool, 0)) as Tool[];
+ const credentials = await this.getCredentials('openAiApi');
+
+ const items = this.getInputData();
+ const returnData: INodeExecutionData[] = [];
+
+ for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
+ const input = this.getNodeParameter('text', itemIndex) as string;
+ const assistantId = this.getNodeParameter('assistantId', itemIndex, '') as string;
+ const nativeTools = this.getNodeParameter('nativeTools', itemIndex, []) as Array<
+ 'code_interpreter' | 'retrieval'
+ >;
+
+ const options = this.getNodeParameter('options', itemIndex, {}) as {
+ baseURL?: string;
+ maxRetries: number;
+ timeout: number;
+ };
+
+ if (input === undefined) {
+ throw new NodeOperationError(this.getNode(), 'The ‘text’ parameter is empty.');
+ }
+
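+ // The OpenAI SDK client is used both to create/update the assistant and by the LangChain runnable to execute the run.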
+ const client = new OpenAIClient({
+ apiKey: credentials.apiKey as string,
+ maxRetries: options.maxRetries ?? 2,
+ timeout: options.timeout ?? 10000,
+ baseURL: options.baseURL,
+ });
+ let agent;
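+ // Merge the tools connected via the n8n 'Tools' input (converted to the OpenAI function-tool format) with the selected native OpenAI tools (code_interpreter / retrieval).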
+ const nativeToolsParsed: OpenAIToolType = nativeTools.map((tool) => ({ type: tool }));
+ const transformedConnectedTools = tools?.map(formatToOpenAIAssistantTool) ?? [];
+ const newTools = [...transformedConnectedTools, ...nativeToolsParsed];
+
+ // Existing agent, update tools with currently assigned
+ if (assistantId) {
+ agent = new OpenAIAssistantRunnable({ assistantId, client, asAgent: true });
+
+ await client.beta.assistants.update(assistantId, {
+ tools: newTools,
+ });
+ } else {
+ const name = this.getNodeParameter('name', itemIndex, '') as string;
+ const instructions = this.getNodeParameter('instructions', itemIndex, '') as string;
+ const model = this.getNodeParameter('model', itemIndex, 'gpt-3.5-turbo-1106') as string;
+
+ agent = await OpenAIAssistantRunnable.createAssistant({
+ model,
+ client,
+ instructions,
+ name,
+ tools: newTools,
+ asAgent: true,
+ });
+ }
+
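+ // Wrap the assistant in an AgentExecutor so tool calls requested during the run are executed with the connected tools and the final output is returned.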
+ const agentExecutor = AgentExecutor.fromAgentAndTools({
+ agent,
+ tools,
+ });
+
+ const response = await agentExecutor.call({
+ content: input,
+ signal: this.getExecutionCancelSignal(),
+ timeout: options.timeout ?? 10000,
+ });
+
+ returnData.push({ json: response });
+ }
+
+ return this.prepareOutputData(returnData);
+ }
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/OpenAiAssistant/utils.ts b/packages/@n8n/nodes-langchain/nodes/agents/OpenAiAssistant/utils.ts
new file mode 100644
index 0000000000000..575444193c2de
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/agents/OpenAiAssistant/utils.ts
@@ -0,0 +1,47 @@
+import { zodToJsonSchema } from 'zod-to-json-schema';
+import type { OpenAI as OpenAIClient } from 'openai';
+import type { StructuredTool } from 'langchain/tools';
+
+// Copied from langchain (`langchain/src/tools/convert_to_openai.ts`),
+// since these functions are not exported there
+
+/**
+ * Formats a `StructuredTool` instance into a format that is compatible
+ * with OpenAI's ChatCompletionFunctions. It uses the `zodToJsonSchema`
+ * function to convert the schema of the `StructuredTool` into a JSON
+ * schema, which is then used as the parameters for the OpenAI function.
+ */
+export function formatToOpenAIFunction(
+ tool: StructuredTool,
+): OpenAIClient.Chat.ChatCompletionCreateParams.Function {
+ return {
+ name: tool.name,
+ description: tool.description,
+ parameters: zodToJsonSchema(tool.schema),
+ };
+}
+
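+/**
+ * Formats a `StructuredTool` into the `tools` entry shape expected by the
+ * OpenAI Chat Completions API: a `function`-type tool wrapping the JSON schema
+ * derived from the tool's zod schema.
+ */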
+export function formatToOpenAITool(tool: StructuredTool): OpenAIClient.Chat.ChatCompletionTool {
+ const schema = zodToJsonSchema(tool.schema);
+ return {
+ type: 'function',
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: schema,
+ },
+ };
+}
+
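+/**
+ * Formats a `StructuredTool` into the `function`-type tool shape expected by the
+ * OpenAI Assistants API, used when creating or updating an assistant.
+ */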
+export function formatToOpenAIAssistantTool(
+ tool: StructuredTool,
+): OpenAIClient.Beta.AssistantCreateParams.AssistantToolsFunction {
+ return {
+ type: 'function',
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: zodToJsonSchema(tool.schema),
+ },
+ };
+}
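+
+// Illustrative sketch of the conversion (tool name and schema below are hypothetical):
+// given a StructuredTool with name 'calculator', description 'Evaluate math expressions'
+// and schema z.object({ expression: z.string() }), formatToOpenAIAssistantTool returns roughly:
+// {
+//   type: 'function',
+//   function: {
+//     name: 'calculator',
+//     description: 'Evaluate math expressions',
+//     parameters: { type: 'object', properties: { expression: { type: 'string' } }, required: ['expression'], ... },
+//   },
+// }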
diff --git a/packages/@n8n/nodes-langchain/package.json b/packages/@n8n/nodes-langchain/package.json
index c9dea7e99afee..d5fb003505998 100644
--- a/packages/@n8n/nodes-langchain/package.json
+++ b/packages/@n8n/nodes-langchain/package.json
@@ -40,6 +40,7 @@
],
"nodes": [
"dist/nodes/agents/Agent/Agent.node.js",
+ "dist/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.js",
"dist/nodes/chains/ChainSummarization/ChainSummarization.node.js",
"dist/nodes/chains/ChainLLM/ChainLlm.node.js",
"dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js",
@@ -117,9 +118,9 @@
"dependencies": {
"@aws-sdk/client-bedrock-runtime": "^3.422.0",
"@getzep/zep-js": "^0.7.1",
- "@huggingface/inference": "^2.6.4",
"@google-ai/generativelanguage": "^0.2.1",
"@gxl/epub-parser": "^2.0.4",
+ "@huggingface/inference": "^2.6.4",
"@pinecone-database/pinecone": "^1.1.0",
"@supabase/supabase-js": "^2.33.2",
"@tensorflow-models/universal-sentence-encoder": "1.3.3",
@@ -132,18 +133,20 @@
"d3-dsv": "^3.0.1",
"html-to-text": "^9.0.5",
"json-schema-to-zod": "^1.1.1",
- "langchain": "^0.0.189",
+ "langchain": "^0.0.195",
"lodash": "^4.17.21",
"mammoth": "^1.6.0",
"mssql": "^8.1.2",
"n8n-nodes-base": "workspace:*",
"n8n-workflow": "workspace:*",
+ "openai": "^4.19.0",
"pdf-parse": "^1.1.1",
"pg": "^8.11.3",
"redis": "^4.6.8",
"sqlite3": "^5.1.6",
"temp": "^0.9.4",
"typeorm": "^0.3.17",
- "zod": "^3.22.2"
+ "zod": "^3.22.2",
+ "zod-to-json-schema": "^3.22.0"
}
}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index ccf5395dcd676..06049cbef9c68 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -319,8 +319,8 @@ importers:
specifier: ^1.1.1
version: 1.2.0
langchain:
- specifier: ^0.0.189
- version: 0.0.189(@aws-sdk/client-bedrock-runtime@3.454.0)(@getzep/zep-js@0.7.2)(@google-ai/generativelanguage@0.2.1)(@huggingface/inference@2.6.4)(@pinecone-database/pinecone@1.1.2)(@supabase/supabase-js@2.38.5)(@tensorflow-models/universal-sentence-encoder@1.3.3)(@tensorflow/tfjs-converter@4.11.0)(@tensorflow/tfjs-core@4.11.0)(@xata.io/client@0.25.3)(cohere-ai@6.2.2)(d3-dsv@3.0.1)(html-to-text@9.0.5)(lodash@4.17.21)(mammoth@1.6.0)(pdf-parse@1.1.1)(pg@8.11.3)(redis@4.6.11)(typeorm@0.3.17)
+ specifier: ^0.0.195
+ version: 0.0.195(@aws-sdk/client-bedrock-runtime@3.454.0)(@getzep/zep-js@0.7.2)(@google-ai/generativelanguage@0.2.1)(@huggingface/inference@2.6.4)(@pinecone-database/pinecone@1.1.2)(@supabase/supabase-js@2.38.5)(@tensorflow-models/universal-sentence-encoder@1.3.3)(@tensorflow/tfjs-converter@4.11.0)(@tensorflow/tfjs-core@4.11.0)(@xata.io/client@0.25.3)(cohere-ai@6.2.2)(d3-dsv@3.0.1)(html-to-text@9.0.5)(lodash@4.17.21)(mammoth@1.6.0)(pdf-parse@1.1.1)(pg@8.11.3)(redis@4.6.11)(typeorm@0.3.17)
lodash:
specifier: ^4.17.21
version: 4.17.21
@@ -336,6 +336,9 @@ importers:
n8n-workflow:
specifier: workspace:*
version: link:../../workflow
+ openai:
+ specifier: ^4.19.0
+ version: 4.20.0
pdf-parse:
specifier: ^1.1.1
version: 1.1.1
@@ -357,6 +360,9 @@ importers:
zod:
specifier: ^3.22.2
version: 3.22.4
+ zod-to-json-schema:
+ specifier: ^3.22.0
+ version: 3.22.0(zod@3.22.4)
devDependencies:
'@types/express':
specifier: ^4.17.6
@@ -1672,8 +1678,8 @@ packages:
resolution: {integrity: sha512-pvFiLP2BeOKA/ZOS6jxx4XhKzdVLHDhGlFEaZ2flWWYf2xOqVniqpk38I04DFRyz+L0ASggl7SkItTc+ZLju4w==}
dev: true
- /@anthropic-ai/sdk@0.6.8:
- resolution: {integrity: sha512-z4gDFrBf+W2wOVvwA3CA+5bfKOxQhPeXQo7+ITWj3r3XPulIMEasVT0KrD41G+anr5Yc3d2PKvXKB6b1LSon5w==}
+ /@anthropic-ai/sdk@0.9.1:
+ resolution: {integrity: sha512-wa1meQ2WSfoY8Uor3EdrJq0jTiZJoKoSii2ZVWRY1oN4Tlr5s59pADg9T79FTbPe1/se5c3pBeZgJL63wmuoBA==}
dependencies:
'@types/node': 18.16.16
'@types/node-fetch': 2.6.4
@@ -18688,8 +18694,8 @@ packages:
resolution: {integrity: sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==}
dev: false
- /langchain@0.0.189(@aws-sdk/client-bedrock-runtime@3.454.0)(@getzep/zep-js@0.7.2)(@google-ai/generativelanguage@0.2.1)(@huggingface/inference@2.6.4)(@pinecone-database/pinecone@1.1.2)(@supabase/supabase-js@2.38.5)(@tensorflow-models/universal-sentence-encoder@1.3.3)(@tensorflow/tfjs-converter@4.11.0)(@tensorflow/tfjs-core@4.11.0)(@xata.io/client@0.25.3)(cohere-ai@6.2.2)(d3-dsv@3.0.1)(html-to-text@9.0.5)(lodash@4.17.21)(mammoth@1.6.0)(pdf-parse@1.1.1)(pg@8.11.3)(redis@4.6.11)(typeorm@0.3.17):
- resolution: {integrity: sha512-74L0xGe7Me9RhTWgpImyYjkZm7Qo4JC2WCXkKCL3QP1ANi/Bk76pWPXCYwaI5DG+wlmgAUF+DWOi+Wi20p3fiA==}
+ /langchain@0.0.195(@aws-sdk/client-bedrock-runtime@3.454.0)(@getzep/zep-js@0.7.2)(@google-ai/generativelanguage@0.2.1)(@huggingface/inference@2.6.4)(@pinecone-database/pinecone@1.1.2)(@supabase/supabase-js@2.38.5)(@tensorflow-models/universal-sentence-encoder@1.3.3)(@tensorflow/tfjs-converter@4.11.0)(@tensorflow/tfjs-core@4.11.0)(@xata.io/client@0.25.3)(cohere-ai@6.2.2)(d3-dsv@3.0.1)(html-to-text@9.0.5)(lodash@4.17.21)(mammoth@1.6.0)(pdf-parse@1.1.1)(pg@8.11.3)(redis@4.6.11)(typeorm@0.3.17):
+ resolution: {integrity: sha512-b3TOfhSUwxe2ZCr4yYlR5dciyy0qyqrDeqAfVFJKM0H7/ueFHFllr4uJydsbzC5+ahlqDFRbzUEK005sbHgdSQ==}
engines: {node: '>=18'}
peerDependencies:
'@aws-crypto/sha256-js': ^5.0.0
@@ -18720,6 +18726,7 @@ packages:
'@planetscale/database': ^1.8.0
'@qdrant/js-client-rest': ^1.2.0
'@raycast/api': ^1.55.2
+ '@rockset/client': ^0.9.1
'@smithy/eventstream-codec': ^2.0.5
'@smithy/protocol-http': ^3.0.6
'@smithy/signature-v4': ^2.0.10
@@ -18846,6 +18853,8 @@ packages:
optional: true
'@raycast/api':
optional: true
+ '@rockset/client':
+ optional: true
'@smithy/eventstream-codec':
optional: true
'@smithy/protocol-http':
@@ -18985,7 +18994,7 @@ packages:
youtubei.js:
optional: true
dependencies:
- '@anthropic-ai/sdk': 0.6.8
+ '@anthropic-ai/sdk': 0.9.1
'@aws-sdk/client-bedrock-runtime': 3.454.0
'@getzep/zep-js': 0.7.2
'@google-ai/generativelanguage': 0.2.1
@@ -19024,7 +19033,7 @@ packages:
uuid: 9.0.0
yaml: 2.3.4
zod: 3.22.4
- zod-to-json-schema: 3.22.0(zod@3.22.4)
+ zod-to-json-schema: 3.20.3(zod@3.22.4)
transitivePeerDependencies:
- encoding
- supports-color
@@ -26985,6 +26994,14 @@ packages:
commander: 9.4.1
dev: true
+ /zod-to-json-schema@3.20.3(zod@3.22.4):
+ resolution: {integrity: sha512-/Q3wnyxAfCt94ZcrGiXXoiAfRqasxl9CX64LZ9fj+4dKH68zulUtU0uk1WMxQPfAxQ0ZI70dKzcoW7hHj+DwSQ==}
+ peerDependencies:
+ zod: ^3.20.0
+ dependencies:
+ zod: 3.22.4
+ dev: false
+
/zod-to-json-schema@3.22.0(zod@3.22.4):
resolution: {integrity: sha512-XQr8EwxPMzJGhoR+d/nRFWdi15VaZ+R5Uhssm+Xx5yS30xCpuutfKRm4rerE0SK9j2dWB5Z3FvDD0w8WMVGzkA==}
peerDependencies: