Merge branch 'master' of github.com:n8n-io/n8n into toggl-api-update
Joffcom committed Jul 30, 2024
2 parents fc93604 + f5722e8 commit 5c0dd7e
Showing 363 changed files with 3,090 additions and 1,561 deletions.
22 changes: 21 additions & 1 deletion packages/@n8n/config/src/index.ts
@@ -1,4 +1,4 @@
-import { Config, Nested } from './decorators';
+import { Config, Env, Nested } from './decorators';
import { CredentialsConfig } from './configs/credentials';
import { DatabaseConfig } from './configs/database';
import { EmailConfig } from './configs/email';
@@ -51,4 +51,24 @@ export class GlobalConfig {

@Nested
readonly workflows: WorkflowsConfig;

/** Path n8n is deployed to */
@Env('N8N_PATH')
readonly path: string = '/';

/** Host name n8n can be reached on */
@Env('N8N_HOST')
readonly host: string = 'localhost';

/** HTTP port n8n can be reached on */
@Env('N8N_PORT')
readonly port: number = 5678;

/** IP address n8n should listen on */
@Env('N8N_LISTEN_ADDRESS')
readonly listen_address: string = '0.0.0.0';

/** HTTP Protocol via which n8n can be reached */
@Env('N8N_PROTOCOL')
readonly protocol: 'http' | 'https' = 'http';
}
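
For orientation, the new fields map environment variables onto GlobalConfig, with the field initializers acting as defaults. A minimal standalone sketch of the expected resolution behaviour (this approximates what the @Env decorator presumably does; it is not the decorator's actual implementation):

// Illustrative only: resolves the same variables the diff introduces,
// falling back to the defaults declared above when a variable is unset.
const env = process.env;

const serverConfig = {
  path: env.N8N_PATH ?? '/',
  host: env.N8N_HOST ?? 'localhost',
  port: Number(env.N8N_PORT ?? 5678),
  listen_address: env.N8N_LISTEN_ADDRESS ?? '0.0.0.0',
  protocol: env.N8N_PROTOCOL === 'https' ? 'https' : 'http',
};

// Example: N8N_HOST=n8n.example.com N8N_PROTOCOL=https N8N_PORT=443
console.log(`${serverConfig.protocol}://${serverConfig.host}:${serverConfig.port}${serverConfig.path}`);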
5 changes: 5 additions & 0 deletions packages/@n8n/config/test/config.test.ts
@@ -18,6 +18,11 @@ describe('GlobalConfig', () => {
});

const defaultConfig: GlobalConfig = {
path: '/',
host: 'localhost',
port: 5678,
listen_address: '0.0.0.0',
protocol: 'http',
database: {
logging: {
enabled: false,
@@ -113,6 +113,7 @@ function getInputs(
'@n8n/n8n-nodes-langchain.lmChatAnthropic',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
'@n8n/n8n-nodes-langchain.lmChatMistralCloud',
+'@n8n/n8n-nodes-langchain.lmChatOllama',
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'@n8n/n8n-nodes-langchain.lmChatGroq',
'@n8n/n8n-nodes-langchain.lmChatGoogleVertex',
@@ -22,7 +22,7 @@ import { LLMChain } from 'langchain/chains';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { HumanMessage } from '@langchain/core/messages';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
-import { ChatOllama } from '@langchain/community/chat_models/ollama';
+import { ChatOllama } from '@langchain/ollama';
import { getTemplateNoticeField } from '../../../utils/sharedFields';
import {
getOptionalOutputParsers,
@@ -0,0 +1,257 @@
import type {
IDataObject,
IExecuteFunctions,
INodeExecutionData,
INodeParameters,
INodeType,
INodeTypeDescription,
} from 'n8n-workflow';

import { NodeConnectionType, NodeOperationError } from 'n8n-workflow';

import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { HumanMessage } from '@langchain/core/messages';
import { SystemMessagePromptTemplate, ChatPromptTemplate } from '@langchain/core/prompts';
import { OutputFixingParser, StructuredOutputParser } from 'langchain/output_parsers';
import { z } from 'zod';
import { getTracingConfig } from '../../../utils/tracing';

const DEFAULT_SYSTEM_PROMPT_TEMPLATE =
'You are a highly intelligent and accurate sentiment analyzer. Analyze the sentiment of the provided text. Categorize it into one of the following: {categories}. Use the provided formatting instructions. Only output the JSON.';

const DEFAULT_CATEGORIES = 'Positive, Neutral, Negative';
const configuredOutputs = (parameters: INodeParameters, defaultCategories: string) => {
const options = (parameters?.options ?? {}) as IDataObject;
const categories = (options?.categories as string) ?? defaultCategories;
const categoriesArray = categories.split(',').map((cat) => cat.trim());

const ret = categoriesArray.map((cat) => ({ type: NodeConnectionType.Main, displayName: cat }));
return ret;
};

export class SentimentAnalysis implements INodeType {
description: INodeTypeDescription = {
displayName: 'Sentiment Analysis',
name: 'sentimentAnalysis',
icon: 'fa:balance-scale-left',
iconColor: 'black',
group: ['transform'],
version: 1,
description: 'Analyze the sentiment of your text',
codex: {
categories: ['AI'],
subcategories: {
AI: ['Chains', 'Root Nodes'],
},
resources: {
primaryDocumentation: [
{
url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.sentimentanalysis/',
},
],
},
},
defaults: {
name: 'Sentiment Analysis',
},
inputs: [
{ displayName: '', type: NodeConnectionType.Main },
{
displayName: 'Model',
maxConnections: 1,
type: NodeConnectionType.AiLanguageModel,
required: true,
},
],
outputs: `={{(${configuredOutputs})($parameter, "${DEFAULT_CATEGORIES}")}}`,
properties: [
{
displayName: 'Text to Analyze',
name: 'inputText',
type: 'string',
required: true,
default: '',
description: 'Use an expression to reference data in previous nodes or enter static text',
typeOptions: {
rows: 2,
},
},
{
displayName:
'Sentiment scores are LLM-generated estimates, not statistically rigorous measurements. They may be inconsistent across runs and should be used as rough indicators only.',
name: 'detailedResultsNotice',
type: 'notice',
default: '',
displayOptions: {
show: {
'/options.includeDetailedResults': [true],
},
},
},
{
displayName: 'Options',
name: 'options',
type: 'collection',
default: {},
placeholder: 'Add Option',
options: [
{
displayName: 'Sentiment Categories',
name: 'categories',
type: 'string',
default: DEFAULT_CATEGORIES,
description: 'A comma-separated list of categories to analyze',
noDataExpression: true,
typeOptions: {
rows: 2,
},
},
{
displayName: 'System Prompt Template',
name: 'systemPromptTemplate',
type: 'string',
default: DEFAULT_SYSTEM_PROMPT_TEMPLATE,
description: 'String to use directly as the system prompt template',
typeOptions: {
rows: 6,
},
},
{
displayName: 'Include Detailed Results',
name: 'includeDetailedResults',
type: 'boolean',
default: false,
description:
'Whether to include sentiment strength and confidence scores in the output',
},
{
displayName: 'Enable Auto-Fixing',
name: 'enableAutoFixing',
type: 'boolean',
default: true,
description: 'Whether to enable auto-fixing for the output parser',
},
],
},
],
};

async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
const items = this.getInputData();

const llm = (await this.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;

const returnData: INodeExecutionData[][] = [];

for (let i = 0; i < items.length; i++) {
try {
const sentimentCategories = this.getNodeParameter(
'options.categories',
i,
DEFAULT_CATEGORIES,
) as string;

const categories = sentimentCategories
.split(',')
.map((cat) => cat.trim())
.filter(Boolean);

if (categories.length === 0) {
throw new NodeOperationError(this.getNode(), 'No sentiment categories provided', {
itemIndex: i,
});
}

// Initialize returnData with empty arrays for each category
if (returnData.length === 0) {
returnData.push(...Array.from({ length: categories.length }, () => []));
}

const options = this.getNodeParameter('options', i, {}) as {
systemPromptTemplate?: string;
includeDetailedResults?: boolean;
enableAutoFixing?: boolean;
};

const schema = z.object({
sentiment: z.enum(categories as [string, ...string[]]),
strength: z
.number()
.min(0)
.max(1)
.describe('Strength score for sentiment in relation to the category'),
confidence: z.number().min(0).max(1),
});

const structuredParser = StructuredOutputParser.fromZodSchema(schema);

const parser = options.enableAutoFixing
? OutputFixingParser.fromLLM(llm, structuredParser)
: structuredParser;

const systemPromptTemplate = SystemMessagePromptTemplate.fromTemplate(
`${options.systemPromptTemplate ?? DEFAULT_SYSTEM_PROMPT_TEMPLATE}
{format_instructions}`,
);

const input = this.getNodeParameter('inputText', i) as string;
const inputPrompt = new HumanMessage(input);
const messages = [
await systemPromptTemplate.format({
categories: sentimentCategories,
format_instructions: parser.getFormatInstructions(),
}),
inputPrompt,
];

const prompt = ChatPromptTemplate.fromMessages(messages);
const chain = prompt.pipe(llm).pipe(parser).withConfig(getTracingConfig(this));

try {
const output = await chain.invoke(messages);
const sentimentIndex = categories.findIndex(
(s) => s.toLowerCase() === output.sentiment.toLowerCase(),
);

if (sentimentIndex !== -1) {
const resultItem = { ...items[i] };
const sentimentAnalysis: IDataObject = {
category: output.sentiment,
};
if (options.includeDetailedResults) {
sentimentAnalysis.strength = output.strength;
sentimentAnalysis.confidence = output.confidence;
}
resultItem.json = {
...resultItem.json,
sentimentAnalysis,
};
returnData[sentimentIndex].push(resultItem);
}
} catch (error) {
throw new NodeOperationError(
this.getNode(),
'Error during parsing of LLM output, please check your LLM model and configuration',
{
itemIndex: i,
},
);
}
} catch (error) {
if (this.continueOnFail(error)) {
const executionErrorData = this.helpers.constructExecutionMetaData(
this.helpers.returnJsonArray({ error: error.message }),
{ itemData: { item: i } },
);
returnData[0].push(...executionErrorData);
continue;
}
throw error;
}
}
return returnData;
}
}
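
For reference, an item routed to one of the category outputs carries a json payload shaped like the sentimentAnalysis object assembled in execute() above. A small illustration with made-up values (strength and confidence only appear when Include Detailed Results is enabled):

// Illustrative output item; field names follow the code above, values are invented.
const exampleOutputItem = {
  json: {
    // ...fields of the original input item are preserved by the spread of resultItem.json
    sentimentAnalysis: {
      category: 'Positive',
      strength: 0.82,
      confidence: 0.9,
    },
  },
};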
@@ -7,8 +7,8 @@ import {
type SupplyData,
} from 'n8n-workflow';

-import type { ChatOllamaInput } from '@langchain/community/chat_models/ollama';
-import { ChatOllama } from '@langchain/community/chat_models/ollama';
+import type { ChatOllamaInput } from '@langchain/ollama';
+import { ChatOllama } from '@langchain/ollama';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { ollamaModel, ollamaOptions, ollamaDescription } from '../LMOllama/description';
import { N8nLlmTracing } from '../N8nLlmTracing';
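Both the chain and the Ollama model node now take ChatOllama from the dedicated @langchain/ollama package rather than @langchain/community/chat_models/ollama. A rough standalone sketch of the new import in use (the base URL and model name are placeholders; the node itself builds these options from credentials and node parameters):

import { ChatOllama } from '@langchain/ollama';

// Placeholder endpoint and model; substitute whatever your Ollama instance serves.
const chatModel = new ChatOllama({
  baseUrl: 'http://localhost:11434',
  model: 'llama3',
  temperature: 0.7,
});

async function main() {
  // invoke() accepts a plain string and returns an AIMessage.
  const reply = await chatModel.invoke('Classify the sentiment of: "I love this product!"');
  console.log(reply.content);
}

void main();
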
30 changes: 16 additions & 14 deletions packages/@n8n/nodes-langchain/package.json
@@ -45,6 +45,7 @@
"dist/nodes/chains/ChainSummarization/ChainSummarization.node.js",
"dist/nodes/chains/ChainLLM/ChainLlm.node.js",
"dist/nodes/chains/ChainRetrievalQA/ChainRetrievalQa.node.js",
"dist/nodes/chains/SentimentAnalysis/SentimentAnalysis.node.js",
"dist/nodes/chains/InformationExtractor/InformationExtractor.node.js",
"dist/nodes/chains/TextClassifier/TextClassifier.node.js",
"dist/nodes/code/Code.node.js",
@@ -130,29 +131,30 @@
"dependencies": {
"@aws-sdk/client-bedrock-runtime": "3.535.0",
"@aws-sdk/credential-provider-node": "3.535.0",
"@getzep/zep-cloud": "1.0.6",
"@getzep/zep-cloud": "1.0.11",
"@getzep/zep-js": "0.9.0",
"@google-ai/generativelanguage": "2.5.0",
"@google-cloud/resource-manager": "5.3.0",
"@google/generative-ai": "0.11.4",
"@huggingface/inference": "2.7.0",
"@langchain/anthropic": "0.1.21",
"@langchain/anthropic": "0.2.9",
"@langchain/cohere": "0.0.10",
"@langchain/community": "0.2.13",
"@langchain/core": "0.2.9",
"@langchain/google-genai": "0.0.16",
"@langchain/google-vertexai": "0.0.19",
"@langchain/groq": "0.0.12",
"@langchain/mistralai": "0.0.22",
"@langchain/openai": "0.0.33",
"@langchain/pinecone": "0.0.6",
"@langchain/community": "0.2.20",
"@langchain/core": "0.2.18",
"@langchain/google-genai": "0.0.23",
"@langchain/google-vertexai": "0.0.21",
"@langchain/groq": "0.0.15",
"@langchain/mistralai": "0.0.27",
"@langchain/ollama": "^0.0.2",
"@langchain/openai": "0.2.5",
"@langchain/pinecone": "0.0.8",
"@langchain/qdrant": "^0.0.5",
"@langchain/redis": "0.0.5",
"@langchain/textsplitters": "0.0.2",
"@langchain/textsplitters": "0.0.3",
"@mozilla/readability": "^0.5.0",
"@n8n/typeorm": "0.3.20-10",
"@n8n/vm2": "3.9.20",
"@pinecone-database/pinecone": "2.2.1",
"@pinecone-database/pinecone": "3.0.0",
"@qdrant/js-client-rest": "1.9.0",
"@supabase/supabase-js": "2.43.4",
"@types/pg": "^8.11.3",
@@ -167,12 +169,12 @@
"html-to-text": "9.0.5",
"jsdom": "^23.0.1",
"json-schema-to-zod": "2.1.0",
"langchain": "0.2.2",
"langchain": "0.2.11",
"lodash": "4.17.21",
"mammoth": "1.7.2",
"n8n-nodes-base": "workspace:*",
"n8n-workflow": "workspace:*",
"openai": "4.47.1",
"openai": "4.53.0",
"pdf-parse": "1.1.1",
"pg": "8.11.3",
"redis": "4.6.12",
