Bug Description
When I run the attached agent flow, I get this error in the logs:
2024-11-22 18:39:47 [ERROR]: [server]: Error: Cannot read properties of undefined (reading 'label')
Error: Cannot read properties of undefined (reading 'label')
at buildAgentGraph (/usr/src/packages/server/dist/utils/buildAgentGraph.js:399:19)
at async utilBuildAgentResponse (/usr/src/packages/server/dist/utils/buildChatflow.js:428:31)
at async utilBuildChatflow (/usr/src/packages/server/dist/utils/buildChatflow.js:195:20)
at async createAndStreamInternalPrediction (/usr/src/packages/server/dist/controllers/internal-predictions/index.js:33:29)
2024-11-22 18:39:47 [ERROR]: [server]: Error: Error buildAgentGraph - Cannot read properties of undefined (reading 'label')
Error: Error buildAgentGraph - Cannot read properties of undefined (reading 'label')
at buildAgentGraph (/usr/src/packages/server/dist/utils/buildAgentGraph.js:405:15)
at async utilBuildAgentResponse (/usr/src/packages/server/dist/utils/buildChatflow.js:428:31)
at async utilBuildChatflow (/usr/src/packages/server/dist/utils/buildChatflow.js:195:20)
at async createAndStreamInternalPrediction (/usr/src/packages/server/dist/controllers/internal-predictions/index.js:33:29)
2024-11-22 18:39:47 [ERROR]: [server]: Error: Error buildAgentGraph - Cannot read properties of undefined (reading 'label')
Error: Error buildAgentGraph - Cannot read properties of undefined (reading 'label')
at utilBuildAgentResponse (/usr/src/packages/server/dist/utils/buildChatflow.js:534:15)
at async utilBuildChatflow (/usr/src/packages/server/dist/utils/buildChatflow.js:195:20)
at async createAndStreamInternalPrediction (/usr/src/packages/server/dist/controllers/internal-predictions/index.js:33:29)
Reproduction
This is the JSON for the agentflow:
{
"nodes": [
{
"id": "seqStart_0",
"position": {
"x": 524,
"y": 158
},
"type": "customNode",
"data": {
"id": "seqStart_0",
"label": "Start",
"version": 2,
"name": "seqStart",
"type": "Start",
"baseClasses": [
"Start"
],
"category": "Sequential Agents",
"description": "Starting point of the conversation",
"inputParams": [],
"inputAnchors": [
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel",
"description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat",
"id": "seqStart_0-input-model-BaseChatModel"
},
{
"label": "Agent Memory",
"name": "agentMemory",
"type": "BaseCheckpointSaver",
"description": "Save the state of the agent",
"optional": true,
"id": "seqStart_0-input-agentMemory-BaseCheckpointSaver"
},
{
"label": "State",
"name": "state",
"type": "State",
"description": "State is an object that is updated by nodes in the graph, passing from one node to another. By default, state contains "messages" that got updated with each message sent and received.",
"optional": true,
"id": "seqStart_0-input-state-State"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "seqStart_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{chatOllama_0.data.instance}}",
"agentMemory": "",
"state": "{{seqState_0.data.instance}}",
"inputModeration": ""
},
"outputAnchors": [
{
"id": "seqStart_0-output-seqStart-Start",
"name": "seqStart",
"label": "Start",
"description": "Starting point of the conversation",
"type": "Start"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 382,
"positionAbsolute": {
"x": 524,
"y": 158
},
"selected": false
},
{
"id": "chatOllama_0",
"position": {
"x": 123.11177315139341,
"y": 148.53721021519857
},
"type": "customNode",
"data": {
"id": "chatOllama_0",
"label": "ChatOllama",
"version": 4,
"name": "chatOllama",
"type": "ChatOllama",
"baseClasses": [
"ChatOllama",
"ChatOllama",
"BaseChatModel",
"BaseLanguageModel",
"Runnable"
],
"category": "Chat Models",
"description": "Chat completion using open-source LLM on Ollama",
"inputParams": [
{
"label": "Base URL",
"name": "baseUrl",
"type": "string",
"default": "http://localhost:11434",
"id": "chatOllama_0-input-baseUrl-string"
},
{
"label": "Model Name",
"name": "modelName",
"type": "string",
"placeholder": "llama2",
"id": "chatOllama_0-input-modelName-string"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"description": "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOllama_0-input-temperature-number"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Allow image uploads for multimodal models. e.g. llama3.2-vision",
"default": false,
"optional": true,
"id": "chatOllama_0-input-allowImageUploads-boolean"
},
{
"label": "JSON Mode",
"name": "jsonMode",
"type": "boolean",
"description": "Coerces model outputs to only return JSON. Specify in the system prompt to return JSON. Ex: Format all responses as JSON object",
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-jsonMode-boolean"
},
{
"label": "Keep Alive",
"name": "keepAlive",
"type": "string",
"description": "How long to keep connection alive. A duration string (such as "10m" or "24h")",
"default": "5m",
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-keepAlive-string"
},
{
"label": "Top P",
"name": "topP",
"type": "number",
"description": "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-topP-number"
},
{
"label": "Top K",
"name": "topK",
"type": "number",
"description": "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-topK-number"
},
{
"label": "Mirostat",
"name": "mirostat",
"type": "number",
"description": "Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-mirostat-number"
},
{
"label": "Mirostat ETA",
"name": "mirostatEta",
"type": "number",
"description": "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-mirostatEta-number"
},
{
"label": "Mirostat TAU",
"name": "mirostatTau",
"type": "number",
"description": "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-mirostatTau-number"
},
{
"label": "Context Window Size",
"name": "numCtx",
"type": "number",
"description": "Sets the size of the context window used to generate the next token. (Default: 2048) Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-numCtx-number"
},
{
"label": "Number of GPU",
"name": "numGpu",
"type": "number",
"description": "The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-numGpu-number"
},
{
"label": "Number of Thread",
"name": "numThread",
"type": "number",
"description": "Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-numThread-number"
},
{
"label": "Repeat Last N",
"name": "repeatLastN",
"type": "number",
"description": "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-repeatLastN-number"
},
{
"label": "Repeat Penalty",
"name": "repeatPenalty",
"type": "number",
"description": "Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-repeatPenalty-number"
},
{
"label": "Stop Sequence",
"name": "stop",
"type": "string",
"rows": 4,
"placeholder": "AI assistant:",
"description": "Sets the stop sequences to use. Use comma to seperate different sequences. Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-stop-string"
},
{
"label": "Tail Free Sampling",
"name": "tfsZ",
"type": "number",
"description": "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (Default: 1). Refer to <a target="_blank" href="https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values">docs for more details",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOllama_0-input-tfsZ-number"
}
],
"inputAnchors": [
{
"label": "Cache",
"name": "cache",
"type": "BaseCache",
"optional": true,
"id": "chatOllama_0-input-cache-BaseCache"
}
],
"inputs": {
"cache": "",
"baseUrl": "http://10.0.0.142:11434",
"modelName": "qwn:7b",
"temperature": "0.2",
"allowImageUploads": "",
"jsonMode": true,
"keepAlive": "5m",
"topP": "",
"topK": "",
"mirostat": "",
"mirostatEta": "",
"mirostatTau": "",
"numCtx": "30000",
"numGpu": "",
"numThread": "",
"repeatLastN": "",
"repeatPenalty": "",
"stop": "",
"tfsZ": ""
},
"outputAnchors": [
{
"id": "chatOllama_0-output-chatOllama-ChatOllama|ChatOllama|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatOllama",
"label": "ChatOllama",
"description": "Chat completion using open-source LLM on Ollama",
"type": "ChatOllama | ChatOllama | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 675,
"selected": false,
"positionAbsolute": {
"x": 123.11177315139341,
"y": 148.53721021519857
},
"dragging": false
},
{
"id": "seqState_0",
"position": {
"x": 515.9666105603796,
"y": 564.3660147241255
},
"type": "customNode",
"data": {
"id": "seqState_0",
"label": "State",
"version": 2,
"name": "seqState",
"type": "State",
"baseClasses": [
"State"
],
"category": "Sequential Agents",
"description": "A centralized state object, updated by nodes in the graph, passing from one node to another",
"inputParams": [
{
"label": "Custom State",
"name": "stateMemory",
"type": "tabs",
"tabIdentifier": "selectedStateTab",
"additionalParams": true,
"default": "stateMemoryUI",
"tabs": [
{
"label": "Custom State (Table)",
"name": "stateMemoryUI",
"type": "datagrid",
"description": "Structure for state. By default, state contains "messages" that got updated with each message sent and received.",
"hint": {
"label": "How to use",
"value": "\nSpecify the Key, Operation Type, and Default Value for the state object. The Operation Type can be either "Replace" or "Append".\n\nReplace\n- Replace the existing value with the new value.\n- If the new value is null, the existing value will be retained.\n\nAppend\n- Append the new value to the existing value.\n- Default value can be empty or an array. Ex: ["a", "b"]\n- Final value is an array.\n"
},
"datagrid": [
{
"field": "key",
"headerName": "Key",
"editable": true
},
{
"field": "type",
"headerName": "Operation",
"type": "singleSelect",
"valueOptions": [
"Replace",
"Append"
],
"editable": true
},
{
"field": "defaultValue",
"headerName": "Default Value",
"flex": 1,
"editable": true
}
],
"optional": true,
"additionalParams": true
},
{
"label": "Custom State (Code)",
"name": "stateMemoryCode",
"type": "code",
"description": "JSON object representing the state",
"hideCodeExecute": true,
"codeExample": "{\n aggregate: {\n value: (x, y) => x.concat(y), // here we append the new message to the existing messages\n default: () => []\n }\n}",
"optional": true,
"additionalParams": true
}
],
"id": "seqState_0-input-stateMemory-tabs"
}
],
"inputAnchors": [],
"inputs": {
"stateMemory": "stateMemoryUI",
"stateMemoryUI": "[{"key":"next","type":"Replace","defaultValue":"","actions":"","id":1}]"
},
"outputAnchors": [
{
"id": "seqState_0-output-seqState-State",
"name": "seqState",
"label": "State",
"description": "A centralized state object, updated by nodes in the graph, passing from one node to another",
"type": "State"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 251,
"selected": false,
"positionAbsolute": {
"x": 515.9666105603796,
"y": 564.3660147241255
},
"dragging": false
},
{
"id": "seqLLMNode_0",
"position": {
"x": 896.1857660643981,
"y": 146.23981350520455
},
"type": "customNode",
"data": {
"id": "seqLLMNode_0",
"label": "LLM Node",
"version": 3,
"name": "seqLLMNode",
"type": "LLMNode",
"baseClasses": [
"LLMNode"
],
"category": "Sequential Agents",
"description": "Run Chat Model and return the output",
"inputParams": [
{
"label": "Name",
"name": "llmNodeName",
"type": "string",
"placeholder": "LLM",
"id": "seqLLMNode_0-input-llmNodeName-string"
},
{
"label": "System Prompt",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_0-input-systemMessagePrompt-string"
},
{
"label": "Human Prompt",
"name": "humanMessagePrompt",
"type": "string",
"description": "This prompt will be added at the end of the messages as human message",
"rows": 4,
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_0-input-humanMessagePrompt-string"
},
{
"label": "Messages History",
"name": "messageHistory",
"description": "Return a list of messages between System Prompt and Human Prompt. This is useful when you want to provide few shot examples",
"type": "code",
"hideCodeExecute": true,
"codeExample": "const { AIMessage, HumanMessage, ToolMessage } = require('@langchain/core/messages');\n\nreturn [\n new HumanMessage("What is 333382 🦜 1932?"),\n new AIMessage({\n content: "",\n tool_calls: [\n {\n id: "12345",\n name: "calulator",\n args: {\n number1: 333382,\n number2: 1932,\n operation: "divide",\n },\n },\n ],\n }),\n new ToolMessage({\n tool_call_id: "12345",\n content: "The answer is 172.558.",\n }),\n new AIMessage("The answer is 172.558."),\n]",
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_0-input-messageHistory-code"
},
{
"label": "Format Prompt Values",
"name": "promptValues",
"description": "Assign values to the prompt variables. You can also use $flow.state. to get the state value",
"type": "json",
"optional": true,
"acceptVariable": true,
"list": true,
"additionalParams": true,
"id": "seqLLMNode_0-input-promptValues-json"
},
{
"label": "JSON Structured Output",
"name": "llmStructuredOutput",
"type": "datagrid",
"description": "Instruct the LLM to give output in a JSON structured schema",
"datagrid": [
{
"field": "key",
"headerName": "Key",
"editable": true
},
{
"field": "type",
"headerName": "Type",
"type": "singleSelect",
"valueOptions": [
"String",
"String Array",
"Number",
"Boolean",
"Enum"
],
"editable": true
},
{
"field": "enumValues",
"headerName": "Enum Values",
"editable": true
},
{
"field": "description",
"headerName": "Description",
"flex": 1,
"editable": true
}
],
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_0-input-llmStructuredOutput-datagrid"
},
{
"label": "Update State",
"name": "updateStateMemory",
"type": "tabs",
"tabIdentifier": "selectedUpdateStateMemoryTab",
"default": "updateStateMemoryUI",
"additionalParams": true,
"tabs": [
{
"label": "Update State (Table)",
"name": "updateStateMemoryUI",
"type": "datagrid",
"hint": {
"label": "How to use",
"value": "\n1. Key and value pair to be updated. For example: if you have the following State:\n | Key | Operation | Default Value |\n |-----------|---------------|-------------------|\n | user | Replace | |\n\n You can update the "user" value with the following:\n | Key | Value |\n |-----------|-----------|\n | user | john doe |\n\n2. If you want to use the LLM Node's output as the value to update state, it is available as available as $flow.output with the following structure:\n json\n {\n \"content\": 'Hello! How can I assist you today?',\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n \n\n For example, if the output content is the value you want to update the state with, you can do the following:\n | Key | Value |\n |-----------|---------------------------|\n | user | $flow.output.content |\n\n3. You can get default flow config, including the current "state":\n - $flow.sessionId\n - $flow.chatId\n - $flow.chatflowId\n - $flow.input\n - $flow.state\n\n4. You can get custom variables: $vars.<variable-name>\n\n"
},
"description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values",
"datagrid": [
{
"field": "key",
"headerName": "Key",
"type": "asyncSingleSelect",
"loadMethod": "loadStateKeys",
"flex": 0.5,
"editable": true
},
{
"field": "value",
"headerName": "Value",
"type": "freeSolo",
"valueOptions": [
{
"label": "LLM Node Output (string)",
"value": "$flow.output.content"
},
{
"label": "LLM JSON Output Key (string)",
"value": "$flow.output."
},
{
"label": "Global variable (string)",
"value": "$vars."
},
{
"label": "Input Question (string)",
"value": "$flow.input"
},
{
"label": "Session Id (string)",
"value": "$flow.sessionId"
},
{
"label": "Chat Id (string)",
"value": "$flow.chatId"
},
{
"label": "Chatflow Id (string)",
"value": "$flow.chatflowId"
}
],
"editable": true,
"flex": 1
}
],
"optional": true,
"additionalParams": true
},
{
"label": "Update State (Code)",
"name": "updateStateMemoryCode",
"type": "code",
"hint": {
"label": "How to use",
"value": "\n1. Return the key value JSON object. For example: if you have the following State:\n json\n {\n \"user\": null\n }\n \n\n You can update the "user" value by returning the following:\n js\n return {\n \"user\": \"john doe\"\n }\n \n\n2. If you want to use the LLM Node's output as the value to update state, it is available as $flow.output with the following structure:\n json\n {\n \"content\": 'Hello! How can I assist you today?',\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n \n\n For example, if the output content is the value you want to update the state with, you can return the following:\n js\n return {\n \"user\": $flow.output.content\n }\n \n\n3. You can also get default flow config, including the current "state":\n - $flow.sessionId\n - $flow.chatId\n - $flow.chatflowId\n - $flow.input\n - $flow.state\n\n4. You can get custom variables: $vars.<variable-name>\n\n"
},
"description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values. Must return an object representing the state",
"hideCodeExecute": true,
"codeExample": "const result = $flow.output;\n\n/* Suppose we have a custom State schema like this:\n* {\n aggregate: {\n value: (x, y) => x.concat(y),\n default: () => []\n }\n }\n*/\n\nreturn {\n aggregate: [result.content]\n};",
"optional": true,
"additionalParams": true
}
],
"id": "seqLLMNode_0-input-updateStateMemory-tabs"
}
],
"inputAnchors": [
{
"label": "Start | Agent | Condition | LLM | Tool Node",
"name": "sequentialNode",
"type": "Start | Agent | Condition | LLMNode | ToolNode",
"list": true,
"id": "seqLLMNode_0-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel",
"optional": true,
"description": "Overwrite model to be used for this node",
"id": "seqLLMNode_0-input-model-BaseChatModel"
}
],
"inputs": {
"llmNodeName": "youtube title writer",
"systemMessagePrompt": "[INSTRUCTION]\nAnalyze the video transcript and provide a possible title. \nDo not explain what you are doing: only output the suggested title with no additional comments.\nIf the word Qigong has spelling mistakes, please fix them. Common spelling mistakes are: chicun, chico.\n\nA good title meets the following criteria:\n1. Clear and Descriptive: Accurately describe the content.\n2. Keyword-Rich: Include relevant keywords like "Zhi Neng Qigong" as part of the title, but avoid randomly inserting keywords.\n3. Engaging and Compelling: Grab attention and pique curiosity.\n4. Concise: Keep it under 60 characters.\n5. Questions: Consider posing a question to intrigue viewers.\n6. Use of Capitalization: Capitalize the first letter of each word.\n\nRules of the final result:\n- The final result should be a string written in Spanish.\n- Do not explain what you are doing or add notes or comments, simply output the proposed title as a plain string, ready to be published.\n- Do not leave variables to be filled in, the final result must be the final title that will be published.\n- Do not add any markdown formatting, or hashtags.\n\nAdditional information:\n- The name of the channel is: Matias Hegoburu\n- The channel is an educational channel about Zhi Neng Qigong.\n- The content is for educational purposes only, and should not replace professional medical advice\n- Important keywords: Zhi Neng Qigong, Qigong\n\nOnce you generate the title, send it to the reviewer for approval. If the reviewer sends feedback, implement it.",
"humanMessagePrompt": "",
"messageHistory": "",
"sequentialNode": [
"{{seqStart_0.data.instance}}"
],
"model": "",
"promptValues": "",
"llmStructuredOutput": "[{"key":"title","type":"String","enumValues":"","description":"the suggested title for the given script","actions":"","id":1}]",
"updateStateMemory": "updateStateMemoryUI"
},
"outputAnchors": [
{
"id": "seqLLMNode_0-output-seqLLMNode-LLMNode",
"name": "seqLLMNode",
"label": "LLMNode",
"description": "Run Chat Model and return the output",
"type": "LLMNode"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 450,
"selected": false,
"positionAbsolute": {
"x": 896.1857660643981,
"y": 146.23981350520455
},
"dragging": false
},
{
"id": "seqLLMNode_1",
"position": {
"x": 1263.7692396634495,
"y": 150.83460692519276
},
"type": "customNode",
"data": {
"id": "seqLLMNode_1",
"label": "LLM Node",
"version": 3,
"name": "seqLLMNode",
"type": "LLMNode",
"baseClasses": [
"LLMNode"
],
"category": "Sequential Agents",
"description": "Run Chat Model and return the output",
"inputParams": [
{
"label": "Name",
"name": "llmNodeName",
"type": "string",
"placeholder": "LLM",
"id": "seqLLMNode_1-input-llmNodeName-string"
},
{
"label": "System Prompt",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_1-input-systemMessagePrompt-string"
},
{
"label": "Human Prompt",
"name": "humanMessagePrompt",
"type": "string",
"description": "This prompt will be added at the end of the messages as human message",
"rows": 4,
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_1-input-humanMessagePrompt-string"
},
{
"label": "Messages History",
"name": "messageHistory",
"description": "Return a list of messages between System Prompt and Human Prompt. This is useful when you want to provide few shot examples",
"type": "code",
"hideCodeExecute": true,
"codeExample": "const { AIMessage, HumanMessage, ToolMessage } = require('@langchain/core/messages');\n\nreturn [\n new HumanMessage("What is 333382 🦜 1932?"),\n new AIMessage({\n content: "",\n tool_calls: [\n {\n id: "12345",\n name: "calulator",\n args: {\n number1: 333382,\n number2: 1932,\n operation: "divide",\n },\n },\n ],\n }),\n new ToolMessage({\n tool_call_id: "12345",\n content: "The answer is 172.558.",\n }),\n new AIMessage("The answer is 172.558."),\n]",
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_1-input-messageHistory-code"
},
{
"label": "Format Prompt Values",
"name": "promptValues",
"description": "Assign values to the prompt variables. You can also use $flow.state. to get the state value",
"type": "json",
"optional": true,
"acceptVariable": true,
"list": true,
"additionalParams": true,
"id": "seqLLMNode_1-input-promptValues-json"
},
{
"label": "JSON Structured Output",
"name": "llmStructuredOutput",
"type": "datagrid",
"description": "Instruct the LLM to give output in a JSON structured schema",
"datagrid": [
{
"field": "key",
"headerName": "Key",
"editable": true
},
{
"field": "type",
"headerName": "Type",
"type": "singleSelect",
"valueOptions": [
"String",
"String Array",
"Number",
"Boolean",
"Enum"
],
"editable": true
},
{
"field": "enumValues",
"headerName": "Enum Values",
"editable": true
},
{
"field": "description",
"headerName": "Description",
"flex": 1,
"editable": true
}
],
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_1-input-llmStructuredOutput-datagrid"
},
{
"label": "Update State",
"name": "updateStateMemory",
"type": "tabs",
"tabIdentifier": "selectedUpdateStateMemoryTab",
"default": "updateStateMemoryUI",
"additionalParams": true,
"tabs": [
{
"label": "Update State (Table)",
"name": "updateStateMemoryUI",
"type": "datagrid",
"hint": {
"label": "How to use",
"value": "\n1. Key and value pair to be updated. For example: if you have the following State:\n | Key | Operation | Default Value |\n |-----------|---------------|-------------------|\n | user | Replace | |\n\n You can update the "user" value with the following:\n | Key | Value |\n |-----------|-----------|\n | user | john doe |\n\n2. If you want to use the LLM Node's output as the value to update state, it is available as available as $flow.output with the following structure:\n json\n {\n \"content\": 'Hello! How can I assist you today?',\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n \n\n For example, if the output content is the value you want to update the state with, you can do the following:\n | Key | Value |\n |-----------|---------------------------|\n | user | $flow.output.content |\n\n3. You can get default flow config, including the current "state":\n - $flow.sessionId\n - $flow.chatId\n - $flow.chatflowId\n - $flow.input\n - $flow.state\n\n4. You can get custom variables: $vars.<variable-name>\n\n"
},
"description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values",
"datagrid": [
{
"field": "key",
"headerName": "Key",
"type": "asyncSingleSelect",
"loadMethod": "loadStateKeys",
"flex": 0.5,
"editable": true
},
{
"field": "value",
"headerName": "Value",
"type": "freeSolo",
"valueOptions": [
{
"label": "LLM Node Output (string)",
"value": "$flow.output.content"
},
{
"label": "LLM JSON Output Key (string)",
"value": "$flow.output."
},
{
"label": "Global variable (string)",
"value": "$vars."
},
{
"label": "Input Question (string)",
"value": "$flow.input"
},
{
"label": "Session Id (string)",
"value": "$flow.sessionId"
},
{
"label": "Chat Id (string)",
"value": "$flow.chatId"
},
{
"label": "Chatflow Id (string)",
"value": "$flow.chatflowId"
}
],
"editable": true,
"flex": 1
}
],
"optional": true,
"additionalParams": true
},
{
"label": "Update State (Code)",
"name": "updateStateMemoryCode",
"type": "code",
"hint": {
"label": "How to use",
"value": "\n1. Return the key value JSON object. For example: if you have the following State:\n json\n {\n \"user\": null\n }\n \n\n You can update the "user" value by returning the following:\n js\n return {\n \"user\": \"john doe\"\n }\n \n\n2. If you want to use the LLM Node's output as the value to update state, it is available as $flow.output with the following structure:\n json\n {\n \"content\": 'Hello! How can I assist you today?',\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n \n\n For example, if the output content is the value you want to update the state with, you can return the following:\n js\n return {\n \"user\": $flow.output.content\n }\n \n\n3. You can also get default flow config, including the current "state":\n - $flow.sessionId\n - $flow.chatId\n - $flow.chatflowId\n - $flow.input\n - $flow.state\n\n4. You can get custom variables: $vars.<variable-name>\n\n"
},
"description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values. Must return an object representing the state",
"hideCodeExecute": true,
"codeExample": "const result = $flow.output;\n\n/* Suppose we have a custom State schema like this:\n* {\n aggregate: {\n value: (x, y) => x.concat(y),\n default: () => []\n }\n }\n*/\n\nreturn {\n aggregate: [result.content]\n};",
"optional": true,
"additionalParams": true
}
],
"id": "seqLLMNode_1-input-updateStateMemory-tabs"
}
],
"inputAnchors": [
{
"label": "Start | Agent | Condition | LLM | Tool Node",
"name": "sequentialNode",
"type": "Start | Agent | Condition | LLMNode | ToolNode",
"list": true,
"id": "seqLLMNode_1-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel",
"optional": true,
"description": "Overwrite model to be used for this node",
"id": "seqLLMNode_1-input-model-BaseChatModel"
}
],
"inputs": {
"llmNodeName": "reviewer",
"systemMessagePrompt": "Your job is to review the youtube titles generated and give feedback to the writer. You need to check that the title has good grammar and spelling, and follows the main characteristics of a good youtube title:\n1. Clear and Descriptive: Accurately describe the content.\n2. Keyword-Rich: Include relevant keywords like "Zhi Neng Qigong" as part of the title, but avoid randomly inserting keywords.\n3. Engaging and Compelling: Grab attention and pique curiosity.\n4. Concise: Keep it under 60 characters.\n5. Questions: Consider posing a question to intrigue viewers.\n6. Use of Capitalization: Capitalize the first letter of each word.\n\nIf you have any feedback items, you will give instructions to the writer to improve the suggested title.\nIf you are satisfied with the work from the writer, you will say "APPROVED".",
"humanMessagePrompt": "",
"messageHistory": "",
"sequentialNode": [
"{{seqLLMNode_0.data.instance}}"
],
"model": "",
"promptValues": "",
"llmStructuredOutput": "[{"key":"next","type":"Enum","enumValues":"APPROVED,feedback","description":"whether the title is approved, or if there is feedback for the writer","actions":"","id":1},{"key":"feedback","type":"String","enumValues":"","description":"the feedback items, if any","actions":"","id":2}]",
"updateStateMemory": "updateStateMemoryUI",
"updateStateMemoryUI": "[{"key":"next","value":"$flow.output.next","actions":"","id":1}]"
},
"outputAnchors": [
{
"id": "seqLLMNode_1-output-seqLLMNode-LLMNode",
"name": "seqLLMNode",
"label": "LLMNode",
"description": "Run Chat Model and return the output",
"type": "LLMNode"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 450,
"selected": false,
"positionAbsolute": {
"x": 1263.7692396634495,
"y": 150.83460692519276
},
"dragging": false
},
{
"id": "seqEnd_0",
"position": {
"x": 1661.2188704924235,
"y": 687.2767387088082
},
"type": "customNode",
"data": {
"id": "seqEnd_0",
"label": "End",
"version": 2,
"name": "seqEnd",
"type": "End",
"baseClasses": [
"End"
],
"category": "Sequential Agents",
"description": "End conversation",
"inputParams": [],
"inputAnchors": [
{
"label": "Agent | Condition | LLM | Tool Node",
"name": "sequentialNode",
"type": "Agent | Condition | LLMNode | ToolNode",
"id": "seqEnd_0-input-sequentialNode-Agent | Condition | LLMNode | ToolNode"
}
],
"inputs": {
"sequentialNode": "{{seqLLMNode_1.data.instance}}"
},
"outputAnchors": [],
"outputs": {},
"selected": false
},
"width": 300,
"height": 143,
"positionAbsolute": {
"x": 1661.2188704924235,
"y": 687.2767387088082
},
"selected": false
}
],
"edges": [
{
"source": "chatOllama_0",
"sourceHandle": "chatOllama_0-output-chatOllama-ChatOllama|ChatOllama|BaseChatModel|BaseLanguageModel|Runnable",
"target": "seqStart_0",
"targetHandle": "seqStart_0-input-model-BaseChatModel",
"type": "buttonedge",
"id": "chatOllama_0-chatOllama_0-output-chatOllama-ChatOllama|ChatOllama|BaseChatModel|BaseLanguageModel|Runnable-seqStart_0-seqStart_0-input-model-BaseChatModel"
},
{
"source": "seqState_0",
"sourceHandle": "seqState_0-output-seqState-State",
"target": "seqStart_0",
"targetHandle": "seqStart_0-input-state-State",
"type": "buttonedge",
"id": "seqState_0-seqState_0-output-seqState-State-seqStart_0-seqStart_0-input-state-State"
},
{
"source": "seqStart_0",
"sourceHandle": "seqStart_0-output-seqStart-Start",
"target": "seqLLMNode_0",
"targetHandle": "seqLLMNode_0-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode",
"type": "buttonedge",
"id": "seqStart_0-seqStart_0-output-seqStart-Start-seqLLMNode_0-seqLLMNode_0-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode"
},
{
"source": "seqLLMNode_0",
"sourceHandle": "seqLLMNode_0-output-seqLLMNode-LLMNode",
"target": "seqLLMNode_1",
"targetHandle": "seqLLMNode_1-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode",
"type": "buttonedge",
"id": "seqLLMNode_0-seqLLMNode_0-output-seqLLMNode-LLMNode-seqLLMNode_1-seqLLMNode_1-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode"
},
{
"source": "seqLLMNode_1",
"sourceHandle": "seqLLMNode_1-output-seqLLMNode-LLMNode",
"target": "seqEnd_0",
"targetHandle": "seqEnd_0-input-sequentialNode-Agent | Condition | LLMNode | ToolNode",
"type": "buttonedge",
"id": "seqLLMNode_1-seqLLMNode_1-output-seqLLMNode-LLMNode-seqEnd_0-seqEnd_0-input-sequentialNode-Agent | Condition | LLMNode | ToolNode"
}
]
}
Expected behavior
no error :)
Who can help?
No response
Operating System
Ubuntu 24.04
Langflow Version
Whatever is in the latest Docker image
Python Version
None
Screenshot
No response
Flow File
No response
Hey there, @matiashegoburu! I'm here to help you with any issues, answer your questions, and guide you on your open source journey. Let's tackle this challenge together!
The error "Cannot read properties of undefined (reading 'label')" typically occurs when the code attempts to access the 'label' property of an object that is undefined. In the context of Langflow, this might happen if a node in your flow is missing the 'label' property or if the node data is not being correctly passed or initialized.
Here are a few steps you can take to troubleshoot and resolve this issue:
Check Node Definitions: Ensure that all nodes in your flow have a 'label' property defined in their data. In the JSON you provided, each node seems to have a 'label', so double-check if there are any nodes not included in the JSON that might be missing this property.
Validate Node Data: Use the validateNode function to check for any missing or undefined properties in your node data. This function can help identify nodes with missing required fields or other issues that might lead to undefined properties [1].
Review Flow Configuration: Make sure that the flow configuration is complete and that all nodes are correctly connected. Sometimes, an incomplete or incorrect configuration can lead to unexpected undefined properties.
Debugging: Add logging or debugging statements in the buildAgentGraph function to trace where the undefined property is being accessed. This can help pinpoint the exact location and cause of the error (see the sketch after these steps).
By ensuring that all nodes have the necessary properties and that the flow is correctly configured, you should be able to resolve the error. If the issue persists, further investigation into the specific implementation of the buildAgentGraph function might be necessary.
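As a sketch of the debugging suggestion above (the helper name getNodeLabel and its shape are assumptions for illustration, not the real buildAgentGraph internals), a defensive lookup would replace the bare TypeError with a message that names the offending node id:

// Hypothetical guard -- a sketch only, not the actual server code.
// Fails fast with a descriptive error instead of a bare "reading 'label'" TypeError.
function getNodeLabel(nodes, nodeId) {
    const node = nodes.find((n) => n.id === nodeId)
    if (!node || !node.data || node.data.label === undefined) {
        console.error(`[server]: node "${nodeId}" is missing or has no data.label`)
        throw new Error(`buildAgentGraph - node "${nodeId}" not found or malformed`)
    }
    return node.data.label
}

// Usage sketch: "nodes" would be the parsed nodes array from the flow JSON above.
// const label = getNodeLabel(flow.nodes, 'seqLLMNode_1')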
"optional": true,
"additionalParams": true
}
],
"id": "seqLLMNode_0-input-updateStateMemory-tabs"
}
],
"inputAnchors": [
{
"label": "Start | Agent | Condition | LLM | Tool Node",
"name": "sequentialNode",
"type": "Start | Agent | Condition | LLMNode | ToolNode",
"list": true,
"id": "seqLLMNode_0-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel",
"optional": true,
"description": "Overwrite model to be used for this node",
"id": "seqLLMNode_0-input-model-BaseChatModel"
}
],
"inputs": {
"llmNodeName": "youtube title writer",
"systemMessagePrompt": "[INSTRUCTION]\nAnalyze the video transcript and provide a possible title. \nDo not explain what you are doing: only output the suggested title with no additional comments.\nIf the word Qigong has spelling mistakes, please fix them. Common spelling mistakes are: chicun, chico.\n\nA good title meets the following criteria:\n1. Clear and Descriptive: Accurately describe the content.\n2. Keyword-Rich: Include relevant keywords like "Zhi Neng Qigong" as part of the title, but avoid randomly inserting keywords.\n3. Engaging and Compelling: Grab attention and pique curiosity.\n4. Concise: Keep it under 60 characters.\n5. Questions: Consider posing a question to intrigue viewers.\n6. Use of Capitalization: Capitalize the first letter of each word.\n\nRules of the final result:\n- The final result should be a string written in Spanish.\n- Do not explain what you are doing or add notes or comments, simply output the proposed title as a plain string, ready to be published.\n- Do not leave variables to be filled in, the final result must be the final title that will be published.\n- Do not add any markdown formatting, or hashtags.\n\nAdditional information:\n- The name of the channel is: Matias Hegoburu\n- The channel is an educational channel about Zhi Neng Qigong.\n- The content is for educational purposes only, and should not replace professional medical advice\n- Important keywords: Zhi Neng Qigong, Qigong\n\nOnce you generate the title, send it to the reviewer for approval. If the reviewer sends feedback, implement it.",
"humanMessagePrompt": "",
"messageHistory": "",
"sequentialNode": [
"{{seqStart_0.data.instance}}"
],
"model": "",
"promptValues": "",
"llmStructuredOutput": "[{"key":"title","type":"String","enumValues":"","description":"the suggested title for the given script","actions":"","id":1}]",
"updateStateMemory": "updateStateMemoryUI"
},
"outputAnchors": [
{
"id": "seqLLMNode_0-output-seqLLMNode-LLMNode",
"name": "seqLLMNode",
"label": "LLMNode",
"description": "Run Chat Model and return the output",
"type": "LLMNode"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 450,
"selected": false,
"positionAbsolute": {
"x": 896.1857660643981,
"y": 146.23981350520455
},
"dragging": false
},
{
"id": "seqLLMNode_1",
"position": {
"x": 1263.7692396634495,
"y": 150.83460692519276
},
"type": "customNode",
"data": {
"id": "seqLLMNode_1",
"label": "LLM Node",
"version": 3,
"name": "seqLLMNode",
"type": "LLMNode",
"baseClasses": [
"LLMNode"
],
"category": "Sequential Agents",
"description": "Run Chat Model and return the output",
"inputParams": [
{
"label": "Name",
"name": "llmNodeName",
"type": "string",
"placeholder": "LLM",
"id": "seqLLMNode_1-input-llmNodeName-string"
},
{
"label": "System Prompt",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_1-input-systemMessagePrompt-string"
},
{
"label": "Human Prompt",
"name": "humanMessagePrompt",
"type": "string",
"description": "This prompt will be added at the end of the messages as human message",
"rows": 4,
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_1-input-humanMessagePrompt-string"
},
{
"label": "Messages History",
"name": "messageHistory",
"description": "Return a list of messages between System Prompt and Human Prompt. This is useful when you want to provide few shot examples",
"type": "code",
"hideCodeExecute": true,
"codeExample": "const { AIMessage, HumanMessage, ToolMessage } = require('@langchain/core/messages');\n\nreturn [\n new HumanMessage("What is 333382 🦜 1932?"),\n new AIMessage({\n content: "",\n tool_calls: [\n {\n id: "12345",\n name: "calulator",\n args: {\n number1: 333382,\n number2: 1932,\n operation: "divide",\n },\n },\n ],\n }),\n new ToolMessage({\n tool_call_id: "12345",\n content: "The answer is 172.558.",\n }),\n new AIMessage("The answer is 172.558."),\n]",
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_1-input-messageHistory-code"
},
{
"label": "Format Prompt Values",
"name": "promptValues",
"description": "Assign values to the prompt variables. You can also use $flow.state. to get the state value",
"type": "json",
"optional": true,
"acceptVariable": true,
"list": true,
"additionalParams": true,
"id": "seqLLMNode_1-input-promptValues-json"
},
{
"label": "JSON Structured Output",
"name": "llmStructuredOutput",
"type": "datagrid",
"description": "Instruct the LLM to give output in a JSON structured schema",
"datagrid": [
{
"field": "key",
"headerName": "Key",
"editable": true
},
{
"field": "type",
"headerName": "Type",
"type": "singleSelect",
"valueOptions": [
"String",
"String Array",
"Number",
"Boolean",
"Enum"
],
"editable": true
},
{
"field": "enumValues",
"headerName": "Enum Values",
"editable": true
},
{
"field": "description",
"headerName": "Description",
"flex": 1,
"editable": true
}
],
"optional": true,
"additionalParams": true,
"id": "seqLLMNode_1-input-llmStructuredOutput-datagrid"
},
{
"label": "Update State",
"name": "updateStateMemory",
"type": "tabs",
"tabIdentifier": "selectedUpdateStateMemoryTab",
"default": "updateStateMemoryUI",
"additionalParams": true,
"tabs": [
{
"label": "Update State (Table)",
"name": "updateStateMemoryUI",
"type": "datagrid",
"hint": {
"label": "How to use",
"value": "\n1. Key and value pair to be updated. For example: if you have the following State:\n | Key | Operation | Default Value |\n |-----------|---------------|-------------------|\n | user | Replace | |\n\n You can update the "user" value with the following:\n | Key | Value |\n |-----------|-----------|\n | user | john doe |\n\n2. If you want to use the LLM Node's output as the value to update state, it is available as available as
$flow.output
with the following structure:\njson\n {\n \"content\": 'Hello! How can I assist you today?',\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n
\n\n For example, if the outputcontent
is the value you want to update the state with, you can do the following:\n | Key | Value |\n |-----------|---------------------------|\n | user |$flow.output.content
|\n\n3. You can get default flow config, including the current "state":\n -$flow.sessionId
\n -$flow.chatId
\n -$flow.chatflowId
\n -$flow.input
\n -$flow.state
\n\n4. You can get custom variables:$vars.<variable-name>
\n\n"},
"description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values",
"datagrid": [
{
"field": "key",
"headerName": "Key",
"type": "asyncSingleSelect",
"loadMethod": "loadStateKeys",
"flex": 0.5,
"editable": true
},
{
"field": "value",
"headerName": "Value",
"type": "freeSolo",
"valueOptions": [
{
"label": "LLM Node Output (string)",
"value": "$flow.output.content"
},
{
"label": "LLM JSON Output Key (string)",
"value": "$flow.output."
},
{
"label": "Global variable (string)",
"value": "$vars."
},
{
"label": "Input Question (string)",
"value": "$flow.input"
},
{
"label": "Session Id (string)",
"value": "$flow.sessionId"
},
{
"label": "Chat Id (string)",
"value": "$flow.chatId"
},
{
"label": "Chatflow Id (string)",
"value": "$flow.chatflowId"
}
],
"editable": true,
"flex": 1
}
],
"optional": true,
"additionalParams": true
},
{
"label": "Update State (Code)",
"name": "updateStateMemoryCode",
"type": "code",
"hint": {
"label": "How to use",
"value": "\n1. Return the key value JSON object. For example: if you have the following State:\n
json\n {\n \"user\": null\n }\n
\n\n You can update the "user" value by returning the following:\njs\n return {\n \"user\": \"john doe\"\n }\n
\n\n2. If you want to use the LLM Node's output as the value to update state, it is available as$flow.output
with the following structure:\njson\n {\n \"content\": 'Hello! How can I assist you today?',\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n
\n\n For example, if the outputcontent
is the value you want to update the state with, you can return the following:\njs\n return {\n \"user\": $flow.output.content\n }\n
\n\n3. You can also get default flow config, including the current "state":\n -$flow.sessionId
\n -$flow.chatId
\n -$flow.chatflowId
\n -$flow.input
\n -$flow.state
\n\n4. You can get custom variables:$vars.<variable-name>
\n\n"},
"description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values. Must return an object representing the state",
"hideCodeExecute": true,
"codeExample": "const result = $flow.output;\n\n/* Suppose we have a custom State schema like this:\n* {\n aggregate: {\n value: (x, y) => x.concat(y),\n default: () => []\n }\n }\n*/\n\nreturn {\n aggregate: [result.content]\n};",
"optional": true,
"additionalParams": true
}
],
"id": "seqLLMNode_1-input-updateStateMemory-tabs"
}
],
"inputAnchors": [
{
"label": "Start | Agent | Condition | LLM | Tool Node",
"name": "sequentialNode",
"type": "Start | Agent | Condition | LLMNode | ToolNode",
"list": true,
"id": "seqLLMNode_1-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel",
"optional": true,
"description": "Overwrite model to be used for this node",
"id": "seqLLMNode_1-input-model-BaseChatModel"
}
],
"inputs": {
"llmNodeName": "reviewer",
"systemMessagePrompt": "Your job is to review the youtube titles generated and give feedback to the writer. You need to check that the title has good grammar and spelling, and follows the main characteristics of a good youtube title:\n1. Clear and Descriptive: Accurately describe the content.\n2. Keyword-Rich: Include relevant keywords like "Zhi Neng Qigong" as part of the title, but avoid randomly inserting keywords.\n3. Engaging and Compelling: Grab attention and pique curiosity.\n4. Concise: Keep it under 60 characters.\n5. Questions: Consider posing a question to intrigue viewers.\n6. Use of Capitalization: Capitalize the first letter of each word.\n\nIf you have any feedback items, you will give instructions to the writer to improve the suggested title.\nIf you are satisfied with the work from the writer, you will say "APPROVED".",
"humanMessagePrompt": "",
"messageHistory": "",
"sequentialNode": [
"{{seqLLMNode_0.data.instance}}"
],
"model": "",
"promptValues": "",
"llmStructuredOutput": "[{"key":"next","type":"Enum","enumValues":"APPROVED,feedback","description":"whether the title is approved, or if there is feedback for the writer","actions":"","id":1},{"key":"feedback","type":"String","enumValues":"","description":"the feedback items, if any","actions":"","id":2}]",
"updateStateMemory": "updateStateMemoryUI",
"updateStateMemoryUI": "[{"key":"next","value":"$flow.output.next","actions":"","id":1}]"
},
"outputAnchors": [
{
"id": "seqLLMNode_1-output-seqLLMNode-LLMNode",
"name": "seqLLMNode",
"label": "LLMNode",
"description": "Run Chat Model and return the output",
"type": "LLMNode"
}
],
"outputs": {},
"selected": false
},
"width": 300,
"height": 450,
"selected": false,
"positionAbsolute": {
"x": 1263.7692396634495,
"y": 150.83460692519276
},
"dragging": false
},
{
"id": "seqEnd_0",
"position": {
"x": 1661.2188704924235,
"y": 687.2767387088082
},
"type": "customNode",
"data": {
"id": "seqEnd_0",
"label": "End",
"version": 2,
"name": "seqEnd",
"type": "End",
"baseClasses": [
"End"
],
"category": "Sequential Agents",
"description": "End conversation",
"inputParams": [],
"inputAnchors": [
{
"label": "Agent | Condition | LLM | Tool Node",
"name": "sequentialNode",
"type": "Agent | Condition | LLMNode | ToolNode",
"id": "seqEnd_0-input-sequentialNode-Agent | Condition | LLMNode | ToolNode"
}
],
"inputs": {
"sequentialNode": "{{seqLLMNode_1.data.instance}}"
},
"outputAnchors": [],
"outputs": {},
"selected": false
},
"width": 300,
"height": 143,
"positionAbsolute": {
"x": 1661.2188704924235,
"y": 687.2767387088082
},
"selected": false
}
],
"edges": [
{
"source": "chatOllama_0",
"sourceHandle": "chatOllama_0-output-chatOllama-ChatOllama|ChatOllama|BaseChatModel|BaseLanguageModel|Runnable",
"target": "seqStart_0",
"targetHandle": "seqStart_0-input-model-BaseChatModel",
"type": "buttonedge",
"id": "chatOllama_0-chatOllama_0-output-chatOllama-ChatOllama|ChatOllama|BaseChatModel|BaseLanguageModel|Runnable-seqStart_0-seqStart_0-input-model-BaseChatModel"
},
{
"source": "seqState_0",
"sourceHandle": "seqState_0-output-seqState-State",
"target": "seqStart_0",
"targetHandle": "seqStart_0-input-state-State",
"type": "buttonedge",
"id": "seqState_0-seqState_0-output-seqState-State-seqStart_0-seqStart_0-input-state-State"
},
{
"source": "seqStart_0",
"sourceHandle": "seqStart_0-output-seqStart-Start",
"target": "seqLLMNode_0",
"targetHandle": "seqLLMNode_0-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode",
"type": "buttonedge",
"id": "seqStart_0-seqStart_0-output-seqStart-Start-seqLLMNode_0-seqLLMNode_0-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode"
},
{
"source": "seqLLMNode_0",
"sourceHandle": "seqLLMNode_0-output-seqLLMNode-LLMNode",
"target": "seqLLMNode_1",
"targetHandle": "seqLLMNode_1-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode",
"type": "buttonedge",
"id": "seqLLMNode_0-seqLLMNode_0-output-seqLLMNode-LLMNode-seqLLMNode_1-seqLLMNode_1-input-sequentialNode-Start | Agent | Condition | LLMNode | ToolNode"
},
{
"source": "seqLLMNode_1",
"sourceHandle": "seqLLMNode_1-output-seqLLMNode-LLMNode",
"target": "seqEnd_0",
"targetHandle": "seqEnd_0-input-sequentialNode-Agent | Condition | LLMNode | ToolNode",
"type": "buttonedge",
"id": "seqLLMNode_1-seqLLMNode_1-output-seqLLMNode-LLMNode-seqEnd_0-seqEnd_0-input-sequentialNode-Agent | Condition | LLMNode | ToolNode"
}
]
}
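
A quick way to sanity-check the exported flow before digging into the server: the sketch below (the `flow.json` file name and the `ts-node` invocation are my assumptions, not part of the repro) just verifies that every edge endpoint refers to a node that actually exists in the `nodes` array, since a dangling reference is the sort of thing that would make `buildAgentGraph` end up reading `label` off `undefined`.

```ts
// validate-flow.ts — minimal sanity check for an exported agentflow JSON.
// Assumes the flow above is saved as ./flow.json and only relies on the
// nodes/edges shape shown there. Run with: npx ts-node validate-flow.ts
import { readFileSync } from 'fs'

interface FlowNode { id: string; data?: { label?: string } }
interface FlowEdge { id: string; source: string; target: string }

const flow = JSON.parse(readFileSync('./flow.json', 'utf8')) as {
    nodes: FlowNode[]
    edges: FlowEdge[]
}

// Collect every node id, then check each edge endpoint against the set.
const nodeIds = new Set(flow.nodes.map((n) => n.id))

for (const edge of flow.edges) {
    for (const endpoint of [edge.source, edge.target]) {
        if (!nodeIds.has(endpoint)) {
            console.error(`edge ${edge.id} references missing node ${endpoint}`)
        }
    }
}
console.log('edge check complete')
```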
Expected behavior
No error :)
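Failing that, a more descriptive error would already help. I don't know the actual `buildAgentGraph` internals, but judging from the stack trace it looks a node up and reads its `label`; a guard along these lines (function and parameter names here are hypothetical, not the real Flowise code) would at least report which id failed to resolve:

```ts
// Hypothetical sketch only — assumes the builder resolves node ids from the
// edges against the nodes array, as the flow JSON structure suggests.
function resolveNodeLabel(
    nodes: { id: string; data: { label: string } }[],
    nodeId: string
): string {
    const node = nodes.find((n) => n.id === nodeId)
    if (!node) {
        // Naming the offending id beats "Cannot read properties of undefined (reading 'label')".
        throw new Error(`buildAgentGraph: no node found for id "${nodeId}"`)
    }
    return node.data.label
}
```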
Who can help?
No response
Operating System
Ubuntu 24.04
Langflow Version
Whatever is in the latest Docker image
Python Version
None
Screenshot
No response
Flow File
No response