diff --git a/examples/agents/experimental/human.ts b/examples/agents/experimental/human.ts
new file mode 100644
index 00000000..98d8c7ef
--- /dev/null
+++ b/examples/agents/experimental/human.ts
@@ -0,0 +1,86 @@
+import "dotenv/config.js";
+import { BeeAgent } from "bee-agent-framework/agents/bee/agent";
+import { createConsoleReader } from "../../helpers/io.js"; // Use the examples console reader
+import { FrameworkError } from "bee-agent-framework/errors";
+import { TokenMemory } from "bee-agent-framework/memory/tokenMemory";
+import { Logger } from "bee-agent-framework/logger/logger";
+import { OpenMeteoTool } from "bee-agent-framework/tools/weather/openMeteo";
+
+// Import the experimental HumanTool
+import { HumanTool } from "../../tools/experimental/human.js";
+
+// Set up logger
+Logger.root.level = "silent"; // Disable internal logs
+const logger = new Logger({ name: "app", level: "trace" });
+
+// Initialize the LLM (Ollama running llama3.1)
+import { OllamaChatLLM } from "bee-agent-framework/adapters/ollama/chat";
+const llm = new OllamaChatLLM({
+  modelId: "llama3.1",
+});
+
+// Create the console reader once, share it with HumanTool
+const reader = createConsoleReader();
+
+// Initialize BeeAgent with shared reader for HumanTool
+const agent = new BeeAgent({
+  llm,
+  memory: new TokenMemory({ llm }),
+  tools: [
+    new OpenMeteoTool(),
+    new HumanTool({
+      reader: reader,
+    }),
+  ],
+});
+
+// Main loop
+try {
+  for await (const { prompt } of reader) {
+    // Run the agent and observe events
+    const response = await agent
+      .run(
+        { prompt },
+        {
+          execution: {
+            maxRetriesPerStep: 3,
+            totalMaxRetries: 10,
+            maxIterations: 20,
+          },
+        },
+      )
+      .observe((emitter) => {
+        // Show only final answers
+        emitter.on("update", async ({ update }) => {
+          if (update.key === "final_answer") {
+            reader.write("Agent 🤖 : ", update.value);
+          }
+        });
+
+        // Log errors
+        emitter.on("error", ({ error }) => {
+          reader.write("Agent 🤖 : ", FrameworkError.ensure(error).dump());
+        });
+
+        // Retry notifications
+        emitter.on("retry", () => {
+          reader.write("Agent 🤖 : ", "Retrying the action...");
+        });
+      });
+
+    // Print the final response
+    if (response.result?.text) {
+      reader.write("Agent 🤖 : ", response.result.text);
+    } else {
+      reader.write(
+        "Agent 🤖 : ",
+        "No result was returned. Ensure your input is valid or check tool configurations.",
+      );
+    }
+  }
+} catch (error) {
+  logger.error(FrameworkError.ensure(error).dump());
+} finally {
+  // Gracefully close the reader when exiting the app
+  reader.close();
+}
diff --git a/examples/helpers/io.ts b/examples/helpers/io.ts
index 06f1085a..dac68a87 100644
--- a/examples/helpers/io.ts
+++ b/examples/helpers/io.ts
@@ -3,6 +3,7 @@ import { stdin, stdout } from "node:process";
 import picocolors from "picocolors";
 import * as R from "remeda";
 import stripAnsi from "strip-ansi";
+import type { Abortable } from "node:events";
 
 interface ReadFromConsoleInput {
   fallback?: string;
@@ -27,16 +28,27 @@ export function createConsoleReader({
           .concat("\n"),
       );
     },
+
     async prompt(): Promise<string> {
       for await (const { prompt } of this) {
         return prompt;
       }
       process.exit(0);
     },
+
+    async askSingleQuestion(queryMessage: string, options?: Abortable): Promise<string> {
+      const answer = await rl.question(
+        R.piped(picocolors.cyan, picocolors.bold)(queryMessage),
+        options ?? { signal: undefined },
+      );
+      return stripAnsi(answer.trim());
+    },
+
     close() {
       stdin.pause();
       rl.close();
     },
+
     async *[Symbol.asyncIterator]() {
       if (!isActive) {
         return;
@@ -64,10 +76,6 @@ export function createConsoleReader({
         }
         yield { prompt, iteration };
       }
-    } catch (e) {
-      if (e.code === "ERR_USE_AFTER_CLOSE") {
-        return;
-      }
     } finally {
       isActive = false;
       rl.close();
diff --git a/examples/tools/experimental/human.ts b/examples/tools/experimental/human.ts
new file mode 100644
index 00000000..f8171723
--- /dev/null
+++ b/examples/tools/experimental/human.ts
@@ -0,0 +1,102 @@
+import { Emitter } from "bee-agent-framework/emitter/emitter";
+import {
+  Tool,
+  BaseToolOptions,
+  BaseToolRunOptions,
+  JSONToolOutput,
+  ToolInput,
+  ToolEmitter,
+} from "bee-agent-framework/tools/base";
+import { RunContext } from "bee-agent-framework/context";
+import { z } from "zod";
+
+interface HumanToolOutput {
+  clarification: string;
+}
+
+export interface Reader {
+  write(prefix: string, message: string): void;
+  askSingleQuestion(prompt: string, options?: { signal?: AbortSignal }): Promise<string>;
+}
+
+export interface HumanToolInput extends BaseToolOptions {
+  reader: Reader;
+  name?: string;
+  description?: string;
+}
+
+export class HumanTool extends Tool<JSONToolOutput<HumanToolOutput>, HumanToolInput> {
+  name = "HumanTool";
+  description = `
+  This tool is used whenever the user's input is unclear, ambiguous, or incomplete.
+  The agent MUST invoke this tool when additional clarification is required to proceed.
+  The output must adhere strictly to the following structure:
+    - Thought: A single-line description of the need for clarification.
+    - Function Name: HumanTool
+    - Function Input: { "message": "Your question to the user for clarification." }
+    - Function Output: The user's response in JSON format.
+  Examples:
+    - Example 1:
+      Input: "What is the weather?"
+      Thought: "The user's request lacks a location. I need to ask for clarification."
+      Function Name: HumanTool
+      Function Input: { "message": "Could you provide the location for which you would like to know the weather?" }
+      Function Output: { "clarification": "Santa Fe, Argentina" }
+      Final Answer: The current weather in Santa Fe, Argentina is 17.3°C with a relative humidity of 48% and a wind speed of 10.1 km/h.
+
+    - Example 2:
+      Input: "Can you help me?"
+      Thought: "The user's request is too vague. I need to ask for more details."
+      Function Name: HumanTool
+      Function Input: { "message": "Could you clarify what kind of help you need?" }
+      Function Output: { "clarification": "I need help understanding how to use the project management tool." }
+      Final Answer: Sure, I can help you with the project management tool. Let me know which feature you'd like to learn about or if you'd like a general overview.
+
+    - Example 3:
+      Input: "Translate this sentence."
+      Thought: "The user's request is incomplete. I need to ask for the sentence they want translated."
+      Function Name: HumanTool
+      Function Input: { "message": "Could you specify the sentence you would like me to translate?" }
+      Function Output: { "clarification": "Translate 'Hello, how are you?' to French." }
+      Final Answer: The French translation of 'Hello, how are you?' is 'Bonjour, comment vas-tu?'
+
+  Note: Do NOT attempt to guess or provide incomplete responses. Always use this tool when in doubt to ensure accurate and meaningful interactions.
+`;
+
+  public readonly emitter: ToolEmitter<ToolInput<this>, JSONToolOutput<HumanToolOutput>> =
+    Emitter.root.child({
+      namespace: ["tool", "human"],
+      creator: this,
+    });
+
+  constructor(protected readonly input: HumanToolInput) {
+    super(input);
+    this.name = input?.name || this.name;
+    this.description = input?.description || this.description;
+  }
+
+  inputSchema() {
+    return z.object({
+      message: z.string().min(1, "Message cannot be empty"),
+    });
+  }
+
+  async _run(
+    input: ToolInput<this>,
+    _options: Partial<BaseToolRunOptions>,
+    run: RunContext<this>,
+  ): Promise<JSONToolOutput<HumanToolOutput>> {
+    // Use the reader from input
+    this.input.reader.write("HumanTool", input.message);
+
+    // Use askSingleQuestion with the run's abort signal
+    const userInput = await this.input.reader.askSingleQuestion("User 👤 : ", {
+      signal: run.signal,
+    });
+
+    // Return JSONToolOutput with the clarification
+    return new JSONToolOutput({
+      clarification: userInput.trim(),
+    });
+  }
+}