Commit

docs(examples): add HumanTool for user interaction during agent workflows (#255)

Ref: #121
matiasmolinas authored Jan 7, 2025
1 parent 873b38d commit a4cca54
Showing 3 changed files with 200 additions and 4 deletions.
86 changes: 86 additions & 0 deletions examples/agents/experimental/human.ts
@@ -0,0 +1,86 @@
import "dotenv/config.js";
import { BeeAgent } from "bee-agent-framework/agents/bee/agent";
import { createConsoleReader } from "../../helpers/io.js"; // Use the examples console reader
import { FrameworkError } from "bee-agent-framework/errors";
import { TokenMemory } from "bee-agent-framework/memory/tokenMemory";
import { Logger } from "bee-agent-framework/logger/logger";
import { OpenMeteoTool } from "bee-agent-framework/tools/weather/openMeteo";

// Import the HumanTool from the updated file
import { HumanTool } from "../../tools/experimental/human.js";

// Set up logger
Logger.root.level = "silent"; // Disable internal logs
const logger = new Logger({ name: "app", level: "trace" });

// Initialize LLM (test against llama as requested)
import { OllamaChatLLM } from "bee-agent-framework/adapters/ollama/chat";
const llm = new OllamaChatLLM({
modelId: "llama3.1",
});

// Create the console reader once, share it with HumanTool
const reader = createConsoleReader();

// Initialize BeeAgent with shared reader for HumanTool
const agent = new BeeAgent({
llm,
memory: new TokenMemory({ llm }),
tools: [
new OpenMeteoTool(),
new HumanTool({
reader: reader,
}),
],
});

// Main loop
try {
for await (const { prompt } of reader) {
// Run the agent and observe events
const response = await agent
.run(
{ prompt },
{
execution: {
maxRetriesPerStep: 3,
totalMaxRetries: 10,
maxIterations: 20,
},
},
)
.observe((emitter) => {
// Show only final answers
emitter.on("update", async ({ update }) => {
if (update.key === "final_answer") {
reader.write("Agent 🤖 : ", update.value);
}
});

// Log errors
emitter.on("error", ({ error }) => {
reader.write("Agent 🤖 : ", FrameworkError.ensure(error).dump());
});

// Retry notifications
emitter.on("retry", () => {
reader.write("Agent 🤖 : ", "Retrying the action...");
});
});

// Print the final response
if (response.result?.text) {
reader.write("Agent 🤖 : ", response.result.text);
} else {
reader.write(
"Agent 🤖 : ",
"No result was returned. Ensure your input is valid or check tool configurations.",
);
}
}
} catch (error) {
logger.error(FrameworkError.ensure(error).dump());
} finally {
// Gracefully close the reader when exiting the app
reader.close();
}
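
For reference, the same observe block could also surface the HumanTool's clarification turns. The sketch below is not part of this commit and rests on an assumption: that the Bee agent emits a "tool_name" update key alongside "final_answer", as other framework examples suggest.

// Hypothetical extension of the "update" handler above (assumes a "tool_name" update key).
emitter.on("update", async ({ update }) => {
  if (update.key === "final_answer") {
    reader.write("Agent 🤖 : ", update.value);
  } else if (update.key === "tool_name" && update.value === "HumanTool") {
    reader.write("Agent 🤖 : ", "Asking the user for a clarification...");
  }
});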
16 changes: 12 additions & 4 deletions examples/helpers/io.ts
@@ -3,6 +3,7 @@ import { stdin, stdout } from "node:process";
import picocolors from "picocolors";
import * as R from "remeda";
import stripAnsi from "strip-ansi";
import type { Abortable } from "node:events";

interface ReadFromConsoleInput {
  fallback?: string;
@@ -27,16 +28,27 @@ export function createConsoleReader({
          .concat("\n"),
      );
    },

    async prompt(): Promise<string> {
      for await (const { prompt } of this) {
        return prompt;
      }
      process.exit(0);
    },

    async askSingleQuestion(queryMessage: string, options?: Abortable): Promise<string> {
      const answer = await rl.question(
        R.piped(picocolors.cyan, picocolors.bold)(queryMessage),
        options ?? { signal: undefined },
      );
      return stripAnsi(answer.trim());
    },

    close() {
      stdin.pause();
      rl.close();
    },

    async *[Symbol.asyncIterator]() {
      if (!isActive) {
        return;
@@ -64,10 +76,6 @@
          }
          yield { prompt, iteration };
        }
      } catch (e) {
        if (e.code === "ERR_USE_AFTER_CLOSE") {
          return;
        }
      } finally {
        isActive = false;
        rl.close();
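
The new askSingleQuestion helper takes Node's standard Abortable options, so a pending question can be cancelled. A minimal standalone sketch, assuming Node 17.3+ for AbortSignal.timeout(); the import path is illustrative and depends on where the snippet lives relative to the examples helpers.

import { createConsoleReader } from "./examples/helpers/io.js"; // illustrative path

const reader = createConsoleReader();
const answer = await reader.askSingleQuestion("Proceed? (y/n) ", {
  signal: AbortSignal.timeout(30_000), // abort if the user does not answer within 30 s
});
reader.write("App", `You answered: ${answer}`);
reader.close();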
102 changes: 102 additions & 0 deletions examples/tools/experimental/human.ts
@@ -0,0 +1,102 @@
import { Emitter } from "bee-agent-framework/emitter/emitter";
import {
  Tool,
  BaseToolOptions,
  BaseToolRunOptions,
  JSONToolOutput,
  ToolInput,
  ToolEmitter,
} from "bee-agent-framework/tools/base";
import { RunContext } from "bee-agent-framework/context";
import { z } from "zod";

interface HumanToolOutput {
  clarification: string;
}

export interface Reader {
  write(prefix: string, message: string): void;
  askSingleQuestion(prompt: string, options?: { signal?: AbortSignal }): Promise<string>;
}

export interface HumanToolInput extends BaseToolOptions {
  reader: Reader;
  name?: string;
  description?: string;
}

export class HumanTool extends Tool<JSONToolOutput<HumanToolOutput>, HumanToolInput> {
  name = "HumanTool";
  description = `
  This tool is used whenever the user's input is unclear, ambiguous, or incomplete.
  The agent MUST invoke this tool when additional clarification is required to proceed.
  The output must adhere strictly to the following structure:
    - Thought: A single-line description of the need for clarification.
    - Function Name: HumanTool
    - Function Input: { "message": "Your question to the user for clarification." }
    - Function Output: The user's response in JSON format.
  Examples:
    - Example 1:
      Input: "What is the weather?"
      Thought: "The user's request lacks a location. I need to ask for clarification."
      Function Name: HumanTool
      Function Input: { "message": "Could you provide the location for which you would like to know the weather?" }
      Function Output: { "clarification": "Santa Fe, Argentina" }
      Final Answer: The current weather in Santa Fe, Argentina is 17.3°C with a relative humidity of 48% and a wind speed of 10.1 km/h.
    - Example 2:
      Input: "Can you help me?"
      Thought: "The user's request is too vague. I need to ask for more details."
      Function Name: HumanTool
      Function Input: { "message": "Could you clarify what kind of help you need?" }
      Function Output: { "clarification": "I need help understanding how to use the project management tool." }
      Final Answer: Sure, I can help you with the project management tool. Let me know which feature you'd like to learn about or if you'd like a general overview.
    - Example 3:
      Input: "Translate this sentence."
      Thought: "The user's request is incomplete. I need to ask for the sentence they want translated."
      Function Name: HumanTool
      Function Input: { "message": "Could you specify the sentence you would like me to translate?" }
      Function Output: { "clarification": "Translate 'Hello, how are you?' to French." }
      Final Answer: The French translation of 'Hello, how are you?' is 'Bonjour, comment vas-tu?'
  Note: Do NOT attempt to guess or provide incomplete responses. Always use this tool when in doubt to ensure accurate and meaningful interactions.
  `;

  public readonly emitter: ToolEmitter<ToolInput<this>, JSONToolOutput<HumanToolOutput>> =
    Emitter.root.child({
      namespace: ["tool", "human"],
      creator: this,
    });

  constructor(protected readonly input: HumanToolInput) {
    super(input);
    this.name = input?.name || this.name;
    this.description = input?.description || this.description;
  }

  inputSchema() {
    return z.object({
      message: z.string().min(1, "Message cannot be empty"),
    });
  }

  async _run(
    input: ToolInput<this>,
    _options: Partial<BaseToolRunOptions>,
    run: RunContext<this>,
  ): Promise<JSONToolOutput<HumanToolOutput>> {
    // Use the reader from input
    this.input.reader.write("HumanTool", input.message);

    // Use askSingleQuestion with the signal
    const userInput = await this.input.reader.askSingleQuestion("User 👤 : ", {
      signal: run.signal,
    });

    // Return JSONToolOutput with the clarification
    return new JSONToolOutput({
      clarification: userInput.trim(),
    });
  }
}
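
Because HumanTool only depends on the small Reader interface above, it can be exercised without a console at all. A minimal sketch, assuming the base Tool.run() entry point and JSONToolOutput.getTextContent() behave as elsewhere in the framework; the scripted reader is hypothetical.

import { HumanTool, Reader } from "./human.js";

// Hypothetical reader that answers every question with a canned reply.
const scriptedReader: Reader = {
  write: (prefix, message) => console.log(prefix, message),
  askSingleQuestion: async () => "Santa Fe, Argentina",
};

const tool = new HumanTool({ reader: scriptedReader });
const output = await tool.run({ message: "Which city do you mean?" });
console.log(output.getTextContent()); // {"clarification":"Santa Fe, Argentina"}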
