diff --git a/.env.template b/.env.template
index ecd238eb..5c8dd583 100644
--- a/.env.template
+++ b/.env.template
@@ -11,9 +11,6 @@ BEE_FRAMEWORK_LOG_SINGLE_LINE="false"
 # WATSONX_PROJECT_ID=
 # WATSONX_REGION="us-south"
 
-# For BAM LLM Adapter
-# GENAI_API_KEY=
-
 # For OpenAI LLM Adapter
 # OPENAI_API_KEY=
 
@@ -26,6 +23,24 @@ AZURE_OPENAI_ENDPOINT=
 # For Groq LLM Adapter
 # GROQ_API_KEY=
 
+# For IBM BAM LLM Adapter
+# GENAI_API_KEY=
+
+# For IBM VLLM LLM Adapter
+# IBM_VLLM_URL=
+# IBM_VLLM_ROOT_CERT=
+# IBM_VLLM_CERT_CHAIN=
+# IBM_VLLM_PRIVATE_KEY=
+
+# For IBM RITS LLM Adapter
+# IBM_RITS_URL=
+# IBM_RITS_API_KEY=
+# IBM_RITS_MODEL=ibm-granite/granite-3.0-8b-instruct
+
+# LLM Provider, used for some of the example agents
+# (watsonx/ollama/openai/groq/bam/ibmvllm/ibmrits)
+# LLM_BACKEND=ollama
+
 # For GCP VertexAI Adapter
 # GOOGLE_APPLICATION_CREDENTIALS=
 # GCP_VERTEXAI_PROJECT=
diff --git a/examples/README.md b/examples/README.md
index 2834eabe..9a8b0693 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -21,6 +21,7 @@ This repository contains examples demonstrating the usage of the Bee Agent Framework
 - [`bee_advanced.ts`](agents/bee_advanced.ts): Advanced Bee Agent with custom configurations
 - [`bee_reusable.ts`](agents/bee_reusable.ts): Demonstration of serializing and reusing Bee Agents
 - [`custom_agent.ts`](agents/custom_agent.ts): Example of creating a custom agent
+- [`ibm_granite_bee.ts`](agents/ibm_granite_bee.ts): Basic Bee Agent using an IBM Granite LLM
 - [`simple.ts`](agents/simple.ts): Simple agent implementation
 - [`sql.ts`](agents/sql.ts): Agent for SQL-related tasks
 
diff --git a/examples/agents/ibm_granite_bee.ts b/examples/agents/ibm_granite_bee.ts
new file mode 100644
index 00000000..b6377dde
--- /dev/null
+++ b/examples/agents/ibm_granite_bee.ts
@@ -0,0 +1,116 @@
+import "dotenv/config.js";
+import { BeeAgent } from "bee-agent-framework/agents/bee/agent";
+import { ChatLLM, ChatLLMOutput } from "bee-agent-framework/llms/chat";
+import { getEnv, parseEnv } from "bee-agent-framework/internals/env";
+import { FrameworkError } from "bee-agent-framework/errors";
+import { TokenMemory } from "bee-agent-framework/memory/tokenMemory";
+import { WatsonXChatLLM } from "bee-agent-framework/adapters/watsonx/chat";
+import { OpenAIChatLLM } from "bee-agent-framework/adapters/openai/chat";
+import { OllamaChatLLM } from "bee-agent-framework/adapters/ollama/chat";
+import { IBMVllmChatLLM } from "bee-agent-framework/adapters/ibm-vllm/chat";
+import { IBMVllmModel } from "bee-agent-framework/adapters/ibm-vllm/chatPreset";
+import { OpenMeteoTool } from "bee-agent-framework/tools/weather/openMeteo";
+import { DuckDuckGoSearchTool } from "bee-agent-framework/tools/search/duckDuckGoSearch";
+import { Ollama } from "ollama";
+import OpenAI from "openai";
+import { z } from "zod";
+import * as process from "node:process";
+import fs from "node:fs";
+
+const Providers = {
+  WATSONX: "watsonx",
+  OLLAMA: "ollama",
+  IBMVLLM: "ibmvllm",
+  IBMRITS: "ibmrits",
+} as const;
+type Provider = (typeof Providers)[keyof typeof Providers];
+
+function getChatLLM(provider?: Provider): ChatLLM<ChatLLMOutput> {
+  const LLMFactories: Record<Provider, () => ChatLLM<ChatLLMOutput>> = {
+    [Providers.OLLAMA]: () =>
+      new OllamaChatLLM({
+        modelId: getEnv("OLLAMA_MODEL") || "granite3-dense:8b",
+        parameters: {
+          temperature: 0,
+          repeat_penalty: 1,
+          num_predict: 2000,
+        },
+        client: new Ollama({
+          host: getEnv("OLLAMA_HOST"),
+        }),
+      }),
+    [Providers.WATSONX]: () =>
+      WatsonXChatLLM.fromPreset(getEnv("WATSONX_MODEL") || "ibm/granite-3-8b-instruct", {
+        apiKey: getEnv("WATSONX_API_KEY"),
+        projectId: getEnv("WATSONX_PROJECT_ID"),
+        region: getEnv("WATSONX_REGION"),
+      }),
+    [Providers.IBMVLLM]: () => IBMVllmChatLLM.fromPreset(IBMVllmModel.GRANITE_INSTRUCT),
+    [Providers.IBMRITS]: () =>
+      new OpenAIChatLLM({
+        client: new OpenAI({
+          baseURL: process.env.IBM_RITS_URL,
+          apiKey: process.env.IBM_RITS_API_KEY,
+          defaultHeaders: {
+            RITS_API_KEY: process.env.IBM_RITS_API_KEY,
+          },
+        }),
+        modelId: getEnv("IBM_RITS_MODEL") || "ibm-granite/granite-3.0-8b-instruct",
+        parameters: {
+          temperature: 0,
+          max_tokens: 2048,
+        },
+      }),
+  };
+
+  if (!provider) {
+    provider = parseEnv("LLM_BACKEND", z.nativeEnum(Providers), Providers.OLLAMA);
+  }
+
+  const factory = LLMFactories[provider];
+  if (!factory) {
+    throw new Error(`Provider "${provider}" not found.`);
+  }
+  return factory();
+}
+
+function getPrompt(fallback: string) {
+  if (process.stdin.isTTY) {
+    return fallback;
+  }
+  return fs.readFileSync(process.stdin.fd).toString().trim() || fallback;
+}
+
+const llm = getChatLLM();
+const agent = new BeeAgent({
+  llm,
+  memory: new TokenMemory({ llm }),
+  tools: [new OpenMeteoTool(), new DuckDuckGoSearchTool({ maxResults: 3 })],
+});
+
+try {
+  const prompt = getPrompt(`What is the current weather in London?`);
+  console.info(`User 👤 : ${prompt}`);
+
+  const response = await agent
+    .run(
+      { prompt },
+      {
+        execution: {
+          maxIterations: 8,
+          maxRetriesPerStep: 3,
+          totalMaxRetries: 0,
+        },
+      },
+    )
+    .observe((emitter) => {
+      emitter.on("update", (data) => {
+        console.info(`Agent 🤖 (${data.update.key}) : ${data.update.value}`);
+      });
+    });
+  console.info(`Agent 🤖 : ${response.result.text}`);
+} catch (error) {
+  console.error(FrameworkError.ensure(error).dump());
+} finally {
+  process.exit(0);
+}
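
The new example resolves its backend at runtime via `LLM_BACKEND` (validated against the `Providers` enum with `z.nativeEnum`, defaulting to Ollama) and reads its prompt from stdin when one is piped in. Below is a minimal standalone sketch of the default Ollama path, assuming a local Ollama server with the `granite3-dense:8b` model already pulled; the trimmed-down tool list is illustrative, not part of this change:

```ts
import { BeeAgent } from "bee-agent-framework/agents/bee/agent";
import { OllamaChatLLM } from "bee-agent-framework/adapters/ollama/chat";
import { TokenMemory } from "bee-agent-framework/memory/tokenMemory";
import { OpenMeteoTool } from "bee-agent-framework/tools/weather/openMeteo";

// Same Granite-over-Ollama setup the example defaults to, without the
// provider factory; assumes Ollama is listening on its default local port.
const llm = new OllamaChatLLM({ modelId: "granite3-dense:8b" });
const agent = new BeeAgent({
  llm,
  memory: new TokenMemory({ llm }),
  tools: [new OpenMeteoTool()],
});

const response = await agent.run({ prompt: "What is the current weather in London?" });
console.info(response.result.text);
```

To try another backend, set the matching variables from `.env.template` and pipe a prompt, for example `echo "What is the current weather in London?" | LLM_BACKEND=watsonx yarn start examples/agents/ibm_granite_bee.ts` (the `yarn start` script name is an assumption about the repo's tooling).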