feat: example agent using IBM Granite LLM (#213)
Signed-off-by: Graham White <[email protected]>
grahamwhiteuk authored Dec 3, 2024
1 parent 62cc4ce commit 9745920
Showing 3 changed files with 135 additions and 3 deletions.
21 changes: 18 additions & 3 deletions .env.template
@@ -11,9 +11,6 @@ BEE_FRAMEWORK_LOG_SINGLE_LINE="false"
 # WATSONX_PROJECT_ID=
 # WATSONX_REGION=
 
-# For BAM LLM Adapter
-# GENAI_API_KEY=
-
 # For OpenAI LLM Adapter
 # OPENAI_API_KEY=
 
@@ -26,6 +23,24 @@ AZURE_OPENAI_ENDPOINT=
 # For Groq LLM Adapter
 # GROQ_API_KEY=
 
+# For IBM BAM LLM Adapter
+# GENAI_API_KEY=
+
+# For IBM VLLM LLM Adapter
+# IBM_VLLM_URL=
+# IBM_VLLM_ROOT_CERT=
+# IBM_VLLM_CERT_CHAIN=
+# IBM_VLLM_PRIVATE_KEY=
+
+# For IBM RITS LLM Adapter
+# IBM_RITS_URL=
+# IBM_RITS_API_KEY=
+# IBM_RITS_MODEL=ibm-granite/granite-3.0-8b-instruct
+
+# LLM Provider, used for some of the example agents
+# (watsonx/ollama/openai/groq/bam/ibmvllm/ibmrits)
+# LLM_BACKEND=ollama
+
 # For GCP VertexAI Adapter
 # GOOGLE_APPLICATION_CREDENTIALS=
 # GCP_VERTEXAI_PROJECT=
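Taken together, the template now covers every backend the new example understands. A minimal `.env` for the default Ollama backend might look like the following sketch (values are illustrative; `OLLAMA_HOST` and `OLLAMA_MODEL` are read by the example itself and fall back to the local daemon and `granite3-dense:8b` when unset):

```ini
# Use the local Ollama daemon for the example agents
LLM_BACKEND=ollama

# Optional overrides read by examples/agents/ibm_granite_bee.ts
OLLAMA_HOST=http://127.0.0.1:11434
OLLAMA_MODEL=granite3-dense:8b
```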
1 change: 1 addition & 0 deletions examples/README.md
@@ -21,6 +21,7 @@ This repository contains examples demonstrating the usage of the Bee Agent Framework
 - [`bee_advanced.ts`](agents/bee_advanced.ts): Advanced Bee Agent with custom configurations
 - [`bee_reusable.ts`](agents/bee_reusable.ts): Demonstration of serializing and reusing Bee Agents
 - [`custom_agent.ts`](agents/custom_agent.ts): Example of creating a custom agent
+- [`ibm_granite_bee.ts`](agents/ibm_granite_bee.ts): Basic Bee Agent using an IBM Granite LLM
 - [`simple.ts`](agents/simple.ts): Simple agent implementation
 - [`sql.ts`](agents/sql.ts): Agent for SQL-related tasks

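The new example reads its prompt from stdin when one is piped in and falls back to a built-in question otherwise, so (assuming a TypeScript runner such as `npx tsx`, which the commit itself does not prescribe) it can be invoked either bare or as `echo "What is the weather in Rome?" | npx tsx examples/agents/ibm_granite_bee.ts`.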
116 changes: 116 additions & 0 deletions examples/agents/ibm_granite_bee.ts
@@ -0,0 +1,116 @@
import "dotenv/config.js";
import { BeeAgent } from "bee-agent-framework/agents/bee/agent";
import { ChatLLM, ChatLLMOutput } from "bee-agent-framework/llms/chat";
import { getEnv, parseEnv } from "bee-agent-framework/internals/env";
import { FrameworkError } from "bee-agent-framework/errors";
import { TokenMemory } from "bee-agent-framework/memory/tokenMemory";
import { WatsonXChatLLM } from "bee-agent-framework/adapters/watsonx/chat";
import { OpenAIChatLLM } from "bee-agent-framework/adapters/openai/chat";
import { OllamaChatLLM } from "bee-agent-framework/adapters/ollama/chat";
import { IBMVllmChatLLM } from "bee-agent-framework/adapters/ibm-vllm/chat";
import { IBMVllmModel } from "bee-agent-framework/adapters/ibm-vllm/chatPreset";
import { OpenMeteoTool } from "bee-agent-framework/tools/weather/openMeteo";
import { DuckDuckGoSearchTool } from "bee-agent-framework/tools/search/duckDuckGoSearch";
import { Ollama } from "ollama";
import OpenAI from "openai";
import { z } from "zod";
import * as process from "node:process";
import fs from "node:fs";

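// The chat backends this example can drive; the active one is chosen via LLM_BACKEND.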
const Providers = {
  WATSONX: "watsonx",
  OLLAMA: "ollama",
  IBMVLLM: "ibmvllm",
  IBMRITS: "ibmrits",
} as const;
type Provider = (typeof Providers)[keyof typeof Providers];

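// Builds a Granite-backed chat model for the requested provider, defaulting to the LLM_BACKEND env var (ollama when unset).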
function getChatLLM(provider?: Provider): ChatLLM<ChatLLMOutput> {
  const LLMFactories: Record<Provider, () => ChatLLM<ChatLLMOutput>> = {
    [Providers.OLLAMA]: () =>
      new OllamaChatLLM({
        modelId: getEnv("OLLAMA_MODEL") || "granite3-dense:8b",
        parameters: {
          temperature: 0,
          repeat_penalty: 1,
          num_predict: 2000,
        },
        client: new Ollama({
          host: getEnv("OLLAMA_HOST"),
        }),
      }),
    [Providers.WATSONX]: () =>
      WatsonXChatLLM.fromPreset(getEnv("WATSONX_MODEL") || "ibm/granite-3-8b-instruct", {
        apiKey: getEnv("WATSONX_API_KEY"),
        projectId: getEnv("WATSONX_PROJECT_ID"),
        region: getEnv("WATSONX_REGION"),
      }),
    [Providers.IBMVLLM]: () => IBMVllmChatLLM.fromPreset(IBMVllmModel.GRANITE_INSTRUCT),
    [Providers.IBMRITS]: () =>
      new OpenAIChatLLM({
        client: new OpenAI({
          baseURL: process.env.IBM_RITS_URL,
          apiKey: process.env.IBM_RITS_API_KEY,
          defaultHeaders: {
            RITS_API_KEY: process.env.IBM_RITS_API_KEY,
          },
        }),
        modelId: getEnv("IBM_RITS_MODEL") || "ibm-granite/granite-3.0-8b-instruct",
        parameters: {
          temperature: 0,
          max_tokens: 2048,
        },
      }),
  };

  if (!provider) {
    provider = parseEnv("LLM_BACKEND", z.nativeEnum(Providers), Providers.OLLAMA);
  }

  const factory = LLMFactories[provider];
  if (!factory) {
    throw new Error(`Provider "${provider}" not found.`);
  }
  return factory();
}

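// Uses a piped-in stdin prompt when present, otherwise the supplied fallback.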
function getPrompt(fallback: string) {
  if (process.stdin.isTTY) {
    return fallback;
  }
  return fs.readFileSync(process.stdin.fd).toString().trim() || fallback;
}

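// Assemble the agent: token-aware conversation memory plus weather (Open-Meteo) and web search (DuckDuckGo) tools.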
const llm = getChatLLM();
const agent = new BeeAgent({
  llm,
  memory: new TokenMemory({ llm }),
  tools: [new OpenMeteoTool(), new DuckDuckGoSearchTool({ maxResults: 3 })],
});

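// Run the agent, logging each intermediate update (e.g. thoughts and tool calls) before the final answer.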
try {
  const prompt = getPrompt(`What is the current weather in London?`);
  console.info(`User 👤 : ${prompt}`);

  const response = await agent
    .run(
      { prompt },
      {
        execution: {
          maxIterations: 8,
          maxRetriesPerStep: 3,
          totalMaxRetries: 0,
        },
      },
    )
    .observe((emitter) => {
      emitter.on("update", (data) => {
        console.info(`Agent 🤖 (${data.update.key}) : ${data.update.value}`);
      });
    });
  console.info(`Agent 🤖 : ${response.result.text}`);
} catch (error) {
  console.error(FrameworkError.ensure(error).dump());
} finally {
  process.exit(0);
}
