Skip to content

Commit

Permalink
Refactor autogen agent to use sync memgpt, add notebook example (#157)
Browse files Browse the repository at this point in the history
* Refactor autogen agent to use sync memgpt, add notebook example

* Add colab badge to notebook

* Update colab badge to point to main

* Add imports lost in the merge

* Changes to make autogenagent work with cli refactor
  • Loading branch information
vivi authored Oct 31, 2023
1 parent db16552 commit 41762e7
Show file tree
Hide file tree
Showing 4 changed files with 208 additions and 26 deletions.
147 changes: 147 additions & 0 deletions memgpt/autogen/examples/memgpt_coder_autogen.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "591be0c0-7332-4c57-adcf-fecc578eeb67",
"metadata": {},
"source": [
"<a target=\"_blank\" href=\"https://colab.research.google.com/github/cpacker/MemGPT/blob/main/memgpt/autogen/examples/memgpt_coder_autogen.ipynb\">\n",
" <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n",
"</a>"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "43d71a67-3a01-4543-99ad-7dce12d793da",
"metadata": {},
"outputs": [],
"source": [
"%pip install pyautogen"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b3754942-819b-4df9-be3f-6cfb3ca101dc",
"metadata": {},
"outputs": [],
"source": [
"%pip install pymemgpt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bd6df0ac-66a6-4dc7-9262-4c2ad05fab91",
"metadata": {},
"outputs": [],
"source": [
"import openai\n",
"openai.api_key=\"YOUR_API_KEY\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0cb9b18c-3662-4206-9ff5-de51a3aafb36",
"metadata": {},
"outputs": [],
"source": [
"\"\"\"Example of how to add MemGPT into an AutoGen groupchat\n",
"\n",
"Based on the official AutoGen example here: https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb\n",
"\n",
"Begin by doing:\n",
" pip install \"pyautogen[teachable]\"\n",
" pip install pymemgpt\n",
" or\n",
" pip install -e . (inside the MemGPT home directory)\n",
"\"\"\"\n",
"\n",
"import os\n",
"import autogen\n",
"from memgpt.autogen.memgpt_agent import create_autogen_memgpt_agent\n",
"\n",
"config_list = [\n",
" {\n",
" \"model\": \"gpt-4\",\n",
" \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n",
" },\n",
"]\n",
"\n",
"# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb)\n",
"# If USE_MEMGPT is True, then we swap out the \"coder\" agent with a MemGPT agent\n",
"USE_MEMGPT = True\n",
"# If DEBUG is False, a lot of MemGPT's inner workings output is suppressed and only the final send_message is displayed.\n",
"# If DEBUG is True, then all of MemGPT's inner workings (function calls, etc.) will be output.\n",
"DEBUG = False\n",
"\n",
"llm_config = {\"config_list\": config_list, \"seed\": 42}\n",
"\n",
"# The user agent\n",
"user_proxy = autogen.UserProxyAgent(\n",
" name=\"User_proxy\",\n",
" system_message=\"A human admin.\",\n",
" code_execution_config={\"last_n_messages\": 2, \"work_dir\": \"groupchat\"},\n",
" human_input_mode=\"TERMINATE\", # needed?\n",
")\n",
"\n",
"# The agent playing the role of the product manager (PM)\n",
"pm = autogen.AssistantAgent(\n",
" name=\"Product_manager\",\n",
" system_message=\"Creative in software product ideas.\",\n",
" llm_config=llm_config,\n",
")\n",
"\n",
"if not USE_MEMGPT:\n",
" # In the AutoGen example, we create an AssistantAgent to play the role of the coder\n",
" coder = autogen.AssistantAgent(\n",
" name=\"Coder\",\n",
" llm_config=llm_config,\n",
" )\n",
"\n",
"else:\n",
" # In our example, we swap this AutoGen agent with a MemGPT agent\n",
" # This MemGPT agent will have all the benefits of MemGPT, ie persistent memory, etc.\n",
" coder = create_autogen_memgpt_agent(\n",
" \"MemGPT_coder\",\n",
" persona_description=\"I am a 10x engineer, trained in Python. I was the first engineer at Uber (which I make sure to tell everyone I work with).\",\n",
" user_description=f\"You are participating in a group chat with a user ({user_proxy.name}) and a product manager ({pm.name}).\",\n",
" interface_kwargs={\"debug\": DEBUG},\n",
" )\n",
"\n",
"# Initialize the group chat between the user and two LLM agents (PM and coder)\n",
"groupchat = autogen.GroupChat(agents=[user_proxy, pm, coder], messages=[], max_round=12)\n",
"manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)\n",
"\n",
"# Begin the group chat with a message from the user\n",
"user_proxy.initiate_chat(\n",
" manager,\n",
"    message=\"I want to design an app to make me one million dollars in one month. Yes, you heard that right.\",\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
24 changes: 12 additions & 12 deletions memgpt/autogen/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,22 +14,22 @@ class DummyInterface(object):
def set_message_list(self, message_list):
pass

async def internal_monologue(self, msg):
def internal_monologue(self, msg):
pass

async def assistant_message(self, msg):
def assistant_message(self, msg):
pass

async def memory_message(self, msg):
def memory_message(self, msg):
pass

async def system_message(self, msg):
def system_message(self, msg):
pass

async def user_message(self, msg, raw=False):
def user_message(self, msg, raw=False):
pass

async def function_message(self, msg):
def function_message(self, msg):
pass


Expand Down Expand Up @@ -62,7 +62,7 @@ def reset_message_list(self):
"""Clears the buffer. Call before every agent.step() when using MemGPT+AutoGen"""
self.message_list = []

async def internal_monologue(self, msg):
def internal_monologue(self, msg):
# ANSI escape code for italic is '\x1B[3m'
if self.debug:
print(f"inner thoughts :: {msg}")
Expand All @@ -71,25 +71,25 @@ async def internal_monologue(self, msg):
message = f"\x1B[3m{Fore.LIGHTBLACK_EX}💭 {msg}{Style.RESET_ALL}" if self.fancy else f"[inner thoughts] {msg}"
self.message_list.append(message)

async def assistant_message(self, msg):
def assistant_message(self, msg):
if self.debug:
print(f"assistant :: {msg}")
message = f"{Fore.YELLOW}{Style.BRIGHT}🤖 {Fore.YELLOW}{msg}{Style.RESET_ALL}" if self.fancy else msg
self.message_list.append(message)

async def memory_message(self, msg):
def memory_message(self, msg):
if self.debug:
print(f"memory :: {msg}")
message = f"{Fore.LIGHTMAGENTA_EX}{Style.BRIGHT}🧠 {Fore.LIGHTMAGENTA_EX}{msg}{Style.RESET_ALL}" if self.fancy else f"[memory] {msg}"
self.message_list.append(message)

async def system_message(self, msg):
def system_message(self, msg):
if self.debug:
print(f"system :: {msg}")
message = f"{Fore.MAGENTA}{Style.BRIGHT}🖥️ [system] {Fore.MAGENTA}{msg}{Style.RESET_ALL}" if self.fancy else f"[system] {msg}"
self.message_list.append(message)

async def user_message(self, msg, raw=False):
def user_message(self, msg, raw=False):
if self.debug:
print(f"user :: {msg}")
if not self.show_user_message:
Expand Down Expand Up @@ -126,7 +126,7 @@ async def user_message(self, msg, raw=False):

self.message_list.append(message)

async def function_message(self, msg):
def function_message(self, msg):
if self.debug:
print(f"function :: {msg}")
if not self.show_function_outputs:
Expand Down
27 changes: 14 additions & 13 deletions memgpt/autogen/memgpt_agent.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from autogen.agentchat import Agent, ConversableAgent, UserProxyAgent, GroupChat, GroupChatManager
from ..agent import AgentAsync
from ..agent import Agent as _Agent

import asyncio
from typing import Callable, Optional, List, Dict, Union, Any, Tuple
Expand All @@ -11,6 +11,7 @@
from .. import presets
from ..personas import personas
from ..humans import humans
from ..config import AgentConfig


def create_memgpt_autogen_agent_from_config(
Expand Down Expand Up @@ -86,7 +87,7 @@ def create_memgpt_autogen_agent_from_config(

def create_autogen_memgpt_agent(
autogen_name,
preset=presets.DEFAULT_PRESET,
preset=presets.SYNC_CHAT,
model=constants.DEFAULT_MEMGPT_MODEL,
persona_description=personas.DEFAULT,
user_description=humans.DEFAULT,
Expand All @@ -112,8 +113,16 @@ def create_autogen_memgpt_agent(
interface = AutoGenInterface(**interface_kwargs) if interface is None else interface
persistence_manager = InMemoryStateManager(**persistence_manager_kwargs) if persistence_manager is None else persistence_manager

agent_config = AgentConfig(
persona=persona_description,
human=user_description,
model=model,
preset=presets.SYNC_CHAT,
)

memgpt_agent = presets.use_preset(
preset,
agent_config,
model,
persona_description,
user_description,
Expand All @@ -133,7 +142,7 @@ class MemGPTAgent(ConversableAgent):
def __init__(
self,
name: str,
agent: AgentAsync,
agent: _Agent,
skip_verify=False,
concat_other_agent_messages=False,
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
Expand All @@ -142,7 +151,6 @@ def __init__(
self.agent = agent
self.skip_verify = skip_verify
self.concat_other_agent_messages = concat_other_agent_messages
self.register_reply([Agent, None], MemGPTAgent._a_generate_reply_for_user_message)
self.register_reply([Agent, None], MemGPTAgent._generate_reply_for_user_message)
self.messages_processed_up_to_idx = 0

Expand Down Expand Up @@ -171,14 +179,6 @@ def _generate_reply_for_user_message(
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
return asyncio.run(self._a_generate_reply_for_user_message(messages=messages, sender=sender, config=config))

async def _a_generate_reply_for_user_message(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
self.agent.interface.reset_message_list()

Expand Down Expand Up @@ -206,7 +206,7 @@ async def _a_generate_reply_for_user_message(
heartbeat_request,
function_failed,
token_warning,
) = await self.agent.step(user_message, first_message=False, skip_verify=self.skip_verify)
) = self.agent.step(user_message, first_message=False, skip_verify=self.skip_verify)
# Skip user inputs if there's a memory warning, function execution failed, or the agent asked for control
if token_warning:
user_message = system.get_token_limit_warning()
Expand All @@ -225,6 +225,7 @@ async def _a_generate_reply_for_user_message(
pretty_ret = MemGPTAgent.pretty_concat(self.agent.interface.message_list)
self.messages_processed_up_to_idx += len(new_messages)
return True, pretty_ret
return asyncio.run(self._a_generate_reply_for_user_message(messages=messages, sender=sender, config=config))

@staticmethod
def pretty_concat(messages):
Expand Down
36 changes: 35 additions & 1 deletion memgpt/presets.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,13 @@
DEFAULT_PRESET = "memgpt_chat"
preset_options = [DEFAULT_PRESET]

SYNC_CHAT = "memgpt_chat_sync" # TODO: remove me after we move the CLI to AgentSync


def use_preset(preset_name, agent_config, model, persona, human, interface, persistence_manager):
"""Storing combinations of SYSTEM + FUNCTION prompts"""

from memgpt.agent import AgentAsync
from memgpt.agent import AgentAsync, Agent
from memgpt.utils import printd

if preset_name == DEFAULT_PRESET:
Expand Down Expand Up @@ -43,5 +45,37 @@ def use_preset(preset_name, agent_config, model, persona, human, interface, pers
first_message_verify_mono=True if "gpt-4" in model else False,
)

if preset_name == "memgpt_chat_sync": # TODO: remove me after we move the CLI to AgentSync
functions = [
"send_message",
"pause_heartbeats",
"core_memory_append",
"core_memory_replace",
"conversation_search",
"conversation_search_date",
"archival_memory_insert",
"archival_memory_search",
]
available_functions = [v for k, v in gpt_functions.FUNCTIONS_CHAINING.items() if k in functions]
printd(f"Available functions:\n", [x["name"] for x in available_functions])
assert len(functions) == len(available_functions)

if "gpt-3.5" in model:
# use a different system message for gpt-3.5
preset_name = "memgpt_gpt35_extralong"

return Agent(
config=agent_config,
model=model,
system=gpt_system.get_system_text(DEFAULT_PRESET),
functions=available_functions,
interface=interface,
persistence_manager=persistence_manager,
persona_notes=persona,
human_notes=human,
# gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
first_message_verify_mono=True if "gpt-4" in model else False,
)

else:
raise ValueError(preset_name)

0 comments on commit 41762e7

Please sign in to comment.