Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Integration with AutoGen workflow #126

Merged
merged 5 commits into from
Oct 27, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 66 additions & 0 deletions memgpt/autogen/examples/agent_autoreply.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
"""Example of how to add MemGPT into an AutoGen groupchat

Based on the official AutoGen example here: https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb

Begin by doing:
pip install "pyautogen[teachable]"
pip install pymemgpt
or
pip install -e . (inside the MemGPT home directory)
"""

import os
import autogen
from memgpt.autogen.memgpt_agent import create_memgpt_autogen_agent_from_config

config_list = [
{
"model": "gpt-4",
"api_key": os.getenv("OPENAI_API_KEY"),
},
]

# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo
# (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb)
# If USE_MEMGPT is True, then we swap out the "coder" agent with a MemGPT agent
USE_MEMGPT = True

llm_config = {"config_list": config_list, "seed": 42}

# The user agent
user_proxy = autogen.UserProxyAgent(
name="User_proxy",
system_message="A human admin.",
code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"},
human_input_mode="TERMINATE", # needed?
default_auto_reply="You are going to figure all out by your own. "
"Work by yourself, the user won't reply until you output `TERMINATE` to end the conversation.",
)

if not USE_MEMGPT:
# In the AutoGen example, we create an AssistantAgent to play the role of the coder
coder = autogen.AssistantAgent(
name="Coder",
llm_config=llm_config,
system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
f"(which I make sure to tell everyone I work with).",
human_input_mode="TERMINATE",
)

else:
# In our example, we swap this AutoGen agent with a MemGPT agent
# This MemGPT agent will have all the benefits of MemGPT, ie persistent memory, etc.
coder = create_memgpt_autogen_agent_from_config(
"MemGPT_coder",
llm_config=llm_config,
system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
f"(which I make sure to tell everyone I work with).",
human_input_mode="TERMINATE",
)

# Begin the group chat with a message from the user
user_proxy.initiate_chat(
coder,
message="I want to design an app to make me one million dollars in one month. "
"Tell me all the details, then try out every steps.",
)
36 changes: 26 additions & 10 deletions memgpt/autogen/examples/agent_groupchat.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@

import os
import autogen
from memgpt.autogen.memgpt_agent import create_autogen_memgpt_agent
from memgpt.autogen.memgpt_agent import create_autogen_memgpt_agent, create_memgpt_autogen_agent_from_config

config_list = [
{
Expand All @@ -20,10 +20,13 @@
},
]

# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb)
# If USE_MEMGPT is False, then this example will be the same as the official AutoGen repo
# (https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb)
# If USE_MEMGPT is True, then we swap out the "coder" agent with a MemGPT agent
USE_MEMGPT = True

USE_AUTOGEN_WORKFLOW = False

llm_config = {"config_list": config_list, "seed": 42}

# The user agent
Expand Down Expand Up @@ -51,13 +54,25 @@
else:
# In our example, we swap this AutoGen agent with a MemGPT agent
# This MemGPT agent will have all the benefits of MemGPT, ie persistent memory, etc.
coder = create_autogen_memgpt_agent(
"MemGPT_coder",
persona_description="I am a 10x engineer, trained in Python. I was the first engineer at Uber (which I make sure to tell everyone I work with).",
user_description=f"You are participating in a group chat with a user ({user_proxy.name}) and a product manager ({pm.name}).",
# extra options
# interface_kwargs={"debug": True},
)
if not USE_AUTOGEN_WORKFLOW:
coder = create_autogen_memgpt_agent(
"MemGPT_coder",
persona_description="I am a 10x engineer, trained in Python. I was the first engineer at Uber "
"(which I make sure to tell everyone I work with).",
user_description=f"You are participating in a group chat with a user ({user_proxy.name}) "
f"and a product manager ({pm.name}).",
# extra options
# interface_kwargs={"debug": True},
)
else:
coder = create_memgpt_autogen_agent_from_config(
"MemGPT_coder",
llm_config=llm_config,
system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
f"(which I make sure to tell everyone I work with).\n"
f"You are participating in a group chat with a user ({user_proxy.name}) "
f"and a product manager ({pm.name}).",
)

# Initialize the group chat between the user and two LLM agents (PM and coder)
groupchat = autogen.GroupChat(agents=[user_proxy, pm, coder], messages=[], max_round=12)
Expand All @@ -66,5 +81,6 @@
# Begin the group chat with a message from the user
user_proxy.initiate_chat(
manager,
message="I want to design an app to make me one million dollars in one month. Yes, your heard that right.",
message="I want to design an app to make me one million dollars in one month. "
"Yes, your heard that right.",
)
81 changes: 74 additions & 7 deletions memgpt/autogen/memgpt_agent.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from autogen.agentchat import ConversableAgent, Agent
from autogen.agentchat import Agent, ConversableAgent, UserProxyAgent, GroupChat, GroupChatManager
from ..agent import AgentAsync

import asyncio
Expand All @@ -18,16 +18,70 @@ def create_memgpt_autogen_agent_from_config(
system_message: Optional[str] = "You are a helpful AI Assistant.",
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "TERMINATE",
human_input_mode: Optional[str] = "ALWAYS",
function_map: Optional[Dict[str, Callable]] = None,
code_execution_config: Optional[Union[Dict, bool]] = None,
llm_config: Optional[Union[Dict, bool]] = None,
default_auto_reply: Optional[Union[str, Dict, None]] = "",
):
"""
TODO support AutoGen config workflow in a clean way with constructors
"""
raise NotImplementedError
"""Construct AutoGen config workflow in a clean way."""

model = constants.DEFAULT_MEMGPT_MODEL if llm_config is None else llm_config["config_list"][0]["model"]
persona_desc = personas.DEFAULT if system_message == "" else system_message
if human_input_mode == "ALWAYS":
user_desc = humans.DEFAULT
elif human_input_mode == "TERMINATE":
user_desc = "Work by yourself, the user won't reply until you output `TERMINATE` to end the conversation."
else:
user_desc = "Work by yourself, the user won't reply. Elaborate as much as possible."

if function_map is not None or code_execution_config is not None:
raise NotImplementedError

autogen_memgpt_agent = create_autogen_memgpt_agent(
name,
preset=presets.DEFAULT,
model=model,
persona_description=persona_desc,
user_description=user_desc,
is_termination_msg=is_termination_msg,
)

if human_input_mode != "ALWAYS":
coop_agent1 = create_autogen_memgpt_agent(
name,
preset=presets.DEFAULT,
model=model,
persona_description=persona_desc,
user_description=user_desc,
is_termination_msg=is_termination_msg,
)
if default_auto_reply != "":
coop_agent2 = UserProxyAgent(
name,
human_input_mode="NEVER",
default_auto_reply=default_auto_reply,
)
else:
coop_agent2 = create_autogen_memgpt_agent(
name,
preset=presets.DEFAULT,
model=model,
persona_description=persona_desc,
user_description=user_desc,
is_termination_msg=is_termination_msg,
)

groupchat = GroupChat(
agents=[autogen_memgpt_agent, coop_agent1, coop_agent2],
messages=[],
max_round=12 if max_consecutive_auto_reply is None else max_consecutive_auto_reply
)
manager = GroupChatManager(name=name, groupchat=groupchat, llm_config=llm_config)
return manager

else:
return autogen_memgpt_agent


def create_autogen_memgpt_agent(
Expand All @@ -40,6 +94,7 @@ def create_autogen_memgpt_agent(
interface_kwargs={},
persistence_manager=None,
persistence_manager_kwargs={},
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
):
"""
See AutoGenInterface.__init__ for available options you can pass into
Expand Down Expand Up @@ -73,6 +128,7 @@ def create_autogen_memgpt_agent(
autogen_memgpt_agent = MemGPTAgent(
name=autogen_name,
agent=memgpt_agent,
is_termination_msg=is_termination_msg,
)
return autogen_memgpt_agent

Expand All @@ -84,6 +140,7 @@ def __init__(
agent: AgentAsync,
skip_verify=False,
concat_other_agent_messages=False,
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
):
super().__init__(name)
self.agent = agent
Expand All @@ -95,6 +152,10 @@ def __init__(
self.register_reply([Agent, None], MemGPTAgent._generate_reply_for_user_message)
self.messages_processed_up_to_idx = 0

self._is_termination_msg = (
is_termination_msg if is_termination_msg is not None else (lambda x: x == "TERMINATE")
)

def format_other_agent_message(self, msg):
if "name" in msg:
user_message = f"{msg['name']}: {msg['content']}"
Expand Down Expand Up @@ -144,8 +205,10 @@ async def _a_generate_reply_for_user_message(
# Extend the MemGPT message list with multiple 'user' messages, then push the last one with agent.step()
self.agent.messages.extend(new_messages[:-1])
user_message = new_messages[-1]
else:
elif len(new_messages) == 1:
user_message = new_messages[0]
else:
return True, self._default_auto_reply

# Package the user message
user_message = system.package_user_message(user_message)
Expand All @@ -172,6 +235,10 @@ async def _a_generate_reply_for_user_message(
else:
break

# Stop the conversation
if self._is_termination_msg(new_messages[-1]['content']):
return True, None

# Pass back to AutoGen the pretty-printed calls MemGPT made to the interface
pretty_ret = MemGPTAgent.pretty_concat(self.agent.interface.message_list)
self.messages_processed_up_to_idx += len(new_messages)
Expand Down