Skip to content

Commit

Permalink
Remove AsyncAgent and async from cli (#400)
Browse files Browse the repository at this point in the history
* Remove AsyncAgent and async from cli

Refactor agent.py memory.py

Refactor interface.py

Refactor main.py

Refactor openai_tools.py

Refactor cli/cli.py

stray asyncs

save

make legacy embeddings not use async

Refactor presets

Remove deleted function from import

* remove stray prints

* typo

* another stray print

* patch test

---------

Co-authored-by: cpacker <[email protected]>
  • Loading branch information
vivi and cpacker authored Nov 9, 2023
1 parent 19bfa81 commit 34e6371
Show file tree
Hide file tree
Showing 13 changed files with 128 additions and 774 deletions.
406 changes: 3 additions & 403 deletions memgpt/agent.py

Large diffs are not rendered by default.

7 changes: 0 additions & 7 deletions memgpt/agent_base.py

This file was deleted.

10 changes: 5 additions & 5 deletions memgpt/autogen/memgpt_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def create_memgpt_autogen_agent_from_config(

autogen_memgpt_agent = create_autogen_memgpt_agent(
name,
preset=presets.SYNC_CHAT,
preset=presets.DEFAULT_PRESET,
model=model,
persona_description=persona_desc,
user_description=user_desc,
Expand All @@ -57,7 +57,7 @@ def create_memgpt_autogen_agent_from_config(
if human_input_mode != "ALWAYS":
coop_agent1 = create_autogen_memgpt_agent(
name,
preset=presets.SYNC_CHAT,
preset=presets.DEFAULT_PRESET,
model=model,
persona_description=persona_desc,
user_description=user_desc,
Expand All @@ -73,7 +73,7 @@ def create_memgpt_autogen_agent_from_config(
else:
coop_agent2 = create_autogen_memgpt_agent(
name,
preset=presets.SYNC_CHAT,
preset=presets.DEFAULT_PRESET,
model=model,
persona_description=persona_desc,
user_description=user_desc,
Expand All @@ -95,7 +95,7 @@ def create_memgpt_autogen_agent_from_config(

def create_autogen_memgpt_agent(
autogen_name,
preset=presets.SYNC_CHAT,
preset=presets.DEFAULT_PRESET,
model=constants.DEFAULT_MEMGPT_MODEL,
persona_description=personas.DEFAULT,
user_description=humans.DEFAULT,
Expand Down Expand Up @@ -126,7 +126,7 @@ def create_autogen_memgpt_agent(
persona=persona_description,
human=user_description,
model=model,
preset=presets.SYNC_CHAT,
preset=presets.DEFAULT_PRESET,
)

interface = AutoGenInterface(**interface_kwargs) if interface is None else interface
Expand Down
8 changes: 3 additions & 5 deletions memgpt/cli/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
import sys
import io
import logging
import asyncio
import os
from prettytable import PrettyTable
import questionary
Expand All @@ -24,7 +23,7 @@
from memgpt.persistence_manager import LocalStateManager
from memgpt.config import MemGPTConfig, AgentConfig
from memgpt.constants import MEMGPT_DIR
from memgpt.agent import AgentAsync
from memgpt.agent import Agent
from memgpt.embeddings import embedding_model
from memgpt.openai_tools import (
configure_azure_support,
Expand Down Expand Up @@ -121,7 +120,7 @@ def run(
agent_config.save()

# load existing agent
memgpt_agent = AgentAsync.load_agent(memgpt.interface, agent_config)
memgpt_agent = Agent.load_agent(memgpt.interface, agent_config)
else: # create new agent
# create new agent config: override defaults with args if provided
typer.secho("Creating new agent...", fg=typer.colors.GREEN)
Expand Down Expand Up @@ -162,8 +161,7 @@ def run(
if config.model_endpoint == "azure":
configure_azure_support()

loop = asyncio.get_event_loop()
loop.run_until_complete(run_agent_loop(memgpt_agent, first, no_verify, config)) # TODO: add back no_verify
run_agent_loop(memgpt_agent, first, no_verify, config) # TODO: add back no_verify


def attach(
Expand Down
6 changes: 3 additions & 3 deletions memgpt/cli/cli_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ def configure():
config = MemGPTConfig.load()

# openai credentials
use_openai = questionary.confirm("Do you want to enable MemGPT with Open AI?", default=True).ask()
use_openai = questionary.confirm("Do you want to enable MemGPT with OpenAI?", default=True).ask()
if use_openai:
# search for key in environment
openai_key = os.getenv("OPENAI_API_KEY")
Expand Down Expand Up @@ -119,10 +119,10 @@ def configure():

# defaults
personas = [os.path.basename(f).replace(".txt", "") for f in utils.list_persona_files()]
print(personas)
# print(personas)
default_persona = questionary.select("Select default persona:", personas, default=config.default_persona).ask()
humans = [os.path.basename(f).replace(".txt", "") for f in utils.list_human_files()]
print(humans)
# print(humans)
default_human = questionary.select("Select default human:", humans, default=config.default_human).ask()

# TODO: figure out if we should set a default agent or not
Expand Down
50 changes: 25 additions & 25 deletions memgpt/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ def save(self):

# archival storage
config.add_section("archival_storage")
print("archival storage", self.archival_storage_type)
# print("archival storage", self.archival_storage_type)
config.set("archival_storage", "type", self.archival_storage_type)
if self.archival_storage_path:
config.set("archival_storage", "path", self.archival_storage_path)
Expand Down Expand Up @@ -350,7 +350,7 @@ def __init__(self):
self.preload_archival = False

@classmethod
async def legacy_flags_init(
def legacy_flags_init(
cls: Type["Config"],
model: str,
memgpt_persona: str,
Expand All @@ -372,19 +372,19 @@ async def legacy_flags_init(
if self.archival_storage_index:
recompute_embeddings = False # TODO Legacy support -- can't recompute embeddings on a path that's not specified.
if self.archival_storage_files:
await self.configure_archival_storage(recompute_embeddings)
self.configure_archival_storage(recompute_embeddings)
return self

@classmethod
async def config_init(cls: Type["Config"], config_file: str = None):
def config_init(cls: Type["Config"], config_file: str = None):
self = cls()
self.config_file = config_file
if self.config_file is None:
cfg = Config.get_most_recent_config()
use_cfg = False
if cfg:
print(f"{Style.BRIGHT}{Fore.MAGENTA}⚙️ Found saved config file.{Style.RESET_ALL}")
use_cfg = await questionary.confirm(f"Use most recent config file '{cfg}'?").ask_async()
use_cfg = questionary.confirm(f"Use most recent config file '{cfg}'?").ask()
if use_cfg:
self.config_file = cfg

Expand All @@ -393,74 +393,74 @@ async def config_init(cls: Type["Config"], config_file: str = None):
recompute_embeddings = False
if self.compute_embeddings:
if self.archival_storage_index:
recompute_embeddings = await questionary.confirm(
recompute_embeddings = questionary.confirm(
f"Would you like to recompute embeddings? Do this if your files have changed.\n Files: {self.archival_storage_files}",
default=False,
).ask_async()
).ask()
else:
recompute_embeddings = True
if self.load_type:
await self.configure_archival_storage(recompute_embeddings)
self.configure_archival_storage(recompute_embeddings)
self.write_config()
return self

# print("No settings file found, configuring MemGPT...")
print(f"{Style.BRIGHT}{Fore.MAGENTA}⚙️ No settings file found, configuring MemGPT...{Style.RESET_ALL}")

self.model = await questionary.select(
self.model = questionary.select(
"Which model would you like to use?",
model_choices,
default=model_choices[0],
).ask_async()
).ask()

self.memgpt_persona = await questionary.select(
self.memgpt_persona = questionary.select(
"Which persona would you like MemGPT to use?",
Config.get_memgpt_personas(),
).ask_async()
).ask()
print(self.memgpt_persona)

self.human_persona = await questionary.select(
self.human_persona = questionary.select(
"Which user would you like to use?",
Config.get_user_personas(),
).ask_async()
).ask()

self.archival_storage_index = None
self.preload_archival = await questionary.confirm(
self.preload_archival = questionary.confirm(
"Would you like to preload anything into MemGPT's archival memory?", default=False
).ask_async()
).ask()
if self.preload_archival:
self.load_type = await questionary.select(
self.load_type = questionary.select(
"What would you like to load?",
choices=[
questionary.Choice("A folder or file", value="folder"),
questionary.Choice("A SQL database", value="sql"),
questionary.Choice("A glob pattern", value="glob"),
],
).ask_async()
).ask()
if self.load_type == "folder" or self.load_type == "sql":
archival_storage_path = await questionary.path("Please enter the folder or file (tab for autocomplete):").ask_async()
archival_storage_path = questionary.path("Please enter the folder or file (tab for autocomplete):").ask()
if os.path.isdir(archival_storage_path):
self.archival_storage_files = os.path.join(archival_storage_path, "*")
else:
self.archival_storage_files = archival_storage_path
else:
self.archival_storage_files = await questionary.path("Please enter the glob pattern (tab for autocomplete):").ask_async()
self.compute_embeddings = await questionary.confirm(
self.archival_storage_files = questionary.path("Please enter the glob pattern (tab for autocomplete):").ask()
self.compute_embeddings = questionary.confirm(
"Would you like to compute embeddings over these files to enable embeddings search?"
).ask_async()
await self.configure_archival_storage(self.compute_embeddings)
).ask()
self.configure_archival_storage(self.compute_embeddings)

self.write_config()
return self

async def configure_archival_storage(self, recompute_embeddings):
def configure_archival_storage(self, recompute_embeddings):
if recompute_embeddings:
if self.host:
interface.warning_message(
"⛔️ Embeddings on a non-OpenAI endpoint are not yet supported, falling back to substring matching search."
)
else:
self.archival_storage_index = await utils.prepare_archival_index_from_files_compute_embeddings(self.archival_storage_files)
self.archival_storage_index = utils.prepare_archival_index_from_files_compute_embeddings(self.archival_storage_files)
if self.compute_embeddings and self.archival_storage_index:
self.index, self.archival_database = utils.prepare_archival_index(self.archival_storage_index)
else:
Expand Down
38 changes: 19 additions & 19 deletions memgpt/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,36 +28,36 @@ def warning_message(msg):
print(fstr.format(msg=msg))


async def internal_monologue(msg):
def internal_monologue(msg):
# ANSI escape code for italic is '\x1B[3m'
fstr = f"\x1B[3m{Fore.LIGHTBLACK_EX}💭 {{msg}}{Style.RESET_ALL}"
if STRIP_UI:
fstr = "{msg}"
print(fstr.format(msg=msg))


async def assistant_message(msg):
def assistant_message(msg):
fstr = f"{Fore.YELLOW}{Style.BRIGHT}🤖 {Fore.YELLOW}{{msg}}{Style.RESET_ALL}"
if STRIP_UI:
fstr = "{msg}"
print(fstr.format(msg=msg))


async def memory_message(msg):
def memory_message(msg):
fstr = f"{Fore.LIGHTMAGENTA_EX}{Style.BRIGHT}🧠 {Fore.LIGHTMAGENTA_EX}{{msg}}{Style.RESET_ALL}"
if STRIP_UI:
fstr = "{msg}"
print(fstr.format(msg=msg))


async def system_message(msg):
def system_message(msg):
fstr = f"{Fore.MAGENTA}{Style.BRIGHT}🖥️ [system] {Fore.MAGENTA}{msg}{Style.RESET_ALL}"
if STRIP_UI:
fstr = "{msg}"
print(fstr.format(msg=msg))


async def user_message(msg, raw=False, dump=False, debug=DEBUG):
def user_message(msg, raw=False, dump=False, debug=DEBUG):
def print_user_message(icon, msg, printf=print):
if STRIP_UI:
printf(f"{icon} {msg}")
Expand Down Expand Up @@ -103,7 +103,7 @@ def printd_user_message(icon, msg):
printd_user_message("🧑", msg_json)


async def function_message(msg, debug=DEBUG):
def function_message(msg, debug=DEBUG):
def print_function_message(icon, msg, color=Fore.RED, printf=print):
if STRIP_UI:
printf(f"⚡{icon} [function] {msg}")
Expand Down Expand Up @@ -171,7 +171,7 @@ def printd_function_message(icon, msg, color=Fore.RED):
printd_function_message("", msg)


async def print_messages(message_sequence, dump=False):
def print_messages(message_sequence, dump=False):
idx = len(message_sequence)
for msg in message_sequence:
if dump:
Expand All @@ -181,42 +181,42 @@ async def print_messages(message_sequence, dump=False):
content = msg["content"]

if role == "system":
await system_message(content)
system_message(content)
elif role == "assistant":
# Differentiate between internal monologue, function calls, and messages
if msg.get("function_call"):
if content is not None:
await internal_monologue(content)
internal_monologue(content)
# I think the next one is not up to date
# await function_message(msg["function_call"])
# function_message(msg["function_call"])
args = json.loads(msg["function_call"].get("arguments"))
await assistant_message(args.get("message"))
assistant_message(args.get("message"))
# assistant_message(content)
else:
await internal_monologue(content)
internal_monologue(content)
elif role == "user":
await user_message(content, dump=dump)
user_message(content, dump=dump)
elif role == "function":
await function_message(content, debug=dump)
function_message(content, debug=dump)
else:
print(f"Unknown role: {content}")


async def print_messages_simple(message_sequence):
def print_messages_simple(message_sequence):
for msg in message_sequence:
role = msg["role"]
content = msg["content"]

if role == "system":
await system_message(content)
system_message(content)
elif role == "assistant":
await assistant_message(content)
assistant_message(content)
elif role == "user":
await user_message(content, raw=True)
user_message(content, raw=True)
else:
print(f"Unknown role: {content}")


async def print_messages_raw(message_sequence):
def print_messages_raw(message_sequence):
for msg in message_sequence:
print(msg)
Loading

0 comments on commit 34e6371

Please sign in to comment.