diff --git a/autogpt/__init__.py b/autogpt/__init__.py index 5f5b20ef2311..909f8bf4b1db 100644 --- a/autogpt/__init__.py +++ b/autogpt/__init__.py @@ -1,5 +1,13 @@ +import os +import random +import sys + from dotenv import load_dotenv +if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"): + print("Setting random seed to 42") + random.seed(42) + # Load the users .env file into environment variables load_dotenv(verbose=True, override=True) diff --git a/autogpt/chat.py b/autogpt/chat.py index 4b906a001555..469ec9bd38df 100644 --- a/autogpt/chat.py +++ b/autogpt/chat.py @@ -1,4 +1,5 @@ import time +from random import shuffle from openai.error import RateLimitError @@ -80,12 +81,17 @@ def chat_with_ai( logger.debug(f"Token limit: {token_limit}") send_token_limit = token_limit - 1000 - - relevant_memory = ( - "" - if len(full_message_history) == 0 - else permanent_memory.get_relevant(str(full_message_history[-9:]), 10) - ) + if len(full_message_history) == 0: + relevant_memory = "" + else: + recent_history = full_message_history[-5:] + shuffle(recent_history) + relevant_memories = permanent_memory.get_relevant( + str(recent_history), 5 + ) + if relevant_memories: + shuffle(relevant_memories) + relevant_memory = str(relevant_memories) logger.debug(f"Memory Stats: {permanent_memory.get_stats()}") diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py index ca0586f44887..590f95cc0511 100644 --- a/autogpt/commands/execute_code.py +++ b/autogpt/commands/execute_code.py @@ -1,6 +1,7 @@ """Execute code in a Docker container""" import os import subprocess +from pathlib import Path import docker from docker.errors import ImageNotFound @@ -40,7 +41,6 @@ def execute_python_file(filename: str) -> str: try: client = docker.from_env() - # You can replace this with the desired Python image/version # You can find available Python images on Docker Hub: # https://hub.docker.com/_/python @@ -60,10 +60,9 @@ def execute_python_file(filename: str) -> 
str: print(f"{status}: {progress}") elif status: print(status) - container = client.containers.run( image_name, - f"python {filename}", + f"python {Path(filename).relative_to(CFG.workspace_path)}", volumes={ CFG.workspace_path: { "bind": "/workspace", diff --git a/autogpt/json_utils/json_fix_llm.py b/autogpt/json_utils/json_fix_llm.py index 869aed125cfb..10317accb98f 100644 --- a/autogpt/json_utils/json_fix_llm.py +++ b/autogpt/json_utils/json_fix_llm.py @@ -91,14 +91,33 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: Returns: str: The fixed JSON string. """ + assistant_reply = assistant_reply.strip() + if assistant_reply.startswith("```json"): + assistant_reply = assistant_reply[7:] + if assistant_reply.endswith("```"): + assistant_reply = assistant_reply[:-3] + try: + return json.loads(assistant_reply) # just check the validity + except json.JSONDecodeError: # noqa: E722 + pass + + if assistant_reply.startswith("json "): + assistant_reply = assistant_reply[5:] + assistant_reply = assistant_reply.strip() + try: + return json.loads(assistant_reply) # just check the validity + except json.JSONDecodeError: # noqa: E722 + pass # Parse and print Assistant response assistant_reply_json = fix_and_parse_json(assistant_reply) + logger.debug("Assistant reply JSON: %s", str(assistant_reply_json)) if assistant_reply_json == {}: assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( assistant_reply ) + logger.debug("Assistant reply JSON 2: %s", str(assistant_reply_json)) if assistant_reply_json != {}: return assistant_reply_json diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py index b20a1757762a..1d169fe16d36 100644 --- a/autogpt/prompts/prompt.py +++ b/autogpt/prompts/prompt.py @@ -71,6 +71,9 @@ def build_default_prompt_generator() -> PromptGenerator: "Every command has a cost, so be smart and efficient. Aim to complete tasks in" " the least number of steps." 
) + prompt_generator.add_performance_evaluation( + "If you cannot think of a valid command to perform start or message an agent to determine the next command." + ) prompt_generator.add_performance_evaluation("Write all code to a file.") return prompt_generator diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py index 9165bd963932..c1050230a8d4 100644 --- a/autogpt/workspace/workspace.py +++ b/autogpt/workspace/workspace.py @@ -11,6 +11,8 @@ from pathlib import Path +from autogpt.logs import logger + class Workspace: """A class that represents a workspace for an AutoGPT agent.""" @@ -112,8 +114,12 @@ def _sanitize_path( if root is None: return Path(relative_path).resolve() + logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'") + root, relative_path = Path(root).resolve(), Path(relative_path) + logger.debug(f"Resolved root as '{root}'") + if relative_path.is_absolute(): raise ValueError( f"Attempted to access absolute path '{relative_path}' in workspace '{root}'." @@ -121,6 +127,8 @@ def _sanitize_path( full_path = root.joinpath(relative_path).resolve() + logger.debug(f"Joined paths as '{full_path}'") + if restrict_to_root and not full_path.is_relative_to(root): raise ValueError( f"Attempted to access path '{full_path}' outside of workspace '{root}'."