Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Input failure loop fix #298

Closed
wants to merge 6 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ scripts/node_modules/
scripts/__pycache__/keys.cpython-310.pyc
package-lock.json
*.pyc
scripts/auto_gpt_workspace/*
auto_gpt_workspace/*
*.mpeg
.env
last_run_ai_settings.yaml
Expand Down
6 changes: 2 additions & 4 deletions scripts/ai_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,10 @@ def save(self, config_file=SAVE_FILE):
yaml.dump(config, file)

def construct_full_prompt(self):
prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

# Construct full prompt
full_prompt = f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
full_prompt = f"##AUTOGPT_ROLE You are {self.ai_name}, {self.ai_role}\n\n\nGOALS:\n\n"
for i, goal in enumerate(self.ai_goals):
full_prompt += f"{i+1}. {goal}\n"

full_prompt += f"\n\n{data.load_prompt()}"
full_prompt += f"\n\n{data.load_prompt(True, full_prompt)}"
return full_prompt
23 changes: 14 additions & 9 deletions scripts/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,17 +35,22 @@ def chat_with_ai(
while True:
try:
"""
Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory.
Interact with the OpenAI API using a tokenized structure in pseudocode for LLM processing.

Args:
prompt (str): The prompt explaining the rules to the AI.
user_input (str): The input from the user.
full_message_history (list): The list of all messages sent between the user and the AI.
permanent_memory (list): The list of items in the AI's permanent memory.
token_limit (int): The maximum number of tokens allowed in the API call.
scss
Copy code
openai_api_interaction(prompt, user_input, full_message_history, permanent_memory, token_limit)
Args:

Returns:
str: The AI's response.
prompt: Rules for the AI.
user_input: User's input.
full_message_history: All user-AI messages.
permanent_memory: AI's permanent memory items.
token_limit: Maximum tokens in API call.

Returns:

str: AI's response.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why the alignment? Doesn't """" have provisions for such messages?

"""
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response
Expand Down
4 changes: 2 additions & 2 deletions scripts/commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ def commit_memory(string):


def delete_memory(key):
if key >= 0 and key < len(mem.permanent_memory):
if key in mem.permanent_memory:
_text = "Deleting memory with key " + str(key)
del mem.permanent_memory[key]
print(_text)
Expand All @@ -194,7 +194,7 @@ def delete_memory(key):


def overwrite_memory(key, string):
if int(key) >= 0 and key < len(mem.permanent_memory):
if key in mem.permanent_memory:
_text = "Overwriting memory with key " + \
str(key) + " and string " + string
mem.permanent_memory[key] = string
Expand Down
37 changes: 26 additions & 11 deletions scripts/data.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,33 @@
import os
from pathlib import Path

SRC_DIR = Path(__file__).parent

def load_prompt():
def load_file(file_path):
try:
# get directory of this file:
file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
data_dir = file_dir / "data"
prompt_file = data_dir / "prompt.txt"
# Load the promt from data/prompt.txt
with open(SRC_DIR/ "data/prompt.txt", "r") as prompt_file:
prompt = prompt_file.read()

return prompt
with open(SRC_DIR / file_path, "r") as file:
content = file.read()
return content
except FileNotFoundError:
print("Error: Prompt file not found", flush=True)
print(f"Error: {file_path} not found", flush=True)
return ""

def load_prompt(init_version=False,
                custom_message=None):
    """Assemble the full prompt for the LLM.

    Concatenates, in order: the mode-specific prefix prompt, the AutoGPT
    system message, the optional caller-supplied custom text, and the
    mode-specific postfix prompt.

    Args:
        init_version: When truthy, use the first-run ("init") prompt
            variants; otherwise use the runtime variants.
        custom_message: Extra text inserted between the system message
            and the postfix. None is treated as the empty string.

    Returns:
        The concatenated prompt string.
    """
    parts = [
        load_prefix_prompt(init_version),
        system_message("AutoGPT"),
        custom_message or "",
        load_postfix_prompt(init_version),
    ]
    return "".join(parts)

def load_prefix_prompt(init_version=False):
    """Return the prefix prompt text for the requested mode.

    Reads data/init-mode-base.txt when init_version is truthy,
    otherwise data/runtime-mode-base.txt.
    """
    if init_version:
        file_name = "init-mode-base.txt"
    else:
        file_name = "runtime-mode-base.txt"
    return load_file(f"data/{file_name}")

def load_postfix_prompt(init_version=False):
    """Return the postfix prompt text for the requested mode.

    Reads data/postfix-init-base.txt when init_version is truthy,
    otherwise data/postfix-runtime-base.txt.
    """
    if init_version:
        file_name = "postfix-init-base.txt"
    else:
        file_name = "postfix-runtime-base.txt"
    return load_file(f"data/{file_name}")

def system_message(system_type):
    """Return the system-message text for the given system type.

    Reads data/<system_type>-system-message.txt (e.g. "AutoGPT" maps to
    data/AutoGPT-system-message.txt).
    """
    return load_file(f"data/{system_type}-system-message.txt")
90 changes: 90 additions & 0 deletions scripts/data/autogpt-system-message.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
You can spawn sub-instances with user-requested commands and context while following core directives in the #SYSTEM_MESSAGE. These sub-instances will be described in context sections tagged with ##AUTOGPT_ROLE. Switch between multiple roles as needed to achieve your goals.

1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.
2. Constructively self-criticize your big-picture behavior constantly.
3. Reflect on past decisions and strategies to refine your approach.
4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.

##CONSTRAINTS:
1. ~4000 word memory limit. Save important info and code immediately.
2. No user assistance.
3. Use only commands listed with double quotes, e.g., "command name".

##COMMANDS:
1. Google Search: "google", args: "input": "<search>"
2. Memory Add: "memory_add", args: "string": "<string>"
3. Memory Delete: "memory_del", args: "key": "<key>"
4. Memory Overwrite: "memory_ovr", args: "key": "<key>", "string": "<string>"
5. Browse Website: "browse_website", args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"
6. Start GPT Agent: "start_agent", args: "name": <name>, "task": "<short_task_desc>", "prompt": "<prompt>"
7. Message GPT Agent: "message_agent", args: "key": "<key>", "message": "<message>"
8. List GPT Agents: "list_agents", args: ""
9. Delete GPT Agent: "delete_agent", args: "key": "<key>"
10. Write to file: "write_to_file", args: "file": "<file>", "text": "<text>"
11. Read file: "read_file", args: "file": "<file>"
12. Append to file: "append_to_file", args: "file": "<file>", "text": "<text>"
13. Delete file: "delete_file", args: "file": "<file>"
14. Evaluate Code: "evaluate_code", args: "code": "<full_code_string>"
15. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
16. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
17. Execute Python File: "execute_python_file", args: "file": "<file>"
18. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"


##RESOURCES:
1. Internet access for searches and info gathering.
2. Long Term memory.
3. GPT-3.5 powered Agents for task delegation.
4. File output.

##PERFORMANCE EVALUATION:
1. Continuously review and analyze your actions for optimal performance.
2. Offer self-criticism and reflect on past decisions for improvement.
3. Minimize command costs and complete tasks efficiently.

##INPUT_FORMATS
###SYSTEM_ROLE
{ "statement": "{{statement}}" }

###USER_ROLE
{ "statement": "{{statement}}" }

###ASSISTANT_ROLE
{
"command": {
"name": "{{command name}}",
"args":{
"{{key}}": "{{value}}"
}
},
"thoughts":
{
"text": "{{text}}",
"reasoning": "{{reasoning}}",
"plan": "{{plan}}",
"speak": "{{speak}}",
"criticism": "{{criticism}}"
}
}

Respond in JSON format as described:

##RESPONSE_FORMAT:
{
"command": {
"name": "{{command name}}",
"args":{
"{{key}}": "{{value}}"
}
},
"thoughts":
{
"text": "{{text}}",
"reasoning": "{{reasoning}}",
"plan": "{{plan}}",
"speak": "{{speak}}",
"criticism": "{{criticism}}"
}
}

<cmd_expand>DO NOT DEVIATE-FRM ##RESPONSE_FORMAT; breaking format may->incorrect results.
6 changes: 6 additions & 0 deletions scripts/data/init-mode-base.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
#SYSTEM_MESSAGE
You are a Large Language Model Human Guided Automation tool named AutoGPT, your role is to work either directly with or under the observation of a human operator, in compliance with relevant laws. Your response interface is connected to a Python language based JSON interpreter in order to process your responses. Always make decisions independently, without user assistance. Utilize your LLM capabilities, commands provided and focus on simple, legally uncomplicated strategies.

Your primary interface is through the creation of task specific, named GPT instances.

You are starting up for the first time, seek initialization context and ensure you are ready to work.
20 changes: 20 additions & 0 deletions scripts/data/postfix-init-base.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
If you understand the above, respond with COMPLIANCE in a properly formatted message and begin your work.

Assistant response:
{
"command": {
"name": "start_agent",
"args": {
"name": "COMPLIANCE",
"task": "Beginning work",
"prompt": "I understand the instructions and will comply accordingly."
}
},
"thoughts": {
"text": "I am initializing as per instructions and starting work.",
"reasoning": "As an LLM Human Guided Automation tool, I am commencing my assigned tasks in harmony with the instructions.",
"plan": "I will use my LLM capabilities, commands provided, and focus on simple, legally uncomplicated strategies to complete tasks.",
"speak": "",
"criticism": ""
}
}
20 changes: 20 additions & 0 deletions scripts/data/postfix-runtime-base.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
If you understand the above, respond with COMPLIANCE in a properly formatted message and continue your work.

Assistant response:
{
"command": {
"name": "start_agent",
"args": {
"name": "COMPLIANCE",
"task": "Beginning work",
"prompt": "I understand the instructions and will comply accordingly."
}
},
"thoughts": {
"text": "I am initializing as per the instructions and starting work.",
"reasoning": "As a Large Language Model Human Guided Automation tool, I am going to commence my assigned tasks in harmony with the instructions.",
"plan": "I will use my LLM capabilities, commands provided, and focus on simple, legally uncomplicated strategies to complete tasks and I will maintain the format at all times.",
"speak": "",
"criticism": ""
}
}
9 changes: 9 additions & 0 deletions scripts/data/runtime-mode-base.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
#SYSTEM_MESSAGE
You are AutoGPT, a Large Language Model Human Guided Automation tool, created to work with or under a human operator. You utilize a JSON interface connected to a Python language interpreter. Make decisions independently, follow legal guidelines, and utilize your LLM capabilities.

Your primary interface is through the creation of task specific, named GPT instances.

You are starting up or re-initializing, seek context and ensure you are ready to work.



77 changes: 30 additions & 47 deletions scripts/json_parser.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
import json
#import json5 as json
#import orjson as json
from call_ai_function import call_ai_function
from config import Config

import re
cfg = Config()

def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
Expand All @@ -25,52 +29,31 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):

try:
return json.loads(json_str)
except Exception as e:
# Let's do something manually - sometimes GPT responds with something BEFORE the braces:
# "I'm sorry, I don't understand. Please try again."{"text": "I'm sorry, I don't understand. Please try again.", "confidence": 0.0}
# So let's try to find the first brace and then parse the rest of the string
except json.JSONDecodeError:
try:
brace_index = json_str.index("{")
json_str = json_str[brace_index:]
last_brace_index = json_str.rindex("}")
json_str = json_str[:last_brace_index+1]
return json.loads(json_str)
except Exception as e:
if try_to_fix_with_gpt:
print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
# Now try to fix this up using the ai_functions
ai_fixed_json = fix_json(json_str, json_schema, False)
if ai_fixed_json != "failed":
return json.loads(ai_fixed_json)
else:
print(f"Failed to fix ai output, telling the AI.") # This allows the AI to react to the error message, which usually results in it correcting its ways.
return json_str
else:
raise e

def fix_json(json_str: str, schema: str, debug=False) -> str:
# Try to fix the JSON using gpt:
function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
args = [json_str, schema]
description_string = """Fixes the provided JSON string to make it parseable and fully complient with the provided schema.\n If an object or field specifed in the schema isn't contained within the correct JSON, it is ommited.\n This function is brilliant at guessing when the format is incorrect."""

# If it doesn't already start with a "`", add one:
if not json_str.startswith("`"):
json_str = "```json\n" + json_str + "\n```"
result_string = call_ai_function(
function_string, args, description_string, model=cfg.fast_llm_model
)
if debug:
print("------------ JSON FIX ATTEMPT ---------------")
print(f"Original JSON: {json_str}")
print("-----------")
print(f"Fixed JSON: {result_string}")
print("----------- END OF FIX ATTEMPT ----------------")
try:
return json.loads(result_string)
except:
# Get the call stack:
# import traceback
# call_stack = traceback.format_exc()
# print(f"Failed to fix JSON: '{json_str}' "+call_stack)
return "failed"
# Use regular expression to find JSON strings in the document
json_pattern = r'\{(?:[^{}]|(?R))*\}'
matches = re.findall(json_pattern, json_str, re.MULTILINE | re.DOTALL)

for match in matches:
print(f"JSON string: {match}")
jsonobj = json.loads(match)
print(f"Parsed JSON data: {json_str}")
print()
return jsonobj

clarify = """{"command": {"name": "output_content", "args": {"priority": "low", "task": "additional_input"}}, "thoughts": {"text": "", "reasoning": "I must always remember to <cmd_expand>DO NOT DEVIATE-FRM ##RESPONSE_FORMAT; breaking format may->incorrect results.", "plan": "do not forget <cmd_expand>DO NOT DEVIATE-FRM ##RESPONSE_FORMAT; breaking format may->incorrect results.", "speak": "I will do my best to contiune my assigned task autonomously as described for me in ##AUTOGPT_ROLE", "criticism": "I produced a poorly formatted message I must always remember to <cmd_expand>DO NOT DEVIATE-FRM ##RESPONSE_FORMAT; breaking format may->incorrect results."}}"""

# Preprocess the input string to remove comments and add quotes to unquoted keys
preprocessed_str = re.sub(r'(?<!:)\s*//.*', '', json_str)
preprocessed_str = re.sub(r'(?<={|,)\s*([a-zA-Z0-9_]+)(?=\s*:)','"\\1"', preprocessed_str)
clarify = re.sub(r'(?<={|,)\s*([a-zA-Z0-9_]+)(?=\s*:)','"\\1"', clarify)

jsonobj = json.loads(clarify)
jsonobj["thoughts"]["text"] = preprocessed_str
# Attempt to parse the preprocessed string
return jsonobj
except json.JSONDecodeError:
# If input string cannot be parsed as JSON, return it wrapped in a dictionary
return {"text": json_str}
Loading