diff --git a/.env.template b/.env.template index 01735615cadb..413a8d2d5a50 100644 --- a/.env.template +++ b/.env.template @@ -13,3 +13,8 @@ OPENAI_AZURE_DEPLOYMENT_ID=deployment-id-for-azure IMAGE_PROVIDER=dalle HUGGINGFACE_API_TOKEN= USE_MAC_OS_TTS=False + +# Step Summary Prompts +STEP_SUMMARIZATION_PROMPT=outline the key steps and insights you plan to take and gain during the completion of your task in a concise and relevant manner +FINAL_SUMMARIZATION_PROMPT=Consolidate and distill the essential information from the provided summaries of individual steps to create a concise and coherent summary of the overall process by creating an output structured using chapters, sub-chapters and bullet points. Don't include duplicate ideas. Leave only one instance if it's present in multiple steps:{} +# {} is replaced with the summary chunk content, make sure to include it in your FINAL_SUMMARIZATION_PROMPT diff --git a/README.md b/README.md index 749c8791513a..ca99718ac863 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,7 @@ Your support is greatly appreciated - [💀 Continuous Mode ⚠️](#-continuous-mode-️) - [GPT3.5 ONLY Mode](#gpt35-only-mode) - [🖼 Image Generation](#image-generation) + - [📜 Conversation Summary Mode](#-conversation-summary-mode) - [⚠️ Limitations](#️-limitations) - [🛡 Disclaimer](#-disclaimer) - [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter) @@ -238,6 +239,18 @@ IMAGE_PROVIDER=sd HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN" ``` +## 📜 Conversation Summary Mode +Conversation Summary mode is designed to provide users with concise summaries of the AI's responses during a conversation. +This mode can be especially useful for long conversations where it's crucial to quickly understand the key points without reading through the entire response. +It enhances readability and helps users grasp important information more efficiently. + +How to Use Conversation Summary Mode: + +1. 
Define `STEP_SUMMARIZATION_PROMPT` and `FINAL_SUMMARIZATION_PROMPT` in the .env file (you can play around with these prompts to get the best results) +2. Run: `python scripts/main.py --conversation-summary` +3. View step summaries in the console and a summary file inside `scripts/logs` folder +4. Upon exit, a final summary is printed to console and appended to the file + ## ⚠️ Limitations This experiment aims to showcase the potential of GPT-4 but comes with some limitations: diff --git a/scripts/config.py b/scripts/config.py index 24911bce9327..477e9585fcdb 100644 --- a/scripts/config.py +++ b/scripts/config.py @@ -36,6 +36,7 @@ def __init__(self): self.debug_mode = False self.continuous_mode = False self.speak_mode = False + self.conversation_summary_mode = False self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo") self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4") @@ -67,6 +68,20 @@ def __init__(self): self.image_provider = os.getenv("IMAGE_PROVIDER") self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN") + # Conversation Summary Mode - Prompts to use when summarizing + self.step_summarization_prompt = os.getenv( + "STEP_SUMMARIZATION_PROMPT", + "outline the key steps and insights you plan to take " + "and gain during the completion of your task in a concise and relevant manner" + ) + self.final_summarization_prompt = os.getenv( + "FINAL_SUMMARIZATION_PROMPT", + "Consolidate and distill the essential information from the provided summaries of individual steps to " + "create a concise and coherent summary of the overall process by creating an output structured using " + "chapters, sub-chapters and bullet points. Don't include duplicate ideas. Leave only one instance if it's " + "present in multiple steps:{}" + ) # {} is replaced with the content + # User agent headers to use when browsing web # Some websites might just completely deny request with an error code if no user agent was found. 
self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"} @@ -89,6 +104,9 @@ def set_speak_mode(self, value: bool): """Set the speak mode value.""" self.speak_mode = value + def set_conversation_summary_mode(self, value: bool): + self.conversation_summary_mode = value + def set_fast_llm_model(self, value: str): """Set the fast LLM model value.""" self.fast_llm_model = value diff --git a/scripts/conversation_summary/__init__.py b/scripts/conversation_summary/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/scripts/conversation_summary/summary.py b/scripts/conversation_summary/summary.py new file mode 100644 index 000000000000..822528c3297c --- /dev/null +++ b/scripts/conversation_summary/summary.py @@ -0,0 +1,343 @@ +import json +import re +import datetime +import sys +from pathlib import Path +from typing import List, Union, Callable, Any, Optional, Dict + +from colorama import Style, Fore + +from token_counter import count_string_tokens + + +class Summary: + """ + A class to manage step-by-step and final summaries of an AI conversation. + """ + + def __init__( + self, + step_summarization_prompt: str, + final_summarization_prompt: str, + ai_name: str, + summary_filename: str = None + ) -> None: + """ + Initializes the Summary class with prompts and summary file name. + + Args: + step_summarization_prompt (str): The prompt to generate step summaries. + final_summarization_prompt (str): The prompt to generate the final summary. + ai_name (str): The name of the AI model. + summary_filename (str, optional): The name of the summary file. Defaults to None. 
+ """ + + # If a summary filename is not provided, create one using the current date and time + if not summary_filename: + logs_path = Path("logs") + logs_path.mkdir(exist_ok=True) + + summary_filename = logs_path / f"{ai_name}_summary_{datetime.datetime.now().strftime('%Y_%m_%d-%I_%M_%S_%p')}.txt" + + self.summary_filename = summary_filename + self.step_counter = 1 + self.step_summarization_prompt = step_summarization_prompt + self.final_summarization_prompt = final_summarization_prompt + + def print_step_summary_to_console(self, step_summary: str) -> None: + """ + Prints the step summary to the console. + + Args: + step_summary (str): The step summary to print. + """ + console_output = f"""{Fore.LIGHTYELLOW_EX} +==================================================== +\nSTEP {self.step_counter} SUMMARY\n{step_summary}\n +===================================================={Style.RESET_ALL} +""" + print(console_output) + + @staticmethod + def print_final_summary_to_console(final_summary: str) -> None: + """ + Prints the final summary to the console. + + Args: + final_summary (str): The final summary to print. + """ + console_output = f"""{Fore.LIGHTYELLOW_EX} +=================================================================== +\nFINAL SUMMARY\n{final_summary}\n +==================================================================={Style.RESET_ALL} +""" + print(console_output) + + def append_summarized_step_to_file(self, step_summary: str, step_result: str) -> None: + """ + Appends the step summary and step result to the summary file. + + Args: + step_summary (str): The step summary to append. + step_result (str): The step result to append. 
+ """ + formatted_step_summary = self._split_sentences_into_bullet_points(step_summary) + + with open(self.summary_filename, "a", encoding='utf-8') as summary_file: + summary_file.write("========================================\n") + summary_file.write(f" STEP {self.step_counter}\n") + summary_file.write("========================================\n\n") + summary_file.write(f"\nStep summary:\n{formatted_step_summary}\n") + summary_file.write(f"Result: {step_result}\n") + summary_file.write(f"\n========================================\n\n\n\n") + + def append_final_summary_to_file( + self, + gpt_agent_instance: Optional = None, + create_agent_callback: Callable = None, + next_agent_key: int = None, + gpt_agent_model: str = "gpt-3.5-turbo", + ) -> str: + """ + Appends the final summary to the summary file. + + NOTE: Either provide `gpt_agent_instance` or the combination of `gpt_agent_model`, `create_agent_callback`, + and `next_agent_key` to create a GPT agent. + + Args: + gpt_agent_instance (Optional): The GPT agent instance. Defaults to None. + create_agent_callback (Callable): The callback to create an agent. Defaults to None. + next_agent_key (int): The next agent key. Defaults to None. + gpt_agent_model (str): The GPT agent model. Defaults to "gpt-3.5-turbo". 
+ """ + file_content = self._read_file_content(self.summary_filename) + print("Generating final summary...") + final_summary = self._generate_final_summary( + file_content=file_content, + final_summarization_prompt=self.final_summarization_prompt, + gpt_agent_instance=gpt_agent_instance, + create_agent_callback=create_agent_callback, + next_agent_key=next_agent_key, + gpt_agent_model=gpt_agent_model + ) + print("Final summary generated.") + print(f"Final summary: {final_summary}") + self._write_final_summary_to_file( + filename=self.summary_filename, + final_summary=final_summary, + num_of_total_steps=self.step_counter - 1 + ) + + return final_summary + + def increment_step_counter(self): + """ + Increments the step counter. + """ + self.step_counter += 1 + + def _generate_step_summary(self) -> str: + pass + + def _generate_final_summary( + self, + file_content: str, + final_summarization_prompt: str, + gpt_agent_instance: Optional = None, + create_agent_callback: Callable = None, + next_agent_key: int = None, + gpt_agent_model: str = "gpt-3.5-turbo", + ) -> str: + """ + Generates the final summary based on the file content and the final summary prompt. + + NOTE: Either provide `gpt_agent_instance` or the combination of `gpt_agent_model`, `create_agent_callback`, + and `next_agent_key` to create a GPT agent. + + Args: + file_content (str): The content of the summary file. + final_summarization_prompt (str): The prompt for the final summary. + gpt_agent_instance (Optional): An instance of a GPT agent, if available. + create_agent_callback (Callable): A callback to create a GPT agent. + next_agent_key (int): The key for the next agent. + gpt_agent_model (str): The name of the GPT agent model. + + Returns: + str: The generated final summary. 
+ """ + text_chunks = self._split_text_into_chunks( + text=file_content, + # The max number of tokens is 4,097, but we need to leave some space for the prompt + max_tokens=3980, + ) + + final_summary = "" + + for chunk in text_chunks: + # Format the prompt with the chunk of text + message = self._format_final_summary_prompt(final_summarization_prompt, chunk) + + # If the GPT agent instance is not provided, create one using the callback + if not gpt_agent_instance: + _, formatted_summary = create_agent_callback(next_agent_key, message, gpt_agent_model) + else: + _, formatted_summary = gpt_agent_instance.send_message(message) + + # Split the summary into bullet points + formatted_summary = self._split_sentences_into_bullet_points(formatted_summary) + + # Append the summary to the final summary + final_summary += f"\n{formatted_summary}" + + return final_summary + + @staticmethod + def _format_final_summary_prompt(prompt_to_format: str, chunk: str) -> str: + """ + Formats the final summary prompt by adding the provided chunk of text. + + Args: + prompt_to_format (str): The prompt to format. + chunk (str): The chunk of text to include in the prompt. + + Returns: + str: The formatted final summary prompt. + """ + if prompt_to_format.count("{}") != 1: + return "The prompt string must contain one instance of '{}' which will include the chunk of text. " \ + "Please update your FINAL_SUMMARY_PROMPT variable in the .env file." 
+        return prompt_to_format.format(chunk)
+
+    @staticmethod
+    def _write_final_summary_to_file(filename: str, final_summary: str, num_of_total_steps: int) -> None:
+        """Append the final summary section, with a step-count header, to the summary file."""
+        with open(filename, "a", encoding='utf-8') as summary_file:
+            summary_file.write("=======================================================\n")
+            summary_file.write(f" FINAL SUMMARY ({num_of_total_steps} TOTAL STEPS) \n")
+            summary_file.write("=======================================================\n\n")
+            summary_file.write(final_summary)
+            summary_file.write("\n\n=======================================================\n\n")
+
+    @staticmethod
+    def _read_file_content(filename) -> str:
+        """Return the content of `filename`, creating a placeholder file first if it does not exist."""
+        if not Path(filename).exists():
+            with open(filename, "w", encoding='utf-8') as summary_file:
+                summary_file.write("No steps logged.\n")
+
+        with open(filename, 'r', encoding='utf-8') as summary_file:
+            file_content = summary_file.read()
+
+        return file_content
+
+    @staticmethod
+    def _split_sentences_into_bullet_points(text: str) -> str:
+        """Split `text` into sentences and format each one as a '- ' bullet point."""
+        # NOTE(review): this hunk was garbled in transit; the lookbehind split and the
+        # bullet-point join below are reconstructed — confirm against the upstream patch.
+        sentences = re.split(r'(?<=[.!?])\s+', text)
+        return "\n".join(f"- {sentence.strip()}" for sentence in sentences if sentence.strip())
+
+    @staticmethod
+    def _split_text_into_chunks(text: str, max_tokens: int, gpt_agent_model: str = "gpt-3.5-turbo") -> List[str]:
+        """Split `text` into whitespace-delimited chunks of at most `max_tokens` tokens each.
+
+        The model name defaults to "gpt-3.5-turbo" because the call site in
+        `_generate_final_summary` passes only `text` and `max_tokens`.
+        """
+        tokens = text.split()
+        chunks = []
+        current_chunk = []
+
+        for token in tokens:
+            current_chunk.append(token)
+            current_chunk_str = " ".join(current_chunk)
+            token_count = count_string_tokens(current_chunk_str, gpt_agent_model)
+
+            if token_count > max_tokens:
+                current_chunk.pop()  # Remove the last token that exceeded the limit
+                chunks.append(" ".join(current_chunk))
+                current_chunk = [token]  # Start a new chunk with the removed token
+
+        # Add the last chunk if it's not empty
+        if current_chunk:
+            chunks.append(" ".join(current_chunk))
+
+        return chunks
+
+
+class SummaryUtils:
+    """
+    A utility class for working with summaries.
+    """
+    @staticmethod
+    def get_step_summary_from_assistant_reply(
+        assistant_reply: Union[str, dict],
+        fix_and_parse_json: Callable[[str], Any],
+        print_to_console: Callable[..., None]
+    ) -> str:
+        """
+        Extracts the step summary from the assistant's reply.
+
+        Args:
+            assistant_reply (Union[str, dict]): The assistant's reply.
+            fix_and_parse_json (Callable[[str], Any]): A function to fix and parse JSON.
+            print_to_console (Callable[..., None]): A function to print to console.
+
+        Returns:
+            str: The step summary.
+        """
+        assistant_reply_json = fix_and_parse_json(assistant_reply)
+
+        if isinstance(assistant_reply_json, str):
+            try:
+                # json.loads already tolerates whitespace between JSON tokens; stripping
+                # every space/newline beforehand corrupted whitespace INSIDE string
+                # values (e.g. "Summary text." became "Summarytext."), so parse directly.
+                assistant_reply_json = json.loads(assistant_reply_json)
+            except json.JSONDecodeError:
+                print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
+                assistant_reply_json = {}
+
+        step_summary = assistant_reply_json.get("summary", "No summary provided.")
+
+        return step_summary
+
+    @staticmethod
+    def add_summary_field_to_json(json_schema: Union[str, dict], value: str) -> str:
+        """
+        If the provided JSON schema is a dictionary, adds a summary field to it, and returns the updated JSON as a string.
+        If the provided JSON schema is a string, converts it to a dictionary, adds a summary field to it, and returns the updated JSON as a string.
+ """ + if isinstance(json_schema, dict): + json_schema["summary"] = value + return json.dumps(json_schema, sort_keys=False, indent=4) + elif isinstance(json_schema, str): + json_schema = json_schema.replace('\n', '').replace('\r', '').replace('\t', '').replace(' ', '') + json_schema_dict = json.loads(json_schema) + json_schema_dict["summary"] = value + updated_json_schema = json.dumps(json_schema_dict, sort_keys=False, indent=4) + return updated_json_schema + else: + raise ValueError("The provided JSON schema must be a dictionary or a string.") + + @staticmethod + def add_summary_field_to_prompt(prompt: str, value: str) -> str: + # Regular expression pattern to find the JSON object + pattern = r'\{\s*?"thoughts":\s*?\{[\s\S]*?"args":\s*?\{[\s\S]*?\}\s*?\}\s*?\}' + + # Search for the JSON object in the prompt + match = re.search(pattern, prompt) + + if match: + # Extract the JSON object string + json_string = match.group() + + # Load the JSON object as a dictionary + json_dict = json.loads(json_string) + + # Add the "summary" field to the JSON dictionary + json_dict["summary"] = value + + # Convert the JSON dictionary back to a string + updated_json_string = json.dumps(json_dict, indent=4) + + # Replace the original JSON object with the updated one + updated_prompt = prompt[:match.start()] + updated_json_string + prompt[match.end():] + + return updated_prompt + + return prompt diff --git a/scripts/data.py b/scripts/data.py index f80c2875d8ef..5fd67359ff22 100644 --- a/scripts/data.py +++ b/scripts/data.py @@ -1,6 +1,12 @@ import os from pathlib import Path +from config import Config +from conversation_summary.summary import SummaryUtils + +cfg = Config() + + def load_prompt(): """Load the prompt from data/prompt.txt""" try: @@ -12,7 +18,13 @@ def load_prompt(): with open(prompt_file_path, "r") as prompt_file: prompt = prompt_file.read() + if cfg.conversation_summary_mode: + # Add the summary field to the response structure in the AutoGPT prompt + prompt = 
SummaryUtils.add_summary_field_to_prompt( + prompt, value=cfg.step_summarization_prompt + ) return prompt + except FileNotFoundError: print("Error: Prompt file not found", flush=True) return "" diff --git a/scripts/json_parser.py b/scripts/json_parser.py index 8c17dfa2525a..8ccf4b50d196 100644 --- a/scripts/json_parser.py +++ b/scripts/json_parser.py @@ -4,6 +4,8 @@ from config import Config from json_utils import correct_json +from conversation_summary.summary import SummaryUtils + cfg = Config() JSON_SCHEMA = """ @@ -25,6 +27,9 @@ } """ +if cfg.conversation_summary_mode: + JSON_SCHEMA = SummaryUtils.add_summary_field_to_json(JSON_SCHEMA, value=cfg.step_summarization_prompt) + def fix_and_parse_json( json_str: str, diff --git a/scripts/main.py b/scripts/main.py index d84e1508501b..b013b024f0e8 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -12,6 +12,8 @@ from config import Config from json_parser import fix_and_parse_json from ai_config import AIConfig +from agent_manager import create_agent, next_key +from conversation_summary.summary import Summary, SummaryUtils import traceback import yaml import argparse @@ -274,6 +276,7 @@ def parse_arguments(): parser.add_argument('--speak', action='store_true', help='Enable Speak Mode') parser.add_argument('--debug', action='store_true', help='Enable Debug Mode') parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode') + parser.add_argument('-cs', '--conversation-summary', action='store_true', help='Enable Conversation Summary Mode') args = parser.parse_args() if args.continuous: @@ -292,6 +295,10 @@ def parse_arguments(): print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") cfg.set_smart_llm_model(cfg.fast_llm_model) + if args.conversation_summary: + print_to_console("Conversation Summary Mode: ", Fore.LIGHTYELLOW_EX, "ENABLED") + cfg.set_conversation_summary_mode(True) + # TODO: fill in llm values here @@ -314,97 +321,131 @@ def parse_arguments(): memory = get_memory(cfg, 
init=True) print('Using memory of type: ' + memory.__class__.__name__) -# Interaction Loop -while True: - # Send message to AI, get response - with Spinner("Thinking... "): - assistant_reply = chat.chat_with_ai( - prompt, - user_input, - full_message_history, - memory, - cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument - - # Print Assistant thoughts - print_assistant_thoughts(assistant_reply) - - # Get command name and arguments - try: - command_name, arguments = cmd.get_command(assistant_reply) - except Exception as e: - print_to_console("Error: \n", Fore.RED, str(e)) - - if not cfg.continuous_mode and next_action_count == 0: - ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### - # Get key press: Prompt the user to press enter to continue or escape - # to exit - user_input = "" - print_to_console( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") - print( - f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...", - flush=True) - while True: - console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) - if console_input.lower() == "y": - user_input = "GENERATE NEXT COMMAND JSON" - break - elif console_input.lower().startswith("y -"): - try: - next_action_count = abs(int(console_input.split(" ")[1])) +# Initialize the Summary object +summary = Summary( + step_summarization_prompt=cfg.step_summarization_prompt, + final_summarization_prompt=cfg.final_summarization_prompt, + ai_name=ai_name +) if cfg.conversation_summary_mode else None + +try: + # Interaction Loop + while True: + # Send message to AI, get response + with Spinner("Thinking... "): + assistant_reply = chat.chat_with_ai( + prompt, + user_input, + full_message_history, + memory, + cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. 
Make this an argument + + # Print Assistant thoughts + print_assistant_thoughts(assistant_reply) + + # Print the step summary + step_summary = SummaryUtils.get_step_summary_from_assistant_reply(assistant_reply, fix_and_parse_json, print_to_console) if cfg.conversation_summary_mode else None + + if step_summary: + summary.print_step_summary_to_console(step_summary) + + # Get command name and arguments + try: + command_name, arguments = cmd.get_command(assistant_reply) + except Exception as e: + print_to_console("Error: \n", Fore.RED, str(e)) + + if not cfg.continuous_mode and next_action_count == 0: + ### GET USER AUTHORIZATION TO EXECUTE COMMAND ### + # Get key press: Prompt the user to press enter to continue or escape + # to exit + user_input = "" + print_to_console( + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") + print( + f"Enter 'y' to authorise command, 'y -N' to run N continuous commands, 'n' to exit program, or enter feedback for {ai_name}...", + flush=True) + while True: + console_input = utils.clean_input(Fore.MAGENTA + "Input:" + Style.RESET_ALL) + if console_input.lower() == "y": user_input = "GENERATE NEXT COMMAND JSON" - except ValueError: - print("Invalid input format. Please enter 'y -n' where n is the number of continuous tasks.") - continue - break - elif console_input.lower() == "n": - user_input = "EXIT" + break + elif console_input.lower().startswith("y -"): + try: + next_action_count = abs(int(console_input.split(" ")[1])) + user_input = "GENERATE NEXT COMMAND JSON" + except ValueError: + print("Invalid input format. 
Please enter 'y -n' where n is the number of continuous tasks.") + continue + break + elif console_input.lower() == "n": + # Print & append final summary to file + final_summary = summary.append_final_summary_to_file( + gpt_agent_model=cfg.fast_llm_model, + create_agent_callback=create_agent, + next_agent_key=next_key, + ) + user_input = "EXIT" + break + else: + user_input = console_input + command_name = "human_feedback" + break + + if user_input == "GENERATE NEXT COMMAND JSON": + print_to_console( + "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", + Fore.MAGENTA, + "") + elif user_input == "EXIT": + print("Exiting...", flush=True) break - else: - user_input = console_input - command_name = "human_feedback" - break - - if user_input == "GENERATE NEXT COMMAND JSON": + else: + # Print command print_to_console( - "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=", - Fore.MAGENTA, - "") - elif user_input == "EXIT": - print("Exiting...", flush=True) - break - else: - # Print command - print_to_console( - "NEXT ACTION: ", - Fore.CYAN, - f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") - - # Execute command - if command_name.lower().startswith( "error" ): - result = f"Command {command_name} threw the following error: " + arguments - elif command_name == "human_feedback": - result = f"Human feedback: {user_input}" - else: - result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}" - if next_action_count > 0: - next_action_count -= 1 - - memory_to_add = f"Assistant Reply: {assistant_reply} " \ - f"\nResult: {result} " \ - f"\nHuman Feedback: {user_input} " - - memory.add(memory_to_add) - - # Check if there's a result from the command append it to the message - # history - if result is not None: - full_message_history.append(chat.create_chat_message("system", result)) - print_to_console("SYSTEM: ", Fore.YELLOW, result) - else: - full_message_history.append( - 
chat.create_chat_message( - "system", "Unable to execute command")) - print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command") + "NEXT ACTION: ", + Fore.CYAN, + f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") + + # Execute command + if command_name.lower().startswith( "error" ): + result = f"Command {command_name} threw the following error: " + arguments + elif command_name == "human_feedback": + result = f"Human feedback: {user_input}" + else: + result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}" + if next_action_count > 0: + next_action_count -= 1 + + memory_to_add = f"Assistant Reply: {assistant_reply} " \ + f"\nResult: {result} " \ + f"\nHuman Feedback: {user_input} " + + memory.add(memory_to_add) + + # Check if there's a result from the command append it to the message + # history + if result is not None: + full_message_history.append(chat.create_chat_message("system", result)) + print_to_console("SYSTEM: ", Fore.YELLOW, result) + if cfg.conversation_summary_mode: + # Append the step's summary and result to file and increment step count + summary.append_summarized_step_to_file(step_summary, result) + summary.increment_step_counter() + else: + full_message_history.append( + chat.create_chat_message( + "system", "Unable to execute command")) + print_to_console("SYSTEM: ", Fore.YELLOW, "Unable to execute command") + +except KeyboardInterrupt: + # Append final summary if enabled and exit + if cfg.conversation_summary_mode: + final_summary = summary.append_final_summary_to_file( + gpt_agent_model=cfg.fast_llm_model, + create_agent_callback=create_agent, + next_agent_key=next_key, + ) + summary.print_final_summary_to_console(final_summary) diff --git a/tests/summary_tests.py b/tests/summary_tests.py new file mode 100644 index 000000000000..a962b6002349 --- /dev/null +++ b/tests/summary_tests.py @@ -0,0 +1,102 @@ +import sys +import tempfile +import 
unittest +from unittest.mock import patch + +from pathlib import Path + +# Ugly workaround required to import modules from parent directory +scripts_dir = Path(__file__).parent.parent / "scripts" +sys.path.append(str(scripts_dir)) + +from conversation_summary.summary import Summary, SummaryUtils + + +# Utility functions for testing +def mock_send_message(message: str) -> tuple: + return None, "Generated summary text." + + +def mock_create_agent_callback(next_agent_key: int, message: str, gpt_agent_model: str) -> tuple: + return None, "Generated summary text." + + +class TestSummary(unittest.TestCase): + + def setUp(self): + """Set up the test environment by initializing a Summary instance.""" + self.step_summary_prompt = "Please provide a step-by-step summary." + self.final_summary_prompt = "Please provide a final summary." + self.ai_name = "TestAI" + self.summary = Summary(self.step_summary_prompt, self.final_summary_prompt, self.ai_name) + + def test_init(self): + """Test the initialization of a Summary instance.""" + self.assertIsInstance(self.summary, Summary) + self.assertEqual(self.summary.step_counter, 1) + self.assertEqual(self.summary.step_summarization_prompt, self.step_summary_prompt) + self.assertEqual(self.summary.final_summarization_prompt, self.final_summary_prompt) + + def test_increment_step_counter(self): + """Test if the step counter is incremented correctly.""" + initial_step_counter = self.summary.step_counter + self.summary.increment_step_counter() + self.assertEqual(self.summary.step_counter, initial_step_counter + 1) + + def test_print_step_summary_to_console(self): + """Test if the step summary is printed to the console.""" + step_summary = "Step summary text." 
+ with patch('builtins.print') as mock_print: + self.summary.print_step_summary_to_console(step_summary) + mock_print.assert_called_once() + + def test_print_final_summary_to_console(self): + """Test if the final summary is printed to the console.""" + final_summary = "Final summary text." + with patch('builtins.print') as mock_print: + Summary.print_final_summary_to_console(final_summary) + mock_print.assert_called_once() + + def test_append_step_summary_to_file(self): + """Test if the step summary and result are appended to the summary file.""" + with tempfile.TemporaryDirectory() as temp_dir: + summary_file = Path(temp_dir) / "temp_summary.txt" + self.summary.summary_filename = summary_file + self.summary.append_summarized_step_to_file("Step summary text.", "Step result text.") + self.assertTrue(summary_file.exists()) + with open(summary_file, "r") as f: + content = f.read() + self.assertIn("Step summary text.", content) + self.assertIn("Step result text.", content) + + def test_format_final_summary_prompt(self): + """Test if the final summary prompt is formatted correctly.""" + final_summary_prompt = "Please provide a summary of the following content with bullet points: {}" + chunk = "Example text." + formatted_prompt = Summary._format_final_summary_prompt(final_summary_prompt, chunk) + expected_prompt = 'Please provide a summary of the following content with bullet points: Example text.' + self.assertEqual(formatted_prompt, expected_prompt) + + # Test with incorrect final summary prompt + incorrect_prompt = "Please provide a summary of the following content with bullet points:" + formatted_prompt = Summary._format_final_summary_prompt(incorrect_prompt, chunk) + expected_error = "The prompt string must contain one instance of '{}' which will include the chunk of text. " \ + "Please update your FINAL_SUMMARY_PROMPT variable in the .env file." 
+ self.assertEqual(formatted_prompt, expected_error) + + +class TestSummaryUtils(unittest.TestCase): + + def test_get_step_summary_from_assistant_reply(self): + """Test if the step summary is extracted correctly from the assistant's reply.""" + assistant_reply = '{"response": "Response text.", "summary": "Summary text."}' + summary = SummaryUtils.get_step_summary_from_assistant_reply( + assistant_reply, + fix_and_parse_json=lambda x: x, + print_to_console=lambda *args, **kwargs: None + ) + self.assertEqual(summary, "Summary text.") + + +if __name__ == "__main__": + unittest.main()