From deaf43a1e7660f7141630e96973842fa2194ab78 Mon Sep 17 00:00:00 2001 From: Wael Karkoub Date: Tue, 6 Aug 2024 20:26:19 -0500 Subject: [PATCH 1/3] remove old files --- .../contrib/capabilities/context_handling.py | 138 --- .../agentchat/contrib/compressible_agent.py | 436 --------- ...hat_capability_long_context_handling.ipynb | 687 -------------- notebook/agentchat_compression.ipynb | 876 ------------------ .../capabilities/test_context_handling.py | 231 ----- .../contrib/test_compressible_agent.py | 230 ----- 6 files changed, 2598 deletions(-) delete mode 100644 autogen/agentchat/contrib/capabilities/context_handling.py delete mode 100644 autogen/agentchat/contrib/compressible_agent.py delete mode 100644 notebook/agentchat_capability_long_context_handling.ipynb delete mode 100644 notebook/agentchat_compression.ipynb delete mode 100755 test/agentchat/contrib/capabilities/test_context_handling.py delete mode 100755 test/agentchat/contrib/test_compressible_agent.py diff --git a/autogen/agentchat/contrib/capabilities/context_handling.py b/autogen/agentchat/contrib/capabilities/context_handling.py deleted file mode 100644 index 44b10259f1b..00000000000 --- a/autogen/agentchat/contrib/capabilities/context_handling.py +++ /dev/null @@ -1,138 +0,0 @@ -import sys -from typing import Dict, List, Optional -from warnings import warn - -import tiktoken -from termcolor import colored - -from autogen import ConversableAgent, token_count_utils - -warn( - "Context handling with TransformChatHistory is deprecated and will be removed in `0.2.30`. " - "Please use `TransformMessages`, documentation can be found at https://microsoft.github.io/autogen/docs/topics/handling_long_contexts/intro_to_transform_messages", - DeprecationWarning, - stacklevel=2, -) - - -class TransformChatHistory: - """ - An agent's chat history with other agents is a common context that it uses to generate a reply. - This capability allows the agent to transform its chat history prior to using it to generate a reply. - It does not permanently modify the chat history, but rather processes it on every invocation. - - This capability class enables various strategies to transform chat history, such as: - - Truncate messages: Truncate each message to first maximum number of tokens. - - Limit number of messages: Truncate the chat history to a maximum number of (recent) messages. - - Limit number of tokens: Truncate the chat history to number of recent N messages that fit in - maximum number of tokens. - Note that the system message, because of its special significance, is always kept as is. - - The three strategies can be combined. For example, when each of these parameters are specified - they are used in the following order: - 1. First truncate messages to a maximum number of tokens - 2. Second, it limits the number of message to keep - 3. Third, it limits the total number of tokens in the chat history - - When adding this capability to an agent, the following are modified: - - A hook is added to the hookable method `process_all_messages_before_reply` to transform the - received messages for possible truncation. - Not modifying the stored message history. - """ - - def __init__( - self, - *, - max_tokens_per_message: Optional[int] = None, - max_messages: Optional[int] = None, - max_tokens: Optional[int] = None, - ): - """ - Args: - max_tokens_per_message (Optional[int]): Maximum number of tokens to keep in each message. - max_messages (Optional[int]): Maximum number of messages to keep in the context. 
- max_tokens (Optional[int]): Maximum number of tokens to keep in the context. - """ - self.max_tokens_per_message = max_tokens_per_message if max_tokens_per_message else sys.maxsize - self.max_messages = max_messages if max_messages else sys.maxsize - self.max_tokens = max_tokens if max_tokens else sys.maxsize - - def add_to_agent(self, agent: ConversableAgent): - """ - Adds TransformChatHistory capability to the given agent. - """ - agent.register_hook(hookable_method="process_all_messages_before_reply", hook=self._transform_messages) - - def _transform_messages(self, messages: List[Dict]) -> List[Dict]: - """ - Args: - messages: List of messages to process. - - Returns: - List of messages with the first system message and the last max_messages messages, - ensuring each message does not exceed max_tokens_per_message. - """ - temp_messages = messages.copy() - processed_messages = [] - system_message = None - processed_messages_tokens = 0 - - if messages[0]["role"] == "system": - system_message = messages[0].copy() - temp_messages.pop(0) - - total_tokens = sum( - token_count_utils.count_token(msg["content"]) for msg in temp_messages - ) # Calculate tokens for all messages - - # Truncate each message's content to a maximum token limit of each message - - # Process recent messages first - for msg in reversed(temp_messages[-self.max_messages :]): - msg["content"] = truncate_str_to_tokens(msg["content"], self.max_tokens_per_message) - msg_tokens = token_count_utils.count_token(msg["content"]) - if processed_messages_tokens + msg_tokens > self.max_tokens: - break - # append the message to the beginning of the list to preserve order - processed_messages = [msg] + processed_messages - processed_messages_tokens += msg_tokens - if system_message: - processed_messages.insert(0, system_message) - # Optionally, log the number of truncated messages and tokens if needed - num_truncated = len(messages) - len(processed_messages) - - if num_truncated > 0 or total_tokens > processed_messages_tokens: - print( - colored( - f"Truncated {num_truncated} messages. Reduced from {len(messages)} to {len(processed_messages)}.", - "yellow", - ) - ) - print( - colored( - f"Truncated {total_tokens - processed_messages_tokens} tokens. Tokens reduced from {total_tokens} to {processed_messages_tokens}", - "yellow", - ) - ) - return processed_messages - - -def truncate_str_to_tokens(text: str, max_tokens: int, model: str = "gpt-3.5-turbo-0613") -> str: - """Truncate a string so that the number of tokens is less than or equal to max_tokens using tiktoken. - - Args: - text: The string to truncate. - max_tokens: The maximum number of tokens to keep. - model: The target OpenAI model for tokenization alignment. - - Returns: - The truncated string. 
- """ - - encoding = tiktoken.encoding_for_model(model) # Get the appropriate tokenizer - - encoded_tokens = encoding.encode(text) - truncated_tokens = encoded_tokens[:max_tokens] - truncated_text = encoding.decode(truncated_tokens) # Decode back to text - - return truncated_text diff --git a/autogen/agentchat/contrib/compressible_agent.py b/autogen/agentchat/contrib/compressible_agent.py deleted file mode 100644 index bea4058b94a..00000000000 --- a/autogen/agentchat/contrib/compressible_agent.py +++ /dev/null @@ -1,436 +0,0 @@ -import copy -import inspect -import logging -from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union -from warnings import warn - -from autogen import Agent, ConversableAgent, OpenAIWrapper -from autogen.token_count_utils import count_token, get_max_token_limit, num_tokens_from_functions - -from ...formatting_utils import colored - -logger = logging.getLogger(__name__) - -warn( - "Context handling with CompressibleAgent is deprecated and will be removed in `0.2.30`. " - "Please use `TransformMessages`, documentation can be found at https://microsoft.github.io/autogen/docs/topics/handling_long_contexts/intro_to_transform_messages", - DeprecationWarning, - stacklevel=2, -) - - -class CompressibleAgent(ConversableAgent): - """CompressibleAgent agent. While this agent retains all the default functionalities of the `AssistantAgent`, - it also provides the added feature of compression when activated through the `compress_config` setting. - - `compress_config` is set to False by default, making this agent equivalent to the `AssistantAgent`. - This agent does not work well in a GroupChat: The compressed messages will not be sent to all the agents in the group. - The default system message is the same as AssistantAgent. - `human_input_mode` is default to "NEVER" - and `code_execution_config` is default to False. - This agent doesn't execute code or function call by default. - """ - - DEFAULT_SYSTEM_MESSAGE = """You are a helpful AI assistant. -Solve tasks using your coding and language skills. -In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute. - 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself. - 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly. -Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill. -When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user. -If you want the user to save the code in a file before executing it, put # filename: inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. 
Check the execution result returned by the user. -If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try. -When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible. -Reply "TERMINATE" in the end when everything is done. - """ - DEFAULT_COMPRESS_CONFIG = { - "mode": "TERMINATE", - "compress_function": None, - "trigger_count": 0.7, - "async": False, - "broadcast": True, - "verbose": False, - "leave_last_n": 2, - } - - def __init__( - self, - name: str, - system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE, - is_termination_msg: Optional[Callable[[Dict], bool]] = None, - max_consecutive_auto_reply: Optional[int] = None, - human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER", - function_map: Optional[Dict[str, Callable]] = None, - code_execution_config: Optional[Union[Dict, bool]] = False, - llm_config: Optional[Union[Dict, bool]] = None, - default_auto_reply: Optional[Union[str, Dict, None]] = "", - compress_config: Optional[Dict] = False, - description: Optional[str] = None, - **kwargs, - ): - """ - Args: - name (str): agent name. - system_message (str): system message for the ChatCompletion inference. - Please override this attribute if you want to reprogram the agent. - llm_config (dict): llm inference configuration. - Note: you must set `model` in llm_config. It will be used to compute the token count. - Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create) - for available options. - is_termination_msg (function): a function that takes a message in the form of a dictionary - and returns a boolean value indicating if this received message is a termination message. - The dict can contain the following keys: "content", "role", "name", "function_call". - max_consecutive_auto_reply (int): the maximum number of consecutive auto replies. - default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case). - The limit only plays a role when human_input_mode is not "ALWAYS". - compress_config (dict or True/False): config for compression before oai_reply. Default to False. - You should contain the following keys: - - "mode" (Optional, str, default to "TERMINATE"): Choose from ["COMPRESS", "TERMINATE", "CUSTOMIZED"]. - 1. `TERMINATE`: terminate the conversation ONLY when token count exceeds the max limit of current model. `trigger_count` is NOT used in this mode. - 2. `COMPRESS`: compress the messages when the token count exceeds the limit. - 3. `CUSTOMIZED`: pass in a customized function to compress the messages. - - "compress_function" (Optional, callable, default to None): Must be provided when mode is "CUSTOMIZED". - The function should takes a list of messages and returns a tuple of (is_compress_success: bool, compressed_messages: List[Dict]). - - "trigger_count" (Optional, float, int, default to 0.7): the threshold to trigger compression. - If a float between (0, 1], it is the percentage of token used. if a int, it is the number of tokens used. - - "async" (Optional, bool, default to False): whether to compress asynchronously. - - "broadcast" (Optional, bool, default to True): whether to update the compressed message history to sender. 
- - "verbose" (Optional, bool, default to False): Whether to print the content before and after compression. Used when mode="COMPRESS". - - "leave_last_n" (Optional, int, default to 0): If provided, the last n messages will not be compressed. Used when mode="COMPRESS". - description (str): a short description of the agent. This description is used by other agents - (e.g. the GroupChatManager) to decide when to call upon this agent. (Default: system_message) - **kwargs (dict): Please refer to other kwargs in - [ConversableAgent](../conversable_agent#__init__). - """ - super().__init__( - name=name, - system_message=system_message, - is_termination_msg=is_termination_msg, - max_consecutive_auto_reply=max_consecutive_auto_reply, - human_input_mode=human_input_mode, - function_map=function_map, - code_execution_config=code_execution_config, - llm_config=llm_config, - default_auto_reply=default_auto_reply, - description=description, - **kwargs, - ) - - self._set_compress_config(compress_config) - - # create a separate client for compression. - if llm_config is False: - self.llm_compress_config = False - self.compress_client = None - else: - if "model" not in llm_config: - raise ValueError("llm_config must contain the 'model' field.") - self.llm_compress_config = self.llm_config.copy() - # remove functions - if "functions" in self.llm_compress_config: - del self.llm_compress_config["functions"] - self.compress_client = OpenAIWrapper(**self.llm_compress_config) - - self._reply_func_list.clear() - self.register_reply([Agent, None], ConversableAgent.generate_oai_reply) - self.register_reply([Agent], CompressibleAgent.on_oai_token_limit) # check token limit - self.register_reply([Agent, None], ConversableAgent.generate_code_execution_reply) - self.register_reply([Agent, None], ConversableAgent.generate_function_call_reply) - self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply) - - def _set_compress_config(self, compress_config: Optional[Dict] = False): - if compress_config: - if compress_config is True: - compress_config = {} - if not isinstance(compress_config, dict): - raise ValueError("compress_config must be a dict or True/False.") - - allowed_modes = ["COMPRESS", "TERMINATE", "CUSTOMIZED"] - if compress_config.get("mode", "TERMINATE") not in allowed_modes: - raise ValueError(f"Invalid compression mode. Allowed values are: {', '.join(allowed_modes)}") - - self.compress_config = self.DEFAULT_COMPRESS_CONFIG.copy() - self.compress_config.update(compress_config) - - if not isinstance(self.compress_config["leave_last_n"], int) or self.compress_config["leave_last_n"] < 0: - raise ValueError("leave_last_n must be a non-negative integer.") - - # convert trigger_count to int, default to 0.7 - trigger_count = self.compress_config["trigger_count"] - if not (isinstance(trigger_count, int) or isinstance(trigger_count, float)) or trigger_count <= 0: - raise ValueError("trigger_count must be a positive number.") - if isinstance(trigger_count, float) and 0 < trigger_count <= 1: - self.compress_config["trigger_count"] = int( - trigger_count * get_max_token_limit(self.llm_config["model"]) - ) - trigger_count = self.compress_config["trigger_count"] - init_count = self._compute_init_token_count() - if trigger_count < init_count: - print( - f"Warning: trigger_count {trigger_count} is less than the initial token count {init_count} (system message + function description if passed), compression will be disabled. Please increase trigger_count if you want to enable compression." 
- ) - self.compress_config = False - - if self.compress_config["mode"] == "CUSTOMIZED" and self.compress_config["compress_function"] is None: - raise ValueError("compress_function must be provided when mode is CUSTOMIZED.") - if self.compress_config["mode"] != "CUSTOMIZED" and self.compress_config["compress_function"] is not None: - print("Warning: compress_function is provided but mode is not 'CUSTOMIZED'.") - - else: - self.compress_config = False - - def generate_reply( - self, - messages: Optional[List[Dict]] = None, - sender: Optional[Agent] = None, - exclude: Optional[List[Callable]] = None, - ) -> Union[str, Dict, None]: - """ - - Adding to line 202: - ``` - if messages is not None and messages != self._oai_messages[sender]: - messages = self._oai_messages[sender] - ``` - """ - if all((messages is None, sender is None)): - error_msg = f"Either {messages=} or {sender=} must be provided." - logger.error(error_msg) - raise AssertionError(error_msg) - - if messages is None: - messages = self._oai_messages[sender] - - for reply_func_tuple in self._reply_func_list: - reply_func = reply_func_tuple["reply_func"] - if exclude and reply_func in exclude: - continue - if inspect.iscoroutinefunction(reply_func): - continue - if self._match_trigger(reply_func_tuple["trigger"], sender): - final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"]) - if messages is not None and sender is not None and messages != self._oai_messages[sender]: - messages = self._oai_messages[sender] - if final: - return reply - return self._default_auto_reply - - def _compute_init_token_count(self): - """Check if the agent is LLM-based and compute the initial token count.""" - if self.llm_config is False: - return 0 - - func_count = 0 - if "functions" in self.llm_config: - func_count = num_tokens_from_functions(self.llm_config["functions"], self.llm_config["model"]) - - return func_count + count_token(self._oai_system_message, self.llm_config["model"]) - - def _manage_history_on_token_limit(self, messages, token_used, max_token_allowed, model): - """Manage the message history with different modes when token limit is reached. - Return: - final (bool): whether to terminate the agent. - compressed_messages (List[Dict]): the compressed messages. None if no compression or compression failed. - """ - # 1. mode = "TERMINATE", terminate the agent if no token left. - if self.compress_config["mode"] == "TERMINATE": - if max_token_allowed - token_used <= 0: - # Terminate if no token left. - print( - colored( - f'Warning: Terminate Agent "{self.name}" due to no token left for oai reply. max token for {model}: {max_token_allowed}, existing token count: {token_used}', - "yellow", - ), - flush=True, - ) - return True, None - return False, None - - # if token_used is less than trigger_count, no compression will be used. - if token_used < self.compress_config["trigger_count"]: - return False, None - - # 2. 
mode = "COMPRESS" or mode = "CUSTOMIZED", compress the messages - copied_messages = copy.deepcopy(messages) - if self.compress_config["mode"] == "COMPRESS": - _, compress_messages = self.compress_messages(copied_messages) - elif self.compress_config["mode"] == "CUSTOMIZED": - _, compress_messages = self.compress_config["compress_function"](copied_messages) - else: - raise ValueError(f"Unknown compression mode: {self.compress_config['mode']}") - - if compress_messages is not None: - for i in range(len(compress_messages)): - compress_messages[i] = self._get_valid_oai_message(compress_messages[i]) - return False, compress_messages - - def _get_valid_oai_message(self, message): - """Convert a message into a valid OpenAI ChatCompletion message.""" - oai_message = {k: message[k] for k in ("content", "function_call", "name", "context", "role") if k in message} - if "content" not in oai_message: - if "function_call" in oai_message: - oai_message["content"] = None # if only function_call is provided, content will be set to None. - else: - raise ValueError( - "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided." - ) - if "function_call" in oai_message: - oai_message["role"] = "assistant" # only messages with role 'assistant' can have a function call. - oai_message["function_call"] = dict(oai_message["function_call"]) - return oai_message - - def _print_compress_info(self, init_token_count, token_used, token_after_compression): - to_print = "Token Count (including {} tokens from system msg and function descriptions). Before compression : {} | After: {}".format( - init_token_count, - token_used, - token_after_compression, - ) - print(colored(to_print, "magenta"), flush=True) - print("-" * 80, flush=True) - - def on_oai_token_limit( - self, - messages: Optional[List[Dict]] = None, - sender: Optional[Agent] = None, - config: Optional[Any] = None, - ) -> Tuple[bool, Union[str, Dict, None]]: - """(Experimental) Compress previous messages when a threshold of tokens is reached. 
- - TODO: async compress - TODO: maintain a list for old oai messages (messages before compression) - """ - llm_config = self.llm_config if config is None else config - if self.compress_config is False: - return False, None - if messages is None: - messages = self._oai_messages[sender] - - model = llm_config["model"] - init_token_count = self._compute_init_token_count() - token_used = init_token_count + count_token(messages, model) - final, compressed_messages = self._manage_history_on_token_limit( - messages, token_used, get_max_token_limit(model), model - ) - - # update message history with compressed messages - if compressed_messages is not None: - self._print_compress_info( - init_token_count, token_used, count_token(compressed_messages, model) + init_token_count - ) - self._oai_messages[sender] = compressed_messages - if self.compress_config["broadcast"]: - # update the compressed message history to sender - sender._oai_messages[self] = copy.deepcopy(compressed_messages) - # switching the role of the messages for the sender - for i in range(len(sender._oai_messages[self])): - cmsg = sender._oai_messages[self][i] - if "function_call" in cmsg or cmsg["role"] == "user": - cmsg["role"] = "assistant" - elif cmsg["role"] == "assistant": - cmsg["role"] = "user" - sender._oai_messages[self][i] = cmsg - - # successfully compressed, return False, None for generate_oai_reply to be called with the updated messages - return False, None - return final, None - - def compress_messages( - self, - messages: Optional[List[Dict]] = None, - config: Optional[Any] = None, - ) -> Tuple[bool, Union[str, Dict, None, List]]: - """Compress a list of messages into one message. - - The first message (the initial prompt) will not be compressed. - The rest of the messages will be compressed into one message, the model is asked to distinguish the role of each message: USER, ASSISTANT, FUNCTION_CALL, FUNCTION_RETURN. - Check out the compress_sys_msg. - - TODO: model used in compression agent is different from assistant agent: For example, if original model used by is gpt-4; we start compressing at 70% of usage, 70% of 8092 = 5664; and we use gpt 3.5 here max_toke = 4096, it will raise error. choosinng model automatically? - """ - # 1. use the compression client - client = self.compress_client if config is None else config - - # 2. stop if there is only one message in the list - leave_last_n = self.compress_config.get("leave_last_n", 0) - if leave_last_n + 1 >= len(messages): - logger.warning( - f"Warning: Compression skipped at trigger count threshold. The first msg and last {leave_last_n} msgs will not be compressed. current msg count: {len(messages)}. Consider raising trigger_count." - ) - return False, None - - # 3. 
put all history into one, except the first one - if self.compress_config["verbose"]: - print(colored("*" * 30 + "Start compressing the following content:" + "*" * 30, "magenta"), flush=True) - - compressed_prompt = "Below is the compressed content from the previous conversation, evaluate the process and continue if necessary:\n" - chat_to_compress = "To be compressed:\n" - - for m in messages[1 : len(messages) - leave_last_n]: # 0, 1, 2, 3, 4 - # Handle function role - if m.get("role") == "function": - chat_to_compress += f"##FUNCTION_RETURN## (from function \"{m['name']}\"): \n{m['content']}\n" - - # If name exists in the message - elif "name" in m: - chat_to_compress += f"##{m['name']}({m['role'].upper()})## {m['content']}\n" - - # Handle case where content is not None and name is absent - elif m.get("content"): # This condition will also handle None and empty string - if compressed_prompt in m["content"]: - chat_to_compress += m["content"].replace(compressed_prompt, "") + "\n" - else: - chat_to_compress += f"##{m['role'].upper()}## {m['content']}\n" - - # Handle function_call in the message - if "function_call" in m: - function_name = m["function_call"].get("name") - function_args = m["function_call"].get("arguments") - - if not function_name or not function_args: - chat_to_compress += f"##FUNCTION_CALL## {m['function_call']}\n" - else: - chat_to_compress += f"##FUNCTION_CALL## \nName: {function_name}\nArgs: {function_args}\n" - - chat_to_compress = [{"role": "user", "content": chat_to_compress}] - - if self.compress_config["verbose"]: - print(chat_to_compress[0]["content"]) - - # 4. use LLM to compress - compress_sys_msg = """You are a helpful assistant that will summarize and compress conversation history. -Rules: -1. Please summarize each of the message and reserve the exact titles: ##USER##, ##ASSISTANT##, ##FUNCTION_CALL##, ##FUNCTION_RETURN##, ##SYSTEM##, ##()## (e.g. ##Bob(ASSISTANT)##). -2. Try to compress the content but reserve important information (a link, a specific number, etc.). -3. Use words to summarize the code blocks or functions calls (##FUNCTION_CALL##) and their goals. For code blocks, please use ##CODE## to mark it. -4. For returns from functions (##FUNCTION_RETURN##) or returns from code execution: summarize the content and indicate the status of the return (e.g. success, error, etc.). -""" - try: - response = client.create( - context=None, - messages=[{"role": "system", "content": compress_sys_msg}] + chat_to_compress, - ) - except Exception as e: - print(colored(f"Failed to compress the content due to {e}", "red"), flush=True) - return False, None - - compressed_message = self.client.extract_text_or_completion_object(response)[0] - assert isinstance(compressed_message, str), f"compressed_message should be a string: {compressed_message}" - if self.compress_config["verbose"]: - print( - colored("*" * 30 + "Content after compressing:" + "*" * 30, "magenta"), - flush=True, - ) - print(compressed_message, colored("\n" + "*" * 80, "magenta")) - - # 5. 
add compressed message to the first message and return - return ( - True, - [ - messages[0], - { - "content": compressed_prompt + compressed_message, - "role": "system", - }, - ] - + messages[len(messages) - leave_last_n :], - ) diff --git a/notebook/agentchat_capability_long_context_handling.ipynb b/notebook/agentchat_capability_long_context_handling.ipynb deleted file mode 100644 index 0a9d715e3e5..00000000000 --- a/notebook/agentchat_capability_long_context_handling.ipynb +++ /dev/null @@ -1,687 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Handling A Long Context via `TransformChatHistory`\n", - "\n", - "<div class=\"alert alert-danger\" role=\"alert\">\n", - " <strong>Deprecation Notice:</strong> <code>TransformChatHistory</code> is no longer supported and will be removed in version <code>0.2.30</code>. Please transition to using <code>TransformMessages</code> as the new standard method. For a detailed introduction to this method, including how to limit the number of tokens in message context history to replace <code>TransformChatHistory</code>, visit our guide <a href=\"https://microsoft.github.io/autogen/docs/topics/handling_long_contexts/intro_to_transform_messages\" target=\"_blank\">Introduction to Transform Messages</a>.\n", - "</div>\n", - "\n", - "This notebook illustrates how you can use the `TransformChatHistory` capability to give any `Conversable` agent an ability to handle a long context. \n", - "\n", - "````{=mdx}\n", - ":::info Requirements\n", - "Install `pyautogen`:\n", - "```bash\n", - "pip install pyautogen\n", - "```\n", - "\n", - "For more information, please refer to the [installation guide](/docs/installation/).\n", - ":::\n", - "````" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "\n", - "import autogen\n", - "from autogen.agentchat.contrib.capabilities import context_handling" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "llm_config = {\n", - " \"config_list\": [{\"model\": \"gpt-3.5-turbo\", \"api_key\": os.environ.get(\"OPENAI_API_KEY\")}],\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "````{=mdx}\n", - ":::tip\n", - "Learn more about configuring LLMs for agents [here](/docs/topics/llm_configuration).\n", - ":::\n", - "````\n", - "\n", - "To add this ability to any agent, define the capability and then use `add_to_agent`." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "plot and save a graph of x^2 from -10 to 10\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "To plot and save a graph of the function x^2 from -10 to 10, you can use the matplotlib library in Python. 
Here is the code:\n", - "\n", - "```python\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "# Generate x values from -10 to 10\n", - "x = np.linspace(-10, 10, 100)\n", - "\n", - "# Calculate corresponding y values (x^2)\n", - "y = x**2\n", - "\n", - "# Create the plot\n", - "plt.plot(x, y)\n", - "\n", - "# Add labels and title\n", - "plt.xlabel('x')\n", - "plt.ylabel('y')\n", - "plt.title('Plot of x^2')\n", - "\n", - "# Save the plot as a file\n", - "plt.savefig('x_squared_plot.png')\n", - "\n", - "# Show the plot\n", - "plt.show()\n", - "```\n", - "\n", - "This code will create a plot of the function x^2 and save it as \"x_squared_plot.png\" in the current directory. Make sure you have the matplotlib library installed before running this code.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "Figure(640x480)\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 0 messages. Reduced from 3 to 3.\u001b[0m\n", - "\u001b[33mTruncated 139 tokens. Tokens reduced from 223 to 84\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "Great! The code executed successfully and generated a plot of the function x^2 from -10 to 10. The plot was displayed in a figure with size 640x480. \n", - "\n", - "To save the graph as an image file, you can modify the code as follows:\n", - "\n", - "```python\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "# Generate x values from -10 to 10\n", - "x = np.linspace(-10, 10, 100)\n", - "\n", - "# Generate y values by squaring x\n", - "y = x ** 2\n", - "\n", - "# Plot the graph\n", - "plt.plot(x, y)\n", - "plt.xlabel('x')\n", - "plt.ylabel('x^2')\n", - "plt.title('Graph of x^2')\n", - "plt.grid(True)\n", - "\n", - "# Save the graph as an image file, for example as 'graph.png'\n", - "plt.savefig('graph.png')\n", - "```\n", - "\n", - "By executing this updated code, the graph will be saved as an image file named 'graph.png' in the same directory as your Python script.\n", - "\n", - "Please let me know if you need any further assistance.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 0 messages. Reduced from 5 to 5.\u001b[0m\n", - "\u001b[33mTruncated 159 tokens. Tokens reduced from 306 to 147\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "Great! The code executed successfully and generated a plot of the function x^2 from -10 to 10. 
The plot was displayed in a figure with size 640x480.\n", - "\n", - "To save the graph as an image file, you can modify the code as follows:\n", - "\n", - "```python\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "# Generate x values from -10 to 10\n", - "x = np.linspace(-10, 10, 100)\n", - "\n", - "# Calculate y values (x^2)\n", - "y = x**2\n", - "\n", - "# Plot the graph\n", - "plt.plot(x, y)\n", - "\n", - "# Add labels and title\n", - "plt.xlabel('x')\n", - "plt.ylabel('y')\n", - "plt.title('Graph of x^2')\n", - "\n", - "# Save the graph as an image file\n", - "plt.savefig('graph.png')\n", - "\n", - "# Close the plot\n", - "plt.close()\n", - "```\n", - "\n", - "This code will save the plot as an image file named \"graph.png\" in the current directory. You can change the filename and path if needed.\n", - "\n", - "Please let me know if you need any further assistance.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 0 messages. Reduced from 7 to 7.\u001b[0m\n", - "\u001b[33mTruncated 159 tokens. Tokens reduced from 369 to 210\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "Great! The code executed successfully and generated a plot of the function x^2 from -10 to 10. The plot was displayed in a figure with size 640x480.\n", - "\n", - "To save the graph as an image file, you can modify the code as follows:\n", - "\n", - "```python\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "# Generate x values\n", - "x = np.linspace(-10, 10, 100)\n", - "\n", - "# Generate y values\n", - "y = x**2\n", - "\n", - "# Plot the graph\n", - "plt.plot(x, y)\n", - "\n", - "# Set labels and title\n", - "plt.xlabel('x')\n", - "plt.ylabel('y')\n", - "plt.title('Graph of x^2')\n", - "\n", - "# Save the graph as an image file\n", - "plt.savefig('graph.png')\n", - "```\n", - "\n", - "This code will save the graph as a PNG image file named 'graph.png' in the same directory where you are running the script. You can change the filename and format (e.g., 'graph.jpg') if desired.\n", - "\n", - "Please note that you will need to have the matplotlib library installed to run this code. If you don't have it installed, you can install it by running `pip install matplotlib` in your terminal or command prompt.\n", - "\n", - "Let me know if you need any further assistance!\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 0 messages. Reduced from 9 to 9.\u001b[0m\n", - "\u001b[33mTruncated 198 tokens. Tokens reduced from 471 to 273\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "Great! The code executed successfully and generated a plot of the function x^2 from -10 to 10. 
The plot was displayed in a figure with size 640x480.\n", - "\n", - "To save the graph as an image file, you can modify the code as follows:\n", - "\n", - "```python\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "# Generate x values from -10 to 10\n", - "x = np.linspace(-10, 10, 100)\n", - "\n", - "# Generate y values by squaring x\n", - "y = x**2\n", - "\n", - "# Plot the graph\n", - "plt.plot(x, y)\n", - "\n", - "# Add labels and title\n", - "plt.xlabel('x')\n", - "plt.ylabel('x^2')\n", - "plt.title('Graph of x^2')\n", - "\n", - "# Save the graph as an image file\n", - "plt.savefig('x_squared_plot.png')\n", - "\n", - "# Display the graph\n", - "plt.show()\n", - "```\n", - "\n", - "This code will save the graph as a PNG image file named \"x_squared_plot.png\" in the current working directory. You can customize the filename and file format according to your needs.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "Figure(640x480)\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 1 messages. Reduced from 11 to 10.\u001b[0m\n", - "\u001b[33mTruncated 174 tokens. Tokens reduced from 501 to 327\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "Great! The code executed successfully and generated a plot of the function x^2 from -10 to 10. The plot was displayed in a figure with size 640x480.\n", - "\n", - "To save the graph as an image file, you can modify the code as follows:\n", - "\n", - "```python\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "# Generate x values\n", - "x = np.linspace(-10, 10, 100)\n", - "\n", - "# Generate y values\n", - "y = x ** 2\n", - "\n", - "# Create the plot\n", - "plt.plot(x, y)\n", - "plt.xlabel('x')\n", - "plt.ylabel('y')\n", - "plt.title('Graph of x^2')\n", - "\n", - "# Save the plot as an image file\n", - "plt.savefig('x_squared.png')\n", - "\n", - "plt.show()\n", - "```\n", - "\n", - "The code will save the plot as a PNG image file named \"x_squared.png\" in the current directory. You can change the filename or file extension as needed.\n", - "\n", - "Please let me know if you need any further assistance!\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "Figure(640x480)\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 3 messages. Reduced from 13 to 10.\u001b[0m\n", - "\u001b[33mTruncated 227 tokens. 
Tokens reduced from 554 to 327\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "To save the graph as an image file, you can modify the code as follows:\n", - "\n", - "```python\n", - "import matplotlib.pyplot as plt\n", - "\n", - "# Create the plot\n", - "plt.plot(x, y)\n", - "\n", - "# Set the plot title and axes labels\n", - "plt.title(\"Plot of x^2\")\n", - "plt.xlabel(\"x\")\n", - "plt.ylabel(\"y\")\n", - "\n", - "# Save the plot as an image file\n", - "plt.savefig(\"plot.png\")\n", - "```\n", - "\n", - "This code will save the plot as a PNG file with the name \"plot.png\" in the same directory as your Python script. You can change the file name and format (e.g., \"plot.jpg\") if desired.\n", - "\n", - "Once you run this modified code, the plot will be saved as an image file. You can then use this file for further analysis or to share the plot with others.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 1 (execution failed)\n", - "Code output: \n", - "Traceback (most recent call last):\n", - " File \"\", line 4, in <module>\n", - " plt.plot(x, y)\n", - "NameError: name 'x' is not defined\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 5 messages. Reduced from 15 to 10.\u001b[0m\n", - "\u001b[33mTruncated 258 tokens. Tokens reduced from 622 to 364\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "It seems that the code encountered an error because the variables `x` and `y` were not defined.\n", - "\n", - "Let's modify the code to define the variables `x` and `y` and then save the graph as an image file:\n", - "\n", - "```python\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "# Define the x values as an array from -10 to 10\n", - "x = np.linspace(-10, 10, 100)\n", - "\n", - "# Define the y values as the square of x\n", - "y = x ** 2\n", - "\n", - "# Create the plot\n", - "plt.plot(x, y)\n", - "\n", - "# Set the plot title and axes labels\n", - "plt.title(\"Plot of x^2\")\n", - "plt.xlabel(\"x\")\n", - "plt.ylabel(\"y\")\n", - "\n", - "# Save the plot as an image file\n", - "plt.savefig('plot.png')\n", - "```\n", - "This code uses the `numpy` library to generate an array of x values from -10 to 10 and calculates the corresponding y values as the square of x. The plot is then saved as an image file named `plot.png`.\n", - "\n", - "Please make sure you have the `numpy` and `matplotlib` libraries installed before running this code. Let me know if you need help with that.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 7 messages. Reduced from 17 to 10.\u001b[0m\n", - "\u001b[33mTruncated 401 tokens. Tokens reduced from 765 to 364\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "Great! The code executed successfully. 
The graph was saved as an image file with the name \"plot.png\" in the current working directory.\n", - "\n", - "Now you can share the \"plot.png\" file or view it using an image viewer.\n", - "\n", - "Let me know if there's anything else I can help with!\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 9 messages. Reduced from 19 to 10.\u001b[0m\n", - "\u001b[33mTruncated 282 tokens. Tokens reduced from 633 to 351\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "Is there anything else I can help you with?\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 11 messages. Reduced from 21 to 10.\u001b[0m\n", - "\u001b[33mTruncated 342 tokens. Tokens reduced from 634 to 292\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "If you need any further assistance, feel free to ask. I'm here to help!\n", - "\n", - "--------------------------------------------------------------------------------\n" - ] - }, - { - "data": { - "text/plain": [ - "ChatResult(chat_history=[{'content': 'plot and save a graph of x^2 from -10 to 10', 'role': 'assistant'}, {'content': 'To plot and save a graph of the function x^2 from -10 to 10, you can use the matplotlib library in Python. Here is the code:\\n\\n```python\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\n\\n# Generate x values from -10 to 10\\nx = np.linspace(-10, 10, 100)\\n\\n# Calculate corresponding y values (x^2)\\ny = x**2\\n\\n# Create the plot\\nplt.plot(x, y)\\n\\n# Add labels and title\\nplt.xlabel(\\'x\\')\\nplt.ylabel(\\'y\\')\\nplt.title(\\'Plot of x^2\\')\\n\\n# Save the plot as a file\\nplt.savefig(\\'x_squared_plot.png\\')\\n\\n# Show the plot\\nplt.show()\\n```\\n\\nThis code will create a plot of the function x^2 and save it as \"x_squared_plot.png\" in the current directory. Make sure you have the matplotlib library installed before running this code.', 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\nFigure(640x480)\\n', 'role': 'assistant'}, {'content': \"Great! The code executed successfully and generated a plot of the function x^2 from -10 to 10. The plot was displayed in a figure with size 640x480. \\n\\nTo save the graph as an image file, you can modify the code as follows:\\n\\n```python\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\n\\n# Generate x values from -10 to 10\\nx = np.linspace(-10, 10, 100)\\n\\n# Generate y values by squaring x\\ny = x ** 2\\n\\n# Plot the graph\\nplt.plot(x, y)\\nplt.xlabel('x')\\nplt.ylabel('x^2')\\nplt.title('Graph of x^2')\\nplt.grid(True)\\n\\n# Save the graph as an image file, for example as 'graph.png'\\nplt.savefig('graph.png')\\n```\\n\\nBy executing this updated code, the graph will be saved as an image file named 'graph.png' in the same directory as your Python script.\\n\\nPlease let me know if you need any further assistance.\", 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\n', 'role': 'assistant'}, {'content': 'Great! 
The code executed successfully and generated a plot of the function x^2 from -10 to 10. The plot was displayed in a figure with size 640x480.\\n\\nTo save the graph as an image file, you can modify the code as follows:\\n\\n```python\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\n\\n# Generate x values from -10 to 10\\nx = np.linspace(-10, 10, 100)\\n\\n# Calculate y values (x^2)\\ny = x**2\\n\\n# Plot the graph\\nplt.plot(x, y)\\n\\n# Add labels and title\\nplt.xlabel(\\'x\\')\\nplt.ylabel(\\'y\\')\\nplt.title(\\'Graph of x^2\\')\\n\\n# Save the graph as an image file\\nplt.savefig(\\'graph.png\\')\\n\\n# Close the plot\\nplt.close()\\n```\\n\\nThis code will save the plot as an image file named \"graph.png\" in the current directory. You can change the filename and path if needed.\\n\\nPlease let me know if you need any further assistance.', 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\n', 'role': 'assistant'}, {'content': \"Great! The code executed successfully and generated a plot of the function x^2 from -10 to 10. The plot was displayed in a figure with size 640x480.\\n\\nTo save the graph as an image file, you can modify the code as follows:\\n\\n```python\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\n\\n# Generate x values\\nx = np.linspace(-10, 10, 100)\\n\\n# Generate y values\\ny = x**2\\n\\n# Plot the graph\\nplt.plot(x, y)\\n\\n# Set labels and title\\nplt.xlabel('x')\\nplt.ylabel('y')\\nplt.title('Graph of x^2')\\n\\n# Save the graph as an image file\\nplt.savefig('graph.png')\\n```\\n\\nThis code will save the graph as a PNG image file named 'graph.png' in the same directory where you are running the script. You can change the filename and format (e.g., 'graph.jpg') if desired.\\n\\nPlease note that you will need to have the matplotlib library installed to run this code. If you don't have it installed, you can install it by running `pip install matplotlib` in your terminal or command prompt.\\n\\nLet me know if you need any further assistance!\", 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\n', 'role': 'assistant'}, {'content': 'Great! The code executed successfully and generated a plot of the function x^2 from -10 to 10. The plot was displayed in a figure with size 640x480.\\n\\nTo save the graph as an image file, you can modify the code as follows:\\n\\n```python\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\n\\n# Generate x values from -10 to 10\\nx = np.linspace(-10, 10, 100)\\n\\n# Generate y values by squaring x\\ny = x**2\\n\\n# Plot the graph\\nplt.plot(x, y)\\n\\n# Add labels and title\\nplt.xlabel(\\'x\\')\\nplt.ylabel(\\'x^2\\')\\nplt.title(\\'Graph of x^2\\')\\n\\n# Save the graph as an image file\\nplt.savefig(\\'x_squared_plot.png\\')\\n\\n# Display the graph\\nplt.show()\\n```\\n\\nThis code will save the graph as a PNG image file named \"x_squared_plot.png\" in the current working directory. You can customize the filename and file format according to your needs.', 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\nFigure(640x480)\\n', 'role': 'assistant'}, {'content': 'Great! The code executed successfully and generated a plot of the function x^2 from -10 to 10. 
The plot was displayed in a figure with size 640x480.\\n\\nTo save the graph as an image file, you can modify the code as follows:\\n\\n```python\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\n\\n# Generate x values\\nx = np.linspace(-10, 10, 100)\\n\\n# Generate y values\\ny = x ** 2\\n\\n# Create the plot\\nplt.plot(x, y)\\nplt.xlabel(\\'x\\')\\nplt.ylabel(\\'y\\')\\nplt.title(\\'Graph of x^2\\')\\n\\n# Save the plot as an image file\\nplt.savefig(\\'x_squared.png\\')\\n\\nplt.show()\\n```\\n\\nThe code will save the plot as a PNG image file named \"x_squared.png\" in the current directory. You can change the filename or file extension as needed.\\n\\nPlease let me know if you need any further assistance!', 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\nFigure(640x480)\\n', 'role': 'assistant'}, {'content': 'To save the graph as an image file, you can modify the code as follows:\\n\\n```python\\nimport matplotlib.pyplot as plt\\n\\n# Create the plot\\nplt.plot(x, y)\\n\\n# Set the plot title and axes labels\\nplt.title(\"Plot of x^2\")\\nplt.xlabel(\"x\")\\nplt.ylabel(\"y\")\\n\\n# Save the plot as an image file\\nplt.savefig(\"plot.png\")\\n```\\n\\nThis code will save the plot as a PNG file with the name \"plot.png\" in the same directory as your Python script. You can change the file name and format (e.g., \"plot.jpg\") if desired.\\n\\nOnce you run this modified code, the plot will be saved as an image file. You can then use this file for further analysis or to share the plot with others.', 'role': 'user'}, {'content': 'exitcode: 1 (execution failed)\\nCode output: \\nTraceback (most recent call last):\\n File \"\", line 4, in <module>\\n plt.plot(x, y)\\nNameError: name \\'x\\' is not defined\\n', 'role': 'assistant'}, {'content': 'It seems that the code encountered an error because the variables `x` and `y` were not defined.\\n\\nLet\\'s modify the code to define the variables `x` and `y` and then save the graph as an image file:\\n\\n```python\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\n\\n# Define the x values as an array from -10 to 10\\nx = np.linspace(-10, 10, 100)\\n\\n# Define the y values as the square of x\\ny = x ** 2\\n\\n# Create the plot\\nplt.plot(x, y)\\n\\n# Set the plot title and axes labels\\nplt.title(\"Plot of x^2\")\\nplt.xlabel(\"x\")\\nplt.ylabel(\"y\")\\n\\n# Save the plot as an image file\\nplt.savefig(\\'plot.png\\')\\n```\\nThis code uses the `numpy` library to generate an array of x values from -10 to 10 and calculates the corresponding y values as the square of x. The plot is then saved as an image file named `plot.png`.\\n\\nPlease make sure you have the `numpy` and `matplotlib` libraries installed before running this code. Let me know if you need help with that.', 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\n', 'role': 'assistant'}, {'content': 'Great! The code executed successfully. The graph was saved as an image file with the name \"plot.png\" in the current working directory.\\n\\nNow you can share the \"plot.png\" file or view it using an image viewer.\\n\\nLet me know if there\\'s anything else I can help with!', 'role': 'user'}, {'content': '', 'role': 'assistant'}, {'content': 'Is there anything else I can help you with?', 'role': 'user'}, {'content': '', 'role': 'assistant'}, {'content': \"If you need any further assistance, feel free to ask. I'm here to help!\", 'role': 'user'}], summary=\"If you need any further assistance, feel free to ask. 
I'm here to help!\", cost=({'total_cost': 0.015855, 'gpt-3.5-turbo-0613': {'cost': 0.015855, 'prompt_tokens': 8242, 'completion_tokens': 1746, 'total_tokens': 9988}}, {'total_cost': 0.0147465, 'gpt-3.5-turbo-0613': {'cost': 0.0147465, 'prompt_tokens': 7755, 'completion_tokens': 1557, 'total_tokens': 9312}}), human_input=[])" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "assistant = autogen.AssistantAgent(\n", - " \"assistant\",\n", - " llm_config=llm_config,\n", - ")\n", - "\n", - "\n", - "# Instantiate the capability to manage chat history\n", - "manage_chat_history = context_handling.TransformChatHistory(max_tokens_per_message=50, max_messages=10, max_tokens=1000)\n", - "# Add the capability to the assistant\n", - "manage_chat_history.add_to_agent(assistant)\n", - "\n", - "user_proxy = autogen.UserProxyAgent(\n", - " \"user_proxy\",\n", - " human_input_mode=\"NEVER\",\n", - " is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\", \"\"),\n", - " code_execution_config={\n", - " \"work_dir\": \"coding\",\n", - " \"use_docker\": False,\n", - " },\n", - " max_consecutive_auto_reply=10,\n", - ")\n", - "\n", - "user_proxy.initiate_chat(assistant, message=\"plot and save a graph of x^2 from -10 to 10\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Why is this important?\n", - "This capability is especially useful if you expect the agent histories to become exceptionally large and exceed the context length offered by your LLM.\n", - "For example, in the example below, we will define two agents -- one without this ability and one with this ability.\n", - "\n", - "The agent with this ability will be able to handle longer chat history without crashing." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "plot and save a graph of x^2 from -10 to 10\n", - "\n", - "--------------------------------------------------------------------------------\n", - "Encountered an error with the base assistant\n", - "Error code: 400 - {'error': {'message': \"This model's maximum context length is 4097 tokens. However, your messages resulted in 1009487 tokens. Please reduce the length of the messages.\", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}}\n", - "\n", - "\n", - "\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "plot and save a graph of x^2 from -10 to 10\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 1991 messages. Reduced from 2001 to 10.\u001b[0m\n", - "\u001b[33mTruncated 1000800 tokens. 
Tokens reduced from 1001015 to 215\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "Here's the Python code to plot and save a graph of x^2 from -10 to 10:\n", - "\n", - "```python\n", - "# filename: plot_graph.py\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "# Generate x values from -10 to 10\n", - "x = np.linspace(-10, 10, 100)\n", - "\n", - "# Calculate y values as x^2\n", - "y = x**2\n", - "\n", - "# Create plot\n", - "plt.plot(x, y)\n", - "\n", - "# Add labels and title\n", - "plt.xlabel('x')\n", - "plt.ylabel('y')\n", - "plt.title('Graph of y = x^2')\n", - "\n", - "# Save the plot as a PNG image\n", - "plt.savefig('graph.png')\n", - "\n", - "# Show the plot\n", - "plt.show()\n", - "```\n", - "\n", - "To execute this code, save it to a file called `plot_graph.py` and run it using Python. This will generate a file called `graph.png` in the same directory, which will contain the graph of x^2 from -10 to 10.\n", - "\n", - "Note: Make sure you have the matplotlib library installed. You can install it by running `pip install matplotlib` in your terminal or command prompt.\n", - "\n", - "Let me know if you need any further assistance!\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "Figure(640x480)\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 1993 messages. Reduced from 2003 to 10.\u001b[0m\n", - "\u001b[33mTruncated 997232 tokens. Tokens reduced from 997466 to 234\u001b[0m\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "Great! The graph of x^2 from -10 to 10 has been plotted and saved successfully. You can find the saved graph as an image file on your computer. \n", - "\n", - "Is there anything else I can help you with?\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mTruncated 1995 messages. Reduced from 2005 to 10.\u001b[0m\n", - "\u001b[33mTruncated 997096 tokens. 
Tokens reduced from 997326 to 230\u001b[0m\n",
-     "\u001b[33massistant\u001b[0m (to user_proxy):\n",
-     "\n",
-     "TERMINATE\n",
-     "\n",
-     "--------------------------------------------------------------------------------\n"
-    ]
-   }
-  ],
-  "source": [
-   "assistant_base = autogen.AssistantAgent(\n",
-   "    \"assistant\",\n",
-   "    llm_config=llm_config,\n",
-   ")\n",
-   "\n",
-   "assistant_with_context_handling = autogen.AssistantAgent(\n",
-   "    \"assistant\",\n",
-   "    llm_config=llm_config,\n",
-   ")\n",
-   "# add the context handling capability to the second assistant only\n",
-   "manage_chat_history = context_handling.TransformChatHistory(max_tokens_per_message=50, max_messages=10, max_tokens=1000)\n",
-   "manage_chat_history.add_to_agent(assistant_with_context_handling)\n",
-   "\n",
-   "user_proxy = autogen.UserProxyAgent(\n",
-   "    \"user_proxy\",\n",
-   "    human_input_mode=\"NEVER\",\n",
-   "    is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\", \"\"),\n",
-   "    code_execution_config={\n",
-   "        \"work_dir\": \"coding\",\n",
-   "        \"use_docker\": False,\n",
-   "    },\n",
-   "    max_consecutive_auto_reply=2,\n",
-   ")\n",
-   "\n",
-   "# Create a very long chat history that is bound to cause a crash\n",
-   "# for gpt-3.5 once it is sent as context\n",
-   "long_history = []\n",
-   "for i in range(1000):\n",
-   "    # define a fake, very long message\n",
-   "    assistant_msg = {\"role\": \"assistant\", \"content\": \"test \" * 1000}\n",
-   "    user_msg = {\"role\": \"user\", \"content\": \"\"}\n",
-   "\n",
-   "    assistant_base.send(assistant_msg, user_proxy, request_reply=False, silent=True)\n",
-   "    assistant_with_context_handling.send(assistant_msg, user_proxy, request_reply=False, silent=True)\n",
-   "    user_proxy.send(user_msg, assistant_base, request_reply=False, silent=True)\n",
-   "    user_proxy.send(user_msg, assistant_with_context_handling, request_reply=False, silent=True)\n",
-   "\n",
-   "try:\n",
-   "    user_proxy.initiate_chat(assistant_base, message=\"plot and save a graph of x^2 from -10 to 10\", clear_history=False)\n",
-   "except Exception as e:\n",
-   "    print(\"Encountered an error with the base assistant\")\n",
-   "    print(e)\n",
-   "    print(\"\\n\\n\")\n",
-   "\n",
-   "try:\n",
-   "    user_proxy.initiate_chat(\n",
-   "        assistant_with_context_handling, message=\"plot and save a graph of x^2 from -10 to 10\", clear_history=False\n",
-   "    )\n",
-   "except Exception as e:\n",
-   "    print(e)"
-  ]
- }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.11.9"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/notebook/agentchat_compression.ipynb b/notebook/agentchat_compression.ipynb
deleted file mode 100644
index d7f3a7204db..00000000000
--- a/notebook/agentchat_compression.ipynb
+++ /dev/null
@@ -1,876 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Conversations with Chat History Compression Enabled\n",
-    "\n",
-    "<div class=\"alert alert-danger\" role=\"alert\">\n",
-    "    <strong>Deprecation Notice:</strong> <code>CompressibleAgent</code> has been deprecated and will no longer be available as of version <code>0.2.30</code>. Please transition to using <code>TransformMessages</code>, which is now the recommended approach. 
For a detailed guide on implementing this new standard, refer to our user guide on <a href=\"https://microsoft.github.io/autogen/docs/topics/handling_long_contexts/compressing_text_w_llmligua\" target=\"_blank\">Compressing Text with LLMLingua</a>. This guide provides examples of how to use the LLMLingua transform as a replacement for <code>CompressibleAgent</code>.\n",
-    "</div>\n",
-    "\n",
-    "AutoGen offers conversable agents powered by LLMs, tools, or humans, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
-    "\n",
-    "In this notebook, we demonstrate how to enable compression of history messages using the `CompressibleAgent`. While this agent retains all the default functionalities of the `AssistantAgent`, it also provides the added feature of compression when activated through the `compress_config` setting.\n",
-    "\n",
-    "Different compression modes are supported:\n",
-    "\n",
-    "1. `compress_config=False` (default): `CompressibleAgent` is equivalent to `AssistantAgent`.\n",
-    "2. `compress_config=True` or `compress_config={\"mode\": \"TERMINATE\"}`: no compression is performed, but token usage is counted before each request to the OpenAI model. The conversation is terminated directly if the total token usage exceeds the maximum allowed by the model (to avoid the token limit error from the OpenAI API).\n",
-    "3. `compress_config={\"mode\": \"COMPRESS\", \"trigger_count\": <your pre-set number>, \"leave_last_n\": <your pre-set number>}`: compression is enabled.\n",
-    "\n",
-    "    ```python\n",
-    "    # default compress_config\n",
-    "    compress_config = {\n",
-    "        \"mode\": \"COMPRESS\",\n",
-    "        \"compress_function\": None,\n",
-    "        \"trigger_count\": 0.7,  # default to 0.7, or your pre-set number\n",
-    "        \"broadcast\": True,  # the compressed history will be broadcast to the sender. This is not used in groupchat.\n",
-    "\n",
-    "        # the following settings are for this mode only\n",
-    "        \"leave_last_n\": 2,  # leave the last n messages in the history uncompressed\n",
-    "        \"verbose\": False,  # if True, print the content before and after compression\n",
-    "    }\n",
-    "    ```\n",
-    "\n",
-    "    Currently, the compression logic is as follows:\n",
-    "    1. The first user message (as well as any system prompts) is always kept as is, and the rest of the history messages are compressed.\n",
-    "    2. You can choose not to compress the last n messages in the history with \"leave_last_n\".\n",
-    "    3. The summary is performed on a per-message basis, preserving the role of each message (see the compressed content in the example below).\n",
-    "\n",
-    "4. `compress_config={\"mode\": \"CUSTOMIZED\", \"compress_function\": <a customized function for compression>}`: the `compress_function` is called when the trigger count is reached. It should accept a list of messages as input and return a tuple of `(is_success: bool, compressed_messages: List[Dict])`. The whole message history (except the system prompt) is passed in; a minimal sketch of this contract follows below.\n",
-    "\n",
-    "By adjusting `trigger_count`, you can decide when to compress the history messages based on the current token count. If it is a float between 0 and 1, it is interpreted as a ratio of the maximum tokens allowed by the model: for example, if the `AssistantAgent` uses gpt-4 with a maximum of 8192 tokens, then trigger_count = 0.7 * 8192 = 5734.4, which is truncated to 5734. Do not set `trigger_count` to the maximum tokens allowed by the model, since the same LLM is employed for compression and it needs tokens to generate the compressed content.\n",
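-    "\n",
-    "As a minimal sketch of the \"CUSTOMIZED\" contract (the function name and the keep-first-and-last strategy here are illustrative only; Example 3 below shows a complete version used with a real agent):\n",
-    "\n",
-    "```python\n",
-    "from typing import Dict, List, Optional, Tuple\n",
-    "\n",
-    "def keep_recent_messages(messages: List[Dict]) -> Tuple[bool, Optional[List[Dict]]]:\n",
-    "    if len(messages) <= 5:\n",
-    "        # signal that no compression was performed\n",
-    "        return False, None\n",
-    "    # keep the first message and the 4 most recent ones\n",
-    "    return True, messages[:1] + messages[-4:]\n",
-    "```\n",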
-    "\n",
-    "\n",
-    "## Limitations\n",
-    "- For now, the compression feature **is not well-supported for groupchat**. If you initialize a `CompressibleAgent` in a groupchat with compression, the compressed history cannot be broadcast to all the other agents in the groupchat. If you use this feature in groupchat, extra cost will be incurred, since compression is performed on a per-agent basis.\n",
-    "- We do not support async compression for now.\n",
-    "\n",
-    "## Requirements\n",
-    "\n",
-    "````{=mdx}\n",
-    ":::info Requirements\n",
-    "Install `pyautogen`:\n",
-    "```bash\n",
-    "pip install pyautogen\n",
-    "```\n",
-    "\n",
-    "For more information, please refer to the [installation guide](/docs/installation/).\n",
-    ":::\n",
-    "````"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Set your API Endpoint\n",
-    "\n",
-    "The [`config_list_from_json`](https://microsoft.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# define functions according to the function description\n",
-    "from IPython import get_ipython\n",
-    "\n",
-    "import autogen\n",
-    "from autogen.agentchat.contrib.compressible_agent import CompressibleAgent\n",
-    "from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent\n",
-    "\n",
-    "config_list = autogen.config_list_from_json(\n",
-    "    \"OAI_CONFIG_LIST\",\n",
-    "    filter_dict={\n",
-    "        \"model\": [\"gpt-4-1106-preview\"],\n",
-    "    },\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "````{=mdx}\n",
-    ":::tip\n",
-    "Learn more about configuring LLMs for agents [here](/docs/topics/llm_configuration).\n",
-    ":::\n",
-    "````"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Example 1\n",
-    "This example is from [agentchat_MathChat.ipynb](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_MathChat.ipynb): compression with code execution.\n",
-    "\n",
-    "You must set the `model` field in `llm_config`, as it will be used to calculate the token usage.\n",
-    "\n",
-    "Note: we set `trigger_count=600` and `leave_last_n=2`. In this example, we set a low trigger_count to demonstrate the compression feature.\n",
-    "The token count after compression is still bigger than the trigger count, mainly because the trigger count is low and the first and last 2 messages are not compressed. Thus, compression is performed at each turn. In practice, you want to set the trigger_count to a larger number and choose `leave_last_n` appropriately to avoid compressing at every turn.
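\n",
-    "\n",
-    "If you are unsure what absolute `trigger_count` suits your workload, you can first measure how many tokens your typical histories hold. A small sketch, assuming the `autogen.token_count_utils.count_token` helper is available (the sample history is made up):\n",
-    "\n",
-    "```python\n",
-    "from autogen import token_count_utils\n",
-    "\n",
-    "history = [{'role': 'user', 'content': 'plot and save a graph of x^2 from -10 to 10'}]\n",
-    "total = sum(token_count_utils.count_token(msg['content']) for msg in history)\n",
-    "print(f'{total} tokens currently in the history')\n",
-    "```\n",
-    "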
\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33mmathproxyagent\u001b[0m (to assistant):\n", - "\n", - "Let's use Python to solve a math problem.\n", - "\n", - "Query requirements:\n", - "You should always use the 'print' function for the output and use fractions/radical forms instead of decimals.\n", - "You can use packages like sympy to help you.\n", - "You must follow the formats below to write your code:\n", - "```python\n", - "# your code\n", - "```\n", - "\n", - "First state the key idea to solve the problem. You may choose from three ways to solve the problem:\n", - "Case 1: If the problem can be solved with Python code directly, please write a program to solve it. You can enumerate all possible arrangements if needed.\n", - "Case 2: If the problem is mostly reasoning, you can solve it by yourself directly.\n", - "Case 3: If the problem cannot be handled in the above two ways, please follow this process:\n", - "1. Solve the problem step by step (do not over-divide the steps).\n", - "2. Take out any queries that can be asked through Python (for example, any calculations or equations that can be calculated).\n", - "3. Wait for me to give the results.\n", - "4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.\n", - "\n", - "After all the queries are run and you get the answer, put the answer in \\boxed{}.\n", - "\n", - "Problem:\n", - "Find all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. Express your answer in interval notation.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33massistant\u001b[0m (to mathproxyagent):\n", - "\n", - "Key Idea:\n", - "To solve this inequality $(2x + 10)(x + 3) < (3x + 9)(x + 8)$, we'll first expand both sides of the inequality, then collect all terms on one side to form a quadratic inequality. After simplifying, we will factor the quadratic expression if possible. Then, we'll determine the critical points of the inequality by setting the factors to zero. Finally, we'll use a sign chart or test values within intervals determined by the critical points to find the intervals where the original inequality is satisfied.\n", - "\n", - "Case 1 applies here, so I will write a Python program using sympy that simplifies the inequality, factors the resulting expression, solves for the critical points, and then determines the intervals where the inequality holds true.\n", - "\n", - "```python\n", - "from sympy import symbols, solve, simplify\n", - "\n", - "# Define the variable\n", - "x = symbols('x')\n", - "\n", - "# Given inequality\n", - "lhs = (2*x + 10)*(x + 3)\n", - "rhs = (3*x + 9)*(x + 8)\n", - "\n", - "# Move all terms to one side of the inequality\n", - "inequality = simplify(lhs - rhs) < 0\n", - "\n", - "# Solve the inequality\n", - "solutions = solve(inequality, x)\n", - "\n", - "# Print the solution using interval notation\n", - "print(solutions)\n", - "```\n", - "\n", - "Running this code will provide us with the solution in interval notation. 
We'll express the final answer in the requested format afterward.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mmathproxyagent\u001b[0m (to assistant):\n", - "\n", - "((-oo < x) & (x < -14)) | ((-3 < x) & (x < oo))\n", - "\n", - "--------------------------------------------------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Warning: Compression skipped at trigger count threshold. The first msg and last 2 msgs will not be compressed. current msg count: 3. Consider raising trigger_count.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33massistant\u001b[0m (to mathproxyagent):\n", - "\n", - "The solution obtained from running the Python code suggests that the values of $x$ that satisfy the inequality $(2x + 10)(x + 3) < (3x + 9)(x + 8)$ are in the intervals $(-\\infty, -14)$ and $(-3, \\infty)$.\n", - "\n", - "Therefore, the answer in interval notation is:\n", - "\n", - "$$\n", - "\\boxed{(-\\infty, -14) \\cup (-3, \\infty)}\n", - "$$\n", - "\n", - "--------------------------------------------------------------------------------\n" - ] - }, - { - "data": { - "text/plain": [ - "ChatResult(chat_id=None, chat_history=[{'content': \"Let's use Python to solve a math problem.\\n\\nQuery requirements:\\nYou should always use the 'print' function for the output and use fractions/radical forms instead of decimals.\\nYou can use packages like sympy to help you.\\nYou must follow the formats below to write your code:\\n```python\\n# your code\\n```\\n\\nFirst state the key idea to solve the problem. You may choose from three ways to solve the problem:\\nCase 1: If the problem can be solved with Python code directly, please write a program to solve it. You can enumerate all possible arrangements if needed.\\nCase 2: If the problem is mostly reasoning, you can solve it by yourself directly.\\nCase 3: If the problem cannot be handled in the above two ways, please follow this process:\\n1. Solve the problem step by step (do not over-divide the steps).\\n2. Take out any queries that can be asked through Python (for example, any calculations or equations that can be calculated).\\n3. Wait for me to give the results.\\n4. Continue if you think the result is correct. If the result is invalid or unexpected, please correct your query or reasoning.\\n\\nAfter all the queries are run and you get the answer, put the answer in \\\\boxed{}.\\n\\nProblem:\\nFind all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. Express your answer in interval notation.\", 'role': 'assistant'}, {'content': \"Key Idea:\\nTo solve this inequality $(2x + 10)(x + 3) < (3x + 9)(x + 8)$, we'll first expand both sides of the inequality, then collect all terms on one side to form a quadratic inequality. After simplifying, we will factor the quadratic expression if possible. Then, we'll determine the critical points of the inequality by setting the factors to zero. 
Finally, we'll use a sign chart or test values within intervals determined by the critical points to find the intervals where the original inequality is satisfied.\\n\\nCase 1 applies here, so I will write a Python program using sympy that simplifies the inequality, factors the resulting expression, solves for the critical points, and then determines the intervals where the inequality holds true.\\n\\n```python\\nfrom sympy import symbols, solve, simplify\\n\\n# Define the variable\\nx = symbols('x')\\n\\n# Given inequality\\nlhs = (2*x + 10)*(x + 3)\\nrhs = (3*x + 9)*(x + 8)\\n\\n# Move all terms to one side of the inequality\\ninequality = simplify(lhs - rhs) < 0\\n\\n# Solve the inequality\\nsolutions = solve(inequality, x)\\n\\n# Print the solution using interval notation\\nprint(solutions)\\n```\\n\\nRunning this code will provide us with the solution in interval notation. We'll express the final answer in the requested format afterward.\", 'role': 'user'}, {'content': '((-oo < x) & (x < -14)) | ((-3 < x) & (x < oo))', 'role': 'assistant'}, {'content': 'The solution obtained from running the Python code suggests that the values of $x$ that satisfy the inequality $(2x + 10)(x + 3) < (3x + 9)(x + 8)$ are in the intervals $(-\\\\infty, -14)$ and $(-3, \\\\infty)$.\\n\\nTherefore, the answer in interval notation is:\\n\\n$$\\n\\\\boxed{(-\\\\infty, -14) \\\\cup (-3, \\\\infty)}\\n$$', 'role': 'user'}], summary='The solution obtained from running the Python code suggests that the values of $x$ that satisfy the inequality $(2x + 10)(x + 3) < (3x + 9)(x + 8)$ are in the intervals $(-\\\\infty, -14)$ and $(-3, \\\\infty)$.\\n\\nTherefore, the answer in interval notation is:\\n\\n$$\\n\\\\boxed{(-\\\\infty, -14) \\\\cup (-3, \\\\infty)}\\n$$', cost=({'total_cost': 0.052199999999999996, 'gpt-4': {'cost': 0.052199999999999996, 'prompt_tokens': 954, 'completion_tokens': 393, 'total_tokens': 1347}}, {'total_cost': 0.052199999999999996, 'gpt-4': {'cost': 0.052199999999999996, 'prompt_tokens': 954, 'completion_tokens': 393, 'total_tokens': 1347}}), human_input=[])" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# 1. replace AssistantAgent with CompressibleAgent\n", - "assistant = CompressibleAgent(\n", - " name=\"assistant\",\n", - " system_message=\"You are a helpful assistant.\",\n", - " llm_config={\n", - " \"timeout\": 600,\n", - " \"cache_seed\": 42,\n", - " \"config_list\": config_list,\n", - " \"model\": \"gpt-4-1106-preview\", # you must set the model field in llm_config, as it will be used to calculate the token usage.\n", - " },\n", - " compress_config={\n", - " \"mode\": \"COMPRESS\",\n", - " \"trigger_count\": 600, # set this to a large number for less frequent compression\n", - " \"verbose\": True, # to allow printing of compression information: context before and after compression\n", - " \"leave_last_n\": 2,\n", - " },\n", - ")\n", - "\n", - "# 2. create the MathUserProxyAgent instance named \"mathproxyagent\"\n", - "mathproxyagent = MathUserProxyAgent(\n", - " name=\"mathproxyagent\",\n", - " human_input_mode=\"NEVER\",\n", - " code_execution_config={\n", - " \"use_docker\": False\n", - " }, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n", - " max_consecutive_auto_reply=5,\n", - ")\n", - "math_problem = (\n", - " \"Find all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. 
Express your answer in interval notation.\"\n", - ")\n", - "mathproxyagent.initiate_chat(assistant, message=mathproxyagent.message_generator, problem=math_problem)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Example 2\n", - "This example is from [agentchat_function_call.ipynb](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call.ipynb). Compression with function calls. " - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", - "\n", - "Draw two agents chatting with each other with an example dialog. Don't add plt.show().\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", - "\n", - "\u001b[32m***** Suggested function Call: python *****\u001b[0m\n", - "Arguments: \n", - "{\n", - " \"cell\": \"import matplotlib.pyplot as plt\\nimport numpy as np\\n\\nfig, ax = plt.subplots()\\n\\n# Define the agents as circles\\nagent1 = plt.Circle((0.4, 0.5), 0.1, color='blue')\\nagent2 = plt.Circle((0.6, 0.5), 0.1, color='red')\\n\\n# Draw the agents\\nax.add_artist(agent1)\\nax.add_artist(agent2)\\n\\n# Example dialog boxes\\nplt.text(0.28, 0.6, \\\"Hello!\\\", fontsize=12, bbox=dict(facecolor='white', alpha=0.5))\\nplt.text(0.58, 0.6, \\\"Hi there!\\\", fontsize=12, bbox=dict(facecolor='white', alpha=0.5))\\n\\n# Set the limits and remove axes\\nax.set_xlim(0, 1)\\nax.set_ylim(0, 1)\\nax.axis('off')\\n\"\n", - "}\n", - "\u001b[32m*******************************************\u001b[0m\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[35m\n", - ">>>>>>>> EXECUTING FUNCTION python...\u001b[0m\n" - ] - }, - { - "data": { - "text/plain": [ - "(0.0, 1.0, 0.0, 1.0)" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAgMAAAGFCAYAAABg2vAPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAdR0lEQVR4nO3de3SU9Z3H8c8zMyEkIjcN18YGAkGE2iO4si1QoSpC16VILSq6HjheIko99ZYK1kUUQZG2dk2VrOi6eNl1o1h6WkWqBRQQXStdFQ0GMRQpYkAS0IEwycz+8esQQhBzmzxP8n2/zomcTJLJb5xn8rznufweL5FIJAQAAMwK+T0AAADgL2IAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMC7i9wDQdlVWVioajfo9jFaTmZmpLl26+D2Mds3aMpVqLLNoKGIATVJZWanCwkLFYjG/h9Jq0tLSNHPmTP64pojFZSrVWGbRUMQAmiQajSoWi2ny5MnKysryezgpV15ermXLlikajfKHNUWsLVOpxjKLxiAG0CxZWVnq3bu338NAO8IyBbQ+DiAEAMA4YgAAAOOIAQAAjCMG0OZMmzZNOTk5dW7zPE933nmnL+NBcOXk5GjatGlN/vlp06apU6dOLTcgIKCIAaTM448/Ls/z9NZbbx3z62PGjNHQoUNbeVT1rV69Wp7nafXq1X4PBceRquUpGo3qzjvvbHPPPwGMlsTZBADarc2bNysUOv57nmg0qrlz50pyQQFYxJYBAO1Wenq60tLS/B7GcSUSCR04cMDvYcA4YgCB8uSTT2r48OHKyMhQ9+7ddckll2j79u1Nuq+NGzdqwoQJ6ty5szp16qRzzjlHGzZsaOERI8i+7piBsrKywxMczZ07V57nHXPz+44dOzRp0iR16tRJWVlZuuWWW1RTU1Pne+LxuB544AENGTJEHTt2VM+ePZWfn6+9e/fWG9MFF1ygl156SWeeeaYyMjJUVFQkSaqoqNBPf/pTZWdnKz09XQMGDNB9992neDze/P8ZwHGwmwApV1lZqd27d9e7/ehpZ++55x7dcccdmjJliq666iqVl5frwQcf1Pe+9z1t3LhRXbt2bfDv3LRpk0aPHq3OnTuroKBAaWlpKioq0pgxY7RmzRqNGDGiuQ8LPmno8tQQWVlZevjhhzVjxgxdeOGFmjx5siTp9NNPP/w9NTU1Ov/88zVixAgtWrRIL7/8sn7xi18oNzdXM2bMOPx9+fn5evzxxzV9+nTdcMMN+vjjj1VYWKiNGzdq3bp1dbZQbN68WZdeeqny8/N19dVXa9CgQYpGozr77LO1Y8cO5efn65RTTtH69es1a9Ys7dy5Uw888ECjHx/QUMQAUu7cc8/9yq8NGTJEkrRt2zbNmTNH8+bN0+zZsw9/ffLkyTrjjDP00EMP1bn96/z85z9XLBbT2rVr1b9/f0nSFVdcoUGDBqmgoEBr1qxp4qOB3xqyPDXUCSecoIsuukgzZszQ6aefrssvv7ze9xw8eFAXX3yx7rjjDknStddeq2HDhunRRx89HANr167VkiVL9NRTT2nq1KmHf3bs2LEaP368iouL69y+ZcsWrVixQueff/7h2+bNm6ePPvpIGzdu1MCBAyW5wOjTp4/uv/9+3XzzzcrOzm7U4wMaihhAyv3mN79RXl5evdtvvvnmw5taly1bpng8rilTptR519erVy8NHDhQq1atanAM1NTUaOXKlZo0adLhEJCk3r17a+rUqXrkkUe0b98+de7cuZmPDH5oyPLU0q699to6n48ePVpPPPHE4c+Li4vVpUsXnXfeeXWW3+HDh6tTp05atWpVnRjo169fnRBI3sfo0aPVrVu3Ovdx7rnn6t5779Wrr76qyy67rKUfGiCJGEArOOuss3TmmWfWu/3IP3qlpaVKJBKH3xEdrTEHgZWXlysajWrQoEH1vjZ48GDF43Ft37690e8iEQwNWZ5aUseOHetdOKlbt251jgUoLS1VZWWlevToccz7+Oyzz+p83q9fv3rfU1paqnfeeecrL9J09H0ALYkYQCDE43F5nqcXX3xR4XC43teZ+AV+OdbyeLR4PK4ePXroqaeeOubXj17BZ2RkHPM+zjvvPBUUFBzzPo61NQRoKcQAAiE3N1eJREL9+vVr9h+9rKwsZWZmavPmzfW+VlJSolAoVGff65gxY5RIJJr1O9F2eZ7X7PvIzc3Vyy+/rJEjRx5zRd/Q+/jiiy+Oe0zEkVhm0ZI4tRCBMHnyZIXDYc2dO7feH7lEIqE9e/Y0+L7C4bDGjRun5cuXq6ys7PDtu3bt0tNPP61Ro0bVOV6gsrJSJSUlikajzX4caHsyMzMludP6mmrKlCmqqanR3XffXe9r1dXVDbrvKVOm6PXXX9dLL71U72sVFRWqrq4+/HksFlNJSUlKdovAJrYMIBByc3M1b948zZo1S2VlZZo0aZJOPPFEffzxx3r++ed1zTXX6JZbbmnw/c2bN09//OMfNWrUKF133XWKRCIqKipSVVWVFi5cWOd7n3/+eU2fPl2rVq065gx0VVXSrl1SWZn03HNSRoZ06JD7qK6W0tKkDh2k9HT3b6dO0oAB7iM9vZn/Y1Df559LJSXuCaipqf3Yt086cED63/+Vwm
EpEpFiMXdbVdVXPhkZGRk67bTT9MwzzygvL0/du3fX0KFDGzW18dlnn638/HwtWLBAf/nLXzRu3DilpaWptLRUxcXF+vWvf62LLrrouPdx66236ne/+50uuOACTZs2TcOHD9eXX36pd999V88++6zKysp08sknS3LzHgwePFhz5sxhSmK0CGIAgXHbbbcpLy9Pv/rVrw5PD5udna1x48Zp4sSJjbqvIUOG6LXXXtOsWbO0YMECxeNxjRgxQk8++eRXzjFQWSlt3izt2eM+ysul3bulaFTav19avVr6z/903+t5UnKW20Si9uNInif17SsNHiydeqqUl1f7kZ3t1lc4hnhcqqiofSL27HFPxIoV7uvPPiv16VP7/cnN/Hv3uifrhRdqn4wvv3RP6oIFUmamdPLJUlaWdNJJtR/dumnJkiX6yU9+ohtvvFGHDh3SnDlzGn2dg8WLF2v48OEqKirS7NmzFYlElJOTo8svv1wjR4782p/PzMzUmjVrNH/+fBUXF2vp0qXq3Lmz8vLyNHfuXHXp0qVR4wEaw0uw4wlNsHPnThUVFSk/P1+9e/f2eziNlkhIn37q1hPvv+9W/MlXgue5jyMnfdu/f6eee65I27blS2r8401Lc1sRkr8jLU0aMUKaPFn64Q+lI86AtGnrVu184gkV/eEPyh8zRr3/vun+cHW15CmDoVDdevM8qUcPV22DBkm9etUGRhvW1l+jaF1sGYAZNTXStm1uC/MHH7h3+6FQ3ZW+dOx3+c119OR4sZi0fr20bp10001uPfSjH7kwGD68XayLji+RkN56S1q+3O17KSlxt+fk1F3xJxItGwLSsZ/wXbtcEa5eLZ14Yu3mnG9+k004MIEYQLtWVSWVlrp1zYcfut3MRwaAn1O+H/m7P/jAbcmeN0/q2bN2i8H3v++2IrQLsZj0pz+5AFi2zK2Aw+H6K3+/JJ+Q/ftdqLz5pjsIJC/PhcHAgRwEgnaLGEC7FI1Kb7whbdjggiAoAXA8yXXirl3SI49IDz8sfeMb0uzZ0vTpUseO/o6vyQ4elB57TJo/X9qxwx3YlzwyPkUzBjZbciE5dMjtR3rvPRcC//iPbv9OcjcG0E5waiHalf37pZdekn75S+nVV10ISMENgK+SXFfu2CFdf7074HDRIumLL/wdV6Ps3+8GnZ0tzZwp/e1v7vYjTpFrE5ILT1WVW6h++Utp5Ur3+IB2gi0DaBcqKqS1a6W333aft7WV/1dJbjXfvVsqKHC7EW68UbrhBqlbN3/H9pX27pX+7d/cSnP/fn83/be0RMLFzIYN7mPYMGnUKKkRV9QEgogtA2jTqqvdMV8PPuhCIB5vPyFwtETCnf54111Sbq47zTFQ69lEwg0qN9cNct++gA2wBSUXtLffdgvf6tVtb4sHcARiAG3WRx9JhYXSmjVu13N7jYCjJU/DnzbNvSndtMnvEcntUx81yg2qosLWk1FT4xbCwkK3UAJtELsJ0Czl5eWt/jurqqRXXnHHdXle67z5jEZb/3EeT/Ixv/mm9O1vS//6r+5Aw0hrv6Krq92BgXfdVXs+ZDOfkPK2Oi30/v3SQw9Jp50mnXOO72ce+PHaRNvFpENoksrKShUWFip29An0KbZ7t7RqlTtboLWX3MrKNFVUzJQUvJngPM8d6P4//+POQGgVn3wiTZni9p23wJNRKamwa1fF2vpMe57nzjYYO9bNeOijtLQ0zZw5k9kL8bWIATRZZWVlq17cZ+lS6fbb3XrHn63QmQpiCCRFIm6+nOJi98Y0pV55Rfrxj9274RbcV14pqY1uF6grFHJRMH++9C//4tswMjMzCQE0CDGAwIvHpZ/9zJ2lhuNLXi/h8cdTuA5autRNfCDZOTagOW69VbrvPgPTSqItIwYQaLGYdOWV0hNP+D2StmfRIunmm1Nwp7fe2sJ3asAVV0hLlrSj6STR3hADCKyDB6ULL3STCLGUNk1BgXTvvS3wpjSRcJtn7r+/RcZljudJ48e7aZjb7FSSaM84tRCBVFMjXXqpm+iNEGi6hQvdbutmu+ceQqA5EglXtVOnBncKZpjGlgEETiLhZq99+GFCoKU89ljtbv4m/fCVV7boeMzyPOm669xERRxDgAAhBhA4994rzZrl9yjal1BI+v3vpQkTGvmDL7wg/fM/c6BgS1uwQLrtNr9HARxGDCBQVq1yp8WxVLYsz3OnHb73nrtuUINs3y4NGeKujsQT0rI8z13OecwYv0cCSOKYAQTI55+74wTYetryEgk3UdNllzVwl3VNjdu/feAAIZAKnucW9s8/93skgCRiAAGRSLjd0rt3s0U6Vaqrpddea+BxgAsXustAcvGd1IjHpfJy6eqriS0EArsJEAi//a07jRCpF4lIJSXu4oLHtGWLNHgwIdBann9emjTJ71HAOLYMwHeHDkk33VQ7ex5Sr6CgqV9EiwqF3MxQrXyND+Bo/PmF74qKpLIydg+0lupqN/fN+vXH+OK6de6dKlsFWkc8Lm3d6l4EgI/YTQBfRaNS375SRYXfI7ElHJZGjHDr/jpGjpTeeIOJcVpbt27uKpCZmX6PBEaxZQC+euYZQsAPNTVuy8A77xxx4//9n7uREGh9e/e6608DPiEG4KvCQo4V8EskIi1efMQNixe7G9H6QiE3KyHgE3YTwDd//rN05pl+j8K2jAxp1y7pRO2XevZ08wrAP3/+szRsmN+jgEG8J4Nviot5I+q3Awfc9XO0YgUh4LdIxL0oAB8QA/DNihUctO63SER6+WW5/1Bm/qqudi8KwAfsJoAv9uyRsrKYfC0IsrOlvyay3dHs8JfnuZkJTzrJ75HAGLYMwBerVxMCQRHaXkYIBEUiIa1Z4/coYBAxAF+UlLBVOihOVYnfQ0BScq5ooJURA/DF1q1+jwBJudqqhLhUZGDw4oAPiAH4orSUgweDIje0VTUhNtMEQnW1e3EArYy/APDFtm1+jwBJfeOfyBOzDgYGLw74gC0D8MWhQ36PAEkdVKWQuEpUYPDigA+IAfiCKxQGByEQMLw44ANiAL5IS/N7BEg6pA4cQBgkvDjgA2IAvuje3e8RIKnS66a4F/Z7GEjixQEfEAPwxaBBXK0wKLaqv0IeM0AFQijkXhxAK+PPMXzRv78U5s1oIGxJ9FcoztkEgRAOuxcH0MqIAfgiN1eKxfweBSTpI+X6PQQkxWLEAHxBDMAX3/mO3yNAUmn6t5To2NHvYSDpu9/1ewQwiBiAL4YOlU4+2e9RIBSSRn2/g7wxYziIIwiysqQhQ/weBQzi1Q9feJ40fjwXK/JbIiGdf77+/h/4KhJxLwqP0zzR+ogB+Oaf/onrE/gtkZAmTJD0gx8w2Y3fqqvd8wD4wEskuKo8/HHwoNSrl1RZ6fdIbAqH3e7pV1/9+w2jR0vr1xMFfunaVfr0Uyk93e+RwCC2DMA3HTtKV1/NKYZ+qamRrr/+iBuuv54Q8Es47F4MhAB8wpYB+GrLFmngQL9HYVP37tLOnVKHDn+/oapK6t1b2rvX13GZVVoqDRjg9yhgFFsG4KsBA6RLLuFAw
tbmedLttx8RApJ7V3r77RzA1toiEenSSwkB+IotA/BdWZnbOsDBhK0jFJL69nVvROttla6qciulv/2NXQatJS1N+vBDKSfH75HAMLYMwHc5OdKNN3Kae2uJx6VFi75i93R6uvsiIdA6QiG38BMC8BlbBhAIFRXSqadK5eWsh1IpEnGzP65Zc5y9AYmEdPbZ0uuvs7kmlUIhN8nQ5s1Sly5+jwbG8V4MgdC1q/Rf/+XWQ0gNz5MyM6WnnvqawwI8z31TRgbHD6RSIiH9938TAggEYgCBMXas9LOfsf5JlURCeuwxKTu7Ad+cne2+mTpLDc+TbrtNGjPG75EAkthNgICJxaSRI6WNG9lC3ZI8T7rqKunf/72RP3jNNdKSJURBSwqHpWHDpHXr3MGDQAAQAwic8nLprLOkTz4hCFpCOCydc470+983Yd0Ti7l5o//0JzdLEZonEnFbXd54wx0vAAQEMYBA2rpV+od/cFMVsw5qukhE+ta33JTDnTo18U6++MJNVfzuuzwZzREOu4Nj3nxT6t/f79EAdXDMAAKpf39p5Uo3ZTHTFTdN8k3oihXNCAHJ/fCKFdIpp/BkNFU47BbmlSsJAQQSMYDAGj7c7Vbt1o0ZChsrHJaGDJE2bJB69GiBO+zZ051qOHQoQdBYkYhbiNetc8cKAAFEDCDQvv1tt3v1G99gHdRQoZDbqv/aay0UAkk9e7r9DaNHM0NUQ0UibuF98023MAMBxSsagde/vwuCM85gHdQQF1/stuqfeGIK7rxzZ3fnF1+cgjtvZzzPLbRvvCH16+f3aIDj4k8r2oQePdxW1hkz3OfMRVBXOOzehBYWuvmCUnol3PR090sKC90vZZNNXcmF8/rrpbVrW3jzDJAanE2ANmfZMmn6dCka5dRDyW0tycmRiot92CX99tvSj3/srjbFPNIujk44QfqP/5AuvNDv0QANxpYBtDmTJ0vvvy9NnOg+t7rrIBJxj/2mm6R33vHp2LRhw9wvT15pyupWguRCOHGitGkTIYA2hy0DaNNefFG69lpp+3Y7k+R5nnus3/mOVFTk5hEIhHffdTMWbthQO0gLPM+ddrl4sTR+vN+jAZrE6HsqtBcTJkglJdLcue6Aufa8lSD52Pr0cVuh164NUAhIbjDr1rnB9enjbmvPB3eEQm6hmztX+uADQgBtGlsG0G7s2yc99JC0cKG7JHJ7WbJDIbc7PjdXuuMOaerUNjClfSwmPf20dPfd0kcf1T6I9sDz3LwBBQXuiNbOnf0eEdBsxADanWhUevRRaf586dNP2+56KBx2s/8OHSrNmeN2Q7e5XfI1Ne6Iz7vukt57r/ZBtTXJhahXL2n2bOnKK931oIF2ghhAu3XokPTkk9LSpW4Cnng8+OuiSMSdIdGxo9sFctVV7t82v7U9kXAHeCxZ4v49eLD2wQZVcmFJzuJ0xRXS5ZdLHTr4PTKgxREDMGHvXumFF6Tf/lb6wx+kAweCsy5KrnNOOsmdKfHDH7qrDHbs6PfIUuTgQemVV6Tly91Wgz17gvNkJMeRkeGu1jhpkvSDH7jdAkA7RgzAnKoqadWq2nXRZ5+5d97J9UAqXxHJs+9iMff5wIHSj37kAuCss9r3AZDHFI+7qXqXL5eee04qLXW3p6W5Qkrl/p2jn/SePV2NTZwojR2b4pmbgGAhBmBaPO7mzXn7bWnzZvfx/vvSX/9auzvh6BV4Q6Sl1Q2L9HQ3rfKQIVJenvv47nddDOAIpaXS+vXShx+6j02b3PWsq6rc15Mr8MY+GUeGRTjsTgU87TTp1FPdkzFsmLsyVpvfHwM0DTEAHEMs5ibVS66TPvxQ2rJF+vJLt15KfsRibkWfnu52JWdkuIPLBw5065hBg9y/ffsafNffUuJxaccOV2rJJ6O01J0+cuCAOzgk+YSkpdU+IenpbjbAAQNqCywvz03XGPjTMYDWRQwAAGAc71UAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACM+3/SEA9yKFMnowAAAABJRU5ErkJggg==", - "text/plain": [ - "<Figure size 640x480 with 1 Axes>" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", - "\n", - "\u001b[32m***** Response from calling function \"python\" *****\u001b[0m\n", - "(0.0, 1.0, 0.0, 1.0)\n", - "\u001b[32m***************************************************\u001b[0m\n", - "\n", - 
"--------------------------------------------------------------------------------\n", - "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", - "\n", - "The two agents have been drawn, each represented as a circle, and an example of their dialogue is displayed above them. Since `plt.show()` was not to be included, the plot is not displayed here, but the agents along with their dialogue would appear within the figure's coordinate system, which extends from 0 to 1 on both the x and y axes.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", - "\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", - "\n", - "TERMINATE\n", - "\n", - "--------------------------------------------------------------------------------\n" - ] - }, - { - "data": { - "text/plain": [ - "ChatResult(chat_id=None, chat_history=[{'content': \"Draw two agents chatting with each other with an example dialog. Don't add plt.show().\", 'role': 'assistant'}, {'function_call': {'arguments': '{\\n \"cell\": \"import matplotlib.pyplot as plt\\\\nimport numpy as np\\\\n\\\\nfig, ax = plt.subplots()\\\\n\\\\n# Define the agents as circles\\\\nagent1 = plt.Circle((0.4, 0.5), 0.1, color=\\'blue\\')\\\\nagent2 = plt.Circle((0.6, 0.5), 0.1, color=\\'red\\')\\\\n\\\\n# Draw the agents\\\\nax.add_artist(agent1)\\\\nax.add_artist(agent2)\\\\n\\\\n# Example dialog boxes\\\\nplt.text(0.28, 0.6, \\\\\"Hello!\\\\\", fontsize=12, bbox=dict(facecolor=\\'white\\', alpha=0.5))\\\\nplt.text(0.58, 0.6, \\\\\"Hi there!\\\\\", fontsize=12, bbox=dict(facecolor=\\'white\\', alpha=0.5))\\\\n\\\\n# Set the limits and remove axes\\\\nax.set_xlim(0, 1)\\\\nax.set_ylim(0, 1)\\\\nax.axis(\\'off\\')\\\\n\"\\n}', 'name': 'python'}, 'content': None, 'role': 'assistant'}, {'content': '(0.0, 1.0, 0.0, 1.0)', 'name': 'python', 'role': 'function'}, {'content': \"The two agents have been drawn, each represented as a circle, and an example of their dialogue is displayed above them. 
Since `plt.show()` was not to be included, the plot is not displayed here, but the agents along with their dialogue would appear within the figure's coordinate system, which extends from 0 to 1 on both the x and y axes.\", 'role': 'user'}, {'content': '', 'role': 'assistant'}, {'content': 'TERMINATE', 'role': 'user'}], summary='', cost=({'total_cost': 0.04767, 'gpt-4': {'cost': 0.04767, 'prompt_tokens': 973, 'completion_tokens': 308, 'total_tokens': 1281}}, {'total_cost': 0.04767, 'gpt-4': {'cost': 0.04767, 'prompt_tokens': 973, 'completion_tokens': 308, 'total_tokens': 1281}}), human_input=[])" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "llm_config = {\n", - " \"model\": \"gpt-4-1106-preview\",\n", - " \"functions\": [\n", - " {\n", - " \"name\": \"python\",\n", - " \"description\": \"run cell in ipython and return the execution result.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"cell\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Valid Python cell to execute.\",\n", - " }\n", - " },\n", - " \"required\": [\"cell\"],\n", - " },\n", - " },\n", - " {\n", - " \"name\": \"sh\",\n", - " \"description\": \"run a shell script and return the execution result.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"script\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Valid shell script to execute.\",\n", - " }\n", - " },\n", - " \"required\": [\"script\"],\n", - " },\n", - " },\n", - " ],\n", - " \"config_list\": config_list,\n", - " \"timeout\": 120,\n", - "}\n", - "\n", - "chatbot = CompressibleAgent(\n", - " name=\"chatbot\",\n", - " system_message=\"For coding tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.\",\n", - " llm_config=llm_config,\n", - " compress_config={\n", - " \"mode\": \"COMPRESS\",\n", - " \"trigger_count\": 600, # set this to a large number for less frequent compression\n", - " \"verbose\": True, # set this to False to suppress the compression log\n", - " \"leave_last_n\": 2,\n", - " },\n", - ")\n", - "\n", - "# create a UserProxyAgent instance named \"user_proxy\"\n", - "user_proxy = autogen.UserProxyAgent(\n", - " name=\"user_proxy\",\n", - " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", - " human_input_mode=\"NEVER\",\n", - " max_consecutive_auto_reply=10,\n", - " code_execution_config={\n", - " \"work_dir\": \"coding\",\n", - " \"use_docker\": False,\n", - " }, # Please set use_docker=True if docker is available to run the generated code. 
Using docker is safer than running the generated code directly.\n",
-    ")\n",
-    "\n",
-    "\n",
-    "def exec_python(cell):\n",
-    "    # run the cell in the current IPython kernel and capture the result and any errors\n",
-    "    ipython = get_ipython()\n",
-    "    result = ipython.run_cell(cell)\n",
-    "    log = str(result.result)\n",
-    "    if result.error_before_exec is not None:\n",
-    "        log += f\"\\n{result.error_before_exec}\"\n",
-    "    if result.error_in_exec is not None:\n",
-    "        log += f\"\\n{result.error_in_exec}\"\n",
-    "    return log\n",
-    "\n",
-    "\n",
-    "def exec_sh(script):\n",
-    "    # delegate shell scripts to the user proxy's code execution facility\n",
-    "    return user_proxy.execute_code_blocks([(\"sh\", script)])\n",
-    "\n",
-    "\n",
-    "# register the functions\n",
-    "user_proxy.register_function(\n",
-    "    function_map={\n",
-    "        \"python\": exec_python,\n",
-    "        \"sh\": exec_sh,\n",
-    "    }\n",
-    ")\n",
-    "\n",
-    "# start the conversation\n",
-    "user_proxy.initiate_chat(\n",
-    "    chatbot,\n",
-    "    message=\"Draw two agents chatting with each other with an example dialog. Don't add plt.show().\",\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Example 3\n",
-    "This example is from [agentchat_web_info.ipynb](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_web_info.ipynb).\n",
-    "We use this example to demonstrate how to pass in a customized compression function: we pass in a compression function `constrain_num_messages`, which constrains the number of messages to 3 or fewer.\n",
-    "The customized function should accept a list of messages as input and return a tuple of `(is_success: bool, compressed_messages: List[Dict])`."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33muser_proxy\u001b[0m (to assistant):\n",
-      "\n",
-      "Show me the YTD gain of 10 largest technology companies as of today.\n",
-      "\n",
-      "--------------------------------------------------------------------------------\n",
-      "\u001b[33massistant\u001b[0m (to user_proxy):\n",
-      "\n",
-      "To compute the Year-To-Date (YTD) gains of the 10 largest technology companies, I can fetch the latest stock price and the closing price from the last trading day of the previous year. Then calculate the percentage increase for each company.\n",
-      "\n",
-      "First, we should fetch the current stock prices and the closing prices as of the last trading day of the previous year for these companies. For this, we can use a financial data API like Alpha Vantage, Yahoo Finance, or similar, which would require an API key and internet access, but I can't perform actions that require internet access.\n",
-      "\n",
-      "Instead, I will provide you with Python code that you'd need to run on your local machine. This code utilizes the `yfinance` Python library, which is widely used for retrieving historical market data from Yahoo Finance. 
If you don't have `yfinance` installed, you'll need to install it by running `pip install yfinance` in your command line.\n", - "\n", - "Here is the code you'll need to execute:\n", - "\n", - "```python\n", - "# filename: ytd_gains.py\n", - "\n", - "import yfinance as yf\n", - "from datetime import datetime, timedelta\n", - "\n", - "# Define the ticker symbols for the 10 largest tech companies.\n", - "# This is a sample list and may not represent the current top 10 companies.\n", - "# You would need to replace this with the actual tickers of the top 10 tech companies.\n", - "tech_companies = [\"AAPL\", \"MSFT\", \"GOOGL\", \"AMZN\", \"FB\", \"TSLA\", \"NVDA\", \"V\", \"ADBE\", \"INTC\"]\n", - "\n", - "# Compute the last day of the last year\n", - "end_of_last_year = datetime(datetime.now().year - 1, 12, 31)\n", - "\n", - "# Retrieve the data and calculate YTD gain for each company\n", - "ytd_gains = {}\n", - "for symbol in tech_companies:\n", - " try:\n", - " # Fetch historical data\n", - " stock = yf.Ticker(symbol)\n", - " last_price = stock.history(period=\"1d\")['Close'][-1]\n", - " prev_close = stock.history(start=end_of_last_year, end=end_of_last_year + timedelta(days=1))['Close'][0]\n", - "\n", - " # Calculate YTD gain\n", - " ytd_gain = ((last_price - prev_close) / prev_close) * 100\n", - " ytd_gains[symbol] = ytd_gain\n", - " except Exception as e:\n", - " # Handle errors by skipping the company and printing an error message\n", - " print(f\"Error retrieving data for {symbol}: {e}\")\n", - "\n", - "# Print the YTD gains\n", - "for symbol, gain in ytd_gains.items():\n", - " print(f\"{symbol}: {gain:.2f}% YTD Gain\")\n", - "\n", - "```\n", - "\n", - "Make sure that `yfinance` is installed and then run this Python script (`ytd_gains.py`). The script will print out the YTD gains for the listed technology companies as a percentage.\n", - "\n", - "Note that the list of the 10 largest technology companies must be updated to reflect the current market situation. 
If you do not have the updated list, let me know, and I can attempt to retrieve this information for you using different methods.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "Error retrieving data for AAPL: index 0 is out of bounds for axis 0 with size 0\n", - "Error retrieving data for MSFT: index 0 is out of bounds for axis 0 with size 0\n", - "Error retrieving data for GOOGL: index 0 is out of bounds for axis 0 with size 0\n", - "Error retrieving data for AMZN: index 0 is out of bounds for axis 0 with size 0\n", - "Error retrieving data for FB: index -1 is out of bounds for axis 0 with size 0\n", - "Error retrieving data for TSLA: index 0 is out of bounds for axis 0 with size 0\n", - "Error retrieving data for NVDA: index 0 is out of bounds for axis 0 with size 0\n", - "Error retrieving data for V: index 0 is out of bounds for axis 0 with size 0\n", - "Error retrieving data for ADBE: index 0 is out of bounds for axis 0 with size 0\n", - "Error retrieving data for INTC: index 0 is out of bounds for axis 0 with size 0\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "It seems that the script encountered an error when trying to fetch data for the provided ticker symbols. This might be because either the market is closed and the latest data is not yet available, or because the dates specified do not have available data (e.g., the end of last year might have been a weekend or holiday).\n", - "\n", - "Let's adjust the code to be more robust by checking for valid trading days around the end of last year and to handle cases where the stock data might not be available. 
Here's the updated code:\n", - "\n", - "```python\n", - "# filename: ytd_gains.py\n", - "\n", - "import yfinance as yf\n", - "from datetime import datetime\n", - "\n", - "def get_last_valid_trading_day(year):\n", - " last_day = datetime(year, 12, 31)\n", - " # Start moving backwards from December 31st to find the last valid trading day of the year\n", - " while True:\n", - " stock_data = yf.download('SPY', start=last_day.strftime('%Y-%m-%d'), end=(last_day + timedelta(days=1)).strftime('%Y-%m-%d'))\n", - " if not stock_data.empty:\n", - " return last_day\n", - " last_day -= timedelta(days=1)\n", - "\n", - "# Define the ticker symbols for the 10 largest tech companies.\n", - "tech_companies = [\"AAPL\", \"MSFT\", \"GOOGL\", \"AMZN\", \"FB\", \"TSLA\", \"NVDA\", \"V\", \"ADBE\", \"INTC\"]\n", - "\n", - "# Compute the last valid trading day of the last year\n", - "end_of_last_year = get_last_valid_trading_day(datetime.now().year - 1)\n", - "\n", - "# Retrieve the data and calculate YTD gain for each company\n", - "ytd_gains = {}\n", - "for symbol in tech_companies:\n", - " try:\n", - " # Fetch historical data\n", - " stock = yf.Ticker(symbol)\n", - " last_price = stock.history(period='1d')['Close'].iloc[-1]\n", - " prev_close = stock.history(start=end_of_last_year, end=end_of_last_year + timedelta(days=1))['Close'].iloc[0]\n", - "\n", - " # Calculate YTD gain\n", - " ytd_gain = ((last_price - prev_close) / prev_close) * 100\n", - " ytd_gains[symbol] = ytd_gain\n", - " except Exception as e:\n", - " # Handle errors by skipping the company and printing an error message\n", - " print(f\"Error retrieving data for {symbol}: {e}\")\n", - "\n", - "# Print the YTD gains\n", - "for symbol, gain in ytd_gains.items():\n", - " print(f\"{symbol}: {gain:.2f}% YTD Gain\")\n", - "```\n", - "\n", - "Please execute the updated `ytd_gains.py` script. The function `get_last_valid_trading_day` will find the last valid trading day of the previous year to handle non-trading days correctly, and the fetch for current price will get the latest available data point even if the market has not yet closed for today.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 1 (execution failed)\n", - "Code output: \n", - "Traceback (most recent call last):\n", - " File \"ytd_gains.py\", line 19, in <module>\n", - " end_of_last_year = get_last_valid_trading_day(datetime.now().year - 1)\n", - " File \"ytd_gains.py\", line 10, in get_last_valid_trading_day\n", - " stock_data = yf.download('SPY', start=last_day.strftime('%Y-%m-%d'), end=(last_day + timedelta(days=1)).strftime('%Y-%m-%d'))\n", - "NameError: name 'timedelta' is not defined\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[35mToken Count (including 468 tokens from system msg and function descriptions). Before compression : 2115 | After: 1223\u001b[0m\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "It seems I forgot to import the `timedelta` class from the `datetime` module. I will correct this oversight and provide the updated code. 
Here is the corrected code including the missed import:\n", - "\n", - "```python\n", - "# filename: ytd_gains.py\n", - "\n", - "import yfinance as yf\n", - "from datetime import datetime, timedelta\n", - "\n", - "def get_last_valid_trading_day(year):\n", - " last_day = datetime(year, 12, 31)\n", - " # Start moving backwards from December 31st to find the last valid trading day of the year\n", - " while True:\n", - " stock_data = yf.download('SPY', start=last_day.strftime('%Y-%m-%d'), end=(last_day + timedelta(days=1)).strftime('%Y-%m-%d'))\n", - " if not stock_data.empty:\n", - " return last_day\n", - " last_day -= timedelta(days=1)\n", - "\n", - "# Define the ticker symbols for the 10 largest tech companies.\n", - "tech_companies = [\"AAPL\", \"MSFT\", \"GOOGL\", \"AMZN\", \"FB\", \"TSLA\", \"NVDA\", \"V\", \"ADBE\", \"INTC\"]\n", - "\n", - "# Compute the last valid trading day of the last year\n", - "end_of_last_year = get_last_valid_trading_day(datetime.now().year - 1)\n", - "\n", - "# Retrieve the data and calculate YTD gain for each company\n", - "ytd_gains = {}\n", - "for symbol in tech_companies:\n", - " try:\n", - " # Fetch historical data\n", - " stock = yf.Ticker(symbol)\n", - " last_price = stock.history(period='1d')['Close'].iloc[-1]\n", - " prev_close = stock.history(start=end_of_last_year, end=end_of_last_year + timedelta(days=1))['Close'].iloc[0]\n", - "\n", - " # Calculate YTD gain\n", - " ytd_gain = ((last_price - prev_close) / prev_close) * 100\n", - " ytd_gains[symbol] = ytd_gain\n", - " except Exception as e:\n", - " # Handle errors by skipping the company and printing an error message\n", - " print(f\"Error retrieving data for {symbol}: {e}\")\n", - "\n", - "# Print the YTD gains\n", - "for symbol, gain in ytd_gains.items():\n", - " print(f\"{symbol}: {gain:.2f}% YTD Gain\")\n", - "```\n", - "\n", - "Please execute the corrected `ytd_gains.py` script. This should resolve the `NameError` and provide you with the YTD gain for each company.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "Error retrieving data for FB: single positional indexer is out-of-bounds\n", - "AAPL: -6.57% YTD Gain\n", - "MSFT: 10.70% YTD Gain\n", - "GOOGL: -1.83% YTD Gain\n", - "AMZN: 17.30% YTD Gain\n", - "TSLA: -18.45% YTD Gain\n", - "NVDA: 66.15% YTD Gain\n", - "V: 8.96% YTD Gain\n", - "ADBE: -4.30% YTD Gain\n", - "INTC: -12.54% YTD Gain\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[35mToken Count (including 468 tokens from system msg and function descriptions). Before compression : 1865 | After: 1133\u001b[0m\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "It looks like an error occurred while retrieving data for FB, which stands for Facebook. This can happen if the ticker symbol has changed or if there was an issue with the data for the date range we were trying to fetch. Facebook, Inc. 
has been renamed to Meta Platforms, Inc., and its stock symbol is now \"META.\" \n", - "\n", - "I will update the list of technology companies to include \"META\" instead of \"FB\" and run the code again:\n", - "\n", - "```python\n", - "# filename: ytd_gains_updated.py\n", - "\n", - "import yfinance as yf\n", - "from datetime import datetime, timedelta\n", - "\n", - "def get_last_valid_trading_day(year):\n", - " last_day = datetime(year, 12, 31)\n", - " # Start moving backwards from December 31st to find the last valid trading day of the year\n", - " while True:\n", - " stock_data = yf.download('SPY', start=last_day.strftime('%Y-%m-%d'), end=(last_day + timedelta(days=1)).strftime('%Y-%m-%d'))\n", - " if not stock_data.empty:\n", - " return last_day\n", - " last_day -= timedelta(days=1)\n", - "\n", - "# Define the ticker symbols for the 10 largest tech companies with FB replaced by META.\n", - "tech_companies = [\"AAPL\", \"MSFT\", \"GOOGL\", \"AMZN\", \"META\", \"TSLA\", \"NVDA\", \"V\", \"ADBE\", \"INTC\"]\n", - "\n", - "# Compute the last valid trading day of the last year\n", - "end_of_last_year = get_last_valid_trading_day(datetime.now().year - 1)\n", - "\n", - "# Retrieve the data and calculate YTD gain for each company\n", - "ytd_gains = {}\n", - "for symbol in tech_companies:\n", - " try:\n", - " # Fetch historical data\n", - " stock = yf.Ticker(symbol)\n", - " last_price = stock.history(period='1d')['Close'].iloc[-1]\n", - " prev_close = stock.history(start=end_of_last_year, end=end_of_last_year + timedelta(days=1))['Close'].iloc[0]\n", - "\n", - " # Calculate YTD gain\n", - " ytd_gain = ((last_price - prev_close) / prev_close) * 100\n", - " ytd_gains[symbol] = ytd_gain\n", - " except Exception as e:\n", - " # Handle errors by skipping the company and printing an error message\n", - " print(f\"Error retrieving data for {symbol}: {e}\")\n", - "\n", - "# Print the YTD gains\n", - "for symbol, gain in ytd_gains.items():\n", - " print(f\"{symbol}: {gain:.2f}% YTD Gain\")\n", - "```\n", - "\n", - "Please execute the updated `ytd_gains_updated.py` script to get the YTD gain of Meta Platforms, Inc. along with the other technology companies.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", - "\u001b[31m\n", - ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to assistant):\n", - "\n", - "exitcode: 0 (execution succeeded)\n", - "Code output: \n", - "AAPL: -6.57% YTD Gain\n", - "MSFT: 10.70% YTD Gain\n", - "GOOGL: -1.83% YTD Gain\n", - "AMZN: 17.30% YTD Gain\n", - "META: 42.06% YTD Gain\n", - "TSLA: -18.45% YTD Gain\n", - "NVDA: 66.15% YTD Gain\n", - "V: 8.96% YTD Gain\n", - "ADBE: -4.30% YTD Gain\n", - "INTC: -12.54% YTD Gain\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[35mToken Count (including 468 tokens from system msg and function descriptions). Before compression : 1828 | After: 1186\u001b[0m\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33massistant\u001b[0m (to user_proxy):\n", - "\n", - "The YTD (Year-To-Date) gain for the 10 largest technology companies as of today are as follows:\n", - "\n", - "1. Apple Inc. (AAPL): -6.57% YTD Gain\n", - "2. Microsoft Corporation (MSFT): 10.70% YTD Gain\n", - "3. Alphabet Inc. (GOOGL): -1.83% YTD Gain\n", - "4. Amazon.com, Inc. 
(AMZN): 17.30% YTD Gain\n", - "5. Meta Platforms, Inc. (META, formerly FB): 42.06% YTD Gain\n", - "6. Tesla, Inc. (TSLA): -18.45% YTD Gain\n", - "7. NVIDIA Corporation (NVDA): 66.15% YTD Gain\n", - "8. Visa Inc. (V): 8.96% YTD Gain\n", - "9. Adobe Inc. (ADBE): -4.30% YTD Gain\n", - "10. Intel Corporation (INTC): -12.54% YTD Gain\n", - "\n", - "These YTD gains reflect the percentage change in the stock price of each company from the last trading day of the previous year to the most recent available trading data.\n", - "\n", - "If you need any further assistance, please let me know.\n", - "\n", - "TERMINATE\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n" - ] - }, - { - "data": { - "text/plain": [ - "ChatResult(chat_id=None, chat_history=[{'content': 'Show me the YTD gain of 10 largest technology companies as of today.', 'role': 'assistant'}, {'content': 'It looks like an error occurred while retrieving data for FB, which stands for Facebook. This can happen if the ticker symbol has changed or if there was an issue with the data for the date range we were trying to fetch. Facebook, Inc. has been renamed to Meta Platforms, Inc., and its stock symbol is now \"META.\" \\n\\nI will update the list of technology companies to include \"META\" instead of \"FB\" and run the code again:\\n\\n```python\\n# filename: ytd_gains_updated.py\\n\\nimport yfinance as yf\\nfrom datetime import datetime, timedelta\\n\\ndef get_last_valid_trading_day(year):\\n last_day = datetime(year, 12, 31)\\n # Start moving backwards from December 31st to find the last valid trading day of the year\\n while True:\\n stock_data = yf.download(\\'SPY\\', start=last_day.strftime(\\'%Y-%m-%d\\'), end=(last_day + timedelta(days=1)).strftime(\\'%Y-%m-%d\\'))\\n if not stock_data.empty:\\n return last_day\\n last_day -= timedelta(days=1)\\n\\n# Define the ticker symbols for the 10 largest tech companies with FB replaced by META.\\ntech_companies = [\"AAPL\", \"MSFT\", \"GOOGL\", \"AMZN\", \"META\", \"TSLA\", \"NVDA\", \"V\", \"ADBE\", \"INTC\"]\\n\\n# Compute the last valid trading day of the last year\\nend_of_last_year = get_last_valid_trading_day(datetime.now().year - 1)\\n\\n# Retrieve the data and calculate YTD gain for each company\\nytd_gains = {}\\nfor symbol in tech_companies:\\n try:\\n # Fetch historical data\\n stock = yf.Ticker(symbol)\\n last_price = stock.history(period=\\'1d\\')[\\'Close\\'].iloc[-1]\\n prev_close = stock.history(start=end_of_last_year, end=end_of_last_year + timedelta(days=1))[\\'Close\\'].iloc[0]\\n\\n # Calculate YTD gain\\n ytd_gain = ((last_price - prev_close) / prev_close) * 100\\n ytd_gains[symbol] = ytd_gain\\n except Exception as e:\\n # Handle errors by skipping the company and printing an error message\\n print(f\"Error retrieving data for {symbol}: {e}\")\\n\\n# Print the YTD gains\\nfor symbol, gain in ytd_gains.items():\\n print(f\"{symbol}: {gain:.2f}% YTD Gain\")\\n```\\n\\nPlease execute the updated `ytd_gains_updated.py` script to get the YTD gain of Meta Platforms, Inc. 
along with the other technology companies.', 'role': 'user'}, {'content': 'exitcode: 0 (execution succeeded)\\nCode output: \\nAAPL: -6.57% YTD Gain\\nMSFT: 10.70% YTD Gain\\nGOOGL: -1.83% YTD Gain\\nAMZN: 17.30% YTD Gain\\nMETA: 42.06% YTD Gain\\nTSLA: -18.45% YTD Gain\\nNVDA: 66.15% YTD Gain\\nV: 8.96% YTD Gain\\nADBE: -4.30% YTD Gain\\nINTC: -12.54% YTD Gain\\n', 'role': 'assistant'}, {'content': 'The YTD (Year-To-Date) gain for the 10 largest technology companies as of today are as follows:\\n\\n1. Apple Inc. (AAPL): -6.57% YTD Gain\\n2. Microsoft Corporation (MSFT): 10.70% YTD Gain\\n3. Alphabet Inc. (GOOGL): -1.83% YTD Gain\\n4. Amazon.com, Inc. (AMZN): 17.30% YTD Gain\\n5. Meta Platforms, Inc. (META, formerly FB): 42.06% YTD Gain\\n6. Tesla, Inc. (TSLA): -18.45% YTD Gain\\n7. NVIDIA Corporation (NVDA): 66.15% YTD Gain\\n8. Visa Inc. (V): 8.96% YTD Gain\\n9. Adobe Inc. (ADBE): -4.30% YTD Gain\\n10. Intel Corporation (INTC): -12.54% YTD Gain\\n\\nThese YTD gains reflect the percentage change in the stock price of each company from the last trading day of the previous year to the most recent available trading data.\\n\\nIf you need any further assistance, please let me know.\\n\\nTERMINATE', 'role': 'user'}], summary='The YTD (Year-To-Date) gain for the 10 largest technology companies as of today are as follows:\\n\\n1. Apple Inc. (AAPL): -6.57% YTD Gain\\n2. Microsoft Corporation (MSFT): 10.70% YTD Gain\\n3. Alphabet Inc. (GOOGL): -1.83% YTD Gain\\n4. Amazon.com, Inc. (AMZN): 17.30% YTD Gain\\n5. Meta Platforms, Inc. (META, formerly FB): 42.06% YTD Gain\\n6. Tesla, Inc. (TSLA): -18.45% YTD Gain\\n7. NVIDIA Corporation (NVDA): 66.15% YTD Gain\\n8. Visa Inc. (V): 8.96% YTD Gain\\n9. Adobe Inc. (ADBE): -4.30% YTD Gain\\n10. Intel Corporation (INTC): -12.54% YTD Gain\\n\\nThese YTD gains reflect the percentage change in the stock price of each company from the last trading day of the previous year to the most recent available trading data.\\n\\nIf you need any further assistance, please let me know.\\n\\n', cost=({'total_cost': 0.31437, 'gpt-4': {'cost': 0.31437, 'prompt_tokens': 5401, 'completion_tokens': 2539, 'total_tokens': 7940}}, {'total_cost': 0.31437, 'gpt-4': {'cost': 0.31437, 'prompt_tokens': 5401, 'completion_tokens': 2539, 'total_tokens': 7940}}), human_input=[''])" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "def constrain_num_messages(messages):\n", - " \"\"\"Constrain the number of messages to 3.\n", - "\n", - " This is an example of a customized compression function.\n", - "\n", - " Returns:\n", - " bool: whether the compression is successful.\n", - " list: the compressed messages.\n", - " \"\"\"\n", - " if len(messages) <= 3:\n", - " # do nothing\n", - " return False, None\n", - "\n", - " # save the first and last two messages\n", - " return True, messages[:1] + messages[-2:]\n", - "\n", - "\n", - "# create a CompressibleAgent instance named \"assistant\"\n", - "assistant = CompressibleAgent(\n", - " name=\"assistant\",\n", - " llm_config={\n", - " \"timeout\": 600,\n", - " \"cache_seed\": 43,\n", - " \"config_list\": config_list,\n", - " \"model\": \"gpt-4-1106-preview\",\n", - " },\n", - " compress_config={\n", - " \"mode\": \"CUSTOMIZED\",\n", - " \"compress_function\": constrain_num_messages, # this is required for customized compression\n", - " \"trigger_count\": 1600,\n", - " },\n", - ")\n", - "\n", - "# create a UserProxyAgent instance named \"user_proxy\"\n", - "user_proxy = 
autogen.UserProxyAgent(\n", - " name=\"user_proxy\",\n", - " human_input_mode=\"TERMINATE\",\n", - " max_consecutive_auto_reply=10,\n", - " is_termination_msg=lambda x: x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\")\n", - " or x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE.\"),\n", - " code_execution_config={\n", - " \"work_dir\": \"web\",\n", - " \"use_docker\": False,\n", - " }, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n", - " system_message=\"\"\"Reply TERMINATE if the task has been solved at full satisfaction.\n", - "Otherwise, reply CONTINUE, or the reason why the task is not solved yet.\"\"\",\n", - ")\n", - "\n", - "user_proxy.initiate_chat(\n", - " assistant,\n", - " message=\"\"\"Show me the YTD gain of 10 largest technology companies as of today.\"\"\",\n", - ")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/test/agentchat/contrib/capabilities/test_context_handling.py b/test/agentchat/contrib/capabilities/test_context_handling.py deleted file mode 100755 index 8cb1b60aff4..00000000000 --- a/test/agentchat/contrib/capabilities/test_context_handling.py +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/env python3 -m pytest - -import os -import sys - -import pytest - -import autogen -from autogen import AssistantAgent, UserProxyAgent, token_count_utils -from autogen.agentchat.contrib.capabilities.context_handling import TransformChatHistory - -# from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST - -sys.path.append(os.path.join(os.path.dirname(__file__), "../../..")) -from conftest import skip_openai # noqa: E402 - -sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..")) -from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST # noqa: E402 - -try: - from openai import OpenAI -except ImportError: - skip = True -else: - skip = False or skip_openai - - -def test_transform_chat_history(): - """ - Test the TransformChatHistory capability. 
-
-
-    In particular, test the following methods:
-    - _transform_messages
-    - truncate_str_to_tokens
-    """
-    messages = [
-        {"role": "system", "content": "System message"},
-        {"role": "user", "content": "Hi"},
-        {"role": "assistant", "content": "This is another test message"},
-    ]
-
-    # check whether the number of messages is less than max_messages
-    transform_chat_history = TransformChatHistory(max_messages=1)
-    transformed_messages = transform_chat_history._transform_messages(messages)
-    assert len(transformed_messages) == 2  # System message and the last message
-
-    # check whether the number of tokens per message is less than max_tokens_per_message
-    transform_chat_history = TransformChatHistory(max_tokens_per_message=5)
-    transformed_messages = transform_chat_history._transform_messages(messages)
-    for message in transformed_messages:
-        if message["role"] == "system":
-            continue
-        else:
-            assert token_count_utils.count_token(message["content"]) <= 5
-
-    transform_chat_history = TransformChatHistory(max_tokens=5)
-    transformed_messages = transform_chat_history._transform_messages(messages)
-
-    token_count = 0
-    for message in transformed_messages:
-        if message["role"] == "system":
-            continue
-        token_count += token_count_utils.count_token(message["content"])
-    assert token_count <= 5
-
-
-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
-def test_transform_chat_history_with_agents():
-    """
-    This test creates a GPT-3.5 agent with this capability and tests the add_to_agent method,
-    including whether it prevents a crash when chat histories become excessively long.
-    """
-    config_list = autogen.config_list_from_json(
-        OAI_CONFIG_LIST,
-        KEY_LOC,
-        filter_dict={"tags": ["gpt-3.5-turbo"]},
-    )
-    assistant = AssistantAgent("assistant", llm_config={"config_list": config_list}, max_consecutive_auto_reply=1)
-    context_handling = TransformChatHistory(max_messages=10, max_tokens_per_message=5, max_tokens=1000)
-    context_handling.add_to_agent(assistant)
-    user = UserProxyAgent(
-        "user",
-        code_execution_config={"work_dir": "coding"},
-        human_input_mode="NEVER",
-        is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
-        max_consecutive_auto_reply=1,
-    )
-
-    # Create a very long chat history that is bound to cause a crash
-    # for gpt 3.5
-    for i in range(1000):
-        assistant_msg = {"role": "assistant", "content": "test " * 1000}
-        user_msg = {"role": "user", "content": ""}
-
-        assistant.send(assistant_msg, user, request_reply=False)
-        user.send(user_msg, assistant, request_reply=False)
-
-    try:
-        user.initiate_chat(
-            assistant, message="Plot a chart of nvidia and tesla stock prices for the last 5 years", clear_history=False
-        )
-    except Exception as e:
-        assert False, f"Chat initiation failed with error {str(e)}"
-
-
-def test_transform_messages():
-    """
-    Test that _transform_messages retains message order and truncates messages properly.
-    """
-    # Test case 1: check that message order is retained after transformation and that messages are properly truncated.
-    messages = [
-        {"role": "system", "content": "System message"},
-        {"role": "user", "content": "Hi"},
-        {"role": "user", "content": "user sending the 2nd test message"},
-        {"role": "assistant", "content": "assistant sending the 3rd test message"},
-        {"role": "assistant", "content": "assistant sending the 4th test message"},
-    ]
-
-    transform_chat_history = TransformChatHistory(max_messages=3, max_tokens_per_message=10, max_tokens=100)
-    transformed_messages = transform_chat_history._transform_messages(messages)
-
-    assert transformed_messages[0]["role"] == "system"
-    assert transformed_messages[0]["content"] == "System message"
-    assert transformed_messages[1]["role"] == "user"
-    assert transformed_messages[1]["content"] == "user sending the 2nd test message"
-    assert transformed_messages[2]["role"] == "assistant"
-    assert transformed_messages[2]["content"] == "assistant sending the 3rd test message"
-    assert transformed_messages[3]["role"] == "assistant"
-    assert transformed_messages[3]["content"] == "assistant sending the 4th test message"
-
-    # Test case 2: test behavior when there is no system message
-    messages = [
-        {"role": "user", "content": "Hi"},
-        {"role": "user", "content": "user sending the 2nd test message"},
-        {"role": "assistant", "content": "assistant sending the 3rd test message"},
-        {"role": "assistant", "content": "assistant sending the 4th test message"},
-    ]
-
-    transform_chat_history = TransformChatHistory(max_messages=3, max_tokens_per_message=10, max_tokens=100)
-    transformed_messages = transform_chat_history._transform_messages(messages)
-
-    assert transformed_messages[0]["role"] == "user"
-    assert transformed_messages[0]["content"] == "user sending the 2nd test message"
-    assert transformed_messages[1]["role"] == "assistant"
-    assert transformed_messages[1]["content"] == "assistant sending the 3rd test message"
-    assert transformed_messages[2]["role"] == "assistant"
-    assert transformed_messages[2]["content"] == "assistant sending the 4th test message"
-
-    # Test case 3: a message list whose total token count exceeds max_tokens
-    messages = [
-        {"role": "user", "content": "Out of max messages"},
-        {"role": "assistant", "content": "first second third fourth"},
-        {"role": "user", "content": "a"},
-    ]
-    print(f"----Messages (N={len(messages)})----")
-    original_tokens = 0
-    for i, msg in enumerate(messages):
-        print(f"[{msg['role']}-{i}]: {msg['content']}")
-        tokens = token_count_utils.count_token(msg["content"])
-        print("Number of tokens: ", tokens)
-        original_tokens += tokens
-    print("-----Total tokens: ", original_tokens, "-----")
-
-    allowed_max_tokens = 2
-    transform_chat_history = TransformChatHistory(max_messages=2, max_tokens=allowed_max_tokens)
-    transformed_messages = transform_chat_history._transform_messages(messages)
-
-    print("Max allowed tokens: ", allowed_max_tokens)
-
-    print("Transformed contents")
-    for msg in transformed_messages:
-        print(msg["content"])
-        print("Number of tokens: ", token_count_utils.count_token(msg["content"]))
-    assert len(transformed_messages) == 1
-    assert transformed_messages[0]["role"] == "user"
-
-
-def test_truncate_str_to_tokens():
-    """
-    Test the truncate_str_to_tokens function.
- """ - from autogen.agentchat.contrib.capabilities.context_handling import truncate_str_to_tokens - - # Test case 1: Truncate string with fewer tokens than max_tokens - text = "This is a test" - max_tokens = 5 - truncated_text = truncate_str_to_tokens(text, max_tokens) - assert truncated_text == text - - # Test case 2: Truncate string with more tokens than max_tokens - text = "This is a test" - max_tokens = 3 - truncated_text = truncate_str_to_tokens(text, max_tokens) - assert truncated_text == "This is a" - - # Test case 3: Truncate empty string - text = "" - max_tokens = 5 - truncated_text = truncate_str_to_tokens(text, max_tokens) - assert truncated_text == "" - - # Test case 4: Truncate string with exact number of tokens as max_tokens - text = "This is a test" - max_tokens = 4 - truncated_text = truncate_str_to_tokens(text, max_tokens) - assert truncated_text == "This is a test" - - # Test case 5: Truncate string with no tokens found - text = "This is a test" - max_tokens = 0 - truncated_text = truncate_str_to_tokens(text, max_tokens) - assert truncated_text == "" - - # Test case 6: Truncate string when actual tokens are more than max_tokens - text = "This is a test with a looooooonngggg word" - max_tokens = 8 - truncated_text = truncate_str_to_tokens(text, max_tokens) - word_count = len(truncated_text.split()) - assert word_count <= max_tokens - - # Test case 7: Truncate string with exact number of tokens as max_tokens - text = "This\nis\na test" - max_tokens = 4 - truncated_text = truncate_str_to_tokens(text, max_tokens) - assert "This\nis" in truncated_text - - -if __name__ == "__main__": - test_transform_chat_history() - test_transform_chat_history_with_agents() - test_truncate_str_to_tokens() - test_transform_messages() diff --git a/test/agentchat/contrib/test_compressible_agent.py b/test/agentchat/contrib/test_compressible_agent.py deleted file mode 100755 index 677dd47a951..00000000000 --- a/test/agentchat/contrib/test_compressible_agent.py +++ /dev/null @@ -1,230 +0,0 @@ -#!/usr/bin/env python3 -m pytest - -import os -import sys - -import pytest - -import autogen -from autogen.agentchat.contrib.compressible_agent import CompressibleAgent - -sys.path.append(os.path.join(os.path.dirname(__file__), "../..")) -from conftest import skip_openai # noqa: E402 - -here = os.path.abspath(os.path.dirname(__file__)) - -sys.path.append(os.path.join(os.path.dirname(__file__), "..")) -from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST # noqa: E402 - -try: - import openai -except ImportError: - skip = True -else: - skip = False or skip_openai - -if not skip: - config_list = autogen.config_list_from_json( - OAI_CONFIG_LIST, - file_location=KEY_LOC, - filter_dict={ - "model": ["gpt-3.5-turbo", "gpt-35-turbo", "gpt-3.5-turbo-16k", "gpt-35-turbo-16k"], - }, - ) - - -@pytest.mark.skipif( - sys.platform in ["darwin", "win32"] or skip, - reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip", -) -def test_mode_compress(): - conversations = {} - - assistant = CompressibleAgent( - name="assistant", - llm_config={ - "timeout": 600, - "cache_seed": 43, - "config_list": config_list, - "model": "gpt-3.5-turbo", - }, - compress_config={ - "mode": "COMPRESS", - "trigger_count": 600, - "verbose": True, - }, - ) - - user_proxy = autogen.UserProxyAgent( - name="user_proxy", - human_input_mode="NEVER", - max_consecutive_auto_reply=5, - is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE") - or x.get("content", 
"").rstrip().endswith("TERMINATE."), - code_execution_config={"work_dir": here}, - ) - - user_proxy.initiate_chat( - assistant, - message="Find all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. Express your answer in interval notation.", - ) - - assistant.reset() - print(conversations) - - -@pytest.mark.skipif( - sys.platform in ["darwin", "win32"] or skip, - reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip", -) -def test_mode_customized(): - try: - assistant = CompressibleAgent( - name="assistant", - llm_config={ - "timeout": 600, - "cache_seed": 43, - "config_list": config_list, - "model": "gpt-3.5-turbo", - }, - compress_config={ - "mode": "CUSTOMIZED", - }, - ) - except ValueError: - print("ValueError raised as expected.") - - def constrain_num_messages(messages): - """Constrain the number of messages to 3. - - This is an example of a customized compression function. - - Returns: - bool: whether the compression is successful. - list: the compressed messages. - """ - if len(messages) <= 3: - # do nothing - return False, None - - # save the first and last two messages - return True, messages[:1] + messages[-2:] - - # create a CompressibleAgent instance named "assistant" - assistant = CompressibleAgent( - name="assistant", - llm_config={ - "timeout": 600, - "cache_seed": 43, - "config_list": config_list, - "model": "gpt-3.5-turbo", - }, - compress_config={ - "mode": "CUSTOMIZED", - "compress_function": constrain_num_messages, # this is required for customized compression - "trigger_count": 1000, - }, - ) - - # create a UserProxyAgent instance named "user_proxy" - user_proxy = autogen.UserProxyAgent( - name="user_proxy", - human_input_mode="NEVER", - max_consecutive_auto_reply=5, - is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE") - or x.get("content", "").rstrip().endswith("TERMINATE."), - code_execution_config={"work_dir": "web"}, - system_message="""Reply TERMINATE if the task has been solved at full satisfaction. - Otherwise, reply CONTINUE, or the reason why the task is not solved yet.""", - ) - - user_proxy.initiate_chat( - assistant, - message="""Show me the YTD gain of 10 largest technology companies as of today.""", - ) - - -@pytest.mark.skipif( - sys.platform in ["darwin", "win32"] or skip, - reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip", -) -def test_compress_message(): - assistant = CompressibleAgent( - name="assistant", - llm_config={ - "timeout": 600, - "cache_seed": 43, - "config_list": config_list, - "model": "gpt-3.5-turbo", - }, - compress_config={ - "mode": "COMPRESS", - "trigger_count": 600, - "verbose": True, - "leave_last_n": 0, - }, - ) - - assert assistant.compress_messages([{"content": "hello world", "role": "user"}]) == ( - False, - None, - ), "Single message should not be compressed" - - is_success, _ = assistant.compress_messages( - [ - {"content": "Hello!", "role": "user"}, - {"content": "How can I help you today?", "role": "assistant"}, - {"content": "Can you tell me a joke about programming?", "role": "assistant"}, - ] - ) - assert is_success, "Compression failed." 
- - -@pytest.mark.skipif(True, reason="Flaky test, CompressibleAgent no longer supported") -def test_mode_terminate(): - assistant = CompressibleAgent( - name="assistant", - llm_config={ - "timeout": 600, - "cache_seed": 43, - "config_list": config_list, - "model": "gpt-3.5-turbo", - }, - compress_config=True, - ) - - user_proxy = autogen.UserProxyAgent( - name="user_proxy", - is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"), - human_input_mode="NEVER", - max_consecutive_auto_reply=5, - code_execution_config={"work_dir": "coding"}, - ) - - final, _ = assistant.on_oai_token_limit( - [ - {"content": "Hello!", "role": "user"}, - {"content": "How can I help you today?", "role": "assistant"}, - {"content": "1&" * 5000, "role": "assistant"}, - ], - sender=user_proxy, - ) - assert final, "Terminating the conversation at max token limit is not working." - - -@pytest.mark.skipif( - sys.platform in ["darwin", "win32"] or skip, - reason="do not run on MacOS or windows OR dependency is not installed OR requested to skip", -) -def test_new_compressible_agent_description(): - assistant = CompressibleAgent(name="assistant", description="this is a description", llm_config=False) - - assert assistant.description == "this is a description", "description is not set correctly" - - -if __name__ == "__main__": - # test_mode_compress() - # test_mode_customized() - # test_compress_message() - # test_mode_terminate() - test_new_compressible_agent_description() From 01947e9d8c3c3a3674dd72008bdf4b78634aa103 Mon Sep 17 00:00:00 2001 From: Wael Karkoub <wael.karkoub96@gmail.com> Date: Tue, 6 Aug 2024 20:31:33 -0500 Subject: [PATCH 2/3] removes ci --- .github/workflows/contrib-openai.yml | 80 +--------------------------- .github/workflows/contrib-tests.yml | 71 ------------------------ 2 files changed, 2 insertions(+), 149 deletions(-) diff --git a/.github/workflows/contrib-openai.yml b/.github/workflows/contrib-openai.yml index b1b3e35e478..7e8fb003317 100644 --- a/.github/workflows/contrib-openai.yml +++ b/.github/workflows/contrib-openai.yml @@ -111,46 +111,7 @@ jobs: with: file: ./coverage.xml flags: unittests - CompressionTest: - strategy: - matrix: - os: [ubuntu-latest] - python-version: ["3.9"] - runs-on: ${{ matrix.os }} - environment: openai1 - steps: - # checkout to pr branch - - name: Checkout - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Install packages and dependencies - run: | - docker --version - python -m pip install --upgrade pip wheel - pip install -e . 
- python -c "import autogen" - pip install pytest-cov>=5 pytest-asyncio - - name: Install packages for test when needed - run: | - pip install docker - - name: Coverage - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }} - AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }} - OAI_CONFIG_LIST: ${{ secrets.OAI_CONFIG_LIST }} - run: | - pytest test/agentchat/contrib/test_compressible_agent.py - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - file: ./coverage.xml - flags: unittests + GPTAssistantAgent: strategy: matrix: @@ -306,44 +267,7 @@ jobs: with: file: ./coverage.xml flags: unittests - ContextHandling: - strategy: - matrix: - os: [ubuntu-latest] - python-version: ["3.11"] - runs-on: ${{ matrix.os }} - environment: openai1 - steps: - # checkout to pr branch - - name: Checkout - uses: actions/checkout@v4 - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Install packages and dependencies - run: | - docker --version - python -m pip install --upgrade pip wheel - pip install -e . - python -c "import autogen" - pip install pytest-cov>=5 - - name: Coverage - env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }} - AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }} - OAI_CONFIG_LIST: ${{ secrets.OAI_CONFIG_LIST }} - BING_API_KEY: ${{ secrets.BING_API_KEY }} - run: | - pytest test/agentchat/contrib/capabilities/test_context_handling.py - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - file: ./coverage.xml - flags: unittests + ImageGen: strategy: matrix: diff --git a/.github/workflows/contrib-tests.yml b/.github/workflows/contrib-tests.yml index e257dc7f8ec..7aad6ebbf06 100644 --- a/.github/workflows/contrib-tests.yml +++ b/.github/workflows/contrib-tests.yml @@ -163,41 +163,6 @@ jobs: file: ./coverage.xml flags: unittests - CompressionTest: - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, macos-latest, windows-2019] - python-version: ["3.10"] - steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Install packages and dependencies for all tests - run: | - python -m pip install --upgrade pip wheel - pip install pytest-cov>=5 - - name: Install packages and dependencies for Compression - run: | - pip install -e . 
- - name: Set AUTOGEN_USE_DOCKER based on OS - shell: bash - run: | - if [[ ${{ matrix.os }} != ubuntu-latest ]]; then - echo "AUTOGEN_USE_DOCKER=False" >> $GITHUB_ENV - fi - - name: Coverage - run: | - pytest test/agentchat/contrib/test_compressible_agent.py --skip-openai - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - file: ./coverage.xml - flags: unittests - GPTAssistantAgent: runs-on: ${{ matrix.os }} strategy: @@ -384,41 +349,6 @@ jobs: file: ./coverage.xml flags: unittests - ContextHandling: - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, macos-latest, windows-2019] - python-version: ["3.11"] - steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Install packages and dependencies for all tests - run: | - python -m pip install --upgrade pip wheel - pip install pytest-cov>=5 - - name: Install packages and dependencies for Context Handling - run: | - pip install -e . - - name: Set AUTOGEN_USE_DOCKER based on OS - shell: bash - run: | - if [[ ${{ matrix.os }} != ubuntu-latest ]]; then - echo "AUTOGEN_USE_DOCKER=False" >> $GITHUB_ENV - fi - - name: Coverage - run: | - pytest test/agentchat/contrib/capabilities/test_context_handling.py --skip-openai - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 - with: - file: ./coverage.xml - flags: unittests - TransformMessages: runs-on: ${{ matrix.os }} strategy: @@ -485,7 +415,6 @@ jobs: file: ./coverage.xml flags: unittests - AnthropicTest: runs-on: ${{ matrix.os }} strategy: From dfd78d1e453f66040089623d49a98e53a93db4bd Mon Sep 17 00:00:00 2001 From: Wael Karkoub <wael.karkoub96@gmail.com> Date: Fri, 9 Aug 2024 19:09:09 -0500 Subject: [PATCH 3/3] removes faq --- website/docs/FAQ.mdx | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/website/docs/FAQ.mdx b/website/docs/FAQ.mdx index 5a0adece6b0..2798ae9375b 100644 --- a/website/docs/FAQ.mdx +++ b/website/docs/FAQ.mdx @@ -259,16 +259,6 @@ user_proxy = autogen.UserProxyAgent( code_execution_config={"work_dir":"coding", "use_docker":False}) ``` -## Migrating from `CompressibleAgent` and `TransformChatHistory` to `TransformMessages` - -### Why migrate to `TransformMessages`? - -Migrating enhances flexibility, modularity, and customization in handling chat message transformations. `TransformMessages` introduces an improved, extensible approach for pre-processing messages for conversational agents. - -### How to migrate? - -To ensure a smooth migration process, simply follow the detailed guide provided in [Introduction to TransformMessages](/docs/topics/handling_long_contexts/intro_to_transform_messages.md). - ### What should I do if I get the error "TypeError: Assistants.create() got an unexpected keyword argument 'file_ids'"? This error typically occurs when using Autogen version earlier than 0.2.27 in combination with OpenAI library version 1.21 or later. The issue arises because the older version of Autogen does not support the file_ids parameter used by newer versions of the OpenAI API.
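Note for readers landing on this series: the FAQ entry removed above pointed to `TransformMessages` as the replacement for both `CompressibleAgent` and `TransformChatHistory`. Below is a minimal migration sketch under that API. It assumes the `transform_messages` capability described in the handling-long-contexts guide linked in the deprecation warning; the class and parameter names (`MessageHistoryLimiter`, `MessageTokenLimiter`) and the config path are illustrative and should be checked against the installed `autogen` version.

```python
# A minimal sketch, assuming autogen 0.2.x with the transform_messages capability;
# names follow the linked guide, but verify them against your installed version.
import autogen
from autogen.agentchat.contrib.capabilities import transform_messages, transforms

# Hypothetical config location; adjust to your own setup.
config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")

assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})

# Compose the strategies that TransformChatHistory bundled into a single class:
context_handling = transform_messages.TransformMessages(
    transforms=[
        # was max_messages: keep only the most recent N messages
        transforms.MessageHistoryLimiter(max_messages=10),
        # was max_tokens_per_message / max_tokens: cap tokens per message and overall
        transforms.MessageTokenLimiter(max_tokens=1000, max_tokens_per_message=50),
    ]
)

# Registers a pre-reply hook on the agent; the stored chat history is not mutated.
context_handling.add_to_agent(assistant)
```

Because each strategy is a separate transform, they can be reordered, dropped, or extended with custom transforms, which is the flexibility the removed FAQ entry referred to.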