From 55b3a7938b55c44ee64c9fb6ab4cbe27d016c36c Mon Sep 17 00:00:00 2001
From: "ignacio.pena@coinfabrik.com" <ignacio.pena@coinfabrik.com>
Date: Fri, 26 Apr 2024 18:50:15 -0300
Subject: [PATCH 1/2] fix #37

---
 geppetto/gemini_handler.py  | 10 +++++++---
 geppetto/llm_api_handler.py | 13 +++++++++++++
 geppetto/openai_handler.py  |  7 ++++++-
 geppetto/slack_handler.py   | 25 ++++++++++++++++++-------
 tests/test_gemini.py        |  4 ++--
 5 files changed, 46 insertions(+), 13 deletions(-)

diff --git a/geppetto/gemini_handler.py b/geppetto/gemini_handler.py
index 58c3f5b..650b34a 100644
--- a/geppetto/gemini_handler.py
+++ b/geppetto/gemini_handler.py
@@ -44,7 +44,6 @@ def convert_gemini_to_slack(text):
 
     return formatted_text
 
-
 class GeminiHandler(LLMHandler):
 
     def __init__(
@@ -72,7 +71,12 @@ def llm_generate_content(self, user_prompt, status_callback=None, *status_callba
             user_prompt = [merged_prompt] + user_prompt[2:]
         response= self.client.generate_content(user_prompt)
         markdown_response = convert_gemini_to_slack(response.text)
-        return markdown_response
+        if len(markdown_response) > 4000:
+            # Split the message if it's too long
+            response_parts = self.split_message(markdown_response)
+            return response_parts
+        else:
+            return markdown_response
 
     def get_prompt_from_thread(self, thread: List[Dict], assistant_tag: str, user_tag: str):
         prompt = super().get_prompt_from_thread(thread, assistant_tag, user_tag)
@@ -81,4 +85,4 @@ def get_prompt_from_thread(self, thread: List[Dict], assistant_tag: str, user_ta
                 msg[MSG_FIELD] = [msg.pop(MSG_INPUT_FIELD)]
             else:
                 raise InvalidThreadFormatError("The input thread doesn't have the field %s" % MSG_INPUT_FIELD)
-        return prompt
+        return prompt
\ No newline at end of file
diff --git a/geppetto/llm_api_handler.py b/geppetto/llm_api_handler.py
index b9b380b..868122e 100644
--- a/geppetto/llm_api_handler.py
+++ b/geppetto/llm_api_handler.py
@@ -28,3 +28,16 @@ def get_prompt_from_thread(self, thread: List[Dict], assistant_tag: str, user_ta
             else:
                 raise InvalidThreadFormatError("The input thread doesn't have the field %s" % ROLE_FIELD)
         return prompt
+
+    def split_message(self, message):
+        """
+        Split a message into parts if it exceeds 4000 characters.
+
+        Args:
+            message (str): The message to split.
+
+        Returns:
+            List[str]: A list of message parts.
+        """
+        max_length = 4000
+        return [message[i:i+max_length] for i in range(0, len(message), max_length)]
diff --git a/geppetto/openai_handler.py b/geppetto/openai_handler.py
index 9e3c735..a995251 100644
--- a/geppetto/openai_handler.py
+++ b/geppetto/openai_handler.py
@@ -164,5 +164,10 @@ def llm_generate_content(self, user_prompt, status_callback=None, *status_callba
         else:
             response = response.choices[0].message.content
             markdown_response = convert_openai_markdown_to_slack(response)
-            return markdown_response
+            if len(markdown_response) > 4000:
+                # Split the message if it's too long
+                response_parts = self.split_message(markdown_response)
+                return response_parts
+            else:
+                return markdown_response
 
diff --git a/geppetto/slack_handler.py b/geppetto/slack_handler.py
index 8fb4eab..8ade157 100644
--- a/geppetto/slack_handler.py
+++ b/geppetto/slack_handler.py
@@ -96,12 +96,23 @@ def handle_message(self, msg, channel_id, thread_id):
             logging.info(
                 "response from %s: %s" % (self.name, response_from_llm_api)
             )
-            self.app.client.chat_update(
-                channel=channel_id,
-                text=response_from_llm_api,
-                thread_ts=thread_id,
-                ts=timestamp,
-            )
+            # If there are multiple parts, send each part separately
+            if isinstance(response_from_llm_api, list):
+                for part in response_from_llm_api:
+                    self.app.client.chat_postMessage(
+                        channel=channel_id,
+                        text=part,
+                        thread_ts=thread_id,
+                        mrkdwn=True
+                    )
+            else:
+                # If it is a single message, send it normally
+                self.app.client.chat_update(
+                    channel=channel_id,
+                    text=response_from_llm_api,
+                    thread_ts=thread_id,
+                    ts=timestamp,
+                )
         except Exception as e:
             logging.error("Error posting message: %s", e)
 
@@ -152,4 +163,4 @@ def select_llm_from_msg(self, message, last_llm=''):
             return last_llm
         else:
             # default first LLM
-            return controlled_llms[0]
+            return controlled_llms[0]
\ No newline at end of file
diff --git a/tests/test_gemini.py b/tests/test_gemini.py
index 76ddb81..3a5642f 100644
--- a/tests/test_gemini.py
+++ b/tests/test_gemini.py
@@ -41,11 +41,11 @@ def test_llm_generate_content(self, mock_to_markdown):
         mock_response = Mock()
         mock_response.text = "Mocked Gemini response"
         self.gemini_handler.client.generate_content.return_value = mock_response
-        mock_to_markdown.return_value = Mock(data="Mocked Markdown data")
+        mock_to_markdown.return_value = "Mocked Markdown data"
 
         response = self.gemini_handler.llm_generate_content(user_prompt)
 
-        self.assertEqual(response.data, "Mocked Markdown data")
+        self.assertEqual(response, "Mocked Markdown data")
         mock_to_markdown.assert_called_once_with("Mocked Gemini response")
 
     def test_get_prompt_from_thread(self):
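For reference, a minimal runnable sketch of the behaviour PATCH 1/2 introduces: split_message slices a long response into fixed 4000-character chunks, and handle_message in slack_handler.py then posts a list response part by part via chat_postMessage, while a plain string still goes through chat_update. Only split_message, the 4000-character threshold, and the list-vs-string branch come from the patch; MAX_LENGTH, dispatch, post_part, and update_single are illustrative stand-ins, not repository code.

# Sketch of the split/dispatch flow added in PATCH 1/2 (assumptions noted above).

MAX_LENGTH = 4000  # threshold hard-coded in gemini_handler.py and openai_handler.py

def split_message(message):
    # Same chunking as LLMHandler.split_message: fixed-size slices,
    # the last one possibly shorter.
    return [message[i:i + MAX_LENGTH] for i in range(0, len(message), MAX_LENGTH)]

def dispatch(response, post_part, update_single):
    # Same branch as handle_message: a list is posted part by part as new
    # thread messages; a single string updates the placeholder message.
    if isinstance(response, list):
        for part in response:
            post_part(part)
    else:
        update_single(response)

# A 9000-character response splits into 4000 + 4000 + 1000 characters.
parts = split_message("x" * 9000)
assert [len(p) for p in parts] == [4000, 4000, 1000]
dispatch(parts,
         post_part=lambda p: print("chat_postMessage:", len(p), "chars"),
         update_single=lambda m: print("chat_update:", len(m), "chars"))

Note that both handlers check strictly greater than 4000, so a response of exactly 4000 characters stays a single string, and the split is purely character-based: a chunk boundary can fall mid-word or inside a Slack formatting span.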
From 6dc18114e5771a4f8856d38b9a748918ae20c6fc Mon Sep 17 00:00:00 2001
From: Diego Kelyacoubian
Date: Fri, 26 Apr 2024 19:43:30 -0300
Subject: [PATCH 2/2] Geppetto 0.2.3 #44

---
 geppetto/gemini_handler.py | 2 +-
 geppetto/openai_handler.py | 4 ++--
 geppetto/slack_handler.py  | 2 +-
 tests/test_slack.py        | 4 ++--
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/geppetto/gemini_handler.py b/geppetto/gemini_handler.py
index 650b34a..c28d3d3 100644
--- a/geppetto/gemini_handler.py
+++ b/geppetto/gemini_handler.py
@@ -40,7 +40,7 @@ def convert_gemini_to_slack(text):
     formatted_text = formatted_text.replace("- ", "• ")
     formatted_text = re.sub(r"\[(.*?)\]\((.*?)\)", r"<\2|\1>", formatted_text)
 
-    formatted_text += f"\n\n_(Geppetto v0.2.1 Source: Gemini Model {GEMINI_MODEL})_"
+    formatted_text += f"\n\n_(Geppetto v0.2.3 Source: Gemini Model {GEMINI_MODEL})_"
 
     return formatted_text
 
diff --git a/geppetto/openai_handler.py b/geppetto/openai_handler.py
index a995251..783ef1c 100644
--- a/geppetto/openai_handler.py
+++ b/geppetto/openai_handler.py
@@ -44,7 +44,7 @@ def convert_openai_markdown_to_slack(text):
     formatted_text = formatted_text.replace("__", "_")
     formatted_text = formatted_text.replace("- ", "• ")
     formatted_text = re.sub(r"\[(.*?)\]\((.*?)\)", r"<\2|\1>", formatted_text)
-    formatted_text += f"\n\n_(Geppetto v0.2.1 Source: OpenAI Model {CHATGPT_MODEL})_"
+    formatted_text += f"\n\n_(Geppetto v0.2.3 Source: OpenAI Model {CHATGPT_MODEL})_"
     # Code blocks and italics remain unchanged but can be explicitly formatted if necessary
     return formatted_text
 
@@ -157,7 +157,7 @@ def llm_generate_content(self, user_prompt, status_callback=None, *status_callba
                     function_args = json.loads(tool_call.function.arguments)
                     function = available_functions[function_name]
                     if function_name == OPENAI_IMG_FUNCTION and status_callback:
-                        status_callback(*status_callback_args, ":geppetto: I'm preparing the image, please be patient "
+                        status_callback(*status_callback_args, "I'm preparing the image, please be patient "
                                         ":lower_left_paintbrush: ...")
                         response = function(**function_args)
                         return response
diff --git a/geppetto/slack_handler.py b/geppetto/slack_handler.py
index 8ade157..b2fcd1d 100644
--- a/geppetto/slack_handler.py
+++ b/geppetto/slack_handler.py
@@ -63,7 +63,7 @@ def handle_message(self, msg, channel_id, thread_id):
             response = self.send_message(
                 channel_id,
                 thread_id,
-                ":geppetto: :thought_balloon: ..."
+                ":thought_balloon:"
             )
 
             if response["ok"]:
diff --git a/tests/test_slack.py b/tests/test_slack.py
index 4df5de6..6147fd8 100644
--- a/tests/test_slack.py
+++ b/tests/test_slack.py
@@ -84,7 +84,7 @@ def test_random_user_allowed_with_wildcard_permission(self):
 
         self.MockApp().client.chat_postMessage.assert_called_with(
             channel="test_channel",
-            text=":geppetto: :thought_balloon: ...",
+            text=":thought_balloon:",
             thread_ts="1",
             mrkdwn=True
         )
@@ -110,7 +110,7 @@ def test_handle_message(self):
 
         self.MockApp().client.chat_postMessage.assert_called_with(
             channel=channel_id,
-            text=":geppetto: :thought_balloon: ...",
+            text=":thought_balloon:",
             thread_ts=thread_id,
             mrkdwn=True
         )
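A side note on PATCH 2/2: the Geppetto version footer is bumped by hand in two places (convert_gemini_to_slack and convert_openai_markdown_to_slack). A hedged sketch of centralizing it follows; the GEPPETTO_VERSION constant, its module location, and the literal model names are hypothetical, not repository code.

# Hypothetical sketch: one shared constant instead of two hand-edited footers.

GEPPETTO_VERSION = "0.2.3"  # assumed to live in a shared module, e.g. geppetto/version.py

def slack_footer(source, model):
    # Builds the footer that both convert_*_to_slack functions append.
    return f"\n\n_(Geppetto v{GEPPETTO_VERSION} Source: {source} Model {model})_"

# Each handler would append slack_footer("Gemini", GEMINI_MODEL) or
# slack_footer("OpenAI", CHATGPT_MODEL); the model names below are examples only.
print(slack_footer("Gemini", "gemini-pro"))
print(slack_footer("OpenAI", "gpt-4"))

With a shared constant like this, the next version bump would touch a single line instead of every footer string.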