From 9d13d857f8a8f14b26510cec337f4a7affb5f218 Mon Sep 17 00:00:00 2001 From: Sudhanshu Pandey Date: Mon, 1 Jan 2024 22:50:55 -0500 Subject: [PATCH 1/5] feat: Add method to add multiple outputs to a prompt in AIConfig --- python/src/aiconfig/schema.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/python/src/aiconfig/schema.py b/python/src/aiconfig/schema.py index df23faa43..7e11780e9 100644 --- a/python/src/aiconfig/schema.py +++ b/python/src/aiconfig/schema.py @@ -696,6 +696,29 @@ def add_output(self, prompt_name: str, output: Output, overwrite: bool = False): else: prompt.outputs.append(output) + def add_outputs(self, prompt_name: str, outputs: List[Output], overwrite: bool = False): + """ + Add multiple outputs to the prompt with the given name in the AIConfig + + Args: + prompt_name (str): The name of the prompt to add the outputs to. + outputs (List[Output]): List of outputs to add. + overwrite (bool, optional): Overwrites the existing output if True. Otherwise appends the outputs to the prompt's output list. Defaults to False. + """ + prompt = self.get_prompt(prompt_name) + if not prompt: + raise IndexError( + f"Cannot out output. Prompt '{prompt_name}' not found in config." + ) + if not outputs: + raise IndexError( + f"Cannot add outputs. No outputs provided for prompt '{prompt_name}'." + ) + if overwrite: + prompt.outputs = outputs + else: + prompt.outputs.extend(outputs) + def delete_output(self, prompt_name: str): """ Deletes the outputs for the prompt with the given prompt_name. From 3ebb3cd1a2dc45e5bd969ebeebdaee008814a1de Mon Sep 17 00:00:00 2001 From: Sudhanshu Pandey Date: Mon, 1 Jan 2024 22:54:59 -0500 Subject: [PATCH 2/5] tests: Add tests for adding outputs to an existing prompt --- ...est_programmatically_create_an_AIConfig.py | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/python/tests/test_programmatically_create_an_AIConfig.py b/python/tests/test_programmatically_create_an_AIConfig.py index 43bb8624b..544989d9d 100644 --- a/python/tests/test_programmatically_create_an_AIConfig.py +++ b/python/tests/test_programmatically_create_an_AIConfig.py @@ -587,6 +587,75 @@ def test_add_output_existing_prompt_no_overwrite(ai_config_runtime: AIConfigRunt assert ai_config_runtime.get_latest_output("GreetingPrompt") == None +def test_add_outputs_existing_prompt_no_overwrite(ai_config_runtime: AIConfigRuntime): + """Test adding outputs to an existing prompt without overwriting.""" + base_result = ExecuteResult( + output_type="execute_result", + execution_count=0.0, + data={"role": "assistant", "content": "base output"}, + metadata={"finish_reason": "stop"},) + prompt1 = Prompt( + name="GreetingPrompt", + input="Hello, how are you?", + metadata=PromptMetadata(model="fakemodel"), + outputs=[base_result], + ) + ai_config_runtime.add_prompt(prompt1.name, prompt1) + + assert ai_config_runtime.get_latest_output("GreetingPrompt") == base_result + + test_result1 = ExecuteResult( + output_type="execute_result", + execution_count=0.0, + data={"role": "assistant", "content": "test output 1"}, + metadata={"finish_reason": "stop"}, + ) + + test_result2 = ExecuteResult( + output_type="execute_result", + execution_count=0.0, + data={"role": "assistant", "content": "test output 2"}, + metadata={"finish_reason": "stop"}, + ) + ai_config_runtime.add_outputs("GreetingPrompt", [test_result1, test_result2]) + + assert ai_config_runtime.get_latest_output("GreetingPrompt") == test_result2 + assert prompt1.outputs == [base_result, test_result1, test_result2] 
+ +def test_add_outputs_existing_prompt_with_overwrite(ai_config_runtime: AIConfigRuntime): + """Test adding outputs to an existing prompt with overwriting.""" + base_result = ExecuteResult( + output_type="execute_result", + execution_count=0.0, + data={"role": "assistant", "content": "base output"}, + metadata={"finish_reason": "stop"},) + prompt1 = Prompt( + name="GreetingPrompt", + input="Hello, how are you?", + metadata=PromptMetadata(model="fakemodel"), + outputs=[base_result], + ) + ai_config_runtime.add_prompt(prompt1.name, prompt1) + + assert ai_config_runtime.get_latest_output("GreetingPrompt") == base_result + + test_result1 = ExecuteResult( + output_type="execute_result", + execution_count=0.0, + data={"role": "assistant", "content": "test output 1"}, + metadata={"finish_reason": "stop"}, + ) + + test_result2 = ExecuteResult( + output_type="execute_result", + execution_count=0.0, + data={"role": "assistant", "content": "test output 2"}, + metadata={"finish_reason": "stop"}, + ) + ai_config_runtime.add_outputs("GreetingPrompt", [test_result1, test_result2], True) + + assert ai_config_runtime.get_latest_output("GreetingPrompt") == test_result2 + assert prompt1.outputs == [test_result1, test_result2] def test_extract_override_settings(ai_config_runtime: AIConfigRuntime): initial_settings = {"topP": 0.9} From 13629cee644e7058b91055f38983844f05a96634 Mon Sep 17 00:00:00 2001 From: Sudhanshu Pandey Date: Mon, 1 Jan 2024 23:00:25 -0500 Subject: [PATCH 3/5] Test: Add tests for adding empty outputs to an existing prompt --- ...est_programmatically_create_an_AIConfig.py | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/python/tests/test_programmatically_create_an_AIConfig.py b/python/tests/test_programmatically_create_an_AIConfig.py index 544989d9d..ff527ea62 100644 --- a/python/tests/test_programmatically_create_an_AIConfig.py +++ b/python/tests/test_programmatically_create_an_AIConfig.py @@ -657,6 +657,28 @@ def test_add_outputs_existing_prompt_with_overwrite(ai_config_runtime: AIConfigR assert ai_config_runtime.get_latest_output("GreetingPrompt") == test_result2 assert prompt1.outputs == [test_result1, test_result2] +def test_add_empty_outputs_to_prompt(ai_config_runtime: AIConfigRuntime): + """Test for adding an empty output to an existing prompt with/without overwriting using add_ouputs.""" + prompt1 = Prompt( + name="GreetingPrompt", + input="Hello, how are you?", + metadata=PromptMetadata(model="fakemodel"), + ) + ai_config_runtime.add_prompt(prompt1.name, prompt1) + assert ai_config_runtime.get_latest_output("GreetingPrompt") == None + # Case 1: No outputs, no overwrite + with pytest.raises( + Exception, + match=r"Cannot add outputs. No outputs provided for prompt 'GreetingPrompt'.", + ): + ai_config_runtime.add_outputs("GreetingPrompt", []) + # Case 2: No outputs, with overwrite + with pytest.raises( + Exception, + match=r"Cannot add outputs. 
No outputs provided for prompt 'GreetingPrompt'.", + ): + ai_config_runtime.add_outputs("GreetingPrompt", [], True) + def test_extract_override_settings(ai_config_runtime: AIConfigRuntime): initial_settings = {"topP": 0.9} From f6a9ffc435fa792aa01ce36aeef9dfadc97f6c69 Mon Sep 17 00:00:00 2001 From: Sudhanshu Pandey Date: Tue, 2 Jan 2024 14:10:21 -0500 Subject: [PATCH 4/5] Fix error message for adding outputs in AIConfig schema --- python/src/aiconfig/schema.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/src/aiconfig/schema.py b/python/src/aiconfig/schema.py index 7e11780e9..bc5d30e5b 100644 --- a/python/src/aiconfig/schema.py +++ b/python/src/aiconfig/schema.py @@ -708,10 +708,10 @@ def add_outputs(self, prompt_name: str, outputs: List[Output], overwrite: bool = prompt = self.get_prompt(prompt_name) if not prompt: raise IndexError( - f"Cannot out output. Prompt '{prompt_name}' not found in config." + f"Cannot add outputs. Prompt '{prompt_name}' not found in config." ) if not outputs: - raise IndexError( + raise ValueError( f"Cannot add outputs. No outputs provided for prompt '{prompt_name}'." ) if overwrite: From eafd36b3daf5aede50b0d2874b9697d591348b72 Mon Sep 17 00:00:00 2001 From: Sudhanshu Pandey Date: Tue, 2 Jan 2024 14:24:18 -0500 Subject: [PATCH 5/5] Refactor test cases for adding outputs to an existing prompt --- ...est_programmatically_create_an_AIConfig.py | 92 +++++++++++-------- 1 file changed, 52 insertions(+), 40 deletions(-) diff --git a/python/tests/test_programmatically_create_an_AIConfig.py b/python/tests/test_programmatically_create_an_AIConfig.py index ff527ea62..a220ebf2f 100644 --- a/python/tests/test_programmatically_create_an_AIConfig.py +++ b/python/tests/test_programmatically_create_an_AIConfig.py @@ -589,92 +589,104 @@ def test_add_output_existing_prompt_no_overwrite(ai_config_runtime: AIConfigRunt def test_add_outputs_existing_prompt_no_overwrite(ai_config_runtime: AIConfigRuntime): """Test adding outputs to an existing prompt without overwriting.""" - base_result = ExecuteResult( + original_result = ExecuteResult( output_type="execute_result", - execution_count=0.0, - data={"role": "assistant", "content": "base output"}, - metadata={"finish_reason": "stop"},) - prompt1 = Prompt( + execution_count=0, + data="original result", + metadata={ + "raw_response": {"role": "assistant", "content": "original result"} + }, + ) + prompt = Prompt( name="GreetingPrompt", input="Hello, how are you?", metadata=PromptMetadata(model="fakemodel"), - outputs=[base_result], + outputs=[original_result], ) - ai_config_runtime.add_prompt(prompt1.name, prompt1) + ai_config_runtime.add_prompt(prompt.name, prompt) - assert ai_config_runtime.get_latest_output("GreetingPrompt") == base_result + assert ai_config_runtime.get_latest_output("GreetingPrompt") == original_result test_result1 = ExecuteResult( output_type="execute_result", - execution_count=0.0, - data={"role": "assistant", "content": "test output 1"}, - metadata={"finish_reason": "stop"}, + execution_count=0, + data="test output 1", + metadata={ + "raw_response": {"role": "assistant", "content": "test output 1"} + }, ) - test_result2 = ExecuteResult( output_type="execute_result", - execution_count=0.0, - data={"role": "assistant", "content": "test output 2"}, - metadata={"finish_reason": "stop"}, + execution_count=0, + data="test output 2", + metadata={ + "raw_response": {"role": "assistant", "content": "test output 2"} + }, ) ai_config_runtime.add_outputs("GreetingPrompt", [test_result1, 
test_result2]) assert ai_config_runtime.get_latest_output("GreetingPrompt") == test_result2 - assert prompt1.outputs == [base_result, test_result1, test_result2] + assert prompt.outputs == [original_result, test_result1, test_result2] def test_add_outputs_existing_prompt_with_overwrite(ai_config_runtime: AIConfigRuntime): """Test adding outputs to an existing prompt with overwriting.""" - base_result = ExecuteResult( + original_result = ExecuteResult( output_type="execute_result", - execution_count=0.0, - data={"role": "assistant", "content": "base output"}, - metadata={"finish_reason": "stop"},) - prompt1 = Prompt( + execution_count=0, + data="original result", + metadata={ + "raw_response": {"role": "assistant", "content": "original result"} + }, + ) + prompt = Prompt( name="GreetingPrompt", input="Hello, how are you?", metadata=PromptMetadata(model="fakemodel"), - outputs=[base_result], + outputs=[original_result], ) - ai_config_runtime.add_prompt(prompt1.name, prompt1) + ai_config_runtime.add_prompt(prompt.name, prompt) - assert ai_config_runtime.get_latest_output("GreetingPrompt") == base_result + assert ai_config_runtime.get_latest_output("GreetingPrompt") == original_result test_result1 = ExecuteResult( output_type="execute_result", - execution_count=0.0, - data={"role": "assistant", "content": "test output 1"}, - metadata={"finish_reason": "stop"}, + execution_count=0, + data="test output 1", + metadata={ + "raw_response": {"role": "assistant", "content": "test output 1"} + }, ) - test_result2 = ExecuteResult( output_type="execute_result", - execution_count=0.0, - data={"role": "assistant", "content": "test output 2"}, - metadata={"finish_reason": "stop"}, + execution_count=0, + data="test output 2", + metadata={ + "raw_response": {"role": "assistant", "content": "test output 2"} + }, ) ai_config_runtime.add_outputs("GreetingPrompt", [test_result1, test_result2], True) assert ai_config_runtime.get_latest_output("GreetingPrompt") == test_result2 - assert prompt1.outputs == [test_result1, test_result2] + assert prompt.outputs == [test_result1, test_result2] -def test_add_empty_outputs_to_prompt(ai_config_runtime: AIConfigRuntime): - """Test for adding an empty output to an existing prompt with/without overwriting using add_ouputs.""" - prompt1 = Prompt( +def test_add_undefined_outputs_to_prompt(ai_config_runtime: AIConfigRuntime): + """Test for adding undefined outputs to an existing prompt with/without overwriting. Should result in an error.""" + prompt = Prompt( name="GreetingPrompt", input="Hello, how are you?", metadata=PromptMetadata(model="fakemodel"), ) - ai_config_runtime.add_prompt(prompt1.name, prompt1) + ai_config_runtime.add_prompt(prompt.name, prompt) assert ai_config_runtime.get_latest_output("GreetingPrompt") == None - # Case 1: No outputs, no overwrite + # Case 1: No outputs, overwrite param not defined with pytest.raises( - Exception, + ValueError, match=r"Cannot add outputs. No outputs provided for prompt 'GreetingPrompt'.", ): ai_config_runtime.add_outputs("GreetingPrompt", []) - # Case 2: No outputs, with overwrite + # Case 2: No outputs, overwrite param set to True with pytest.raises( - Exception, + ValueError, match=r"Cannot add outputs. No outputs provided for prompt 'GreetingPrompt'.", ): ai_config_runtime.add_outputs("GreetingPrompt", [], True)
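
Reviewer note (not part of the patches): a minimal usage sketch of the new add_outputs method follows. The Prompt and ExecuteResult construction mirrors the tests added in PATCH 2/5 and refactored in PATCH 5/5; the AIConfigRuntime.create(...) call and the import paths are assumptions about the surrounding aiconfig package and are not introduced by this series.

    # Usage sketch only. AIConfigRuntime.create() and these import paths are assumed
    # to exist in the surrounding aiconfig package; only add_outputs() is added here.
    from aiconfig import AIConfigRuntime
    from aiconfig.schema import ExecuteResult, Prompt, PromptMetadata

    config = AIConfigRuntime.create("demo_config")  # assumed constructor

    # Register a prompt, mirroring the fixtures used in the tests above.
    prompt = Prompt(
        name="GreetingPrompt",
        input="Hello, how are you?",
        metadata=PromptMetadata(model="fakemodel"),
    )
    config.add_prompt(prompt.name, prompt)

    result1 = ExecuteResult(
        output_type="execute_result",
        execution_count=0,
        data="test output 1",
        metadata={"raw_response": {"role": "assistant", "content": "test output 1"}},
    )
    result2 = ExecuteResult(
        output_type="execute_result",
        execution_count=0,
        data="test output 2",
        metadata={"raw_response": {"role": "assistant", "content": "test output 2"}},
    )

    # Default behaviour: append both outputs to the prompt's existing output list.
    config.add_outputs("GreetingPrompt", [result1, result2])

    # overwrite=True: replace whatever outputs are stored with the new list.
    config.add_outputs("GreetingPrompt", [result2], overwrite=True)

    # Passing an empty list raises ValueError after PATCH 4/5:
    # config.add_outputs("GreetingPrompt", [])

The sketch exercises both branches of the overwrite flag and the empty-input guard, matching the behaviour asserted by the tests in this series.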