Feat: Add method to add multiple outputs to a prompt in AIConfig #689

Merged
23 changes: 23 additions & 0 deletions python/src/aiconfig/schema.py
@@ -696,6 +696,29 @@ def add_output(self, prompt_name: str, output: Output, overwrite: bool = False):
        else:
            prompt.outputs.append(output)

    def add_outputs(self, prompt_name: str, outputs: List[Output], overwrite: bool = False):
        """
        Add multiple outputs to the prompt with the given name in the AIConfig.

        Args:
            prompt_name (str): The name of the prompt to add the outputs to.
            outputs (List[Output]): List of outputs to add.
            overwrite (bool, optional): Overwrites the existing outputs if True. Otherwise appends the outputs to the prompt's output list. Defaults to False.
        """
        prompt = self.get_prompt(prompt_name)
        if not prompt:
            raise IndexError(
                f"Cannot add outputs. Prompt '{prompt_name}' not found in config."
            )
        if not outputs:
            raise ValueError(
                f"Cannot add outputs. No outputs provided for prompt '{prompt_name}'."
            )
        if overwrite:
            prompt.outputs = outputs
        else:
            prompt.outputs.extend(outputs)

    def delete_output(self, prompt_name: str):
        """
        Deletes the outputs for the prompt with the given prompt_name.
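For reference, a minimal usage sketch of the new method against an AIConfigRuntime, assuming the import paths exposed by the published aiconfig package; the file name aiconfig.json, the prompt name, and the ExecuteResult field values are placeholders, not part of this PR.

from aiconfig import AIConfigRuntime
from aiconfig.schema import ExecuteResult

# Hypothetical config file that already contains a prompt named "GreetingPrompt".
config = AIConfigRuntime.load("aiconfig.json")

result1 = ExecuteResult(
    output_type="execute_result",
    execution_count=0,
    data="first output",
    metadata={},
)
result2 = ExecuteResult(
    output_type="execute_result",
    execution_count=0,
    data="second output",
    metadata={},
)

# Default (overwrite=False): both outputs are appended to the prompt's existing output list.
config.add_outputs("GreetingPrompt", [result1, result2])

# overwrite=True: the prompt's output list is replaced with exactly these outputs.
config.add_outputs("GreetingPrompt", [result1, result2], overwrite=True)

# An empty outputs list raises ValueError; an unknown prompt name raises IndexError.

Note that overwrite=True replaces the output list wholesale rather than clearing and re-appending, so the caller controls the exact ordering of the resulting outputs.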
103 changes: 103 additions & 0 deletions python/tests/test_programmatically_create_an_AIConfig.py
@@ -587,6 +587,109 @@ def test_add_output_existing_prompt_no_overwrite(ai_config_runtime: AIConfigRuntime):

    assert ai_config_runtime.get_latest_output("GreetingPrompt") == None

def test_add_outputs_existing_prompt_no_overwrite(ai_config_runtime: AIConfigRuntime):
    """Test adding outputs to an existing prompt without overwriting."""
    original_result = ExecuteResult(
        output_type="execute_result",
        execution_count=0,
        data="original result",
        metadata={
            "raw_response": {"role": "assistant", "content": "original result"}
        },
    )
    prompt = Prompt(
        name="GreetingPrompt",
        input="Hello, how are you?",
        metadata=PromptMetadata(model="fakemodel"),
        outputs=[original_result],
    )
    ai_config_runtime.add_prompt(prompt.name, prompt)

    assert ai_config_runtime.get_latest_output("GreetingPrompt") == original_result

    test_result1 = ExecuteResult(
        output_type="execute_result",
        execution_count=0,
        data="test output 1",
        metadata={
            "raw_response": {"role": "assistant", "content": "test output 1"}
        },
    )
    test_result2 = ExecuteResult(
        output_type="execute_result",
        execution_count=0,
        data="test output 2",
        metadata={
            "raw_response": {"role": "assistant", "content": "test output 2"}
        },
    )
    ai_config_runtime.add_outputs("GreetingPrompt", [test_result1, test_result2])

    assert ai_config_runtime.get_latest_output("GreetingPrompt") == test_result2
    assert prompt.outputs == [original_result, test_result1, test_result2]

def test_add_outputs_existing_prompt_with_overwrite(ai_config_runtime: AIConfigRuntime):
    """Test adding outputs to an existing prompt with overwriting."""
    original_result = ExecuteResult(
        output_type="execute_result",
        execution_count=0,
        data="original result",
        metadata={
            "raw_response": {"role": "assistant", "content": "original result"}
        },
    )
    prompt = Prompt(
        name="GreetingPrompt",
        input="Hello, how are you?",
        metadata=PromptMetadata(model="fakemodel"),
        outputs=[original_result],
    )
    ai_config_runtime.add_prompt(prompt.name, prompt)

    assert ai_config_runtime.get_latest_output("GreetingPrompt") == original_result

    test_result1 = ExecuteResult(
        output_type="execute_result",
        execution_count=0,
        data="test output 1",
        metadata={
            "raw_response": {"role": "assistant", "content": "test output 1"}
        },
    )
    test_result2 = ExecuteResult(
        output_type="execute_result",
        execution_count=0,
        data="test output 2",
        metadata={
            "raw_response": {"role": "assistant", "content": "test output 2"}
        },
    )
    ai_config_runtime.add_outputs("GreetingPrompt", [test_result1, test_result2], True)

    assert ai_config_runtime.get_latest_output("GreetingPrompt") == test_result2
    assert prompt.outputs == [test_result1, test_result2]

def test_add_undefined_outputs_to_prompt(ai_config_runtime: AIConfigRuntime):
    """Test adding undefined outputs to an existing prompt with/without overwriting. Should result in an error."""
    prompt = Prompt(
        name="GreetingPrompt",
        input="Hello, how are you?",
        metadata=PromptMetadata(model="fakemodel"),
    )
    ai_config_runtime.add_prompt(prompt.name, prompt)
    assert ai_config_runtime.get_latest_output("GreetingPrompt") == None
    # Case 1: No outputs, overwrite param not defined
    with pytest.raises(
        ValueError,
        match=r"Cannot add outputs. No outputs provided for prompt 'GreetingPrompt'.",
    ):
        ai_config_runtime.add_outputs("GreetingPrompt", [])
    # Case 2: No outputs, overwrite param set to True
    with pytest.raises(
        ValueError,
        match=r"Cannot add outputs. No outputs provided for prompt 'GreetingPrompt'.",
    ):
        ai_config_runtime.add_outputs("GreetingPrompt", [], True)

def test_extract_override_settings(ai_config_runtime: AIConfigRuntime):
    initial_settings = {"topP": 0.9}