Feature method run multi prompts #33

Merged: 8 commits, Feb 1, 2024
5 changes: 3 additions & 2 deletions src/autora/doc/pipelines/main.py
@@ -49,7 +49,7 @@ def evaluate_documentation(predictions: List[str], references: List[str]) -> Tup


@app.command(help="Evaluate a model for code-to-documentation generation for all prompts in the prompts_file")
-def eval_on_prompts_file(
+def eval_prompts(
Collaborator review comment:
I was about to ask you to add a doc-comment for this function. In particular because it's hard to tell what the List[Dict[str,str]] will contain. But I think a better option is to create a type (a dataclass?) for the return type, e.g. an EvalResult class.
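For illustration, a minimal sketch of what such an EvalResult dataclass might look like, with fields mirroring the keys currently returned as a dictionary (prediction, bleu, meteor, prompt); the exact shape would be up to the PR author:

```python
# Hypothetical sketch only, not part of this PR; field names mirror the
# dictionary keys produced by get_eval_result_from_prediction.
from dataclasses import dataclass
from typing import List


@dataclass
class EvalResult:
    """Scored model output for a single prompt."""

    prediction: List[str]  # generated documentation strings
    bleu: float  # BLEU score against the reference documentation
    meteor: float  # METEOR score against the reference documentation
    prompt: str  # prompt used to generate the prediction
```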

data_file: str = typer.Argument(..., help="JSONL Data file to evaluate on"),
model_path: str = typer.Option("meta-llama/Llama-2-7b-chat-hf", help="Path to HF model"),
prompts_file: str = typer.Argument(..., help="JSON file with a list of dictionary of prompts"),
@@ -76,9 +76,10 @@ def eval_on_prompts_file(
mlflow.log_params(param_dict)
mlflow.log_param("model_path", model_path)
mlflow.log_param("data_file", data_file)
mlflow.log_param("prompts_file", prompts_file)
predictor = Predictor(model_path)
for i in range(len(prompts_list)):
logger.info(f"Starting to run model on prompt {i}: {prompts_list[i]}")
logger.info(f"Starting to run model on prompt {i}")
prediction_with_scores = eval_prompt(data_file, predictor, prompts_list[i], param_dict)
logger.info(f"Model run completed on prompt {i}: {prompts_list[i]}")
eval_result = get_eval_result_from_prediction(prediction_with_scores, prompts_list[i])
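For reference, the renamed command could then be invoked through the Typer CLI roughly as follows (this assumes the app in main.py is exposed as a module entry point, which is not shown in this diff; Typer maps the function name eval_prompts to the command name eval-prompts, and the file paths are the example data files used by the tests):

python -m autora.doc.pipelines.main eval-prompts data/sweetpea/data.jsonl data/autora/prompts/all_prompt.json --model-path meta-llama/Llama-2-7b-chat-hf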
12 changes: 11 additions & 1 deletion tests/test_main.py
@@ -1,9 +1,10 @@
from pathlib import Path
from typing import Dict, List

import jsonlines
import pytest

-from autora.doc.pipelines.main import eval, evaluate_documentation, generate, import_data
+from autora.doc.pipelines.main import eval, eval_prompts, evaluate_documentation, generate, import_data
from autora.doc.runtime.prompts import PromptIds

# dummy HF model for testing
@@ -84,3 +85,12 @@ def test_import(tmp_path: Path) -> None:
import_data(str(code), str(text), str(data))
new_lines = data.read_text().splitlines()
assert len(new_lines) == 1, "Expected one new line"


def test_eval_prompts() -> None:
data_file = Path(__file__).parent.joinpath("../data/sweetpea/data.jsonl").resolve()
prompts_file = Path(__file__).parent.joinpath("../data/autora/prompts/all_prompt.json").resolve()
outputs: List[Dict[str, str]] = eval_prompts(str(data_file), TEST_HF_MODEL, str(prompts_file), [])
assert len(outputs) == 3, "Expected 3 outputs"
for output in outputs:
assert len(output) > 0, "Expected non-empty output"
32 changes: 32 additions & 0 deletions tests/test_util.py
@@ -0,0 +1,32 @@
from pathlib import Path

from autora.doc.util import get_eval_result_from_prediction, get_prompts_from_file, load_file


def test_load_file() -> None:
prompts_file_path = Path(__file__).parent.joinpath("../data/autora/prompts/all_prompt.json").resolve()
data = load_file(str(prompts_file_path))
assert type(data) == list


def test_get_prompts_from_file() -> None:
prompts_file_path = Path(__file__).parent.joinpath("../data/autora/prompts/all_prompt.json").resolve()
prompts_list = get_prompts_from_file(str(prompts_file_path))

assert len(prompts_list) == 3, "Expected 3 outputs"
for prompt in prompts_list:
assert type(prompt) == str


def test_get_eval_result_from_prediction() -> None:
prediction = (["response1", "response2"], 0.8, 0.7)
prompt = "prompt1"
result = get_eval_result_from_prediction(prediction, prompt)
expected_result = {
"prediction": ["response1", "response2"],
"bleu": 0.8,
"meteor": 0.7,
"prompt": "prompt1",
}
assert type(result) == dict # Assert result is a dictionary
assert result == expected_result # Assert specific keys and values
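For context, a minimal sketch of helpers consistent with the tests above; the actual implementations in autora.doc.util may differ, and the "prompt" key assumed in get_prompts_from_file is not checked by these tests:

```python
# Hypothetical sketch inferred from the tests above, not the actual autora.doc.util code.
import json
from typing import Any, Dict, List, Tuple


def load_file(json_file_path: str) -> List[Dict[str, str]]:
    """Load a JSON file containing a list of prompt dictionaries."""
    with open(json_file_path, "r") as f:
        data: List[Dict[str, str]] = json.load(f)
    return data


def get_prompts_from_file(prompts_file: str) -> List[str]:
    """Extract the prompt strings from the prompts JSON file."""
    # The "prompt" key is an assumption; the tests only check the count and element type.
    return [entry["prompt"] for entry in load_file(prompts_file)]


def get_eval_result_from_prediction(
    prediction: Tuple[List[str], float, float], prompt: str
) -> Dict[str, Any]:
    """Package a (predictions, bleu, meteor) tuple and its prompt into a result dict."""
    predictions, bleu, meteor = prediction
    return {"prediction": predictions, "bleu": bleu, "meteor": meteor, "prompt": prompt}
```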