Feature: added method to run multiple prompts
anujsinha3 authored Feb 1, 2024
2 parents 8ebbeb6 + 1f8c14a commit e7c86f5
Showing 6 changed files with 120 additions and 1 deletion.
14 changes: 14 additions & 0 deletions data/autora/prompts/all_prompt.json
@@ -0,0 +1,14 @@
[
{
"SYS": "You are a technical documentation writer. You always write clear, concise, and accurate documentation for\nscientific experiments. Your documentation focuses on the experiment's purpose, procedure, and results. Therefore,\ndetails about specific python functions, packages, or libraries are not necessary. Your readers are experimental\nscientists.",
"INSTR": "Please generate high-level one or two paragraph documentation for the following experiment."
},
{
"SYS": "You are a technical documentation writer. You always write clear, concise, and accurate documentation\nfor scientific experiments. Your documentation focuses on the experiment's procedure. Therefore, details about specific\npython functions, packages, or libraries are NOT necessary. Your readers are experimental scientists.\nFor writing your descriptions, follow these instructions:\n- DO NOT write greetings or preambles\n- Use the Variable 'name' attribute and not the python variable names\n- Use LaTeX for math expressions\n- DO NOT include code or code-like syntax and do not use python function or class names\n- Write in paragraph style, NOT bullet points",
"INSTR": "Generate a one line description of the dependent and independent variables used in the following\npython code: "
},
{
"SYS": "You are a research scientist. You always write clear, concise, and accurate documentation\nfor scientific experiments from python code. Your documentation focuses on the experiment's procedure. Therefore, details about specific\npython functions, packages, or libraries are NOT necessary. Your readers are experimental scientists.\nFor writing your descriptions, follow these instructions:\n- DO NOT write greetings or preambles\n- Use the Variable 'name' attribute and not the python variable names\n- Use LaTeX for math expressions\n- DO NOT include code or code-like syntax and do not use python function or class names\n- Write in paragraph style, NOT bullet points",
"INSTR": "Generate a three line description of the dependent and independent variables used in the following\npython code: "
}
]
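
Each entry in the file pairs a system message ("SYS") with an instruction ("INSTR"). The new get_prompts_from_file helper (added in src/autora/doc/util.py below) turns every entry into a single prompt string via PromptBuilder. A minimal usage sketch, assuming the repository-relative path above:

    from autora.doc.util import get_prompts_from_file

    # One fully built prompt string per SYS/INSTR entry in the JSON file
    prompts = get_prompts_from_file("data/autora/prompts/all_prompt.json")
    assert len(prompts) == 3
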
12 changes: 12 additions & 0 deletions src/autora/doc/classes/EvalResult.py
@@ -0,0 +1,12 @@
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class EvalResult:
"""Class for storing LLM evaluation results"""

prediction: List[str]
prompt: str
bleu_score: Optional[float] = None
meteor_score: Optional[float] = None
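
For reference, a minimal sketch of how the pipeline code below fills this dataclass; the values shown are hypothetical:

    result = EvalResult(
        prediction=["Generated documentation text ..."],  # list of generated docs
        prompt="<full prompt string built from all_prompt.json>",
        bleu_score=0.42,    # hypothetical BLEU score
        meteor_score=0.31,  # hypothetical METEOR score
    )
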
46 changes: 46 additions & 0 deletions src/autora/doc/pipelines/main.py
@@ -9,8 +9,10 @@
from nltk.translate.bleu_score import SmoothingFunction, corpus_bleu
from nltk.translate.meteor_score import single_meteor_score

from autora.doc.classes.EvalResult import EvalResult
from autora.doc.runtime.predict_hf import Predictor
from autora.doc.runtime.prompts import PROMPTS, PromptIds
from autora.doc.util import get_prompts_from_file

app = typer.Typer()
logging.basicConfig(
@@ -47,6 +49,50 @@ def evaluate_documentation(predictions: List[str], references: List[str]) -> Tup
    return (bleu, meteor)


@app.command(help="Evaluate a model for code-to-documentation generation for all prompts in the prompts_file")
def eval_prompts(
    data_file: str = typer.Argument(..., help="JSONL Data file to evaluate on"),
    model_path: str = typer.Option("meta-llama/Llama-2-7b-chat-hf", help="Path to HF model"),
    prompts_file: str = typer.Argument(..., help="JSON file with a list of dictionary of prompts"),
    param: List[str] = typer.Option(
        [], help="Additional float parameters to pass to the model as name=float pairs"
    ),
) -> List[EvalResult]:
    import mlflow

    results_list = []

    mlflow.autolog()
    param_dict = {pair[0]: float(pair[1]) for pair in [pair.split("=") for pair in param]}
    run = mlflow.active_run()

    prompts_list = get_prompts_from_file(prompts_file)

    if run is None:
        run = mlflow.start_run()
    with run:
        logger.info(f"Active run_id: {run.info.run_id}")
        logger.info(f"running predict with {data_file}")
        logger.info(f"model path: {model_path}")
        mlflow.log_params(param_dict)
        mlflow.log_param("model_path", model_path)
        mlflow.log_param("data_file", data_file)
        mlflow.log_param("prompts_file", prompts_file)
        predictor = Predictor(model_path)
        for i in range(len(prompts_list)):
            logger.info(f"Starting to run model on prompt {i}")
            prediction_with_scores = eval_prompt(data_file, predictor, prompts_list[i], param_dict)
            logger.info(f"Model run completed on prompt {i}: {prompts_list[i]}")
            eval_result = EvalResult(
                prediction_with_scores[0],
                prompts_list[i],
                prediction_with_scores[1],
                prediction_with_scores[2],
            )
            results_list.append(eval_result)
    return results_list


@app.command(help="Evaluate model on a data file")
def eval(
data_file: str = typer.Argument(..., help="JSONL Data file to evaluate on"),
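
The new eval_prompts command can also be called directly from Python, as the added test below does. A minimal sketch; the "temperature=0.7" entry is only an illustrative name=float pair, not a parameter the model is guaranteed to accept:

    from autora.doc.pipelines.main import eval_prompts

    results = eval_prompts(
        "data/sweetpea/data.jsonl",             # JSONL data file
        "meta-llama/Llama-2-7b-chat-hf",        # HF model path
        "data/autora/prompts/all_prompt.json",  # prompts file
        ["temperature=0.7"],                    # optional name=float parameters
    )
    for result in results:
        print(result.prompt[:60], result.bleu_score, result.meteor_score)

On the command line, Typer would normally expose this as an eval-prompts subcommand with the data file and prompts file as positional arguments and one --param option per name=float pair; the exact invocation depends on how the package's CLI entry point is installed.
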
17 changes: 17 additions & 0 deletions src/autora/doc/util.py
@@ -0,0 +1,17 @@
import json
from typing import Any, Dict, List, Tuple

from autora.doc.runtime.prompts import PromptBuilder


def load_file(json_file_path: str) -> List[Dict[str, Any]]:
    # Read and parse the JSON file
    with open(json_file_path, "r") as file:
        data: List[Dict[str, Any]] = json.load(file)
    return data


def get_prompts_from_file(prompts_file: str) -> List[str]:
    prompts_data = load_file(prompts_file)
    prompts_list = [PromptBuilder(p["SYS"], p["INSTR"]).build() for p in prompts_data]
    return prompts_list
14 changes: 13 additions & 1 deletion tests/test_main.py
@@ -1,9 +1,11 @@
from pathlib import Path
from typing import Dict, List

import jsonlines
import pytest

from autora.doc.pipelines.main import eval, evaluate_documentation, generate, import_data
from autora.doc.classes.EvalResult import EvalResult
from autora.doc.pipelines.main import eval, eval_prompts, evaluate_documentation, generate, import_data
from autora.doc.runtime.prompts import PromptIds

# dummy HF model for testing
@@ -84,3 +86,13 @@ def test_import(tmp_path: Path) -> None:
    import_data(str(code), str(text), str(data))
    new_lines = data.read_text().splitlines()
    assert len(new_lines) == 1, "Expected one new line"


def test_eval_prompts() -> None:
    data_file = Path(__file__).parent.joinpath("../data/sweetpea/data.jsonl").resolve()
    prompts_file = Path(__file__).parent.joinpath("../data/autora/prompts/all_prompt.json").resolve()
    results: List[EvalResult] = eval_prompts(str(data_file), TEST_HF_MODEL, str(prompts_file), [])
    assert len(results) == 3, "Expected 3 outputs"
    for result in results:
        assert result.prediction is not None, "The prediction should not be None"
        assert result.prompt is not None, "The prompt should not be None"
18 changes: 18 additions & 0 deletions tests/test_util.py
@@ -0,0 +1,18 @@
from pathlib import Path

from autora.doc.util import get_prompts_from_file, load_file


def test_load_file() -> None:
    prompts_file_path = Path(__file__).parent.joinpath("../data/autora/prompts/all_prompt.json").resolve()
    data = load_file(str(prompts_file_path))
    assert type(data) == list


def test_get_prompts_from_file() -> None:
    prompts_file_path = Path(__file__).parent.joinpath("../data/autora/prompts/all_prompt.json").resolve()
    prompts_list = get_prompts_from_file(str(prompts_file_path))

    assert len(prompts_list) == 3, "Expected 3 outputs"
    for prompt in prompts_list:
        assert type(prompt) == str
