From 89fb3bba1eccaecd539e0681f16f542a02266cec Mon Sep 17 00:00:00 2001
From: Merlin Kallenborn
Date: Wed, 15 May 2024 11:24:48 +0200
Subject: [PATCH] WIP: refactor: Remove Grader Logic from Elo Eval Logic

TASK: IL-394
---
 src/documentation/elo_qa_eval.ipynb        | 729 ++++++++++++++++++
 .../elo_qa_evaluation_logic.py             | 137 ++++
 .../evaluation/evaluation/elo_evaluator.py |  44 +-
 tests/evaluation/test_elo_evaluator.py     |  75 +-
 4 files changed, 937 insertions(+), 48 deletions(-)
 create mode 100644 src/documentation/elo_qa_eval.ipynb
 create mode 100644 src/intelligence_layer/evaluation/evaluation/elo_evaluation_logics/elo_qa_evaluation_logic.py

diff --git a/src/documentation/elo_qa_eval.ipynb b/src/documentation/elo_qa_eval.ipynb
new file mode 100644
index 000000000..30b610e33
--- /dev/null
+++ b/src/documentation/elo_qa_eval.ipynb
@@ -0,0 +1,729 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# User Story for Calculating ELO Scores of QA Configurations for Ranking \n",
+    "\n",
+    "As a user of the Intelligence Layer (IL), I want to evaluate how well different configurations perform on a QA task with the given input data.\n",
+    "A configuration is a combination of a model with a fixed set of parameters.\n",
+    "In the following, we focus on comparing setups which differ only in the chosen model.\n",
+    "\n",
+    "We provide multiple inputs, each consisting of a longer text and a question related to that text, as well as the expected answer.\n",
+    "A Llama model is used as a grader to decide which answer of two different models is better.\n",
+    "The aggregation of all comparisons results in [ELO](https://en.wikipedia.org/wiki/Elo_rating_system) scores and win rates of the models.\n",
+    "\n",
+    "In this notebook, we go through the following steps: First, we create a set of examples of texts with a relevant question for each (Step 0), after which we use the models to generate answers (Step 1). The given answers are then compared against each other and judged by the Llama model (Step 2), which will result in a final ELO ranking and win rate (Step 3). Lastly, we include a new model in the evaluation without having to re-evaluate the previous models against each other, as is typically done in ELO rankings (Step 4).\n",
+    "\n",
+    "## Evaluating QA use-cases\n",
+    "\n",
+    "Before we can begin, we need to load the Aleph-Alpha access token from the environment and create the client."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from os import getenv\n",
+    "\n",
+    "from aleph_alpha_client import Client\n",
+    "from dotenv import load_dotenv\n",
+    "from intelligence_layer.connectors import LimitedConcurrencyClient\n",
+    "\n",
+    "\n",
+    "\n",
+    "load_dotenv()\n",
+    "\n",
+    "aa_client = Client(getenv(\"AA_TOKEN\"))\n",
+    "limited_concurrency_client = LimitedConcurrencyClient(aa_client)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Step 0 – Data set\n",
+    "\n",
+    "During the four steps of determining the ELO scores, we make use of the following four repositories for managing the intermediate data.\n",
+    "\n",
+    "First, we create an input dataset and store it in a so-called `dataset_repository`.\n",
+    "\n",
+    "The IL will read the input dataset and produce outputs for each model, which will be stored in a `run_repository`.\n",
+    "\n",
+    "The result from the previous step can now be evaluated, in this case with an ELO evaluator (`EloQaEvaluator`). 
The evaluation is stored in the `eval_repository`.\n", + "\n", + "Finally, the evaluations are aggregated and stored in the `aggregation_repository`. The aggregation contains the ELO score and winning rate of each model along with additional metadata." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from intelligence_layer.evaluation import (\n", + " InMemoryAggregationRepository,\n", + " InMemoryDatasetRepository,\n", + " InMemoryEvaluationRepository,\n", + " InMemoryRunRepository,\n", + ")\n", + "\n", + "dataset_repository = InMemoryDatasetRepository()\n", + "run_repository = InMemoryRunRepository()\n", + "evaluation_repository = InMemoryEvaluationRepository()\n", + "aggregation_repository = InMemoryAggregationRepository()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here, we fill the `dataset_repository` with two `Example`s. Each `Example` contains a text, a question regarding said text, as well as an expected answer.\n", + "The newly created dataset in the repository has a unique id, which is stored in the `dataset_id` variable." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from intelligence_layer.evaluation import Example\n", + "\n", + "from intelligence_layer.core import Language\n", + "from intelligence_layer.examples.qa.single_chunk_qa import SingleChunkQaInput\n", + "\n", + "qa_input_text_1 = \"\"\"Surface micromachining\n", + "\n", + "Surface micromachining builds microstructures by deposition and etching structural layers over a substrate.[1] This is different from Bulk micromachining, in which a silicon substrate wafer is selectively etched to produce structures.\n", + "\n", + "Layers\n", + "\n", + "Generally, polysilicon is used as one of the substrate layers while silicon dioxide is used as a sacrificial layer. The sacrificial layer is removed or etched out to create any necessary void in the thickness direction. Added layers tend to vary in size from 2-5 micrometres. The main advantage of this machining process is the ability to build electronic and mechanical components (functions) on the same substrate. Surface micro-machined components are smaller compared to their bulk micro-machined counterparts.\n", + "\n", + "As the structures are built on top of the substrate and not inside it, the substrate's properties are not as important as in bulk micro-machining. Expensive silicon wafers can be replaced by cheaper substrates, such as glass or plastic. The size of the substrates may be larger than a silicon wafer, and surface micro-machining is used to produce thin-film transistors on large area glass substrates for flat panel displays. This technology can also be used for the manufacture of thin film solar cells, which can be deposited on glass, polyethylene terepthalate substrates or other non-rigid materials.\n", + "\n", + "Fabrication process\n", + "\n", + "Micro-machining starts with a silicon wafer or other substrate upon which new layers are grown. These layers are selectively etched by photo-lithography; either a wet etch involving an acid, or a dry etch involving an ionized gas (or plasma). Dry etching can combine chemical etching with physical etching or ion bombardment. Surface micro-machining involves as many layers as are needed with a different mask (producing a different pattern) on each layer. Modern integrated circuit fabrication uses this technique and can use as many as 100 layers. 
Micro-machining is a younger technology and usually uses no more than 5 or 6 layers. Surface micro-machining uses developed technology (although sometimes not enough for demanding applications) which is easily repeatable for volume production.\"\"\"\n", + "\n", + "example_1 = Example(\n", + " input=SingleChunkQaInput(\n", + " chunk=qa_input_text_1,\n", + " question=\"What is micromachining?\",\n", + " language=Language(\"en\"),\n", + " ),\n", + " expected_output=\"Surface micromachining builds microstructures by deposition and etching structural layers over a substrate. This is different from Bulk micromachining, in which a silicon substrate wafer is selectively etched to produce structures.\",\n", + ")\n", + "\n", + "qa_input_text_2 = \"\"\"\n", + "Silicon is a chemical element; it has symbol Si and atomic number 14. It is a hard, brittle crystalline solid with a blue-grey metallic luster, and is a non metal and semiconductor. It is a member of group 14 in the periodic table: carbon is above it; and germanium, tin, lead, and flerovium are below it. It is relatively unreactive.\n", + "\n", + "Because of its high chemical affinity for oxygen, it was not until 1823 that Jöns Jakob Berzelius was first able to prepare it and characterize it in pure form. Its oxides form a family of anions known as silicates. Its melting and boiling points of 1414 °C and 3265 °C, respectively, are the second highest among all the metalloids and nonmetals, being surpassed only by boron.[a]\n", + "\n", + "Silicon is the eighth most common element in the universe by mass, but very rarely occurs as the pure element in the Earth's crust. It is widely distributed in space in cosmic dusts, planetoids, and planets as various forms of silicon dioxide (silica) or silicates. More than 90% of the Earth's crust is composed of silicate minerals, making silicon the second most abundant element in the Earth's crust (about 28% by mass), after oxygen. \n", + "\"\"\"\n", + "\n", + "example_2 = Example(\n", + " input=SingleChunkQaInput(\n", + " chunk=qa_input_text_2, question=\"What is silicon?\", language=Language(\"en\")\n", + " ),\n", + " expected_output=\"Silicon is a chemical element.\",\n", + ")\n", + "\n", + "examples = [example_1, example_2]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "dataset_id = dataset_repository.create_dataset(\n", + " examples=examples, dataset_name=\"My-test-dataset\"\n", + ").id" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# ensure that we got a valid dataset ID\n", + "assert len(dataset_id) > 0, f\"The dataset with ID {dataset_id} is empty\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we stored the examples into the `dataset_repository`, we can retrieve them by the `dataset_id`" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Example ID = 3d7a7c26-5e01-4d2b-ab06-618a35e035a2\n", + "Input = chunk=\"\\nSilicon is a chemical element; it has symbol Si and atomic number 14. It is a hard, brittle crystalline solid with a blue-grey metallic luster, and is a non metal and semiconductor. It is a member of group 14 in the periodic table: carbon is above it; and germanium, tin, lead, and flerovium are below it. 
It is relatively unreactive.\\n\\nBecause of its high chemical affinity for oxygen, it was not until 1823 that Jöns Jakob Berzelius was first able to prepare it and characterize it in pure form. Its oxides form a family of anions known as silicates. Its melting and boiling points of 1414 °C and 3265 °C, respectively, are the second highest among all the metalloids and nonmetals, being surpassed only by boron.[a]\\n\\nSilicon is the eighth most common element in the universe by mass, but very rarely occurs as the pure element in the Earth's crust. It is widely distributed in space in cosmic dusts, planetoids, and planets as various forms of silicon dioxide (silica) or silicates. More than 90% of the Earth's crust is composed of silicate minerals, making silicon the second most abundant element in the Earth's crust (about 28% by mass), after oxygen. \\n\" question='What is silicon?' language=Language(iso_639_1='en')\n", + "Expected output = \"Silicon is a chemical element.\"\n", + "\n", + "Example ID = e47d60ca-60f0-441b-9127-5ea12434b90f\n", + "Input = chunk=\"Surface micromachining\\n\\nSurface micromachining builds microstructures by deposition and etching structural layers over a substrate.[1] This is different from Bulk micromachining, in which a silicon substrate wafer is selectively etched to produce structures.\\n\\nLayers\\n\\nGenerally, polysilicon is used as one of the substrate layers while silicon dioxide is used as a sacrificial layer. The sacrificial layer is removed or etched out to create any necessary void in the thickness direction. Added layers tend to vary in size from 2-5 micrometres. The main advantage of this machining process is the ability to build electronic and mechanical components (functions) on the same substrate. Surface micro-machined components are smaller compared to their bulk micro-machined counterparts.\\n\\nAs the structures are built on top of the substrate and not inside it, the substrate's properties are not as important as in bulk micro-machining. Expensive silicon wafers can be replaced by cheaper substrates, such as glass or plastic. The size of the substrates may be larger than a silicon wafer, and surface micro-machining is used to produce thin-film transistors on large area glass substrates for flat panel displays. This technology can also be used for the manufacture of thin film solar cells, which can be deposited on glass, polyethylene terepthalate substrates or other non-rigid materials.\\n\\nFabrication process\\n\\nMicro-machining starts with a silicon wafer or other substrate upon which new layers are grown. These layers are selectively etched by photo-lithography; either a wet etch involving an acid, or a dry etch involving an ionized gas (or plasma). Dry etching can combine chemical etching with physical etching or ion bombardment. Surface micro-machining involves as many layers as are needed with a different mask (producing a different pattern) on each layer. Modern integrated circuit fabrication uses this technique and can use as many as 100 layers. Micro-machining is a younger technology and usually uses no more than 5 or 6 layers. Surface micro-machining uses developed technology (although sometimes not enough for demanding applications) which is easily repeatable for volume production.\" question='What is micromachining?' language=Language(iso_639_1='en')\n", + "Expected output = \"Surface micromachining builds microstructures by deposition and etching structural layers over a substrate. 
This is different from Bulk micromachining, in which a silicon substrate wafer is selectively etched to produce structures.\"\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "for example in dataset_repository.examples(dataset_id, SingleChunkQaInput, str):\n",
+    "    print(example)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Step 1 - Run Models\n",
+    "\n",
+    "Given a `dataset_repository` with examples, we can now generate the output of the models for all examples.\n",
+    "First, we have to define which models we want to use. In this example, we use the _\"luminous-base-control-20240215\"_ model and the _\"luminous-supreme-control-20240215\"_ model.\n",
+    " \n",
+    "The previously created client is used to instantiate the models, and a `Task` is created for each model. We use a `SingleChunkQa` task, meaning that in each task a model will give an answer to a question regarding a single chunk of text.\n",
+    "These tasks are executed by a `Runner`, using the input dataset via the previously stored `dataset_id`.\n",
+    "\n",
+    "Each `Runner` requires a `run_repository` to store the output. The generated output is automatically stored when calling `run_dataset` on the `runners`. The output for each model will have a unique _\"run id\"_.\n",
+    "In general, the output for each model consists of two parts. One part is a collection of example outputs. Each example output contains the `run_id`, `example_id`, and a field `output`. In this specific use-case, the `output` field contains the `answer` to the question. The other part is a _\"run overview\"_ with the run id stored as `id`, the `dataset_id`, and a description of the task, plus other metadata. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Running: 2it [00:17, 8.51s/it]\n",
+      "Running: 2it [00:22, 11.31s/it]\n"
+     ]
+    }
+   ],
+   "source": [
+    "from intelligence_layer.evaluation.run.runner import Runner\n",
+    "from intelligence_layer.core import LuminousControlModel\n",
+    "from intelligence_layer.examples.qa.single_chunk_qa import SingleChunkQa, SingleChunkQaOutput\n",
+    "\n",
+    "\n",
+    "models = [\n",
+    "    LuminousControlModel(name=\"luminous-base-control-20240215\", client=aa_client),\n",
+    "    LuminousControlModel(name=\"luminous-supreme-control-20240215\", client=aa_client),\n",
+    "]\n",
+    "\n",
+    "for model in models:\n",
+    "    runner = Runner[SingleChunkQaInput, SingleChunkQaOutput](\n",
+    "        task=SingleChunkQa(model=model),\n",
+    "        dataset_repository=dataset_repository,\n",
+    "        run_repository=run_repository,\n",
+    "        description=f\"QA with model {model.name}\",\n",
+    "    )\n",
+    "    runner.run_dataset(dataset_id)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ensure that all examples succeeded\n",
+    "for run_overview in run_repository.run_overviews():\n",
+    "    assert (\n",
+    "        run_overview.failed_example_count == 0\n",
+    "    ), f\"There are failed runs for run overview ID {run_overview.id}\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The overviews and outputs can be retrieved via the unique run ids:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Run overview IDs saved in the run repository: ['6a45e898-5516-4e7a-84cc-a010580e335c', 'b66fc08b-068a-4219-bcb5-51637cfaa47e']\n",
+      "\n",
+      "Run Overview ID = 6a45e898-5516-4e7a-84cc-a010580e335c\n",
+      "Dataset ID = 
2bbd6c69-dbde-4739-a0f1-e82214deeb2d\n", + "Start time = 2024-05-14 14:26:55.937724+00:00\n", + "End time = 2024-05-14 14:27:12.977689+00:00\n", + "Failed example count = 0\n", + "Successful example count = 2\n", + "Description = \"QA with model luminous-base-control-20240215\"\n", + "\n", + "Example ID=3d7a7c26-5e01-4d2b-ab06-618a35e035a2\n", + "Related Run ID=6a45e898-5516-4e7a-84cc-a010580e335c\n", + "Output=\"answer='Silicon is a chemical element with symbol Si and atomic number 14. It is a hard, brittle crystalline solid with a blue-grey metallic luster, and is a non metal and semiconductor.' highlights=[ScoredTextHighlight(start=71, end=182, score=1.0)]\"\n", + "\n", + "Example ID=e47d60ca-60f0-441b-9127-5ea12434b90f\n", + "Related Run ID=6a45e898-5516-4e7a-84cc-a010580e335c\n", + "Output=\"answer='Micromachining is a process of building microstructures by deposition and etching structural layers over a substrate.' highlights=[ScoredTextHighlight(start=24, end=131, score=1.0)]\"\n", + "\n", + "Run Overview ID = b66fc08b-068a-4219-bcb5-51637cfaa47e\n", + "Dataset ID = 2bbd6c69-dbde-4739-a0f1-e82214deeb2d\n", + "Start time = 2024-05-14 14:27:12.978007+00:00\n", + "End time = 2024-05-14 14:27:35.599692+00:00\n", + "Failed example count = 0\n", + "Successful example count = 2\n", + "Description = \"QA with model luminous-supreme-control-20240215\"\n", + "\n", + "Example ID=3d7a7c26-5e01-4d2b-ab06-618a35e035a2\n", + "Related Run ID=b66fc08b-068a-4219-bcb5-51637cfaa47e\n", + "Output=\"answer='Silicon is a chemical element with symbol Si and atomic number 14. It is a hard, brittle crystalline solid with a blue-grey metallic luster, and is a non metal and semiconductor.' highlights=[ScoredTextHighlight(start=71, end=182, score=1.0)]\"\n", + "\n", + "Example ID=e47d60ca-60f0-441b-9127-5ea12434b90f\n", + "Related Run ID=b66fc08b-068a-4219-bcb5-51637cfaa47e\n", + "Output=\"answer='Surface micromachining is a process of building microstructures by deposition and etching structural layers over a substrate.' highlights=[ScoredTextHighlight(start=24, end=131, score=1.0)]\"\n", + "\n" + ] + } + ], + "source": [ + "print(\n", + " f\"Run overview IDs saved in the run repository: {run_repository.run_overview_ids()}\\n\"\n", + ")\n", + "\n", + "for run_overview in run_repository.run_overviews():\n", + " print(run_overview)\n", + " for example_output in run_repository.example_outputs(\n", + " run_overview.id, SingleChunkQaOutput\n", + " ):\n", + " print(example_output)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Step 2 – Run Evaluation\n", + "\n", + "Now that we have generated the answers of all models for all examples in the `dataset_repository`, the next step is to evaluate those answers.\n", + "The evaluation is done by an `Evaluator`. Here we are interested in the ELO score, which can be calculated using the `EloQaEvaluator`. For each example, the `EloQaEvaluator` takes the two answers of two different models and uses Llama to decide which answer is better. You can also implement your own `Evaluator` to exactly match your use case." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "IDs of stored evaluations: []\n" + ] + } + ], + "source": [ + "# this should demonstrate that there are no stored evaluations yet in our repository\n", + "print(f\"IDs of stored evaluations: {evaluation_repository.evaluation_overview_ids()}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "from intelligence_layer.core.model import Llama2InstructModel\n", + "from intelligence_layer.evaluation import Evaluator\n", + "from intelligence_layer.evaluation.evaluation.elo_evaluator import EloEvaluationLogic\n", + "from intelligence_layer.evaluation.evaluation.elo_graders.elo_qa_grader import EloQaGrader\n", + "\n", + "\n", + "\n", + "elo_evaluation_logic: EloEvaluationLogic[SingleChunkQaInput, SingleChunkQaOutput, SingleChunkQaOutput] = EloEvaluationLogic()\n", + "\n", + "evaluator = Evaluator(\n", + " dataset_repository=dataset_repository,\n", + " run_repository=run_repository,\n", + " evaluation_repository=evaluation_repository,\n", + " description=\"ELO QA evaluation\", # this description will be used later to query for specific evaluations\n", + " evaluation_logic=elo_evaluation_logic,\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "ename": "TypeError", + "evalue": "Alternatively overwrite input_type() in ", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)", + "File \u001b[0;32m~/Aleph-Alpha/intelligence-layer-sdk/src/intelligence_layer/evaluation/evaluation/evaluator.py:234\u001b[0m, in \u001b[0;36mEvaluator.input_type\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 234\u001b[0m input_type \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_types\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mInput\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\n\u001b[1;32m 235\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mKeyError\u001b[39;00m:\n", + "\u001b[0;31mKeyError\u001b[0m: 'Input'", + "\nDuring handling of the above exception, another exception occurred:\n", + "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[11], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m evaluation_overview \u001b[38;5;241m=\u001b[39m \u001b[43mevaluator\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mevaluate_runs\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mrun_repository\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_overview_ids\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/Aleph-Alpha/intelligence-layer-sdk/src/intelligence_layer/evaluation/evaluation/evaluator.py:329\u001b[0m, in \u001b[0;36mEvaluator.evaluate_runs\u001b[0;34m(self, num_examples, abort_on_error, *run_ids)\u001b[0m\n\u001b[1;32m 325\u001b[0m eval_id \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_evaluation_repository\u001b[38;5;241m.\u001b[39minitialize_evaluation()\n\u001b[1;32m 326\u001b[0m dataset_id \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mnext\u001b[39m(\u001b[38;5;28miter\u001b[39m(run_overviews))\u001b[38;5;241m.\u001b[39mdataset_id\n\u001b[1;32m 327\u001b[0m examples \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dataset_repository\u001b[38;5;241m.\u001b[39mexamples(\n\u001b[1;32m 328\u001b[0m dataset_id,\n\u001b[0;32m--> 329\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minput_type\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m,\n\u001b[1;32m 330\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexpected_output_type(),\n\u001b[1;32m 331\u001b[0m )\n\u001b[1;32m 332\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m examples \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 333\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDataset: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdataset_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m not found\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "File \u001b[0;32m~/Aleph-Alpha/intelligence-layer-sdk/src/intelligence_layer/evaluation/evaluation/evaluator.py:236\u001b[0m, in \u001b[0;36mEvaluator.input_type\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 234\u001b[0m input_type \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_get_types()[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mInput\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[1;32m 235\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mKeyError\u001b[39;00m:\n\u001b[0;32m--> 236\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAlternatively overwrite input_type() in \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mtype\u001b[39m(\u001b[38;5;28mself\u001b[39m)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 237\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m cast(\u001b[38;5;28mtype\u001b[39m[Input], input_type)\n", + "\u001b[0;31mTypeError\u001b[0m: Alternatively overwrite input_type() in " + ] + } + ], + "source": [ + "\n", + "evaluation_overview = evaluator.evaluate_runs(*run_repository.run_overview_ids())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ensure that for each example there are evaluated comparisons\n", + "for example_evaluation in evaluation_repository.example_evaluations(\n", + " evaluation_overview.id, Match\n", + "):\n", + " assert (\n", + " len(example_evaluation.result.matches) > 0\n", + " ), f\"There are no matches (comparisons) for example ID {example_evaluation.example_id}\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The evaluation results can be retrieved via their unique ids:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for evaluation_overview in evaluation_repository.evaluation_overviews():\n", + " print(evaluation_overview)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Step 3 – Run Aggregation\n", + "\n", + "Finally, all individual evaluations are aggregated into 
metrics for each model - here, an ELO score and a win rate.\n",
+    "The aggregation is performed by an `Aggregator` that is configured with a matching aggregation logic - here, the `EloQaAggregationLogic`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# this should demonstrate that there are no stored aggregated evaluations yet in our repository\n",
+    "print(\n",
+    "    f\"IDs of stored aggregated evaluations: {aggregation_repository.aggregation_overview_ids()}\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from intelligence_layer.evaluation import Aggregator\n",
+    "from intelligence_layer_experiments.use_cases.elo_usecase.elo_qa_aggregator import (\n",
+    "    EloQaAggregationLogic,\n",
+    ")\n",
+    "\n",
+    "aggregator = Aggregator(\n",
+    "    evaluation_repository=evaluation_repository,\n",
+    "    aggregation_repository=aggregation_repository,\n",
+    "    description=\"ELO QA aggregation\",\n",
+    "    aggregation_logic=EloQaAggregationLogic(),\n",
+    ")\n",
+    "\n",
+    "aggregated_evaluation = aggregator.aggregate_evaluation(evaluation_overview.id)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ensure that there are no failed (aggregated) evaluations\n",
+    "assert (\n",
+    "    aggregated_evaluation.crashed_during_evaluation_count == 0\n",
+    "), f\"There are crashed evaluations for aggregated evaluation ID {aggregated_evaluation.id}\"\n",
+    "assert (\n",
+    "    aggregated_evaluation.failed_evaluation_count == 0\n",
+    "), f\"There are failed evaluations for aggregated evaluation ID {aggregated_evaluation.id}\"\n",
+    "# ensure that the result contains ELO scores\n",
+    "assert hasattr(\n",
+    "    aggregated_evaluation.statistics, \"scores\"\n",
+    "), f\"There are no scores for aggregated evaluation ID {aggregated_evaluation.id}\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can get an overview of each aggregation from the aggregation repository as follows. Note that we need to provide the type of the aggregation to enable the deserialization. The `statistics` field of the aggregated evaluation contains only the aggregated metrics for each model. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from intelligence_layer.evaluation import AggregatedEvaluation\n",
+    "\n",
+    "for aggregation_overview in aggregation_repository.aggregation_overviews(\n",
+    "    AggregatedEvaluation\n",
+    "):\n",
+    "    print(aggregation_overview)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Step 4 – Addition of New Models\n",
+    "\n",
+    "Now let us consider the case where we want to add new models to our existing evaluation.\n",
+    "Since the comparison of answers is rather time-consuming, we want to avoid recalculating the evaluation for the previous models, and just compare the new models to the old ones.\n",
+    "\n",
+    "To do so, we first define the new models _\"luminous-base-control-20230501\"_ and _\"luminous-supreme-control-20230501\"_, and generate their answers."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "newly_added_models = [\n", + " LuminousControlModel(name=\"luminous-base-control-20230501\", client=aa_client),\n", + " LuminousControlModel(name=\"luminous-supreme-control-20230501\", client=aa_client),\n", + "]\n", + "\n", + "for model in newly_added_models:\n", + " runner = Runner[SingleChunkQaInput, SingleChunkQaOutput](\n", + " task=SingleChunkQa(model),\n", + " dataset_repository=dataset_repository,\n", + " run_repository=run_repository,\n", + " description=f\"New QA with model {model.name}\", # used to query for new runs only later in the code\n", + " )\n", + " runner.run_dataset(dataset_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ensure that all examples succeeded\n", + "for run_overview in run_repository.run_overviews():\n", + " assert (\n", + " run_overview.failed_example_count == 0\n", + " ), f\"There are failed runs for run overview ID {run_overview.id}\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for run_overview in run_repository.run_overviews():\n", + " # skip runs done for previous models\n", + " if not run_overview.description.startswith(\"New\"):\n", + " continue\n", + " # print runs for the added models\n", + " print(run_overview)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To evaluate the new models against the previous models, we define a new evaluator that has an additional parameter `high_priority_runs`.\n", + "To limit the evaluator to comparisons where one of the answers is generated by a new model, we add the run_ids of the new models to `high_priority_runs`.\n", + "This way, the previous models are not compared against each other again." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_priority_runs = [\n", + " overview.id\n", + " for overview in run_repository.run_overviews()\n", + " if overview.description.startswith(\"New QA\")\n", + "]\n", + "\n", + "evaluator_missing_runs = Evaluator(\n", + " dataset_repository=dataset_repository,\n", + " run_repository=run_repository,\n", + " evaluation_repository=evaluation_repository,\n", + " description=\"ELO QA evaluation for newly added model\", # this description will be used later to query for specific evaluations\n", + " evaluation_logic=EloEvaluationLogic(\n", + " client=aa_client,\n", + " high_priority_runs=frozenset(high_priority_runs),\n", + " ),\n", + ")\n", + "\n", + "new_evaluation_overview = evaluator_missing_runs.evaluate_runs(\n", + " *run_repository.run_overview_ids()\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ensure that for each example there are evaluated comparisons\n", + "for example_evaluation in evaluation_repository.example_evaluations(\n", + " new_evaluation_overview.id, Match\n", + "):\n", + " assert (\n", + " len(example_evaluation.result.matches) > 0\n", + " ), f\"There are no matches (comparisons) for example ID {example_evaluation.example_id}\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In addition to the previous `evaluation_overview`, we now also have the newly generated `new_evaluation_overview` which includes our new model.\n", + "Finally, the aggregated evaluation of all models is calculated by passing in the evaluation ids of both evaluations into `aggregate_evaluation`. By doing so, the previously calculated ELO scores will be updated with the comparisons to the new models' answers." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# get the IDs of all the evaluation overviews which we created for the QA task\n",
+    "evaluation_overview_ids = [\n",
+    "    evaluation_overview.id\n",
+    "    for evaluation_overview in evaluation_repository.evaluation_overviews()\n",
+    "    if \"QA\" in evaluation_overview.description\n",
+    "]\n",
+    "print(f\"Evaluation overviews to aggregate: {evaluation_overview_ids}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# run the aggregation\n",
+    "aggregated_evaluation_with_new_model = aggregator.aggregate_evaluation(\n",
+    "    *evaluation_overview_ids\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ensure that there are no failed (aggregated) evaluations\n",
+    "assert (\n",
+    "    aggregated_evaluation_with_new_model.crashed_during_evaluation_count == 0\n",
+    "), f\"There are crashed evaluations for aggregated evaluation ID {aggregated_evaluation_with_new_model.id}\"\n",
+    "assert (\n",
+    "    aggregated_evaluation_with_new_model.failed_evaluation_count == 0\n",
+    "), f\"There are failed evaluations for aggregated evaluation ID {aggregated_evaluation_with_new_model.id}\"\n",
+    "# ensure that the result contains ELO scores\n",
+    "assert hasattr(\n",
+    "    aggregated_evaluation_with_new_model.statistics, \"scores\"\n",
+    "), f\"There are no scores for aggregated evaluation ID {aggregated_evaluation_with_new_model.id}\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A look at the new aggregated evaluation shows that the new models have been added to the evaluation:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(aggregated_evaluation_with_new_model)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "intelligence-layer-tfT-HG2V-py3.11",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.8"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/src/intelligence_layer/evaluation/evaluation/elo_evaluation_logics/elo_qa_evaluation_logic.py b/src/intelligence_layer/evaluation/evaluation/elo_evaluation_logics/elo_qa_evaluation_logic.py
new file mode 100644
index 000000000..a2abb6574
--- /dev/null
+++ b/src/intelligence_layer/evaluation/evaluation/elo_evaluation_logics/elo_qa_evaluation_logic.py
@@ -0,0 +1,137 @@
+import math
+from typing import Mapping, Sequence, final
+from aleph_alpha_client import Prompt
+from liquid import Template
+from intelligence_layer.core.detect_language import Language
+from intelligence_layer.core.model import CompleteInput, CompleteOutput, ControlModel
+from intelligence_layer.core.tracer.tracer import NoOpTracer, TaskSpan, Tracer
+from intelligence_layer.evaluation.aggregation.elo import MatchOutcome
+from intelligence_layer.evaluation.dataset.domain import Example
+from intelligence_layer.evaluation.evaluation.elo_evaluator import EloEvaluationLogic, EloGradingInput
+from intelligence_layer.evaluation.run.domain import SuccessfulExampleOutput
+from intelligence_layer.examples.qa.single_chunk_qa import QA_INSTRUCTIONS, SingleChunkQaInput, SingleChunkQaOutput
+
+
+class EloQaEvaluationLogic(
EloEvaluationLogic[SingleChunkQaInput, SingleChunkQaOutput, SingleChunkQaOutput] +): + INPUT_TEMPLATE = """ +Your task is to compare two answers to an instruction on one metric. + +Please make sure you read and understand these instruction carefully. Please keep this document open while reviewing, and refer to it as needed. + +The Instruction for the answers was:{instruction} + +Evaluation Procedure: +1. Read both answers carefully and identify the main facts and details they present. +2. Check if the answers contain any factual errors that are not supported by the instruction. +3. Evaluate which answer is more correct. + +Answer A:{first_completion} + +Answer B:{second_completion} + +Which answer is more correct given the Instruction and Evaluation Procedure, Answer A or Answer B? + +Response: Answer """ + VALUES = [ + " A", + " B", + ] # The space before the A and B is important due to tokenization + + def __init__( + self, + model: ControlModel, + tracer: Tracer = NoOpTracer(), + ): + super().__init__() + self._model = model + self.tracer = tracer + + def _create_grading_input( + self, + first: SuccessfulExampleOutput[SingleChunkQaOutput], + second: SuccessfulExampleOutput[SingleChunkQaOutput], + example: Example[SingleChunkQaInput, SingleChunkQaOutput], + ) -> EloGradingInput: + qa_instruction = Template( + QA_INSTRUCTIONS[Language("en")].unformatted_instruction + ).render(question=example.input.question) + + no_answer = "There is no answer." + return EloGradingInput( + instruction=f"{example.input.chunk} {qa_instruction}", + first_completion=( + first.output.answer if first.output.answer is not None else no_answer + ), + second_completion=( + second.output.answer if second.output.answer is not None else no_answer + ), + ) + + def do_run(self, input: EloGradingInput, task_span: TaskSpan) -> MatchOutcome: + text = self.INPUT_TEMPLATE.format( + instruction=input.instruction, + first_completion=input.first_completion, + second_completion=input.second_completion, + ) + + complete_input = CompleteInput( + prompt=Prompt.from_text(text), + maximum_tokens=1, + log_probs=3, + disable_optimizations=True, + ) + complete_output = self._model.complete_task().run(complete_input, task_span) + + return self.calculate_winners(complete_output) + + def grade( + self, + first: SuccessfulExampleOutput[SingleChunkQaOutput], + second: SuccessfulExampleOutput[SingleChunkQaOutput], + example: Example[SingleChunkQaInput, SingleChunkQaOutput], + ) -> MatchOutcome: + grading_input = self._create_grading_input(first, second, example) + + return MatchOutcome( + self.do_run( + grading_input, + self.tracer.task_span( + task_name="elo_qa_run_grader", input=grading_input + ), + ) + ) + + def calculate_winners(self, complete_output: CompleteOutput) -> MatchOutcome: + default_log_prob = float("-inf") + + def get_normalized_prob( + log_prob_list: Sequence[Mapping[str, float | None]] | None, + ) -> float: + assert log_prob_list is not None + log_probs = log_prob_list[0] + values = [ + math.exp(log_probs.get(str(key), default_log_prob) or default_log_prob) + for key in self.VALUES + ] + if all(v == 0 for v in values): + raise ValueError( + f"LLM evaluation response does not contain logprobs for the required tokens for the values: {self.VALUES}" + ) + return values[0] / sum(values) + + def categorize_value(value: float) -> MatchOutcome: + if value > 0.7: + return MatchOutcome.A_WINS + elif 0.3 > value: + return MatchOutcome.B_WINS + else: + return MatchOutcome.DRAW + + normalized_probability = get_normalized_prob( + 
complete_output.completions[0].log_probs + ) + return categorize_value(normalized_probability) + + \ No newline at end of file diff --git a/src/intelligence_layer/evaluation/evaluation/elo_evaluator.py b/src/intelligence_layer/evaluation/evaluation/elo_evaluator.py index 20dd98c1e..aae37fdcf 100644 --- a/src/intelligence_layer/evaluation/evaluation/elo_evaluator.py +++ b/src/intelligence_layer/evaluation/evaluation/elo_evaluator.py @@ -1,16 +1,32 @@ +from abc import abstractmethod from itertools import combinations +from typing import Sequence, final + +from pydantic import BaseModel from intelligence_layer.core import Input, Output from intelligence_layer.evaluation import EvaluationLogic +from intelligence_layer.evaluation.aggregation.elo import MatchOutcome from intelligence_layer.evaluation.dataset.domain import Example, ExpectedOutput -from intelligence_layer.evaluation.evaluation.elo_graders.elo_grader import ( - EloGrader, - Match, - Matches, -) from intelligence_layer.evaluation.run.domain import SuccessfulExampleOutput +class Match(BaseModel): + player_a: str + player_b: str + outcome: MatchOutcome + + +class Matches(BaseModel): + matches: Sequence[Match] + + +class EloGradingInput(BaseModel): + instruction: str + first_completion: str + second_completion: str + + class EloEvaluationLogic(EvaluationLogic[Input, Output, ExpectedOutput, Matches]): """Evaluation logic for a pair-wise ELO comparison. @@ -20,12 +36,7 @@ class EloEvaluationLogic(EvaluationLogic[Input, Output, ExpectedOutput, Matches] """ - def __init__( - self, - grader: EloGrader[Input, Output, ExpectedOutput], - ): - self._grader = grader - + @final def do_evaluate( self, example: Example[Input, ExpectedOutput], @@ -37,8 +48,17 @@ def do_evaluate( Match( player_a=first.run_id, player_b=second.run_id, - outcome=self._grader.grade(first, second, example), + outcome=self.grade(first, second, example), ) for [first, second] in pairs ] ) + + @abstractmethod + def grade( + self, + output_a: SuccessfulExampleOutput[Output], + output_b: SuccessfulExampleOutput[Output], + example: Example[Input, ExpectedOutput], + ) -> MatchOutcome: + pass diff --git a/tests/evaluation/test_elo_evaluator.py b/tests/evaluation/test_elo_evaluator.py index 817fffcc6..e21ece38d 100644 --- a/tests/evaluation/test_elo_evaluator.py +++ b/tests/evaluation/test_elo_evaluator.py @@ -1,4 +1,3 @@ -from itertools import combinations from typing import Sequence, Tuple from dotenv import load_dotenv @@ -12,6 +11,7 @@ TextChunk, utc_now, ) +from intelligence_layer.core.tracer.tracer import NoOpTracer, Tracer from intelligence_layer.evaluation import ( Evaluator, Example, @@ -28,15 +28,41 @@ Match, Matches, ) -from intelligence_layer.evaluation.evaluation.elo_graders.elo_qa_grader import ( - EloQaGrader, -) +from intelligence_layer.evaluation.evaluation.evaluator import EvaluationLogic from intelligence_layer.examples import SingleChunkQaInput, SingleChunkQaOutput load_dotenv() -class DummyEloQaGrader(EloQaGrader): +# class DummyEloQaGrader(EloQaGrader): +# def grade( +# self, +# first: SuccessfulExampleOutput[SingleChunkQaOutput], +# second: SuccessfulExampleOutput[SingleChunkQaOutput], +# example: Example[SingleChunkQaInput, SingleChunkQaOutput], +# ) -> MatchOutcome: +# _ = example +# if first.run_id < second.run_id: +# return MatchOutcome.A_WINS +# elif first.run_id > second.run_id: +# return MatchOutcome.B_WINS +# else: +# return MatchOutcome.DRAW +# + + +class DummyEloQaEvalLogic( + EloEvaluationLogic[SingleChunkQaInput, SingleChunkQaOutput, 
SingleChunkQaOutput] +): + def __init__( + self, + model: ControlModel, + tracer: Tracer = NoOpTracer(), + ): + super().__init__() + self._model = model + self.tracer = tracer + def grade( self, first: SuccessfulExampleOutput[SingleChunkQaOutput], @@ -52,35 +78,14 @@ def grade( return MatchOutcome.DRAW -class DummyEloQaEvalLogic( - EloEvaluationLogic[SingleChunkQaInput, SingleChunkQaOutput, SingleChunkQaOutput] -): - def do_evaluate( - self, - example: Example[SingleChunkQaInput, SingleChunkQaOutput], - *output: SuccessfulExampleOutput[SingleChunkQaOutput], - ) -> Matches: - pairs = combinations(output, 2) - return Matches( - matches=[ - Match( - player_a=first.run_id, - player_b=second.run_id, - outcome=self._grader.grade(first, second, example), - ) - for [first, second] in pairs - ] - ) - - @fixture def model(client: AlephAlphaClientProtocol) -> ControlModel: return LuminousControlModel(client=client, name="luminous-base-control") -@fixture -def dummy_elo_qa_grader(model: ControlModel) -> DummyEloQaGrader: - return DummyEloQaGrader(model=model) +# @fixture +# def dummy_elo_qa_grader(model: ControlModel) -> DummyEloQaGrader: +# return DummyEloQaGrader(model=model) @fixture @@ -99,10 +104,8 @@ def in_memory_evaluation_repository() -> InMemoryEvaluationRepository: @fixture -def dummy_eval_logic( - dummy_elo_qa_grader: EloQaGrader, -) -> DummyEloQaEvalLogic: - return DummyEloQaEvalLogic(grader=dummy_elo_qa_grader) +def dummy_eval_logic(model: ControlModel) -> DummyEloQaEvalLogic: + return DummyEloQaEvalLogic(model=model) @fixture @@ -110,8 +113,8 @@ def elo_evaluator( in_memory_dataset_repository: InMemoryDatasetRepository, in_memory_run_repository: InMemoryRunRepository, in_memory_evaluation_repository: InMemoryEvaluationRepository, - dummy_eval_logic: EloEvaluationLogic[ - SingleChunkQaInput, SingleChunkQaOutput, SingleChunkQaOutput + dummy_eval_logic: EvaluationLogic[ + SingleChunkQaInput, SingleChunkQaOutput, SingleChunkQaOutput, Matches ], ) -> Evaluator[SingleChunkQaInput, SingleChunkQaOutput, SingleChunkQaOutput, Matches]: return Evaluator( @@ -191,7 +194,7 @@ def qa_setup( return run_ids, dataset_id -def test_evauluate_runs_creates_correct_matches_for_elo_qa_eval( +def test_evaluate_runs_creates_correct_matches_for_elo_qa_eval( qa_setup: Tuple[Sequence[str], str], elo_evaluator: Evaluator[ SingleChunkQaInput, SingleChunkQaOutput, SingleChunkQaOutput, Matches