refactor: Rename `DocumentMeanAveragePrecision` and `DocumentMeanReciprocalRank` (#7470)

* Rename DocumentMeanAveragePrecision and DocumentMeanReciprocalRank

* Update releasenotes

* Simplify names
silvanocerza authored Apr 4, 2024
1 parent bf8453e commit 8b8a93b
Showing 6 changed files with 24 additions and 24 deletions.
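For downstream code that imports these evaluators directly, the rename translates into an import change along the following lines. This is a hedged sketch based on the module paths visible in the test diffs below; it assumes the old names are removed rather than kept as aliases, which is what this commit shows:

```python
# Before this commit (old class names, shown for comparison only):
# from haystack.components.evaluators.document_map import DocumentMeanAveragePrecision
# from haystack.components.evaluators.document_mrr import DocumentMeanReciprocalRank

# After this commit (same module paths, renamed classes):
from haystack.components.evaluators.document_map import DocumentMAPEvaluator
from haystack.components.evaluators.document_mrr import DocumentMRREvaluator
```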
8 changes: 4 additions & 4 deletions haystack/components/evaluators/document_map.py
@@ -4,20 +4,20 @@


@component
-class DocumentMeanAveragePrecision:
+class DocumentMAPEvaluator:
"""
Evaluator that calculates the mean average precision of the retrieved documents, a metric
that measures how high retrieved documents are ranked.
Each question can have multiple ground truth documents and multiple retrieved documents.
-`DocumentMeanAveragePrecision` doesn't normalize its inputs, the `DocumentCleaner` component
+`DocumentMAPEvaluator` doesn't normalize its inputs, the `DocumentCleaner` component
should be used to clean and normalize the documents before passing them to this evaluator.
Usage example:
```python
from haystack.components.evaluators import DocumentMAPEvaluator
-evaluator = DocumentMeanAveragePrecision()
+evaluator = DocumentMAPEvaluator()
result = evaluator.run(
ground_truth_documents=[
[Document(content="France")],
@@ -41,7 +41,7 @@ def run(
self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
) -> Dict[str, Any]:
"""
-Run the DocumentMeanAveragePrecision on the given inputs.
+Run the DocumentMAPEvaluator on the given inputs.
All lists must have the same length.
:param ground_truth_documents:
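The metric this docstring describes can be sketched in plain Python. The snippet below is only an illustration of mean average precision over document contents, assuming a retrieved document counts as relevant when its content exactly matches a ground-truth content; it is not the component's implementation, whose normalization details may differ:

```python
from typing import List


def mean_average_precision(ground_truth: List[List[str]], retrieved: List[List[str]]) -> float:
    """Illustrative MAP over document contents, one inner list per question."""
    scores = []
    for truth, ranked in zip(ground_truth, retrieved):
        relevant = set(truth)
        hits, precisions = 0, []
        for rank, content in enumerate(ranked, start=1):
            if content in relevant:
                hits += 1
                precisions.append(hits / rank)  # precision at the rank of each hit
        scores.append(sum(precisions) / len(relevant) if relevant else 0.0)
    return sum(scores) / len(scores) if scores else 0.0


# Mirrors the all-matching test below: a perfect retrieval scores 1.0.
print(mean_average_precision([["Berlin"], ["Paris"]], [["Berlin"], ["Paris"]]))
```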
8 changes: 4 additions & 4 deletions haystack/components/evaluators/document_mrr.py
@@ -4,20 +4,20 @@


@component
-class DocumentMeanReciprocalRank:
+class DocumentMRREvaluator:
"""
Evaluator that calculates the mean reciprocal rank of the retrieved documents.
MRR measures how high the first retrieved document is ranked.
Each question can have multiple ground truth documents and multiple retrieved documents.
-`DocumentMeanReciprocalRank` doesn't normalize its inputs, the `DocumentCleaner` component
+`DocumentMRREvaluator` doesn't normalize its inputs, the `DocumentCleaner` component
should be used to clean and normalize the documents before passing them to this evaluator.
Usage example:
```python
from haystack.components.evaluators import DocumentMRREvaluator
-evaluator = DocumentMeanReciprocalRank()
+evaluator = DocumentMRREvaluator()
result = evaluator.run(
ground_truth_documents=[
[Document(content="France")],
@@ -40,7 +40,7 @@ def run(
self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
) -> Dict[str, Any]:
"""
-Run the DocumentMeanReciprocalRank on the given inputs.
+Run the DocumentMRREvaluator on the given inputs.
`ground_truth_documents` and `retrieved_documents` must have the same length.
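Likewise, mean reciprocal rank rewards only the position of the first relevant document per question. The sketch below illustrates the metric under the same exact-content-match assumption as above; it is not the component's implementation:

```python
from typing import List


def mean_reciprocal_rank(ground_truth: List[List[str]], retrieved: List[List[str]]) -> float:
    """Illustrative MRR over document contents, one inner list per question."""
    scores = []
    for truth, ranked in zip(ground_truth, retrieved):
        relevant = set(truth)
        # Reciprocal rank of the first relevant document; 0.0 if none was retrieved.
        rr = next((1.0 / rank for rank, content in enumerate(ranked, start=1) if content in relevant), 0.0)
        scores.append(rr)
    return sum(scores) / len(scores) if scores else 0.0


# A miss at rank 1 followed by a hit at rank 2 contributes 0.5.
print(mean_reciprocal_rank([["Paris"]], [["London", "Paris"]]))
```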
2 changes: 1 addition & 1 deletion (release note)
@@ -1,4 +1,4 @@
---
features:
- |
-Add DocumentMeanAveragePrecision, it can be used to calculate mean average precision of retrieved documents.
+Add DocumentMAPEvaluator, it can be used to calculate mean average precision of retrieved documents.
2 changes: 1 addition & 1 deletion (release note)
@@ -1,4 +1,4 @@
---
features:
- |
-Add DocumentMeanReciprocalRank, it can be used to calculate mean reciprocal rank of retrieved documents.
+Add DocumentMRREvaluator, it can be used to calculate mean reciprocal rank of retrieved documents.
14 changes: 7 additions & 7 deletions test/components/evaluators/test_document_map.py
@@ -1,11 +1,11 @@
import pytest

from haystack import Document
-from haystack.components.evaluators.document_map import DocumentMeanAveragePrecision
+from haystack.components.evaluators.document_map import DocumentMAPEvaluator


def test_run_with_all_matching():
-evaluator = DocumentMeanAveragePrecision()
+evaluator = DocumentMAPEvaluator()
result = evaluator.run(
ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
retrieved_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
@@ -15,7 +15,7 @@ def test_run_with_all_matching():


def test_run_with_no_matching():
-evaluator = DocumentMeanAveragePrecision()
+evaluator = DocumentMAPEvaluator()
result = evaluator.run(
ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
retrieved_documents=[[Document(content="Paris")], [Document(content="London")]],
@@ -25,7 +25,7 @@ def test_run_with_no_matching():


def test_run_with_partial_matching():
-evaluator = DocumentMeanAveragePrecision()
+evaluator = DocumentMAPEvaluator()
result = evaluator.run(
ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
@@ -35,7 +35,7 @@ def test_run_with_partial_matching():


def test_run_with_complex_data():
-evaluator = DocumentMeanAveragePrecision()
+evaluator = DocumentMAPEvaluator()
result = evaluator.run(
ground_truth_documents=[
[Document(content="France")],
@@ -64,14 +64,14 @@ def test_run_with_complex_data():

def test_run_with_different_lengths():
with pytest.raises(ValueError):
-evaluator = DocumentMeanAveragePrecision()
+evaluator = DocumentMAPEvaluator()
evaluator.run(
ground_truth_documents=[[Document(content="Berlin")]],
retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
)

with pytest.raises(ValueError):
-evaluator = DocumentMeanAveragePrecision()
+evaluator = DocumentMAPEvaluator()
evaluator.run(
ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
retrieved_documents=[[Document(content="Berlin")]],
14 changes: 7 additions & 7 deletions test/components/evaluators/test_document_mrr.py
@@ -1,11 +1,11 @@
import pytest

from haystack import Document
-from haystack.components.evaluators.document_mrr import DocumentMeanReciprocalRank
+from haystack.components.evaluators.document_mrr import DocumentMRREvaluator


def test_run_with_all_matching():
-evaluator = DocumentMeanReciprocalRank()
+evaluator = DocumentMRREvaluator()
result = evaluator.run(
ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
retrieved_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
@@ -15,7 +15,7 @@ def test_run_with_all_matching():


def test_run_with_no_matching():
-evaluator = DocumentMeanReciprocalRank()
+evaluator = DocumentMRREvaluator()
result = evaluator.run(
ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
retrieved_documents=[[Document(content="Paris")], [Document(content="London")]],
@@ -25,7 +25,7 @@ def test_run_with_no_matching():


def test_run_with_partial_matching():
-evaluator = DocumentMeanReciprocalRank()
+evaluator = DocumentMRREvaluator()
result = evaluator.run(
ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
@@ -35,7 +35,7 @@ def test_run_with_partial_matching():


def test_run_with_complex_data():
-evaluator = DocumentMeanReciprocalRank()
+evaluator = DocumentMRREvaluator()
result = evaluator.run(
ground_truth_documents=[
[Document(content="France")],
@@ -68,14 +68,14 @@ def test_run_with_complex_data():

def test_run_with_different_lengths():
with pytest.raises(ValueError):
-evaluator = DocumentMeanReciprocalRank()
+evaluator = DocumentMRREvaluator()
evaluator.run(
ground_truth_documents=[[Document(content="Berlin")]],
retrieved_documents=[[Document(content="Berlin")], [Document(content="London")]],
)

with pytest.raises(ValueError):
-evaluator = DocumentMeanReciprocalRank()
+evaluator = DocumentMRREvaluator()
evaluator.run(
ground_truth_documents=[[Document(content="Berlin")], [Document(content="Paris")]],
retrieved_documents=[[Document(content="Berlin")]],
