Raise ValueError if Annotation does not Support Target Type #706

Merged · 1 commit · Aug 14, 2024
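Summary (inferred from the diffs below): `_convert_annotations_to_common_type` previously used `continue` to silently skip any dataset or model whose stored annotation type could not be converted to the requested target type; it now raises a `ValueError`, so an evaluation that requests an unsupported conversion fails instead of quietly computing metrics over a subset of the data. The guard relies on `AnnotationType` being ordered from least to most expressive. A minimal sketch of that rule — the enum values are illustrative assumptions; only the names match `valor_api.enums.AnnotationType`:

```python
from enum import IntEnum


class AnnotationType(IntEnum):
    # Assumed ordering: a richer geometry can always be reduced to a
    # simpler one (raster -> polygon -> box), never the reverse.
    BOX = 1
    POLYGON = 2
    RASTER = 3


def check_convertible(source_type: AnnotationType, target_type: AnnotationType) -> None:
    """Sketch of the guard this PR adds; valor's real function also
    performs the geometry conversion in the database."""
    if target_type > source_type:
        # Old behavior: `continue` (skip silently). New behavior: fail loudly.
        raise ValueError(
            f"Cannot convert annotations from {source_type.name} to {target_type.name}."
        )


check_convertible(AnnotationType.RASTER, AnnotationType.BOX)  # ok: down-conversion
try:
    check_convertible(AnnotationType.BOX, AnnotationType.RASTER)
except ValueError as e:
    print(e)  # Cannot convert annotations from BOX to RASTER.
```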
147 changes: 147 additions & 0 deletions api/tests/functional-tests/backend/metrics/test_detection.py
@@ -1,13 +1,16 @@
import numpy as np
import pytest
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session

from valor_api import crud, enums, schemas
from valor_api.backend import core
from valor_api.backend.metrics.detection import (
    RankedPair,
    _compute_detailed_curves,
    _compute_detection_metrics,
    _compute_detection_metrics_with_detailed_precision_recall_curve,
    _convert_annotations_to_common_type,
    compute_detection_metrics,
)
from valor_api.backend.models import (
@@ -2276,3 +2279,147 @@ def test_detection_exceptions(db: Session):

    # show that no errors raised
    compute_detection_metrics(db=db, evaluation_id=evaluation_id)


def test__convert_annotations_to_common_type(db: Session):

    dataset_name = "dataset"
    model_name = "model"

    xmin, xmax, ymin, ymax = 11, 45, 37, 102
    h, w = 150, 200
    mask = np.zeros((h, w), dtype=bool)
    mask[ymin:ymax, xmin:xmax] = True

    pts = [
        (xmin, ymin),
        (xmin, ymax),
        (xmax, ymax),
        (xmax, ymin),
        (xmin, ymin),
    ]
    poly = schemas.Polygon(value=[pts])
    raster = schemas.Raster.from_numpy(mask)
    box = schemas.Box.from_extrema(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
    datum = schemas.Datum(uid="123")

    gt_box = schemas.GroundTruth(
        datum=datum,
        dataset_name=dataset_name,
        annotations=[
            schemas.Annotation(
                bounding_box=box,
                labels=[schemas.Label(key="box", value="value")],
                is_instance=True,
            )
        ],
    )
    gt_polygon = schemas.GroundTruth(
        datum=datum,
        dataset_name=dataset_name,
        annotations=[
            schemas.Annotation(
                polygon=poly,
                labels=[schemas.Label(key="polygon", value="value")],
                is_instance=True,
            )
        ],
    )
    gt_raster = schemas.GroundTruth(
        datum=datum,
        dataset_name=dataset_name,
        annotations=[
            schemas.Annotation(
                raster=raster,
                labels=[schemas.Label(key="raster", value="value")],
                is_instance=True,
            )
        ],
    )

    pd_box = schemas.Prediction(
        datum=datum,
        dataset_name=dataset_name,
        model_name=model_name,
        annotations=[
            schemas.Annotation(
                bounding_box=box,
                labels=[schemas.Label(key="box", value="value", score=0.88)],
                is_instance=True,
            )
        ],
    )
    pd_polygon = schemas.Prediction(
        datum=datum,
        dataset_name=dataset_name,
        model_name=model_name,
        annotations=[
            schemas.Annotation(
                polygon=poly,
                labels=[
                    schemas.Label(key="polygon", value="value", score=0.89)
                ],
                is_instance=True,
            )
        ],
    )
    pd_raster = schemas.Prediction(
        datum=datum,
        dataset_name=dataset_name,
        model_name=model_name,
        annotations=[
            schemas.Annotation(
                raster=raster,
                labels=[schemas.Label(key="raster", value="value", score=0.9)],
                is_instance=True,
            )
        ],
    )

    gts = [
        (enums.AnnotationType.BOX, gt_box),
        (enums.AnnotationType.POLYGON, gt_polygon),
        (enums.AnnotationType.RASTER, gt_raster),
    ]
    pds = [
        (enums.AnnotationType.BOX, pd_box),
        (enums.AnnotationType.POLYGON, pd_polygon),
        (enums.AnnotationType.RASTER, pd_raster),
    ]

    for gt_type, gt in gts:
        for pd_type, pd in pds:
            crud.create_dataset(
                db=db, dataset=schemas.Dataset(name=dataset_name)
            )
            crud.create_groundtruths(db=db, groundtruths=[gt])
            crud.finalize(db=db, dataset_name="dataset")
            crud.create_model(db=db, model=schemas.Model(name=model_name))
            crud.create_predictions(db=db, predictions=[pd])

            dataset = core.fetch_dataset(db=db, name=dataset_name)
            model = core.fetch_model(db=db, name=model_name)

            for target_type in [
                enums.AnnotationType.RASTER,
                enums.AnnotationType.POLYGON,
                enums.AnnotationType.BOX,
            ]:
                if min(gt_type, pd_type) >= target_type:
                    _convert_annotations_to_common_type(
                        db=db,
                        datasets=[dataset],
                        model=model,
                        target_type=target_type,
                    )
                else:
                    with pytest.raises(ValueError):
                        _convert_annotations_to_common_type(
                            db=db,
                            datasets=[dataset],
                            model=model,
                            target_type=target_type,
                        )

            crud.delete(db=db, dataset_name=dataset_name)
            crud.delete(db=db, model_name=model_name)
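Note on the pairing loop above: it depends on `enums.AnnotationType` supporting order comparisons. Conversion is expected to succeed only when both the groundtruth and prediction source types are at least as expressive as the requested target (`min(gt_type, pd_type) >= target_type`); every other combination must now raise `ValueError` rather than being skipped.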
8 changes: 7 additions & 1 deletion api/valor_api/backend/metrics/detection.py
@@ -788,7 +788,9 @@ def _convert_annotations_to_common_type(
             db=db, dataset=dataset, task_type=enums.TaskType.OBJECT_DETECTION
         )
         if target_type > source_type:
-            continue
+            raise ValueError(
+                f"Cannot convert dataset {dataset.name} annotations from {source_type} to {target_type}."
+            )
         core.convert_geometry(
             db=db,
             dataset=dataset,
@@ -803,6 +805,10 @@
         model=model,
         task_type=enums.TaskType.OBJECT_DETECTION,
     )
+    if target_type > source_type:
+        raise ValueError(
+            f"Cannot convert model {model.name} annotations from {source_type} to {target_type}."
+        )
     core.convert_geometry(
         db=db,
         dataset=dataset,
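How the new error surfaces to callers: the integration test below only asserts that an evaluation requesting an impossible conversion ends in `EvaluationStatus.FAILED`, which suggests the evaluation runner catches the `ValueError` and records the failure. A self-contained sketch of that pattern — the runner shown here is hypothetical, not valor's actual code:

```python
from enum import Enum


class EvaluationStatus(Enum):
    DONE = "done"
    FAILED = "failed"


def run_evaluation(compute) -> EvaluationStatus:
    # Hypothetical runner sketch: the catch-and-record step is inferred
    # from the FAILED assertions in the integration test below.
    try:
        compute()  # _convert_annotations_to_common_type may now raise ValueError
    except ValueError:
        return EvaluationStatus.FAILED
    return EvaluationStatus.DONE


def bad_conversion():
    raise ValueError("Cannot convert dataset annotations from BOX to RASTER.")


assert run_evaluation(bad_conversion) == EvaluationStatus.FAILED
```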
111 changes: 12 additions & 99 deletions integration_tests/client/metrics/test_detection.py
@@ -3454,37 +3454,23 @@ def test_evaluate_mixed_annotations(
     for m in expected:
         assert m in eval_job.metrics

-    eval_job_raster = model.evaluate_detection(
+    eval_job_box = model.evaluate_detection(
         [dset_box, dset_polygon, dset_raster],
         iou_thresholds_to_compute=[0.1, 0.6],
         iou_thresholds_to_return=[0.1, 0.6],
         metrics_to_return=[
             "AP",
         ],
-        convert_annotations_to_type=AnnotationType.RASTER,
+        convert_annotations_to_type=AnnotationType.BOX,
     )
-    eval_job_raster.wait_for_completion()
-
-    expected = [
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.1},
-            "value": 1.0,
-            "label": {"key": "raster", "value": "value"},
-        },
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.6},
-            "value": 1.0,
-            "label": {"key": "raster", "value": "value"},
-        },
-    ]
+    eval_job_box.wait_for_completion()

-    for m in eval_job_raster.metrics:
+    for m in eval_job_box.metrics:
         assert m in expected
     for m in expected:
-        assert m in eval_job_raster.metrics
+        assert m in eval_job_box.metrics

+    # cannot force to polygon as some datasets do not contain this type
     eval_job_poly = model.evaluate_detection(
         [dset_box, dset_polygon, dset_raster],
         iou_thresholds_to_compute=[0.1, 0.6],
@@ -3495,90 +3481,17 @@
         convert_annotations_to_type=AnnotationType.POLYGON,
     )
     eval_job_poly.wait_for_completion()
+    assert eval_job_poly.status == EvaluationStatus.FAILED

-    expected = [
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.1},
-            "value": 1.0,
-            "label": {"key": "raster", "value": "value"},
-        },
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.6},
-            "value": 1.0,
-            "label": {"key": "raster", "value": "value"},
-        },
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.1},
-            "value": 1.0,
-            "label": {"key": "polygon", "value": "value"},
-        },
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.6},
-            "value": 1.0,
-            "label": {"key": "polygon", "value": "value"},
-        },
-    ]
-
-    for m in eval_job_poly.metrics:
-        assert m in expected
-    for m in expected:
-        assert m in eval_job_poly.metrics
-
-    eval_job_box = model.evaluate_detection(
+    # cannot force to raster as some datasets do not contain this type
+    eval_job_raster = model.evaluate_detection(
         [dset_box, dset_polygon, dset_raster],
         iou_thresholds_to_compute=[0.1, 0.6],
         iou_thresholds_to_return=[0.1, 0.6],
         metrics_to_return=[
             "AP",
         ],
-        convert_annotations_to_type=AnnotationType.BOX,
+        convert_annotations_to_type=AnnotationType.RASTER,
     )
-    eval_job_box.wait_for_completion()
-
-    expected = [
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.1},
-            "value": 1.0,
-            "label": {"key": "raster", "value": "value"},
-        },
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.6},
-            "value": 1.0,
-            "label": {"key": "raster", "value": "value"},
-        },
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.1},
-            "value": 1.0,
-            "label": {"key": "box", "value": "value"},
-        },
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.6},
-            "value": 1.0,
-            "label": {"key": "box", "value": "value"},
-        },
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.1},
-            "value": 1.0,
-            "label": {"key": "polygon", "value": "value"},
-        },
-        {
-            "type": "AP",
-            "parameters": {"iou": 0.6},
-            "value": 1.0,
-            "label": {"key": "polygon", "value": "value"},
-        },
-    ]
-
-    for m in eval_job_box.metrics:
-        assert m in expected
-    for m in expected:
-        assert m in eval_job_box.metrics
+    eval_job_raster.wait_for_completion()
+    assert eval_job_raster.status == EvaluationStatus.FAILED
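Net effect for client code: forcing `convert_annotations_to_type` to a type that any of the evaluated datasets cannot reach no longer yields partial metrics; the job completes with `EvaluationStatus.FAILED`, so callers should check `eval_job.status` after `wait_for_completion()` rather than assuming metrics are present.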