Add Delete Evaluation Endpoint (#627)
czaloom authored Jun 14, 2024
1 parent 5be6ea4 commit 5eb8235
Showing 10 changed files with 461 additions and 2 deletions.
175 changes: 174 additions & 1 deletion api/tests/functional-tests/crud/test_evaluation_crud.py
@@ -1,9 +1,10 @@
import pytest
from sqlalchemy import func, select
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session

from valor_api import crud, enums, exceptions, schemas
from valor_api.backend import core
from valor_api.backend import core, models


def test_evaluation_creation_exceptions(db: Session):
@@ -226,3 +227,175 @@ def test_restart_failed_evaluation(db: Session):
assert len(evaluations4) == 1
assert evaluations4[0].status == enums.EvaluationStatus.DONE
assert evaluations4[0].id == evaluations1[0].id


@pytest.fixture
def create_evaluations(db: Session):

rows = [
models.Evaluation(
id=idx,
dataset_names=["1", "2"],
model_name=str(idx),
parameters=schemas.EvaluationParameters(
task_type=enums.TaskType.CLASSIFICATION
).model_dump(),
filters=schemas.Filter().model_dump(),
status=status,
)
for idx, status in enumerate(enums.EvaluationStatus)
]

try:
db.add_all(rows)
db.commit()
except IntegrityError as e:
db.rollback()
raise e

yield [(row.id, row.status) for row in rows]

for row in rows:
try:
db.delete(row)
except IntegrityError:
db.rollback()


def test_delete_evaluation(db: Session, create_evaluations):

for idx, status in create_evaluations:
assert (
db.scalar(
select(func.count(models.Evaluation.id)).where(
models.Evaluation.id == idx
)
)
== 1
)
if status in {
enums.EvaluationStatus.PENDING,
enums.EvaluationStatus.RUNNING,
}:
with pytest.raises(exceptions.EvaluationRunningError):
crud.delete_evaluation(db=db, evaluation_id=idx)
assert (
db.scalar(
select(func.count(models.Evaluation.id)).where(
models.Evaluation.id == idx
)
)
== 1
)
elif status == enums.EvaluationStatus.DELETING:
with pytest.raises(exceptions.EvaluationDoesNotExistError):
crud.delete_evaluation(db=db, evaluation_id=idx)
assert (
db.scalar(
select(func.count(models.Evaluation.id)).where(
models.Evaluation.id == idx
)
)
== 1
)
else:
crud.delete_evaluation(db=db, evaluation_id=idx)
assert (
db.scalar(
select(func.count(models.Evaluation.id)).where(
models.Evaluation.id == idx
)
)
== 0
)

# check for an id that doesn't exist
with pytest.raises(exceptions.EvaluationDoesNotExistError):
crud.delete_evaluation(db=db, evaluation_id=10000)


@pytest.fixture
def create_evaluation_with_metrics(db: Session):
evaluation_id = 0
number_of_metrics = 4

evaluation = models.Evaluation(
id=evaluation_id,
dataset_names=["1", "2"],
model_name="3",
parameters=schemas.EvaluationParameters(
task_type=enums.TaskType.CLASSIFICATION
).model_dump(),
filters=schemas.Filter().model_dump(),
status=enums.EvaluationStatus.DONE,
)
metrics = [
models.Metric(
evaluation_id=evaluation_id,
label_id=None,
type="Precision",
value=float(i) / float(number_of_metrics),
parameters=dict(),
)
for i in range(number_of_metrics)
]

try:
db.add(evaluation)
db.add_all(metrics)
db.commit()
except IntegrityError as e:
db.rollback()
raise e

yield (evaluation_id, number_of_metrics)

for row in [evaluation, *metrics]:
try:
db.delete(row)
db.commit()
except IntegrityError:
db.rollback()


def test_delete_evaluation_with_metrics(
db: Session, create_evaluation_with_metrics
):
row_id, num_metrics = create_evaluation_with_metrics

assert num_metrics == 4
assert (
db.scalar(
select(func.count(models.Evaluation.id)).where(
models.Evaluation.id == row_id
)
)
== 1
)
assert (
db.scalar(
select(func.count(models.Metric.id)).where(
models.Metric.evaluation_id == row_id
)
)
== num_metrics
)

crud.delete_evaluation(db=db, evaluation_id=row_id)

assert (
db.scalar(
select(func.count(models.Evaluation.id)).where(
models.Evaluation.id == row_id
)
)
== 0
)
assert (
db.scalar(
select(func.count(models.Metric.id)).where(
models.Metric.evaluation_id == row_id
)
)
== 0
)
2 changes: 2 additions & 0 deletions api/valor_api/backend/__init__.py
@@ -5,6 +5,7 @@
create_or_get_evaluations,
create_predictions,
delete_dataset,
delete_evaluation_from_id,
delete_model,
get_dataset,
get_dataset_status,
@@ -45,6 +46,7 @@
"create_predictions",
"delete_dataset",
"delete_model",
"delete_evaluation_from_id",
"get_dataset",
"get_paginated_datasets",
"get_dataset_summary",
2 changes: 2 additions & 0 deletions api/valor_api/backend/core/__init__.py
@@ -32,6 +32,7 @@
from .evaluation import (
count_active_evaluations,
create_or_get_evaluations,
delete_evaluation_from_id,
delete_evaluations,
fetch_evaluation_from_id,
get_evaluation_requests_from_model,
@@ -131,6 +132,7 @@
"get_paginated_evaluations",
"get_evaluation_status",
"set_evaluation_status",
"delete_evaluation_from_id",
"get_evaluation_requests_from_model",
"count_active_evaluations",
"delete_dataset_annotations",
35 changes: 35 additions & 0 deletions api/valor_api/backend/core/evaluation.py
@@ -1055,3 +1055,38 @@ def delete_evaluations(
except IntegrityError as e:
db.rollback()
raise e


def delete_evaluation_from_id(db: Session, evaluation_id: int):
"""
Delete an evaluation by id.
Parameters
----------
db : Session
The database session.
evaluation_id : int
The evaluation identifier.
Raises
------
EvaluationRunningError
If the evaluation is currently running.
EvaluationDoesNotExistError
If the evaluation does not exist.
"""
evaluation = fetch_evaluation_from_id(db=db, evaluation_id=evaluation_id)
if evaluation.status in {
enums.EvaluationStatus.PENDING,
enums.EvaluationStatus.RUNNING,
}:
raise exceptions.EvaluationRunningError
elif evaluation.status == enums.EvaluationStatus.DELETING:
return

try:
db.delete(evaluation)
db.commit()
except IntegrityError as e:
db.rollback()
raise e
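
For orientation, here is a minimal sketch of how a caller might exercise the status gating above. It assumes a live SQLAlchemy Session named db and an existing evaluation; the id value 42 is purely illustrative.

from valor_api import exceptions
from valor_api.backend import core

try:
    # Deletion is rejected while the evaluation is PENDING or RUNNING.
    core.delete_evaluation_from_id(db=db, evaluation_id=42)
except exceptions.EvaluationRunningError:
    # Still active; retry once the evaluation reaches a terminal state.
    pass
except exceptions.EvaluationDoesNotExistError:
    # No evaluation with this id (or it was already removed).
    pass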
3 changes: 2 additions & 1 deletion api/valor_api/crud/__init__.py
@@ -5,7 +5,7 @@
create_or_get_evaluations,
create_predictions,
)
from ._delete import delete
from ._delete import delete, delete_evaluation
from ._read import (
get_dataset,
get_dataset_summary,
@@ -44,4 +44,5 @@
"get_prediction",
"finalize",
"get_evaluations",
"delete_evaluation",
]
19 changes: 19 additions & 0 deletions api/valor_api/crud/_delete.py
@@ -25,3 +25,22 @@ def delete(
backend.delete_dataset(db, dataset_name)
elif model_name is not None:
backend.delete_model(db, model_name)


def delete_evaluation(*, db: Session, evaluation_id: int):
"""
Deletes an evaluation by id.
Parameters
----------
db : Session
The database session.
evaluation_id : int
The evaluation identifier.
Raises
------
EvaluationRunningError
If the evaluation is currently running.
EvaluationDoesNotExistError
If the evaluation does not exist.
"""
backend.delete_evaluation_from_id(db=db, evaluation_id=evaluation_id)
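
The crud layer is a thin pass-through to the backend function above. A hedged usage sketch, assuming a session db obtained from the application's dependency machinery; both arguments are keyword-only because of the bare * in the signature.

from valor_api import crud

crud.delete_evaluation(db=db, evaluation_id=42)  # keyword-only arguments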
35 changes: 35 additions & 0 deletions api/valor_api/main.py
@@ -1183,6 +1183,41 @@ def get_evaluations(
raise exceptions.create_http_error(e)


@app.delete(
"/evaluations/{evaluation_id}",
dependencies=[Depends(token_auth_scheme)],
tags=["Evaluations"],
)
def delete_evaluation(
evaluation_id: int,
db: Session = Depends(get_db),
):
"""
Delete an evaluation from the database.
DELETE Endpoint: `/evaluations/{evaluation_id}`
Parameters
----------
evaluation_id : int
The evaluation identifier.
db : Session
The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user.
Raises
------
HTTPException (404)
If the evaluation doesn't exist.
HTTPException (409)
If the evaluation isn't in the correct state to be deleted.
"""
logger.debug(f"request to delete evaluation {evaluation_id}")
try:
crud.delete_evaluation(db=db, evaluation_id=evaluation_id)
except Exception as e:
raise exceptions.create_http_error(e)
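
A hedged client-side sketch of calling the new endpoint with the requests library; the host, port, and bearer token below are placeholders rather than values defined by this commit.

import requests

# DELETE /evaluations/{evaluation_id}
resp = requests.delete(
    "http://localhost:8000/evaluations/42",
    headers={"Authorization": "Bearer <token>"},  # only needed when auth is enabled
)
# 404: the evaluation does not exist; 409: it is not in a deletable state.
resp.raise_for_status()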


""" AUTHENTICATION """


13 changes: 13 additions & 0 deletions client/valor/client.py
@@ -836,6 +836,19 @@ def _build_query_param(param_name, element, typ):

return self._requests_get_rel_host(endpoint).json()

def delete_evaluation(self, evaluation_id: int) -> None:
"""
Deletes an evaluation.
`DELETE` endpoint.
Parameters
----------
evaluation_id : int
The id of the evaluation to be deleted.
"""
self._requests_delete_rel_host(f"evaluations/{evaluation_id}")
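
A usage sketch for this low-level client method, assuming conn is an already-constructed client/connection object from valor.client that points at a running API instance.

# Issues DELETE /evaluations/7 against the configured host; returns None on success.
conn.delete_evaluation(evaluation_id=7)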

def get_user(self) -> Union[str, None]:
"""
Gets the user's e-mail address (in the case when auth is enabled)
24 changes: 24 additions & 0 deletions client/valor/coretypes.py
@@ -1804,3 +1804,27 @@ def evaluate(
request, allow_retries=allow_retries
)
]

def delete_evaluation(self, evaluation_id: int, timeout: int = 0) -> None:
"""
Deletes an evaluation.
Parameters
----------
evaluation_id : int
The id of the evaluation to be deleted.
timeout : int
The number of seconds to wait in order to confirm that the evaluation was deleted.
"""
self.conn.delete_evaluation(evaluation_id)
if timeout:
for _ in range(timeout):
try:
self.get_evaluations(evaluation_ids=[evaluation_id])
except EvaluationDoesNotExist:
break
time.sleep(1)
else:
raise TimeoutError(
"Evaluation wasn't deleted within timeout interval"
)
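
A hedged sketch of the higher-level call with the optional confirmation timeout, assuming client is the object in coretypes.py that exposes this method and that evaluation 42 exists.

try:
    # Delete, then poll get_evaluations for up to 30 seconds until the id no longer resolves.
    client.delete_evaluation(evaluation_id=42, timeout=30)
except TimeoutError:
    # The API accepted the request, but deletion was not confirmed within the window.
    pass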
