diff --git a/api/tests/functional-tests/backend/core/test_dataset.py b/api/tests/functional-tests/backend/core/test_dataset.py
index d267b0a80..744c2b4bf 100644
--- a/api/tests/functional-tests/backend/core/test_dataset.py
+++ b/api/tests/functional-tests/backend/core/test_dataset.py
@@ -151,6 +151,7 @@ def test_dataset_status_with_evaluations(
     created_dataset: str,
     created_model: str,
 ):
+    # create an evaluation
     core.set_dataset_status(db, created_dataset, enums.TableStatus.FINALIZED)
     evaluations = core.create_or_get_evaluations(
diff --git a/api/tests/functional-tests/backend/core/test_label.py b/api/tests/functional-tests/backend/core/test_label.py
index 0609956f7..578587c98 100644
--- a/api/tests/functional-tests/backend/core/test_label.py
+++ b/api/tests/functional-tests/backend/core/test_label.py
@@ -273,7 +273,13 @@ def test_get_labels_filtered(
 ):
     assert len(db.query(models.Label).all()) == 5

-    filters = schemas.Filter(label_keys=["k1"])
+    filters = schemas.Filter(
+        labels=schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY),
+            rhs=schemas.Value.infer("k1"),
+            op=schemas.FilterOperator.EQ,
+        ),
+    )
     labels = get_labels(db, filters=filters)

     assert len(labels) == 3
@@ -322,7 +328,13 @@ def test_get_label_keys_filtered(
 ):
     assert len(db.query(models.Label).all()) == 5

-    filters = schemas.Filter(label_keys=["k1"])
+    filters = schemas.Filter(
+        labels=schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY),
+            rhs=schemas.Value.infer("k1"),
+            op=schemas.FilterOperator.EQ,
+        ),
+    )
     labels = get_label_keys(db, filters=filters)

     assert len(labels) == 1
@@ -346,18 +358,78 @@ def test_get_joint_labels(
     labels = get_joint_labels(
         db=db,
         lhs=schemas.Filter(
-            dataset_names=[dataset_name],
-            task_types=[enums.TaskType.CLASSIFICATION],
-            require_bounding_box=False,
-            require_polygon=False,
-            require_raster=False,
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.POLYGON
+                        ),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.RASTER
+                        ),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         rhs=schemas.Filter(
-            model_names=[model_name],
-            task_types=[enums.TaskType.CLASSIFICATION],
-            require_bounding_box=False,
-            require_polygon=False,
-            require_raster=False,
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer(model_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.POLYGON
+                        ),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.RASTER
+                        ),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
     )
     assert len(labels) == 1
@@ -376,12 +448,46 @@ def test_get_joint_keys(
     keys = get_joint_keys(
         db=db,
         lhs=schemas.Filter(
-            dataset_names=[dataset_name],
-            task_types=[enums.TaskType.CLASSIFICATION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         rhs=schemas.Filter(
-            model_names=[model_name],
-            task_types=[enums.TaskType.CLASSIFICATION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer(model_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
     )
     assert len(keys) == 1
@@ -397,18 +503,78 @@ def test_get_disjoint_labels(
     ds_unique, md_unique = get_disjoint_labels(
         db=db,
         lhs=schemas.Filter(
-            dataset_names=[dataset_name],
-            task_types=[enums.TaskType.CLASSIFICATION],
-            require_bounding_box=False,
-            require_polygon=False,
-            require_raster=False,
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.POLYGON
+                        ),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.RASTER
+                        ),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         rhs=schemas.Filter(
-            model_names=[model_name],
-            task_types=[enums.TaskType.CLASSIFICATION],
-            require_bounding_box=False,
-            require_polygon=False,
-            require_raster=False,
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer(model_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.POLYGON
+                        ),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.RASTER
+                        ),
+                        op=schemas.FilterOperator.ISNULL,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
     )
     assert len(ds_unique) == 2
@@ -433,12 +599,46 @@ def test_get_disjoint_keys(
     ds_unique, md_unique = get_disjoint_keys(
         db=db,
         lhs=schemas.Filter(
-            dataset_names=[dataset_name],
-            task_types=[enums.TaskType.CLASSIFICATION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         rhs=schemas.Filter(
-            model_names=[model_name],
-            task_types=[enums.TaskType.CLASSIFICATION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer(model_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
     )
     assert len(ds_unique) == 1
@@ -514,8 +714,27 @@ def test_label_functions(
     assert get_label_keys(
         db,
         schemas.Filter(
-            dataset_names=[dataset_name],
-            task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(
+                            enums.TaskType.SEMANTIC_SEGMENTATION
+                        ),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         ignore_predictions=True,
     ) == {"semsegk1", "semsegk2", "semsegk3"}
@@ -523,9 +742,33 @@ def test_label_functions(
     assert get_labels(
         db,
         schemas.Filter(
-            dataset_names=[dataset_name],
-            task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
-            require_raster=True,
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(
+                            enums.TaskType.SEMANTIC_SEGMENTATION
+                        ),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.RASTER
+                        ),
+                        op=schemas.FilterOperator.ISNOTNULL,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         ignore_predictions=True,
     ) == {
@@ -537,9 +780,33 @@ def test_label_functions(
         get_labels(
             db,
             schemas.Filter(
-                dataset_names=[dataset_name],
-                task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
-                require_polygon=True,
+                labels=schemas.LogicalFunction(
+                    args=[
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.DATASET_NAME
+                            ),
+                            rhs=schemas.Value.infer(dataset_name),
+                            op=schemas.FilterOperator.EQ,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.TASK_TYPE
+                            ),
+                            rhs=schemas.Value.infer(
+                                enums.TaskType.SEMANTIC_SEGMENTATION
+                            ),
+                            op=schemas.FilterOperator.CONTAINS,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.POLYGON
+                            ),
+                            op=schemas.FilterOperator.ISNOTNULL,
+                        ),
+                    ],
+                    op=schemas.LogicalOperator.AND,
+                )
             ),
             ignore_predictions=True,
         )
@@ -549,9 +816,34 @@ def test_label_functions(
     assert get_label_keys(
         db,
         schemas.Filter(
-            model_names=[model_name],
-            dataset_names=[dataset_name],
-            task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer(model_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(
+                            enums.TaskType.SEMANTIC_SEGMENTATION
+                        ),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
        ),
        ignore_groundtruths=True,
    ) == {"semsegk1", "semsegk2", "semsegk3_pred"}
@@ -559,10 +851,40 @@ def test_label_functions(
     assert get_labels(
         db,
         schemas.Filter(
-            model_names=[model_name],
-            dataset_names=[dataset_name],
-            require_raster=True,
-            task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer(model_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(
+                            enums.TaskType.SEMANTIC_SEGMENTATION
+                        ),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.RASTER
+                        ),
+                        op=schemas.FilterOperator.ISNOTNULL,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         ignore_groundtruths=True,
     ) == {
@@ -575,10 +897,40 @@ def test_label_functions(
        get_labels(
            db,
            schemas.Filter(
-                model_names=[model_name],
-                dataset_names=[dataset_name],
-                require_polygon=True,
-                task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
+                labels=schemas.LogicalFunction(
+                    args=[
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.DATASET_NAME
+                            ),
+                            rhs=schemas.Value.infer(dataset_name),
+                            op=schemas.FilterOperator.EQ,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.MODEL_NAME
+                            ),
+                            rhs=schemas.Value.infer(model_name),
+                            op=schemas.FilterOperator.EQ,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.TASK_TYPE
+                            ),
+                            rhs=schemas.Value.infer(
+                                enums.TaskType.SEMANTIC_SEGMENTATION
+                            ),
+                            op=schemas.FilterOperator.CONTAINS,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.POLYGON
+                            ),
+                            op=schemas.FilterOperator.ISNOTNULL,
+                        ),
+                    ],
+                    op=schemas.LogicalOperator.AND,
+                )
            ),
            ignore_groundtruths=True,
        )
@@ -589,8 +941,27 @@ def test_label_functions(
        get_label_keys(
            db,
            schemas.Filter(
-                dataset_names=[dataset_name],
-                task_types=[enums.TaskType.CLASSIFICATION],
+                labels=schemas.LogicalFunction(
+                    args=[
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.DATASET_NAME
+                            ),
+                            rhs=schemas.Value.infer(dataset_name),
+                            op=schemas.FilterOperator.EQ,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.TASK_TYPE
+                            ),
+                            rhs=schemas.Value.infer(
+                                enums.TaskType.CLASSIFICATION
+                            ),
+                            op=schemas.FilterOperator.CONTAINS,
+                        ),
+                    ],
+                    op=schemas.LogicalOperator.AND,
+                )
            ),
            ignore_predictions=True,
        )
@@ -600,9 +971,34 @@ def test_label_functions(
        get_labels(
            db,
            schemas.Filter(
-                model_names=[model_name],
-                dataset_names=[dataset_name],
-                task_types=[enums.TaskType.CLASSIFICATION],
+                labels=schemas.LogicalFunction(
+                    args=[
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.DATASET_NAME
+                            ),
+                            rhs=schemas.Value.infer(dataset_name),
+                            op=schemas.FilterOperator.EQ,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.MODEL_NAME
+                            ),
+                            rhs=schemas.Value.infer(model_name),
+                            op=schemas.FilterOperator.EQ,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.TASK_TYPE
+                            ),
+                            rhs=schemas.Value.infer(
+                                enums.TaskType.CLASSIFICATION
+                            ),
+                            op=schemas.FilterOperator.CONTAINS,
+                        ),
+                    ],
+                    op=schemas.LogicalOperator.AND,
+                )
            ),
            ignore_groundtruths=True,
        )
@@ -612,8 +1008,27 @@ def test_label_functions(
     assert get_label_keys(
         db,
         schemas.Filter(
-            dataset_names=[dataset_name],
-            task_types=[enums.TaskType.OBJECT_DETECTION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(
+                            enums.TaskType.OBJECT_DETECTION
+                        ),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         ignore_predictions=True,
     ) == {"inssegk1", "inssegk2", "inssegk3"}
@@ -621,9 +1036,33 @@ def test_label_functions(
     assert get_labels(
         db,
         schemas.Filter(
-            dataset_names=[dataset_name],
-            require_raster=True,
-            task_types=[enums.TaskType.OBJECT_DETECTION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(
+                            enums.TaskType.OBJECT_DETECTION
+                        ),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.RASTER
+                        ),
+                        op=schemas.FilterOperator.ISNOTNULL,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         ignore_predictions=True,
     ) == {
@@ -634,12 +1073,47 @@ def test_label_functions(
     assert get_labels(
         db,
         schemas.Filter(
-            dataset_names=[dataset_name],
-            require_raster=True,
-            task_types=[
-                enums.TaskType.OBJECT_DETECTION,
-                enums.TaskType.SEMANTIC_SEGMENTATION,
-            ],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.LogicalFunction(
+                        args=[
+                            schemas.Condition(
+                                lhs=schemas.Symbol(
+                                    name=schemas.SupportedSymbol.TASK_TYPE
+                                ),
+                                rhs=schemas.Value.infer(
+                                    enums.TaskType.OBJECT_DETECTION
+                                ),
+                                op=schemas.FilterOperator.CONTAINS,
+                            ),
+                            schemas.Condition(
+                                lhs=schemas.Symbol(
+                                    name=schemas.SupportedSymbol.TASK_TYPE
+                                ),
+                                rhs=schemas.Value.infer(
+                                    enums.TaskType.SEMANTIC_SEGMENTATION
+                                ),
+                                op=schemas.FilterOperator.CONTAINS,
+                            ),
+                        ],
+                        op=schemas.LogicalOperator.OR,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.RASTER
+                        ),
+                        op=schemas.FilterOperator.ISNOTNULL,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         ignore_predictions=True,
     ) == {
@@ -654,8 +1128,27 @@ def test_label_functions(
     assert get_label_keys(
         db,
         schemas.Filter(
-            dataset_names=[dataset_name],
-            task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(
+                            enums.TaskType.SEMANTIC_SEGMENTATION
+                        ),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         ignore_predictions=True,
     ) == {"semsegk1", "semsegk2", "semsegk3"}
@@ -663,9 +1156,34 @@ def test_label_functions(
     assert get_label_keys(
         db,
         schemas.Filter(
-            model_names=[model_name],
-            dataset_names=[dataset_name],
-            task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer(model_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.TASK_TYPE
+                        ),
+                        rhs=schemas.Value.infer(
+                            enums.TaskType.SEMANTIC_SEGMENTATION
+                        ),
+                        op=schemas.FilterOperator.CONTAINS,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         ignore_groundtruths=True,
     ) == {"semsegk1", "semsegk2", "semsegk3_pred"}
@@ -674,9 +1192,34 @@ def test_label_functions(
        get_labels(
            db,
            schemas.Filter(
-                model_names=[model_name],
-                dataset_names=[dataset_name],
-                task_types=[enums.TaskType.OBJECT_DETECTION],
+                labels=schemas.LogicalFunction(
+                    args=[
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.DATASET_NAME
+                            ),
+                            rhs=schemas.Value.infer(dataset_name),
+                            op=schemas.FilterOperator.EQ,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.MODEL_NAME
+                            ),
+                            rhs=schemas.Value.infer(model_name),
+                            op=schemas.FilterOperator.EQ,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.TASK_TYPE
+                            ),
+                            rhs=schemas.Value.infer(
+                                enums.TaskType.OBJECT_DETECTION
+                            ),
+                            op=schemas.FilterOperator.CONTAINS,
+                        ),
+                    ],
+                    op=schemas.LogicalOperator.AND,
+                )
            ),
            ignore_groundtruths=True,
        )
@@ -686,13 +1229,54 @@ def test_label_functions(
     assert get_labels(
         db,
         schemas.Filter(
-            model_names=[model_name],
-            dataset_names=[dataset_name],
-            require_raster=True,
-            task_types=[
-                enums.TaskType.SEMANTIC_SEGMENTATION,
-                enums.TaskType.OBJECT_DETECTION,
-            ],
+            labels=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer(dataset_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer(model_name),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.LogicalFunction(
+                        args=[
+                            schemas.Condition(
+                                lhs=schemas.Symbol(
+                                    name=schemas.SupportedSymbol.TASK_TYPE
+                                ),
+                                rhs=schemas.Value.infer(
+                                    enums.TaskType.OBJECT_DETECTION
+                                ),
+                                op=schemas.FilterOperator.CONTAINS,
+                            ),
+                            schemas.Condition(
+                                lhs=schemas.Symbol(
+                                    name=schemas.SupportedSymbol.TASK_TYPE
+                                ),
+                                rhs=schemas.Value.infer(
+                                    enums.TaskType.SEMANTIC_SEGMENTATION
+                                ),
+                                op=schemas.FilterOperator.CONTAINS,
+                            ),
+                        ],
+                        op=schemas.LogicalOperator.OR,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.RASTER
+                        ),
+                        op=schemas.FilterOperator.ISNOTNULL,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         ignore_groundtruths=True,
     ) == {
@@ -704,10 +1288,40 @@ def test_label_functions(
        get_labels(
            db,
            schemas.Filter(
-                model_names=[model_name],
-                dataset_names=[dataset_name],
-                require_raster=True,
-                task_types=[enums.TaskType.OBJECT_DETECTION],
+                labels=schemas.LogicalFunction(
+                    args=[
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.DATASET_NAME
+                            ),
+                            rhs=schemas.Value.infer(dataset_name),
+                            op=schemas.FilterOperator.EQ,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.MODEL_NAME
+                            ),
+                            rhs=schemas.Value.infer(model_name),
+                            op=schemas.FilterOperator.EQ,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.TASK_TYPE
+                            ),
+                            rhs=schemas.Value.infer(
+                                enums.TaskType.OBJECT_DETECTION
+                            ),
+                            op=schemas.FilterOperator.CONTAINS,
+                        ),
+                        schemas.Condition(
+                            lhs=schemas.Symbol(
+                                name=schemas.SupportedSymbol.RASTER
+                            ),
+                            op=schemas.FilterOperator.ISNOTNULL,
+                        ),
+                    ],
+                    op=schemas.LogicalOperator.AND,
+                )
            ),
            ignore_groundtruths=True,
        )
diff --git a/api/tests/functional-tests/backend/metrics/test_classification.py b/api/tests/functional-tests/backend/metrics/test_classification.py
index 56239304d..778bec3f7 100644
--- a/api/tests/functional-tests/backend/metrics/test_classification.py
+++ b/api/tests/functional-tests/backend/metrics/test_classification.py
@@ -134,12 +134,42 @@ def test_compute_confusion_matrix_at_grouper_key(
     classification_test_data,
 ):
     prediction_filter = schemas.Filter(
-        model_names=[model_name],
-        task_types=[enums.TaskType.CLASSIFICATION],
+        predictions=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )
     groundtruth_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        task_types=[enums.TaskType.CLASSIFICATION],
+        groundtruths=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )

     labels = fetch_union_of_labels(
@@ -160,11 +190,29 @@

     # groundtruths filter
     gFilter = groundtruth_filter.model_copy()
-    gFilter.label_keys = label_key_filter
+    gFilter.labels = schemas.LogicalFunction.or_(
+        *[
+            schemas.Condition(
+                lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY),
+                rhs=schemas.Value.infer(key),
+                op=schemas.FilterOperator.EQ,
+            )
+            for key in label_key_filter
+        ]
+    )

     # predictions filter
     pFilter = prediction_filter.model_copy()
-    pFilter.label_keys = label_key_filter
+    pFilter.labels = schemas.LogicalFunction.or_(
+        *[
+            schemas.Condition(
+                lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY),
+                rhs=schemas.Value.infer(key),
+                op=schemas.FilterOperator.EQ,
+            )
+            for key in label_key_filter
+        ]
+    )

     groundtruths = generate_select(
         models.GroundTruth,
@@ -217,11 +265,29 @@ def test_compute_confusion_matrix_at_grouper_key(

     # groundtruths filter
     gFilter = groundtruth_filter.model_copy()
-    gFilter.label_keys = label_key_filter
+    gFilter.labels = schemas.LogicalFunction.or_(
+        *[
+            schemas.Condition(
+                lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY),
+                rhs=schemas.Value.infer(key),
+                op=schemas.FilterOperator.EQ,
+            )
+            for key in label_key_filter
+        ]
+    )

     # predictions filter
     pFilter = prediction_filter.model_copy()
-    pFilter.label_keys = label_key_filter
+    pFilter.labels = schemas.LogicalFunction.or_(
+        *[
+            schemas.Condition(
+                lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY),
+                rhs=schemas.Value.infer(key),
+                op=schemas.FilterOperator.EQ,
+            )
+            for key in label_key_filter
+        ]
+    )

     groundtruths = generate_select(
         models.GroundTruth,
@@ -277,16 +343,59 @@ def test_compute_confusion_matrix_at_grouper_key_and_filter(
     """
     Test filtering by metadata (md1: md1-val0).
     """
-    prediction_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        model_names=[model_name],
+    prediction_filter = schemas.Filter(
+        predictions=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )
     groundtruth_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        model_names=[model_name],
-        task_types=[enums.TaskType.CLASSIFICATION],
-        datum_metadata={"md1": [schemas.StringFilter(value="md1-val0")]},
+        groundtruths=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATUM_META, key="md1"
+                    ),
+                    rhs=schemas.Value.infer("md1-val0"),
+                    op=schemas.FilterOperator.EQ,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )

     labels = fetch_union_of_labels(
@@ -307,11 +416,29 @@ def test_compute_confusion_matrix_at_grouper_key_and_filter(

     # groundtruths filter
     gFilter = groundtruth_filter.model_copy()
-    gFilter.label_keys = label_key_filter
+    gFilter.labels = schemas.LogicalFunction.or_(
+        *[
+            schemas.Condition(
+                lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY),
+                rhs=schemas.Value.infer(key),
+                op=schemas.FilterOperator.EQ,
+            )
+            for key in label_key_filter
+        ]
+    )

     # predictions filter
     pFilter = prediction_filter.model_copy()
-    pFilter.label_keys = label_key_filter
+    pFilter.labels = schemas.LogicalFunction.or_(
+        *[
+            schemas.Condition(
+                lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY),
+                rhs=schemas.Value.infer(key),
+                op=schemas.FilterOperator.EQ,
+            )
+            for key in label_key_filter
+        ]
+    )

     groundtruths = generate_select(
         models.GroundTruth,
@@ -366,16 +493,59 @@ def test_compute_confusion_matrix_at_grouper_key_using_label_map(
     """
     Test grouping using the label_map
     """
-    prediction_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        model_names=[model_name],
+    prediction_filter = schemas.Filter(
+        predictions=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )
     groundtruth_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        model_names=[model_name],
-        task_types=[enums.TaskType.CLASSIFICATION],
-        datum_metadata={"md1": [schemas.StringFilter(value="md1-val0")]},
+        groundtruths=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATUM_META, key="md1"
+                    ),
+                    rhs=schemas.Value.infer("md1-val0"),
+                    op=schemas.FilterOperator.EQ,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )

     labels = fetch_union_of_labels(
@@ -396,11 +566,29 @@ def test_compute_confusion_matrix_at_grouper_key_using_label_map(

     # groundtruths filter
     gFilter = groundtruth_filter.model_copy()
-    gFilter.label_keys = label_key_filter
+    gFilter.labels = schemas.LogicalFunction.or_(
+        *[
+            schemas.Condition(
+                lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY),
+                rhs=schemas.Value.infer(key),
+                op=schemas.FilterOperator.EQ,
+            )
+            for key in label_key_filter
+        ]
+    )

     # predictions filter
     pFilter = prediction_filter.model_copy()
-    pFilter.label_keys = label_key_filter
+    pFilter.labels = schemas.LogicalFunction.or_(
+        *[
+            schemas.Condition(
+                lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY),
+                rhs=schemas.Value.infer(key),
+                op=schemas.FilterOperator.EQ,
+            )
+            for key in label_key_filter
+        ]
+    )

     groundtruths = generate_select(
         models.GroundTruth,
@@ -485,12 +673,42 @@ def test_compute_roc_auc(
     ```
     """
     prediction_filter = schemas.Filter(
-        model_names=[model_name],
-        task_types=[enums.TaskType.CLASSIFICATION],
+        predictions=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )
     groundtruth_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        task_types=[enums.TaskType.CLASSIFICATION],
+        groundtruths=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )

     labels = fetch_union_of_labels(
@@ -568,13 +786,39 @@ def test_compute_roc_auc_groupby_metadata(
     which gives 2/3. So we expect our implementation to give the average of 0.5 and 2/3
     """
+
     prediction_filter = schemas.Filter(
-        model_names=[model_name],
+        predictions=schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.MODEL_NAME),
+            rhs=schemas.Value.infer(model_name),
+            op=schemas.FilterOperator.EQ,
+        ),
     )
     groundtruth_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        task_types=[enums.TaskType.CLASSIFICATION],
-        datum_metadata={"md1": [schemas.StringFilter(value="md1-val0")]},
+        groundtruths=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATUM_META, key="md1"
+                    ),
+                    rhs=schemas.Value.infer("md1-val0"),
+                    op=schemas.FilterOperator.EQ,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )

     labels = fetch_union_of_labels(
@@ -630,14 +874,43 @@ def test_compute_roc_auc_with_label_map(
     assert score == 0.7777777777777778
     """
-    prediction_filter = schemas.Filter(
-        model_names=[model_name],
-        task_types=[enums.TaskType.CLASSIFICATION],
+    prediction_filter = schemas.Filter(
+        predictions=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )
     groundtruth_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        task_types=[enums.TaskType.CLASSIFICATION],
+        groundtruths=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )

     labels = fetch_union_of_labels(
@@ -672,19 +945,59 @@ def test_compute_classification(
     """
     Tests the _compute_classification function.
""" - model_filter = schemas.Filter( - dataset_names=[dataset_name], model_names=[model_name] + + prediction_filter = schemas.Filter( + predictions=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.MODEL_NAME + ), + rhs=schemas.Value.infer(model_name), + op=schemas.FilterOperator.EQ, + ), + ], + op=schemas.LogicalOperator.AND, + ) ) - dataset_filter = schemas.Filter( - dataset_names=[dataset_name], - model_names=[model_name], - task_types=[enums.TaskType.CLASSIFICATION], + groundtruth_filter = schemas.Filter( + groundtruths=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.MODEL_NAME + ), + rhs=schemas.Value.infer(model_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE), + rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION), + op=schemas.FilterOperator.CONTAINS, + ), + ], + op=schemas.LogicalOperator.AND, + ) ) confusion, metrics = _compute_clf_metrics( db, - prediction_filter=model_filter, - groundtruth_filter=dataset_filter, + prediction_filter=prediction_filter, + groundtruth_filter=groundtruth_filter, label_map=None, pr_curve_max_examples=0, metrics_to_return=[ @@ -857,12 +1170,42 @@ def test__compute_curves( """Test that _compute_curves correctly returns precision-recall curves for our animal ground truths.""" prediction_filter = schemas.Filter( - model_names=[model_name], - task_types=[enums.TaskType.CLASSIFICATION], + predictions=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.MODEL_NAME + ), + rhs=schemas.Value.infer(model_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE), + rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION), + op=schemas.FilterOperator.CONTAINS, + ), + ], + op=schemas.LogicalOperator.AND, + ) ) groundtruth_filter = schemas.Filter( - dataset_names=[dataset_name], - task_types=[enums.TaskType.CLASSIFICATION], + groundtruths=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE), + rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION), + op=schemas.FilterOperator.CONTAINS, + ), + ], + op=schemas.LogicalOperator.AND, + ) ) labels = fetch_union_of_labels( @@ -883,11 +1226,29 @@ def test__compute_curves( # groundtruths filter gFilter = groundtruth_filter.model_copy() - gFilter.label_keys = label_key_filter + gFilter.labels = schemas.LogicalFunction.or_( + *[ + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY), + rhs=schemas.Value.infer(key), + op=schemas.FilterOperator.EQ, + ) + for key in label_key_filter + ] + ) # predictions filter pFilter = prediction_filter.model_copy() - pFilter.label_keys = label_key_filter + pFilter.labels = schemas.LogicalFunction.or_( + *[ + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY), + 
+                rhs=schemas.Value.infer(key),
+                op=schemas.FilterOperator.EQ,
+            )
+            for key in label_key_filter
+        ]
+    )

     groundtruths = generate_select(
         models.GroundTruth,
diff --git a/api/tests/functional-tests/backend/metrics/test_detection.py b/api/tests/functional-tests/backend/metrics/test_detection.py
index e8e430889..6ad9c898a 100644
--- a/api/tests/functional-tests/backend/metrics/test_detection.py
+++ b/api/tests/functional-tests/backend/metrics/test_detection.py
@@ -1163,12 +1163,46 @@ def _metric_to_dict(m) -> dict:
             ],
         ),
         prediction_filter=schemas.Filter(
-            model_names=["test_model"],
-            label_keys=["class"],
+            predictions=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer("test_model"),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.LABEL_KEY
+                        ),
+                        rhs=schemas.Value.infer("class"),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         groundtruth_filter=schemas.Filter(
-            dataset_names=["test_dataset"],
-            label_keys=["class"],
+            groundtruths=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer("test_dataset"),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.LABEL_KEY
+                        ),
+                        rhs=schemas.Value.infer("class"),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         target_type=enums.AnnotationType.BOX,
     )
@@ -1366,12 +1400,46 @@ def _metric_to_dict(m) -> dict:
             ],
         ),
         prediction_filter=schemas.Filter(
-            model_names=["test_model"],
-            label_keys=["class"],
+            predictions=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer("test_model"),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.LABEL_KEY
+                        ),
+                        rhs=schemas.Value.infer("class"),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         groundtruth_filter=schemas.Filter(
-            dataset_names=["test_dataset"],
-            label_keys=["class"],
+            groundtruths=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
+                        rhs=schemas.Value.infer("test_dataset"),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.LABEL_KEY
+                        ),
+                        rhs=schemas.Value.infer("class"),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         target_type=enums.AnnotationType.BOX,
     )
@@ -1569,12 +1637,46 @@ def _metric_to_dict(m) -> dict:
             ],
         ),
         prediction_filter=schemas.Filter(
-            model_names=["test_model"],
-            label_keys=["class"],
+            predictions=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.MODEL_NAME
+                        ),
+                        rhs=schemas.Value.infer("test_model"),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.LABEL_KEY
+                        ),
+                        rhs=schemas.Value.infer("class"),
+                        op=schemas.FilterOperator.EQ,
+                    ),
+                ],
+                op=schemas.LogicalOperator.AND,
+            )
         ),
         groundtruth_filter=schemas.Filter(
-            dataset_names=["test_dataset"],
-            label_keys=["class"],
+            groundtruths=schemas.LogicalFunction(
+                args=[
+                    schemas.Condition(
+                        lhs=schemas.Symbol(
+                            name=schemas.SupportedSymbol.DATASET_NAME
+                        ),
rhs=schemas.Value.infer("test_dataset"), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.LABEL_KEY + ), + rhs=schemas.Value.infer("class"), + op=schemas.FilterOperator.EQ, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), target_type=enums.AnnotationType.BOX, ) @@ -1778,12 +1880,46 @@ def test__compute_detection_metrics_with_rasters( ], ), prediction_filter=schemas.Filter( - model_names=["test_model"], - label_keys=["class"], + predictions=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.MODEL_NAME + ), + rhs=schemas.Value.infer("test_model"), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.LABEL_KEY + ), + rhs=schemas.Value.infer("class"), + op=schemas.FilterOperator.EQ, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), groundtruth_filter=schemas.Filter( - dataset_names=["test_dataset"], - label_keys=["class"], + groundtruths=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer("test_dataset"), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.LABEL_KEY + ), + rhs=schemas.Value.infer("class"), + op=schemas.FilterOperator.EQ, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), target_type=enums.AnnotationType.RASTER, ) @@ -1918,12 +2054,46 @@ def test__compute_detection_metrics_with_rasters( ], ), prediction_filter=schemas.Filter( - model_names=["test_model"], - label_keys=["class"], + predictions=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.MODEL_NAME + ), + rhs=schemas.Value.infer("test_model"), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.LABEL_KEY + ), + rhs=schemas.Value.infer("class"), + op=schemas.FilterOperator.EQ, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), groundtruth_filter=schemas.Filter( - dataset_names=["test_dataset"], - label_keys=["class"], + groundtruths=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer("test_dataset"), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.LABEL_KEY + ), + rhs=schemas.Value.infer("class"), + op=schemas.FilterOperator.EQ, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), target_type=enums.AnnotationType.RASTER, ) @@ -2055,8 +2225,21 @@ def test_detection_exceptions(db: Session): status=enums.ModelStatus.READY, ) evaluation = Evaluation( + dataset_names=[dataset_name], model_name=model_name, - filters={"dataset_names": [dataset_name]}, + filters={ + "datasets": { + "lhs": { + "name": "dataset.name", + "key": None, + }, + "rhs": { + "type": "string", + "value": dataset_name, + }, + "op": "eq", + } + }, parameters=schemas.EvaluationParameters( task_type=enums.TaskType.OBJECT_DETECTION, iou_thresholds_to_compute=[0.5], diff --git a/api/tests/functional-tests/backend/metrics/test_segmentation.py b/api/tests/functional-tests/backend/metrics/test_segmentation.py index 425343e5b..e4bbedf70 100644 --- a/api/tests/functional-tests/backend/metrics/test_segmentation.py +++ b/api/tests/functional-tests/backend/metrics/test_segmentation.py @@ -70,19 +70,62 @@ def test_query_generators( 
         pred_semantic_segs_img2_create=pred_semantic_segs_img2_create,
     )

-    groundtruth_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
-        require_raster=True,
-        label_ids=None,
-    )
-    prediction_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        model_names=[model_name],
-        task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
-        require_raster=True,
-        label_ids=None,
+    prediction_filter = schemas.Filter(
+        predictions=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(
+                        enums.TaskType.SEMANTIC_SEGMENTATION
+                    ),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.RASTER),
+                    op=schemas.FilterOperator.ISNOTNULL,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
+    )
+    groundtruth_filter = schemas.Filter(
+        groundtruths=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(
+                        enums.TaskType.SEMANTIC_SEGMENTATION
+                    ),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.RASTER),
+                    op=schemas.FilterOperator.ISNOTNULL,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )

     for label_key, label_value, expected_number in [
@@ -98,13 +141,21 @@ def test_query_generators(
         )
         assert label_id is not None

+        groundtruth_filter.labels = schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID),
+            rhs=schemas.Value.infer(label_id),
+            op=schemas.FilterOperator.EQ,
+        )
-        groundtruth_filter.label_ids = [label_id]
         q = _generate_groundtruth_query(groundtruth_filter)
         data = db.query(q).all()
         assert len(data) == expected_number

-        groundtruth_filter.label_ids = [10000000]
+        groundtruth_filter.labels = schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID),
+            rhs=schemas.Value.infer(10000000),
+            op=schemas.FilterOperator.EQ,
+        )
        q = _generate_groundtruth_query(groundtruth_filter)
        data = db.query(q).all()
        assert len(data) == 0
@@ -120,15 +171,23 @@ def test_query_generators(
                and_(Label.key == label_key, Label.value == label_value)
            )
        )
-
        assert label_id is not None

-        prediction_filter.label_ids = [label_id]
+        prediction_filter.labels = schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID),
+            rhs=schemas.Value.infer(label_id),
+            op=schemas.FilterOperator.EQ,
+        )
+
        q = _generate_prediction_query(prediction_filter)
        data = db.query(q).all()
        assert len(data) == expected_number

-        prediction_filter.label_ids = [10000000]
+        prediction_filter.labels = schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID),
+            rhs=schemas.Value.infer(10000000),
+            op=schemas.FilterOperator.EQ,
+        )
        q = _generate_prediction_query(prediction_filter)
        data = db.query(q).all()
        assert len(data) == 0
@@ -212,19 +271,62 @@ def test__count_true_positives(
        pred_semantic_segs_img2_create=pred_semantic_segs_img2_create,
    )

-    groundtruth_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
-        require_raster=True,
-        label_ids=None,
-    )
-    prediction_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        model_names=[model_name],
-        task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
-        require_raster=True,
-        label_ids=None,
+    prediction_filter = schemas.Filter(
+        predictions=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(
+                        enums.TaskType.SEMANTIC_SEGMENTATION
+                    ),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.RASTER),
+                    op=schemas.FilterOperator.ISNOTNULL,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
+    )
+    groundtruth_filter = schemas.Filter(
+        groundtruths=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(
+                        enums.TaskType.SEMANTIC_SEGMENTATION
+                    ),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.RASTER),
+                    op=schemas.FilterOperator.ISNOTNULL,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
     )

     for k, v in [("k1", "v1"), ("k2", "v2")]:
@@ -241,8 +343,17 @@ def test__count_true_positives(
            schemas.Label(key=k, value=v),
        )

-        groundtruth_filter.label_ids = [label_id]
-        prediction_filter.label_ids = [label_id]
+        groundtruth_filter.labels = schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID),
+            rhs=schemas.Value.infer(label_id),
+            op=schemas.FilterOperator.EQ,
+        )
+        prediction_filter.labels = schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID),
+            rhs=schemas.Value.infer(label_id),
+            op=schemas.FilterOperator.EQ,
+        )
+
        tps = _count_true_positives(
            db=db,
            groundtruth_subquery=_generate_groundtruth_query(
@@ -278,10 +389,29 @@ def test_count_groundtruths(
    )

    groundtruth_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
-        require_raster=True,
-        label_ids=None,
+        groundtruths=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(
+                        enums.TaskType.SEMANTIC_SEGMENTATION
+                    ),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.RASTER),
+                    op=schemas.FilterOperator.ISNOTNULL,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
    )

    for k, v in [("k1", "v1"), ("k1", "v2"), ("k3", "v3"), ("k2", "v2")]:
@@ -296,7 +426,11 @@ def test_count_groundtruths(
            gt_semantic_segs_create, schemas.Label(key=k, value=v)
        )

-        groundtruth_filter.label_ids = [label_id]
+        groundtruth_filter.labels = schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID),
+            rhs=schemas.Value.infer(label_id),
+            op=schemas.FilterOperator.EQ,
+        )
        assert (
            _count_groundtruths(
                db,
                _generate_groundtruth_query(groundtruth_filter),
            )
            == expected
        )

-    groundtruth_filter.label_ids = [1000000]
+    groundtruth_filter.labels = schemas.Condition(
+        lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID),
+        rhs=schemas.Value.infer(1000000),
+        op=schemas.FilterOperator.EQ,
+    )
    assert (
        _count_groundtruths(
            db,
@@ -345,11 +483,36 @@ def test_count_predictions(
    )

    prediction_filter = schemas.Filter(
-        dataset_names=[dataset_name],
-        model_names=[model_name],
-        task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
-        require_raster=True,
-        label_ids=None,
+        predictions=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(
+                        enums.TaskType.SEMANTIC_SEGMENTATION
+                    ),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.RASTER),
+                    op=schemas.FilterOperator.ISNOTNULL,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
    )

    for k, v in [("k1", "v1"), ("k1", "v2"), ("k2", "v3"), ("k2", "v2")]:
@@ -364,7 +527,11 @@ def test_count_predictions(
            schemas.Label(key=k, value=v),
        )

-        prediction_filter.label_ids = [label_id]
+        prediction_filter.labels = schemas.Condition(
+            lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID),
+            rhs=schemas.Value.infer(label_id),
+            op=schemas.FilterOperator.EQ,
+        )
        assert (
            _count_predictions(
                db,
@@ -373,7 +540,11 @@ def test_count_predictions(
        == expected
    )

-    prediction_filter.label_ids = [1000000]
+    prediction_filter.labels = schemas.Condition(
+        lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID),
+        rhs=schemas.Value.infer(1000000),
+        op=schemas.FilterOperator.EQ,
+    )
    assert (
        _count_predictions(db, _generate_prediction_query(prediction_filter))
        == 0
@@ -398,14 +569,61 @@ def test__compute_segmentation_metrics(
    )

    prediction_filter = schemas.Filter(
-        model_names=[model_name],
-        dataset_names=[dataset_name],
+        predictions=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.MODEL_NAME
+                    ),
+                    rhs=schemas.Value.infer(model_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(
+                        enums.TaskType.SEMANTIC_SEGMENTATION
+                    ),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.RASTER),
+                    op=schemas.FilterOperator.ISNOTNULL,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
    )
    groundtruth_filter = schemas.Filter(
-        model_names=[model_name],
-        dataset_names=[dataset_name],
-        task_types=[enums.TaskType.SEMANTIC_SEGMENTATION],
-        require_raster=True,
+        groundtruths=schemas.LogicalFunction(
+            args=[
+                schemas.Condition(
+                    lhs=schemas.Symbol(
+                        name=schemas.SupportedSymbol.DATASET_NAME
+                    ),
+                    rhs=schemas.Value.infer(dataset_name),
+                    op=schemas.FilterOperator.EQ,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE),
+                    rhs=schemas.Value.infer(
+                        enums.TaskType.SEMANTIC_SEGMENTATION
+                    ),
+                    op=schemas.FilterOperator.CONTAINS,
+                ),
+                schemas.Condition(
+                    lhs=schemas.Symbol(name=schemas.SupportedSymbol.RASTER),
+                    op=schemas.FilterOperator.ISNOTNULL,
+                ),
+            ],
+            op=schemas.LogicalOperator.AND,
+        )
    )

    metrics = _compute_segmentation_metrics(
diff --git a/api/tests/functional-tests/backend/query/test_ops.py b/api/tests/functional-tests/backend/query/test_ops.py
index 1fa857ddd..64403086a 100644
--- a/api/tests/functional-tests/backend/query/test_ops.py
+++ b/api/tests/functional-tests/backend/query/test_ops.py
@@ -11,20 +11,14 @@ from valor_api.backend import models
 from valor_api.backend.query.ops import generate_query, generate_select
 from valor_api.backend.query.types import LabelSourceAlias
-from valor_api.schemas.filters import AdvancedFilter as Filter
 from valor_api.schemas.filters import (
-    And,
-    Equal,
-    GreaterThan,
-    GreaterThanEqual,
-    Inside,
-    Intersects,
-    IsNotNull,
-    LessThan,
-    LessThanEqual,
-    NotEqual,
-    Operands,
-    Outside,
+    Condition,
+    Filter,
+    FilterOperator,
+    LogicalFunction,
+    LogicalOperator,
+    SupportedSymbol,
+    SupportedType,
     Symbol,
     Value,
 )
@@ -44,7 +38,10 @@ def geospatial_coordinates() -> dict[
     dict,
 ]:
     return {
-        "point": {"type": "Point", "coordinates": [125.2750725, 38.760525]},
+        SupportedType.POINT: {
+            "type": "Point",
+            "coordinates": [125.2750725, 38.760525],
+        },
         "polygon1": {
             "type": "Polygon",
             "coordinates": [
             ]
         ],
     },
-        "multipolygon": {
+        SupportedType.MULTIPOLYGON: {
         "type": "MultiPolygon",
         "coordinates": [
             [
             ]
         ]
     ]
     ],
 }
 ],
 }
@@ -138,7 +135,7 @@ def metadata_2(geospatial_coordinates) -> dict[str, int | float | str | dict]:
         "some_bool_attribute": False,
         "some_geo_attribute": {
             "type": "geojson",
-            "value": geospatial_coordinates["multipolygon"],
+            "value": geospatial_coordinates[SupportedType.MULTIPOLYGON],
         },
     }
@@ -668,49 +665,45 @@ def model_sim(
     crud.finalize(db=db, dataset_name=dset_name, model_name=model_name2)

-def create_dataset_filter(name: str) -> Equal:
-    return Equal(
-        eq=Operands(
-            lhs=Symbol(type="string", name="dataset.name"),
-            rhs=Value(type="string", value=name),
-        )
+def create_dataset_filter(name: str) -> Condition:
+    return Condition(
+        lhs=Symbol(name=SupportedSymbol.DATASET_NAME),
+        rhs=Value.infer(name),
+        op=FilterOperator.EQ,
     )

-def create_model_filter(name: str) -> Equal:
-    return Equal(
-        eq=Operands(
-            lhs=Symbol(type="string", name="model.name"),
-            rhs=Value(type="string", value=name),
-        )
+def create_model_filter(name: str) -> Condition:
+    return Condition(
+        lhs=Symbol(name=SupportedSymbol.MODEL_NAME),
+        rhs=Value.infer(name),
+        op=FilterOperator.EQ,
     )

-def create_datum_filter(uid: str) -> Equal:
-    return Equal(
-        eq=Operands(
-            lhs=Symbol(type="string", name="datum.uid"),
-            rhs=Value(type="string", value=uid),
-        )
+def create_datum_filter(uid: str) -> Condition:
+    return Condition(
+        lhs=Symbol(name=SupportedSymbol.DATUM_UID),
+        rhs=Value.infer(uid),
+        op=FilterOperator.EQ,
     )

-def create_label_filter(key: str, value: str) -> And:
-    return And(
-        logical_and=[
-            Equal(
-                eq=Operands(
-                    lhs=Symbol(type="string", name="label.key"),
-                    rhs=Value(type="string", value=key),
-                )
+def create_label_filter(key: str, value: str) -> LogicalFunction:
+    return LogicalFunction(
+        args=[
+            Condition(
+                lhs=Symbol(name=SupportedSymbol.LABEL_KEY),
+                rhs=Value.infer(key),
+                op=FilterOperator.EQ,
             ),
-            Equal(
-                eq=Operands(
-                    lhs=Symbol(type="string", name="label.value"),
-                    rhs=Value(type="string", value=value),
) + Condition( + lhs=Symbol(name=SupportedSymbol.LABEL_VALUE), + rhs=Value.infer(value), + op=FilterOperator.EQ, ), - ] + ], + op=LogicalOperator.AND, ) @@ -784,16 +777,13 @@ def test_query_models( # Q: Get models with metadatum with `numeric` > 0.5. f = Filter( - predictions=GreaterThan( - gt=Operands( - lhs=Symbol( - type="float", - name="model.metadata", - key="some_numeric_attribute", - ), - rhs=Value(type="float", value=0.5), - ) - ), + predictions=Condition( + lhs=Symbol( + name=SupportedSymbol.MODEL_META, key="some_numeric_attribute" + ), + rhs=Value.infer(0.5), + op=FilterOperator.GT, + ) ) model_names = generate_select( models.Model.name, filters=f, label_source=models.Prediction @@ -804,16 +794,13 @@ def test_query_models( # Q: Get models with metadatum with `numeric` < 0.5. f = Filter( - predictions=LessThan( - lt=Operands( - lhs=Symbol( - type="float", - name="model.metadata", - key="some_numeric_attribute", - ), - rhs=Value(type="float", value=0.5), - ) - ), + predictions=Condition( + lhs=Symbol( + name=SupportedSymbol.MODEL_META, key="some_numeric_attribute" + ), + rhs=Value.infer(0.5), + op=FilterOperator.LT, + ) ) model_names = generate_select( models.Model.name, filters=f, label_source=models.Prediction @@ -829,39 +816,34 @@ def test_query_by_metadata( ): # Q: Get datums with metadatum with `numeric` < 0.5, `str` == 'abc', and `bool` == True. f = Filter( - datums=And( - logical_and=[ - LessThan( - lt=Operands( - lhs=Symbol( - type="float", - name="datum.metadata", - key="some_numeric_attribute", - ), - rhs=Value(type="float", value=0.5), - ) + datums=LogicalFunction( + args=[ + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_numeric_attribute", + ), + rhs=Value.infer(0.5), + op=FilterOperator.LT, ), - Equal( - eq=Operands( - lhs=Symbol( - type="string", - name="datum.metadata", - key="some_str_attribute", - ), - rhs=Value(type="string", value="abc"), - ) + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_str_attribute", + ), + rhs=Value.infer("abc"), + op=FilterOperator.EQ, ), - Equal( - eq=Operands( - lhs=Symbol( - type="boolean", - name="datum.metadata", - key="some_bool_attribute", - ), - rhs=Value(type="boolean", value=True), - ) + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_bool_attribute", + ), + rhs=Value.infer(True), + op=FilterOperator.EQ, ), - ] + ], + op=LogicalOperator.AND, ) ) datum_uids = generate_select(models.Datum.uid, filters=f).distinct() @@ -870,52 +852,39 @@ def test_query_by_metadata( assert (datum_uid1,) in datum_uids # repeat with `bool` == False or != `True` and check we get nothing - negative1 = Equal( - eq=Operands( - lhs=Symbol( - type="boolean", - name="datum.metadata", - key="some_bool_attribute", - ), - rhs=Value(type="boolean", value=False), - ) + negative1 = Condition( + lhs=Symbol(name=SupportedSymbol.DATUM_META, key="some_bool_attribute"), + rhs=Value.infer(False), + op=FilterOperator.EQ, ) - negative2 = NotEqual( - ne=Operands( - lhs=Symbol( - type="boolean", - name="datum.metadata", - key="some_bool_attribute", - ), - rhs=Value(type="boolean", value=True), - ) + negative2 = Condition( + lhs=Symbol(name=SupportedSymbol.DATUM_META, key="some_bool_attribute"), + rhs=Value.infer(True), + op=FilterOperator.NE, ) for bool_filter in [negative1, negative2]: f = Filter( - groundtruths=And( - logical_and=[ - LessThan( - lt=Operands( - lhs=Symbol( - type="float", - name="datum.metadata", - key="some_numeric_attribute", - ), - rhs=Value(type="float", value=0.5), - ) + 
groundtruths=LogicalFunction( + args=[ + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_numeric_attribute", + ), + rhs=Value.infer(0.5), + op=FilterOperator.LT, ), - Equal( - eq=Operands( - lhs=Symbol( - type="string", - name="datum.metadata", - key="some_str_attribute", - ), - rhs=Value(type="string", value="abc"), - ) + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_str_attribute", + ), + rhs=Value.infer("abc"), + op=FilterOperator.EQ, ), bool_filter, - ] + ], + op=LogicalOperator.AND, ) ) datum_uids = generate_select(models.Datum.uid, filters=f).distinct() @@ -924,29 +893,26 @@ def test_query_by_metadata( # Q: Get datums with metadatum with `numeric` > 0.5 and `str` == 'abc'. f = Filter( - datums=And( - logical_and=[ - GreaterThan( - gt=Operands( - lhs=Symbol( - type="float", - name="datum.metadata", - key="some_numeric_attribute", - ), - rhs=Value(type="float", value=0.5), - ) + datums=LogicalFunction( + args=[ + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_numeric_attribute", + ), + rhs=Value.infer(0.5), + op=FilterOperator.GT, ), - Equal( - eq=Operands( - lhs=Symbol( - type="string", - name="datum.metadata", - key="some_str_attribute", - ), - rhs=Value(type="string", value="abc"), - ) + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_str_attribute", + ), + rhs=Value.infer("abc"), + op=FilterOperator.EQ, ), - ] + ], + op=LogicalOperator.AND, ) ) datum_uids = generate_select(models.Datum.uid, filters=f).distinct() @@ -956,29 +922,26 @@ def test_query_by_metadata( # Q: Get datums with metadatum with `numeric` < 0.5 and `str` == 'xyz'. f = Filter( - datums=And( - logical_and=[ - LessThan( - lt=Operands( - lhs=Symbol( - type="float", - name="datum.metadata", - key="some_numeric_attribute", - ), - rhs=Value(type="float", value=0.5), - ) + datums=LogicalFunction( + args=[ + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_numeric_attribute", + ), + rhs=Value.infer(0.5), + op=FilterOperator.LT, ), - Equal( - eq=Operands( - lhs=Symbol( - type="string", - name="datum.metadata", - key="some_str_attribute", - ), - rhs=Value(type="string", value="xyz"), - ) + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_str_attribute", + ), + rhs=Value.infer("xyz"), + op=FilterOperator.EQ, ), - ] + ], + op=LogicalOperator.AND, ) ) datum_uids = generate_select(models.Datum.uid, filters=f).distinct() @@ -988,29 +951,26 @@ def test_query_by_metadata( # Q: Get models with metadatum with `numeric` > 0.5 and `str` == 'xyz'. f = Filter( - datums=And( - logical_and=[ - GreaterThan( - gt=Operands( - lhs=Symbol( - type="float", - name="datum.metadata", - key="some_numeric_attribute", - ), - rhs=Value(type="float", value=0.5), - ) + datums=LogicalFunction( + args=[ + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_numeric_attribute", + ), + rhs=Value.infer(0.5), + op=FilterOperator.GT, ), - Equal( - eq=Operands( - lhs=Symbol( - type="string", - name="datum.metadata", - key="some_str_attribute", - ), - rhs=Value(type="string", value="xyz"), - ) + Condition( + lhs=Symbol( + name=SupportedSymbol.DATUM_META, + key="some_str_attribute", + ), + rhs=Value.infer("xyz"), + op=FilterOperator.EQ, ), - ] + ], + op=LogicalOperator.AND, ) ) datum_uids = generate_select(models.Datum.uid, filters=f).distinct() @@ -1064,17 +1024,17 @@ def test_complex_queries( ): # Q: Get datums that `model1` has annotations for with label `dog` and prediction score > 0.9. 
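+    # The filter below AND-s the helper-built conditions (model name and
+    # label key/value) with an inline score Condition; Value.infer picks
+    # the value type from the Python literal it is given.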
f = Filter( - predictions=And( - logical_and=[ + predictions=LogicalFunction( + op=LogicalOperator.AND, + args=[ create_model_filter(model_name1), create_label_filter(key="class", value="dog"), - GreaterThan( - gt=Operands( - lhs=Symbol(type="float", name="label.score"), - rhs=Value(type="float", value=0.9), - ) + Condition( + lhs=Symbol(name=SupportedSymbol.SCORE), + rhs=Value.infer(0.9), + op=FilterOperator.GT, ), - ] + ], ) ) datum_uids = generate_select( @@ -1089,22 +1049,21 @@ def test_complex_queries( # Q: Get datums that `model1` has `bounding_box` annotations for with label `dog` and prediction score > 0.75. f = Filter( - predictions=And( - logical_and=[ + predictions=LogicalFunction( + op=LogicalOperator.AND, + args=[ create_model_filter(model_name1), create_label_filter(key="class", value="dog"), - GreaterThan( - gt=Operands( - lhs=Symbol(type="float", name="label.score"), - rhs=Value(type="float", value=0.75), - ) + Condition( + lhs=Symbol(name=SupportedSymbol.SCORE), + rhs=Value.infer(0.75), + op=FilterOperator.GT, ), - IsNotNull( - isnotnull=Symbol( - type="box", name="annotation.bounding_box" - ) + Condition( + lhs=Symbol(name=SupportedSymbol.BOX), + op=FilterOperator.ISNOTNULL, ), - ] + ], ) ) datum_uids = generate_select( @@ -1122,13 +1081,10 @@ def test_query_by_annotation_geometry( db: Session, model_sim, ): - bounding_box_filter = GreaterThan( - gt=Operands( - lhs=Symbol( - type="float", name="annotation.bounding_box", attribute="area" - ), - rhs=Value(type="float", value=75), - ) + bounding_box_filter = Condition( + lhs=Symbol(name=SupportedSymbol.BOX_AREA), + rhs=Value.infer(75.0), + op=FilterOperator.GT, ) # Q: Get `bounding_box` annotations that have an area > 75. @@ -1143,11 +1099,12 @@ def test_query_by_annotation_geometry( # Q: Get `bounding_box` annotations from `model1` that have an area > 75. 
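+    # `bounding_box_filter` (the BOX_AREA > 75 Condition defined above) is
+    # reused here, AND-ed with a model-name condition.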
f = Filter( - predictions=And( - logical_and=[ + predictions=LogicalFunction( + op=LogicalOperator.AND, + args=[ create_model_filter(model_name1), bounding_box_filter, - ] + ], ) ) annotations = generate_select( @@ -1215,67 +1172,61 @@ def test_multiple_tables_in_args( def create_geospatial_inside_filter( - symbol_name: str, + symbol: Symbol, value: Value, -): - return Inside( - inside=Operands( - lhs=Symbol( - type=value.type, name=symbol_name, key="some_geo_attribute" - ), - rhs=value, - ) +) -> Condition: + symbol.key = "some_geo_attribute" + return Condition( + lhs=symbol, + rhs=value, + op=FilterOperator.INSIDE, ) def create_geospatial_outside_filter( - symbol_name: str, + symbol: Symbol, value: Value, -): - return Outside( - outside=Operands( - lhs=Symbol( - type=value.type, name=symbol_name, key="some_geo_attribute" - ), - rhs=value, - ) +) -> Condition: + symbol.key = "some_geo_attribute" + return Condition( + lhs=symbol, + rhs=value, + op=FilterOperator.OUTSIDE, ) def create_geospatial_intersects_filter( - symbol_name: str, + symbol: Symbol, value: Value, -): - return Intersects( - intersects=Operands( - lhs=Symbol( - type=value.type, name=symbol_name, key="some_geo_attribute" - ), - rhs=value, - ) +) -> Condition: + symbol.key = "some_geo_attribute" + return Condition( + lhs=symbol, + rhs=value, + op=FilterOperator.INTERSECTS, ) def _get_geospatial_names_from_filter( db: Session, value: Value, - operator: str, + operator: FilterOperator, model_object: models.Datum | InstrumentedAttribute, - symbol_name: str, + symbol: Symbol, label_source: LabelSourceAlias = models.Annotation, ): match operator: - case "inside": + case FilterOperator.INSIDE: geofilter = create_geospatial_inside_filter( - symbol_name=symbol_name, value=value + symbol=symbol, value=value ) - case "outside": + case FilterOperator.OUTSIDE: geofilter = create_geospatial_outside_filter( - symbol_name=symbol_name, value=value + symbol=symbol, value=value ) - case "intersects": + case FilterOperator.INTERSECTS: geofilter = create_geospatial_intersects_filter( - symbol_name=symbol_name, value=value + symbol=symbol, value=value ) case _: raise NotImplementedError @@ -1296,13 +1247,14 @@ def test_datum_geospatial_filters( db: Session, model_sim, model_object=models.Datum.uid, - symbol_name: str = "datum.metadata", ): + symbol = Symbol(name=SupportedSymbol.DATUM_META) + # test inside filters names = _get_geospatial_names_from_filter( db=db, value=Value( - type="polygon", + type=SupportedType.POLYGON, value=[ [ [-20, -20], @@ -1313,9 +1265,9 @@ def test_datum_geospatial_filters( ] ], ), - operator="inside", + operator=FilterOperator.INSIDE, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 2 assert ("uid1",) in names @@ -1325,7 +1277,7 @@ def test_datum_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="polygon", + type=SupportedType.POLYGON, value=[ [ [60, 60], @@ -1336,9 +1288,9 @@ def test_datum_geospatial_filters( ] ], ), - operator="intersects", + operator=FilterOperator.INTERSECTS, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 2 assert ("uid2",) in names @@ -1348,12 +1300,12 @@ def test_datum_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="point", + type=SupportedType.POINT, value=[81, 80], ), - operator="intersects", + operator=FilterOperator.INTERSECTS, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 
1 assert ("uid4",) in names @@ -1362,7 +1314,7 @@ def test_datum_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="multipolygon", + type=SupportedType.MULTIPOLYGON, value=[ [ [ @@ -1384,9 +1336,9 @@ def test_datum_geospatial_filters( ], ], ), - operator="intersects", + operator=FilterOperator.INTERSECTS, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 3 assert ("uid1",) in names @@ -1397,12 +1349,12 @@ def test_datum_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="point", + type=SupportedType.POINT, value=[-11, -11], ), - operator="intersects", + operator=FilterOperator.INTERSECTS, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 0 @@ -1410,12 +1362,12 @@ def test_datum_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="point", + type=SupportedType.POINT, value=[-11, -11], ), - operator="outside", + operator=FilterOperator.OUTSIDE, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 4 assert ("uid1",) in names @@ -1426,7 +1378,7 @@ def test_datum_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="polygon", + type=SupportedType.POLYGON, value=[ [ [-20, -20], @@ -1437,9 +1389,9 @@ def test_datum_geospatial_filters( ] ], ), - operator="outside", + operator=FilterOperator.OUTSIDE, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 2 assert ("uid2",) in names @@ -1450,14 +1402,14 @@ def test_dataset_geospatial_filters( db: Session, model_sim, model_object=models.Dataset.name, - symbol_name: str = "dataset.metadata", ): + symbol = Symbol(name=SupportedSymbol.DATASET_META) # test inside filters names = _get_geospatial_names_from_filter( db=db, value=Value( - type="polygon", + type=SupportedType.POLYGON, value=[ [ [-20, -20], @@ -1468,9 +1420,9 @@ def test_dataset_geospatial_filters( ] ], ), - operator="inside", + operator=FilterOperator.INSIDE, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 1 assert ("dataset1",) in names @@ -1479,12 +1431,12 @@ def test_dataset_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="point", + type=SupportedType.POINT, value=[1, 1], ), - operator="intersects", + operator=FilterOperator.INTERSECTS, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 1 assert ("dataset1",) in names @@ -1493,7 +1445,7 @@ def test_dataset_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="multipolygon", + type=SupportedType.MULTIPOLYGON, value=[ [ [ @@ -1515,9 +1467,9 @@ def test_dataset_geospatial_filters( ], ], ), - operator="intersects", + operator=FilterOperator.INTERSECTS, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 1 assert ("dataset1",) in names @@ -1526,12 +1478,12 @@ def test_dataset_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="point", + type=SupportedType.POINT, value=[-11, -11], ), - operator="intersects", + operator=FilterOperator.INTERSECTS, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 0 @@ -1539,12 +1491,12 @@ def test_dataset_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="point", + 
type=SupportedType.POINT, value=[-11, -11], ), - operator="outside", + operator=FilterOperator.OUTSIDE, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 1 assert ("dataset1",) in names @@ -1554,14 +1506,14 @@ def test_model_geospatial_filters( db: Session, model_sim, model_object=models.Model.name, - symbol_name: str = "model.metadata", ): + symbol = Symbol(name=SupportedSymbol.MODEL_META) # test inside filters names = _get_geospatial_names_from_filter( db=db, value=Value( - type="polygon", + type=SupportedType.POLYGON, value=[ [ [-20, -20], @@ -1572,9 +1524,9 @@ def test_model_geospatial_filters( ] ], ), - operator="inside", + operator=FilterOperator.INSIDE, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, label_source=models.Prediction, ) assert len(names) == 1 @@ -1584,12 +1536,12 @@ def test_model_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="point", + type=SupportedType.POINT, value=[1, 1], ), - operator="intersects", + operator=FilterOperator.INTERSECTS, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, label_source=models.Prediction, ) assert len(names) == 1 @@ -1599,7 +1551,7 @@ def test_model_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="multipolygon", + type=SupportedType.MULTIPOLYGON, value=[ [ [ @@ -1621,9 +1573,9 @@ def test_model_geospatial_filters( ], ], ), - operator="intersects", + operator=FilterOperator.INTERSECTS, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, label_source=models.Prediction, ) assert len(names) == 1 @@ -1633,12 +1585,12 @@ def test_model_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="point", + type=SupportedType.POINT, value=[-11, -11], ), - operator="intersects", + operator=FilterOperator.INTERSECTS, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, ) assert len(names) == 0 @@ -1646,12 +1598,12 @@ def test_model_geospatial_filters( names = _get_geospatial_names_from_filter( db=db, value=Value( - type="point", + type=SupportedType.POINT, value=[-11, -11], ), - operator="outside", + operator=FilterOperator.OUTSIDE, model_object=model_object, - symbol_name=symbol_name, + symbol=symbol, label_source=models.Prediction, ) assert len(names) == 2 @@ -1735,60 +1687,37 @@ def duration_metadata() -> list[schemas.Duration]: def time_filter( db: Session, - symbol_name: str, - type_str: str, + symbol: Symbol, + type_: SupportedType, key: str, value: str | float, op: str, ): match op: case "==": - f = Equal( - eq=Operands( - lhs=Symbol(type=type_str, name=symbol_name, key=key), - rhs=Value(type=type_str, value=value), - ) - ) + op = FilterOperator.EQ case "!=": - f = NotEqual( - ne=Operands( - lhs=Symbol(type=type_str, name=symbol_name, key=key), - rhs=Value(type=type_str, value=value), - ) - ) + op = FilterOperator.NE case ">": - f = GreaterThan( - gt=Operands( - lhs=Symbol(type=type_str, name=symbol_name, key=key), - rhs=Value(type=type_str, value=value), - ) - ) + op = FilterOperator.GT case ">=": - f = GreaterThanEqual( - ge=Operands( - lhs=Symbol(type=type_str, name=symbol_name, key=key), - rhs=Value(type=type_str, value=value), - ) - ) + op = FilterOperator.GTE case "<": - f = LessThan( - lt=Operands( - lhs=Symbol(type=type_str, name=symbol_name, key=key), - rhs=Value(type=type_str, value=value), - ) - ) + op = FilterOperator.LT case "<=": - f = LessThanEqual( - le=Operands( - 
lhs=Symbol(type=type_str, name=symbol_name, key=key), - rhs=Value(type=type_str, value=value), - ) - ) + op = FilterOperator.LTE case _: raise NotImplementedError - match symbol_name: - case "dataset.metadata": + symbol.key = key + f = Condition( + lhs=symbol, + rhs=Value(type=type_, value=value), + op=op, + ) + + match symbol: + case Symbol(name=SupportedSymbol.DATASET_META): f = Filter(datasets=f) return generate_query( models.Dataset, @@ -1796,7 +1725,7 @@ def time_filter( filters=f, label_source=models.GroundTruth, ).all() - case "model.metadata": + case Symbol(name=SupportedSymbol.MODEL_META): f = Filter(models=f) return generate_query( models.Model, @@ -1804,7 +1733,7 @@ def time_filter( filters=f, label_source=models.Prediction, ).all() - case "datum.metadata": + case Symbol(name=SupportedSymbol.DATUM_META): f = Filter(datums=f) return generate_query( models.Datum, @@ -1812,7 +1741,7 @@ def time_filter( filters=f, label_source=models.GroundTruth, ).all() - case "annotation.metadata": + case Symbol(name=SupportedSymbol.ANNOTATION_META): f = Filter(annotations=f) return generate_query( models.Annotation, @@ -1821,13 +1750,13 @@ def time_filter( label_source=models.GroundTruth, ).all() case _: - raise NotImplementedError(symbol_name) + raise NotImplementedError(symbol) def _test_datetime_query( db: Session, - symbol_name: str, - symbol_type: str, + symbol: Symbol, + type_: SupportedType, key: str, metadata_: Sequence[ schemas.DateTime | schemas.Date | schemas.Time | schemas.Duration @@ -1842,8 +1771,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[0].value, op=op, @@ -1852,8 +1781,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[1].value, op=op, @@ -1862,8 +1791,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[2].value, op=op, @@ -1872,8 +1801,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[3].value, op=op, @@ -1882,8 +1811,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[4].value, op=op, @@ -1895,8 +1824,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[0].value, op=op, @@ -1905,8 +1834,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[1].value, op=op, @@ -1915,8 +1844,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[2].value, op=op, @@ -1925,8 +1854,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[3].value, op=op, @@ -1935,8 +1864,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[4].value, op=op, @@ -1948,8 
+1877,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[0].value, op=op, @@ -1958,8 +1887,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[1].value, op=op, @@ -1968,8 +1897,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[2].value, op=op, @@ -1978,8 +1907,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[3].value, op=op, @@ -1988,8 +1917,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[4].value, op=op, @@ -2001,8 +1930,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[0].value, op=op, @@ -2011,8 +1940,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[1].value, op=op, @@ -2021,8 +1950,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[2].value, op=op, @@ -2031,8 +1960,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[3].value, op=op, @@ -2041,8 +1970,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[4].value, op=op, @@ -2054,8 +1983,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[0].value, op=op, @@ -2064,8 +1993,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[1].value, op=op, @@ -2074,8 +2003,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[2].value, op=op, @@ -2084,8 +2013,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[3].value, op=op, @@ -2094,8 +2023,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[4].value, op=op, @@ -2107,8 +2036,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[0].value, op=op, @@ -2117,8 +2046,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[1].value, op=op, @@ -2127,8 +2056,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - 
type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[2].value, op=op, @@ -2137,8 +2066,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[3].value, op=op, @@ -2147,8 +2076,8 @@ def _test_datetime_query( results = time_filter( db=db, - symbol_name=symbol_name, - type_str=symbol_type, + symbol=symbol, + type_=type_, key=key, value=metadata_[4].value, op=op, @@ -2206,16 +2135,32 @@ def test_dataset_datetime_queries( ) _test_datetime_query( - db, "dataset.metadata", "datetime", datetime_key, datetime_metadata + db, + Symbol(name=SupportedSymbol.DATASET_META), + SupportedType.DATETIME, + datetime_key, + datetime_metadata, ) _test_datetime_query( - db, "dataset.metadata", "date", date_key, date_metadata + db, + Symbol(name=SupportedSymbol.DATASET_META), + SupportedType.DATE, + date_key, + date_metadata, ) _test_datetime_query( - db, "dataset.metadata", "time", time_key, time_metadata + db, + Symbol(name=SupportedSymbol.DATASET_META), + SupportedType.TIME, + time_key, + time_metadata, ) _test_datetime_query( - db, "dataset.metadata", "duration", duration_key, duration_metadata + db, + Symbol(name=SupportedSymbol.DATASET_META), + SupportedType.DURATION, + duration_key, + duration_metadata, ) @@ -2269,12 +2214,32 @@ def test_model_datetime_queries( ) _test_datetime_query( - db, "model.metadata", "datetime", datetime_key, datetime_metadata + db, + Symbol(name=SupportedSymbol.MODEL_META), + SupportedType.DATETIME, + datetime_key, + datetime_metadata, + ) + _test_datetime_query( + db, + Symbol(name=SupportedSymbol.MODEL_META), + SupportedType.DATE, + date_key, + date_metadata, ) - _test_datetime_query(db, "model.metadata", "date", date_key, date_metadata) - _test_datetime_query(db, "model.metadata", "time", time_key, time_metadata) _test_datetime_query( - db, "model.metadata", "duration", duration_key, duration_metadata + db, + Symbol(name=SupportedSymbol.MODEL_META), + SupportedType.TIME, + time_key, + time_metadata, + ) + _test_datetime_query( + db, + Symbol(name=SupportedSymbol.MODEL_META), + SupportedType.DURATION, + duration_key, + duration_metadata, ) @@ -2362,12 +2327,32 @@ def add_metadata_typing(value): ) _test_datetime_query( - db, "datum.metadata", "datetime", datetime_key, datetime_metadata + db, + Symbol(name=SupportedSymbol.DATUM_META), + SupportedType.DATETIME, + datetime_key, + datetime_metadata, + ) + _test_datetime_query( + db, + Symbol(name=SupportedSymbol.DATUM_META), + SupportedType.DATE, + date_key, + date_metadata, + ) + _test_datetime_query( + db, + Symbol(name=SupportedSymbol.DATUM_META), + SupportedType.TIME, + time_key, + time_metadata, ) - _test_datetime_query(db, "datum.metadata", "date", date_key, date_metadata) - _test_datetime_query(db, "datum.metadata", "time", time_key, time_metadata) _test_datetime_query( - db, "datum.metadata", "duration", duration_key, duration_metadata + db, + Symbol(name=SupportedSymbol.DATUM_META), + SupportedType.DURATION, + duration_key, + duration_metadata, ) @@ -2437,16 +2422,32 @@ def test_annotation_datetime_queries( ) _test_datetime_query( - db, "annotation.metadata", "datetime", datetime_key, datetime_metadata + db, + Symbol(name=SupportedSymbol.ANNOTATION_META), + SupportedType.DATETIME, + datetime_key, + datetime_metadata, ) _test_datetime_query( - db, "annotation.metadata", "date", date_key, date_metadata + db, + Symbol(name=SupportedSymbol.ANNOTATION_META), + SupportedType.DATE, + date_key, + 
date_metadata, ) _test_datetime_query( - db, "annotation.metadata", "time", time_key, time_metadata + db, + Symbol(name=SupportedSymbol.ANNOTATION_META), + SupportedType.TIME, + time_key, + time_metadata, ) _test_datetime_query( - db, "annotation.metadata", "duration", duration_key, duration_metadata + db, + Symbol(name=SupportedSymbol.ANNOTATION_META), + SupportedType.DURATION, + duration_key, + duration_metadata, ) diff --git a/api/tests/functional-tests/crud/test_create_delete.py b/api/tests/functional-tests/crud/test_create_delete.py index 8312e1ee1..225982ee9 100644 --- a/api/tests/functional-tests/crud/test_create_delete.py +++ b/api/tests/functional-tests/crud/test_create_delete.py @@ -396,7 +396,7 @@ def test_create_and_get_datasets( db=db, dataset=schemas.Dataset(name="other_dataset"), ) - datasets, _ = crud.get_datasets(db=db) + datasets, _ = crud.get_datasets(db=db, filters=schemas.Filter()) assert len(datasets) == 2 assert set([d.name for d in datasets]) == {dataset_name, "other_dataset"} @@ -1099,30 +1099,36 @@ def method_to_test( min_area: float | None = None, max_area: float | None = None, ): - geometric_filters = [] + conditions = [] if min_area: - geometric_filters.append( - schemas.NumericFilter( - value=min_area, - operator=">=", + conditions.append( + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX_AREA), + rhs=schemas.Value.infer(float(min_area)), + op=schemas.FilterOperator.GTE, ) ) if max_area: - geometric_filters.append( - schemas.NumericFilter( - value=max_area, - operator="<=", + conditions.append( + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX_AREA), + rhs=schemas.Value.infer(float(max_area)), + op=schemas.FilterOperator.LTE, ) ) - if not geometric_filters: - geometric_filters = None job_request = schemas.EvaluationRequest( dataset_names=["test_dataset"], model_names=["test_model"], filters=schemas.Filter( - label_keys=[label_key], - bounding_box_area=geometric_filters, + annotations=schemas.LogicalFunction.and_(*conditions) + if conditions + else None, + labels=schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY), + rhs=schemas.Value.infer(label_key), + op=schemas.FilterOperator.EQ, + ), ), parameters=schemas.EvaluationParameters( task_type=enums.TaskType.OBJECT_DETECTION, @@ -1285,7 +1291,13 @@ def method_to_test( assert model_evals[1] == schemas.EvaluationResponse( dataset_names=[dataset_name], model_name=model_name, - filters=schemas.Filter(label_keys=["class"]), + filters=schemas.Filter( + labels=schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY), + rhs=schemas.Value.infer("class"), + op=schemas.FilterOperator.EQ, + ) + ), parameters=schemas.EvaluationParameters( task_type=enums.TaskType.OBJECT_DETECTION, convert_annotations_to_type=enums.AnnotationType.BOX, @@ -1307,17 +1319,23 @@ def method_to_test( dataset_names=[dataset_name], model_name=model_name, filters=schemas.Filter( - label_keys=["class"], - bounding_box_area=[ - schemas.NumericFilter( - value=min_area, - operator=">=", + annotations=schemas.LogicalFunction.and_( + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX_AREA), + rhs=schemas.Value.infer(float(min_area)), + op=schemas.FilterOperator.GTE, ), - schemas.NumericFilter( - value=max_area, - operator="<=", + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX_AREA), + rhs=schemas.Value.infer(float(max_area)), + op=schemas.FilterOperator.LTE, ), - ], + ), + labels=schemas.Condition( + 
lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY), + rhs=schemas.Value.infer("class"), + op=schemas.FilterOperator.EQ, + ), ), parameters=schemas.EvaluationParameters( task_type=enums.TaskType.OBJECT_DETECTION, diff --git a/api/tests/functional-tests/crud/test_read.py b/api/tests/functional-tests/crud/test_read.py index 0670b6efe..6f0118ba6 100644 --- a/api/tests/functional-tests/crud/test_read.py +++ b/api/tests/functional-tests/crud/test_read.py @@ -59,7 +59,11 @@ def test_get_labels_from_dataset( ds1, headers = crud.get_labels( db=db, filters=schemas.Filter( - dataset_names=[dataset_name], + labels=schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.DATASET_NAME), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ), ), ignore_prediction_labels=True, ) @@ -69,25 +73,77 @@ def test_get_labels_from_dataset( assert headers == {"content-range": "items 0-1/2"} # NEGATIVE - Test filter by task type + # This should be same result as previous b/c dataset only has Obj Dets ds1, _ = crud.get_labels( db=db, filters=schemas.Filter( - dataset_names=[dataset_name], - task_types=[ - enums.TaskType.CLASSIFICATION, - enums.TaskType.SEMANTIC_SEGMENTATION, - ], + groundtruths=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ), + schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.TASK_TYPE + ), + rhs=schemas.Value.infer( + enums.TaskType.OBJECT_DETECTION + ), + op=schemas.FilterOperator.CONTAINS, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.TASK_TYPE + ), + rhs=schemas.Value.infer( + enums.TaskType.SEMANTIC_SEGMENTATION + ), + op=schemas.FilterOperator.CONTAINS, + ), + ], + op=schemas.LogicalOperator.OR, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), ignore_prediction_labels=True, ) - assert ds1 == {schemas.Label(key="k2", value="v2")} + assert len(ds1) == 2 + assert schemas.Label(key="k1", value="v1") in ds1 + assert schemas.Label(key="k2", value="v2") in ds1 # POSITIVE - Test filter by task type ds1, _ = crud.get_labels( db=db, filters=schemas.Filter( - dataset_names=[dataset_name], - task_types=[enums.TaskType.OBJECT_DETECTION], + labels=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.TASK_TYPE + ), + rhs=schemas.Value.infer( + enums.TaskType.OBJECT_DETECTION + ), + op=schemas.FilterOperator.CONTAINS, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), ignore_prediction_labels=True, ) @@ -99,7 +155,22 @@ def test_get_labels_from_dataset( ds1, _ = crud.get_labels( db=db, filters=schemas.Filter( - dataset_names=[dataset_name], require_bounding_box=False + labels=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX), + op=schemas.FilterOperator.ISNULL, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), ignore_prediction_labels=True, ) @@ -110,7 +181,24 @@ def test_get_labels_from_dataset( ds1, _ = crud.get_labels( db=db, filters=schemas.Filter( - 
dataset_names=[dataset_name], require_polygon=True + labels=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.POLYGON + ), + op=schemas.FilterOperator.ISNOTNULL, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), ignore_prediction_labels=True, ) @@ -121,8 +209,22 @@ def test_get_labels_from_dataset( ds1, _ = crud.get_labels( db=db, filters=schemas.Filter( - dataset_names=[dataset_name], - require_bounding_box=True, + labels=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX), + op=schemas.FilterOperator.ISNOTNULL, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), ignore_prediction_labels=True, ) @@ -140,7 +242,11 @@ def test_get_labels_from_model( md1, _ = crud.get_labels( db=db, filters=schemas.Filter( - model_names=[model_name], + labels=schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.MODEL_NAME), + rhs=schemas.Value.infer(model_name), + op=schemas.FilterOperator.EQ, + ) ), ignore_groundtruth_labels=True, ) @@ -154,8 +260,25 @@ def test_get_labels_from_model( md1, _ = crud.get_labels( db=db, filters=schemas.Filter( - model_names=[model_name], - task_types=[enums.TaskType.CLASSIFICATION], + labels=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.MODEL_NAME + ), + rhs=schemas.Value.infer(model_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.TASK_TYPE + ), + rhs=schemas.Value.infer(enums.TaskType.CLASSIFICATION), + op=schemas.FilterOperator.CONTAINS, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), ignore_groundtruth_labels=True, ) @@ -165,8 +288,22 @@ def test_get_labels_from_model( md1, _ = crud.get_labels( db=db, filters=schemas.Filter( - model_names=[model_name], - require_bounding_box=True, + labels=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.MODEL_NAME + ), + rhs=schemas.Value.infer(model_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.BOX), + op=schemas.FilterOperator.ISNOTNULL, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), ignore_groundtruth_labels=True, ) diff --git a/api/tests/unit-tests/backend/query/test_filtering.py b/api/tests/unit-tests/backend/query/test_filtering.py index 3f02ffe8d..0d1efe635 100644 --- a/api/tests/unit-tests/backend/query/test_filtering.py +++ b/api/tests/unit-tests/backend/query/test_filtering.py @@ -3,40 +3,74 @@ from valor_api.backend import models from valor_api.backend.query.filtering import ( _recursive_search_logic_tree, - create_cte, + create_where_expression, generate_logical_expression, map_filter_to_tables, + map_keyed_symbol_to_resources, + map_opstr_to_operator, + map_symbol_to_resources, + map_type_to_jsonb_type_cast, + map_type_to_type_cast, ) -from valor_api.schemas.filters import AdvancedFilter as Filter from valor_api.schemas.filters import ( - And, - Equal, - IsNull, - Not, - Operands, + Condition, + Filter, + FilterOperator, + LogicalFunction, + LogicalOperator, + SupportedSymbol, + SupportedType, Symbol, 
Value, ) -def test_create_cte_validation(): +def test_map_to_resources(): + for symbol in SupportedSymbol: + # test that there is a singular mapping for each symbol + assert (symbol in map_symbol_to_resources) != ( + symbol in map_keyed_symbol_to_resources + ) + + +def test_map_to_operator(): + for op in FilterOperator: + # test that each op has an associated function + assert op in map_opstr_to_operator + + +def test_map_to_type_cast(): + for type_ in SupportedType: + # value type cast + assert type_ in map_type_to_type_cast + # jsonb type cast + assert type_ in map_type_to_jsonb_type_cast + + +def test_create_where_expression_validation(): with pytest.raises(ValueError): - create_cte( - opstr="eq", - symbol="symbol", # type: ignore - testing - value=Value(type="string", value="some_name"), + create_where_expression( + Condition( + lhs="symbol", # type: ignore - testing + rhs=Value(type=SupportedType.STRING, value="some_name"), + op=FilterOperator.EQ, + ) ) with pytest.raises(ValueError): - create_cte( - opstr="eq", - symbol=Symbol(type="string", name="dataset.name"), - value="value", # type: ignore - testing + create_where_expression( + Condition( + lhs=Symbol(name=SupportedSymbol.DATASET_NAME), + rhs="value", # type: ignore - testing + op=FilterOperator.EQ, + ) ) with pytest.raises(TypeError): - create_cte( - opstr="eq", - symbol=Symbol(type="string", name="dataset.name"), - value=Value(type="integer", value=1), + create_where_expression( + Condition( + lhs=Symbol(name=SupportedSymbol.DATASET_NAME), + rhs=Value(type=SupportedType.INTEGER, value=1), + op=FilterOperator.EQ, + ) ) @@ -48,10 +82,12 @@ def test__recursive_search_logic_tree(): # test one arg function tree, _, tables = _recursive_search_logic_tree( - func=Not( - logical_not=IsNull( - isnull=Symbol(type="box", name="annotation.bounding_box") - ) + func=LogicalFunction( + args=Condition( + lhs=Symbol(name=SupportedSymbol.BOX), + op=FilterOperator.ISNULL, + ), + op=LogicalOperator.NOT, ) ) assert tables == [models.Annotation] @@ -59,11 +95,10 @@ def test__recursive_search_logic_tree(): # test two arg function tree, _, tables = _recursive_search_logic_tree( - func=Equal( - eq=Operands( - lhs=Symbol(type="string", name="dataset.name"), - rhs=Value(type="string", value="some_name"), - ) + func=Condition( + lhs=Symbol(name=SupportedSymbol.DATASET_NAME), + rhs=Value.infer("some_name"), + op=FilterOperator.EQ, ) ) assert tables == [models.Dataset] @@ -71,18 +106,19 @@ def test__recursive_search_logic_tree(): # test n arg function tree, _, tables = _recursive_search_logic_tree( - func=And( - logical_and=[ - IsNull( - isnull=Symbol(type="box", name="annotation.bounding_box") + func=LogicalFunction( + args=[ + Condition( + lhs=Symbol(name=SupportedSymbol.BOX), + op=FilterOperator.ISNULL, ), - Equal( - eq=Operands( - lhs=Symbol(type="string", name="dataset.name"), - rhs=Value(type="string", value="some_name"), - ) + Condition( + lhs=Symbol(name=SupportedSymbol.DATASET_NAME), + rhs=Value(type=SupportedType.STRING, value="some_name"), + op=FilterOperator.EQ, ), - ] + ], + op=LogicalOperator.AND, ) ) assert tables == [models.Annotation, models.Dataset] @@ -91,7 +127,10 @@ def test__recursive_search_logic_tree(): def test_map_filter_to_labels(): - fn = IsNull(isnull=Symbol(type="box", name="annotation.bounding_box")) + fn = Condition( + lhs=Symbol(name=SupportedSymbol.BOX), + op=FilterOperator.ISNULL, + ) filters = Filter( datasets=fn, @@ -140,13 +179,16 @@ def test_generate_logical_expression_validation(): # tree should be an int or a dict 
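+    # `generate_logical_expression` now takes `ordered_ctes`, a list of CTEs
+    # indexed by the logic `tree`, in place of the old `root` CTE and string
+    # `prefix` arguments.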
    with pytest.raises(ValueError):
        generate_logical_expression(
-            root=select(models.Label.id).cte(),
+            ordered_ctes=[
+                select(models.Label.id).cte(),
+                select(models.Label.id).cte(),
+            ],
            tree=[0, 1],  # type: ignore - testing
-            prefix="cte",
        )

    # n-arg expressions should be represented by a list
    with pytest.raises(ValueError):
        generate_logical_expression(
-            root=select(models.Label.id).cte(), tree={"and": 0}, prefix="cte"
+            ordered_ctes=[select(models.Label.id).cte()],
+            tree={"and": 0},
        )
diff --git a/api/tests/unit-tests/backend/query/test_solvers.py b/api/tests/unit-tests/backend/query/test_solvers.py
index 8e8cdf331..6553530df 100644
--- a/api/tests/unit-tests/backend/query/test_solvers.py
+++ b/api/tests/unit-tests/backend/query/test_solvers.py
@@ -204,7 +204,6 @@ def test_generate_filter_subquery_validation():
            conditions=None,  # type: ignore - testing
            select_from=models.Annotation,
            label_source=models.Dataset,
-            prefix="cte",
        )

    # test that a valid logic tree has been created
    with pytest.raises(ValueError):
        generate_filter_subquery(
            conditions=None,  # type: ignore - testing
            select_from=models.Annotation,
            label_source=models.Annotation,
-            prefix="cte",
        )
diff --git a/api/tests/unit-tests/schemas/test_filter_schemas.py b/api/tests/unit-tests/schemas/test_filter_schemas.py
index 45f7df635..33ee1254d 100644
--- a/api/tests/unit-tests/schemas/test_filter_schemas.py
+++ b/api/tests/unit-tests/schemas/test_filter_schemas.py
@@ -1,159 +1,84 @@
-import datetime
-
 import pytest

-from valor_api.schemas import DateTime
 from valor_api.schemas.filters import (
-    BooleanFilter,
-    DateTimeFilter,
-    Equal,
-    GeospatialFilter,
-    GreaterThan,
-    GreaterThanEqual,
-    Inside,
-    Intersects,
-    LessThan,
-    LessThanEqual,
-    NotEqual,
-    NumericFilter,
-    Operands,
-    Outside,
-    StringFilter,
+    Condition,
+    FilterOperator,
+    LogicalFunction,
+    LogicalOperator,
+    SupportedSymbol,
    Symbol,
    Value,
-    validate_type_symbol,
 )
-from valor_api.schemas.geometry import GeoJSON
-
-
-def test_validate_type_symbol():
-    x = Symbol(type="string", name="name")
-    y = Value(type="string", value="name")
-    validate_type_symbol(x)
-    with pytest.raises(TypeError):
-        validate_type_symbol(y)

-def test_string_filter_to_function():
-    name = "owner.name"
-    value = "hello world"
-    operands = Operands(
-        lhs=Symbol(type="string", name=name),
-        rhs=Value(type="string", value=value),
+@pytest.fixture
+def condition() -> Condition:
+    return Condition(
+        lhs=Symbol(name=SupportedSymbol.DATASET_NAME),
+        rhs=Value.infer("name"),
+        op=FilterOperator.EQ,
    )

-    assert StringFilter(value=value, operator="==",).to_function(
-        name
-    ) == Equal(eq=operands)

-    assert StringFilter(value=value, operator="!=",).to_function(
-        name
-    ) == NotEqual(ne=operands)
+def test_logical_and(condition: Condition):

+    # raises a value error when called with no arguments
+    with pytest.raises(ValueError):
+        LogicalFunction.and_()

-def test_boolean_filter_to_function():
-    type_str = "boolean"
-    name = "owner.name"
-    value = True
-    operands = Operands(
-        lhs=Symbol(type=type_str, name=name),
-        rhs=Value(type=type_str, value=value),
-    )
-
-    assert BooleanFilter(value=value, operator="==",).to_function(
-        name
-    ) == Equal(eq=operands)
+    # raises a value error when every argument is None
+    with pytest.raises(ValueError):
+        LogicalFunction.and_(*[None, None, None])

-    assert BooleanFilter(value=value, operator="!=",).to_function(
-        name
-    ) == NotEqual(ne=operands)
+    # a single argument is returned unwrapped
+    assert LogicalFunction.and_(*[condition]) == condition

-
-def test_numeric_filter_to_function():
-    type_str = "float"
-    name = "owner.name"
-    value = 0.123
-    operands = Operands(
-        lhs=Symbol(type=type_str, name=name),
-        rhs=Value(type=type_str, value=value),
+    # multiple arguments are combined under a single AND
+    assert LogicalFunction.and_(*[condition, condition]) == LogicalFunction(
+        args=[condition, condition],
+        op=LogicalOperator.AND,
    )

-    assert NumericFilter(value=value, operator="==",).to_function(
-        name
-    ) == Equal(eq=operands)
-
-    assert NumericFilter(value=value, operator="!=",).to_function(
-        name
-    ) == NotEqual(ne=operands)

-    assert NumericFilter(value=value, operator=">",).to_function(
-        name
-    ) == GreaterThan(gt=operands)
+def test_logical_or(condition: Condition):

-    assert NumericFilter(value=value, operator=">=",).to_function(
-        name
-    ) == GreaterThanEqual(ge=operands)
+    # raises a value error when called with no arguments
+    with pytest.raises(ValueError):
+        LogicalFunction.or_()

-    assert NumericFilter(value=value, operator="<",).to_function(
-        name
-    ) == LessThan(lt=operands)
+    # raises a value error when every argument is None
+    with pytest.raises(ValueError):
+        LogicalFunction.or_(*[None, None, None])

-    assert NumericFilter(value=value, operator="<=",).to_function(
-        name
-    ) == LessThanEqual(le=operands)
+    # a single argument is returned unwrapped
+    assert LogicalFunction.or_(*[condition]) == condition

-
-def test_datetime_filter_to_function():
-    type_str = "datetime"
-    name = "owner.name"
-    value = DateTime(value=datetime.datetime.now().isoformat())
-    operands = Operands(
-        lhs=Symbol(type=type_str, name=name),
-        rhs=Value(type=type_str, value=value.value),
+    # multiple arguments are combined under a single OR
+    assert LogicalFunction.or_(*[condition, condition]) == LogicalFunction(
+        args=[condition, condition],
+        op=LogicalOperator.OR,
    )

-    assert DateTimeFilter(value=value, operator="==",).to_function(
-        name
-    ) == Equal(eq=operands)
-
-    assert DateTimeFilter(value=value, operator="!=",).to_function(
-        name
-    ) == NotEqual(ne=operands)
-
-    assert DateTimeFilter(value=value, operator=">",).to_function(
-        name
-    ) == GreaterThan(gt=operands)
-
-    assert DateTimeFilter(value=value, operator=">=",).to_function(
-        name
-    ) == GreaterThanEqual(ge=operands)

-    assert DateTimeFilter(value=value, operator="<",).to_function(
-        name
-    ) == LessThan(lt=operands)
+def test_logical_not(condition: Condition):

-    assert DateTimeFilter(value=value, operator="<=",).to_function(
-        name
-    ) == LessThanEqual(le=operands)
+    # raises a type error when called with no argument
+    with pytest.raises(TypeError):
+        LogicalFunction.not_()  # type: ignore - testing

+    # raises a value error when the argument is None
+    with pytest.raises(ValueError):
+        LogicalFunction.not_(None)  # type: ignore - testing

-def test_geospatial_filter_to_function():
-    type_str = "geojson"
-    name = "owner.name"
-    value = GeoJSON(type="Point", coordinates=[0.1, 0.1])
-    operands = Operands(
-        lhs=Symbol(type=type_str, name=name),
-        rhs=Value(type=type_str, value=value.geometry.to_json()),
+    # a single condition is wrapped in a negation
+    assert LogicalFunction.not_(condition) == LogicalFunction(
+        args=condition,
+        op=LogicalOperator.NOT,
    )

-    assert GeospatialFilter(value=value, operator="inside",).to_function(
-        name
-    ) == Inside(inside=operands)
-
-    assert GeospatialFilter(value=value, operator="outside",).to_function(
-        name
-    ) == Outside(outside=operands)
+    # double negation should return the original condition
+    assert LogicalFunction.not_(LogicalFunction.not_(condition)) == condition

-    assert
GeospatialFilter(value=value, operator="intersect",).to_function( - name - ) == Intersects(intersects=operands) + # not function cannot be passed more than one argument + with pytest.raises(TypeError): + assert LogicalFunction.not_(condition, condition) # type: ignore - testing diff --git a/api/tests/unit-tests/test_main.py b/api/tests/unit-tests/test_main.py index aad06938b..321234015 100644 --- a/api/tests/unit-tests/test_main.py +++ b/api/tests/unit-tests/test_main.py @@ -733,13 +733,24 @@ def test_get_datasets(crud, client: TestClient): crud.get_datasets.assert_called_once() +""" POST /datasets/filter """ + + +@patch("valor_api.main.crud") +def test_get_filtered_datasets(crud, client: TestClient): + crud.get_datasets.return_value = ([], {"headers": "headers"}) + resp = client.post("/datasets/filter", json=schemas.Filter().model_dump()) + assert resp.status_code == 200 + crud.get_datasets.assert_called_once() + + """ GET /datasets/{dataset_name} """ @patch("valor_api.main.crud") def test_get_dataset_by_name(crud, client: TestClient): crud.get_dataset.return_value = schemas.Dataset(name="name", metadata={}) - resp = client.get("/datasets/name") + resp = client.get("/datasets/filter") assert resp.status_code == 200 crud.get_dataset.assert_called_once() @@ -842,6 +853,17 @@ def test_get_models(crud, client: TestClient): crud.get_models.assert_called_once() +""" POST /models/filter """ + + +@patch("valor_api.main.crud") +def test_get_filtered_models(crud, client: TestClient): + crud.get_models.return_value = ([], {"headers": "headers"}) + resp = client.post("/models/filter", json=schemas.Filter().model_dump()) + assert resp.status_code == 200 + crud.get_models.assert_called_once() + + """ GET /models/{model_name} """ @@ -894,6 +916,144 @@ def test_delete_model(crud, client: TestClient): assert crud.delete.call_count == 1 +""" GET /data """ + + +@patch("valor_api.main.crud") +def test_get_datums(crud, client: TestClient): + crud.get_datums.return_value = ([], {"headers": "headers"}) + resp = client.get("/data") + assert resp.status_code == 200 + crud.get_datums.assert_called_once() + + resp = client.post("/data") + assert resp.status_code == 405 + + +""" POST /data/filter """ + + +@patch("valor_api.main.crud") +def test_get_filtered_datums(crud, client: TestClient): + crud.get_datums.return_value = ([], {"headers": "headers"}) + resp = client.post("/data/filter", json=schemas.Filter().model_dump()) + assert resp.status_code == 200 + crud.get_datums.assert_called_once() + + +""" GET /data/dataset/{dataset_name} """ + + +@patch("valor_api.main.crud") +def test_get_dataset_datums(crud, client: TestClient): + crud.get_datums.return_value = ([], {"headers": "headers"}) + resp = client.get("/data") + assert resp.status_code == 200 + crud.get_datums.assert_called_once() + + with patch( + "valor_api.main.crud.get_datums", + side_effect=exceptions.DatasetDoesNotExistError(""), + ): + resp = client.get("/data") + assert resp.status_code == 404 + + resp = client.post("/data") + assert resp.status_code == 405 + + +""" GET /data/dataset/{dataset_name}/uid/{uid} """ + + +@patch("valor_api.main.crud") +def test_get_datum_by_uid(crud, client: TestClient): + crud.get_datums.return_value = ( + [schemas.Datum(uid="uid")], + {}, + ) + + resp = client.get("/data/dataset/dsetname/uid/uid") + assert resp.status_code == 200 + crud.get_datums.assert_called_once() + + with patch( + "valor_api.main.crud.get_datums", + side_effect=exceptions.DatasetDoesNotExistError(""), + ): + resp = 
client.get("/data/dataset/dsetname/uid/uid") + assert resp.status_code == 404 + + resp = client.post("/data/dataset/dsetname/uid/uid") + assert resp.status_code == 405 + + +""" GET /labels """ + + +@patch("valor_api.main.crud") +def test_get_labels(crud, client: TestClient): + crud.get_labels.return_value = ([], {"headers": "headers"}) + resp = client.get("/labels") + assert resp.status_code == 200 + crud.get_labels.assert_called_once() + + resp = client.post("/labels") + assert resp.status_code == 405 + + +""" POST /labels/filter """ + + +@patch("valor_api.main.crud") +def test_get_filtered_labels(crud, client: TestClient): + crud.get_labels.return_value = ([], {"headers": "headers"}) + resp = client.post("/labels/filter", json={}) + assert resp.status_code == 200 + crud.get_labels.assert_called_once() + + resp = client.get("/labels") + assert resp.status_code == 200 + + +""" GET /labels/dataset/{dataset_name} """ + + +@patch("valor_api.main.crud") +def test_get_dataset_labels(crud, client: TestClient): + crud.get_labels.return_value = ([], {"headers": "headers"}) + resp = client.get("/labels/dataset/dsetname") + assert resp.status_code == 200 + crud.get_labels.assert_called_once() + + with patch( + "valor_api.main.crud.get_labels", + side_effect=exceptions.DatasetDoesNotExistError(""), + ): + resp = client.get("/labels/dataset/dsetname") + assert resp.status_code == 404 + + resp = client.post("/labels/dataset/dsetname") + assert resp.status_code == 405 + + +""" GET /labels/model/{model_name} """ + + +@patch("valor_api.main.crud") +def test_get_model_labels(crud, client: TestClient): + crud.get_labels.return_value = ([], {"headers": "headers"}) + resp = client.get("/labels/model/modelname") + assert resp.status_code == 200 + crud.get_labels.assert_called_once() + + with patch( + "valor_api.main.crud.get_labels", + side_effect=exceptions.ModelDoesNotExistError(""), + ): + resp = client.get("/labels/model/modelname") + assert resp.status_code == 404 + + """ POST /evaluations """ @@ -998,105 +1158,3 @@ def test_post_semenatic_segmentation_metrics(client: TestClient): response=[response], example_json=example_json, ) - - -""" GET /labels/dataset/{dataset_name} """ - - -@patch("valor_api.main.crud") -def test_get_dataset_labels(crud, client: TestClient): - crud.get_labels.return_value = ([], {"headers": "headers"}) - resp = client.get("/labels/dataset/dsetname") - assert resp.status_code == 200 - crud.get_labels.assert_called_once() - - with patch( - "valor_api.main.crud.get_labels", - side_effect=exceptions.DatasetDoesNotExistError(""), - ): - resp = client.get("/labels/dataset/dsetname") - assert resp.status_code == 404 - - resp = client.post("/labels/dataset/dsetname") - assert resp.status_code == 405 - - -""" GET /labels/model/{model_name} """ - - -@patch("valor_api.main.crud") -def test_get_model_labels(crud, client: TestClient): - crud.get_labels.return_value = ([], {"headers": "headers"}) - resp = client.get("/labels/model/modelname") - assert resp.status_code == 200 - crud.get_labels.assert_called_once() - - with patch( - "valor_api.main.crud.get_labels", - side_effect=exceptions.ModelDoesNotExistError(""), - ): - resp = client.get("/labels/model/modelname") - assert resp.status_code == 404 - - -""" GET /data/dataset/{dataset_name} """ - - -@patch("valor_api.main.crud") -def test_get_datums(crud, client: TestClient): - crud.get_datums.return_value = ([], {"headers": "headers"}) - resp = client.get("/data") - assert resp.status_code == 200 - crud.get_datums.assert_called_once() - - with 
patch( - "valor_api.main.crud.get_datums", - side_effect=exceptions.DatasetDoesNotExistError(""), - ): - resp = client.get("/data") - assert resp.status_code == 404 - - resp = client.post("/data") - assert resp.status_code == 405 - - -""" GET /data/dataset/{dataset_name}/uid/{uid} """ - - -@patch("valor_api.main.crud") -def test_get_datum(crud, client: TestClient): - crud.get_datums.return_value = ( - [schemas.Datum(uid="uid")], - {}, - ) - - resp = client.get("/data/dataset/dsetname/uid/uid") - assert resp.status_code == 200 - crud.get_datums.assert_called_once() - - with patch( - "valor_api.main.crud.get_datums", - side_effect=exceptions.DatasetDoesNotExistError(""), - ): - resp = client.get("/data/dataset/dsetname/uid/uid") - assert resp.status_code == 404 - - resp = client.post("/data/dataset/dsetname/uid/uid") - assert resp.status_code == 405 - - -""" GET /labels """ - - -@patch("valor_api.main.crud") -def test_get_labels(crud, client: TestClient): - crud.get_labels.return_value = ([], {"headers": "headers"}) - resp = client.get("/labels") - assert resp.status_code == 200 - crud.get_labels.assert_called_once() - - resp = client.post("/labels") - assert resp.status_code == 405 - - -""" GET /user """ diff --git a/api/valor_api/backend/core/dataset.py b/api/valor_api/backend/core/dataset.py index 8d161c5ac..58ab1c0da 100644 --- a/api/valor_api/backend/core/dataset.py +++ b/api/valor_api/backend/core/dataset.py @@ -165,10 +165,9 @@ def get_paginated_datasets( "Offset should be an int greater than or equal to zero. Limit should be an int greater than or equal to -1." ) - advanced_filter = filters.to_advanced_filter() if filters else None datasets_subquery = generate_select( models.Dataset.id.label("id"), - filters=advanced_filter, + filters=filters, label_source=models.GroundTruth, ).subquery() @@ -465,7 +464,13 @@ def get_unique_groundtruth_annotation_metadata_in_dataset( def get_dataset_summary(db: Session, name: str) -> schemas.DatasetSummary: gt_labels = core.get_labels( db, - schemas.Filter(dataset_names=[name]), + schemas.Filter( + datasets=schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.DATASET_NAME), + rhs=schemas.Value.infer(name), + op=schemas.FilterOperator.EQ, + ) + ), ignore_predictions=True, ) return schemas.DatasetSummary( diff --git a/api/valor_api/backend/core/evaluation.py b/api/valor_api/backend/core/evaluation.py index 48960309f..30ad8e8a5 100644 --- a/api/valor_api/backend/core/evaluation.py +++ b/api/valor_api/backend/core/evaluation.py @@ -1,6 +1,8 @@ +import warnings from datetime import timezone from typing import Sequence +from pydantic import ValidationError from sqlalchemy import ( ColumnElement, and_, @@ -21,6 +23,7 @@ from valor_api import api_utils, enums, exceptions, schemas from valor_api.backend import core, models from valor_api.backend.query import generate_query +from valor_api.schemas import migrations def _create_dataset_expr_from_list( @@ -245,6 +248,58 @@ def validate_request( ) +def _validate_evaluation_filter( + db: Session, + evaluation: models.Evaluation, +): + """ + Validates whether a new evaluation should proceed to a computation. + + Parameters + ---------- + db : Session + The database session. + evaluation : models.Evaluation + The evaluation row to validate. 
+ """ + + # unpack filters and params + filters = schemas.Filter(**evaluation.filters) + parameters = schemas.EvaluationParameters(**evaluation.parameters) + + groundtruth_filter = filters.model_copy() + groundtruth_filter.predictions = None + + predictions_filter = filters.model_copy() + predictions_filter.groundtruths = None + + datasets = ( + generate_query( + models.Dataset.name, + db=db, + filters=groundtruth_filter, + label_source=models.GroundTruth, + ) + .distinct() + .all() + ) + + # verify model and datasets have data for this evaluation + if not datasets: + raise exceptions.EvaluationRequestError( + msg="No datasets were found that met the filter criteria." + ) + + # check that prediction label keys match ground truth label keys + if parameters.task_type == enums.TaskType.CLASSIFICATION: + core.validate_matching_label_keys( + db=db, + label_map=parameters.label_map, + groundtruth_filter=groundtruth_filter, + prediction_filter=predictions_filter, + ) + + def _create_response( db: Session, evaluation: models.Evaluation, @@ -295,65 +350,6 @@ def _create_response( ) -def _validate_create_evaluation( - db: Session, - job_request: schemas.EvaluationRequest, - evaluation: models.Evaluation, -) -> models.Evaluation: - """ - Validates whether a new or failed should proceed to a computation. - - Parameters - ---------- - db : Session - The database session. - job_request : schemas.EvaluationRequest - The evaluations to create. - evaluation : models.Evaluation - The evaluation row to validate. - - Returns - ------- - model.Evaluation - The row that was passed as input. - """ - - # unpack filters and params - groundtruth_filter = job_request.filters.model_copy() - groundtruth_filter.task_types = [job_request.parameters.task_type] - prediction_filter = groundtruth_filter.model_copy() - prediction_filter.model_names = [evaluation.model_name] - parameters = job_request.parameters - - datasets = ( - generate_query( - models.Dataset, - db=db, - filters=groundtruth_filter, - label_source=models.GroundTruth, - ) - .distinct() - .all() - ) - - # verify datums exist for this evaluation - if not datasets: - raise exceptions.EvaluationRequestError( - msg="No finalized datasets were found that met the filter criteria." 
- ) - - # check that prediction label keys match ground truth label keys - if job_request.parameters.task_type == enums.TaskType.CLASSIFICATION: - core.validate_matching_label_keys( - db=db, - label_map=parameters.label_map, - groundtruth_filter=groundtruth_filter, - prediction_filter=prediction_filter, - ) - - return evaluation - - def _create_responses( db: Session, evaluations: list[models.Evaluation], @@ -378,34 +374,48 @@ def _create_responses( if evaluation.id is None: raise exceptions.EvaluationDoesNotExistError() - dataset_filter = schemas.Filter(**evaluation.filters) - model_filter = dataset_filter.model_copy() - model_filter.dataset_names = None - model_filter.model_names = [evaluation.model_name] parameters = schemas.EvaluationParameters(**evaluation.parameters) + kwargs = dict() + try: + filters = schemas.Filter(**evaluation.filters) - match parameters.task_type: - case enums.TaskType.CLASSIFICATION: - kwargs = {} - case ( - enums.TaskType.OBJECT_DETECTION - | enums.TaskType.SEMANTIC_SEGMENTATION - ): - ( - missing_pred_labels, - ignored_pred_labels, - ) = core.get_disjoint_labels( - db, - dataset_filter, - model_filter, - label_map=parameters.label_map, + groundtruth_filter = filters.model_copy() + groundtruth_filter.predictions = None + + prediction_filter = filters.model_copy() + prediction_filter.groundtruths = None + + match parameters.task_type: + case enums.TaskType.CLASSIFICATION: + kwargs = {} + case ( + enums.TaskType.OBJECT_DETECTION + | enums.TaskType.SEMANTIC_SEGMENTATION + ): + ( + missing_pred_labels, + ignored_pred_labels, + ) = core.get_disjoint_labels( + db, + groundtruth_filter, + prediction_filter, + label_map=parameters.label_map, + ) + kwargs = { + "missing_pred_labels": missing_pred_labels, + "ignored_pred_labels": ignored_pred_labels, + } + case _: + raise NotImplementedError + except ValidationError as e: + try: + migrations.DeprecatedFilter(**evaluation.filters) + warnings.warn( + "Evaluation response is using a deprecated filter format.", + DeprecationWarning, ) - kwargs = { - "missing_pred_labels": missing_pred_labels, - "ignored_pred_labels": ignored_pred_labels, - } - case _: - raise NotImplementedError + except ValidationError: + raise e results.append( _create_response( @@ -473,6 +483,7 @@ def _split_request( job_request : EvaluationRequest The job request to split (if multiple model names exist). """ + return [ schemas.EvaluationRequest( dataset_names=job_request.dataset_names, @@ -510,12 +521,6 @@ def create_or_get_evaluations( # verify that all datasets and models are ready to be evaluated validate_request(db=db, job_request=job_request) - # reset dataset and model related filters - job_request.filters.dataset_names = None - job_request.filters.dataset_metadata = None - job_request.filters.model_names = None - job_request.filters.model_metadata = None - created_rows = [] existing_rows = [] for subrequest in _split_request(job_request): @@ -556,9 +561,8 @@ def create_or_get_evaluations( status=enums.EvaluationStatus.PENDING, meta=dict(), ) - evaluation = _validate_create_evaluation( + _validate_evaluation_filter( db=db, - job_request=subrequest, evaluation=evaluation, ) created_rows.append(evaluation) diff --git a/api/valor_api/backend/core/model.py b/api/valor_api/backend/core/model.py index 59c926ee8..a2e6e9944 100644 --- a/api/valor_api/backend/core/model.py +++ b/api/valor_api/backend/core/model.py @@ -182,10 +182,9 @@ def get_paginated_models( "Offset should be an int greater than or equal to zero. 
Limit should be an int greater than or equal to -1." ) - advanced_filter = filters.to_advanced_filter() if filters else None subquery = generate_select( models.Model.id.label("id"), - filters=advanced_filter, + filters=filters, label_source=models.Prediction, ).subquery() diff --git a/api/valor_api/backend/metrics/classification.py b/api/valor_api/backend/metrics/classification.py index 9857726ad..74ac14ffc 100644 --- a/api/valor_api/backend/metrics/classification.py +++ b/api/valor_api/backend/metrics/classification.py @@ -397,7 +397,14 @@ def _compute_binary_roc_auc( """ # query to get the datum_ids and label values of groundtruths that have the given label key gts_filter = groundtruth_filter.model_copy() - gts_filter.label_keys = [label.key] + gts_filter.labels = schemas.LogicalFunction.and_( + gts_filter.labels, + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY), + rhs=schemas.Value.infer(label.key), + op=schemas.FilterOperator.EQ, + ), + ) gts_query = generate_select( models.Annotation.datum_id.label("datum_id"), models.Label.value.label("label_value"), @@ -407,7 +414,20 @@ def _compute_binary_roc_auc( # get the prediction scores for the given label (key and value) preds_filter = prediction_filter.model_copy() - preds_filter.labels = [{label.key: label.value}] + preds_filter.labels = schemas.LogicalFunction.and_( + preds_filter.labels, + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY), + rhs=schemas.Value.infer(label.key), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_VALUE), + rhs=schemas.Value.infer(label.value), + op=schemas.FilterOperator.EQ, + ), + ) + preds_query = generate_select( models.Annotation.datum_id.label("datum_id"), models.Prediction.score.label("score"), @@ -538,7 +558,21 @@ def _compute_roc_auc( for grouper_value, labels in value_to_labels_mapping.items(): label_filter = groundtruth_filter.model_copy() - label_filter.label_ids = [label.id for label in labels] + label_filter.labels = schemas.LogicalFunction.and_( + label_filter.labels, + schemas.LogicalFunction.or_( + *[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.LABEL_ID + ), + rhs=schemas.Value.infer(label.id), + op=schemas.FilterOperator.EQ, + ) + for label in labels + ] + ), + ) # some labels in the "labels" argument may be out-of-scope given our groundtruth_filter, so we fetch all labels that are within scope of the groundtruth_filter to make sure we don't calculate ROCAUC for inappropriate labels in_scope_labels = [ @@ -805,16 +839,41 @@ def _compute_confusion_matrix_and_metrics_at_grouper_key( matrix and the second a list of all metrics (accuracy, ROC AUC, precisions, recalls, and f1s). 
""" - label_key_filter = list( + label_keys = list( grouper_mappings["grouper_key_to_label_keys_mapping"][grouper_key] ) - # get groundtruths and predictions that conform to filters + # groundtruths filter gFilter = groundtruth_filter.model_copy() - gFilter.label_keys = label_key_filter + gFilter.labels = schemas.LogicalFunction.and_( + gFilter.labels, + schemas.LogicalFunction.or_( + *[ + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY), + rhs=schemas.Value.infer(key), + op=schemas.FilterOperator.EQ, + ) + for key in label_keys + ] + ), + ) + # predictions filter pFilter = prediction_filter.model_copy() - pFilter.label_keys = label_key_filter + pFilter.labels = schemas.LogicalFunction.and_( + pFilter.labels, + schemas.LogicalFunction.or_( + *[ + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_KEY), + rhs=schemas.Value.infer(key), + op=schemas.FilterOperator.EQ, + ) + for key in label_keys + ] + ), + ) groundtruths = generate_select( models.GroundTruth, @@ -985,8 +1044,8 @@ def _compute_clf_metrics( labels = core.fetch_union_of_labels( db=db, - rhs=prediction_filter, lhs=groundtruth_filter, + rhs=prediction_filter, ) grouper_mappings = create_grouper_mappings( diff --git a/api/valor_api/backend/metrics/detection.py b/api/valor_api/backend/metrics/detection.py index b2bdbdade..ebd2dd281 100644 --- a/api/valor_api/backend/metrics/detection.py +++ b/api/valor_api/backend/metrics/detection.py @@ -1693,16 +1693,6 @@ def compute_detection_metrics(*_, db: Session, evaluation_id: int): model=model, target_type=parameters.convert_annotations_to_type, ) - match target_type: - case AnnotationType.BOX: - groundtruth_filter.require_bounding_box = True - prediction_filter.require_bounding_box = True - case AnnotationType.POLYGON: - groundtruth_filter.require_polygon = True - prediction_filter.require_polygon = True - case AnnotationType.RASTER: - groundtruth_filter.require_raster = True - prediction_filter.require_raster = True else: target_type = min( [ @@ -1713,6 +1703,33 @@ def compute_detection_metrics(*_, db: Session, evaluation_id: int): ] ) + match target_type: + case AnnotationType.BOX: + symbol = schemas.Symbol(name=schemas.SupportedSymbol.BOX) + case AnnotationType.POLYGON: + symbol = schemas.Symbol(name=schemas.SupportedSymbol.POLYGON) + case AnnotationType.RASTER: + symbol = schemas.Symbol(name=schemas.SupportedSymbol.RASTER) + case _: + raise TypeError( + f"'{target_type}' is not a valid type for object detection." + ) + + groundtruth_filter.annotations = schemas.LogicalFunction.and_( + groundtruth_filter.annotations, + schemas.Condition( + lhs=symbol, + op=schemas.FilterOperator.ISNOTNULL, + ), + ) + prediction_filter.annotations = schemas.LogicalFunction.and_( + prediction_filter.annotations, + schemas.Condition( + lhs=symbol, + op=schemas.FilterOperator.ISNOTNULL, + ), + ) + if ( parameters.metrics_to_return and enums.MetricType.DetailedPrecisionRecallCurve diff --git a/api/valor_api/backend/metrics/metric_utils.py b/api/valor_api/backend/metrics/metric_utils.py index 235a1daec..aaaace6b1 100644 --- a/api/valor_api/backend/metrics/metric_utils.py +++ b/api/valor_api/backend/metrics/metric_utils.py @@ -477,11 +477,72 @@ def prepare_filter_for_evaluation( A filter ready for evaluation. 
""" + # create dataset constraint + dataset_conditions = schemas.LogicalFunction.or_( + *[ + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.DATASET_NAME), + rhs=schemas.Value.infer(name), + op=schemas.FilterOperator.EQ, + ) + for name in dataset_names + ] + ) + + # create model constraint + model_condition = schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.MODEL_NAME), + rhs=schemas.Value.infer(model_name), + op=schemas.FilterOperator.EQ, + ) + + # create task type constraint + task_type_condition = schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.TASK_TYPE), + rhs=schemas.Value( + type=schemas.SupportedType.TASK_TYPE, value=task_type + ), + op=schemas.FilterOperator.CONTAINS, + ) + + # create new annotations filter + filters.annotations = ( + schemas.LogicalFunction.and_( + filters.annotations, + task_type_condition, + ) + if filters.annotations + else task_type_condition + ) + + # create new groundtruth filter + filters.groundtruths = ( + schemas.LogicalFunction.and_( + filters.groundtruths, + dataset_conditions, + ) + if filters.groundtruths + else dataset_conditions + ) + + # create new prediction filter + filters.predictions = ( + schemas.LogicalFunction.and_( + filters.predictions, + dataset_conditions, + model_condition, + ) + if filters.predictions + else schemas.LogicalFunction.and_( + dataset_conditions, + model_condition, + ) + ) + groundtruth_filter = filters.model_copy() - groundtruth_filter.task_types = [task_type] - groundtruth_filter.dataset_names = dataset_names + groundtruth_filter.predictions = None - predictions_filter = groundtruth_filter.model_copy() - predictions_filter.model_names = [model_name] + predictions_filter = filters.model_copy() + predictions_filter.groundtruths = None return (groundtruth_filter, predictions_filter) diff --git a/api/valor_api/backend/metrics/segmentation.py b/api/valor_api/backend/metrics/segmentation.py index 1f100cf0a..b4fea07b0 100644 --- a/api/valor_api/backend/metrics/segmentation.py +++ b/api/valor_api/backend/metrics/segmentation.py @@ -189,8 +189,26 @@ def _compute_segmentation_metrics( "grouper_id_to_label_ids_mapping" ].items(): # set filter - groundtruth_filter.label_ids = [label_id for label_id in label_ids] - prediction_filter.label_ids = [label_id for label_id in label_ids] + groundtruth_filter.labels = schemas.LogicalFunction.or_( + *[ + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID), + rhs=schemas.Value.infer(label_id), + op=schemas.FilterOperator.EQ, + ) + for label_id in label_ids + ] + ) + prediction_filter.labels = schemas.LogicalFunction.or_( + *[ + schemas.Condition( + lhs=schemas.Symbol(name=schemas.SupportedSymbol.LABEL_ID), + rhs=schemas.Value.infer(label_id), + op=schemas.FilterOperator.EQ, + ) + for label_id in label_ids + ] + ) computed_iou_score = _compute_iou( db, diff --git a/api/valor_api/backend/query/filtering.py b/api/valor_api/backend/query/filtering.py index 5b8aeeb28..0d757873f 100644 --- a/api/valor_api/backend/query/filtering.py +++ b/api/valor_api/backend/query/filtering.py @@ -13,7 +13,6 @@ func, not_, or_, - select, ) from sqlalchemy.dialects.postgresql import INTERVAL, TEXT @@ -28,12 +27,13 @@ Prediction, ) from valor_api.backend.query.types import LabelSourceAlias, TableTypeAlias -from valor_api.schemas.filters import AdvancedFilter as Filter from valor_api.schemas.filters import ( - NArgFunction, - OneArgFunction, - Symbol, - TwoArgFunction, + Condition, + Filter, + FilterOperator, + LogicalFunction, + 
SupportedSymbol, + SupportedType, Value, ) from valor_api.schemas.geometry import ( @@ -46,223 +46,256 @@ Polygon, ) + +def raise_not_implemented(x): + raise NotImplementedError(f"{x} is not implemented.") + + # Map an operation to a callable function. map_opstr_to_operator = { - "equal": operator.eq, - "notequal": operator.ne, - "greaterthan": operator.gt, - "greaterthanequal": operator.ge, - "lessthan": operator.lt, - "lessthanequal": operator.le, - "intersects": lambda lhs, rhs: func.ST_Intersects(lhs, rhs), - "inside": lambda lhs, rhs: func.ST_Covers(rhs, lhs), - "outside": lambda lhs, rhs: not_(func.ST_Covers(rhs, lhs)), - "isnull": lambda lhs, _: lhs.is_(None), - "isnotnull": lambda lhs, _: lhs.isnot(None), - "contains": lambda lhs, rhs: lhs.op("?")(rhs), -} - -# Map a symbol to a tuple containing a table and the relevant attribute. -map_name_to_table_column = { - "dataset.name": (Dataset, Dataset.name), - "dataset.metadata": (Dataset, Dataset.meta), - "model.name": (Model, Model.name), - "model.metadata": (Model, Model.meta), - "datum.uid": (Datum, Datum.uid), - "datum.metadata": (Datum, Datum.meta), - "annotation.bounding_box": (Annotation, Annotation.box), - "annotation.polygon": (Annotation, Annotation.polygon), - "annotation.raster": (Annotation, Annotation.raster), - "annotation.metadata": (Annotation, Annotation.meta), - "annotation.task_type": (Annotation, Annotation.implied_task_types), - "annotation.embedding": (Embedding, Embedding.value), - "annotation.labels": (Label, Label), - "annotation.model_id": (Annotation, Annotation.model_id), # remove - "label.key": (Label, Label.key), - "label.value": (Label, Label.value), - "label.score": (Prediction, Prediction.score), - "label.id": (Label, Label.id), # remove - "model.id": (Model, Model.id), + FilterOperator.EQ: operator.eq, + FilterOperator.NE: operator.ne, + FilterOperator.GT: operator.gt, + FilterOperator.GTE: operator.ge, + FilterOperator.LT: operator.lt, + FilterOperator.LTE: operator.le, + FilterOperator.INTERSECTS: lambda lhs, rhs: func.ST_Intersects(lhs, rhs), + FilterOperator.INSIDE: lambda lhs, rhs: func.ST_Covers(rhs, lhs), + FilterOperator.OUTSIDE: lambda lhs, rhs: not_(func.ST_Covers(rhs, lhs)), + FilterOperator.ISNULL: lambda lhs, _: lhs.is_(None), + FilterOperator.ISNOTNULL: lambda lhs, _: lhs.isnot(None), + FilterOperator.CONTAINS: lambda lhs, rhs: lhs.op("?")(rhs), } -# Map a symbol's attribute type to a modifying function. -map_attribute_to_type_cast = { - "area": { - "annotation.bounding_box": lambda x: ST_Area(x), - "annotation.polygon": lambda x: ST_Area(x), - "annotation.raster": lambda x: ST_Count(x), - "dataset.metadata": lambda x: ST_Area(x), - "model.metadata": lambda x: ST_Area(x), - "datum.metadata": lambda x: ST_Area(x), - "annotation.metadata": lambda x: ST_Area(x), - } +# Map a symbol to a tuple containing (table, column).
+map_symbol_to_resources = { + SupportedSymbol.DATASET_NAME: (Dataset, Dataset.name), + SupportedSymbol.MODEL_NAME: (Model, Model.name), + SupportedSymbol.DATUM_UID: (Datum, Datum.uid), + SupportedSymbol.BOX: (Annotation, Annotation.box), + SupportedSymbol.POLYGON: (Annotation, Annotation.polygon), + SupportedSymbol.RASTER: (Annotation, Annotation.raster), + SupportedSymbol.TASK_TYPE: (Annotation, Annotation.implied_task_types), + SupportedSymbol.EMBEDDING: (Embedding, Embedding.value), + SupportedSymbol.LABELS: (Label, Label), + SupportedSymbol.LABEL_KEY: (Label, Label.key), + SupportedSymbol.LABEL_VALUE: (Label, Label.value), + SupportedSymbol.SCORE: (Prediction, Prediction.score), + # 'area' attribute + SupportedSymbol.BOX_AREA: (Annotation, ST_Area(Annotation.box)), + SupportedSymbol.POLYGON_AREA: (Annotation, ST_Area(Annotation.polygon)), + SupportedSymbol.RASTER_AREA: (Annotation, ST_Count(Annotation.raster)), + # backend use only + SupportedSymbol.DATASET_ID: (Dataset, Dataset.id), + SupportedSymbol.MODEL_ID: (Model, Model.id), + SupportedSymbol.DATUM_ID: (Datum, Datum.id), + SupportedSymbol.ANNOTATION_ID: (Annotation, Annotation.id), + SupportedSymbol.GROUNDTRUTH_ID: (GroundTruth, GroundTruth.id), + SupportedSymbol.PREDICTION_ID: (Prediction, Prediction.id), + SupportedSymbol.LABEL_ID: (Label, Label.id), + SupportedSymbol.EMBEDDING_ID: (Embedding, Embedding.id), } -# Map a symbol's attribute to the type expected by the operation. -map_symbol_attribute_to_type = {"area": "float"} +# Map a keyed symbol to a tuple containing (table, key-to-column function). +map_keyed_symbol_to_resources = { + SupportedSymbol.DATASET_META: (Dataset, lambda key: Dataset.meta[key]), + SupportedSymbol.MODEL_META: (Model, lambda key: Model.meta[key]), + SupportedSymbol.DATUM_META: (Datum, lambda key: Datum.meta[key]), + SupportedSymbol.ANNOTATION_META: ( + Annotation, + lambda key: Annotation.meta[key], + ), + # 'area' attribute + SupportedSymbol.DATASET_META_AREA: ( + Dataset, + lambda key: ST_Area(ST_GeomFromGeoJSON(Dataset.meta[key]["value"])), + ), + SupportedSymbol.MODEL_META_AREA: ( + Model, + lambda key: ST_Area(ST_GeomFromGeoJSON(Model.meta[key]["value"])), + ), + SupportedSymbol.DATUM_META_AREA: ( + Datum, + lambda key: ST_Area(ST_GeomFromGeoJSON(Datum.meta[key]["value"])), + ), + SupportedSymbol.ANNOTATION_META_AREA: ( + Annotation, + lambda key: ST_Area(ST_GeomFromGeoJSON(Annotation.meta[key]["value"])), + ), +} # Map a type to a type casting function. This is used for accessing JSONB values.
map_type_to_jsonb_type_cast = { - "boolean": lambda x: x.astext.cast(Boolean), - "integer": lambda x: x.astext.cast(Integer), - "float": lambda x: x.astext.cast(Float), - "string": lambda x: x.astext, - "tasktype": lambda x: x.astext, - "datetime": lambda x: cast( + SupportedType.BOOLEAN: lambda x: x.astext.cast(Boolean), + SupportedType.INTEGER: lambda x: x.astext.cast(Integer), + SupportedType.FLOAT: lambda x: x.astext.cast(Float), + SupportedType.STRING: lambda x: x.astext, + SupportedType.TASK_TYPE: lambda x: x.astext, + SupportedType.DATETIME: lambda x: cast( x["value"].astext, type_=TIMESTAMP(timezone=True) ), - "date": lambda x: cast(x["value"].astext, type_=TIMESTAMP(timezone=True)), - "time": lambda x: cast(x["value"].astext, type_=INTERVAL), - "duration": lambda x: cast(x["value"].astext, type_=INTERVAL), - "point": lambda x: ST_GeomFromGeoJSON(x["value"]), - "multipoint": lambda x: ST_GeomFromGeoJSON(x["value"]), - "linestring": lambda x: ST_GeomFromGeoJSON(x["value"]), - "multilinestring": lambda x: ST_GeomFromGeoJSON(x["value"]), - "polygon": lambda x: ST_GeomFromGeoJSON(x["value"]), - "box": lambda x: ST_GeomFromGeoJSON(x["value"]), - "multipolygon": lambda x: ST_GeomFromGeoJSON(x["value"]), - "geojson": lambda x: ST_GeomFromGeoJSON(x["value"]), -} - - -# Map an attribute to a type casting function. This is used for accessing JSONB values. -map_attribute_to_jsonb_type_cast = { - "area": lambda x: ST_GeomFromGeoJSON(x["value"]), + SupportedType.DATE: lambda x: cast( + x["value"].astext, type_=TIMESTAMP(timezone=True) + ), + SupportedType.TIME: lambda x: cast(x["value"].astext, type_=INTERVAL), + SupportedType.DURATION: lambda x: cast(x["value"].astext, type_=INTERVAL), + SupportedType.POINT: lambda x: ST_GeomFromGeoJSON(x["value"]), + SupportedType.MULTIPOINT: lambda x: ST_GeomFromGeoJSON(x["value"]), + SupportedType.LINESTRING: lambda x: ST_GeomFromGeoJSON(x["value"]), + SupportedType.MULTILINESTRING: lambda x: ST_GeomFromGeoJSON(x["value"]), + SupportedType.POLYGON: lambda x: ST_GeomFromGeoJSON(x["value"]), + SupportedType.BOX: lambda x: ST_GeomFromGeoJSON(x["value"]), + SupportedType.MULTIPOLYGON: lambda x: ST_GeomFromGeoJSON(x["value"]), + SupportedType.GEOJSON: lambda x: ST_GeomFromGeoJSON(x["value"]), + # unsupported + SupportedType.RASTER: raise_not_implemented, + SupportedType.EMBEDDING: raise_not_implemented, + SupportedType.LABEL: raise_not_implemented, } # Map a value type to a type casting function. 
-map_value_type_to_type_cast = { - "boolean": lambda x: x, - "integer": lambda x: x, - "float": lambda x: x, - "string": lambda x: x, - "tasktype": lambda x: x, - "datetime": lambda x: cast(x, type_=TIMESTAMP(timezone=True)), - "date": lambda x: cast(x, type_=TIMESTAMP(timezone=True)), - "time": lambda x: cast(x, type_=INTERVAL), - "duration": lambda x: cast(cast(x, TEXT), type_=INTERVAL), - "point": lambda x: ST_GeomFromGeoJSON(Point(value=x).to_json()), - "multipoint": lambda x: ST_GeomFromGeoJSON(MultiPoint(value=x).to_json()), - "linestring": lambda x: ST_GeomFromGeoJSON(LineString(value=x).to_json()), - "multilinestring": lambda x: ST_GeomFromGeoJSON( +map_type_to_type_cast = { + SupportedType.BOOLEAN: lambda x: x, + SupportedType.INTEGER: lambda x: x, + SupportedType.FLOAT: lambda x: x, + SupportedType.STRING: lambda x: x, + SupportedType.TASK_TYPE: lambda x: x, + SupportedType.DATETIME: lambda x: cast(x, type_=TIMESTAMP(timezone=True)), + SupportedType.DATE: lambda x: cast(x, type_=TIMESTAMP(timezone=True)), + SupportedType.TIME: lambda x: cast(x, type_=INTERVAL), + SupportedType.DURATION: lambda x: cast(cast(x, TEXT), type_=INTERVAL), + SupportedType.POINT: lambda x: ST_GeomFromGeoJSON( + Point(value=x).to_json() + ), + SupportedType.MULTIPOINT: lambda x: ST_GeomFromGeoJSON( + MultiPoint(value=x).to_json() + ), + SupportedType.LINESTRING: lambda x: ST_GeomFromGeoJSON( + LineString(value=x).to_json() + ), + SupportedType.MULTILINESTRING: lambda x: ST_GeomFromGeoJSON( MultiLineString(value=x).to_dict() ), - "polygon": lambda x: ST_GeomFromGeoJSON(Polygon(value=x).to_json()), - "box": lambda x: ST_GeomFromGeoJSON(Box(value=x).to_json()), - "multipolygon": lambda x: ST_GeomFromGeoJSON( + SupportedType.POLYGON: lambda x: ST_GeomFromGeoJSON( + Polygon(value=x).to_json() + ), + SupportedType.BOX: lambda x: ST_GeomFromGeoJSON(Box(value=x).to_json()), + SupportedType.MULTIPOLYGON: lambda x: ST_GeomFromGeoJSON( + MultiPolygon(value=x).to_json() + ), - "geojson": lambda x: ST_GeomFromGeoJSON(x), + SupportedType.GEOJSON: lambda x: ST_GeomFromGeoJSON(x), + # unsupported + SupportedType.RASTER: raise_not_implemented, + SupportedType.EMBEDDING: raise_not_implemented, + SupportedType.LABEL: raise_not_implemented, } -def create_cte( - opstr: str, symbol: Symbol, value: Value | None = None -) -> tuple[TableTypeAlias, CTE]: +def create_where_expression( + condition: Condition, +) -> tuple[TableTypeAlias, BinaryExpression]: """ - Creates a CTE from a binary expression. + Creates a binary expression from a condition. Parameters ---------- - opstr : str - The expression operator. - symbol : Symbol - The lhs of the expression. - value : Value, optional - The rhs of the expression, if it exists. + condition : Condition + The conditional operation. Returns ------- - tuple[TableTypeAlias, CTE] - A tuple of a table to join on and the CTE. + tuple[TableTypeAlias, BinaryExpression] + A tuple of a table and its filtering expression. + + Raises + ------ + NotImplementedError + If the symbol is not implemented. + TypeError + If there is a type mismatch.
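To make the mapping tables above concrete, a hedged sketch of what create_where_expression returns for a simple equality condition; the schema construction follows the patterns used elsewhere in this diff, and the exact SQLAlchemy expression is an expectation, not a guarantee:

    from valor_api import schemas

    # expected to resolve to roughly (Datum, Datum.uid == "uid1"), since
    # SupportedSymbol.DATUM_UID maps to (Datum, Datum.uid) above and
    # FilterOperator.EQ maps to operator.eq
    table, expr = create_where_expression(
        schemas.Condition(
            lhs=schemas.Symbol(name=schemas.SupportedSymbol.DATUM_UID),
            rhs=schemas.Value.infer("uid1"),
            op=schemas.FilterOperator.EQ,
        )
    )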
""" - if not isinstance(symbol, Symbol): - raise ValueError(f"CTE passed a symbol with type '{type(symbol)}'.") - elif not isinstance(value, Value) and value is not None: - raise ValueError(f"CTE passed a value with type '{type(value)}'.") - elif value and symbol.type != value.type: - symbol_type = ( - map_symbol_attribute_to_type[symbol.attribute] - if symbol.attribute - else symbol.type + + # convert lhs (symbol) to sql representation + if condition.lhs.name in map_symbol_to_resources: + table, lhs = map_symbol_to_resources[condition.lhs.name] + elif ( + condition.lhs.name in map_keyed_symbol_to_resources + and condition.lhs.key + ): + table, generate_column = map_keyed_symbol_to_resources[ + condition.lhs.name + ] + lhs = generate_column(condition.lhs.key) + else: + raise NotImplementedError( + f"Symbol '{condition.lhs}' does not match any existing templates." + ) + + if condition.rhs and condition.lhs.key and condition.lhs.type is None: + lhs = map_type_to_jsonb_type_cast[condition.rhs.type](lhs) + elif ( + isinstance(condition.rhs, Value) + and condition.rhs.type != condition.lhs.type + ): + raise TypeError( + f"Type mismatch between '{condition.lhs}' and '{condition.rhs}'." ) - if symbol_type != value.type: - raise TypeError( - f"Type mismatch between symbol and value. {symbol_type} != {value.type}." - ) - op = map_opstr_to_operator[opstr] - table, lhs = map_name_to_table_column[symbol.name] + op = map_opstr_to_operator[condition.op] rhs = ( - map_value_type_to_type_cast[value.type](value.value) if value else None + map_type_to_type_cast[condition.rhs.type](condition.rhs.value) + if isinstance(condition.rhs, Value) + else None ) - # add keying - if symbol.key: - lhs = lhs[symbol.key] - - # add type cast - if not symbol.attribute: - lhs = map_type_to_jsonb_type_cast[symbol.type](lhs) - else: - lhs = map_attribute_to_jsonb_type_cast[symbol.attribute](lhs) - - # add attribute modifier - if symbol.attribute: - modifier = map_attribute_to_type_cast[symbol.attribute][symbol.name] - lhs = modifier(lhs) - - return (table, select(table.id).where(op(lhs, rhs)).cte()) + return (table, op(lhs, rhs)) def _recursive_search_logic_tree( - func: OneArgFunction | TwoArgFunction | NArgFunction, - cte_list: list | None = None, + func: Condition | LogicalFunction, + expr_list: list | None = None, tables: list[TableTypeAlias] | None = None, -) -> tuple[int | dict, list[CTE], list[TableTypeAlias]]: +) -> tuple[int | dict, list[BinaryExpression], list[TableTypeAlias]]: """ Walks the filtering function to produce dependencies. """ - if not isinstance(func, OneArgFunction | TwoArgFunction | NArgFunction): + if not isinstance(func, (Condition, LogicalFunction)): raise TypeError( f"Expected input to be of type 'OneArgFunction | TwoArgFunction | NArgFunction'. Received '{func}'." 
) - cte_list = cte_list if cte_list else list() + expr_list = expr_list if expr_list else list() tables = tables if tables else list() logical_tree = dict() - if isinstance(func, OneArgFunction): - if isinstance(func.arg, Symbol): - table, cte = create_cte(opstr=func.op, symbol=func.arg) - tables.append(table) - cte_list.append(cte) - return (len(cte_list) - 1, cte_list, tables) - else: - branch, cte_list, tables = _recursive_search_logic_tree( - func.arg, cte_list, tables - ) - logical_tree[func.op] = branch - return (logical_tree, cte_list, tables) - - elif isinstance(func, TwoArgFunction): - table, cte = create_cte(opstr=func.op, symbol=func.lhs, value=func.rhs) + if isinstance(func, Condition): + table, cte = create_where_expression(func) tables.append(table) - cte_list.append(cte) + expr_list.append(cte) + return (len(expr_list) - 1, expr_list, tables) - - elif isinstance(func, NArgFunction): - branches = list() - for arg in func.args: - branch, cte_list, tables = _recursive_search_logic_tree( - arg, cte_list, tables + expr_list.append(cte) + return (len(expr_list) - 1, expr_list, tables) + elif isinstance(func, LogicalFunction): + if isinstance(func.args, (Condition, LogicalFunction)): + branch, expr_list, tables = _recursive_search_logic_tree( + func.args, expr_list, tables ) - branches.append(branch) - logical_tree[func.op] = branches - return (logical_tree, cte_list, tables) + logical_tree[func.op] = branch + return (logical_tree, expr_list, tables) + else: + branches = list() + for arg in func.args: + branch, expr_list, tables = _recursive_search_logic_tree( + arg, expr_list, tables + ) + branches.append(branch) + logical_tree[func.op] = branches + return (logical_tree, expr_list, tables) + else: + raise TypeError( + f"Received an unsupported type '{type(func)}' in func." ) @@ -307,14 +340,14 @@ def map_filter_to_tables( @@ def generate_dependencies( - func: OneArgFunction | TwoArgFunction | NArgFunction | None, -) -> tuple[int | dict | None, list[CTE], list[TableTypeAlias]]: + func: Condition | LogicalFunction | None, +) -> tuple[int | dict | None, list[BinaryExpression], list[TableTypeAlias]]: """ Recursively generates the dependencies for creating a filter subquery. Parameters ---------- - func : OneArgFunction | TwoArgFunction | NArgFunction, optional + func : Condition | LogicalFunction, optional An optional filtering function. Returns @@ -328,7 +361,7 @@ def generate_logical_expression( - root: CTE, tree: int | dict[str, int | dict | list], prefix: str + ordered_ctes: list[CTE], tree: int | dict[str, int | dict | list] ) -> BinaryExpression: """ Generates the 'where' expression from a logical tree. @@ -339,8 +372,6 @@ The CTE that evaluates the binary expressions. tree : int | dict[str, int | dict | list] The logical index tree. - prefix : str - The prefix of the relevant CTE. Returns ------- @@ -348,7 +379,7 @@ A binary expression that can be used in a WHERE statement.
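To make the tree encoding concrete: integer leaves index into the ordered CTE list, and dictionary keys name logical operators (shown here as plain strings for readability). A hedged sketch of the expression generate_logical_expression builds for the tree {"and": [0, {"not": 1}]}, following the branches implemented below:

    from sqlalchemy import and_

    def expected_expression(ordered_ctes):
        # a row passes when it matched CTE 0 and did not match CTE 1; note
        # that the 'not' of an integer leaf folds directly into an IS NULL
        # check rather than wrapping the positive test in not_()
        return and_(
            ordered_ctes[0].c.id.isnot(None),
            ordered_ctes[1].c.id.is_(None),
        )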
""" if isinstance(tree, int): - return getattr(root.c, f"{prefix}{tree}") == 1 + return ordered_ctes[0].c.id.isnot(None) if not isinstance(tree, dict) or len(tree.keys()) != 1: raise ValueError("If not an 'int', expected tree to be dictionary.") @@ -364,10 +395,10 @@ def generate_logical_expression( raise ValueError("Expected a list of expressions.") return logical_operators[op]( *[ - (getattr(root.c, f"{prefix}{arg}") == 1) + ordered_ctes[arg].c.id.isnot(None) if isinstance(arg, int) else generate_logical_expression( - root=root, tree=arg, prefix=prefix + ordered_ctes=ordered_ctes, tree=arg ) for arg in args ] @@ -377,10 +408,12 @@ def generate_logical_expression( if isinstance(arg, list): raise ValueError return ( - (getattr(root.c, f"{prefix}{arg}") == 0) + ordered_ctes[arg].c.id.is_(None) if isinstance(arg, int) else not_( - generate_logical_expression(root=root, tree=arg, prefix=prefix) + generate_logical_expression( + ordered_ctes=ordered_ctes, tree=arg + ) ) ) else: diff --git a/api/valor_api/backend/query/ops.py b/api/valor_api/backend/query/ops.py index 3ced6fd84..bd0fdcb25 100644 --- a/api/valor_api/backend/query/ops.py +++ b/api/valor_api/backend/query/ops.py @@ -6,27 +6,12 @@ from valor_api.backend.models import Annotation, GroundTruth, Prediction from valor_api.backend.query.solvers import solver from valor_api.backend.query.types import LabelSourceAlias -from valor_api.schemas.filters import AdvancedFilter, Filter - - -def _format_filter_to_advanced_filter( - f: Filter | AdvancedFilter | None, label_source: LabelSourceAlias -) -> AdvancedFilter | None: - if f is None: - return None - elif isinstance(f, AdvancedFilter): - return f - elif label_source is GroundTruth: - return f.to_advanced_filter(ignore_predictions=True) - elif label_source is Prediction: - return f.to_advanced_filter(ignore_groundtruths=True) - else: - return f.to_advanced_filter() +from valor_api.schemas.filters import Filter def generate_select( *args: Any, - filters: Filter | AdvancedFilter | None = None, + filters: Filter | None = None, label_source: LabelSourceAlias = Annotation, ) -> Select[Any]: """ @@ -38,7 +23,7 @@ def generate_select( ---------- *args : Any A variable list of models or model attributes. (e.g. Label or Label.key) - filters : Filter | AdvancedFilter, optional + filters : Filter, optional An optional filter. label_source : LabelSourceAlias, default=Annotation The table to source labels from. This determines graph structure. @@ -62,7 +47,7 @@ def generate_select( query = solver( *args, stmt=select(*args), - filters=_format_filter_to_advanced_filter(filters, label_source), + filters=filters, label_source=label_source, ) if not isinstance(query, Select): @@ -75,7 +60,7 @@ def generate_select( def generate_query( *args: Any, db: Session, - filters: Filter | AdvancedFilter | None = None, + filters: Filter | None = None, label_source: LabelSourceAlias = Annotation, ) -> Query[Any]: """ @@ -89,7 +74,7 @@ def generate_query( A variable list of models or model attributes. (e.g. Label or Label.key) db : Session The database session to call query against. - filters : Filter | AdvancedFilter, optional + filters : Filter, optional An optional filter. label_source : LabelSourceAlias, default=Annotation The table to source labels from. This determines graph structure. 
@@ -113,7 +98,7 @@ def generate_query( query = solver( *args, stmt=db.query(*args), - filters=_format_filter_to_advanced_filter(filters, label_source), + filters=filters, label_source=label_source, ) if not isinstance(query, Query): diff --git a/api/valor_api/backend/query/solvers.py b/api/valor_api/backend/query/solvers.py index 77706cac4..62769e688 100644 --- a/api/valor_api/backend/query/solvers.py +++ b/api/valor_api/backend/query/solvers.py @@ -1,6 +1,6 @@ from typing import Any, Callable -from sqlalchemy import Select, Subquery, alias, case, or_, select +from sqlalchemy import CTE, Select, alias, or_, select from sqlalchemy.orm import InstrumentedAttribute, Query from sqlalchemy.sql.elements import UnaryExpression @@ -21,8 +21,7 @@ ) from valor_api.backend.query.mapping import map_arguments_to_tables from valor_api.backend.query.types import LabelSourceAlias, TableTypeAlias -from valor_api.schemas.filters import AdvancedFilter as Filter -from valor_api.schemas.filters import FunctionType +from valor_api.schemas.filters import Filter, FunctionType def _join_label_to_annotation(selection: Select) -> Select[Any]: @@ -308,9 +307,9 @@ def _join_model_to_datum(selection: Select) -> Select[Any]: }, GroundTruth: { Dataset: {Datum}, - Model: {Annotation}, - Datum: {Dataset, Annotation, Prediction}, - Annotation: {Datum, Model, GroundTruth, Embedding}, + Model: {Datum}, + Datum: {Dataset, Model, Annotation, Prediction}, + Annotation: {Datum, GroundTruth, Embedding}, Embedding: {Annotation}, GroundTruth: {Annotation, Label}, Prediction: {Datum}, @@ -469,10 +468,9 @@ def generate_filter_subquery( conditions: FunctionType, select_from: TableTypeAlias, label_source: LabelSourceAlias, - prefix: str, -) -> Subquery[Any]: +) -> CTE: """ - Generates the filtering subquery. + Generates the filtering CTE. Parameters ---------- @@ -482,61 +480,52 @@ def generate_filter_subquery( The table to center the query over. label_source : LabelSourceAlias The table to use as a source of labels. - prefix : str - The prefix to use in naming the CTE queries. Returns ------- - Subquery[Any] - A filtering subquery. + CTE + A filtering CTE. """ if label_source not in {Annotation, GroundTruth, Prediction}: raise ValueError(f"Invalid label source '{label_source}'.") - tree, ordered_ctes, ordered_tables = generate_dependencies(conditions) + tree, ordered_expressions, ordered_tables = generate_dependencies( + conditions + ) if tree is None: raise ValueError(f"Invalid function given as input. 
'{conditions}'") tables = set(ordered_tables) tables.discard(select_from) - ordered_joins = _solve_graph( - select_from=select_from, - label_source=label_source, - tables=tables, - ) - expressions = [ - (table.id == cte.c.id) - for table, cte in zip(ordered_tables, ordered_ctes) - ] + ordered_ctes = [] + for table, expression in zip(ordered_tables, ordered_expressions): + ordered_cte_joins = _solve_graph( + select_from=select_from, label_source=label_source, tables={table} + ) + + # define cte + cte_query = select( + select_from.id.label("id"), + ) + for join in ordered_cte_joins: + cte_query = join(cte_query) + cte_query = cte_query.where(expression).distinct().cte() + ordered_ctes.append(cte_query) # construct query - query = select( - select_from.id.label("id"), - *[ - case((expr, 1), else_=0).label(f"{prefix}{idx}") - for idx, expr in enumerate(expressions) - ], - ) + query = select(select_from.id.label("id")) query = query.select_from(select_from) - for join in ordered_joins: - query = join(query) - for table, cte in zip(ordered_tables, ordered_ctes): - query = query.join(cte, cte.c.id == table.id, isouter=True) - query = query.distinct().cte() - - return ( - select(query.c.id.label("id")) - .select_from(query) - .where(generate_logical_expression(query, tree, prefix=prefix)) - .subquery() - ) + for cte in ordered_ctes: + query = query.join(cte, cte.c.id == select_from.id, isouter=True) + query = query.where(generate_logical_expression(ordered_ctes, tree)) + return query.cte() def generate_filter_queries( filters: Filter, label_source: LabelSourceAlias, -) -> list[tuple[Subquery[Any], TableTypeAlias]]: +) -> list[tuple[CTE, TableTypeAlias]]: """ Generates the filtering subqueries. @@ -551,7 +540,7 @@ def generate_filter_queries( Returns ------- - list[tuple[Subquery[Any], TableTypeAlias]] + list[tuple[CTE, TableTypeAlias]] A list of tuples containing a filtering subquery and the table to join it on. 
""" @@ -559,96 +548,76 @@ def _generator( conditions: FunctionType, select_from: TableTypeAlias, label_source: LabelSourceAlias, - prefix: str, - ): - return generate_filter_subquery( + ) -> tuple[CTE, TableTypeAlias]: + cte = generate_filter_subquery( conditions=conditions, select_from=select_from, label_source=label_source, - prefix=f"{prefix}_cte", ) + return (cte, select_from) queries = list() if filters.datasets: - conditions = filters.datasets - select_from = Dataset - prefix = "ds" - queries.append( - ( - _generator(conditions, select_from, label_source, prefix), - select_from, - ) + result = _generator( + conditions=filters.datasets, + select_from=Dataset, + label_source=GroundTruth, ) + queries.append(result) if filters.models: - conditions = filters.models - select_from = Model - prefix = "md" - queries.append( - ( - _generator(conditions, select_from, label_source, prefix), - select_from, - ) + result = _generator( + conditions=filters.models, + select_from=Model, + label_source=Prediction, ) + queries.append(result) if filters.datums: - conditions = filters.datums - select_from = Datum - prefix = "dt" - queries.append( - ( - _generator(conditions, select_from, label_source, prefix), - select_from, - ) + result = _generator( + conditions=filters.datums, + select_from=Datum, + label_source=GroundTruth, ) + queries.append(result) if filters.annotations: - conditions = filters.annotations - select_from = Annotation - prefix = "an" - queries.append( - ( - _generator(conditions, select_from, label_source, prefix), - select_from, - ) + result = _generator( + conditions=filters.annotations, + select_from=Annotation, + label_source=label_source, ) + queries.append(result) if filters.groundtruths: - conditions = filters.groundtruths - select_from = GroundTruth if label_source is not Prediction else Datum - prefix = "gt" - queries.append( - ( - _generator(conditions, select_from, GroundTruth, prefix), - select_from, - ) + result = _generator( + conditions=filters.groundtruths, + select_from=GroundTruth + if label_source is not Prediction + else Datum, + label_source=GroundTruth, ) + queries.append(result) if filters.predictions: - conditions = filters.predictions - select_from = Prediction if label_source is not GroundTruth else Datum - prefix = "pd" - queries.append( - ( - _generator(conditions, select_from, Prediction, prefix), - select_from, - ) + result = _generator( + conditions=filters.predictions, + select_from=Prediction + if label_source is not GroundTruth + else Datum, + label_source=Prediction, ) + queries.append(result) if filters.labels: - conditions = filters.labels - select_from = Label - prefix = "lb" - queries.append( - ( - _generator(conditions, select_from, label_source, prefix), - select_from, - ) + result = _generator( + conditions=filters.labels, + select_from=Label, + label_source=label_source, ) + queries.append(result) if filters.embeddings: - conditions = filters.embeddings - select_from = Embedding - prefix = "em" - queries.append( - ( - _generator(conditions, select_from, label_source, prefix), - select_from, - ) + result = _generator( + conditions=filters.embeddings, + select_from=Embedding, + label_source=label_source, ) + queries.append(result) + return queries @@ -710,9 +679,9 @@ def solver( filters=filters, ) if filters is not None: - filter_subqueries = generate_filter_queries( + filter_ctes = generate_filter_queries( filters=filters, label_source=label_source ) - for subquery, selected_from in filter_subqueries: + for subquery, selected_from in 
filter_ctes: query = query.join(subquery, subquery.c.id == selected_from.id) return query diff --git a/api/valor_api/crud/_read.py b/api/valor_api/crud/_read.py index d08cde5ed..7478f79ae 100644 --- a/api/valor_api/crud/_read.py +++ b/api/valor_api/crud/_read.py @@ -170,7 +170,7 @@ def get_dataset( def get_datasets( *, db: Session, - filters: schemas.Filter | None = None, + filters: schemas.Filter, offset: int = 0, limit: int = -1, ) -> tuple[list[schemas.Dataset], dict[str, str]]: @@ -183,11 +183,11 @@ def get_datasets( ---------- db : Session The database Session to query against. - filters : schemas.Filter, optional - An optional filter to apply. - offset : int, optional + filters : schemas.Filter + A filter object to constrain the results by. + offset : int The start index of the items to return. - limit : int, optional + limit : int The number of items to return. Returns all items when set to -1. diff --git a/api/valor_api/main.py b/api/valor_api/main.py index 399d77030..2954c9b0f 100644 --- a/api/valor_api/main.py +++ b/api/valor_api/main.py @@ -2,7 +2,14 @@ import os from typing import Annotated -from fastapi import BackgroundTasks, Depends, FastAPI, HTTPException, Response +from fastapi import ( + BackgroundTasks, + Depends, + FastAPI, + HTTPException, + Query, + Response, +) from fastapi.exceptions import RequestValidationError from fastapi.middleware.cors import CORSMiddleware from fastapi.security import OAuth2PasswordRequestForm @@ -233,13 +240,17 @@ def get_prediction( status_code=200, dependencies=[Depends(token_auth_scheme)], tags=["Labels"], - description="Fetch labels using optional JSON strings as query parameters.", + description="Fetch all labels.", ) def get_labels( response: Response, - filters: schemas.FilterQueryParams = Depends(), - offset: int = 0, - limit: int = -1, + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. Returns all items when set to -1.", + ), db: Session = Depends(get_db), ) -> list[schemas.Label]: """ @@ -251,24 +262,77 @@ def get_labels( ---------- response: Response The FastAPI response object. Used to return a content-range header to the user. - filters : schemas.FilterQueryParams, optional - An optional filter to constrain results by. + offset : int, optional + The start index of the items to return. + limit : int, optional + The number of items to return. Returns all items when set to -1. db : Session The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user. + + Returns + ------- + list[schemas.Label] + A list of all labels in the database. + """ + try: + content, headers = crud.get_labels( + db=db, + filters=schemas.Filter(), + offset=offset, + limit=limit, + ) + response.headers.update(headers) + return list(content) + except Exception as e: + raise exceptions.create_http_error(e) + + +@app.post( + "/labels/filter", + status_code=200, + dependencies=[Depends(token_auth_scheme)], + tags=["Labels"], + description="Fetch labels using a filter.", +) +def get_filtered_labels( + response: Response, + filters: schemas.Filter, + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. Returns all items when set to -1.", + ), + db: Session = Depends(get_db), +) -> list[schemas.Label]: + """ + Fetch labels using a filter. 
+ + POST Endpoint: `/labels/filter` + + Parameters + ---------- + response: Response + The FastAPI response object. Used to return a content-range header to the user. + filters : Filter + The filter to constrain the results by. offset : int, optional The start index of the items to return. limit : int, optional The number of items to return. Returns all items when set to -1. + db : Session + The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user. Returns ------- list[schemas.Label] - A list of all labels in the database. + A list of labels. """ try: content, headers = crud.get_labels( db=db, - filters=schemas.convert_filter_query_params_to_filter_obj(filters), + filters=filters, offset=offset, limit=limit, ) @@ -287,8 +351,13 @@ def get_labels( def get_labels_from_dataset( response: Response, dataset_name: str, - offset: int = 0, - limit: int = -1, + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. Returns all items when set to -1.", + ), db: Session = Depends(get_db), ) -> list[schemas.Label]: """ @@ -321,7 +390,13 @@ def get_labels_from_dataset( content, headers = crud.get_labels( db=db, filters=schemas.Filter( - dataset_names=[dataset_name], + groundtruths=schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ) ), ignore_prediction_labels=True, offset=offset, @@ -343,8 +418,13 @@ def get_labels_from_dataset( def get_labels_from_model( response: Response, model_name: str, - offset: int = 0, - limit: int = -1, + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. Returns all items when set to -1.", + ), db: Session = Depends(get_db), ) -> list[schemas.Label]: """ @@ -379,7 +459,13 @@ def get_labels_from_model( content, headers = crud.get_labels( db=db, filters=schemas.Filter( - model_names=[model_name], + groundtruths=schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.MODEL_NAME + ), + rhs=schemas.Value.infer(model_name), + op=schemas.FilterOperator.EQ, + ) ), ignore_groundtruth_labels=True, offset=offset, @@ -434,9 +520,13 @@ def create_dataset(dataset: schemas.Dataset, db: Session = Depends(get_db)): ) def get_datasets( response: Response, - filters: schemas.FilterQueryParams = Depends(), - offset: int = 0, - limit: int = -1, + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. Returns all items when set to -1.", + ), db: Session = Depends(get_db), ) -> list[schemas.Dataset]: """ @@ -465,7 +555,7 @@ def get_datasets( try: content, headers = crud.get_datasets( db=db, - filters=schemas.convert_filter_query_params_to_filter_obj(filters), + filters=schemas.Filter(), offset=offset, limit=limit, ) @@ -475,6 +565,61 @@ def get_datasets( raise exceptions.create_http_error(e) +@app.post( + "/datasets/filter", + status_code=200, + dependencies=[Depends(token_auth_scheme)], + tags=["Datasets"], + description="Fetch datasets using a filter.", +) +def get_filtered_datasets( + response: Response, + filters: schemas.Filter, + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. 
Returns all items when set to -1.", + ), + db: Session = Depends(get_db), +) -> list[schemas.Dataset]: + """ + Fetch datasets using a filter. + + POST Endpoint: `/datasets/filter` + + Parameters + ---------- + response: Response + The FastAPI response object. Used to return a content-range header to the user. + filters : Filter + The filter to constrain the results by. + offset : int, optional + The start index of the items to return. + limit : int, optional + The number of items to return. Returns all items when set to -1. + db : Session + The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user. + + Returns + ------- + list[schemas.Dataset] + A list of datasets. + """ + try: + content, headers = crud.get_datasets( + db=db, + filters=filters, + offset=offset, + limit=limit, + ) + response.headers.update(headers) + return list(content) + except Exception as e: + raise exceptions.create_http_error(e) + + @app.get( "/datasets/{dataset_name}", dependencies=[Depends(token_auth_scheme)], @@ -664,13 +809,17 @@ def delete_dataset( status_code=200, dependencies=[Depends(token_auth_scheme)], tags=["Datums"], - description="Fetch datums using optional JSON strings as query parameters.", + description="Fetch all datums.", ) def get_datums( response: Response, - filters: schemas.FilterQueryParams = Depends(), - offset: int = 0, - limit: int = -1, + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. Returns all items when set to -1.", + ), db: Session = Depends(get_db), ) -> list[schemas.Datum]: """ @@ -682,8 +831,6 @@ ---------- response: Response The FastAPI response object. Used to return a content-range header to the user. - filters : schemas.FilterQueryParams, optional - An optional filter to constrain results by. offset : int, optional The start index of the items to return. limit : int, optional @@ -704,7 +851,7 @@ try: content, headers = crud.get_datums( db=db, - filters=schemas.convert_filter_query_params_to_filter_obj(filters), + filters=schemas.Filter(), offset=offset, limit=limit, ) @@ -714,6 +861,61 @@ +@app.post( + "/data/filter", + status_code=200, + dependencies=[Depends(token_auth_scheme)], + tags=["Datums"], + description="Fetch datums using a filter.", +) +def get_filtered_datums( + response: Response, + filters: schemas.Filter, + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. Returns all items when set to -1.", + ), + db: Session = Depends(get_db), +) -> list[schemas.Datum]: + """ + Fetch datums using a filter. + + POST Endpoint: `/data/filter` + + Parameters + ---------- + response: Response + The FastAPI response object. Used to return a content-range header to the user. + filters : Filter + The filter to constrain the results by. + offset : int, optional + The start index of the items to return. + limit : int, optional + The number of items to return. Returns all items when set to -1. + db : Session + The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user. + + Returns + ------- + list[schemas.Datum] + A list of datums.
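A hedged sketch of how a client might call one of the new */filter endpoints, serializing the same schema objects used server-side; client is a FastAPI TestClient as in the test diff above, and pydantic v2's model_dump is assumed for JSON-safe serialization:

    from valor_api import schemas

    payload = schemas.Filter(
        datums=schemas.Condition(
            lhs=schemas.Symbol(name=schemas.SupportedSymbol.DATUM_UID),
            rhs=schemas.Value.infer("uid1"),
            op=schemas.FilterOperator.EQ,
        )
    ).model_dump(mode="json")  # assumption: pydantic v2 serialization

    resp = client.post("/data/filter", json=payload)
    assert resp.status_code == 200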
+ """ + try: + content, headers = crud.get_datums( + db=db, + filters=filters, + offset=offset, + limit=limit, + ) + response.headers.update(headers) + return list(content) + except Exception as e: + raise exceptions.create_http_error(e) + + @app.get( "/data/dataset/{dataset_name}/uid/{uid}", status_code=200, @@ -747,8 +949,25 @@ def get_datum( datums, _ = crud.get_datums( db=db, filters=schemas.Filter( - dataset_names=[dataset_name], - datum_uids=[uid], + datums=schemas.LogicalFunction( + args=[ + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATASET_NAME + ), + rhs=schemas.Value.infer(dataset_name), + op=schemas.FilterOperator.EQ, + ), + schemas.Condition( + lhs=schemas.Symbol( + name=schemas.SupportedSymbol.DATUM_UID + ), + rhs=schemas.Value.infer(uid), + op=schemas.FilterOperator.EQ, + ), + ], + op=schemas.LogicalOperator.AND, + ) ), ) @@ -800,14 +1019,18 @@ def create_model(model: schemas.Model, db: Session = Depends(get_db)): status_code=200, dependencies=[Depends(token_auth_scheme)], tags=["Models"], - description="Fetch models using optional JSON strings as query parameters.", + description="Fetch all models.", response_model=list[schemas.Model], ) def get_models( response: Response, - filters: schemas.FilterQueryParams = Depends(), - offset: int = 0, - limit: int = -1, + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. Returns all items when set to -1.", + ), db: Session = Depends(get_db), ) -> list[schemas.Model]: """ @@ -819,8 +1042,6 @@ def get_models( ---------- response: Response The FastAPI response object. Used to return a content-range header to the user. - filters : schemas.FilterQueryParams, optional - An optional filter to constrain results by. offset : int, optional The start index of the items to return. limit : int, optional @@ -835,7 +1056,7 @@ def get_models( """ content, headers = crud.get_models( db=db, - filters=schemas.convert_filter_query_params_to_filter_obj(filters), + filters=schemas.Filter(), offset=offset, limit=limit, ) @@ -845,6 +1066,61 @@ def get_models( return content +@app.post( + "/models/filter", + status_code=200, + dependencies=[Depends(token_auth_scheme)], + tags=["Models"], + description="Fetch models using a filter.", +) +def get_filtered_models( + response: Response, + filters: schemas.Filter, + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. Returns all items when set to -1.", + ), + db: Session = Depends(get_db), +) -> list[schemas.Model]: + """ + Fetch models using a filter. + + POST Endpoint: `/models/filter` + + Parameters + ---------- + response: Response + The FastAPI response object. Used to return a content-range header to the user. + filters : Filter + The filter to constrain the results by. + offset : int, optional + The start index of the items to return. + limit : int, optional + The number of items to return. Returns all items when set to -1. + db : Session + The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user. + + Returns + ------- + list[schemas.Model] + A list of models. 
+ """ + try: + content, headers = crud.get_models( + db=db, + filters=filters, + offset=offset, + limit=limit, + ) + response.headers.update(headers) + return list(content) + except Exception as e: + raise exceptions.create_http_error(e) + + @app.get( "/models/{model_name}", dependencies=[Depends(token_auth_scheme)], @@ -1100,11 +1376,25 @@ def create_or_get_evaluations( ) def get_evaluations( response: Response, - datasets: str | None = None, - models: str | None = None, - evaluation_ids: str | None = None, - offset: int = 0, - limit: int = -1, + datasets: str + | None = Query( + None, description="An optional set of dataset names to constrain by." + ), + models: str + | None = Query( + None, description="An optional set of model names to constrain by." + ), + evaluation_ids: str + | None = Query( + None, description="An optional set of evaluation_ids to constrain by." + ), + offset: int = Query( + 0, description="The start index of the items to return." + ), + limit: int = Query( + -1, + description="The number of items to return. Returns all items when set to -1.", + ), metrics_to_sort_by: str | None = None, db: Session = Depends(get_db), ) -> list[schemas.EvaluationResponse]: @@ -1124,11 +1414,11 @@ def get_evaluations( response: Response The FastAPI response object. Used to return a content-range header to the user. datasets : str - An optional set of dataset names to return metrics for + An optional set of dataset names to constrain by. models : str - An optional set of model names to return metrics for + An optional set of model names to constrain by. evaluation_ids : str - An optional set of evaluation_ids to return metrics for + An optional set of evaluation_ids to constrain by. db : Session The database session to use. This parameter is a sqlalchemy dependency and shouldn't be submitted by the user. 
offset : int, optional diff --git a/api/valor_api/schemas/__init__.py b/api/valor_api/schemas/__init__.py index 91b7ac33a..60c44e012 100644 --- a/api/valor_api/schemas/__init__.py +++ b/api/valor_api/schemas/__init__.py @@ -5,14 +5,15 @@ EvaluationResponse, ) from .filters import ( - BooleanFilter, - DateTimeFilter, + Condition, Filter, - FilterQueryParams, - GeospatialFilter, - NumericFilter, - StringFilter, - convert_filter_query_params_to_filter_obj, + FilterOperator, + LogicalFunction, + LogicalOperator, + SupportedSymbol, + SupportedType, + Symbol, + Value, ) from .geometry import ( Box, @@ -103,14 +104,15 @@ "EvaluationRequest", "EvaluationResponse", "EvaluationParameters", - "StringFilter", - "NumericFilter", - "GeospatialFilter", - "BooleanFilter", - "DateTimeFilter", "Filter", - "FilterQueryParams", - "convert_filter_query_params_to_filter_obj", + "Symbol", + "Value", + "FilterOperator", + "Condition", + "LogicalFunction", + "LogicalOperator", + "SupportedType", + "SupportedSymbol", "Health", "Readiness", "DatasetSummary", diff --git a/api/valor_api/schemas/evaluation.py b/api/valor_api/schemas/evaluation.py index 4a5f3d840..8af97df79 100644 --- a/api/valor_api/schemas/evaluation.py +++ b/api/valor_api/schemas/evaluation.py @@ -10,6 +10,7 @@ ) from valor_api.schemas.filters import Filter from valor_api.schemas.metrics import ConfusionMatrixResponse, Metric +from valor_api.schemas.migrations import DeprecatedFilter from valor_api.schemas.types import Label LabelMapType = list[list[list[str]]] @@ -203,7 +204,7 @@ class EvaluationResponse(BaseModel): id: int dataset_names: list[str] model_name: str - filters: Filter + filters: Filter | DeprecatedFilter parameters: EvaluationParameters status: EvaluationStatus created_at: datetime.datetime diff --git a/api/valor_api/schemas/filters.py b/api/valor_api/schemas/filters.py index e6ae2ee51..b084030ec 100644 --- a/api/valor_api/schemas/filters.py +++ b/api/valor_api/schemas/filters.py @@ -1,16 +1,8 @@ -import json +from enum import Enum -from pydantic import ( - BaseModel, - ConfigDict, - create_model, - field_validator, - model_validator, -) +from pydantic import BaseModel, ConfigDict, model_validator from valor_api.enums import TaskType -from valor_api.schemas.geometry import GeoJSON -from valor_api.schemas.timestamp import Date, DateTime, Duration, Time from valor_api.schemas.validators import ( validate_type_bool, validate_type_box, @@ -30,55 +22,273 @@ ) -def validate_type_symbol(x): - if not isinstance(x, Symbol): - raise TypeError +class SupportedType(str, Enum): + BOOLEAN = "boolean" + INTEGER = "integer" + FLOAT = "float" + STRING = "string" + TASK_TYPE = "tasktype" + DATETIME = "datetime" + DATE = "date" + TIME = "time" + DURATION = "duration" + POINT = "point" + MULTIPOINT = "multipoint" + LINESTRING = "linestring" + MULTILINESTRING = "multilinestring" + POLYGON = "polygon" + BOX = "box" + MULTIPOLYGON = "multipolygon" + RASTER = "raster" + GEOJSON = "geojson" + EMBEDDING = "embedding" + LABEL = "label" + + +map_type_to_validator = { + SupportedType.BOOLEAN: validate_type_bool, + SupportedType.STRING: validate_type_string, + SupportedType.INTEGER: validate_type_integer, + SupportedType.FLOAT: validate_type_float, + SupportedType.DATETIME: validate_type_datetime, + SupportedType.DATE: validate_type_date, + SupportedType.TIME: validate_type_time, + SupportedType.DURATION: validate_type_duration, + SupportedType.POINT: validate_type_point, + SupportedType.MULTIPOINT: validate_type_multipoint, + SupportedType.LINESTRING: 
validate_type_linestring, + SupportedType.MULTILINESTRING: validate_type_multilinestring, + SupportedType.POLYGON: validate_type_polygon, + SupportedType.BOX: validate_type_box, + SupportedType.MULTIPOLYGON: validate_type_multipolygon, + SupportedType.TASK_TYPE: validate_type_string, + SupportedType.LABEL: None, + SupportedType.EMBEDDING: None, + SupportedType.RASTER: None, +} -filterable_types_to_validator = { - "symbol": validate_type_symbol, - "bool": validate_type_bool, - "string": validate_type_string, - "integer": validate_type_integer, - "float": validate_type_float, - "datetime": validate_type_datetime, - "date": validate_type_date, - "time": validate_type_time, - "duration": validate_type_duration, - "point": validate_type_point, - "multipoint": validate_type_multipoint, - "linestring": validate_type_linestring, - "multilinestring": validate_type_multilinestring, - "polygon": validate_type_polygon, - "box": validate_type_box, - "multipolygon": validate_type_multipolygon, - "tasktypeenum": validate_type_string, - "label": None, - "embedding": None, - "raster": None, +class SupportedSymbol(str, Enum): + DATASET_NAME = "dataset.name" + DATASET_META = "dataset.metadata" + MODEL_NAME = "model.name" + MODEL_META = "model.metadata" + DATUM_UID = "datum.uid" + DATUM_META = "datum.metadata" + ANNOTATION_META = "annotation.metadata" + TASK_TYPE = "annotation.task_type" + BOX = "annotation.bounding_box" + POLYGON = "annotation.polygon" + RASTER = "annotation.raster" + EMBEDDING = "annotation.embedding" + LABELS = "annotation.labels" + LABEL_KEY = "label.key" + LABEL_VALUE = "label.value" + SCORE = "label.score" + + # 'area' attribute + DATASET_META_AREA = "dataset.metadata.area" + MODEL_META_AREA = "model.metadata.area" + DATUM_META_AREA = "datum.metadata.area" + ANNOTATION_META_AREA = "annotation.metadata.area" + BOX_AREA = "annotation.bounding_box.area" + POLYGON_AREA = "annotation.polygon.area" + RASTER_AREA = "annotation.raster.area" + + # api-only attributes + DATASET_ID = "dataset.id" + MODEL_ID = "model.id" + DATUM_ID = "datum.id" + ANNOTATION_ID = "annotation.id" + GROUNDTRUTH_ID = "groundtruth.id" + PREDICTION_ID = "prediction.id" + LABEL_ID = "label.id" + EMBEDDING_ID = "embedding.id" + + +class FilterOperator(str, Enum): + EQ = "eq" + NE = "ne" + GT = "gt" + GTE = "gte" + LT = "lt" + LTE = "lte" + INTERSECTS = "intersects" + INSIDE = "inside" + OUTSIDE = "outside" + CONTAINS = "contains" + ISNULL = "isnull" + ISNOTNULL = "isnotnull" + + +map_type_to_operators = { + SupportedType.BOOLEAN: {FilterOperator.EQ, FilterOperator.NE}, + SupportedType.STRING: {FilterOperator.EQ, FilterOperator.NE}, + SupportedType.INTEGER: { + FilterOperator.EQ, + FilterOperator.NE, + FilterOperator.GT, + FilterOperator.GTE, + FilterOperator.LT, + FilterOperator.LTE, + }, + SupportedType.FLOAT: { + FilterOperator.EQ, + FilterOperator.NE, + FilterOperator.GT, + FilterOperator.GTE, + FilterOperator.LT, + FilterOperator.LTE, + }, + SupportedType.DATETIME: { + FilterOperator.EQ, + FilterOperator.NE, + FilterOperator.GT, + FilterOperator.GTE, + FilterOperator.LT, + FilterOperator.LTE, + }, + SupportedType.DATE: { + FilterOperator.EQ, + FilterOperator.NE, + FilterOperator.GT, + FilterOperator.GTE, + FilterOperator.LT, + FilterOperator.LTE, + }, + SupportedType.TIME: { + FilterOperator.EQ, + FilterOperator.NE, + FilterOperator.GT, + FilterOperator.GTE, + FilterOperator.LT, + FilterOperator.LTE, + }, + SupportedType.DURATION: { + FilterOperator.EQ, + FilterOperator.NE, + FilterOperator.GT, + FilterOperator.GTE, 
FilterOperator.LT, + FilterOperator.LTE, + }, + SupportedType.POINT: { + FilterOperator.INTERSECTS, + FilterOperator.INSIDE, + FilterOperator.OUTSIDE, + }, + SupportedType.MULTIPOINT: { + FilterOperator.INTERSECTS, + FilterOperator.INSIDE, + FilterOperator.OUTSIDE, + }, + SupportedType.LINESTRING: { + FilterOperator.INTERSECTS, + FilterOperator.INSIDE, + FilterOperator.OUTSIDE, + }, + SupportedType.MULTILINESTRING: { + FilterOperator.INTERSECTS, + FilterOperator.INSIDE, + FilterOperator.OUTSIDE, + }, + SupportedType.POLYGON: { + FilterOperator.INTERSECTS, + FilterOperator.INSIDE, + FilterOperator.OUTSIDE, + }, + SupportedType.BOX: { + FilterOperator.INTERSECTS, + FilterOperator.INSIDE, + FilterOperator.OUTSIDE, + }, + SupportedType.MULTIPOLYGON: { + FilterOperator.INTERSECTS, + FilterOperator.INSIDE, + FilterOperator.OUTSIDE, + }, + SupportedType.TASK_TYPE: {FilterOperator.EQ, FilterOperator.NE}, + SupportedType.LABEL: {FilterOperator.CONTAINS}, + SupportedType.EMBEDDING: set(), + SupportedType.RASTER: set(), } +class LogicalOperator(str, Enum): + AND = "and" + OR = "or" + NOT = "not" + + class Symbol(BaseModel): """ - A symbolic variable. + A symbolic value. Attributes ---------- - type : str - The data type that this symbol represents. name : str The name of the symbol. key : str, optional - Optional key to define dictionary access of a value. - attribute : str, optional - Optional attribute that modifies the underlying value. + Optional dictionary key if the symbol represents a dictionary value. """ - type: str - name: str + name: SupportedSymbol key: str | None = None - attribute: str | None = None + + @property + def type(self) -> SupportedType | None: + """ + Get the type associated with a symbol. + + Returns + ------- + SupportedType + The supported type. + + Raises + ------ + NotImplementedError + If the symbol does not have a type defined. 
+ """ + map_symbol_to_type = { + SupportedSymbol.DATASET_NAME: SupportedType.STRING, + SupportedSymbol.MODEL_NAME: SupportedType.STRING, + SupportedSymbol.DATUM_UID: SupportedType.STRING, + SupportedSymbol.TASK_TYPE: SupportedType.TASK_TYPE, + SupportedSymbol.BOX: SupportedType.BOX, + SupportedSymbol.POLYGON: SupportedType.POLYGON, + SupportedSymbol.EMBEDDING: SupportedType.EMBEDDING, + SupportedSymbol.LABEL_KEY: SupportedType.STRING, + SupportedSymbol.LABEL_VALUE: SupportedType.STRING, + SupportedSymbol.SCORE: SupportedType.FLOAT, + # 'area' attribue + SupportedSymbol.DATASET_META_AREA: SupportedType.FLOAT, + SupportedSymbol.MODEL_META_AREA: SupportedType.FLOAT, + SupportedSymbol.DATUM_META_AREA: SupportedType.FLOAT, + SupportedSymbol.ANNOTATION_META_AREA: SupportedType.FLOAT, + SupportedSymbol.BOX_AREA: SupportedType.FLOAT, + SupportedSymbol.POLYGON_AREA: SupportedType.FLOAT, + SupportedSymbol.RASTER_AREA: SupportedType.FLOAT, + # api-only + SupportedSymbol.DATASET_ID: SupportedType.INTEGER, + SupportedSymbol.MODEL_ID: SupportedType.INTEGER, + SupportedSymbol.DATUM_ID: SupportedType.INTEGER, + SupportedSymbol.ANNOTATION_ID: SupportedType.INTEGER, + SupportedSymbol.GROUNDTRUTH_ID: SupportedType.INTEGER, + SupportedSymbol.PREDICTION_ID: SupportedType.INTEGER, + SupportedSymbol.LABEL_ID: SupportedType.INTEGER, + SupportedSymbol.EMBEDDING_ID: SupportedType.INTEGER, + # unsupported + SupportedSymbol.DATASET_META: None, + SupportedSymbol.MODEL_META: None, + SupportedSymbol.DATUM_META: None, + SupportedSymbol.ANNOTATION_META: None, + SupportedSymbol.RASTER: None, + SupportedSymbol.LABELS: None, + } + if self.name not in map_symbol_to_type: + raise NotImplementedError(f"{self.name} is does not have a type.") + return map_symbol_to_type[self.name] class Value(BaseModel): @@ -87,1405 +297,212 @@ class Value(BaseModel): Attributes ---------- - type : str + type : SupportedType The type of the value. value : bool | int | float | str | list | dict The stored value. """ - type: str + type: SupportedType value: bool | int | float | str | list | dict model_config = ConfigDict(extra="forbid") + @model_validator(mode="after") + def _validate_value(self): + if self.type not in map_type_to_validator: + raise TypeError(f"'{self.type}' is not a valid type.") + map_type_to_validator[self.type](self.value) + return self -class Operands(BaseModel): - """ - Function operands. - - Attributes - ---------- - lhs : Symbol - The symbol representing a table column this function should be applied to. - rhs : Value - A value to perform a comparison over. - """ - - lhs: Symbol - rhs: Value - model_config = ConfigDict(extra="forbid") - - -class And(BaseModel): - """ - Logical function representing an AND operation. - - Attributes - ---------- - logical_and : list[FunctionType] - A list of functions to AND together. - """ - - logical_and: list["FunctionType"] - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def args(self) -> list["FunctionType"]: - """Returns the list of functional arguments.""" - return self.logical_and - - -class Or(BaseModel): - """ - Logical function representing an OR operation. - - Attributes - ---------- - logical_or : list[FunctionType] - A list of functions to OR together. 
- """ - - logical_or: list["FunctionType"] - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def args(self): - """Returns the list of functional arguments.""" - return self.logical_or - - -class Not(BaseModel): - """ - Logical function representing an OR operation. - - Attributes - ---------- - logical_not : FunctionType - A functions to logically negate. - """ - - logical_not: "FunctionType" - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def arg(self): - """Returns the functional argument.""" - return self.logical_not - - -class IsNull(BaseModel): - """ - Checks if symbol represents a null value. - - Attributes - ---------- - isnull : Symbol - The symbolic argument. - """ - - isnull: Symbol - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def arg(self): - """Returns the symbolic argument.""" - return self.isnull - - -class IsNotNull(BaseModel): - """ - Checks if symbol represents an existing value. - - Attributes - ---------- - isnotnull : Symbol - The symbolic argument. - """ - - isnotnull: Symbol - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def arg(self): - """Returns the symbolic argument.""" - return self.isnotnull - - -class Equal(BaseModel): - """ - Checks if symbol is equal to a provided value. - - Attributes - ---------- - eq : Operands - The operands of the function. - """ - - eq: Operands - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def lhs(self): - """Returns the lhs operand.""" - return self.eq.lhs - - @property - def rhs(self): - """Returns the rhs operand.""" - return self.eq.rhs - - -class NotEqual(BaseModel): - """ - Checks if symbol is not equal to a provided value. - - Attributes - ---------- - ne : Operands - The operands of the function. - """ - - ne: Operands - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def lhs(self): - """Returns the lhs operand.""" - return self.ne.lhs - - @property - def rhs(self): - """Returns the rhs operand.""" - return self.ne.rhs - - -class GreaterThan(BaseModel): - """ - Checks if symbol is greater than a provided value. - - Attributes - ---------- - gt : Operands - The operands of the function. - """ - - gt: Operands - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def lhs(self): - """Returns the lhs operand.""" - return self.gt.lhs - - @property - def rhs(self): - """Returns the rhs operand.""" - return self.gt.rhs - - -class GreaterThanEqual(BaseModel): - """ - Checks if symbol is greater than or equal to a provided value. - - Attributes - ---------- - ge : Operands - The operands of the function. 
- """ - - ge: Operands - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def lhs(self): - """Returns the lhs operand.""" - return self.ge.lhs - - @property - def rhs(self): - """Returns the rhs operand.""" - return self.ge.rhs - - -class LessThan(BaseModel): - """ - Checks if symbol is less than a provided value. - - Attributes - ---------- - lt : Operands - The operands of the function. - """ - - lt: Operands - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def lhs(self): - """Returns the lhs operand.""" - return self.lt.lhs - - @property - def rhs(self): - """Returns the rhs operand.""" - return self.lt.rhs - - -class LessThanEqual(BaseModel): - """ - Checks if symbol is less than or equal to a provided value. - - Attributes - ---------- - le : Operands - The operands of the function. - """ - - le: Operands - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def lhs(self): - """Returns the lhs operand.""" - return self.le.lhs - - @property - def rhs(self): - """Returns the rhs operand.""" - return self.le.rhs - - -class Intersects(BaseModel): - """ - Checks if symbol intersects a provided value. - - Attributes - ---------- - intersects : Operands - The operands of the function. - """ - - intersects: Operands - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def lhs(self): - """Returns the lhs operand.""" - return self.intersects.lhs - - @property - def rhs(self): - """Returns the rhs operand.""" - return self.intersects.rhs - - -class Inside(BaseModel): - """ - Checks if symbol is inside a provided value. - - Attributes - ---------- - inside : Operands - The operands of the function. - """ - - inside: Operands - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def lhs(self): - """Returns the lhs operand.""" - return self.inside.lhs - - @property - def rhs(self): - """Returns the rhs operand.""" - return self.inside.rhs - - -class Outside(BaseModel): - """ - Checks if symbol is outside a provided value. - - Attributes - ---------- - outside : Operands - The operands of the function. - """ - - outside: Operands - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def lhs(self): - """Returns the lhs operand.""" - return self.outside.lhs - - @property - def rhs(self): - """Returns the rhs operand.""" - return self.outside.rhs - - -class Contains(BaseModel): - """ - Checks if symbolic list contains a provided value. - - Attributes - ---------- - contains : Operands - The operands of the function. 
- """ - - contains: Operands - model_config = ConfigDict(extra="forbid") - - @property - def op(self) -> str: - """Returns the operator name.""" - return type(self).__name__.lower() - - @property - def lhs(self): - """Returns the lhs operand.""" - return self.contains.lhs - - @property - def rhs(self): - """Returns the rhs operand.""" - return self.contains.rhs - - -NArgFunction = And | Or -OneArgFunction = Not | IsNull | IsNotNull -TwoArgFunction = ( - Equal - | NotEqual - | GreaterThan - | GreaterThanEqual - | LessThan - | LessThanEqual - | Intersects - | Inside - | Outside - | Contains -) -FunctionType = OneArgFunction | TwoArgFunction | NArgFunction - - -class AdvancedFilter(BaseModel): - """ - Filter schema that stores filters as logical trees under tables. - - The intent is for this object to replace 'Filter' in a future PR. - """ - - datasets: FunctionType | None = None - models: FunctionType | None = None - datums: FunctionType | None = None - annotations: FunctionType | None = None - groundtruths: FunctionType | None = None - predictions: FunctionType | None = None - labels: FunctionType | None = None - embeddings: FunctionType | None = None - - -# - - -class StringFilter(BaseModel): - """ - Used to filter on string values that meet some user-defined condition. - - Attributes - ---------- - value : str - The value to compare the specific field against. - operator : str - The operator to use for comparison. Should be one of `["==", "!="]`. - - Raises - ------ - ValueError - If the `operator` doesn't match one of the allowed patterns. - """ - - value: str - operator: str = "==" - - @field_validator("operator") - @classmethod - def _validate_comparison_operator(cls, op: str) -> str: - """Validate the operator.""" - allowed_operators = ["==", "!="] - if op not in allowed_operators: - raise ValueError( - f"Invalid comparison operator '{op}'. Allowed operators are {', '.join(allowed_operators)}." - ) - return op - - model_config = ConfigDict(extra="forbid") - - def to_function( - self, name: str, key: str | None = None, attribute: str | None = None - ) -> FunctionType: + def supports_operator(self, op: FilterOperator): """ - Converts the filter object into a function. - - This is for backwards-compatibility. + Validates whether value type supports operator. Parameters ---------- - name : str - The symbol name. - key : str, optional - An optional key to access the symbol by. - attribute : str, optional - An optional attribute to modify the symbol with. + op : FilterOperator + The operator to validate. - Returns - ------- - FunctionType + Raises + ------ + TypeError + If the type does not support this operation. """ - operands = Operands( - lhs=Symbol(type="string", name=name, key=key, attribute=attribute), - rhs=Value(type="string", value=self.value), - ) - match self.operator: - case "==": - return Equal(eq=operands) - case "!=": - return NotEqual(ne=operands) - case _: - raise NotImplementedError(self.operator) - + return -class NumericFilter(BaseModel): - """ - Used to filter on numeric values that meet some user-defined condition. - - Attributes - ---------- - value : float - The value to compare the specific field against. - operator : str - The operator to use for comparison. Should be one of `[">", "<", ">=", "<=", "==", "!="]`. - - Raises - ------ - ValueError - If the `operator` doesn't match one of the allowed patterns. 
- """ - - value: float - operator: str = "==" - - @field_validator("operator") @classmethod - def _validate_comparison_operator(cls, op: str) -> str: - """Validate the operator.""" - allowed_operators = [">", "<", ">=", "<=", "==", "!="] - if op not in allowed_operators: - raise ValueError( - f"Invalid comparison operator '{op}'. Allowed operators are {', '.join(allowed_operators)}." + def infer( + cls, + value: bool | int | float | str | TaskType, + ): + type_ = type(value) + if type_ is bool: + return cls(type=SupportedType.BOOLEAN, value=value) + elif type_ is int: + return cls(type=SupportedType.INTEGER, value=value) + elif type_ is float: + return cls(type=SupportedType.FLOAT, value=value) + elif type_ is str: + return cls(type=SupportedType.STRING, value=value) + elif type_ is TaskType: + return cls(type=SupportedType.TASK_TYPE, value=value) + else: + raise TypeError( + f"Type inference is not supported for type '{type_}'." ) - return op - model_config = ConfigDict(extra="forbid") - def to_function( - self, - name: str, - key: str | None = None, - attribute: str | None = None, - type_str: str = "float", - ) -> FunctionType: - """ - Converts the filter object into a function. - - This is for backwards-compatibility. - - Parameters - ---------- - name : str - The symbol name. - key : str, optional - An optional key to access the symbol by. - attribute : str, optional - An optional attribute to modify the symbol with. - type_str: str, default="float" - An optional override for the symbolic type. +class Condition(BaseModel): + lhs: Symbol + rhs: Value | None = None + op: FilterOperator + model_config = ConfigDict(extra="forbid") - Returns - ------- - FunctionType - """ - operands = Operands( - lhs=Symbol(type=type_str, name=name, key=key, attribute=attribute), - rhs=Value(type="float", value=self.value), - ) - match self.operator: - case "==": - return Equal(eq=operands) - case "!=": - return NotEqual(ne=operands) - case ">": - return GreaterThan(gt=operands) - case ">=": - return GreaterThanEqual(ge=operands) - case "<": - return LessThan(lt=operands) - case "<=": - return LessThanEqual(le=operands) + @model_validator(mode="after") + def _validate_object(self): + + # validate operator + match self.op: + case ( + FilterOperator.EQ + | FilterOperator.NE + | FilterOperator.GT + | FilterOperator.GTE + | FilterOperator.LT + | FilterOperator.LTE + | FilterOperator.INTERSECTS + | FilterOperator.INSIDE + | FilterOperator.OUTSIDE + | FilterOperator.CONTAINS + ): + if self.rhs is None: + raise ValueError( + f"Operator '{self.op}' requires a rhs value." + ) + elif self.rhs.type not in map_type_to_operators: + raise ValueError( + f"Value type '{self.rhs.type}' does not support operator '{self.op}'." + ) + case (FilterOperator.ISNULL | FilterOperator.ISNOTNULL): + if self.rhs is not None: + raise ValueError( + f"Operator '{self.op}' does not support a rhs value." + ) case _: - raise NotImplementedError(self.operator) - - -class BooleanFilter(BaseModel): - """ - Used to filter on boolean values that meet some user-defined condition. + raise NotImplementedError( + f"Filter operator '{self.op}' is not implemented." + ) - Attributes - ---------- - value : bool - The value to compare the specific field against. - operator : str - The operator to use for comparison. Should be one of `["==", "!="]`. + return self - Raises - ------ - ValueError - If the `operator` doesn't match one of the allowed patterns. 
- """ - value: bool - operator: str = "==" +class LogicalFunction(BaseModel): + args: "Condition | LogicalFunction | list[Condition] | list[LogicalFunction] | list[Condition | LogicalFunction]" + op: LogicalOperator model_config = ConfigDict(extra="forbid") - @field_validator("operator") @classmethod - def _validate_comparison_operator(cls, op: str) -> str: - """Validate the operator.""" - allowed_operators = ["==", "!="] - if op not in allowed_operators: - raise ValueError( - f"Invalid comparison operator '{op}'. Allowed operators are {', '.join(allowed_operators)}." - ) - return op - - def to_function( - self, name: str, key: str | None = None, attribute: str | None = None - ) -> FunctionType: + def and_( + cls, *args: "Condition | LogicalFunction | None" + ) -> "Condition | LogicalFunction": """ - Converts the filter object into a function. + Performs an AND operation if more than one element exists. - This is for backwards-compatibility. + This is useful when passing the results of a list comprehension. Parameters ---------- - name : str - The symbol name. - key : str, optional - An optional key to access the symbol by. - attribute : str, optional - An optional attribute to modify the symbol with. + *args + Variable length argument list consiting of Condition, LogicalFunction or None type values. Returns ------- FunctionType """ - operands = Operands( - lhs=Symbol( - type="boolean", name=name, key=key, attribute=attribute - ), - rhs=Value(type="boolean", value=self.value), - ) - match self.operator: - case "==": - return Equal(eq=operands) - case "!=": - return NotEqual(ne=operands) - case _: - raise NotImplementedError(self.operator) - - -class GeospatialFilter(BaseModel): - """ - Used to filter on geospatial coordinates. - - Attributes - ---------- - value : GeoJSON - A dictionary containing a Point, Polygon, or MultiPolygon. Mirrors `shapely's` `GeoJSON` format. - operator : str - The operator to use for comparison. Should be one of `intersect`, `inside`, or `outside`. - - """ - - value: GeoJSON - operator: str = "intersect" - model_config = ConfigDict(extra="forbid") - - @field_validator("operator") - @classmethod - def _validate_comparison_operator(cls, op: str) -> str: - """Validate the operator.""" - allowed_operators = ["inside", "outside", "intersect"] - if op not in allowed_operators: - raise ValueError( - f"Invalid comparison operator '{op}'. Allowed operators are {', '.join(allowed_operators)}." + items = [condition for condition in args if condition is not None] + if len(items) > 1: + return cls( + args=items, + op=LogicalOperator.AND, ) - return op + elif len(items) == 1: + return items[0] + else: + raise ValueError("Passed an empty list.") - def to_function( - self, name: str, key: str | None = None, attribute: str | None = None - ) -> FunctionType: + @classmethod + def or_( + cls, *args: "Condition | LogicalFunction | None" + ) -> "Condition | LogicalFunction": """ - Converts the filter object into a function. + Performs an OR operation if more than one element exists. - This is for backwards-compatibility. + This is useful when passing the results of a list comprehension. Parameters ---------- - name : str - The symbol name. - key : str, optional - An optional key to access the symbol by. - attribute : str, optional - An optional attribute to modify the symbol with. + *args + Variable length argument list consiting of Condition, LogicalFunction or None type values. 
Returns ------- FunctionType """ - operands = Operands( - lhs=Symbol( - type="geojson", name=name, key=key, attribute=attribute - ), - rhs=Value(type="geojson", value=self.value.geometry.to_json()), - ) - match self.operator: - case "inside": - return Inside(inside=operands) - case "outside": - return Outside(outside=operands) - case "intersect": - return Intersects(intersects=operands) - case _: - raise NotImplementedError(self.operator) - - -class DateTimeFilter(BaseModel): - """ - Used to filter on datetime values that meet some user-defined condition. - - Attributes - ---------- - value : DateTime - The value to compare the specific field against. - operator : str - The operator to use for comparison. Should be one of `[">", "<", ">=", "<=", "==", "!="]`. - - Raises - ------ - ValueError - If the `operator` doesn't match one of the allowed patterns. - """ - - value: DateTime | Date | Time | Duration - operator: str = "==" - - @model_validator(mode="before") - @classmethod - def _unpack_timestamp_value(cls, values): - # TODO - This will be addressed in Issue #526 - if isinstance(values, dict) and (value := values.get("value")): - if isinstance(value, dict) and ( - "datetime" in value - or "date" in value - or "time" in value - or "duration" in value - ): - k, v = list(value.items())[0] - types = { - "datetime": DateTime, - "date": Date, - "time": Time, - "duration": Duration, - } - values["value"] = types[k](value=v) - return values - - @field_validator("operator") - @classmethod - def _validate_comparison_operator(cls, op: str) -> str: - """Validate the operator.""" - allowed_operators = [">", "<", ">=", "<=", "==", "!="] - if op not in allowed_operators: - raise ValueError( - f"Invalid comparison operator '{op}'. Allowed operators are {', '.join(allowed_operators)}." ) - return op - - model_config = ConfigDict(extra="forbid") + items = [condition for condition in args if condition is not None] + if len(items) > 1: + return cls( + args=items, + op=LogicalOperator.OR, ) + elif len(items) == 1: + return items[0] + else: + raise ValueError("Passed an empty list.") - def to_function( - self, name: str, key: str | None = None, attribute: str | None = None - ) -> FunctionType: + @classmethod + def not_( + cls, arg: "Condition | LogicalFunction" + ) -> "Condition | LogicalFunction": """ - Converts the filter object into a function. + Performs a NOT operation over a function or condition. - This is for backwards-compatibility. + If the passed argument is a NOT function, this will return the contents. Parameters ---------- - name : str - The symbol name. - key : str, optional - An optional key to access the symbol by. - attribute : str, optional - An optional attribute to modify the symbol with. + arg : Condition | LogicalFunction + A condition or logical function to negate. 
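or_ behaves symmetrically; a sketch showing that None arguments (e.g. optional constraints that were never supplied) are silently dropped. The name_equals helper is invented for this sketch:

from valor_api import schemas

def name_equals(name: str) -> schemas.Condition:
    # Helper for this sketch only: dataset.name == <name>.
    return schemas.Condition(
        lhs=schemas.Symbol(name=schemas.SupportedSymbol.DATASET_NAME),
        rhs=schemas.Value.infer(name),
        op=schemas.FilterOperator.EQ,
    )

maybe_extra = None  # an optional constraint that was never supplied
tree = schemas.LogicalFunction.or_(name_equals("a"), name_equals("b"), maybe_extra)
assert tree.op == schemas.LogicalOperator.OR  # the None argument was dropped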
Returns ------- FunctionType """ - type_str = type(self.value).__name__.lower() - operands = Operands( - lhs=Symbol(type=type_str, name=name, key=key, attribute=attribute), - rhs=Value(type=type_str, value=self.value.value), + if isinstance(arg, LogicalFunction) and arg.op == LogicalOperator.NOT: + if isinstance(arg.args, list): + raise RuntimeError("Pydantic should have caught this.") + return arg.args + return cls( + args=arg, + op=LogicalOperator.NOT, ) - match self.operator: - case "==": - return Equal(eq=operands) - case "!=": - return NotEqual(ne=operands) - case ">": - return GreaterThan(gt=operands) - case ">=": - return GreaterThanEqual(ge=operands) - case "<": - return LessThan(lt=operands) - case "<=": - return LessThanEqual(le=operands) - case _: - raise NotImplementedError(self.operator) + + +FunctionType = Condition | LogicalFunction class Filter(BaseModel): """ - Used to filter Evaluations according to specific, user-defined criteria. + Filter schema that stores filters as logical trees under tables. - Attributes - ---------- - dataset_names: List[str], default=None - A list of `Dataset` names to filter on. - dataset_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None - A dictionary of `Dataset` metadata to filter on. - model_names: List[str], default=None - A list of `Model` names to filter on. - model_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None - A dictionary of `Model` metadata to filter on. - datum_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None - A dictionary of `Datum` metadata to filter on. - task_types: List[TaskType], default=None - A list of task types to filter on. - annotation_metadata: Dict[str, list[StringFilter | NumericFilter | DateTimeFilter | BooleanFilter | GeospatialFilter]], default=None - A dictionary of `Annotation` metadata to filter on. - require_bounding_box : bool, optional - A toggle for filtering by bounding boxes. - bounding_box_area : bool, optional - An optional constraint to filter by bounding box area. - require_polygon : bool, optional - A toggle for filtering by polygons. - polygon_area : bool, optional - An optional constraint to filter by polygon area. - require_raster : bool, optional - A toggle for filtering by rasters. - raster_area : bool, optional - An optional constraint to filter by raster area. - labels: List[Dict[str, str]], default=None - A dictionary of `Labels' to filter on. - label_ids: List[int], default=None - A list of `Label` IDs to filter on. - label_keys: List[str] = None, default=None - A list of `Label` keys to filter on. - label_scores: List[ValueFilter], default=None - A list of `ValueFilters` which are used to filter `Evaluations` according to the `Model`'s prediction scores. + It replaces the deprecated flat filter schema (see schemas.migrations.DeprecatedFilter); each field holds an optional logical tree applied to the corresponding table. 
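A sketch of not_'s double-negation unwrapping, and of hanging a finished tree off the Filter object defined here:

from valor_api import schemas

no_raster = schemas.Condition(
    lhs=schemas.Symbol(name=schemas.SupportedSymbol.RASTER),
    op=schemas.FilterOperator.ISNULL,
)
negated = schemas.LogicalFunction.not_(no_raster)
# A second not_ unwraps instead of nesting NOT(NOT(...)).
assert schemas.LogicalFunction.not_(negated) == no_raster

# Trees are attached per table on the Filter object.
f = schemas.Filter(annotations=negated)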
""" - # datasets - dataset_names: list[str] | None = None - dataset_metadata: ( - dict[ - str, - list[ - StringFilter - | NumericFilter - | DateTimeFilter - | BooleanFilter - | GeospatialFilter - ], - ] - | None - ) = None - - # models - model_names: list[str] | None = None - model_metadata: ( - dict[ - str, - list[ - StringFilter - | NumericFilter - | DateTimeFilter - | BooleanFilter - | GeospatialFilter - ], - ] - | None - ) = None - - # datums - datum_uids: list[str] | None = None - datum_metadata: ( - dict[ - str, - list[ - StringFilter - | NumericFilter - | DateTimeFilter - | BooleanFilter - | GeospatialFilter - ], - ] - | None - ) = None - - # annotations - task_types: list[TaskType] | None = None - annotation_metadata: ( - dict[ - str, - list[ - StringFilter - | NumericFilter - | DateTimeFilter - | BooleanFilter - | GeospatialFilter - ], - ] - | None - ) = None - require_bounding_box: bool | None = None - bounding_box_area: list[NumericFilter] | None = None - require_polygon: bool | None = None - polygon_area: list[NumericFilter] | None = None - require_raster: bool | None = None - raster_area: list[NumericFilter] | None = None - - # labels - labels: list[dict[str, str]] | None = None - label_ids: list[int] | None = None - label_keys: list[str] | None = None - - # predictions - label_scores: list[NumericFilter] | None = None - - # pydantic settings - model_config = ConfigDict( - extra="forbid", - protected_namespaces=("protected_",), - ) - - def to_advanced_filter( - self, - ignore_groundtruths: bool = False, - ignore_predictions: bool = False, - ) -> AdvancedFilter: - def filter_equatable( - name: str, - values: list[str] | list[TaskType] | list[int], - type_str: str = "string", - ) -> FunctionType | None: - if len(values) > 1: - return Or( - logical_or=[ - Equal( - eq=Operands( - lhs=Symbol(type=type_str, name=name), - rhs=Value( - type=type_str, - value=value.value - if isinstance(value, TaskType) - else value, - ), - ) - ) - for value in values - ] - ) - elif len(values) == 1: - value = ( - values[0].value - if isinstance(values[0], TaskType) - else values[0] - ) - return Equal( - eq=Operands( - lhs=Symbol(type=type_str, name=name), - rhs=Value(type=type_str, value=value), - ) - ) - else: - return None - - def filter_metadata( - name: str, values: dict[str, list] - ) -> FunctionType | None: - filter_expressions = [ - f.to_function(name=name, key=key) - for key, filters in values.items() - for f in filters - ] - if len(filter_expressions) > 1: - return And(logical_and=filter_expressions) - elif len(filter_expressions) == 1: - return filter_expressions[0] - else: - return None - - def annotation_geometry_exist( - type_str: str, name: str, exists: bool - ) -> IsNull | IsNotNull: - if exists: - return IsNotNull(isnotnull=Symbol(type=type_str, name=name)) - else: - return IsNull(isnull=Symbol(type=type_str, name=name)) - - def filter_numerics( - type_str: str, - name: str, - values: list[NumericFilter], - attribute: str | None = None, - ) -> FunctionType | None: - expressions = [ - f.to_function( - name=name, attribute=attribute, type_str=type_str - ) - for f in values - ] - if len(expressions) > 1: - return And(logical_and=expressions) - elif len(expressions) == 1: - return expressions[0] - else: - return None - - def filter_labels( - values: list[dict[str, str]], - ) -> FunctionType | None: - if len(values) > 1: - return Or( - logical_or=[ - And( - logical_and=[ - Equal( - eq=Operands( - lhs=Symbol( - type="string", name="label.key" - ), - rhs=Value(type="string", value=key), - ) - 
), - Equal( - eq=Operands( - lhs=Symbol( - type="string", name="label.value" - ), - rhs=Value(type="string", value=value), - ) - ), - ] - ) - for label in values - for key, value in label.items() - ] - ) - elif len(values) == 1: - key = list(values[0].keys())[0] - value = list(values[0].values())[0] - return And( - logical_and=[ - Equal( - eq=Operands( - lhs=Symbol(type="string", name="label.key"), - rhs=Value(type="string", value=key), - ) - ), - Equal( - eq=Operands( - lhs=Symbol(type="string", name="label.value"), - rhs=Value(type="string", value=value), - ) - ), - ] - ) - else: - return None - - def filter_task_types(values: list[TaskType]): - if len(values) > 1: - return Or( - logical_or=[ - Contains( - contains=Operands( - lhs=Symbol( - type="tasktype", - name="annotation.task_type", - ), - rhs=Value( - type="tasktype", value=task_type.value - ), - ) - ) - for task_type in values - ] - ) - elif len(values) == 1: - return Contains( - contains=Operands( - lhs=Symbol( - type="tasktype", name="annotation.task_type" - ), - rhs=Value(type="tasktype", value=values[0].value), - ) - ) - - dataset_names = None - dataset_metadata = None - model_names = None - model_metadata = None - datum_uids = None - datum_metadata = None - annotation_task_types = None - annotation_metadata = None - annotation_box = None - annotation_box_area = None - annotation_polygon = None - annotation_polygon_area = None - annotation_raster = None - annotation_raster_area = None - labels = None - label_keys = None - label_scores = None - label_ids = None - - if self.dataset_names: - dataset_names = filter_equatable( - name="dataset.name", values=self.dataset_names - ) - if self.dataset_metadata: - dataset_metadata = filter_metadata( - name="dataset.metadata", values=self.dataset_metadata - ) - if self.model_names: - model_names = filter_equatable( - name="model.name", values=self.model_names - ) - if self.model_metadata: - model_metadata = filter_metadata( - name="model.metadata", values=self.model_metadata - ) - if self.datum_uids: - datum_uids = filter_equatable( - name="datum.uid", values=self.datum_uids - ) - if self.datum_metadata: - datum_metadata = filter_metadata( - name="datum.metadata", values=self.datum_metadata - ) - if self.task_types: - annotation_task_types = filter_task_types(values=self.task_types) - if self.annotation_metadata: - annotation_metadata = filter_metadata( - name="annotation.metadata", values=self.annotation_metadata - ) - if self.require_bounding_box is not None: - annotation_box = annotation_geometry_exist( - type_str="box", - name="annotation.bounding_box", - exists=self.require_bounding_box, - ) - if self.bounding_box_area: - annotation_box_area = filter_numerics( - type_str="box", - name="annotation.bounding_box", - attribute="area", - values=self.bounding_box_area, - ) - if self.require_polygon is not None: - annotation_polygon = annotation_geometry_exist( - type_str="polygon", - name="annotation.polygon", - exists=self.require_polygon, - ) - if self.polygon_area: - annotation_polygon_area = filter_numerics( - type_str="polygon", - name="annotation.polygon", - attribute="area", - values=self.polygon_area, - ) - if self.require_raster is not None: - annotation_raster = annotation_geometry_exist( - type_str="raster", - name="annotation.raster", - exists=self.require_raster, - ) - if self.raster_area: - annotation_raster_area = filter_numerics( - type_str="raster", - name="annotation.raster", - attribute="area", - values=self.raster_area, - ) - if self.labels: - labels = 
filter_labels(self.labels) - if self.label_keys: - label_keys = filter_equatable( - name="label.key", values=self.label_keys - ) - if self.label_scores: - label_scores = filter_numerics( - type_str="float", - name="label.score", - values=self.label_scores, - ) - if self.label_ids: - label_ids = filter_equatable( - name="label.id", values=self.label_ids, type_str="integer" - ) - - def and_if_list(values: list[FunctionType]) -> FunctionType | None: - if len(values) > 1: - return And(logical_and=values) - elif len(values) == 1: - return values[0] - else: - return None - - groundtruth_filter = and_if_list( - [ - expr - for expr in [ - dataset_names, - dataset_metadata, - datum_uids, - datum_metadata, - annotation_task_types, - annotation_metadata, - annotation_box, - annotation_box_area, - annotation_polygon, - annotation_polygon_area, - annotation_raster, - annotation_raster_area, - labels, - label_keys, - label_ids, - ] - if expr is not None - ] - ) - prediction_filter = and_if_list( - [ - expr - for expr in [ - dataset_names, - dataset_metadata, - datum_uids, - datum_metadata, - model_names, - model_metadata, - annotation_task_types, - annotation_metadata, - annotation_box, - annotation_box_area, - annotation_polygon, - annotation_polygon_area, - annotation_raster, - annotation_raster_area, - labels, - label_keys, - label_ids, - label_scores, - ] - if expr is not None - ] - ) - annotation_filter = and_if_list( - [ - expr - for expr in [ - datum_uids, - datum_metadata, - annotation_task_types, - annotation_metadata, - annotation_box, - annotation_box_area, - annotation_polygon, - annotation_polygon_area, - annotation_raster, - annotation_raster_area, - ] - if expr is not None - ] - ) - label_filter = and_if_list( - [ - expr - for expr in [ - labels, - label_keys, - label_ids, - label_scores, - ] - if expr is not None - ] - ) - dataset_filter = and_if_list( - [ - expr - for expr in [ - dataset_names, - dataset_metadata, - ] - if expr is not None - ] - ) - model_filter = and_if_list( - [ - expr - for expr in [ - model_names, - model_metadata, - ] - if expr is not None - ] - ) - - f = AdvancedFilter() - if ignore_groundtruths: - f.predictions = prediction_filter - elif ignore_predictions: - f.groundtruths = groundtruth_filter - else: - f.annotations = annotation_filter - f.labels = label_filter - f.models = model_filter - f.datasets = dataset_filter - - return f - - -# we want to pass a Filter as a query parameters instead of a body -# so we make a new model `FilterQueryParams` where every value is a JSON string -model_fields = Filter.model_fields -model_def_dict = {kwarg: (str | None, None) for kwarg in model_fields} -FilterQueryParams = create_model( - "FilterQueryParams", - __config__=ConfigDict(extra="forbid"), - **model_def_dict, # type: ignore -) - - -def convert_filter_query_params_to_filter_obj(filter_query_params) -> Filter: - """Converts a `FilterQueryParams` object to a `Filter` object by - loading from JSON strings. - - Parameters - ---------- - filter_query_params : FilterQueryParams - The `FilterQueryParams` object to convert. - - Returns - ------- - Filter - The converted `Filter` object. 
- """ - return Filter( - **{ - k: json.loads(v if v is not None else "null") - for k, v in filter_query_params.model_dump().items() - } - ) + datasets: FunctionType | None = None + models: FunctionType | None = None + datums: FunctionType | None = None + annotations: FunctionType | None = None + groundtruths: FunctionType | None = None + predictions: FunctionType | None = None + labels: FunctionType | None = None + embeddings: FunctionType | None = None + model_config = ConfigDict(extra="forbid") diff --git a/api/valor_api/schemas/migrations.py b/api/valor_api/schemas/migrations.py new file mode 100644 index 000000000..f943e414a --- /dev/null +++ b/api/valor_api/schemas/migrations.py @@ -0,0 +1,85 @@ +from pydantic import BaseModel, ConfigDict + +from valor_api.enums import TaskType + + +class DeprecatedFilter(BaseModel): + """ + Deprecated Filter Schema. + + Used to retrieve old evaluations. + + Attributes + ---------- + dataset_names: List[str], default=None + A list of `Dataset` names to filter on. + dataset_metadata: Dict[dict], default=None + A dictionary of `Dataset` metadata to filter on. + model_names: List[str], default=None + A list of `Model` names to filter on. + model_metadata: Dict[dict], default=None + A dictionary of `Model` metadata to filter on. + datum_metadata: Dict[dict], default=None + A dictionary of `Datum` metadata to filter on. + task_types: List[TaskType], default=None + A list of task types to filter on. + annotation_metadata: Dict[dict], default=None + A dictionary of `Annotation` metadata to filter on. + require_bounding_box : bool, optional + A toggle for filtering by bounding boxes. + bounding_box_area : bool, optional + An optional constraint to filter by bounding box area. + require_polygon : bool, optional + A toggle for filtering by polygons. + polygon_area : bool, optional + An optional constraint to filter by polygon area. + require_raster : bool, optional + A toggle for filtering by rasters. + raster_area : bool, optional + An optional constraint to filter by raster area. + labels: List[Dict[str, str]], default=None + A dictionary of `Labels' to filter on. + label_ids: List[int], default=None + A list of `Label` IDs to filter on. + label_keys: List[str] = None, default=None + A list of `Label` keys to filter on. + label_scores: List[ValueFilter], default=None + A list of `ValueFilters` which are used to filter `Evaluations` according to the `Model`'s prediction scores. 
+ """ + + # datasets + dataset_names: list[str] | None = None + dataset_metadata: dict | None = None + + # models + model_names: list[str] | None = None + model_metadata: dict | None = None + + # datums + datum_uids: list[str] | None = None + datum_metadata: dict | None = None + + # annotations + task_types: list[TaskType] | None = None + annotation_metadata: dict | None = None + + require_bounding_box: bool | None = None + bounding_box_area: list[dict] | None = None + require_polygon: bool | None = None + polygon_area: list[dict] | None = None + require_raster: bool | None = None + raster_area: list[dict] | None = None + + # labels + labels: list[dict[str, str]] | None = None + label_ids: list[int] | None = None + label_keys: list[str] | None = None + + # predictions + label_scores: list[dict] | None = None + + # pydantic settings + model_config = ConfigDict( + extra="forbid", + protected_namespaces=("protected_",), + ) diff --git a/client/unit-tests/coretypes/test_filtering.py b/client/unit-tests/coretypes/test_filtering.py index 065454f7a..9916e84c9 100644 --- a/client/unit-tests/coretypes/test_filtering.py +++ b/client/unit-tests/coretypes/test_filtering.py @@ -4,9 +4,7 @@ import pytest from valor import Annotation, Dataset, Filter, Label, Model -from valor.coretypes import _format_filter -from valor.schemas import Polygon -from valor.schemas.filters import Constraint +from valor.schemas import And, Polygon @pytest.fixture @@ -34,45 +32,12 @@ def geojson( return {"type": "Polygon", "coordinates": polygon.get_value()} -def test__format_filter( +def test_complex_filter( geojson: Dict[str, Union[str, List[List[Tuple[float, float]]]]], polygon: Polygon, ): - - filter_object = Filter( - dataset_names=["a", "b", "c"], - model_names=["x", "y", "z"], - label_scores=[Constraint(value=0.75, operator=">")], - polygon_area=[ - Constraint(value=1000, operator=">"), - Constraint(value=5000, operator="<"), - ], - raster_area=[ - Constraint(value=100, operator=">"), - Constraint(value=500, operator="<"), - ], - dataset_metadata={ - "some_str": [Constraint(value="foobar", operator="==")], - "some_float": [Constraint(value=0.123, operator=">=")], - "some_datetime": [ - Constraint( - value={ - "duration": datetime.timedelta(days=1).total_seconds() - }, - operator=">", - ) - ], - "some_geospatial": [ - Constraint( - value=geojson, - operator="intersect", - ) - ], - }, - ) - - filter_from_constraints = Filter.create( - [ + filter_from_constraints = Filter( + annotations=And( Dataset.name.in_(["a", "b", "c"]), (Model.name == "x") | Model.name.in_(["y", "z"]), Label.score > 0.75, @@ -83,44 +48,126 @@ def test__format_filter( Dataset.metadata["some_float"] >= 0.123, Dataset.metadata["some_datetime"] > datetime.timedelta(days=1), Dataset.metadata["some_geospatial"].intersects(polygon), # type: ignore - issue #605 - ] + ) ) - filter_from_dictionary = _format_filter( - { - "dataset_names": ["a", "b", "c"], - "model_names": ["x", "y", "z"], - "label_scores": [{"value": 0.75, "operator": ">"}], - "polygon_area": [ - {"value": 1000, "operator": ">"}, - {"value": 5000, "operator": "<"}, - ], - "raster_area": [ - {"value": 100, "operator": ">"}, - {"value": 500, "operator": "<"}, - ], - "dataset_metadata": { - "some_str": [{"value": "foobar", "operator": "=="}], - "some_float": [{"value": 0.123, "operator": ">="}], - "some_datetime": [ - { - "value": { - "duration": datetime.timedelta( - days=1 - ).total_seconds() + assert filter_from_constraints.to_dict() == { + "datasets": None, + "models": None, + "datums": None, 
+ "annotations": { + "op": "and", + "args": [ + { + "op": "or", + "args": [ + { + "lhs": {"name": "dataset.name", "key": None}, + "rhs": {"type": "string", "value": "a"}, + "op": "eq", }, - "operator": ">", - } - ], - "some_geospatial": [ - { - "value": geojson, - "operator": "intersect", - } - ], - }, - } - ) - - assert filter_object == filter_from_constraints - assert filter_object == filter_from_dictionary + { + "lhs": {"name": "dataset.name", "key": None}, + "rhs": {"type": "string", "value": "b"}, + "op": "eq", + }, + { + "lhs": {"name": "dataset.name", "key": None}, + "rhs": {"type": "string", "value": "c"}, + "op": "eq", + }, + ], + }, + { + "op": "or", + "args": [ + { + "lhs": {"name": "model.name", "key": None}, + "rhs": {"type": "string", "value": "x"}, + "op": "eq", + }, + { + "lhs": {"name": "model.name", "key": None}, + "rhs": {"type": "string", "value": "y"}, + "op": "eq", + }, + { + "lhs": {"name": "model.name", "key": None}, + "rhs": {"type": "string", "value": "z"}, + "op": "eq", + }, + ], + }, + { + "lhs": {"name": "label.score", "key": None}, + "rhs": {"type": "float", "value": 0.75}, + "op": "gt", + }, + { + "lhs": {"name": "annotation.polygon.area", "key": None}, + "rhs": {"type": "float", "value": 1000}, + "op": "gt", + }, + { + "lhs": {"name": "annotation.polygon.area", "key": None}, + "rhs": {"type": "float", "value": 5000}, + "op": "lt", + }, + { + "lhs": {"name": "annotation.raster.area", "key": None}, + "rhs": {"type": "float", "value": 100}, + "op": "gt", + }, + { + "lhs": {"name": "annotation.raster.area", "key": None}, + "rhs": {"type": "float", "value": 500}, + "op": "lt", + }, + { + "lhs": {"name": "dataset.metadata", "key": "some_str"}, + "rhs": {"type": "string", "value": "foobar"}, + "op": "eq", + }, + { + "lhs": {"name": "dataset.metadata", "key": "some_float"}, + "rhs": {"type": "float", "value": 0.123}, + "op": "gte", + }, + { + "lhs": { + "name": "dataset.metadata", + "key": "some_datetime", + }, + "rhs": {"type": "duration", "value": 86400.0}, + "op": "gt", + }, + { + "lhs": { + "name": "dataset.metadata", + "key": "some_geospatial", + }, + "rhs": { + "type": "polygon", + "value": [ + [ + (125.2750725, 38.760525), + (125.3902365, 38.775069), + (125.5054005, 38.789613), + (125.5051935, 38.71402425), + (125.5049865, 38.6384355), + (125.3902005, 38.6244225), + (125.2754145, 38.6104095), + (125.2752435, 38.68546725), + (125.2750725, 38.760525), + ] + ], + }, + "op": "intersects", + }, + ], + }, + "groundtruths": None, + "predictions": None, + "labels": None, + "embeddings": None, + } diff --git a/client/unit-tests/schemas/test_filters.py b/client/unit-tests/schemas/test_filters.py index 8c4ee8758..8d519a04a 100644 --- a/client/unit-tests/schemas/test_filters.py +++ b/client/unit-tests/schemas/test_filters.py @@ -1,97 +1,196 @@ import datetime -from dataclasses import asdict from valor import Annotation, Dataset, Datum, Filter, Label, Model +from valor.schemas import And def test_empty_filter(): - f = asdict(Filter()) - assert f == { - "dataset_names": None, - "dataset_metadata": None, - "model_names": None, - "model_metadata": None, - "datum_uids": None, - "datum_metadata": None, - "task_types": None, - "annotation_metadata": None, - "require_bounding_box": None, - "bounding_box_area": None, - "require_polygon": None, - "polygon_area": None, - "require_raster": None, - "raster_area": None, + assert Filter().to_dict() == { + "datasets": None, + "models": None, + "datums": None, + "annotations": None, + "groundtruths": None, + "predictions": None, 
"labels": None, - "label_ids": None, - "label_keys": None, - "label_scores": None, + "embeddings": None, } def test_declarative_filtering(): - filters = [ - Dataset.name == "dataset1", - Model.name == "model1", - Datum.uid == "uid1", - Label.key == "k1", - Label.score > 0.5, - Label.score < 0.75, - Annotation.labels == [Label(key="k2", value="v2")], - # geometry filters - Annotation.raster.is_none(), - Annotation.polygon.is_none(), - Annotation.bounding_box.is_not_none(), - Annotation.bounding_box.area >= 1000, - Annotation.bounding_box.area <= 5000, - # metadata filters - Dataset.metadata["arbitrary_numeric_key"] >= 10, - Dataset.metadata["arbitrary_numeric_key"] < 20, - Model.metadata["arbitrary_str_key"] == "arbitrary value", - Datum.metadata["arbitrary_datetime_key"] >= datetime.timedelta(days=1), - Datum.metadata["arbitrary_datetime_key"] <= datetime.timedelta(days=2), - Annotation.metadata["myKey"] == "helloworld", - ] + filters = Filter( + datums=And( + Datum.uid == "uid1", + Datum.metadata["arbitrary_datetime_key"] + >= datetime.timedelta(days=1), + Datum.metadata["arbitrary_datetime_key"] + <= datetime.timedelta(days=2), + ), + annotations=And( + Dataset.name == "dataset1", + Dataset.metadata["arbitrary_numeric_key"] >= 10, + Dataset.metadata["arbitrary_numeric_key"] < 20, + # geometry filters + Annotation.raster.is_none(), + Annotation.polygon.is_none(), + Annotation.bounding_box.is_not_none(), + Annotation.bounding_box.area >= 1000, + Annotation.bounding_box.area <= 5000, + Annotation.metadata["myKey"] == "helloworld", + # label filters + Label.key == "k2", + Label.value == "v2", + ), + labels=And( + Label.key == "k1", + Label.score > 0.5, + Label.score < 0.75, + ), + predictions=And( + Model.name == "model1", + Model.metadata["arbitrary_str_key"] == "arbitrary value", + ), + ) - f = asdict(Filter.create(filters)) - assert f == { - "dataset_names": ["dataset1"], - "dataset_metadata": { - "arbitrary_numeric_key": [ - {"value": 10, "operator": ">="}, - {"value": 20, "operator": "<"}, - ] + assert filters.to_dict() == { + "datasets": None, + "models": None, + "datums": { + "op": "and", + "args": [ + { + "lhs": {"name": "datum.uid", "key": None}, + "rhs": {"type": "string", "value": "uid1"}, + "op": "eq", + }, + { + "lhs": { + "name": "datum.metadata", + "key": "arbitrary_datetime_key", + }, + "rhs": {"type": "duration", "value": 86400.0}, + "op": "gte", + }, + { + "lhs": { + "name": "datum.metadata", + "key": "arbitrary_datetime_key", + }, + "rhs": {"type": "duration", "value": 172800.0}, + "op": "lte", + }, + ], }, - "model_names": ["model1"], - "model_metadata": { - "arbitrary_str_key": [ - {"value": "arbitrary value", "operator": "=="} - ] + "annotations": { + "op": "and", + "args": [ + { + "lhs": {"name": "dataset.name", "key": None}, + "rhs": {"type": "string", "value": "dataset1"}, + "op": "eq", + }, + { + "lhs": { + "name": "dataset.metadata", + "key": "arbitrary_numeric_key", + }, + "rhs": {"type": "integer", "value": 10}, + "op": "gte", + }, + { + "lhs": { + "name": "dataset.metadata", + "key": "arbitrary_numeric_key", + }, + "rhs": {"type": "integer", "value": 20}, + "op": "lt", + }, + { + "lhs": {"name": "annotation.raster", "key": None}, + "rhs": None, + "op": "isnull", + }, + { + "lhs": {"name": "annotation.polygon", "key": None}, + "rhs": None, + "op": "isnull", + }, + { + "lhs": {"name": "annotation.bounding_box", "key": None}, + "rhs": None, + "op": "isnotnull", + }, + { + "lhs": { + "name": "annotation.bounding_box.area", + "key": None, + }, + "rhs": {"type": 
"float", "value": 1000}, + "op": "gte", + }, + { + "lhs": { + "name": "annotation.bounding_box.area", + "key": None, + }, + "rhs": {"type": "float", "value": 5000}, + "op": "lte", + }, + { + "lhs": {"name": "annotation.metadata", "key": "mykey"}, + "rhs": {"type": "string", "value": "helloworld"}, + "op": "eq", + }, + { + "lhs": {"name": "label.key", "key": None}, + "rhs": {"type": "string", "value": "k2"}, + "op": "eq", + }, + { + "lhs": {"name": "label.value", "key": None}, + "rhs": {"type": "string", "value": "v2"}, + "op": "eq", + }, + ], }, - "datum_uids": ["uid1"], - "datum_metadata": { - "arbitrary_datetime_key": [ - {"value": {"duration": 86400.0}, "operator": ">="}, - {"value": {"duration": 172800.0}, "operator": "<="}, - ] + "groundtruths": None, + "predictions": { + "op": "and", + "args": [ + { + "lhs": {"name": "model.name", "key": None}, + "rhs": {"type": "string", "value": "model1"}, + "op": "eq", + }, + { + "lhs": { + "name": "model.metadata", + "key": "arbitrary_str_key", + }, + "rhs": {"type": "string", "value": "arbitrary value"}, + "op": "eq", + }, + ], }, - "task_types": None, - "require_bounding_box": True, - "bounding_box_area": [ - {"value": 1000, "operator": ">="}, - {"value": 5000, "operator": "<="}, - ], - "annotation_metadata": { - "mykey": [{"value": "helloworld", "operator": "=="}] + "labels": { + "op": "and", + "args": [ + { + "lhs": {"name": "label.key", "key": None}, + "rhs": {"type": "string", "value": "k1"}, + "op": "eq", + }, + { + "lhs": {"name": "label.score", "key": None}, + "rhs": {"type": "float", "value": 0.5}, + "op": "gt", + }, + { + "lhs": {"name": "label.score", "key": None}, + "rhs": {"type": "float", "value": 0.75}, + "op": "lt", + }, + ], }, - "require_polygon": False, - "polygon_area": None, - "require_raster": False, - "raster_area": None, - "labels": [{"k2": "v2"}], - "label_ids": None, - "label_keys": ["k1"], - "label_scores": [ - {"value": 0.5, "operator": ">"}, - {"value": 0.75, "operator": "<"}, - ], + "embeddings": None, } diff --git a/client/unit-tests/symbolic/collections/test_static_collection.py b/client/unit-tests/symbolic/collections/test_static_collection.py index ae30e31fb..b59c3a885 100644 --- a/client/unit-tests/symbolic/collections/test_static_collection.py +++ b/client/unit-tests/symbolic/collections/test_static_collection.py @@ -2,7 +2,7 @@ from valor.schemas import List as SymbolicList from valor.schemas.symbolic.collections import StaticCollection -from valor.schemas.symbolic.types import Bool, Float, Integer, String +from valor.schemas.symbolic.types import Boolean, Float, Integer, String def test_static_collection_init(): @@ -10,7 +10,7 @@ class A(StaticCollection): w: Integer x: Float y: String - z: Bool + z: Boolean # test that kwargs are required with pytest.raises(ValueError): @@ -22,7 +22,7 @@ class A(StaticCollection): w: Integer x: Float y: String - z: Bool + z: Boolean # test that the 'symbolic' classmethod is the same as passing a symbol symA = A.symbolic() @@ -30,51 +30,26 @@ class A(StaticCollection): # test symbolic usage assert symA.to_dict() == { - "type": "symbol", - "value": { - "owner": None, - "name": "a", - "key": None, - "attribute": None, - }, + "name": "a", + "key": None, } # test that members are also symbolic assert symA.w.to_dict() == { - "type": "symbol", - "value": { - "owner": "a", - "name": "w", - "key": None, - "attribute": None, - }, + "name": "a.w", + "key": None, } assert symA.x.to_dict() == { - "type": "symbol", - "value": { - "owner": "a", - "name": "x", - "key": None, - "attribute": 
None, - }, + "name": "a.x", + "key": None, } assert symA.y.to_dict() == { - "type": "symbol", - "value": { - "owner": "a", - "name": "y", - "key": None, - "attribute": None, - }, + "name": "a.y", + "key": None, } assert symA.z.to_dict() == { - "type": "symbol", - "value": { - "owner": "a", - "name": "z", - "key": None, - "attribute": None, - }, + "name": "a.z", + "key": None, } @@ -83,13 +58,13 @@ class A(StaticCollection): w: Integer x: Float y: String - z: Bool + z: Boolean encoding = {"w": 101, "x": 0.123, "y": "foobar", "z": True} # test that casting to symbolics is implicit v1 = A(w=101, x=0.123, y="foobar", z=True) - v2 = A(w=Integer(101), x=Float(0.123), y=String("foobar"), z=Bool(True)) + v2 = A(w=Integer(101), x=Float(0.123), y=String("foobar"), z=Boolean(True)) v3 = A(w=101, x=Float(0.123), y=String("foobar"), z=True) assert v1.to_dict() == v2.to_dict() assert v1.to_dict() == v3.to_dict() @@ -125,14 +100,14 @@ class A(StaticCollection): w: Integer x: "Float" y: "String" - z: Bool + z: Boolean # test parsing of forward references assert A._get_static_types() == { "w": Integer, "x": Float, "y": String, - "z": Bool, + "z": Boolean, } # test lists of variables (note: these are not directly comparable) @@ -140,10 +115,10 @@ class B(StaticCollection): w: SymbolicList[Integer] x: SymbolicList[Float] y: SymbolicList[String] - z: SymbolicList[Bool] + z: SymbolicList[Boolean] types_ = B._get_static_types() assert types_["w"].get_element_type() == Integer assert types_["x"].get_element_type() == Float assert types_["y"].get_element_type() == String - assert types_["z"].get_element_type() == Bool + assert types_["z"].get_element_type() == Boolean diff --git a/client/unit-tests/symbolic/collections/test_structures.py b/client/unit-tests/symbolic/collections/test_structures.py index 40e1f89b7..f1b2b554c 100644 --- a/client/unit-tests/symbolic/collections/test_structures.py +++ b/client/unit-tests/symbolic/collections/test_structures.py @@ -3,7 +3,7 @@ import pytest from valor.schemas import ( - Bool, + Boolean, Date, DateTime, Duration, @@ -21,10 +21,7 @@ Time, Variable, ) -from valor.schemas.symbolic.operators import ( - AppendableFunction, - TwoArgumentFunction, -) +from valor.schemas.symbolic.operators import Condition, Eq, Function, Ne from valor.schemas.symbolic.types import ( Dictionary, DictionaryValue, @@ -34,7 +31,7 @@ def test__get_type_by_value(): - assert _get_type_by_value(True) is Bool + assert _get_type_by_value(True) is Boolean assert _get_type_by_value("hello world") is String assert _get_type_by_value(int(1)) is Integer assert _get_type_by_value(float(3.14)) is Float @@ -63,7 +60,7 @@ def test__get_type_by_value(): def test_get_type_by_name(): types_ = [ - Bool, + Boolean, String, Integer, Float, @@ -121,13 +118,8 @@ def _test_to_dict(objcls, value): } # test symbolic assert objcls.symbolic().to_dict() == { - "type": "symbol", - "value": { - "owner": None, - "name": objcls.__name__.lower(), - "key": None, - "attribute": None, - }, + "name": objcls.__name__.lower(), + "key": None, } @@ -156,14 +148,19 @@ def _test_generic(objcls, permutations, op): # test functional dictionary generation expr = C.__getattribute__(op)(a) expr_dict = expr.to_dict() - if issubclass(type(expr), AppendableFunction): + if isinstance(expr, Ne): + # this is an edge case as the Ne operator is currently set to Not(Equal(A, B)) + assert len(expr_dict) == 2 + assert expr_dict["op"] == "not" + assert expr_dict["args"] == Eq(C, A).to_dict() + elif issubclass(type(expr), Function): assert len(expr_dict) == 2 
assert expr_dict["op"] == get_function_name(op) assert expr_dict["args"] == [ C.to_dict(), A.to_dict(), ] - elif issubclass(type(expr), TwoArgumentFunction): + elif issubclass(type(expr), Condition): assert len(expr_dict) == 3 assert expr_dict["op"] == get_function_name(op) assert expr_dict["lhs"] == C.to_dict() @@ -188,13 +185,8 @@ def test_list(): symbol = List[Float].symbolic() assert symbol.__str__() == "list[float]" assert symbol.to_dict() == { - "type": "symbol", - "value": { - "owner": None, - "name": "list[float]", - "key": None, - "attribute": None, - }, + "name": "list[float]", + "key": None, } # test creating valued lists @@ -218,13 +210,8 @@ def test_list(): assert (symbol == [0.1, 0.2, 0.3]).to_dict() == { "op": "eq", "lhs": { - "type": "symbol", - "value": { - "owner": None, - "name": "list[float]", - "key": None, - "attribute": None, - }, + "name": "list[float]", + "key": None, }, "rhs": {"type": "list[float]", "value": [0.1, 0.2, 0.3]}, } @@ -233,19 +220,16 @@ def test_list(): assert (symbol == variable).to_dict() == { "op": "eq", "lhs": { - "type": "symbol", - "value": { - "owner": None, - "name": "list[float]", - "key": None, - "attribute": None, - }, + "name": "list[float]", + "key": None, }, "rhs": {"type": "list[float]", "value": [0.1, 0.2, 0.3]}, } # test decode from json dict - assert List[Float].decode_value([0.1, 0.2, 0.3]).get_value() == [ # type: ignore - issue #604 + assert List[Float].decode_value( + [0.1, 0.2, 0.3] + ).get_value() == [ # type: ignore - issue #604 0.1, 0.2, 0.3, @@ -253,7 +237,7 @@ def test_list(): # test comparison between valued variable and value assert (variable == [0.1, 0.2, 0.3]).to_dict() == { - "type": "bool", + "type": "boolean", "value": True, } @@ -275,17 +259,10 @@ def test_dictionary_value(): with pytest.raises(ValueError): DictionaryValue(1) # type: ignore - testing - # test symbol cannot already attribute - with pytest.raises(ValueError) as e: - DictionaryValue( - symbol=Symbol(name="a", attribute="c", owner="d", key="b"), - ) - assert "attribute" in str(e) - # test symbol must have key with pytest.raises(ValueError) as e: DictionaryValue( - symbol=Symbol(name="a", owner="d"), + symbol=Symbol(name="a"), ) assert "key" in str(e) @@ -295,13 +272,16 @@ def test_dictionary_value(): ] == "eq" assert (DictionaryValue.symbolic(name="a", key="b") != 0).to_dict()[ "op" - ] == "ne" + ] == "not" + assert (DictionaryValue.symbolic(name="a", key="b") != 0).to_dict()[ + "args" + ]["op"] == "eq" assert (DictionaryValue.symbolic(name="a", key="b") >= 0).to_dict()[ "op" - ] == "ge" + ] == "gte" assert (DictionaryValue.symbolic(name="a", key="b") <= 0).to_dict()[ "op" - ] == "le" + ] == "lte" assert (DictionaryValue.symbolic(name="a", key="b") > 0).to_dict()[ "op" ] == "gt" diff --git a/client/unit-tests/symbolic/test_operators.py b/client/unit-tests/symbolic/test_operators.py index ca70ff27c..2c8d46d81 100644 --- a/client/unit-tests/symbolic/test_operators.py +++ b/client/unit-tests/symbolic/test_operators.py @@ -2,16 +2,7 @@ import pytest -from valor.schemas.symbolic.operators import ( - And, - AppendableFunction, - Function, - Negate, - OneArgumentFunction, - Or, - TwoArgumentFunction, - Xor, -) +from valor.schemas.symbolic.operators import And, Condition, Function, Not, Or from valor.schemas.symbolic.types import Float, Integer, String @@ -48,17 +39,15 @@ def test_function(variables): # test stringify w/ operator assert issubclass(And, Function) - assert And._operator is not None assert ( And(x, y, z).__repr__() == "And(Integer(1), 
String('2'), Float(0.3))"
     )
-    assert And(x, y, z).__str__() == "(Integer(1) & String('2') & Float(0.3))"
+    assert And(x, y, z).__str__() == "And(Integer(1), String('2'), Float(0.3))"
 
     # test logical operators
     assert type(Function(x) & Function(y)) is And
     assert type(Function(x) | Function(y)) is Or
-    assert type(Function(x) ^ Function(y)) is Xor
-    assert type(~Function(x)) is Negate
+    assert type(~Function(x)) is Not
 
     # test requirement that args must have a 'to_dict' method.
     with pytest.raises(ValueError):
@@ -70,24 +59,18 @@
 
 
 def test_appendable_function(variables):
-    assert issubclass(AppendableFunction, Function)
     x, y, z = variables
 
-    # test case where too few args
-    with pytest.raises(TypeError):
-        AppendableFunction(x)  # type: ignore - testing
-
     # test that all appendable functions define an overloadable function
-    assert issubclass(And, AppendableFunction)
-    assert issubclass(Or, AppendableFunction)
-    assert issubclass(Xor, AppendableFunction)
+    assert issubclass(And, Function)
+    assert issubclass(Or, Function)
 
     # test append
-    f = AppendableFunction(x, y)
-    f.append(z)
+    f = Function(x, y)
+    f._args.append(z)
     assert f.to_dict() == {
-        "op": "appendablefunction",
+        "op": "function",
         "args": [
             {"type": "integer", "value": 1},
             {"type": "string", "value": "2"},
@@ -97,7 +80,7 @@
 
     # continue append on the subclass 'And'
     f1 = And(x, y)
-    f1.append(z)
+    f1 &= z
     assert f1.to_dict() == {
         "op": "and",
         "args": [
@@ -129,39 +112,24 @@
             {
                 "op": "eq",
                 "lhs": {
-                    "type": "symbol",
-                    "value": {
-                        "owner": None,
-                        "name": "integer",
-                        "key": None,
-                        "attribute": None,
-                    },
+                    "name": "integer",
+                    "key": None,
                 },
                 "rhs": {"type": "integer", "value": 1},
             },
             {
                 "op": "eq",
                 "lhs": {
-                    "type": "symbol",
-                    "value": {
-                        "owner": None,
-                        "name": "string",
-                        "key": None,
-                        "attribute": None,
-                    },
+                    "name": "string",
+                    "key": None,
                },
                "rhs": {"type": "string", "value": "2"},
            },
            {
                "op": "eq",
                "lhs": {
-                    "type": "symbol",
-                    "value": {
-                        "owner": None,
-                        "name": "float",
-                        "key": None,
-                        "attribute": None,
-                    },
+                    "name": "float",
+                    "key": None,
                },
                "rhs": {"type": "float", "value": 0.3},
            },
        ],
    }
@@ -170,23 +138,22 @@
 
 
 def test_one_arg_function(variables):
-    assert issubclass(OneArgumentFunction, Function)
     x, _, _ = variables
-    f = OneArgumentFunction(x)
+    f = Function(x)
 
     # test dictionary generation
     assert f.to_dict() == {
-        "op": "oneargumentfunction",
-        "arg": {"type": "integer", "value": 1},
+        "op": "function",
+        "args": {"type": "integer", "value": 1},
     }
 
 
-def test_two_arg_function(variables):
-    assert issubclass(TwoArgumentFunction, Function)
-
+def test_condition(variables):
     x, y, z = variables
-    f = TwoArgumentFunction(x, y)
+
+    with pytest.warns(RuntimeWarning):
+        f = Condition(x, y)
 
     # test members
     assert f.lhs == x
@@ -194,14 +161,20 @@
 
     # test dictionary generation
     assert f.to_dict() == {
-        "op": "twoargumentfunction",
+        "op": "condition",
        "lhs": {"type": "integer", "value": 1},
        "rhs": {"type": "string", "value": "2"},
    }
 
-    # test cases where too few args are provided
-    with pytest.raises(TypeError):
-        TwoArgumentFunction(x)  # type: ignore - testing
+    with pytest.warns(RuntimeWarning):
+        f = Condition(x)
+
+    assert f.to_dict() == {
+        "op": "condition",
+        "lhs": {"type": "integer", "value": 1},
+        "rhs": None,
+    }
+
     # test case where too many args are provided
    with pytest.raises(TypeError):
-        TwoArgumentFunction(x, y, z)  # type: 
ignore - testing + Condition(x, y, z) # type: ignore - testing diff --git a/client/unit-tests/symbolic/types/test_schemas.py b/client/unit-tests/symbolic/types/test_schemas.py index 000a4e9ed..0c8438717 100644 --- a/client/unit-tests/symbolic/types/test_schemas.py +++ b/client/unit-tests/symbolic/types/test_schemas.py @@ -4,10 +4,7 @@ import pytest from valor.schemas import Box, Embedding, Float, Raster, TaskTypeEnum -from valor.schemas.symbolic.operators import ( - AppendableFunction, - TwoArgumentFunction, -) +from valor.schemas.symbolic.operators import Condition, Eq, Function, Ne def get_function_name(fn: str) -> str: @@ -18,9 +15,9 @@ def get_function_name(fn: str) -> str: "__or__": "or", "__xor__": "xor", "__gt__": "gt", - "__ge__": "ge", + "__ge__": "gte", "__lt__": "lt", - "__le__": "le", + "__le__": "lte", "is_none": "isnull", "is_not_none": "isnotnull", "intersects": "intersects", @@ -51,13 +48,8 @@ def _test_to_dict(objcls, value, type_name: typing.Optional[str] = None): } # test symbolic assert objcls.symbolic().to_dict() == { - "type": "symbol", - "value": { - "owner": None, - "name": type_name, - "key": None, - "attribute": None, - }, + "name": type_name, + "key": None, } @@ -88,14 +80,19 @@ def _test_generic( # test functional dictionary generation expr = C.__getattribute__(op)(a) expr_dict = expr.to_dict() - if issubclass(type(expr), AppendableFunction): + if isinstance(expr, Ne): + # this is an edge case as the Ne operator is currently set to Not(Equal(A, B)) + assert len(expr_dict) == 2 + assert expr_dict["op"] == "not" + assert expr_dict["args"] == Eq(C, A).to_dict() + elif issubclass(type(expr), Function): assert len(expr_dict) == 2 assert expr_dict["op"] == get_function_name(op) assert expr_dict["args"] == [ C.to_dict(), A.to_dict(), ] - elif issubclass(type(expr), TwoArgumentFunction): + elif issubclass(type(expr), Condition): assert len(expr_dict) == 3 assert expr_dict["op"] == get_function_name(op) assert expr_dict["lhs"] == C.to_dict() @@ -378,13 +375,8 @@ def test_raster(): # test property 'area' assert objcls.symbolic().area.is_symbolic assert objcls.symbolic().area.to_dict() == { - "type": "symbol", - "value": { - "owner": None, - "name": objcls.__name__.lower(), - "key": None, - "attribute": "area", - }, + "name": f"{objcls.__name__.lower()}.area", + "key": None, } # test property 'area' is not available to values diff --git a/client/unit-tests/symbolic/types/test_symbolic_types.py b/client/unit-tests/symbolic/types/test_symbolic_types.py index 95294d386..3320e9614 100644 --- a/client/unit-tests/symbolic/types/test_symbolic_types.py +++ b/client/unit-tests/symbolic/types/test_symbolic_types.py @@ -3,12 +3,9 @@ import pytest -from valor.schemas.symbolic.operators import ( - AppendableFunction, - TwoArgumentFunction, -) +from valor.schemas.symbolic.operators import Condition, Eq, Function, Ne from valor.schemas.symbolic.types import ( - Bool, + Boolean, Date, DateTime, Duration, @@ -35,41 +32,24 @@ def test_symbol(): assert s.__repr__() == "Symbol(name='some_symbol')" assert s.__str__() == "some_symbol" assert s.to_dict() == { - "type": "symbol", - "value": { - "owner": None, - "name": "some_symbol", - "key": None, - "attribute": None, - }, + "name": "some_symbol", + "key": None, } s = Symbol( - owner="some_owner", name="some_name", - attribute="some_attribute", key="some_key", ) - assert ( - s.__repr__() - == "Symbol(owner='some_owner', name='some_name', key='some_key', attribute='some_attribute')" - ) - assert s.__str__() == 
"some_owner.some_name['some_key'].some_attribute" + assert s.__repr__() == "Symbol(name='some_name', key='some_key')" + assert s.__str__() == "some_name['some_key']" assert s.to_dict() == { - "type": "symbol", - "value": { - "owner": "some_owner", - "name": "some_name", - "key": "some_key", - "attribute": "some_attribute", - }, + "name": "some_name", + "key": "some_key", } # test '__eq__' assert s == Symbol( - owner="some_owner", name="some_name", - attribute="some_attribute", key="some_key", ) assert not (s == "symbol") @@ -78,9 +58,7 @@ def test_symbol(): assert not ( s != Symbol( - owner="some_owner", name="some_name", - attribute="some_attribute", key="some_key", ) ) @@ -109,15 +87,11 @@ def test_variable(): # test is_none assert Variable.symbolic().is_none().to_dict() == { "op": "isnull", - "arg": { - "type": "symbol", - "value": { - "name": "variable", - "owner": None, - "key": None, - "attribute": None, - }, + "lhs": { + "name": "variable", + "key": None, }, + "rhs": None, } assert Variable.symbolic().get_symbol() == Symbol(name="variable") assert Variable(None).is_none().get_value() is True # type: ignore - issue #604 @@ -128,15 +102,11 @@ def test_variable(): # test is_not_none assert Variable.symbolic().is_not_none().to_dict() == { "op": "isnotnull", - "arg": { - "type": "symbol", - "value": { - "name": "variable", - "owner": None, - "key": None, - "attribute": None, - }, + "lhs": { + "name": "variable", + "key": None, }, + "rhs": None, } assert Variable(None).is_not_none().get_value() is False # type: ignore - issue #604 assert Variable(1234).is_not_none().get_value() is True # type: ignore - issue #604 @@ -148,45 +118,28 @@ def _test_equatable(varA, varB, varC): assert (varA == varB).to_dict() == { "op": "eq", "lhs": { - "type": "symbol", - "value": { - "owner": None, - "name": "a", - "key": None, - "attribute": None, - }, + "name": "a", + "key": None, }, "rhs": { - "type": "symbol", - "value": { - "owner": None, - "name": "b", - "key": None, - "attribute": None, - }, + "name": "b", + "key": None, }, } assert (varA == varB).to_dict() == (varA == Symbol("B")).to_dict() # not equal assert (varA != varB).to_dict() == { - "op": "ne", - "lhs": { - "type": "symbol", - "value": { - "owner": None, + "op": "not", + "args": { + "op": "eq", + "lhs": { "name": "a", "key": None, - "attribute": None, }, - }, - "rhs": { - "type": "symbol", - "value": { - "owner": None, + "rhs": { "name": "b", "key": None, - "attribute": None, }, }, } @@ -199,43 +152,23 @@ def _test_equatable(varA, varB, varC): { "op": "eq", "lhs": { - "type": "symbol", - "value": { - "owner": None, - "name": "a", - "key": None, - "attribute": None, - }, + "name": "a", + "key": None, }, "rhs": { - "type": "symbol", - "value": { - "owner": None, - "name": "b", - "key": None, - "attribute": None, - }, + "name": "b", + "key": None, }, }, { "op": "eq", "lhs": { - "type": "symbol", - "value": { - "owner": None, - "name": "a", - "key": None, - "attribute": None, - }, + "name": "a", + "key": None, }, "rhs": { - "type": "symbol", - "value": { - "owner": None, - "name": "c", - "key": None, - "attribute": None, - }, + "name": "c", + "key": None, }, }, ], @@ -254,92 +187,52 @@ def _test_quantifiable(varA, varB, varC): # greater-than assert (varA > varB).to_dict() == { "lhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "a", - "owner": None, - }, + "key": None, + "name": "a", }, "op": "gt", "rhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "b", - "owner": None, - }, 
+ "name": "b", + "key": None, }, } # greater-than or equal assert (varA >= varB).to_dict() == { "lhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "a", - "owner": None, - }, + "key": None, + "name": "a", }, - "op": "ge", + "op": "gte", "rhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "b", - "owner": None, - }, + "key": None, + "name": "b", }, } # less-than assert (varA < varB).to_dict() == { "lhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "a", - "owner": None, - }, + "key": None, + "name": "a", }, "op": "lt", "rhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "b", - "owner": None, - }, + "key": None, + "name": "b", }, } # less-than or equal assert (varA <= varB).to_dict() == { "lhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "a", - "owner": None, - }, + "key": None, + "name": "a", }, - "op": "le", + "op": "lte", "rhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "b", - "owner": None, - }, + "key": None, + "name": "b", }, } @@ -347,29 +240,21 @@ def _test_quantifiable(varA, varB, varC): def _test_nullable(varA, varB, varC): # is none assert varA.is_none().to_dict() == { - "arg": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "a", - "owner": None, - }, + "lhs": { + "key": None, + "name": "a", }, + "rhs": None, "op": "isnull", } # is not none assert varA.is_not_none().to_dict() == { - "arg": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "a", - "owner": None, - }, + "lhs": { + "key": None, + "name": "a", }, + "rhs": None, "op": "isnotnull", } @@ -378,69 +263,39 @@ def _test_spatial(varA, varB, varC): # intersects assert varA.intersects(varB).to_dict() == { "lhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "a", - "owner": None, - }, + "key": None, + "name": "a", }, "op": "intersects", "rhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "b", - "owner": None, - }, + "key": None, + "name": "b", }, } # inside assert varA.inside(varB).to_dict() == { "lhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "a", - "owner": None, - }, + "key": None, + "name": "a", }, "op": "inside", "rhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "b", - "owner": None, - }, + "key": None, + "name": "b", }, } # outside assert varA.outside(varB).to_dict() == { "lhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "a", - "owner": None, - }, + "key": None, + "name": "a", }, "op": "outside", "rhs": { - "type": "symbol", - "value": { - "attribute": None, - "key": None, - "name": "b", - "owner": None, - }, + "key": None, + "name": "b", }, } @@ -488,9 +343,9 @@ def get_function_name(fn: str) -> str: "__or__": "or", "__xor__": "xor", "__gt__": "gt", - "__ge__": "ge", + "__ge__": "gte", "__lt__": "lt", - "__le__": "le", + "__le__": "lte", "is_none": "isnull", "is_not_none": "isnotnull", "intersects": "intersects", @@ -523,13 +378,8 @@ def _test_to_dict(objcls, value, type_name: typing.Optional[str] = None): } # test symbolic assert objcls.symbolic().to_dict() == { - "type": "symbol", - "value": { - "owner": None, - "name": type_name, - "key": None, - "attribute": None, - }, + "name": type_name, + "key": None, } @@ -561,14 +411,19 @@ def _test_generic( # test 
functional dictionary generation expr = C.__getattribute__(op)(a) expr_dict = expr.to_dict() - if issubclass(type(expr), AppendableFunction): + if isinstance(expr, Ne): + # this is an edge case as the Ne operator is currently set to Not(Equal(A, B)) + assert len(expr_dict) == 2 + assert expr_dict["op"] == "not" + assert expr_dict["args"] == Eq(C, A).to_dict() + elif issubclass(type(expr), Function): assert len(expr_dict) == 2 assert expr_dict["op"] == get_function_name(op) assert expr_dict["args"] == [ C.to_dict(), A.to_dict(), ] - elif issubclass(type(expr), TwoArgumentFunction): + elif issubclass(type(expr), Condition): assert len(expr_dict) == 3 assert expr_dict["op"] == get_function_name(op) assert expr_dict["lhs"] == C.to_dict() @@ -580,7 +435,7 @@ def _test_generic( def _test_resolvable( objcls, permutations, op, type_name: typing.Optional[str] = None ): - """Test expressions that can be simplified to 'Bool'""" + """Test expressions that can be simplified to 'Boolean'""" type_name = type_name if type_name else objcls.__name__.lower() for a, b in permutations: A = objcls(a) @@ -611,7 +466,7 @@ def _test_unsupported(objcls, permutations, op): def test_bool(): # interoperable with builtin 'bool' - objcls = Bool + objcls = Boolean permutations = [ (True, True), (True, False), @@ -620,10 +475,10 @@ def test_bool(): ] # test supported methods - for op in ["__eq__", "__ne__", "__and__", "__or__", "__xor__"]: + for op in ["__eq__", "__ne__", "__and__", "__or__"]: _test_resolvable(objcls, permutations, op) - assert (~Bool(True)).get_value() is False # type: ignore - this will always return a bool - assert (~Bool(False)).get_value() is True # type: ignore - this will always return a bool + assert (~Boolean(True)).get_value() is False # type: ignore - this will always return a bool + assert (~Boolean(False)).get_value() is True # type: ignore - this will always return a bool # test unsupported methods for op in [ @@ -652,36 +507,25 @@ def test_bool(): _test_encoding(objcls, False, False) # test and operation - assert (Bool(True) & Bool(True)).get_value() is True # type: ignore - issue #604 - assert (Bool(True) & Bool(False)).get_value() is False # type: ignore - issue #604 - assert (Bool(False) & Bool(True)).get_value() is False # type: ignore - issue #604 - assert (Bool(False) & Bool(False)).get_value() is False # type: ignore - issue #604 + assert (Boolean(True) & Boolean(True)).get_value() is True # type: ignore - issue #604 + assert (Boolean(True) & Boolean(False)).get_value() is False # type: ignore - issue #604 + assert (Boolean(False) & Boolean(True)).get_value() is False # type: ignore - issue #604 + assert (Boolean(False) & Boolean(False)).get_value() is False # type: ignore - issue #604 # test or operation - assert (Bool(True) | Bool(True)).get_value() is True # type: ignore - issue #604 - assert (Bool(True) | Bool(False)).get_value() is True # type: ignore - issue #604 - assert (Bool(False) | Bool(True)).get_value() is True # type: ignore - issue #604 - assert (Bool(False) | Bool(False)).get_value() is False # type: ignore - issue #604 - - # test xor operation - assert (Bool(True) ^ Bool(True)).get_value() is False # type: ignore - issue #604 - assert (Bool(True) ^ Bool(False)).get_value() is True # type: ignore - issue #604 - assert (Bool(False) ^ Bool(True)).get_value() is True # type: ignore - issue #604 - assert (Bool(False) ^ Bool(False)).get_value() is False # type: ignore - issue #604 + assert (Boolean(True) | Boolean(True)).get_value() is True # type: ignore - issue #604 + 
assert (Boolean(True) | Boolean(False)).get_value() is True # type: ignore - issue #604 + assert (Boolean(False) | Boolean(True)).get_value() is True # type: ignore - issue #604 + assert (Boolean(False) | Boolean(False)).get_value() is False # type: ignore - issue #604 # test negation operation - assert (~Bool(True)).get_value() is False # type: ignore - issue #604 - assert (~Bool(False)).get_value() is True # type: ignore - issue #604 - assert (~Bool.symbolic()).to_dict() == { - "op": "negate", - "arg": { - "type": "symbol", - "value": { - "owner": None, - "name": "bool", - "key": None, - "attribute": None, - }, + assert (~Boolean(True)).get_value() is False # type: ignore - issue #604 + assert (~Boolean(False)).get_value() is True # type: ignore - issue #604 + assert (~Boolean.symbolic()).to_dict() == { + "op": "not", + "args": { + "name": "boolean", + "key": None, }, } @@ -704,7 +548,6 @@ def test_integer(): for op in [ "__and__", "__or__", - "__xor__", "intersects", "inside", "outside", @@ -712,10 +555,18 @@ def test_integer(): _test_unsupported(objcls, permutations, op) # test equatable - assert (Integer.nullable(None) == Integer(1)).get_value() is False # type: ignore - issue #604 - assert (Integer(1) == Integer.nullable(None)).get_value() is False # type: ignore - issue #604 - assert (Integer.nullable(None) != Integer(1)).get_value() is True # type: ignore - issue #604 - assert (Integer(1) != Integer.nullable(None)).get_value() is True # type: ignore - issue #604 + assert ( + Integer.nullable(None) == Integer(1) + ).get_value() is False # type: ignore - issue #604 + assert ( + Integer(1) == Integer.nullable(None) + ).get_value() is False # type: ignore - issue #604 + assert ( + Integer.nullable(None) != Integer(1) + ).get_value() is True # type: ignore - issue #604 + assert ( + Integer(1) != Integer.nullable(None) + ).get_value() is True # type: ignore - issue #604 # test nullable v1 = objcls.nullable(None) @@ -749,7 +600,6 @@ def test_float(): for op in [ "__and__", "__or__", - "__xor__", "intersects", "inside", "outside", @@ -792,7 +642,6 @@ def test_string(): "__le__", "__and__", "__or__", - "__xor__", "intersects", "inside", "outside", @@ -843,7 +692,6 @@ def test_datetime(): for op in [ "__and__", "__or__", - "__xor__", "intersects", "inside", "outside", @@ -898,7 +746,6 @@ def test_date(): for op in [ "__and__", "__or__", - "__xor__", "intersects", "inside", "outside", @@ -939,7 +786,6 @@ def test_time(): for op in [ "__and__", "__or__", - "__xor__", "intersects", "inside", "outside", @@ -978,7 +824,6 @@ def test_duration(): for op in [ "__and__", "__or__", - "__xor__", "intersects", "inside", "outside", @@ -1016,7 +861,6 @@ def test_point(): "__le__", "__and__", "__or__", - "__xor__", ]: _test_unsupported(objcls, permutations, op) @@ -1033,9 +877,6 @@ def test_point(): # test encoding _test_encoding(objcls, (1, -1), (1, -1)) - # test geojson rules - pass # TODO - def test_multipoint(): # interoperable with GeoJSON-style 'multipoint' geometry @@ -1056,7 +897,6 @@ def test_multipoint(): "__le__", "__and__", "__or__", - "__xor__", ]: _test_unsupported(objcls, permutations, op) @@ -1073,9 +913,6 @@ def test_multipoint(): # test encoding _test_encoding(objcls, [(0, 0), (1, 1)], [(0, 0), (1, 1)]) - # test geojson rules - pass # TODO - def test_linestring(): # interoperable with GeoJSON-style 'linestring' geometry @@ -1096,7 +933,6 @@ def test_linestring(): "__le__", "__and__", "__or__", - "__xor__", ]: _test_unsupported(objcls, permutations, op) @@ -1113,9 +949,6 @@ def 
test_linestring(): # test encoding _test_encoding(objcls, [(0, 0), (1, 1)], [(0, 0), (1, 1)]) - # test geojson rules - pass # TODO - def test_multilinestring(): # interoperable with GeoJSON-style 'multilinestring' geometry @@ -1141,7 +974,6 @@ def test_multilinestring(): "__le__", "__and__", "__or__", - "__xor__", ]: _test_unsupported(objcls, permutations, op) @@ -1158,9 +990,6 @@ def test_multilinestring(): # test encoding _test_encoding(objcls, [[(0, 0), (1, 1)]], [[(0, 0), (1, 1)]]) - # test geojson rules - pass # TODO - def test_polygon(): # interoperable with GeoJSON-style 'polygon' geometry @@ -1189,7 +1018,6 @@ def test_polygon(): "__le__", "__and__", "__or__", - "__xor__", ]: _test_unsupported(objcls, permutations, op) @@ -1213,21 +1041,13 @@ def test_polygon(): # test property 'area' assert objcls.symbolic().area.is_symbolic assert objcls.symbolic().area.to_dict() == { - "type": "symbol", - "value": { - "owner": None, - "name": objcls.__name__.lower(), - "key": None, - "attribute": "area", - }, + "name": f"{objcls.__name__.lower()}.area", + "key": None, } # test that property 'area' is not accessible when object is a value with pytest.raises(ValueError): objcls(permutations[0][0]).area - # test geojson rules - pass # TODO - def test_multipolygon(): # interoperable with GeoJSON-style 'multipolygon' geometry @@ -1265,7 +1085,6 @@ def test_multipolygon(): "__le__", "__and__", "__or__", - "__xor__", ]: _test_unsupported(objcls, permutations, op) @@ -1289,21 +1108,13 @@ def test_multipolygon(): # test property 'area' assert objcls.symbolic().area.is_symbolic assert objcls.symbolic().area.to_dict() == { - "type": "symbol", - "value": { - "owner": None, - "name": objcls.__name__.lower(), - "key": None, - "attribute": "area", - }, + "name": f"{objcls.__name__.lower()}.area", + "key": None, } # test that property 'area' is not accessible when object is a value with pytest.raises(ValueError): objcls(permutations[0][0]).area - # test geojson rules - pass # TODO - def test_nullable(): diff --git a/client/valor/client.py b/client/valor/client.py index 9f27f4b3b..17d494058 100644 --- a/client/valor/client.py +++ b/client/valor/client.py @@ -2,7 +2,7 @@ import logging import os import time -from dataclasses import asdict, dataclass +from dataclasses import dataclass from typing import Callable, Dict, List, Optional, TypeVar, Union from urllib.parse import urlencode, urljoin @@ -17,7 +17,6 @@ ClientNotConnectedError, raise_client_exception, ) -from valor.schemas import EvaluationRequest T = TypeVar("T") @@ -404,7 +403,7 @@ def get_labels( """ Gets all labels using an optional filter. - `GET` endpoint. + `POST` endpoint. Parameters ---------- @@ -414,12 +413,12 @@ def get_labels( Returns ------- list[dict] - A list of labels. + A list of labels in JSON format. """ - kwargs = {} - if filters: - kwargs["params"] = {k: json.dumps(v) for k, v in filters.items()} - return self._requests_get_rel_host("labels", **kwargs).json() + filters = filters if filters else dict() + return self._requests_post_rel_host( + "labels/filter", json=filters + ).json() def get_labels_from_dataset(self, name: str) -> List[dict]: """ @@ -474,7 +473,7 @@ def get_datasets(self, filters: Optional[dict] = None) -> List[dict]: """ Get all datasets with option to filter. - `GET` endpoint. + `POST` endpoint. Parameters ---------- @@ -484,12 +483,12 @@ def get_datasets(self, filters: Optional[dict] = None) -> List[dict]: Returns ------ List[dict] - A list of dictionaries describing all the datasets attributed to the `Client` object. 
+ A list of datasets in JSON format. """ - kwargs = {} - if filters: - kwargs["params"] = {k: json.dumps(v) for k, v in filters.items()} - return self._requests_get_rel_host("datasets", **kwargs).json() + filters = filters if filters else dict() + return self._requests_post_rel_host( + "datasets/filter", json=filters + ).json() def get_dataset(self, name: str) -> dict: """ @@ -576,7 +575,7 @@ def get_datums(self, filters: Optional[dict] = None) -> List[dict]: """ Get all datums using an optional filter. - `GET` endpoint. + `POST` endpoint. Parameters ---------- @@ -586,12 +585,10 @@ def get_datums(self, filters: Optional[dict] = None) -> List[dict]: Returns ------- List[dict] - A list of dictionaries describing all the datums of the specified dataset. + A list of datums in JSON format. """ - kwargs = {} - if filters: - kwargs["params"] = {k: json.dumps(v) for k, v in filters.items()} - return self._requests_get_rel_host("data", **kwargs).json() + filters = filters if isinstance(filters, dict) else dict() + return self._requests_post_rel_host("data/filter", json=filters).json() def get_datum( self, @@ -633,7 +630,7 @@ def get_models(self, filters: Optional[dict] = None) -> List[dict]: """ Get all models using an optional filter. - `GET` endpoint. + `POST` endpoint. Parameters ---------- @@ -643,12 +640,12 @@ def get_models(self, filters: Optional[dict] = None) -> List[dict]: Returns ------ List[dict] - A list of dictionaries describing all the models. + A list of models in JSON format. """ - kwargs = {} - if filters: - kwargs["params"] = {k: json.dumps(v) for k, v in filters.items()} - return self._requests_get_rel_host("models", **kwargs).json() + filters = filters if filters else dict() + return self._requests_post_rel_host( + "models/filter", json=filters + ).json() def get_model(self, name: str) -> dict: """ @@ -752,7 +749,7 @@ def delete_model(self, name: str) -> None: self._requests_delete_rel_host(f"models/{name}") def evaluate( - self, request: EvaluationRequest, allow_retries: bool = False + self, request: dict, allow_retries: bool = False ) -> List[dict]: """ Creates as many evaluations as necessary to fulfill the request. @@ -761,7 +758,7 @@ def evaluate( Parameters ---------- - request : schemas.EvaluationRequest + request : dict The requested evaluation parameters. allow_retries : bool, default = False Option to retry previously failed evaluations. @@ -773,9 +770,7 @@ def evaluate( """ query_str = urlencode({"allow_retries": allow_retries}) endpoint = f"evaluations?{query_str}" - return self._requests_post_rel_host( - endpoint, json=asdict(request) - ).json() + return self._requests_post_rel_host(endpoint, json=request).json() def get_evaluations( self, diff --git a/client/valor/coretypes.py b/client/valor/coretypes.py index 7f20970e4..4dd48e435 100644 --- a/client/valor/coretypes.py +++ b/client/valor/coretypes.py @@ -32,33 +32,6 @@ from valor.schemas import List as SymbolicList from valor.schemas import StaticCollection, String -FilterType = Union[list, dict, Filter] # TODO - Remove this - - -def _format_filter(filter_by: Optional[FilterType]) -> Filter: - """ - Formats the various filter or constraint representations into a 'schemas.Filter' object. - - Parameters - ---------- - filter_by : FilterType, optional - The reference filter. - - Returns - ------- - valor.schemas.Filter - A properly formatted 'schemas.Filter' object. 
- """ - if isinstance(filter_by, Filter): - return filter_by - elif isinstance(filter_by, list) or filter_by is None: - filter_by = filter_by if filter_by else [] - return Filter.create(filter_by) - elif isinstance(filter_by, dict): - return Filter(**filter_by) - else: - raise TypeError - class GroundTruth(StaticCollection): """ @@ -183,7 +156,7 @@ def __init__( The names of the datasets the model was evaluated over. model_name : str The name of the evaluated model. - filters : schemas.Filter + filters : dict The filter used to select data partitions for evaluation. status : EvaluationStatus The status of the evaluation. @@ -205,7 +178,7 @@ def update( id: int, dataset_names: list[str], model_name: str, - filters: Filter, + filters: dict, parameters: EvaluationParameters, status: EvaluationStatus, metrics: List[Dict], @@ -217,9 +190,7 @@ def update( self.id = id self.dataset_names = dataset_names self.model_name = model_name - self.filters = ( - Filter(**filters) if isinstance(filters, dict) else Filter() - ) + self.filters = filters self.parameters = ( EvaluationParameters(**parameters) if isinstance(parameters, dict) @@ -304,7 +275,7 @@ def to_dict(self) -> dict: "id": self.id, "dataset_names": self.dataset_names, "model_name": self.model_name, - "filters": asdict(self.filters), + "filters": self.filters, "parameters": asdict(self.parameters), "status": self.status.value, "metrics": self.metrics, @@ -545,32 +516,24 @@ def get_labels( """ return Client(self.conn).get_labels_from_dataset(self) - def get_datums( - self, filter_by: Optional[FilterType] = None - ) -> List[Datum]: + def get_datums(self, filters: Optional[Filter] = None) -> List[Datum]: """ Get all datums associated with a given dataset. Parameters ---------- - filter_by - Optional constraints to filter by. + filters : Filter, optional + An optional datum filter. Returns ---------- List[Datum] A list of `Datums` associated with the dataset. """ - filters = _format_filter(filter_by) - if isinstance(filters, Filter): - filters = asdict(filters) - - if filters.get("dataset_names"): - raise ValueError( - "Cannot filter by dataset_names when calling `Dataset.get_datums`." - ) - filters["dataset_names"] = [self.name] # type: ignore - return Client(self.conn).get_datums(filter_by=filters) + if filters is None: + filters = Filter() + filters.datasets = Dataset.name == self.name # type: ignore - #issue 605 + return Client(self.conn).get_datums(filters=filters) def get_evaluations( self, @@ -814,33 +777,6 @@ def finalize_inferences(self, dataset: Union[Dataset, str]) -> None: dataset=dataset, model=self ) - def _format_constraints( - self, - datasets: Optional[Union[Dataset, List[Dataset]]] = None, - filter_by: Optional[FilterType] = None, - ) -> Filter: - """Formats the 'filter' for any evaluation requests.""" - - # get list of dataset names - dataset_names_from_obj = [] - if isinstance(datasets, list): - dataset_names_from_obj = [dataset.name for dataset in datasets] - elif isinstance(datasets, Dataset): - dataset_names_from_obj = [datasets.name] - - # create a 'schemas.Filter' object from the constraints. 
- filters = _format_filter(filter_by) - - # reset model name - filters.model_names = None - filters.model_metadata = None - - # set dataset names - if not filters.dataset_names: - filters.dataset_names = [] - filters.dataset_names.extend(dataset_names_from_obj) # type: ignore - return filters - def _create_label_map( self, label_map: Optional[Dict[Label, Label]], @@ -879,7 +815,7 @@ def _create_label_map( def evaluate_classification( self, datasets: Union[Dataset, List[Dataset]], - filter_by: Optional[FilterType] = None, + filters: Optional[Filter] = None, label_map: Optional[Dict[Label, Label]] = None, pr_curve_max_examples: int = 1, metrics_to_return: Optional[List[MetricType]] = None, @@ -892,7 +828,7 @@ def evaluate_classification( ---------- datasets : Union[Dataset, List[Dataset]], optional The dataset or list of datasets to evaluate against. - filter_by : FilterType, optional + filters : Filter, optional Optional set of constraints to filter evaluation by. label_map : Dict[Label, Label], optional Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models. @@ -906,7 +842,7 @@ def evaluate_classification( Evaluation A job object that can be used to track the status of the job and get the metrics of it upon completion. """ - if not datasets and not filter_by: + if not datasets and not filters: raise ValueError( "Evaluation requires the definition of either datasets, dataset filters or both." ) @@ -918,8 +854,8 @@ def evaluate_classification( ) # format request - filters = self._format_constraints(datasets, filter_by) datasets = datasets if isinstance(datasets, list) else [datasets] + filters = filters if filters else Filter() request = EvaluationRequest( dataset_names=[dataset.name for dataset in datasets], # type: ignore - issue #604 model_names=[self.name], # type: ignore - issue #604 @@ -943,7 +879,7 @@ def evaluate_classification( def evaluate_detection( self, datasets: Union[Dataset, List[Dataset]], - filter_by: Optional[FilterType] = None, + filters: Optional[Filter] = None, convert_annotations_to_type: Optional[AnnotationType] = None, iou_thresholds_to_compute: Optional[List[float]] = None, iou_thresholds_to_return: Optional[List[float]] = None, @@ -961,7 +897,7 @@ def evaluate_detection( ---------- datasets : Union[Dataset, List[Dataset]], optional The dataset or list of datasets to evaluate against. - filter_by : FilterType, optional + filters : Filter, optional Optional set of constraints to filter evaluation by. convert_annotations_to_type : enums.AnnotationType, optional Forces the object detection evaluation to compute over this type. 
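Reviewer note: with `_format_constraints` and the `filter_by` list removed, call sites compose a `Filter` directly and pass it via the new `filters` keyword. A minimal sketch of the migrated convention, assuming `model` and `dataset` are already-created `Model` and `Dataset` instances (their setup is outside this diff) and that the label key "k1" is illustrative:

from valor import Dataset, Filter, Label, Model
from valor.schemas import And


def run_clf_eval(model: Model, dataset: Dataset):
    """Sketch: evaluate with the new `filters` keyword (replaces `filter_by`)."""
    # Compose one expression tree for the label target; the same symbolic
    # comparisons are exercised by the updated client unit tests above.
    flt = Filter(
        labels=And(
            Label.key == "k1",  # illustrative key, not part of this diff
            Label.score > 0.5,
        ),
    )
    # Previously: model.evaluate_classification(dataset, filter_by=[...])
    return model.evaluate_classification(datasets=dataset, filters=flt)
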
@@ -1013,8 +949,8 @@ def evaluate_detection( pr_curve_iou_threshold=pr_curve_iou_threshold, pr_curve_max_examples=pr_curve_max_examples, ) - filters = self._format_constraints(datasets, filter_by) datasets = datasets if isinstance(datasets, list) else [datasets] + filters = filters if filters else Filter() request = EvaluationRequest( dataset_names=[dataset.name for dataset in datasets], # type: ignore - issue #604 model_names=[self.name], # type: ignore - issue #604 @@ -1033,7 +969,7 @@ def evaluate_detection( def evaluate_segmentation( self, datasets: Union[Dataset, List[Dataset]], - filter_by: Optional[FilterType] = None, + filters: Optional[Filter] = None, label_map: Optional[Dict[Label, Label]] = None, metrics_to_return: Optional[List[MetricType]] = None, allow_retries: bool = False, @@ -1045,7 +981,7 @@ def evaluate_segmentation( ---------- datasets : Union[Dataset, List[Dataset]], optional The dataset or list of datasets to evaluate against. - filter_by : FilterType, optional + filters : Filter, optional Optional set of constraints to filter evaluation by. label_map : Dict[Label, Label], optional Optional mapping of individual labels to a grouper label. Useful when you need to evaluate performance using labels that differ across datasets and models. @@ -1067,8 +1003,8 @@ def evaluate_segmentation( ) # format request - filters = self._format_constraints(datasets, filter_by) datasets = datasets if isinstance(datasets, list) else [datasets] + filters = filters if filters else Filter() request = EvaluationRequest( dataset_names=[dataset.name for dataset in datasets], # type: ignore - issue #604 model_names=[self.name], # type: ignore - issue #604 @@ -1174,14 +1110,14 @@ def connect( def get_labels( self, - filter_by: Optional[FilterType] = None, + filters: Optional[Filter] = None, ) -> List[Label]: """ Gets all labels using an optional filter. Parameters ---------- - filter_by : FilterType, optional + filters : Filter, optional Optional constraints to filter by. Returns @@ -1189,9 +1125,10 @@ def get_labels( List[valor.Label] A list of labels. """ - filters = _format_filter(filter_by) - filters = asdict(filters) - return [Label(**label) for label in self.conn.get_labels(filters)] + filters = filters if filters is not None else Filter() + return [ + Label(**label) for label in self.conn.get_labels(filters.to_dict()) + ] def get_labels_from_dataset( self, dataset: Union[Dataset, str] @@ -1365,14 +1302,14 @@ def get_dataset( def get_datasets( self, - filter_by: Optional[FilterType] = None, + filters: Optional[Filter] = None, ) -> List[Dataset]: """ Get all datasets, with an option to filter results according to some user-defined parameters. Parameters ---------- - filter_by : FilterType, optional + filters : Filter, optional Optional constraints to filter by. Returns @@ -1380,25 +1317,23 @@ def get_datasets( List[valor.Dataset] A list of datasets. """ - filters = _format_filter(filter_by) - if isinstance(filters, Filter): - filters = asdict(filters) dataset_list = [] - for kwargs in self.conn.get_datasets(filters): + filters = filters if filters is not None else Filter() + for kwargs in self.conn.get_datasets(filters.to_dict()): dataset = Dataset.decode_value({**kwargs, "connection": self.conn}) dataset_list.append(dataset) return dataset_list def get_datums( self, - filter_by: Optional[FilterType] = None, + filters: Optional[Filter] = None, ) -> List[Datum]: """ Get all datums using an optional filter. 
Parameters ---------- - filter_by : FilterType, optional + filters : Filter, optional Optional constraints to filter by. Returns @@ -1406,12 +1341,11 @@ def get_datums( List[valor.Datum] A list datums. """ - filters = _format_filter(filter_by) - if isinstance(filters, Filter): - filters = asdict(filters) + + filters = filters if filters is not None else Filter() return [ Datum.decode_value(datum) - for datum in self.conn.get_datums(filters) + for datum in self.conn.get_datums(filters.to_dict()) ] def get_datum( @@ -1630,14 +1564,14 @@ def get_model( def get_models( self, - filter_by: Optional[FilterType] = None, + filters: Optional[Filter] = None, ) -> List[Model]: """ Get all models using an optional filter. Parameters ---------- - filter_by : FilterType, optional + filters : Filter, optional Optional constraints to filter by. Returns @@ -1645,11 +1579,9 @@ def get_models( List[valor.Model] A list of models. """ - filters = _format_filter(filter_by) - if isinstance(filters, Filter): - filters = asdict(filters) model_list = [] - for kwargs in self.conn.get_models(filters): + filters = filters if filters is not None else Filter() + for kwargs in self.conn.get_models(filters.to_dict()): model = Model.decode_value({**kwargs, "connection": self.conn}) model_list.append(model) return model_list @@ -1801,7 +1733,7 @@ def evaluate( return [ Evaluation(**evaluation) for evaluation in self.conn.evaluate( - request, allow_retries=allow_retries + request.to_dict(), allow_retries=allow_retries ) ] diff --git a/client/valor/schemas/__init__.py b/client/valor/schemas/__init__.py index eec5cc4d8..fdc735793 100644 --- a/client/valor/schemas/__init__.py +++ b/client/valor/schemas/__init__.py @@ -1,25 +1,24 @@ from .evaluation import EvaluationParameters, EvaluationRequest -from .filters import Constraint, Filter +from .filters import Filter from .symbolic.collections import Annotation, Datum, Label, StaticCollection from .symbolic.operators import ( And, Eq, - Ge, Gt, + Gte, Inside, Intersects, IsNotNull, IsNull, - Le, Lt, + Lte, Ne, - Negate, + Not, Or, Outside, - Xor, ) from .symbolic.types import ( - Bool, + Boolean, Box, Date, DateTime, @@ -50,28 +49,26 @@ "EvaluationRequest", "EvaluationParameters", "Filter", - "Constraint", "And", "Eq", - "Ge", + "Gte", "Gt", "Inside", "Intersects", "IsNotNull", "IsNull", - "Le", + "Lte", "Lt", "Ne", - "Negate", + "Not", "Or", "Outside", - "Xor", "Symbol", "Variable", "Equatable", "Quantifiable", "Spatial", - "Bool", + "Boolean", "Box", "Integer", "Float", diff --git a/client/valor/schemas/evaluation.py b/client/valor/schemas/evaluation.py index 6391d3e47..e1613a7a1 100644 --- a/client/valor/schemas/evaluation.py +++ b/client/valor/schemas/evaluation.py @@ -1,4 +1,4 @@ -from dataclasses import dataclass, field +from dataclasses import asdict, dataclass, field from typing import List, Optional, Union from valor.enums import AnnotationType, MetricType, TaskType @@ -57,7 +57,7 @@ class EvaluationRequest: The list of datasets we want to evaluate by name. model_names : List[str] The list of models we want to evaluate by name. - filters : schemas.Filter + filters : dict The filter object used to define what the model(s) is evaluating against. parameters : EvaluationParameters Any parameters that are used to modify an evaluation method. 
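Reviewer note: the hunk below makes `filters` default to an empty `Filter` and adds an explicit `to_dict`, so the client no longer serializes the whole request with `asdict`. A sketch of the resulting serialization path; treat `task_type` as an illustrative `EvaluationParameters` field, since only the request-level fields are shown in this diff:

from valor.enums import TaskType
from valor.schemas import EvaluationParameters, EvaluationRequest

# `filters` is omitted here and falls back to the new default_factory=Filter.
request = EvaluationRequest(
    dataset_names=["dataset1"],
    model_names=["model1"],
    parameters=EvaluationParameters(task_type=TaskType.CLASSIFICATION),  # assumed field
)

# to_dict() yields the JSON body that Client.evaluate(request.to_dict()) now posts.
body = request.to_dict()
assert set(body) == {"dataset_names", "model_names", "parameters", "filters"}
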
@@ -66,10 +66,24 @@ class EvaluationRequest: dataset_names: Union[str, List[str]] model_names: Union[str, List[str]] parameters: EvaluationParameters - filters: Optional[Filter] = field(default=None) + filters: Filter = field(default_factory=Filter) def __post_init__(self): if isinstance(self.filters, dict): self.filters = Filter(**self.filters) + elif self.filters is None: + self.filters = Filter() + if isinstance(self.parameters, dict): self.parameters = EvaluationParameters(**self.parameters) + + def to_dict(self) -> dict: + """ + Converts the request into a JSON-compatible dictionary. + """ + return { + "dataset_names": self.dataset_names, + "model_names": self.model_names, + "parameters": asdict(self.parameters), + "filters": self.filters.to_dict(), + } diff --git a/client/valor/schemas/filters.py b/client/valor/schemas/filters.py index 1e3c1b7f4..a7b970ecd 100644 --- a/client/valor/schemas/filters.py +++ b/client/valor/schemas/filters.py @@ -1,454 +1,70 @@ -from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Union +from dataclasses import asdict, dataclass +from typing import Optional, Union -from valor.enums import TaskType from valor.schemas.symbolic.operators import ( And, - AppendableFunction, - Function, + Contains, + Eq, + FunctionType, + Gt, + Gte, Inside, Intersects, IsNotNull, IsNull, - Negate, - OneArgumentFunction, + Lt, + Lte, + Ne, + Not, Or, Outside, - TwoArgumentFunction, - Xor, ) -from valor.schemas.symbolic.types import ( - Date, - DateTime, - Duration, - LineString, - MultiLineString, - MultiPoint, - MultiPolygon, - Point, - Polygon, - Time, - Variable, -) - - -@dataclass -class Constraint: - """ - Represents a constraint with a value and an operator. - - Attributes: - value : Any - The value associated with the constraint. - operator : str - The operator used to define the constraint. 
- """ - - value: Any - operator: str - - -def _convert_symbol_to_attribute_name(symbol_name): - map_sym_to_attr = { - "dataset.name": "dataset_names", - "dataset.metadata": "dataset_metadata", - "model.name": "model_names", - "model.metadata": "model_metadata", - "datum.uid": "datum_uids", - "datum.metadata": "datum_metadata", - "annotation.task_type": "task_types", - "annotation.metadata": "annotation_metadata", - "annotation.bounding_box": "require_bounding_box", - "annotation.bounding_box.area": "bounding_box_area", - "annotation.polygon": "require_polygon", - "annotation.polygon.area": "polygon_area", - "annotation.raster": "require_raster", - "annotation.raster.area": "raster_area", - "annotation.labels": "labels", - "label.id": "label_ids", - "label.key": "label_keys", - "label.score": "label_scores", - } - return map_sym_to_attr[symbol_name] - - -def _convert_expression_to_constraint(expr: Function): - # extract value - if isinstance(expr, TwoArgumentFunction): - variable = expr.rhs - if isinstance( - variable, - ( - Point, - MultiPoint, - LineString, - MultiLineString, - Polygon, - MultiPolygon, - ), - ): - value = { - "type": type(variable).__name__, - "coordinates": variable.get_value(), - } - elif isinstance(variable, (DateTime, Date, Time, Duration)): - value = {type(variable).__name__.lower(): variable.encode_value()} - else: - value = variable.encode_value() - else: - value = None - - # extract operator - if hasattr(expr, "_operator") and expr._operator is not None: - op = expr._operator - elif isinstance(expr, Inside): - op = "inside" - elif isinstance(expr, Intersects): - op = "intersect" - elif isinstance(expr, Outside): - op = "outside" - elif isinstance(expr, IsNotNull): - op = "exists" - elif isinstance(expr, IsNull): - op = "is_none" - else: - raise NotImplementedError( - f"Function '{type(expr)}' has not been implemented by the API." - ) - - return Constraint(value=value, operator=op) - - -def _scan_one_arg_function(fn: OneArgumentFunction): - if not fn.arg.is_symbolic: - raise ValueError( - "Single argument functions should take a symbol as input." - ) - - -def _scan_two_arg_function(fn: TwoArgumentFunction): - if not isinstance(fn.lhs, Variable) or not isinstance(fn.rhs, Variable): - raise ValueError("Nested arguments are currently unsupported.") - elif not fn.lhs.is_symbolic: - raise ValueError( - f"Values on the lhs of an operator are currently unsupported. {fn.lhs}" - ) - elif not fn.rhs.is_value: - raise ValueError( - f"Symbols on the rhs of an operator are currently unsupported. {fn.rhs}" - ) - -def _scan_appendable_function(fn: AppendableFunction): - if not isinstance(fn, (And, Or)): - raise ValueError( - f"Operation '{type(fn)}' is currently unsupported by the API." - ) - - symbols = set() - for arg in fn._args: - if not isinstance(fn, Function): - raise ValueError( - f"Expected a function but received value with type '{type(fn)}'" - ) - - # scan for nested logic - if isinstance(arg, (Or, And, Xor, Negate)): - raise NotImplementedError - - # scan for symbol/value positioning - if isinstance(arg, OneArgumentFunction): - _scan_one_arg_function(arg) - elif isinstance(arg, TwoArgumentFunction): - _scan_two_arg_function(arg) - - symbols.add(arg._args[0].get_symbol()) - - # check that only one symbol was defined per statement - if len(symbols) > 1: - raise ValueError( - f"Defining more than one variable per statement is currently unsupported. 
{symbols}" - ) - symbol = list(symbols)[0] - - # check that symbol is compatible with the logical operation - if isinstance(fn, And) and not ( - symbol._name in Filter._supports_and() - or symbol._attribute in Filter._supports_and() - ): - raise ValueError( - f"Symbol '{str(symbol)}' currently does not support the 'AND' operation." - ) - elif isinstance(fn, Or) and not ( - symbol._name in Filter._supports_or() - or symbol._attribute in Filter._supports_or() - ): - raise ValueError( - f"Symbol '{str(symbol)}' currently does not support the 'AND' operation." - ) - - -def _parse_listed_expressions(flist): - expressions = {} - for row in flist: - if not isinstance(row, Function): - raise ValueError( - f"Expected a function but received value with type '{type(row)}'" - ) - elif isinstance(row, AppendableFunction): - _scan_appendable_function(row) - symbol = row._args[0]._args[0].get_symbol() - constraints = [ - _convert_expression_to_constraint(arg) for arg in row._args - ] - elif isinstance(row, TwoArgumentFunction): - _scan_two_arg_function(row) - symbol = row.lhs.get_symbol() - constraints = [_convert_expression_to_constraint(row)] - elif isinstance(row, OneArgumentFunction): - _scan_one_arg_function(row) - symbol = row.arg.get_symbol() - constraints = [_convert_expression_to_constraint(row)] - else: - raise NotImplementedError - - symbol_name = f"{symbol._owner}.{symbol._name}" - if symbol._attribute: - symbol_name += f".{symbol._attribute}" - attribute_name = _convert_symbol_to_attribute_name(symbol_name) - if symbol._key: - if attribute_name not in expressions: - expressions[attribute_name] = dict() - if symbol._key not in expressions[attribute_name]: - expressions[attribute_name][symbol._key] = list() - expressions[attribute_name][symbol._key] += constraints - else: - if attribute_name not in expressions: - expressions[attribute_name] = list() - expressions[attribute_name] += constraints - - return expressions +FunctionTypeTuple = ( + And, + Or, + Not, + IsNull, + IsNotNull, + Eq, + Ne, + Gt, + Gte, + Lt, + Lte, + Intersects, + Inside, + Outside, + Contains, +) @dataclass class Filter: - """ - Used to filter Evaluations according to specific, user-defined criteria. - - Attributes - ---------- - dataset_names : List[str], optional - A list of `Dataset` names to filter on. - dataset_metadata : Dict[str, List[Constraint]], optional - A dictionary of `Dataset` metadata to filter on. - model_names : List[str], optional - A list of `Model` names to filter on. - model_metadata : Dict[str, List[Constraint]], optional - A dictionary of `Model` metadata to filter on. - datum_uids : List[str], optional - A list of `Datum` UIDs to filter on. - datum_metadata : Dict[str, List[Constraint]], optional - A dictionary of `Datum` metadata to filter on. - task_types : List[TaskType], optional - A list of task types to filter on. - annotation_metadata : Dict[str, List[Constraint]], optional - A dictionary of `Annotation` metadata to filter on. - require_bounding_box : bool, optional - A toggle for filtering by bounding boxes. - bounding_box_area : bool, optional - An optional constraint to filter by bounding box area. - require_polygon : bool, optional - A toggle for filtering by polygons. - polygon_area : bool, optional - An optional constraint to filter by polygon area. - require_raster : bool, optional - A toggle for filtering by rasters. - raster_area : bool, optional - An optional constraint to filter by raster area. - labels : List[Label], optional - A list of `Labels' to filter on. 
- label_ids : List[int], optional - A list of label row id's. - label_keys : List[str], optional - A list of `Label` keys to filter on. - label_scores : List[Constraint], optional - A list of `Constraints` which are used to filter `Evaluations` according to the `Model`'s prediction scores. - - Raises - ------ - TypeError - If `value` isn't of the correct type. - ValueError - If the `operator` doesn't match one of the allowed patterns. - """ - - # datasets - dataset_names: Optional[List[str]] = None - dataset_metadata: Optional[Dict[str, List[Constraint]]] = None - - # models - model_names: Optional[List[str]] = None - model_metadata: Optional[Dict[str, List[Constraint]]] = None - - # datums - datum_uids: Optional[List[str]] = None - datum_metadata: Optional[Dict[str, List[Constraint]]] = None - - # annotations - task_types: Optional[List[TaskType]] = None - annotation_metadata: Optional[Dict[str, List[Constraint]]] = None - - # geometries - require_bounding_box: Optional[bool] = None - bounding_box_area: Optional[List[Constraint]] = None - require_polygon: Optional[bool] = None - polygon_area: Optional[List[Constraint]] = None - require_raster: Optional[bool] = None - raster_area: Optional[List[Constraint]] = None - - # labels - labels: Optional[List[Dict[str, str]]] = None - label_ids: Optional[List[int]] = None - label_keys: Optional[List[str]] = None - label_scores: Optional[List[Constraint]] = None - - @staticmethod - def _supports_and(): - return { - "area", - "score", - "metadata", - } - - @staticmethod - def _supports_or(): - return { - "name", - "uid", - "task_type", - "labels", - "keys", - } - - def __post_init__(self): - def _unpack_metadata(metadata: Optional[dict]) -> Union[dict, None]: - if metadata is None: - return None - for k, vlist in metadata.items(): - metadata[k] = [ - v if isinstance(v, Constraint) else Constraint(**v) - for v in vlist - ] - return metadata - - # unpack metadata - self.dataset_metadata = _unpack_metadata(self.dataset_metadata) - self.model_metadata = _unpack_metadata(self.model_metadata) - self.datum_metadata = _unpack_metadata(self.datum_metadata) - self.annotation_metadata = _unpack_metadata(self.annotation_metadata) - - def _unpack_list( - vlist: Optional[list], object_type: type - ) -> Optional[list]: - def _handle_conversion(v, object_type): - if object_type is Constraint: - return object_type(**v) - else: - return object_type(v) - - if vlist is None: - return None - - return [ - ( - v - if isinstance(v, object_type) - else _handle_conversion(v=v, object_type=object_type) - ) - for v in vlist - ] - - # unpack tasktypes - self.task_types = _unpack_list(self.task_types, TaskType) - - # unpack area - self.bounding_box_area = _unpack_list( - self.bounding_box_area, Constraint - ) - self.polygon_area = _unpack_list(self.polygon_area, Constraint) - self.raster_area = _unpack_list(self.raster_area, Constraint) - - # scores - self.label_scores = _unpack_list(self.label_scores, Constraint) - - @classmethod - def create(cls, expressions: List[Any]): - """ - Parses a list of `BinaryExpression` to create a `schemas.Filter` object. - - Parameters - ---------- - expressions: Sequence[Union[BinaryExpression, Sequence[BinaryExpression]]] - A list of (lists of) `BinaryExpressions' to parse into a `Filter` object. 
- """ - - constraints = _parse_listed_expressions(expressions) - - # create filter - filter_request = cls() - - # metadata constraints - for attr in [ - "dataset_metadata", - "model_metadata", - "datum_metadata", - "annotation_metadata", - "bounding_box_area", - "polygon_area", - "raster_area", - "label_scores", - ]: - if attr in constraints: - setattr(filter_request, attr, constraints[attr]) - - # boolean constraints - for attr in [ - "require_bounding_box", - "require_polygon", - "require_raster", - ]: - if attr in constraints: - for constraint in constraints[attr]: - if constraint.operator == "exists": - setattr(filter_request, attr, True) - elif constraint.operator == "is_none": - setattr(filter_request, attr, False) - - # equality constraints - for attr in [ - "dataset_names", - "model_names", - "datum_uids", - "task_types", - "label_keys", - ]: - if attr in constraints: - setattr( - filter_request, - attr, - [expr.value for expr in constraints[attr]], - ) - - # edge case - label list - if "labels" in constraints: - setattr( - filter_request, - "labels", - [ - {label["key"]: label["value"]} - for labels in constraints["labels"] - for label in labels.value - ], - ) - - return filter_request + datasets: Optional[Union[dict, FunctionType]] = None + models: Optional[Union[dict, FunctionType]] = None + datums: Optional[Union[dict, FunctionType]] = None + annotations: Optional[Union[dict, FunctionType]] = None + groundtruths: Optional[Union[dict, FunctionType]] = None + predictions: Optional[Union[dict, FunctionType]] = None + labels: Optional[Union[dict, FunctionType]] = None + embeddings: Optional[Union[dict, FunctionType]] = None + + def to_dict(self) -> dict: + if isinstance(self.datasets, FunctionTypeTuple): + self.datasets = self.datasets.to_dict() + if isinstance(self.models, FunctionTypeTuple): + self.models = self.models.to_dict() + if isinstance(self.datums, FunctionTypeTuple): + self.datums = self.datums.to_dict() + if isinstance(self.annotations, FunctionTypeTuple): + self.annotations = self.annotations.to_dict() + if isinstance(self.groundtruths, FunctionTypeTuple): + self.groundtruths = self.groundtruths.to_dict() + if isinstance(self.predictions, FunctionTypeTuple): + self.predictions = self.predictions.to_dict() + if isinstance(self.labels, FunctionTypeTuple): + self.labels = self.labels.to_dict() + if isinstance(self.embeddings, FunctionTypeTuple): + self.embeddings = self.embeddings.to_dict() + return asdict(self) diff --git a/client/valor/schemas/symbolic/collections.py b/client/valor/schemas/symbolic/collections.py index ca8dc74e8..05a6955ad 100644 --- a/client/valor/schemas/symbolic/collections.py +++ b/client/valor/schemas/symbolic/collections.py @@ -3,7 +3,7 @@ import numpy as np from valor.schemas.symbolic.types import ( - Bool, + Boolean, Box, Dictionary, Embedding, @@ -326,7 +326,9 @@ class Annotation(StaticCollection): embedding: Embedding = Embedding.symbolic( owner="annotation", name="embedding" ) - is_instance: Bool = Bool.symbolic(owner="annotation", name="is_instance") + is_instance: Boolean = Boolean.symbolic( + owner="annotation", name="is_instance" + ) implied_task_types: SymbolicList[String] = SymbolicList[String].symbolic( owner="annotation", name="implied_task_types" ) @@ -385,7 +387,7 @@ def formatting() -> Dict[str, Any]: "polygon": Polygon.nullable, "raster": Raster.nullable, "embedding": Embedding.nullable, - "is_instance": Bool.nullable, + "is_instance": Boolean.nullable, "implied_task_types": SymbolicList, } diff --git 
a/client/valor/schemas/symbolic/operators.py b/client/valor/schemas/symbolic/operators.py index 734d0a855..fb39bce1d 100644 --- a/client/valor/schemas/symbolic/operators.py +++ b/client/valor/schemas/symbolic/operators.py @@ -1,31 +1,39 @@ -from typing import Any +import warnings +from typing import Any, Optional, Union -class Function: - """Base class for defining a function.""" +class Condition: + """Base class for defining a conditional operation.""" - _operator = None + def __init__(self, lhs: Any, rhs: Optional[Any] = None) -> None: + """ + Create a condition. + + Parameters + ---------- + lhs : Variable + A variable. + rhs : Variable, optional + An optional rhs variable. + """ + # validate lhs + if not lhs.is_symbolic: + warnings.warn( + "Values are currently not supported as the lhs operand in the API.", + RuntimeWarning, + ) - def __init__(self, *args) -> None: - for arg in args: - if not hasattr(arg, "to_dict"): - raise ValueError( - "Functions can only take arguments that have a 'to_dict' method defined." + # validate rhs - symbols are not currently supported + if rhs is not None: + if rhs.is_symbolic: + warnings.warn( + "Symbols are currently not supported as the rhs operand in the API.", + RuntimeWarning, + ) - self._args = list(args) - - def __repr__(self): - args = ", ".join([arg.__repr__() for arg in self._args]) - return f"{type(self).__name__}({args})" - def __str__(self): - values = [arg.__repr__() for arg in self._args] - if self._operator is None: - args = ", ".join(values) - return f"{type(self).__name__}({args})" - else: - args = f" {self._operator} ".join(values) - return f"({args})" + self.lhs = lhs + self.rhs = rhs + self.op = type(self).__name__.lower() def __and__(self, other: Any): return And(self, other) @@ -33,191 +41,200 @@ def __and__(self, other: Any): def __or__(self, other: Any): return Or(self, other) - def __xor__(self, other: Any): - return Xor(self, other) - def __invert__(self): - return Negate(self) + return Not(self) def to_dict(self): - """Encode to a JSON-compatible dictionary.""" return { - "op": type(self).__name__.lower(), - "args": [arg.to_dict() for arg in self._args], + "lhs": self.lhs.to_dict(), + "rhs": self.rhs.to_dict() if self.rhs is not None else None, + "op": self.op, } -class OneArgumentFunction(Function): - """Base class for defining single argument functions.""" - - def __init__(self, arg) -> None: - super().__init__(arg) +class Function: + """Base class for defining a logical function.""" - @property - def arg(self): - """Returns the argument.""" - return self._args[0] + def __init__(self, *args) -> None: + if len(args) == 0: + raise ValueError("Expected at least one argument.") - def to_dict(self): - """Encode to a JSON-compatible dictionary.""" - return {"op": type(self).__name__.lower(), "arg": self.arg.to_dict()} + self._args = [] + for arg in args: + if not hasattr(arg, "to_dict"): + raise ValueError( + f"Arguments should be symbolic or functional. Received '{arg}'."
+ ) + if isinstance(arg, type(self)): + self._args.extend(arg._args) + else: + self._args.append(arg) + self._args = self._args if len(self._args) > 1 else self._args[0] + def __repr__(self): + args = ", ".join([arg.__repr__() for arg in self._args]) + return f"{type(self).__name__}({args})" -class TwoArgumentFunction(Function): - """Base class for defining two argument functions.""" + def __str__(self): + values = [arg.__repr__() for arg in self._args] + args = ", ".join(values) + return f"{type(self).__name__}({args})" - def __init__(self, lhs: Any, rhs: Any) -> None: - self._lhs = lhs - self._rhs = rhs - super().__init__(lhs, rhs) + def __and__(self, other: Any): + return And(self, other) - @property - def lhs(self): - """Returns the lhs operand.""" - return self._lhs + def __or__(self, other: Any): + return Or(self, other) - @property - def rhs(self): - """Returns the rhs operand.""" - return self._rhs + def __invert__(self): + return Not(self) def to_dict(self): """Encode to a JSON-compatible dictionary.""" - return { - "op": type(self).__name__.lower(), - "lhs": self.lhs.to_dict(), - "rhs": self.rhs.to_dict(), - } - - -class AppendableFunction(Function): - """Base class for defining functions with an unlimited number of arguments.""" - - _function = None - - def __init__(self, *args) -> None: - """ - Appendable function. - """ - if len(args) < 2: - raise TypeError( - f"missing {2 - len(args)} required positional argument" - ) - flat_args = [] - for arg in args: - if isinstance(arg, type(self)): - flat_args += arg._args - else: - flat_args.append(arg) - super().__init__(*flat_args) - - def append(self, value: Any): - """Appends an argument to the function.""" - self._args.append(value) - return self + args = ( + [arg.to_dict() for arg in self._args] + if isinstance(self._args, list) + else self._args.to_dict() + ) + return {"op": type(self).__name__.lower(), "args": args} -class And(AppendableFunction): +class And(Function): """Implementation of logical AND (&).""" - _operator = "&" + def __init__(self, *args): + if len(args) < 2: + raise ValueError("Expected at least two arguments.") + super().__init__(*args) def __and__(self, other: Any): - self.append(other) + if isinstance(other, And): + self._args.extend(other._args) + else: + self._args.append(other) return self -class Or(AppendableFunction): +class Or(Function): """Implementation of logical OR (|).""" - _operator = "|" + def __init__(self, *args): + if len(args) < 2: + raise ValueError("Expected at least two arguments.") + super().__init__(*args) def __or__(self, other: Any): - self.append(other) - return self - - -class Xor(AppendableFunction): - """Implementation of logical XOR (^).""" - - _operator = "^" - - def __xor__(self, other: Any): - self.append(other) + if isinstance(other, Or): + self._args.extend(other._args) + else: + self._args.append(other) return self -class Negate(OneArgumentFunction): +class Not(Function): """Implementation of logical negation (~).""" - _operator = "~" + def __init__(self, *args): + if len(args) != 1: + raise ValueError("Negation only takes one argument.") + # double negation is reduced by __invert__ below; __init__ cannot + # return a different object, so Not(Not(x)) is stored as written. + super().__init__(*args) def __invert__(self): """Inverts negation so return contents.""" - return self.arg + if isinstance(self._args, list): + raise ValueError("Negation only takes one argument.") + return self._args -class IsNull(OneArgumentFunction): +class IsNull(Condition): """Implementation of is null value check.""" pass -class IsNotNull(OneArgumentFunction): +class 
IsNotNull(Condition): """Implementation of is not null value check.""" pass -class Eq(TwoArgumentFunction): +class Eq(Condition): """Implementation of the equality operator '=='.""" - _operator = "==" + pass -class Ne(TwoArgumentFunction): +class Ne(Condition): """Implementation of the inequality operator '!='.""" - _operator = "!=" + def to_dict(self): + return Not(Eq(lhs=self.lhs, rhs=self.rhs)).to_dict() -class Gt(TwoArgumentFunction): +class Gt(Condition): """Implementation of the greater-than operator '>'.""" - _operator = ">" + pass -class Ge(TwoArgumentFunction): +class Gte(Condition): """Implementation of the greater-than or equal operator '>='.""" - _operator = ">=" + pass -class Lt(TwoArgumentFunction): +class Lt(Condition): """Implementation of the less-than operator '<'.""" - _operator = "<" + pass -class Le(TwoArgumentFunction): +class Lte(Condition): """Implementation of the less-than or equal operator '<='.""" - _operator = "<=" + pass -class Intersects(TwoArgumentFunction): +class Intersects(Condition): """Implementation of the spatial 'intersects' operator.""" pass -class Inside(TwoArgumentFunction): +class Inside(Condition): """Implementation of the spatial 'inside' operator.""" pass -class Outside(TwoArgumentFunction): +class Outside(Condition): """Implementation of the spatial 'outside' operator.""" pass + + +class Contains(Condition): + """Implementation of the list 'contains' operator.""" + + pass + + +FunctionType = Union[ + And, + Or, + Not, + IsNull, + IsNotNull, + Eq, + Ne, + Gt, + Gte, + Lt, + Lte, + Intersects, + Inside, + Outside, + Contains, +] diff --git a/client/valor/schemas/symbolic/types.py b/client/valor/schemas/symbolic/types.py index cf50e1de4..da3d4f69e 100644 --- a/client/valor/schemas/symbolic/types.py +++ b/client/valor/schemas/symbolic/types.py @@ -5,7 +5,7 @@ import warnings from base64 import b64decode, b64encode from collections.abc import MutableMapping -from typing import Iterator +from typing import Iterator, Optional import numpy as np import PIL.Image @@ -14,19 +14,18 @@ from valor.schemas.symbolic.operators import ( And, Eq, - Ge, Gt, + Gte, Inside, Intersects, IsNotNull, IsNull, - Le, Lt, + Lte, Ne, - Negate, + Not, Or, Outside, - Xor, ) @@ -58,46 +57,29 @@ def __init__( self, name: str, key: typing.Optional[str] = None, - attribute: typing.Optional[str] = None, - owner: typing.Optional[str] = None, ): - self._owner = owner.lower() if owner else None self._name = name.lower() self._key = key.lower() if key else None - self._attribute = attribute.lower() if attribute else None def __repr__(self): ret = f"{type(self).__name__}(" - if self._owner: - ret += f"owner='{self._owner}', " ret += f"name='{self._name}'" if self._key: ret += f", key='{self._key}'" - if self._attribute: - ret += f", attribute='{self._attribute}'" ret += ")" return ret def __str__(self): ret = "" - if self._owner: - ret += f"{self._owner}." 
ret += self._name if self._key is not None: ret += f"['{self._key}']" - if self._attribute: - ret += f".{self._attribute}" return ret def __eq__(self, other): if not isinstance(other, Symbol): return False - return ( - self._owner == other._owner - and self._name == other._name - and self._key == other._key - and self._attribute == other._attribute - ) + return self._name == other._name and self._key == other._key def __ne__(self, other): return not (self == other) @@ -105,15 +87,18 @@ def __ne__(self, other): def __hash__(self) -> int: return hash(self.__repr__()) - def to_dict(self): + @property + def name(self) -> str: + return self._name + + @property + def key(self) -> Optional[str]: + return self._key + + def to_dict(self) -> dict: return { - "type": "symbol", - "value": { - "owner": self._owner, - "name": self._name, - "key": self._key, - "attribute": self._attribute, - }, + "name": self._name, + "key": self._key, } @@ -191,11 +176,12 @@ def symbolic( An optional name describing the class that owns this symbol. """ name = cls.__name__.lower() if not name else name + symbol_name = ".".join( + [value for value in [owner, name, attribute] if value is not None] + ) symbol = Symbol( - name=name, + name=symbol_name, key=key, - attribute=attribute, - owner=owner, ) obj = cls.__new__(cls) obj._value = symbol @@ -224,8 +210,6 @@ def preprocess(cls, value: typing.Any): return cls.symbolic( name=value._name, key=value._key, - attribute=value._attribute, - owner=value._owner, ) elif cls.supports(value): # @@ -327,50 +311,50 @@ def get_symbol(self) -> Symbol: raise TypeError(f"{type(self).__name__} is a valued object.") return self._value - def is_none(self) -> typing.Union["Bool", IsNull]: + def is_none(self) -> typing.Union["Boolean", IsNull]: """Conditional whether variable is 'None'""" if self.is_value: - return Bool(self.get_value() is None) + return Boolean(self.get_value() is None) return IsNull(self) - def is_not_none(self) -> typing.Union["Bool", IsNotNull]: + def is_not_none(self) -> typing.Union["Boolean", IsNotNull]: """Conditional whether variable is not 'None'""" if self.is_value: - return Bool(self.get_value() is not None) + return Boolean(self.get_value() is not None) return IsNotNull(self) - def __eq__(self, value: typing.Any) -> typing.Union["Bool", Eq]: # type: ignore - overriding __eq__ + def __eq__(self, value: typing.Any) -> typing.Union["Boolean", Eq]: # type: ignore - overriding __eq__ raise AttributeError( f"'{type(self).__name__}' object has no attribute '__eq__'" ) - def __ne__(self, value: typing.Any) -> typing.Union["Bool", Ne]: # type: ignore - overriding __ne__ + def __ne__(self, value: typing.Any) -> typing.Union["Boolean", Ne]: # type: ignore - overriding __ne__ raise AttributeError( f"'{type(self).__name__}' object has no attribute '__ne__'" ) - def __gt__(self, value: typing.Any) -> typing.Union["Bool", Gt]: + def __gt__(self, value: typing.Any) -> typing.Union["Boolean", Gt]: raise AttributeError( f"'{type(self).__name__}' object has no attribute '__gt__'" ) - def __ge__(self, value: typing.Any) -> typing.Union["Bool", Ge]: + def __ge__(self, value: typing.Any) -> typing.Union["Boolean", Gte]: raise AttributeError( f"'{type(self).__name__}' object has no attribute '__ge__'" ) - def __lt__(self, value: typing.Any) -> typing.Union["Bool", Lt]: + def __lt__(self, value: typing.Any) -> typing.Union["Boolean", Lt]: raise AttributeError( f"'{type(self).__name__}' object has no attribute '__lt__'" ) - def __le__(self, value: typing.Any) -> typing.Union["Bool", Le]: + 
def __le__(self, value: typing.Any) -> typing.Union["Boolean", Lte]: raise AttributeError( f"'{type(self).__name__}' object has no attribute '__le__'" ) -class Bool(Variable): +class Boolean(Variable): """ Implementation of the built-in type 'bool' as a Variable. @@ -381,7 +365,7 @@ class Bool(Variable): Examples -------- - >>> Bool(True) + >>> Boolean(True) """ def __init__(self, value: bool): @@ -407,40 +391,34 @@ def __validate__(cls, value: typing.Any): f"Expected type '{bool}' received type '{type(value)}'" ) - def __eq__(self, value: typing.Any) -> typing.Union["Bool", Eq]: + def __eq__(self, value: typing.Any) -> typing.Union["Boolean", Eq]: other = self.preprocess(value) if self.is_value and other.is_value: return type(self)(self.get_value() is other.get_value()) return Eq(self, other) - def __ne__(self, value: typing.Any) -> typing.Union["Bool", Ne]: + def __ne__(self, value: typing.Any) -> typing.Union["Boolean", Ne]: other = self.preprocess(value) if self.is_value and other.is_value: return type(self)(self.get_value() is not other.get_value()) return Ne(self, other) - def __and__(self, value: typing.Any) -> typing.Union["Bool", And]: + def __and__(self, value: typing.Any) -> typing.Union["Boolean", And]: other = self.preprocess(value) if self.is_value and other.is_value: return type(self)(self.get_value() and other.get_value()) return And(self, other) - def __or__(self, value: typing.Any) -> typing.Union["Bool", Or]: + def __or__(self, value: typing.Any) -> typing.Union["Boolean", Or]: other = self.preprocess(value) if self.is_value and other.is_value: return type(self)(self.get_value() or other.get_value()) return Or(self, other) - def __xor__(self, value: typing.Any) -> typing.Union["Bool", Xor]: - other = self.preprocess(value) - if self.is_value and other.is_value: - return self != value - return Xor(self, other) - - def __invert__(self) -> typing.Union["Bool", Negate]: + def __invert__(self) -> typing.Union["Boolean", Not]: if self.is_value: return type(self)(not self.get_value()) - return Negate(self) + return Not(self) class Equatable(Variable): @@ -448,30 +426,30 @@ class Equatable(Variable): Variable modifier to handle equatable values. """ - def __eq__(self, value: typing.Any) -> typing.Union["Bool", Eq]: + def __eq__(self, value: typing.Any) -> typing.Union["Boolean", Eq]: other = self.preprocess(value) if self.is_value and other.is_value: lhs = self.encode_value() rhs = other.encode_value() if lhs is None: - return Bool(rhs is None) + return Boolean(rhs is None) elif rhs is None: - return Bool(lhs is None) + return Boolean(lhs is None) else: - return Bool(bool(lhs == rhs)) + return Boolean(bool(lhs == rhs)) return Eq(self, other) - def __ne__(self, value: typing.Any) -> typing.Union["Bool", Ne]: + def __ne__(self, value: typing.Any) -> typing.Union["Boolean", Ne]: other = self.preprocess(value) if self.is_value and other.is_value: lhs = self.encode_value() rhs = other.encode_value() if lhs is None: - return Bool(rhs is not None) + return Boolean(rhs is not None) elif rhs is None: - return Bool(lhs is not None) + return Boolean(lhs is not None) else: - return Bool(lhs != rhs) + return Boolean(lhs != rhs) return Ne(self, other) def in_(self, vlist: typing.List[typing.Any]) -> Or: @@ -489,29 +467,29 @@ class Quantifiable(Equatable): Variable modifier to handle quantifiable values. 
""" - def __gt__(self, value: typing.Any) -> typing.Union["Bool", Gt]: + def __gt__(self, value: typing.Any) -> typing.Union["Boolean", Gt]: other = self.preprocess(value) if self.is_value and other.is_value: - return Bool(self.get_value() > other.get_value()) + return Boolean(self.get_value() > other.get_value()) return Gt(self, other) - def __ge__(self, value: typing.Any) -> typing.Union["Bool", Ge]: + def __ge__(self, value: typing.Any) -> typing.Union["Boolean", Gte]: other = self.preprocess(value) if self.is_value and other.is_value: - return Bool(self.get_value() >= other.get_value()) - return Ge(self, other) + return Boolean(self.get_value() >= other.get_value()) + return Gte(self, other) - def __lt__(self, value: typing.Any) -> typing.Union["Bool", Lt]: + def __lt__(self, value: typing.Any) -> typing.Union["Boolean", Lt]: other = self.preprocess(value) if self.is_value and other.is_value: - return Bool(self.get_value() < other.get_value()) + return Boolean(self.get_value() < other.get_value()) return Lt(self, other) - def __le__(self, value: typing.Any) -> typing.Union["Bool", Le]: + def __le__(self, value: typing.Any) -> typing.Union["Boolean", Lte]: other = self.preprocess(value) if self.is_value and other.is_value: - return Bool(self.get_value() <= other.get_value()) - return Le(self, other) + return Boolean(self.get_value() <= other.get_value()) + return Lte(self, other) class Spatial(Variable): @@ -1054,7 +1032,6 @@ def area(self) -> Float: if not isinstance(self._value, Symbol): raise ValueError return Float.symbolic( - owner=self._value._owner, name=self._value._name, key=self._value._key, attribute="area", @@ -1333,7 +1310,6 @@ def area(self): "attribute 'area' is reserved for symbolic variables." ) return Float.symbolic( - owner=self._value._owner, name=self._value._name, key=self._value._key, attribute="area", @@ -1502,13 +1478,9 @@ def __init__(self, symbol: Symbol): raise ValueError( "DictionaryValue should only be initialized as a symbol." ) - if symbol._attribute: - raise ValueError( - "DictionaryValue symbol should not contain attribute." - ) if not symbol._key: raise ValueError("DictionaryValue symbol should contain key.") - super().symbolic(name=symbol._name, owner=symbol._owner) + super().symbolic(name=symbol._name) @classmethod def nullable(cls, value: typing.Any): @@ -1552,7 +1524,6 @@ def area(self): """Returns area attribute.""" symbol = self.get_symbol() return Float.symbolic( - owner=symbol._owner, name=symbol._name, key=symbol._key, attribute="area", @@ -1566,9 +1537,7 @@ def _generate(self, other: typing.Any, fn: str): obj = _get_type_by_value(other) symbol = self.get_symbol() sym = obj.symbolic( - owner=symbol._owner, name=symbol._name, - attribute=symbol._attribute, key=symbol._key, ) return sym.__getattribute__(fn)(other) @@ -1678,7 +1647,7 @@ def encode_value(self) -> dict: elif isinstance( v, ( - Bool, + Boolean, Integer, Float, String, @@ -1695,9 +1664,7 @@ def __getitem__(self, __key: str): if self.is_symbolic: symbol = self.get_symbol() return DictionaryValue.symbolic( - owner=symbol._owner, name=symbol._name, - attribute=None, key=__key, ) else: @@ -1968,7 +1935,6 @@ def area(self) -> Float: if not isinstance(self._value, Symbol): raise ValueError return Float.symbolic( - owner=self._value._owner, name=self._value._name, key=self._value._key, attribute="area", @@ -2076,8 +2042,8 @@ def _get_type_by_value(other: typing.Any): Order of checking is very important as certain types are subsets of others. 
""" - if Bool.supports(other): - return Bool + if Boolean.supports(other): + return Boolean elif String.supports(other): return String elif Integer.supports(other): @@ -2121,7 +2087,7 @@ def get_type_by_name( ): """Retrieves variable type by name.""" types_ = { - "bool": Bool, + "boolean": Boolean, "string": String, "integer": Integer, "float": Float, diff --git a/examples/filtering.ipynb b/examples/filtering.ipynb new file mode 100644 index 000000000..2d65fa516 --- /dev/null +++ b/examples/filtering.ipynb @@ -0,0 +1,665 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Successfully connected to host at http://localhost:8000/\n" + ] + } + ], + "source": [ + "from valor import connect\n", + "connect(\"http://localhost:8000\")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import datetime\n", + "\n", + "from valor import (\n", + " Annotation,\n", + " Client,\n", + " Dataset,\n", + " Datum,\n", + " Filter,\n", + " GroundTruth,\n", + " Label,\n", + ")\n", + "from valor.schemas import And, Or, Box, Polygon, Point\n", + "\n", + "client = Client()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example - Swimmers and boats.\n", + "\n", + "This example demonstrates how to create and query a dataset containing images annotated with boats, swimmers, and fish." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# Define the data for the example\n", + "contains_boat_swimmer = [\n", + " (\"uid1\", False, False),\n", + " (\"uid2\", True, False),\n", + " (\"uid3\", False, True),\n", + " (\"uid4\", True, True),\n", + "]\n", + "\n", + "# Create a bounding box for annotations\n", + "box = Box.from_extrema(0, 10, 0, 10)\n", + "\n", + "# Define labels for annotations\n", + "swimmer_label = Label(key=\"class\", value=\"swimmer\")\n", + "boat_label = Label(key=\"class\", value=\"boat\")\n", + "fish_label = Label(key=\"class\", value=\"fish\")\n", + "\n", + "# Create a dataset for the images\n", + "dataset1 = Dataset.create(\"dataset1\")\n", + "\n", + "# Add annotations to the dataset\n", + "for uid, is_boat, is_swimmer in contains_boat_swimmer:\n", + " annotations = [\n", + " Annotation(\n", + " labels=[boat_label if is_boat else fish_label],\n", + " bounding_box=box,\n", + " is_instance=True,\n", + " ),\n", + " Annotation(\n", + " labels=[swimmer_label if is_swimmer else fish_label],\n", + " bounding_box=box,\n", + " is_instance=True,\n", + " ),\n", + " ]\n", + " dataset1.add_groundtruth(GroundTruth(datum=Datum(uid=uid), annotations=annotations))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Show all datums in the dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Datum({'uid': 'uid4', 'metadata': {}}),\n", + " Datum({'uid': 'uid3', 'metadata': {}}),\n", + " Datum({'uid': 'uid2', 'metadata': {}}),\n", + " Datum({'uid': 'uid1', 'metadata': {}})]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dataset1.get_datums()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Query the dataset for images containing just fish" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "just_fish = dataset1.get_datums(\n", + " Filter(\n", + " datums=And(\n", + " Label.key == \"class\",\n", + " Label.value != \"boat\",\n", + " Label.value != \"swimmer\",\n", + " )\n", + " )\n", + ")\n", + "assert len(just_fish) == 1\n", + "assert just_fish[0].uid == \"uid1\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Query the dataset for images containing no swimmers (only boats)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "no_swimmers = dataset1.get_datums(\n", + " Filter(\n", + " datums=And(\n", + " Label.key == \"class\",\n", + " Label.value == \"boat\",\n", + " Label.value != \"swimmer\",\n", + " )\n", + " )\n", + ")\n", + "assert len(no_swimmers) == 1\n", + "assert no_swimmers[0].uid == \"uid2\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Query the dataset for images containing no boats (only swimmers)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "no_boats = dataset1.get_datums(\n", + " Filter(\n", + " datums=And(\n", + " Label.key == \"class\",\n", + " Label.value != \"boat\",\n", + " Label.value == \"swimmer\",\n", + " )\n", + " )\n", + ")\n", + "assert len(no_boats) == 1\n", + "assert no_boats[0].uid == \"uid3\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Query the dataset for images containing either swimmers or boats but not both."
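For reference, a sketch of how such a compound expression encodes under the new operator classes, using the Label, And, and Or names already imported in this notebook; the dict shape follows the Condition/Function to_dict implementations in operators.py, but the exact output is assumed, not captured, and the rhs value sub-dicts are elided:

expr = Or(
    And(Label.key == "class", Label.value == "boat", Label.value != "swimmer"),
    And(Label.key == "class", Label.value != "boat", Label.value == "swimmer"),
)
expr.to_dict()
# -> {"op": "or", "args": [
#        {"op": "and", "args": [
#            {"lhs": {"name": "label.key", "key": None}, "rhs": {...}, "op": "eq"},
#            {"lhs": {"name": "label.value", "key": None}, "rhs": {...}, "op": "eq"},
#            {"op": "not", "args": {"lhs": {...}, "rhs": {...}, "op": "eq"}},  # != lowers to not(eq)
#        ]},
#        {"op": "and", "args": [...]},
#    ]}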
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "boats_xor_swimmers = dataset1.get_datums(\n", + " Filter(\n", + " datums=Or(\n", + " And(\n", + " Label.key == \"class\",\n", + " Label.value != \"boat\",\n", + " Label.value == \"swimmer\",\n", + " ),\n", + " And(\n", + " Label.key == \"class\",\n", + " Label.value == \"boat\",\n", + " Label.value != \"swimmer\",\n", + " )\n", + " )\n", + " )\n", + ")\n", + "assert len(boats_xor_swimmers) == 2\n", + "uids = {datum.uid for datum in boats_xor_swimmers}\n", + "assert \"uid2\" in uids\n", + "assert \"uid3\" in uids" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Query the dataset for images containing both swimmers and boats" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "swimmers_and_boats = dataset1.get_datums(\n", + " Filter(\n", + " datums=And(\n", + " Label.key == \"class\",\n", + " Label.value == \"boat\",\n", + " Label.value == \"swimmer\",\n", + " )\n", + " )\n", + ")\n", + "assert len(swimmers_and_boats) == 1\n", + "assert swimmers_and_boats[0].uid == \"uid4\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example - Swimmers w/ Boats of different sizes.\n", + "\n", + "This example demonstrates how to create and query a dataset containing images annotated with boats, swimmers, and fish.\n", + "\n", + "In this example, the boats are bounded by either a small bbox (5x5) or large bbox (10x10)." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "contains_boat_swimmer = (\n", + " (\"uid1\", False, False),\n", + " (\"uid2\", True, False),\n", + " (\"uid3\", False, True),\n", + " (\"uid4\", True, True),\n", + ")\n", + "\n", + "small_box = Box.from_extrema(0, 5, 0, 5)\n", + "large_box = Box.from_extrema(0, 10, 0, 10)\n", + "\n", + "swimmer = Label(key=\"class\", value=\"swimmer\")\n", + "boat = Label(key=\"class\", value=\"boat\")\n", + "fish = Label(key=\"class\", value=\"fish\")\n", + "\n", + "dataset2 = Dataset.create(\"dataset2\")\n", + "for uid, is_large_boat, is_swimmer in contains_boat_swimmer:\n", + " dataset2.add_groundtruth(\n", + " GroundTruth(\n", + " datum=Datum(uid=uid),\n", + " annotations=[\n", + " Annotation(\n", + " labels=[boat],\n", + " bounding_box=large_box if is_large_boat else small_box,\n", + " is_instance=True,\n", + " ),\n", + " Annotation(\n", + " labels=[swimmer if is_swimmer else fish],\n", + " bounding_box=small_box,\n", + " is_instance=True,\n", + " ),\n", + " ],\n", + " )\n", + " )" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "No swimmer, small boats" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "no_swimmer_small_boats = dataset2.get_datums(\n", + " Filter(\n", + " datums=And(\n", + " Label.key == \"class\",\n", + " Label.value != \"swimmer\",\n", + " ),\n", + " annotations=And(\n", + " Label.key == \"class\",\n", + " Label.value == \"boat\",\n", + " Annotation.bounding_box.area < 50,\n", + " ),\n", + " )\n", + ")\n", + "assert len(no_swimmer_small_boats) == 1\n", + "assert no_swimmer_small_boats[0].uid == \"uid1\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "No swimmer, large boats" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": 
[ + "no_swimmer_large_boats = dataset2.get_datums(\n", + " Filter(\n", + " datums=And(\n", + " Label.key == \"class\",\n", + " Label.value != \"swimmer\",\n", + " ),\n", + " annotations=And(\n", + " Label.key == \"class\",\n", + " Label.value == \"boat\",\n", + " Annotation.bounding_box.area > 50,\n", + " ),\n", + " )\n", + ")\n", + "assert len(no_swimmer_large_boats) == 1\n", + "assert no_swimmer_large_boats[0].uid == \"uid2\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Swimmer with small boats" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "swimmer_with_small_boats = dataset2.get_datums(\n", + " Filter(\n", + " datums=And(\n", + " Label.key == \"class\",\n", + " Label.value == \"swimmer\",\n", + " ),\n", + " annotations=And(\n", + " Label.key == \"class\",\n", + " Label.value == \"boat\",\n", + " Annotation.bounding_box.area < 50,\n", + " ),\n", + " )\n", + ")\n", + "assert len(swimmer_with_small_boats) == 1\n", + "assert swimmer_with_small_boats[0].uid == \"uid3\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Swimmer with large boat" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "swimmers_and_boats = dataset2.get_datums(\n", + " Filter(\n", + " datums=And(\n", + " Label.key == \"class\",\n", + " Label.value == \"swimmer\",\n", + " ),\n", + " annotations=And(\n", + " Label.key == \"class\",\n", + " Label.value == \"boat\",\n", + " Annotation.bounding_box.area > 50,\n", + " ),\n", + " )\n", + ")\n", + "assert len(swimmers_and_boats) == 1\n", + "assert swimmers_and_boats[0].uid == \"uid4\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example - Geospatial + Time of Year\n", + "\n", + "This example demonstrates how to create and query a dataset containing images annotated with a time and region." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "# regions\n", + "geojson_alaska = Polygon(\n", + " [\n", + " [\n", + " (\n", + " -170.7603599457809,\n", + " 68.84625981507392\n", + " ),\n", + " (\n", + " -170.7603599457809,\n", + " 58.53538829807735\n", + " ),\n", + " (\n", + " -141.3435514691004,\n", + " 58.53538829807735\n", + " ),\n", + " (\n", + " -141.3435514691004,\n", + " 68.84625981507392\n", + " ),\n", + " (\n", + " -170.7603599457809,\n", + " 68.84625981507392\n", + " )\n", + " ]\n", + " ]\n", + ")\n", + "geojson_australia = Polygon(\n", + " [\n", + " [\n", + " (\n", + " 113.26697231702212,\n", + " -12.835622232181265\n", + " ),\n", + " (\n", + " 113.26697231702212,\n", + " -40.757486033452935\n", + " ),\n", + " (\n", + " 157.67091884462127,\n", + " -40.757486033452935\n", + " ),\n", + " (\n", + " 157.67091884462127,\n", + " -12.835622232181265\n", + " ),\n", + " (\n", + " 113.26697231702212,\n", + " -12.835622232181265\n", + " )\n", + " ]\n", + " ]\n", + ")\n", + "\n", + "# cities\n", + "geojson_austrailia_sydney = Point((151.27740157112845, -33.78747691475676))\n", + "geojson_alaska_anchorage = Point((-149.75306358105365, 61.21554843271193))" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "map_idx_to_month = {\n", + " 1: \"january\",\n", + " 2: \"february\",\n", + " 3: \"march\",\n", + " 4: \"april\",\n", + " 5: \"may\",\n", + " 6: \"june\",\n", + " 7: \"july\",\n", + " 8: \"august\",\n", + " 9: \"september\",\n", + " 10: \"october\",\n", + " 11: \"november\",\n", + " 12: \"december\"\n", + "}\n", + "\n", + "vehicle = Label(key=\"class\", value=\"vehicle\")\n", + "\n", + "dataset3 = Dataset.create(\"dataset3\")\n", + "for i in range(1, 13):\n", + " dataset3.add_groundtruth(\n", + " GroundTruth(\n", + " datum=Datum(\n", + " uid=f\"alaska_{map_idx_to_month[i]}\", \n", + " metadata={\n", + " \"month\": datetime.date(2024, i, 1), \n", + " \"region\": geojson_alaska,\n", + " }\n", + " ),\n", + " annotations=[\n", + " Annotation(\n", + " labels=[vehicle],\n", + " ),\n", + " ],\n", + " )\n", + " )\n", + " dataset3.add_groundtruth(\n", + " GroundTruth(\n", + " datum=Datum(\n", + " uid=f\"austrailia_{map_idx_to_month[i]}\", \n", + " metadata={\n", + " \"month\": datetime.date(2024, i, 1),\n", + " \"region\": geojson_australia\n", + " }\n", + " ),\n", + " annotations=[\n", + " Annotation(\n", + " labels=[vehicle],\n", + " ),\n", + " ],\n", + " )\n", + " )" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Find datums where the region is experiencing summer.\n", + "\n", + "Northern Hemisphere (June - September)\n", + "Southern Hemisphere (December - March)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Datum({'uid': 'austrailia_december', 'metadata': {'month': {'type': 'date', 'value': '2024-12-01'}, 'region': {'type': 'geojson', 'value': {'type': 'Polygon', 'coordinates': [[(113.26697231702212, -12.835622232181265), (113.26697231702212, -40.757486033452935), (157.67091884462127, -40.757486033452935), (157.67091884462127, -12.835622232181265), (113.26697231702212, -12.835622232181265)]]}}}}),\n", + " Datum({'uid': 'alaska_september', 'metadata': {'month': {'type': 'date', 'value': '2024-09-01'}, 'region': {'type': 'geojson', 'value': {'type': 'Polygon', 'coordinates': [[(-170.7603599457809, 68.84625981507392), (-170.7603599457809, 
58.53538829807735), (-141.3435514691004, 58.53538829807735), (-141.3435514691004, 68.84625981507392), (-170.7603599457809, 68.84625981507392)]]}}}}),\n", + " Datum({'uid': 'alaska_august', 'metadata': {'month': {'type': 'date', 'value': '2024-08-01'}, 'region': {'type': 'geojson', 'value': {'type': 'Polygon', 'coordinates': [[(-170.7603599457809, 68.84625981507392), (-170.7603599457809, 58.53538829807735), (-141.3435514691004, 58.53538829807735), (-141.3435514691004, 68.84625981507392), (-170.7603599457809, 68.84625981507392)]]}}}}),\n", + " Datum({'uid': 'alaska_july', 'metadata': {'month': {'type': 'date', 'value': '2024-07-01'}, 'region': {'type': 'geojson', 'value': {'type': 'Polygon', 'coordinates': [[(-170.7603599457809, 68.84625981507392), (-170.7603599457809, 58.53538829807735), (-141.3435514691004, 58.53538829807735), (-141.3435514691004, 68.84625981507392), (-170.7603599457809, 68.84625981507392)]]}}}}),\n", + " Datum({'uid': 'alaska_june', 'metadata': {'month': {'type': 'date', 'value': '2024-06-01'}, 'region': {'type': 'geojson', 'value': {'type': 'Polygon', 'coordinates': [[(-170.7603599457809, 68.84625981507392), (-170.7603599457809, 58.53538829807735), (-141.3435514691004, 58.53538829807735), (-141.3435514691004, 68.84625981507392), (-170.7603599457809, 68.84625981507392)]]}}}}),\n", + " Datum({'uid': 'austrailia_march', 'metadata': {'month': {'type': 'date', 'value': '2024-03-01'}, 'region': {'type': 'geojson', 'value': {'type': 'Polygon', 'coordinates': [[(113.26697231702212, -12.835622232181265), (113.26697231702212, -40.757486033452935), (157.67091884462127, -40.757486033452935), (157.67091884462127, -12.835622232181265), (113.26697231702212, -12.835622232181265)]]}}}}),\n", + " Datum({'uid': 'austrailia_february', 'metadata': {'month': {'type': 'date', 'value': '2024-02-01'}, 'region': {'type': 'geojson', 'value': {'type': 'Polygon', 'coordinates': [[(113.26697231702212, -12.835622232181265), (113.26697231702212, -40.757486033452935), (157.67091884462127, -40.757486033452935), (157.67091884462127, -12.835622232181265), (113.26697231702212, -12.835622232181265)]]}}}}),\n", + " Datum({'uid': 'austrailia_january', 'metadata': {'month': {'type': 'date', 'value': '2024-01-01'}, 'region': {'type': 'geojson', 'value': {'type': 'Polygon', 'coordinates': [[(113.26697231702212, -12.835622232181265), (113.26697231702212, -40.757486033452935), (157.67091884462127, -40.757486033452935), (157.67091884462127, -12.835622232181265), (113.26697231702212, -12.835622232181265)]]}}}})]" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "march = datetime.date(2024, 3, 1)\n", + "june = datetime.date(2024, 6, 1)\n", + "september = datetime.date(2024, 9, 1)\n", + "december = datetime.date(2024, 12, 1)\n", + "\n", + "summer_time = dataset3.get_datums(\n", + " Filter(\n", + " datums=Or(\n", + " And(\n", + " Datum.metadata[\"month\"] >= june,\n", + " Datum.metadata[\"month\"] <= september,\n", + " Datum.metadata[\"region\"].intersects(geojson_alaska)\n", + " ),\n", + " And(\n", + " Or(\n", + " Datum.metadata[\"month\"] >= december,\n", + " Datum.metadata[\"month\"] <= march,\n", + " ),\n", + " Datum.metadata[\"region\"].intersects(geojson_australia)\n", + " ),\n", + " )\n", + " )\n", + ")\n", + "summer_time" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".env-velour", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": 
".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/getting_started.ipynb b/examples/getting_started.ipynb index bd8254903..8dd9a7c35 100644 --- a/examples/getting_started.ipynb +++ b/examples/getting_started.ipynb @@ -56,6 +56,17 @@ "execution_count": 1, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:The Valor client version (0.27.2.dev37+g6c9eaddf.d20240614) is newer than the Valor API version 0.27.2.dev37+g6c9eaddf\t==========================================================================================\n", + "\t== Running with a mismatched client != API version may have unexpected results.\n", + "\t== Please update your client to \u001b[1;0.27.2.dev37+g6c9eaddf\u001b[0;31m to avoid aberrant behavior.\n", + "\t==========================================================================================\n", + "\u001b[0m\n" + ] + }, { "name": "stdout", "output_type": "stream", @@ -77,6 +88,7 @@ " GroundTruth, \n", " Prediction,\n", " Label,\n", + " Filter,\n", ")\n", "from valor.schemas import (\n", " Box, \n", @@ -922,10 +934,10 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'key': 'class_label', 'value': 'cat', 'score': None}\n", - "{'key': 'class_label', 'value': 'person', 'score': None}\n", "{'key': 'class_label', 'value': 'car', 'score': None}\n", "{'key': 'label', 'value': 'positive', 'score': None}\n", + "{'key': 'class_label', 'value': 'person', 'score': None}\n", + "{'key': 'class_label', 'value': 'cat', 'score': None}\n", "{'key': 'class_label', 'value': 'dog', 'score': None}\n" ] } @@ -959,8 +971,8 @@ "{'datum': {'uid': 'img2', 'metadata': {'path': 'a/b/c/img2.png'}}, 'annotations': [{'metadata': {}, 'labels': [{'key': 'class_label', 'value': 'dog', 'score': 0.1}, {'key': 'class_label', 'value': 'cat', 'score': 0.9}], 'bounding_box': None, 'polygon': None, 'raster': None, 'embedding': None, 'is_instance': None, 'implied_task_types': ['classification']}]}\n", "{'datum': {'uid': 'img1', 'metadata': {'path': 'a/b/c/img1.png'}}, 'annotations': [{'metadata': {}, 'labels': [{'key': 'class_label', 'value': 'dog', 'score': 0.9}, {'key': 'class_label', 'value': 'cat', 'score': 0.1}], 'bounding_box': None, 'polygon': None, 'raster': None, 'embedding': None, 'is_instance': None, 'implied_task_types': ['classification']}]}\n", "{'datum': {'uid': 'img5', 'metadata': {'path': 'a/b/c/img5.png'}}, 'annotations': [{'metadata': {}, 'labels': [], 'bounding_box': None, 'polygon': None, 'raster': None, 'embedding': None, 'is_instance': None, 'implied_task_types': ['empty']}]}\n", - "{'datum': {'uid': 'img4', 'metadata': {'path': 'a/b/c/img4.png'}}, 'annotations': [{'metadata': {}, 'labels': [{'key': 'class_label', 'value': 'person', 'score': 0.1}, {'key': 'class_label', 'value': 'dog', 'score': 0.8}, {'key': 'class_label', 'value': 'cat', 'score': 0.1}], 'bounding_box': [[(500.0, 220.0), (530.0, 220.0), (530.0, 260.0), (500.0, 260.0), (500.0, 220.0)]], 'polygon': None, 'raster': None, 'embedding': None, 'is_instance': True, 'implied_task_types': ['object-detection']}]}\n", - "{'datum': {'uid': 'img3', 'metadata': {'path': 'a/b/c/img3.png'}}, 'annotations': [{'metadata': {}, 'labels': [{'key': 'class_label', 'value': 'person', 'score': 0.1}, {'key': 'class_label', 'value': 'dog', 'score': 0.8}, {'key': 'class_label', 'value': 'cat', 'score': 
0.1}], 'bounding_box': [[(16.0, 130.0), (70.0, 130.0), (70.0, 150.0), (16.0, 150.0), (16.0, 130.0)]], 'polygon': None, 'raster': None, 'embedding': None, 'is_instance': True, 'implied_task_types': ['object-detection']}, {'metadata': {}, 'labels': [{'key': 'class_label', 'value': 'person', 'score': 0.9}, {'key': 'class_label', 'value': 'dog', 'score': 0.05}, {'key': 'class_label', 'value': 'cat', 'score': 0.05}], 'bounding_box': [[(89.0, 10.0), (97.0, 10.0), (97.0, 110.0), (89.0, 110.0), (89.0, 10.0)]], 'polygon': None, 'raster': None, 'embedding': None, 'is_instance': True, 'implied_task_types': ['object-detection']}]}\n" + "{'datum': {'uid': 'img4', 'metadata': {'path': 'a/b/c/img4.png'}}, 'annotations': [{'metadata': {}, 'labels': [{'key': 'class_label', 'value': 'dog', 'score': 0.8}, {'key': 'class_label', 'value': 'person', 'score': 0.1}, {'key': 'class_label', 'value': 'cat', 'score': 0.1}], 'bounding_box': [[(500.0, 220.0), (530.0, 220.0), (530.0, 260.0), (500.0, 260.0), (500.0, 220.0)]], 'polygon': None, 'raster': None, 'embedding': None, 'is_instance': True, 'implied_task_types': ['object-detection']}]}\n", + "{'datum': {'uid': 'img3', 'metadata': {'path': 'a/b/c/img3.png'}}, 'annotations': [{'metadata': {}, 'labels': [{'key': 'class_label', 'value': 'dog', 'score': 0.8}, {'key': 'class_label', 'value': 'person', 'score': 0.1}, {'key': 'class_label', 'value': 'cat', 'score': 0.1}], 'bounding_box': [[(16.0, 130.0), (70.0, 130.0), (70.0, 150.0), (16.0, 150.0), (16.0, 130.0)]], 'polygon': None, 'raster': None, 'embedding': None, 'is_instance': True, 'implied_task_types': ['object-detection']}, {'metadata': {}, 'labels': [{'key': 'class_label', 'value': 'dog', 'score': 0.05}, {'key': 'class_label', 'value': 'person', 'score': 0.9}, {'key': 'class_label', 'value': 'cat', 'score': 0.05}], 'bounding_box': [[(89.0, 10.0), (97.0, 10.0), (97.0, 110.0), (89.0, 110.0), (89.0, 10.0)]], 'polygon': None, 'raster': None, 'embedding': None, 'is_instance': True, 'implied_task_types': ['object-detection']}]}\n" ] } ], @@ -978,12 +990,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'key': 'class_label', 'value': 'cat', 'score': None}\n", - "{'key': 'class_label', 'value': 'person', 'score': None}\n", "{'key': 'class_label', 'value': 'car', 'score': None}\n", "{'key': 'label', 'value': 'positive', 'score': None}\n", - "{'key': 'class_label', 'value': 'dog', 'score': None}\n", - "{'key': 'label', 'value': 'negative', 'score': None}\n" + "{'key': 'class_label', 'value': 'cat', 'score': None}\n", + "{'key': 'class_label', 'value': 'person', 'score': None}\n", + "{'key': 'label', 'value': 'negative', 'score': None}\n", + "{'key': 'class_label', 'value': 'dog', 'score': None}\n" ] } ], @@ -1023,27 +1035,27 @@ "[{'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'dog'}},\n", + " 'label': {'key': 'class_label', 'value': 'person'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'dog'}},\n", + " 'label': {'key': 'class_label', 'value': 'person'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'cat'}},\n", + " 'label': {'key': 'class_label', 'value': 'dog'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'cat'}},\n", + " 'label': {'key': 'class_label', 'value': 'dog'}},\n", " {'type': 'AP',\n", " 
'parameters': {'iou': 0.5},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'person'}},\n", + " 'label': {'key': 'class_label', 'value': 'cat'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'person'}},\n", + " 'label': {'key': 'class_label', 'value': 'cat'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1056,7 +1068,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'dog'}},\n", + " 'label': {'key': 'class_label', 'value': 'person'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1069,7 +1081,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'cat'}},\n", + " 'label': {'key': 'class_label', 'value': 'dog'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1082,7 +1094,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'person'}},\n", + " 'label': {'key': 'class_label', 'value': 'cat'}},\n", " {'type': 'mAP',\n", " 'parameters': {'iou': 0.5, 'label_key': 'class_label'},\n", " 'value': 1.0},\n", @@ -1114,7 +1126,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'dog'}},\n", + " 'label': {'key': 'class_label', 'value': 'person'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1127,7 +1139,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'cat'}},\n", + " 'label': {'key': 'class_label', 'value': 'dog'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1140,7 +1152,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'person'}},\n", + " 'label': {'key': 'class_label', 'value': 'cat'}},\n", " {'type': 'mAPAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1272,7 +1284,7 @@ } ], "source": [ - "eval_clf = model.evaluate_classification(dataset, filter_by=[Label.key == 'class_label'])\n", + "eval_clf = model.evaluate_classification(dataset, filters=Filter(labels=(Label.key == 'class_label')))\n", "eval_clf.wait_for_completion()\n", "eval_clf.metrics" ] @@ -1294,10 +1306,10 @@ "text/plain": [ "[{'type': 'IOU',\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'dog'}},\n", + " 'label': {'key': 'class_label', 'value': 'car'}},\n", " {'type': 'IOU',\n", " 'value': 1.0,\n", - " 'label': {'key': 'class_label', 'value': 'car'}},\n", + " 'label': {'key': 'class_label', 'value': 'dog'}},\n", " {'type': 'IOU',\n", " 'value': 1.0,\n", " 'label': {'key': 'class_label', 'value': 'cat'}},\n", diff --git a/examples/object-detection/coco-yolo.ipynb b/examples/object-detection/coco-yolo.ipynb index a26b89c73..23efbcc8a 100644 --- a/examples/object-detection/coco-yolo.ipynb +++ b/examples/object-detection/coco-yolo.ipynb @@ -56,8 +56,9 @@ "from pathlib import Path\n", "import pandas as pd\n", "\n", - "from valor import Client, Model, Annotation, Label, connect\n", + "from valor import Client, Model, Annotation, Label, Filter, connect\n", "from valor.enums import TaskType, AnnotationType\n", + "from valor.schemas import And, Eq\n", "from valor.viz import create_combined_segmentation_mask\n", "\n", "# connect to Valor API\n", @@ -124,8 +125,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "Formatting: 100%|██████████| 5/5 
[00:00<00:00, 113.69it/s]\n", - "Uploading: 100%|██████████| 5/5 [00:00<00:00, 10.96it/s]\n" + "Formatting: 100%|██████████| 5/5 [00:00<00:00, 120.60it/s]\n", + "Uploading: 100%|██████████| 5/5 [00:00<00:00, 10.79it/s]\n" ] } ], @@ -159,7 +160,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 5/5 [00:05<00:00, 1.12s/it]\n" + "100%|██████████| 5/5 [00:03<00:00, 1.30it/s]\n" ] } ], @@ -248,8 +249,8 @@ "outputs": [ { "data": { - "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAGqAoADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD5/ooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACivf/AIffC/Q7fR7PWNSjTU7m7gjuI0nj/dwq6Z27MkOfmPLegwARzwPxU8Df8Ivq41Gz50y/kdkRY9ot36mPgbQvJ298AjHy5MqSbsbSoSjDmZ59RRRVGIUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAH2hXn3xm1D7H8PZYPK3/AG25ig3bsbMEyZ6c/wCrxjjrntWL4A+LWnSaZa6V4jupor6P92L2f5o5RuAXcw5VsHBLcYXJbJrmPir8Q7HxRHb6RpC+bYwSCd7p1ZTI+0gBVOCFAY5JGSemAMtiou56VStF0209zzGiiitjzQooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKK
KACiiigAooooAKlgga4kKIQCEd+fRVLH9BUVamhwLLJfOxIMNlM647nbt5/BjV04801Ezqz5IORl0UUVBoFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFKiNI6oilmY4CgZJPpQG4lFacPh7VrhC6WUgAOP3hCH8mINULi3ltZ3gnQpIhwyntSujWeHq04qU4tJ9WmR0UUUzIKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoor6A8IWkdl4Q0qKMsVa3WU7jzl/nP4ZY1yYvFfVoqVr3OzB4R4mTje1j5/or6bqC7srS/iEV5aw3EYbcEmjDgH1we/JrgWcLrD8f+Aeg8ldtJ/h/wT5ror6I/4RvQv+gLp3/gKn+FH/CN6F/0BdO/8BU/wq/7Xh/KyP7Gn/Mj53or6I/4RvQv+gLp3/gKn+FULjwJ4Zup2mk0qNWbGRE7xr0xwqkAfgKazen1ixPJ6vSSPBqK90/4V74W/wCgX/5MS/8AxVH/AAr3wt/0C/8AyYl/+Kqv7Wodn+H+ZH9j1+6/H/I8Lor2+4+HHhmeBo47OS3Y4xLFO5Zee24ke3Iqj/wqjQv+fvUf+/if/EVSzWg97ol5TiFtZ/M8eor2H/hVGhf8/eo/9/E/+Io/4VRoX/P3qP8A38T/AOIp/wBqYfu/uF/ZWI7L7zx6ivUrj4RwNOxttYkjh42pLAHYcc5YMM8+1Rf8Kh/6jn/kp/8AZ1azHDfzfg/8iHlmK/l/Ff5nmVFem/8ACof+o5/5Kf8A2dR3HwjnWBjbaxHJNxtSWAop55ywY449qazHDP7X4P8AyE8txS+z+K/zPNqK7r/hVGu/8/enf9/H/wDiKwvEfhS+8MfZvtsttJ9o3bPJZjjbjOcgf3hWsMVRnLljK7MZ4StTjzSjZGFRRRXQc4UUUUAFFFFABRRRQAUUUUAFbOlfutE1m4TiVY44g3+y7YYfiBWNXQWECr4K1W4BO95Y0I7YVlI/9CNb4dXk32T/ACZzYppQSfVxX4o5+iiisDpCiiigAooooAKKKKACiiigAooooAKKKKACiiigAorb07wvfX8EdwWjhic8b87ivqBj8uRmrN/4QmtLZ54ruOVY0Z3DKVOAM8dc9/Sp5kd8crxcqftVB2/r5nN0UUVRwBXaeENPtfspv8+ZcbinI/1fsPcgjn3x654uul8HXxiv5LNn+SZdyg5Pzj07DjP5Cpnserks6ccbD2ivfReT6Hb1j+INGXVLMvEg+1xj922cbh3U/rj3/GtiisU7H3lehCvTdKotGeR0Vb1X/kMX3/XxJ/6EaqV0H5jUjyTcewUUUUEBRRVixsbnUryO0tI/MnkztTcBnAJPJ46A0N21Ym0ldleitm68Ka9Z7PN0udt2ceUBL09ducde9ZU9vNazNDcQyQyr95JFKsO/INSpRezFGcZfC7kdFFFUUFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABX0R4b/wCRW0j/AK8of/QBXzvX0R4b/wCRW0j/AK8of/QBXkZv8EfU9nJvjl6GnRRRXgn0AUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFeY/F7/mDf9t//AGnXp1eY/F7/AJg3/bf/ANp125d/vMfn+TOHMv8AdZfL80eZUUUV9QfKBRRRQAUUUUAFFFFABRRWnoek/wBs3r2/n+TtjL7tm7OCBjqPWqhCU5KMd2RUqRpxc5vRC6Rpy6pDeW8ak3ioJYTuwpAOGU/XIx9OorRsv+RB1H/r4H8463NH8LvpGoC5S/8AMXaVdPJxuB98nHIB/CrGu6MdRs4ra1SOLfciWVwoGPlbLEdz0r16eDnCk5Ne9Zq3e+x4dXMKc6yipe7dO+ult1r6Hm1FdF4ltbbSYrfTLQPg5nld8EueQvPt83HA5rna8qrTdKfI90e1QqqtBTS0YUUUVmahRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHpHh68S80S3KjBiURMPQqAP1GD+NW9Qj83TbqPeib4XXc5wq5B5J9Ko+GrL7FokOWy037488DIGP0A/HNZ/jDULi3ghtYspHOG3uP4gMfKOc9+fw96wteWh+hPEOhlyqV1ryr8dDiqKKK3Pz0K0/D0jRa9ZskZkJfbgdgQQT+AOfwrMrX8Mf8jFa/8D/9Aak9jqwKbxVNL+Zfmei0UUVzn6acX4x07y7mPUEHyy/JJ/vAcHr3A/8AHfeuXr1LUrQX+m3FsQMyIQuSQA3UHj3xXlzo0bsjqVZTgqRgg+lbQd0fC5/g/Y4j2kdp6/Pr/mJRRRVnhBXQ+B/+RxsP+2n/AKLauerofA//ACONh/20/wDRbVnV+CXozHEfwZ+j/I9jqve2FpqNsbe8t454j/C4zg4IyPQ8nkc1Yorx07bHzCbTujwXU7ZLLVry1jLGOCd41LdSAxAz+VVa0Nf/AORj1P8A6+5f/QzWfXtx1SPq4O8U2FFFFMoKKKKACiiigAooooAKKKKACiiigAooooAK+iPDf/IraR/15Q/+gCvnevorQI5IfDmlxSo0ciWkSsjDBUhBkEdjXj5v8EfU9nJvjl6GjRRRXhH0AUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFeY/F7/mDf9t//AGnXp1eR/Fi4lbXrK2L5hjtfMVcDhmZgTn6Kv5V3Zar4mL7X/I8/M3bDSXe35nAUUUV9OfLBRRRQAUUUUAFFFFABVvTdQm0y+juoSflPzKDjevdT9aqUU4ycWpLcmUVOLjLZnsDTInVhn0FRfadzqqrwSBk1TinFzBFcBNnmxrJtznGQDjP41T1mZYNHu3YEgxlOPVuB/OvrpVEoOfS1wwnD2DWHVWd23G+vS67Lt6sx/HUG29tLjd/rIym3HTac5/8AHv0rlK2tV1ldY0q2+0ZF9buV4HyyKRy3scqOPf8ALFr5nFzjOs5w2ZzYGE6dBQnurr8f8gooormOsKKKKACiiigAooooAKK6zwxp9vLp0k1xao7NIQrSJnKgDpn3z/kVQ8WMrasgDAlYQGAPQ5J5/MV1zwjhQVZvfodMsM40lVb3MKiiiuQ5gooooA9S0xGj0qzR1KssCAqRgg7RxVXxDZpeaJcBjgxKZVPoVBP6jI/GoLHxDpyWlpDPejz/ACow5YMfmKjOWxj68/Wp9X1CzXRLhvtMRE0LrGVYHecY4x15P4VhZ3P0WVbD1cJKPMmlHXVduvY83ooorc/Ogrb8JwtLr0bqRiJGds9xjbx+LCsSu38I6Uba2N/KMPOuEGCCqZ/rgH6AetTJ2R6eT4eVbGQstIu7+X/BOlooorA/RArz7xXarba47JjEyCTAXGDyD9ckZ/GvQa4jxpE41K3mI+RodoOepBJP8xVw3PD4hgpYO9tmv8jmqKKK2PhArofA/wDyONh/20/9FtXPV0Pgf/kcbD/tp/6Las6vwS9GY4j+DP0f5HsdFFFeOfMHhev/APIx6n/19y/+hms+u8vfhr4v1bUr2/s9Gd7ae
5leNnmjjLLvODhmBweoOORgjg1ka14A8UeHtOOoappLw2qsFaRZY5ApPTO1jgZ4yeMkDuK9OGKoO0FNX7XVz6qn8C9DmqKKK6SwooooAKKKKACiiigAooooAKKKKACiiigAr6br5kr6brxM4+x8/wBD3Ml+38v1CiiivFPdCiiigAooooAKKKKACiiigAooooAKKKKACiiigArx74r/API023/Xkn/ob17DXj3xX/5Gm2/68k/9DevQyv8A3hejPNzX/d36o4WiiivpT5gKKKKACiiigAooooAKKKKAOjtfFksFpFBLaI/lII1ZXK/KBgZ4PNRal4k/tDTpLYW3lF2XJ3buBz7YOQP1rBorqeMruHI5abdDWFecKfs4v3QooorlMgooooAKKK+gr74ReFLvy/Ihu7LbnP2ecnfnHXfu6e2OtaU6Uql+XocWLx9LCOKqX1v+B8+0V7VffBHTZPL/ALP1i7gxnf8AaI1m3dMYxsx39fwqG1+CNtFcobvWJbmA5DCGMQsvHBBO8NzgYwOuc8YN/VqnY51nODavzfg/+GPG6K9e/wCFG/8AUxf+SX/2yj/hRv8A1MX/AJJf/bKX1ep2K/tjBfz/AIP/ACOe0gx2OhWn2maKMMu4MzYHzEsBz3wa5TXpY5tauJInV0O3DKcg/KO9XvGXhWTwhra6e90l0rwrNHIqFCVJI5GTg5U9zxj6Dn63xGKc6aouNrW/BWPZeNjiKMVBe7pY7ZdPtn8Oh0s4Wma0yCIgWLbPpnOa4mvS7OFreyghcgtHGqEjpkDFec3MP2e7mg3bvLdk3YxnBxXRmNLljBpHXjqfLGDsRUUUV5R5wUVc0rTLnWdVtdOtF3T3EgjXgkDPVjgE4AySccAGvYbL4JaTHCRf6rezy7shoFSJQvHGCG5685/CtIUpT+E48Vj6GFsqr1Z4lU9pZ3F/OILaIySYJwOMD1JPAr3KL4M+Go5kd7jUpVVgTG8yBXA7HCA4PsQaXWvhdpjR2sfh6OTS53nxPdRzSOUi2OcbWcZy4Qcc856A1o8LUtc5qWeYJ1Ep35ev9f8AAZ5laeEYrYGfVbuMRoeVRtq9R1Y49xj9av2/iH+0tZhs7JH+z/MZZSvOB0x6A4Ayefm7GtjxR8MW0rw890LrVtb1ZpVjgEEOVVep3r8zYADcg9WXjrXE6fNceFr9hqel3MM0qDAmVo2VM8kKw5yR+nWuepRlH4j6vLc9wlScYYX3IX1bu27fjbod9RWPp/iWx1K8W2iSZJGBK+Yowcc44J7Z/Ktiudq2593QxFKvHnpSugrlPG8TmGzmA+RWdSc9CcEfyNdXXP8AjCJ5NEVlGRHMrMc9Bgj+ZFOO5x5vDnwVReV/udzg6KKK3PzoK6HwP/yONh/20/8ARbVz1dD4H/5HGw/7af8Aotqzq/BL0ZjiP4M/R/kex0UUV458wej6Z/yCrP8A64J/6CKzvGP/ACI+v/8AYNuP/RbVjP8AFDwRp7tZNrHltbkxFPs0x2leMZ2c9KyvEvxQ8Hah4V1eytdY8y4uLKaKJPs0w3MyEAZKYHJ7187TwmIdZS9nK1+z7n1MFaKR850UUV98aBRRRQB6r8M9J02/8OXEt5p9pcSC7ZQ80KuQNicZI6cmuz/4RvQv+gLp3/gKn+FYnw1tI7bwZBKhYtcyySvk8AhtnHthB+tddXyuLqy9vOze59Zg6UfYQulsZn/CN6F/0BdO/wDAVP8ACj/hG9C/6Aunf+Aqf4Vp0Vz+1n3Z0+yh/KvuMz/hG9C/6Aunf+Aqf4Uf8I3oX/QF07/wFT/CtOij2s+7D2UP5V9xmf8ACN6F/wBAXTv/AAFT/Cj/AIRvQv8AoC6d/wCAqf4Vp0Ue1n3Yeyh/KvuMz/hG9C/6Aunf+Aqf4VzvjrRNJtPBt/PbaXZQzL5e2SK3RWGZFBwQPSu1rmPiF/yI2o/9sv8A0albYapN1oJt7r8zHE04KhNpLZ/keF19N18yV9N16OcfY+f6HmZL9v5fqFFFFeKe6FFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXj3xX/wCRptv+vJP/AEN69hrxD4j3Es/jW7jkfcsCRxxjAG1dgbHvyzH8a9LKlevfsjzM2dsPbu0cnRRRX0Z8yFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABX17XyFX1jpN9/amjWOoeX5X2q3jn8vdu27lDYzxnGa7MG9z5riKLtTfr+hcqnqiwnTpZJ7aW5SDFwIYQS7tGQ6hQCMncowO/SrlFdrPmouzTKTavp4tTcreQyRBPMBibeWXGflC5LcdMZz2rN/wCEz0X+/ff+C25/+N1x2hWj6fpQsXnac2k81v5jDG4JK6DjJxwBxnitKuyGG54KV90fp2F4FwdajGq6sveSfTqr9jhfieZfEXiW2u9Ks764gSzWJn+xyphg7nGGUHoRXCRRGz1OGO+haLZIplSVCCFyDyD7V7rRWMsrTlzKWvoe9R4ap0KcadOo9O6v/kcP/adh/wA/1t/39X/GuG1iWKbV7mSHZsL8FOh45PTv1/x617jRXRicLLERUXK3y/4J2V8plWioudvl/wAE8Aor3+iuL+yP7/4f8E5f9X/+nn4f8E5P4NeHftOo3HiCdf3drmC356yMPmPB7KccjB3+or2qq9jB9msYYtu0qg3DOeep/XNWKiFNU1yo/H8zxHt8VOSd0nZei2+/f5hVC71COz1OzhnvLKCK4V0VJpQsssuUChATyOWz3yVq/WbqEYl1TTVYkAM7ceoAP9K0jHmdiMBh44ityTdlaTvv8MXL9DSooopHEY/in/kW7v8A4B/6GteWjULM3K263MTTFivlqwLAgEnIHToetexX3/IPuf8Ark38jXC3H/HtL/uH+VZVMB9YTqc1rLsfrfh9iJLA1KUek7/ekv0/4BgVj+KXVfD1wGYAsUCgnqdwOB+AP5VpXfnfY5/s/wDr/Lby+n3scdeOteb3us3+oQiG6n8xA24DYo56dh7mvEhG7ufX5zj4Yei6Uk7zTS7dijRRRWx8CFdT8PrX7R4qSXft+zwvLjGd2Rsx7ffz+FctXY/DX/kY7j/r0b/0NKyru1NnPim1Rlbsep0UUV5B80eF6/8A8jHqf/X3L/6Gaz60Nf8A+Rj1P/r7l/8AQzWfXtR+FH1VP4F6BRRRVFhRRRQB7v4Bjki8EaasiMjFXYBhg4MjEH6EEH8a6Sszw3/yK2kf9eUP/oArTr46u+arJ+bPtKEeWlFeSCiiiszUKKKKACiiigArmPiF/wAiNqP/AGy/9GpXT1zHxC/5EbUf+2X/AKNStsN/Hh6r8zDFfwJ+j/I8X0m0jv8AWbGzlLCO4uI4mKnkBmAOPfmvo+vnfw3/AMjTpH/X7D/6GK+iK9LN378UeZkyXJN+YUUUV457QUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFeF/EL/kedR/7Zf+ikr3SvC/iF/wAjzqP/AGy/9FJXqZT/AB36fqjys4/gL1/RnMUUUV9CfNhRRRQAUUUUAFFFFABR
RRQAUUUUAFFFFABRRRQAV9S+E/8AkTtD/wCwfB/6LWvlqvrHSbH+y9GsdP8AM837LbxweZt27tqhc45xnFdmEWrPneIZLkgvNlyiiiu4+VPGdU8RWfhPxLrem3hu7hmvWuUMaDaiyqsm0ZbsWP1696mj8ceH3jVmvWQkAlGhfK+xwCPyNUvjJot2Nfh1aGwf7G1qqzXMaZXzAxHzkdDgoAT14A6ceX1h9frUXyK1kfquU55XWDpqNmkkvu9Gev8A/CbeHf8AoIf+QZP/AImrFl4p0TULgW9vqEZlb7qurJuOcYG4DJ56da8YrQ0L/kYdM/6+4v8A0MVcM0rOSTS/H/M9WlnmIlNRcVZvz/zPb6ydV8SaXosnlXk7LMY/MWNY2JYcgYOMdQepFbEaNLIsaDLMQoHqTXE+LfAfi7V/EEtxBpvm2yoqQt58S8YyRywP3i3WvTxleVGF4K7PSzbNaWApq84qT2Ta272uiT/hY+j/APPtff8AfCf/ABVH/Cx9H/59r7/vhP8A4qsJ/hX4wVIyNLRy65KrcxZQ5Iwct14B4yMEd8gYt34U8Q2Pnm50TUESDd5kn2diihep3AYI465xXkvMMUt1+B89DiSc9I1Iv7j6looorc/JArNkAbxFCHcjZAWRc8FiSD+n8q0q871vUJLb416BA948VrJZkNGZSqM584LkZwSTtA98Ue0UNX10+89TK58s6vnCa+9f0j0Siiig8sZKsbQusuPLKkNk4GO/NcBcf8e0v+4f5V2OuXsen6PPNKrsrFYQEAJ3SMEH4ZYZ9q4u8YpZyEemPz4rppNRozb7fofqXh7Tfsa076OSX3J/5/kYdeW6lbi11O6gVCipKwVTnhc8dfbFepVwfi+28nWRMA+J4wxJ6bhxgfgB+dfL03qfX8SUebDxqL7L/B/0jn6KKK1Pigrsfhr/AMjHcf8AXo3/AKGlcdXY/DX/AJGO4/69G/8AQ0rKv/DZzYv+BL0PU6KKK8g+bPC9f/5GPU/+vuX/ANDNZ9aGv/8AIx6n/wBfcv8A6Gaz69qPwo+qp/AvQKKKKosKKKltreW7uobaBN80zrHGuQMsTgDJ96G7asEr6I+kba3itLWG2gTZDCixxrknCgYAyfapaKK+Kbvqz7hK2iCiiigYUUUUAFFFFABXMfEL/kRtR/7Zf+jUrp65j4hf8iNqP/bL/wBGpW2G/jw9V+Zhiv4E/R/keL6Tdx2Gs2N5KGMdvcRysFHJCsCce/Feq/8AC19C/wCfTUf+/af/ABdePUV9NXwlOu059D5fD4yrh01Dqew/8LX0L/n01H/v2n/xdH/C19C/59NR/wC/af8AxdePUVz/ANl4fs/vOj+1cR3X3Htdn8TPDlzv82W5tNuMedCTu+mzd098das/8LC8Lf8AQU/8l5f/AImvC6Kl5TQb3f8AXyKWb10tl/XzPdP+FheFv+gp/wCS8v8A8TR/wsLwt/0FP/JeX/4mvC6KX9k0O7/D/If9sV+y/H/M90/4WF4W/wCgp/5Ly/8AxNS2/jvwzdTrDHqsas2cGVHjXpnlmAA/E14NRQ8po9G/w/yGs4r9Uvx/zPfrzxn4csdnm6vbNvzjySZenrsBx171Tk+IvhiOJ3XUGkZVJCLBJlj6DKgZ+pFeG0UllNHq3+H+QPOK3RL8f8z2H/ha+hf8+mo/9+0/+Lqnd/Fu0SUCz0maaPby00ojOfTADcdOc15VRWqyzDrp+Jk80xLW/wCB6TcfFydoGFto8cc3G15Zy6jnnKhRnj3qh/wtfXf+fTTv+/b/APxdcLRWqwGHW0TJ5hiXvM6y4+I/iaedpI7yO3U4xFFAhVeO24E+/Jqld+N/El7EI5dWmVQ27MIWI5+qAHHPSsCitVhqK2gvuMpYmvLeb+9mn/wkmu/9BrUf/Ap/8azpJJJpXlldpJHYszscliepJ7mm0VooRjsjKU5S3YUUUVRIUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFfVHh3UL7U9Et7rUbJLW5dQWWOVJI5BgEOjKx+U5yMnI9+CflevTbvxjrnhPwn4Stba723TW73E8MyCQPAz/uQWOcAqCMKQQOOOK6MPUULtnjZvhZYlQhC17vf0v+nbse4UV4T/AMLp8R/8+Wlf9+pP/i61/wDheX/Uu/8Ak7/9rrrWJp9zwpZLjFtG/wA1+p6/RXD+Hfinoev39vp5juLK7mUACcL5Zk4+RWB5PXGQM49SBTPFHxR03w1qN3pn2C7ub632cZVIm3BW+9kkcN/d6/nV+1hbmvocqwGJdT2XI+bc6aXwzoFxM802h6bJLIxd3e0jLMx5JJI5NQXHhDQJICIdF06GZSHjkit1jZXU5U7lAPUD69OleRav8YvEF65XTo7fTotwKlVEsmMYILMNpGeeFB6e+dL4f+N/FWveMrOyvb57izKyPOq20YAUIcElVBA3bR16kDvWSr03JJI9KOX47Dx9u525dbXfTX0/E7Gx/wCQhbf9dV/mK7auFjmaO4WcBdyuHAxgZznoKoQfGHTYdVuLDVbCe38mfyRPCwkU4YhmYcFRwDgbj19OfTxslHlcj6/jnAYivOlWpxukmn6npNFZWkeJdF15AdM1K3uGKl/LVsSBQcElDhgM46juPWn61r2meHbNLvVbn7PA8giV/LZ8sQTjCgnoDXHzK176H5t7Kpz8nK79ra/caVFch/wtHwb/ANBj/wAlpv8A4ij/AIWj4N/6DH/ktN/8RS9pDujb6lif+fcvuZ19eTfFT7NpPjHwvr0vmtskHmouD8kUiv8AKOOTvbqfTpXbaX498Na1qMWn6fqXnXUudkfkSLnALHkqB0Brg/jl/wAwH/t4/wDadZVpJ020dmWUpwxkYVE1dPfTSzPXJZY7eF5ppEjijUu7uwCqo5JJPQU+uE8X67M3wiOotLFBdahZwDaMYYy7d6qGz/CX9wAT2zXhNjq2paX5n9n6hd2nmY3/AGeZo92M4zgjOMn86KmIUGlYrB5RLE05S5rNOx9B/EO9ktdH02FFQrd6pbQyFgchQ2/j3yg/DNY1/wD8eUn4fzFT2N/qPiD4U2V7ds91eS3MTyMkYBKpdjJwoAACrk8dBmoL/wD48pPw/mK64O+Hqvuv0Z+jcCQ9nh6tN7xnZ/gYtc14ztvM0+C4AcmKTacdArDqfxAH410tZ2vQfaNCvE3bcR784z935sfpXzUXZn3GY0vbYSpDy/LVHmlFFFdB+ahXY/DX/kY7j/r0b/0NK46ux+Gv/Ix3H/Xo3/oaVlX/AIbObF/wJeh6nRRRXkHzZ4Xr/wDyMep/9fcv/oZrPrQ1/wD5GPU/+vuX/wBDNZ9e1H4UfVU/gXoFFFFUWFafhv8A5GnSP+v2H/0MVmV0ngGOOXxvpqyIrqGdgGGRkRsQfqCAfwrKu+WlJ+TNaEearFeaPd6KKK+PPtAooooAKKKKACiiigArmPiF/wAiNqP/AGy/9GpXT1zHxC/5EbUf+2X/AKNStsN/Hh6r8zDFfwJ+j/I8Looor68+NCiiigAooooAKKKKACiiigA
ooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAs6fZSalqdrYQsiy3MyQoXJChmIAzjtzXQfEO6hm8X3FpaPmy06OOxt0wf3axqAVyeTh9/Jzn1ximfDyyj1Dx9o8MrOqrMZgUIB3RqXH4ZUZ9qwtQvZNS1O6v5lRZbmZ5nCAhQzEk4z25q9oerOX4sT/hj+b/AOAVqKKKg6grr/ij/wAlG1X/ALY/+iUrF8MxR3HivR4Zo0kikvoUdHUFWUuAQQeora+KP/JRtV/7Y/8AolK0S/dt+a/U5JS/2uMf7svzj/kchXpPwaimj13VdRWPdBb2LI53AfMzKyj15CNz7V5tXqPw2+06Z4P1zVY/KZLi7tbMK2SRlwrnH+7MMc9RyPW8Mr1VceLip01Tf2nFffJL8jrq8c8YQR2/iy/SJdql1cjOeWUMT+ZNex15B42/5G++/wC2f/ota9nNl+5T8/0Z9tnyX1eL/vfozCilkt5kmhkeOWNg6OjEMrDkEEdDXsPxA+03nwg8P3L+bO4+yyzynLHmFgWY+7MBk9yPWvG6928S/wDJCIP+wfZf+hRV41HWMl5H57mL5a1B/wB633nhNFFFYHrHX/C7/ko2lf8Abb/0S9dx8b7KSTTNIvwyeVDNJCwJO4s4BGPb922fqK878A339nePNGn8vzN1wIMbsY8wGPP4bs++K9i+KOlyaxomlWSb1WbVoI5JVQsIlcMm4j0ywHUckDvXXSV6MkfP46Xs8zpVHtb/ADv+ZifG6+8vRtK0/wAvPn3DT+Zu+7sXbjHv5n6e9eK11/xI8S/8JH4qm8iXfY2eYLfa2VbB+ZxgkHcehHVQvpXIVjXlzTbR6OWUHRwsYy33+89x+D0Zm8B6jEpAZ72VRnpkxR1Lf/8AHlJ+H8xR8Ff+ROvP+wg//ouOpdYjEKXUSklUfaM9cBq9Wg/9kmv7r/Jnu8HV/wDbcVRfeLX4p/oYFR3EK3NtLA5IWVChI64IxUlFfOH6Q0pKzPI6Ks6hEkGpXUMY2okzqoznABIFVq6T8rnFwk4voFdj8Nf+RjuP+vRv/Q0rjquabqt7o9y1xYTeTKyFC21W+XIOOQfQVFSLlBxRhXpupTcF1PeKK8L/ALf1n/oLX/8A4Ev/AI02TW9WljaOTU7143BVla4chgeoIzXF9Tl3PL/suf8AMO1//kY9T/6+5f8A0M1n0UV3pWVj2IqySCiiimMK7D4Z2f2nxjHL5m37LDJLjGd2Rsx7ffz+FcfRWdaDqU3BO1zSjUVOoptXsfTdFfOdvrerWkCwW2qXsMK52xxXDqoycnAB9auWni/xDZSmSLWLtmK7cTP5ox9HyM8da8V5RPpJHuLOafWLPoCivDY/iL4njlR21BZFVgSjQR4YehwoOPoRWj/wtfXf+fTTv+/b/wDxdZPKq62szaObYd73XyPYaK8vj+LsgiQS6KrSBRuZbnaCe5A2nA9smr9p8WdNeIm8067hk3cLCyyDHrkleevGKxll+JX2fyNo5jhn9r8z0GiuMtPif4euZSkpu7VQud80OQT6fIWOfw7Vfj8feGJZUjXVVDOwUFoZFGT6krgD3NZSwteO8H9xrHF0JbTX3nSVzHxC/wCRG1H/ALZf+jUrU/4STQv+g1p3/gUn+NRa5cwXfg/VZ7aeOaFrKfbJE4ZThGBwR70UVKFWMmuqCs41KUop9GfPlFFFfXHxwUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHX+CP9G07xVqE3y2qaPJbNJ1xJKQsa4HPJB56DviuQrr9O/0D4V61c/6z+09QgstvTy/LBl3Z75zjHGOue1chVy2SOahrUqS87fcl+twoooqDpNjwn/yOOh/9hCD/ANGLWv8AFH/ko2q/9sf/AESlQ/Di1hvPiDpEU6b0WRpQMkfMiM6nj0ZQfwqt46vZNQ8c6zNKqKy3TwgICBtj+QfjhRn3rX/l18zh3x3pD83/AMA5+vUdC+06V8LtM/1TR6tr0XqSqKw+mDug9+D69PLq9da0az0P4faU8vm20wmvmXbt/ebfMT3+XzCOuD3HatMKrz+78WjecfaYnD0lu5r8Lv8AyOiryDxt/wAjfff9s/8A0Wtev15B42/5G++/7Z/+i1r182/gr1/Rn2eff7tH/EvyZz9e7eJf+SEQf9g+y/8AQoq8Jr6O/wCaOf8Acv8A/tvXkYdXUl5H5zm8uWVGXaSPnGiiiuY9ov6Jex6br+nX8yu0VtdRTOEALFVYE4z34r6C+Jcslv8AD/UpoZHjljaB0dGIZWEyEEEdDXzdX0LHex698GJbqZXmJ0mQObgBmaWNCC5znJ3puB69Dwa6sO7xlHyPCzeHLWo1u0rfjf8AzPnqiiiuU909w+Cd1C/hrULRXzPFeea6YPCsihTnpyUb8qs+Prg6bb3tyFKqrwsdqj5lLoGxn15/Guf+C93bWNt4hubu4it4E+zbpZnCKuTIBkngckCk+JXizS9Z0Rf7JvIbhJmSNwchwoLNnacEcgDkd/cGvUoVEqEk+sWcWTVKmGzfEThtyPXs7Ra/Er/2rp3/AD/2v/f5f8asLcQvB56TRtDgnzAwK4HU56V5PRXj+zPtY8TVPtU195d1ea3udWuZrUHyXfcM9z3P4nJ/GqVFFaHzVSbqTc31dwooooICiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKANt9XUeB4NFQoWOpSXcoKncoEaImD0wcyZHXgdO+JRRTbuRCChe3XUKKKKRZ6J8GYo5PGk7vGjNFYu8ZZQSjb0GR6HBI+hNcVrd7HqWv6jfwq6xXN1LMgcAMFZiRnHfmug8DeOf+EL+3/8S77Z9r8v/lv5ezZu/wBk5zu/SuQrSUlyKKOKlSmsVUqyWlkl+oV63rj20PxI8O6VY332m107TzbBRKH8t1WRWDAcB8KueB0HtXklbnhCeG28U2U1xLHFEu/c8jBVHyMOSa0w0rVIrzX5nfhqSeNo1G/hf5tHsdeQeNv+Rvvv+2f/AKLWvX68g8bf8jfff9s//Ra17GbfwV6/oz6rPv8Ado/4l+TOfr6O/wCaOf8Acv8A/tvXzjX01pBsP+Fc6amqSxRWMmlwxTtNJ5a7WjVSC2RjOcde9eThfteh+a547Kk/M+ZaKKK5T3gr1nwDqf2j4WeKtOZpWe0t55F3HKqkkTYVeePmRyRj+L3NeTVf0vV7vSHuzalMXdrJaTK65DRuMH6HoQfUdxkHSnPklc5cZh/b0uVbpp/cyhRRRWZ1BRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFWLOzmvp/JgXc+0t7cD17en1IqvXZ+F9P+z2RupFxJP8AdyOQnb
t36/TFdOEoe3qKPTqb4aj7Wpy9Dj5IpIZDHKjI46qwwR+FMr0bULG0vrci7UbUBIfOCnHJz/kcV57OIluJBCxaIOQjHqVzwavF4R4d73TKxOGdF73TI67X4XaRoeu+L/7N12Dz4prZ/s8e91zKCG6oR/AH6nH44riq6v4aX8WnfEXRZ5ldlacwAIATukUxr17ZYZ9s15GOUnhanI2nZ2tvexjTtzq56Rf/AADsJJ1On67c28O3BW4gWZi2TzkFOMY4x+NcD4y+GmqeDLEX91eWdxaPc+REYiwkOQxBZSMDheQCcH1619PV578aNP8Atvw8ln83Z9iuYp9u3O/JMeOvH+sznnpjvXyGW51i5YiFKrO8W0tkd1XDwUW0tT5uooor7o84KKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAp8sskzh5ZHkYKqAuxJCqAAPoAAB7CmUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBb0yxbUL+K3GdpOXI7KOv+fUivRVVUQIihVUYAAwAKwvC+n/Z7I3Ui4kn+7kchO3bv1+mK3q+iy6h7OlzPdnuYKjyU+Z7s5bxZfSK0diowjKJGbP3uSAPpxn8vSuXrW8SMza7OCxIUKFBPQbQePzNZNePjJudeV+jt9x5eKm5VZXCruj6h/ZOuafqXleb9kuY5/L3bd+xg2M4OM464qlRXJKKknF7M507an2nXL/EXT/7T+Hmtweb5ey2M+7bnPlESY699mM9s55rU8NXc+oeFtIvbp/MuLiyhllfAG5mQEnA4HJ7VPrGn/wBraHqGm+b5X2u2kg8zbu2b1K5xkZxnpmvyuk3QxEW38LX4M9l+9H1Pjmiiiv1U8YKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACremWLahfxW4ztJy5HZR1/wA+pFVK7Pwvp/2eyN1IuJJ/u5HITt279fpiurCUPbVVHp1OjDUva1EuhuqqogRFCqowABgAUtFFfUH0BxPimFY9Y3gnMsau2fXkcfkKxK6zxhCzW9rMCNqOyEd8kAj/ANBNcnXzGOhyYiS+f3ngYuPLWkFFFFchzH1F8KppZ/hpozzSPIwWVAzsSQqyuqjnsAAAOwArsa8S+EPjPw9oHh2bTdU1JLW6nv2dFeN9u1kjUEsBtUZB5JGMc17HYalYapA0+n3tteQq2wyW8qyKGwDjIJ5wRx71+aZrhqlLFVG4uzbadtNT1qM1KC1PkDUrCXS9VvNPnZGmtZ3gdkJKllYqcZxxkVVrpPiBYS6d8QNcgmZGZrt5wUJI2yfvF698MM++a5uv0ahU9pSjPuk/vR5UlZtBRRRWogooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigC3pli2oX8VuM7ScuR2Udf8APqRXoqqqIERQqqMAAYAFcL4cnMOtwjeFWQFGzjnjIH5gV3de9lUY+zclvc9jLor2ba3uFFFFeoegZniCAz6JcBUDMgDjOOMHJI/DNcDXp0saTRPFIMo6lWGeoPWvMmVkco6lWU4IIwQa8PNYWnGff9P+HPJzGFpRkJRRRXknmhWppXiTW9D2jTNVvLVFkEvlRTERs3HLJ91ugByDkDBrLoqZwjNcsldDTa2Lur6vfa7qk2palP593Nt8yTYq5woUcKAOgHaqVFFOMVFKMVZITd9WFFFFMAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigCW2m+z3cM+3d5bq+3OM4Oa9Mry6vUa9rKW7TXp+p6uWvSS9Aooor2D0wrzjU/8AkK3n/Xd//QjXo9ecan/yFbz/AK7v/wChGvJzb4I+p5uY/DEq0UUV4Z5IUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAV6Ppn/IKs/8Argn/AKCK84rc0vxJNYxpBOnnQLwDn5lHt6gc8frXfl+IhRm+fqdmCrRpTfN1O1oqjBrGnzwpKLuFNwztkcKw9iM1J/adh/z/AFt/39X/ABr31VptXUkeyqkHrctV5tfSJNqFzLGco8rMpx1BJxXW67q1sukypb3MUkkv7sCN1bAPXPtjI/GuKrx80rKTjCPTU8vMKqk1BBRRRXknnBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB//9k=", - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAoAAAAGqCAIAAAAUcGK5AAAdAklEQVR4Ae3dLbMc150H4KstV0AEIhBiAS9ekkoFBZhEICTCISELNmSJv4GAv4FJUGhIsEwCHGIQlFKFBK+BRRZogQWW7LY9Vns009PTfV76vD0pVXnudPc55/+cTv3u6enp+/DgfwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBhM4NFg9SqXAAECGQX+94svTq3/6NmzjN1ouguBD7qoQhEECBAoLDBHb+Fx6L4dgX9pZ6hGSoAAgUoFrtP3+p1Kh25Y5QRcgi5nr2cCBNoXWA9aF6Lbn+GMFVgBZ8TVNAECgwusx/PgOMoXwM4BAgQIECBQQEAAF0DXJQEC4whYBI8z13srFcB7xexPgACB7wU2huvG3bCOJiCAR5tx9RIgQIBAFQICuIppMAgCBAgQGE1AAI824+olQIAAgSoEBHAV02AQBAh0LODbwB1PbkxpAjhGz7EECBAgQCBQQAAHwjmMAAECBAjECAjgGD3HEiBAgACBQAEBHAjnMAIECBAgECMggGP0HEuAwNAC7q4aevqjixfA0YQaIEBgYAEZPPDkx5YugGMFHU+AAIEVAQm9gjP4JgE8+AmgfAIEYgVWInZlU2yvjm9f4FH7JaiAAAECVQhc/NEF6VvFrBgEAQIECAwicBHDg1StTAIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBApwKf/q3TwpRFgEAbAp6E1cY8GSUBAgQIdCYggDubUOUQIECAQBsCAriNeTJKAgQIEOhMQAB3NqHKIUCAAIE2BARwG/NklAQIECDQmYAA7mxClbNNwC3Q25zsRYBAPgEBnM9WywQIECBA4KaAAL5JYwMBAgQIEMgn8EG+prVMgACBRgW+/vA3FyN/+vrzi3f8SCBSQABHAjqcAIFOBK5D97ywaasMPgfxOl7AJeh4Qy0QINC8wHr6nsrbsk/zEAo4UEAAH4itKwIEqhTYnqzb96yyUIOqS0AA1zUfRnOEwOk7SC9+eURf+qheYG+m7t2/egADLCYggIvR65gAgeICYWkadlTxYg2gNgEBXNuMGA8BAgcJyNGDoHVzQ0AA34DxNgECBG4LCO/bNrZsFRDAW6XsR4BAZwKRXyuSwZ2dD8eXI4CPN9cjAQIECBB4uPMgjm8+/XkY0uMXr8IOdBQBAgRaEZgWwZHL6FYqNc4cAncCOLjL4ORe71Gur/vYSoDALoEpPl1J3iVm54QCuQI44RDPm8qR60L9XNhrAgQIEDhG4NFKNznSbqW7njYJ9Xpnc/5LwB7EUe8kHT2ymEWwS9BHz1ZH/TW2Am5FPsfvLkK9ldk3zkiBiziUcJGeDq9WQABXOzWXA9se6qL60s7P1QtchO75eOdNkvicxesOBARwB5OoBAJtC8wRW6qMDm7F+vjl5R8w/vK5P2Bc6oTa2q8A3iplPwIEcggUT98cRdXQpkiuYRbWxyCA131sJUCAwJrA9AtEK9fG50i2OF6b0QO3eRLWgdi6IkCAQAUCUxLPYVzBcMYdggAed+77r3z+xlH/paqwpECjV9FlcMmT5ru+BXDxKTCAnAIyOKeutlsXkMFlZ1AAl/XXO4HRBSr5ADVyGI0ugkc/+UrXL4BLz4D+8wmcnnVlEZxP+NiWhdyx3nrLLiCAsxProLyADC4/B2sjiFx9rjV94Da/HxyI3UlXAriTiVQGge4Fsua0+Oz+/KmwQAFc4aQYUgaBi0Xw4l9iuNgnwyg02beAFO97fpNX50EcyUk1WKvAer6ub621pnHGlXX5Ow6jSqsSsAKuajoMJrXA4kr3upNT+m7c+fpw72QWaCh9LYIznwtdNW8F3NV0Kma3gIXvbrL0B6yHVkPpm55Gi10LCOCup3ec4sJyNOyocVRVSoBATgGXoHPqartmgYv0df250GT1t/xdr6gQs25rFBDANc6KMWUXuEjf7P3pIFBAmAXCOawFAQHcwiwZIwECTQn4vaGp6So2WAFcjF7HBAhsERBmW5Ts06KAAG5x1oz5SsAnuFck3igr4PeGsv5N9C6Am5gmg9wgIIM3IDW6izBrdOIMe11AAK/72NqnwGd//O/pX5+1qYoAgUYEBHAjE2WYWwTuLYJPubscve6L3iJcbp8WF8EtjrncDI/YswdxjDjro9W8nLijKaj3toCkvG1jS0
YBAZwRV9NlBeRuWX+9EyCwLiCA131sbU9A7jY0Z5aeDU2WoSYXEMDJSTXYjsD0mbGPfr+brs8+enmatk++et7O/BkpgbYFBHDb82f0BIIF5tANbiH+wOkvHVkExzNqoVEBd0E3OnE3h/34xaub28bY8Ml//HRHofdunN7RVFO71pC+lYD5DaCSiRhwGAJ4wElX8vsCcwa7HP0+jJ8IEMgqIICz8mr8cIE5TXf1HHbUri7sXKWA5W+V0zLKoATwKDOtzguBy7uNZPAF0AA/St8BJrnqEt2EVfX0GNyhAjL4UO4fOptuxfrhh4eHxVy82Od8/5pfT7XUP/J/fPPxuuHPHn+5voOtYQI3A/ibT38e1qKjCBCoXODWHViXVwUOKeM6n+Z3Tkk8/5h2OIsxn7aLJlq7m75TFdM+MjjHbLoEnUNVm4UF9t0IXXiwur8pMEWv9L2pc+yGLTl97Ih66O3mCriH4tRAgEC0wPVyuchCOboODbQk0P0l2NP3Ra2AWzopjZUAgXiBxYvPmZba8aPVQscCVsAdT+6opZ3upXr3bMVFBWu4RZbu31yJ3sVN3YMosKyAAC7rr/dcAisRe31NNdcgtFuNwK18tfCtZopGHIgAHnHWB695JZtHkBnq949buTtNtOgd4WyvvEafAVc+QYZHgECIwBS9laTvyjBCCnNMRwJWwB1NplII7BQIuxgwraHDDtw5uvDdVzJvceG7sn/4IBxJ4J6AAL4nZDuBvgQqz85M2Iu5m6kvzRLYKCCAN0LZjQCBZgTEbTNTNfZABfDY86/6QgK/fvNm7vkvT57Mr70IEDjHPB2+i3Tk689/+NX/BIA7JJWAAE4lqR0CgQLX+XFqaCVF5kNW9gkcTWWHzZVGjmtup3uxSKgaDu/+MVgzsgCeKbwgUJfAnBnXw5pS5LR1ZZ/ro+Z34kMo931YYXXNBU4vtrcwX68eeSl8Tuf1YQIC+DBqHRFIJrA9XRa7nA7fmMFFvjQcWd1iyac316uek3huQSTPFF7kEBDAOVS1SYBASYFUET5FcpIMnhq5TveSQO/6/s+//uTL5+/9MeZ3W/z3CAEBfISyPggQ2CiQKjsXu7tufH1NvNiINwmkEhDAqSS1Q+BQgSk5ruPk0BF00dl9w3/7XReFKqJGAY+irHFWjIlADQLrHwCvb61h/MZAoHIBAVz5BBkegWWB+0u35eOqfrfLoqoWN7iiAgK4KL/OCRBILeBj3dSi2sslIIBzyWqXQN8CyZ8pbfkbfMJ8/PI3wcc6sKCAAC6Ir2sCUQJll3rVfgYsyKPOKgcfKOAu6AOxdUXgEIFUwTytcVdSNu0KuK3UTPX94ENOB53UKyCA650bIyOwLjCF1pS1b37x9eVu1+9c7rH885O/P73YkDZlLxrP9OP8+0faUI95kkbMsZmUqm12nAdBT1MggKs9Dw2MwNECC1l+yBB++8WPE/YzV/HbFI3++z//9Odnb6eW3nx42dzb17+/fMvPBPYI+Ax4j5Z9CdQncL1srW+MRkSAwIKAFfACSrtvPX7xqt3BG/mYAmmXvzUYuuBcwyw0MQYr4CamySAJENgn0F+u76vf3i0ICOAWZskYuxOYbxSKrCztfUaRg3E4AQK7BATwLi47EyBAgACBNAICOI2jVggQCBDo8kJxkj8hHIDpkOYEBHBzU2bABAgQINCDgADuYRbVQIAAAQLNCQjg5qbMgAlcCvgq8CxyemjG/KMXBGoWEMA1z46xESBQXiDgg2ofA4dN21DPoZyIlgN4NIWwc8VRBAjUKRAQmXUWYlR9CywHcN81q45ATwK+CtzTbKplKAGPohxquhXbrcD1x8Dz3yQ4rObrMdzt+tcPb+7uYwcCvQoI4F5nVl2jCwTE4Uy2Pbxjepm784LAmAIuQY8576omsCYwxWrTyZr2Xuiw1tyHtXaG2fadgAB2IhAgQIAAgQICArgAui4JpBVwH9bseVq7N718n2vxonsBAdz9FCuQQC4BOZdLVrtjCAjgMeZZlQQIJBV4+/r3d9vzMfBdosF3EMCDnwDKLyPgonEZd70SqElAANc0G8ZCoAuBnn69+MuTJ13MiSJqFPA94BpnxZgI7BWYMu+wqJgf9LjyGI3rDD5seHvp7F+JwICPQBbAlZx7hkEgVuCUedXm3MGRPDlc9xhLvP/46WPgp68/33+cI4YQEMD9TPPjF6/6KUYloQJ3U6eehL471FADxxFoQ0AAtzFPwrWNeWphlBtjr56cbgHVGAmECAjgELXkx8jX5KQajBT4NqefvY1sxOEECKwICOAVnDSbhGsax75aqeQTyr5Qv61m+r0h4do9SVM+Bu7vNEtVkQCOlZSvsYKjHi+DR515dRP4XmA5gDOFSnN3mWdycPYROAlUnsHT143C/hBQJfNbOW8lSoZRUGA5gDMNqGyeXcd/2fFkQtZsEwIb74RqohaDJEAgTODQAA4bYqqjxG0qSe3EC1icxRsGt3A8vo+Bgyer7wMHCuC+J1J1BAhsF5gyeN555WpE2lu65h69IHAS8CxoZwKBMgLnGVBmBBt6nZ86uWHfVneZJmL+12oN7Y/7+iPC9mu6X4EV8H0jexDIKjAn8cpSLNMA5q6v2z8NpumbsK6Lmt5ZKXneevxELA7Vm90LCODup1iBzQisZEPaSFjpaAvWKZWbWBxPlaal2+JjHwIbBQTwRii7ESgpsBKZGwNmpYWwwuYGNw4grJdujnIfVjdTmbAQAZwQU1MECgjMQTj1fZGF55vyjWzu5aL3fD1qmUAfAgK4j3lURXsCp7ia0ytJAVNrBVNwruXuGOY946u+21d8F1ogkEnAXdCZYDVLoIxAkmyLTLVpDKd/ZQhu9xqAE0lxeyy2EHgQwE4CAr0JBMRMJoLiMRwWn/UAZpoXzVYiIIArmQjDINCtQPEY7lZWYY0LCODGJ9Dw2xQ4rcyaXmm9+cXXbdobNYFaBARwLTNhHAQI9C0wfROp7wJVt1dAAO8Vsz+BUQQ6ewxW09cb+j7nxnwO5TSnArjvE1t1NQqE3RlUYyVVjik5bxPP/KpyKgzqjoAAvgNkM4FMAtUuyJIHWCZAzRJoXUAAtz6Dxk+gPQEZ396cGXEGAU/CyoCqSQIECFQv8OXzzysZ4+MXr85HMsJHwlONU9UC+HzevSbQg4D15fUsVnvB/3qoMe/Uk6kxVVzk8XVT3SS0AL6eXO8QIEAgi0Cmv4nUR+5uF19P6IbiWQBvn3R7EkggcFqe1r8g+3aEvTxqI157uhG6sy9lJTiVa22ioXgWwLWeRMZFgMAGgSlcb11yv/X+hlbt0q1AVfEsgLs9zxRGIECgv9CKX/4GMDqkUYGD41kAN3qeGDYBAgQIHCqQPJ4F8KHzpzMCBA4QmNbxp4Wv5e8B2ro4CazH87TP9d1hAtjJQ+A4gdMFXqlwnLieCFQjcJ3QnoRVzeQYCAECSQXS/qLjidBJJ0dj3woIYOcBAQIEdgs8fR34GCl/lHC3db8HCOB+51ZlBEIF0q4dQ0fhOAKdCwjgzidYeQQIECBQp4AArnNejIpAAYFGv
wRcar0edhU67KgCZ4Mu8wsI4PzGeiDwnYBboBs6ETKFuvRt6Bw4YKi+hnQAsi4IEOhB4PqJ0OuBenG/1frOPQCpYaeAAN4JZncCBFIIzM/KSNFYpW1I3EonppphuQRdzVQYCAECoQLXV4wb/Tw7FMBxTQoI4CanzaAJ3BKID55TmL3p5W8R3oLyPoHiAgK4+BQYAIEsAteLwizdDNnok78/HbJuRScWEMCJQTVHYFGg/lug45fOi4V7kwCBWwIC+JaM9wkQaEnggBX/n5+9bUnEWKsXcBd09VNkgAQIlBb4Pt19Ll56IjrrXwB3NqHKIUAgpcD5wtpHvylltfXwIICdBQQIEFgQOI/ehc3eIhAt4DPgaEINEKhGIP5GqqZTp+nBV3MSGchxAlbAx1nriQABAgTqF/i///rwNMhH//o662gFcFZejRP4QaDm9Vn80vmHOr0i0ItA7iQWwL2cKeogQIAAgTwCcxLPzSdZHPsMePb0gkA/AjWvtrMqD1t4VtUKG//so5dlRzVF8vwveCQCOJjOgQR2CAiGHVh2JbBBIF8GJ1ndbqjA15C2INmHwAACfkUYYJI7KXGO3tOLT7563mhhVsCNTpxhE7gUiLmRqqf07amWyzn285LAFMNzJC9tr/c9AVzv3BgZAQIECFwI3MraW+9fHF7VjwK4qukwGAIDCcQs2deZLILXfXrd2txSWAD3eiqqi8DQAjK4y+nfssxtKIZ9D7jLs1RRIwqInBFnXc0VCEzfRwq7cdoKuILZMwQCBAgQuCewZfl7r42t28MCdWvr7/YTwO8k/JcAAQIEuhBo5YtJLkF3cbopgkBqgdPfvn2T7U/Qu2CeesY6b2/78reV9J0mTAB3ftYqj0CMQNo/QS90Y+bCsVsEGkrfqRyXoLfMqX0IEIgVkL6xggMfv3H521b6TvMpgAc+qZVOgAABAuUEBHA5ez0TIECAwD2BXpe/U90C+N7k206AAAECdQvkuPi865tI138weAuYAN6iZB8CBAgQKCCwcflbYGQpuhTAKRS1QYAAAQIEdgoI4J1gdidAgACBQwT6Xv5OhAL4kPNIJwQIECBA4H0BAfy+h58IECBAoAKB7cvfHHdgHQMggI9x1gsBAgQINCaw60bogNoEcACaQwgQIEAgo8D25W/GQeRvWgDnN9YDAQIECPQuEPBVYAHc+0mhPgIECDQlMMjyd5oTAdzUiWmwBAgQINCLgADuZSbVQYAAgfYFxln+TnMlgNs/YVVAgAABAg0KCOAGJ82QCRAg0KNAhcvfrN9EEsA9nsVqIkCAAIHqBQRw9VNkgAQIEBhAIHj5G3xgcVQBXHwKDIAAAQIERhQQwCPOupoJECBQlUC7q9hzxr3P4hDA53peEyBAgACBgwQE8EHQuiFAgACBRYE+lr+Lpa2/KYDXfWwlQIAAgdoFskZ4vm8iCeDaTyzjI0CAQMcCWbOzcjcBXPkEGR4BAgQI9CkggPucV1URIECgfoGRl7/T7Ajg+k9RIyRAgACBDgUEcIeTqiQCBAjUL5B2+Zu2tWC9XV8F/iC4GwcSIECAAIERBM5vhN4Vses4Anjdx1YCBAgQSC9QyYI1oLCEYewSdIC/QwgQIECAQKzAo9gGHE+AAAECBPYI5Fv+fvLV8z0DSbnvfGn6fIm83oFL0Os+thIgQIAAgfsC23N3bssl6JnCCwIECBDILpBv+Zt96Kk7EMCpRbVHgAABAgQ2CAjgDUh2IUCAAIEUApa/54oC+FzDawIECBBoWKCtgBfADZ9qhk6AAIGGBNpKxwNgBfAByLogQIAAAQKXAgL4UsTPBAgQIEDgAAEBfACyLggQIEDg4ZinZDR0oVsA+38FAQIECBAoICCAC6DrkgABAgQICGDnAAECBAgQKCAggAug65IAAQIECAhg5wABAgQIHCTgPqxzaAF8ruE1AQIECBA4SEAAHwStGwIECBAgcC4ggM81vCZAgAABAgcJCOCDoHVDgAABAgTOBQTwuYbXBAgQIJBX4Jj7sPLWkKh1AZwIUjMECBAgQGCPgADeo2VfAgQIECCQSEAAJ4LUDAECBAgQ2CMggPdo2ZcAAQIECCQSEMCJIDVDgAABAgT2CAjgPVr2JUCAAAECiQQEcCJIzRAgQIAAgT0CAniPln0JECBAgEAiAQGcCFIzBAgQIEBgj4AA3qNlXwIECBAgkEhAACeC1AwBAgQIENgjIID3aNmXAAECBKIFPA76RCiAo08lDRAgQIAAgf0CAni/mSMIECBAgEC0gACOJtQAAQIECBDYLyCA95s5ggABAgQIRAsI4GhCDRAgQIAAgf0CAni/mSMIECBAoGKBVu6yFsAVn0SGRoAAAQL9CgjgfudWZQQIEBhPoJXl7zQzAni801PFBAgQKC3QUEzmoxLA+Wy1TIAAAQI3BXJkcI42bxYQvUEARxNqgAABAgSCBKa8bCsyg6q8eZAAvkljAwECBAgcIDBsDD86AFcXBAgQIEBgi8BnH73cstviPs0tpq2AF+fRmwQIECBQQKC5EC1gpEsCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQI8C/w8rRMFzjykN6wAAAABJRU5ErkJggg==", + "image/jpeg": 
"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAGqAoADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD5/ooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiux0bQLRLaK5nUTvIiuocfKoI6Y79ep/SsfxDpP9n3Pnxf6iZiQAuNh9PTHp/wDWrrqYKrTpe1Z0zwtSFP2jMaiiiuQ5gooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPUaxvFE3laKybc+a6pnPT+L/2WqujeI4TBHb30jLMOPNfkNzxk9j9fTOaz/EOtRagqW1sN0KMHMhBGTjsPTn/AD3+hxGMpSw7cXq1t1ParYmnKg2nqzBooor548UKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACrWn2Euo3LwQsissEs5LkgbY42kbp3wpx74qrXV+BLCK8udenkZw1noV7PGFIwWMfl8+2JD+OKxxFT2VKU+xUVd2OUooorYkKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKciNI6oilnY4VVGST6CgBtFakHhzV7hC6WMgAOP3hCH8mINULi3mtLh4J4zHKhwyntVypziryTSM41ac3yxkm/UioooqDQKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoor6w+H1hFp3w/wBCghZ2VrRJyXIJ3SfvGHHbLnHtivPzHHrBwUuW933t/mB8n0V9q1Vv9NsNVgWDUbK2vIVbeI7iJZFDYIzhgecE8+9eSuI1fWn+P/AFc+NKK+uv+EO8L/8AQt6P/wCAMX/xNH/CHeF/+hb0f/wBi/8Aia0/1ipfyMLnyLRX11/wh3hf/oW9H/8AAGL/AOJrLu/hd4Lvbp7iXQoVd8ZEMs
kScDHCowUdOwqo8Q0L+9B/h/wAufLNFfUH/CpPA/8A0BP/ACbn/wDi6P8AhUngf/oCf+Tc/wD8XV/6wYb+WX3L/MLny/RX0zd/B3wXc2rwxadNaO2MTQ3Mhdec8byy89OQetZf/CiPC/8Az/6x/wB/ov8A43Vxz7CNa3Xy/wCCFz56or6F/wCFEeF/+f8A1j/v9F/8bo/4UR4X/wCf/WP+/wBF/wDG6r+3cJ3f3Bc+eqK9xu/gDavdO1l4hmhtzjZHNaiRxxzlgyg857D+tQf8M/f9TP8A+SH/ANsrRZzgmvj/AAf+Q7nitFe1f8M/f9TP/wCSH/2yobv4A3SWrtZeIYZrgY2RzWpjQ885YMxHGex/rTWcYJu3P+D/AMgueN0V6h/wojxR/wA/+j/9/pf/AI3XL+MPAmqeCfsX9pT2cv2vf5f2Z2bGzbnO5R/eFdFLHYarNQhNNsDl6KKK6wCiiigAooooAKKKKACiiigArtfCX+i+BfG2pQ/LdpbW1qsnXEU0u2RcHjkAc4yO2K4qvRdAsIo/gf4s1AM/nT3dvAykjaFjeNhj3zI2foK4sfJKnFPrKC/8mX6GlJav0f5HnVFFFdpmFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRW7pvhS/1C3juC0cELnjzM7iv94DH5ZIz+tWtQ8GT2drJcRXkcqRozuHQocAZ4657+ldKwldx51HQ5HjsOp8jnqcxRRRXMdYV3HgzTrT7IdQz5lzuKcj/Vew9yCOffHrnh66jwVfmLUJLJ3/dzKWRTk/OPTsOM5+grswEoqvHmX/DnBmcZyw0uR7fl1O7rF8R6IurWZeKMfbIx+7bONw7qf1x7+nNbVFfR1KcakXCWzPk6VWVKanDdHjlFXNX/AOQ1f/8AXxJ/6Eap18lJcsmj7mEuaKl3CiiipKCiirml6Xea1qMWn6fD511LnZHuC5wCx5JA6A0JX0QpSUU23ZIp0V0d94B8V6d5fn6Hdv5mcfZwJ8Yx12E469+v4VhXVpc2Ny9td28tvOmN0UyFGXIyMg8jgg03FrdEQrU6nwST9GQ0UUUjQKKKKACiiigAooooAKKKKACiiigAooooAKKKKACvrrwd/wAiPoH/AGDbf/0WtfItfXXg7/kR9A/7Btv/AOi1r53iL+FD1Ezbooor5QQUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFeKftA/8y7/28/8AtKva68U/aB/5l3/t5/8AaVenk3++w+f5MaPFaKKK+6GFFFFABRRRQAUUUUAFFFdR4E8H/wDCa65Npv277H5Vs0/meT5mcMq4xuH97rntWdatCjB1KjsluOMXJ2Q/wd4ai8WWWtafbxO2tRQJdWTbwqFVbbIjZ7sHXHuvJAznpND/AOTevEv/AGEk/wDQreu78F/Cufwb4hTVIvEHnoY2img+xBPMU843bzjDBTwO2O9aPj7wS3iTRrTTNKhtrMTaoLq7mVAoA8tw8hAxvYkqPUkjJAyR8riM3o1cSoKV4c0ZX10tute9unfqdsaElC9tdUfMlFek/E/StL8IWum+F9ISbD5vruaYKzzkkpFlgAflHmDAAHzZ5JJrzavpsLiFiKSqxVk9vQ45x5XysKKKK6CQooooAKKKKACiiigAooooAKKKKACiiigD03w3epe6FbFBhoVELj0KgD9Rg/jV3UY/O0y7i3om+F13OcKuVPJPYVn+F7H7DoUOWy0/7488DcBj9APxzWd401G5treG0hykc4bzHH8QGPlHOe/PHp719N7X2eFU6nY+PVFVca4Un1OGooor5k+wCtTw5K0XiGzZIzIS+3aOwIIJ/AHP4Vl1s+Ff+RltP+B/+gNW2H/jQ9V+ZhitKE79n+R6VRRRX1p8OcP4103yrmPUUHyy/JJ/vAcHr3A/8d965SvWNTsxqGmXFqQCZEIXcSAG6qTj3xXlLo0bsjqVdThlYYIPoa+dzKh7OrzraX5n1WUYj2tHke8fy6f5DaKKK849UK6/4Xf8lG0r/tt/6JeuQrr/AIXf8lG0r/tt/wCiXq6fxr1ObG/7tU/wv8j6OqnqelWGs2bWmo2kVzA2fkkXODgjIPUHBPIwRmrlFes1c/PoycXdOzPlHW7KPTdf1GwhZ2itrqWFC5BYqrEDOO/FUK2PFn/I465/2EJ//RjVj148t2fo1Jt04t9kFFFFI0CiiigAooooAKKKKACiiigAooooAKKKKACvrrwd/wAiPoH/AGDbf/0WtfItfX3hSGW28HaJBPG8U0enwI8bqVZWEaggg9CD2r53iL+HD1Yma9FFFfKCCiiigAooooAKKKKACiiigAooooAKKKKACiiigArxT9oH/mXf+3n/ANpV7XXgPx5u538VabZM+beKy81EwOGd2DHPXkIv5fWvVyWLeNi+1/yGjyiiiivuBhRRRQAUUUUAFFFFABWv4Z8Q3nhfXrbVLN3BjYCWNW2iaPI3IeDwQPQ4OCOQKyKKicI1IuE1dMabTuj7SyKN3NUNI1D+1tEsNS8ryvtdtHP5e7ds3qGxnAzjPXFZHj+/i07wBrk8yuytaPAAgBO6T92vXtlhn2zXzOEyLDezUpXba69L+S7erPYcrLmPL/j3p/l65pGpebn7RbPB5e37vltuznPOfN6Y42+/Hkddx4s8axeMvCemjUd669p85jJQAR3ETr80mAOGBRARwOSR1wvD17OV0qlHCxpVfijdfjpb5Hl1mpTcl1CiiivQMgooooAKKKKACiiigAor3L4OeFtKvvCdzf6po1tczTXbLHJdQBwY1VcbdwIxuLgkdSMHpxzHxxmil8c26RyI7RWEaSKrAlG3u2D6HDA/Qj1qebWxs6LVPnbPNKKKKoxCiiigD1jS0aPSLJHUq6wIGVhgg7RwaqeJLJL3QrkOcNCpmQ+hUE/qMj8agsPEemJZ2kFxfD7R5UYcsGPzFRnLYx9efrU+s6jZLoVy32qErNC6xFXB3nGOMdeT+FfUOdKdBq6asfGxp1oYhS5Wnft5nmVFFFfLn2QVu+EIGl8QxOpAEKM7Z7jG3j8WFYVd34O0g21s2oSjEk64jXBBVM9fxwD9APWuvA0nUrxt01OHMaypYeV+un3nUUUUV9QfHBXnPi61W2152TAE6CXaFxg8g/XJGfxr0auE8cRONTt5iv7todqnPUhiT/6EPzrzszjehfsz1MnlbE27pnL0UUV86fVhXX/C7/ko2lf9tv8A0S9chXX/AAu/5KNpX/bb/wBEvV0/jXqc2N/3ap/hf5H0dRRRXrn54fLXiz/kcdc/7CE//oxqx67HxD4T1y98T6xcwWDNFJfTshaRFyPMbnBINYmoeGdY0u1NzeWTRwggFw6tjPTOCcfWvLnQqq8uV29D9Oo4auqEZuDtZa2fYyaKKKxJCiiigAooooAKKKKACiiigAooooAKKKKACvtWviqvtWvmOI/+XXz/AEEwooor5kQUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFfPXx3/5Hiy/7Bqf+jJa+ha+evjv/wAjxZf9g
1P/AEZLXsZF/va9GNHl9FFFfajCiiigAooooAKKKKACiiigD1Xw/wDGy60fRLXTbrRIbr7LEkMUkVwYvkVQo3Aq2W45IIHPQVB4v+Ln/CVeFbrSF0j7G88ke5jN5oKKdxwcLhtyp2IILdOK8xorOFKEFaKsa+3qW5b6BRRRWhkFFFFABRRXtd58M/Dlzs8qK5tNuc+TMTu+u/d09sda5sRi6eHa5+p1YbCVMQnydDxSivV7z4S2L7PsWp3MOM7/ADkWXPpjG3Hf1qK3+EsEc6m51OSeE5DCJBEy8cEE7s88Y465zxg4/wBpYa1+b8GbPLMTe3L+KPLaK9N/4VD/ANRz/wAlP/s6P+FQ/wDUc/8AJT/7Oq/tHDfzfg/8if7NxX8v4r/M9P8AAklr4a+Gmh/2tf2dqkkfmLJLMEQ+azSKuWx821uR7Hr1rxD4nXlrf/EPVLqzuYbm3fytksLh0bESA4I4PII/Cs3xT4dk8MasLF7hbhXiWVJAu0kEkcjnByD3Pb6ViV003GS54u6ZlXnJL2Ulax9GQ+FdHuPhPHPBoFjLqMmhh0kSzRpWlMGQQQMli345r5zr7A0Gwl0vw7pmnzsjTWtpFA7ISVLKgU4zjjIr5M1jT/7J1u/03zfN+yXMkHmbdu/YxXOMnGcdM0Qe5WJhZRZSooorQ5Aoq1p1hPqmo29jbLumncIvBIHqTgHgDknsAa9QtPhNpqREXmo3c0m7hoVWMY9MENz15zXNXxVKhZTep04fCVa93BaI8lqxZ2VzqFwILWIySYJwOMD1JPAr1uP4V6DHKjtPfyKrAlGlXDD0OFBx9CKfqnw/sQluuhLJp0zy4muY5nYrHtY4wX5ywQcVjDNMNzpSvY3nlWK5G42v0OJtPB0NqDcavdxrEh5VG2r1GMscdeRj9a0bfxJ/amtw2VjG/wBn+ZpZivOB0x6A4Ayefm7GpfEXgF7DRWuTdanq+pGRY4vKj3Kq9TuX5mxw3IPUjiuX0q7l8L37f2hplwksqADzAY2VM8kKRznH6da9vCZnQnJKj7sb6vq7Hz+NynEQi5Yj3p20XRX/AA8z0WisXTvFFhqd4trEk6SMCV8xRg45xwT2z+VbVfR06kKi5oO6PlKtGdKXLUVmFcj47icw2UwX92rOrHPQkAj/ANBP5V11c740ieTQlZFyscys5z0GCP5kVhjY82Hkjpy6XLioPz/PQ8+ooor5c+yCuv8Ahd/yUbSv+23/AKJeuQrr/hd/yUbSv+23/ol6un8a9Tmxv+7VP8L/ACPo6iiivXPzw4m+/wCQhc/9dW/max9d/wCRe1P/AK9Jf/QDVe/8X6Nbahc293fbbmKVkmXyXOHBIYcLjrnpWXq3i/QrnRr63hvt0stvIiL5LjJKkAcrXfOvR9m1zrbuj+gIYnDwwqh7SLajbRrseWUUUV8sfFhRRRQB7p8F9A0bVfB13PqOk2F5MuoOgkuLZJGC+XGcZYHjJPHvXo3/AAh3hf8A6FvR/wDwBi/+Jrm/g1YRWfw5tZ42cteTyzyBiMBg3l4HtiMevOa7+vg8wrz+tVOWTtd9RGJ/wh3hf/oW9H/8AYv/AImj/hDvC/8A0Lej/wDgDF/8TW3RXF7er/M/vEYn/CHeF/8AoW9H/wDAGL/4mj/hDvC//Qt6P/4Axf8AxNbdFHt6v8z+8DE/4Q7wv/0Lej/+AMX/AMTR/wAId4X/AOhb0f8A8AYv/ia26KPb1f5n94GJ/wAId4X/AOhb0f8A8AYv/ia5H4oeGtB0/wCHWq3Vlomm21wnk7JYbVEdcyoDggZHBI/GvSa4r4t/8kw1j/tj/wCjo66sFWqPE005P4l18xny/X2rXxVX2rXscR/8uvn+gMKKKK+ZEFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXz18d/wDkeLL/ALBqf+jJa+ha+ZvjFdz3PxJv4pn3JbRwxQjAG1TGHxx1+Z2PPrXtZDFvF37J/oNHB0UUV9mMKKKKACiiigAooooAKKKKACiiigAooooAKKKKACvpuvmSvpHTbz+0NLtL3y/L+0QpLsznbuUHGe/WvFzhO0H6/oe5krV5r0/UtVV1FYjYyPNBJOsOJxFECXZkIdQoHU5UYHerVFeInZ3PcaurFNtVsFtzOLyF4wm/MbhyRjPAGSfw61Q/4S3R/wDnpdf+AM//AMRXH6ZbNZWRs3lMxtppYPMIxuCSMo45xwOlXK9qOW0urbOulhXUpxmna6T+/wCZzXxBkbXNegudOt7qaFbVYy32aRcMGc4wyjsRXN6N5Fj4m01tWi2WsV3E1yk0RI8sMC2VxyNueMc16TRXpUrU4KC2R59fh2NWbqKpZvy/4J61/wAJh4Y/6GPSP/A6L/4qvm3x9eWeoeO9XurD7MbZ5/la2OUcgAFgcDJJBJI4yTgsOT3FFVGVgq8PuorOr/5L/wAE8jor1yir9p5GH+rH/T3/AMl/4Jj/AAs0Pz76fWpR8ltmKHnrIR8x69lOORg7/avV6r2MH2axhi27SqDcM556n9c1Yr5fF13WquXTobYfDRw8PZxd/PuFU7m9jtb+1imurSGOdWVUlk2ySSZXaEB6jls98lauVmahGJdV01WJADO3HqAD/SsqUVKVn2f5HQoczte3/A1NOiiisxFHWP8AkFTf8B/9CFcYNRsjcrbLdQtOzFfLVwWBAJOQOnQ9a7m//wCQfc/9cm/ka4lfvD619bw9mDoUXRUb3le/qkv0Pn844aWZSli3U5eSNrWvtd738+xNWL4sdV8OXIZgCxQKCep3A4H4A/lWpd+f9in+zf6/y28vp97HHXjr615jfa1qGpQrDd3HmRq24DYo5wR2Hua+sx+JjSg4NayTPznLcJKtUVRNWi1fuUKKKK+bPrQru/hFY/a/HkU/mbPsdvJPjbnfkeXj2/1mfw964SvR/gr/AMjjef8AYPf/ANGR1pRX7xHFmMmsJUa7Hu1FFFesfAHy14s/5HHXP+whP/6MasetjxZ/yOOuf9hCf/0Y1Y9ePL4mfo9D+FH0QUUUVJqFFFFAH1H8KYZYPhnoyTRvGxWVwrqQSrSuynnsQQQe4IrsqxPB3/Ij6B/2Dbf/ANFrW3X5zipc1ecu7f5khRRRWABRRRQAUUUUAFcV8W/+SYax/wBsf/R0ddrXFfFv/kmGsf8AbH/0dHXTgv8Aeaf+JfmB84aBYRar4j0vTp2dYbq7igdkIDBWcKSM55wa+xK+RfB3/I8aB/2Erf8A9GLX11Xs8RN+0gvJjYUUUV86IKKKKACiiigAooooAKKKKACiiigAooooAKKKKACvl/4t/wDJT9Y/7Y/+iY6+oK+X/i3/AMlP1j/tj/6Jjr3eH/8AeZf4X+aGjiqKKK+wGFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABX0R4b/wCRW0j/AK8of/QBXzvX0jptn/Z+l2ll5nmfZ4Ui34xu2qBnHbpXj5u1yxR7WTJ8836FqiiivCPfPJtU1e28PeIdWsbk3Exa6adSijCrIA+3luxY/wA+9OTxTpDIrG6K
kjJUxtke3AxTPilpNyNZi1KKzb7K1uqyzonG8MR8xHQ4KAE9eg6V59X1WFaqUYy8jyJ5zisJN0Uk0trp7fej0X/hJ9H/AOfz/wAhP/hU1tr2mXcwhhu0LnoGBXPbAyBk+1eaVb0r/kMWP/XxH/6EK3dNF0eIsTKpGMoxs35/5nqNZ+oa1Y6Y/l3MpEmzeECElhz+Hbua0o0aWRY0GWYhQPUmuc8QeDPE2oavJNFp/mQhVWM+dGOMZP8AFnqTWLqU4u05JfM+hzPF1cNR5qKvJvtf56Df+Ez07/njdf8AfK//ABVH/CZ6d/zxuv8Avlf/AIqs9/h14nVUI09WLLkgTx5U5Iwct14zxnqO+QMm58Oa3aecZ9JvUWHd5j+QxQAdTuAxj3ziqjUoy+GSfzPmpZzmUfiVv+3T6Iooor5I9gKzJQH8RQh3I2QFkXPBYkg/p/KtOuH1e8eD4saLE9y0du9qQUMmFZj5oHHQknAHviujDR5nK3Z/kTOuqKu+un3ncUUUVzlDJVjaF1lx5ZUhsnAx35rhF+8PrXX61dx2WkTyyBirbYhtHOXYIPwywrkF5YV7uSwblp1aRdSpGngq85bKLf4MmryfVLcWmq3cCxmNElYIpzwueOvtivWK8+8Z2vk62JgH2zxhiT03DggfgB+dfb5rC9JS7M/HMlqctZw7r8jnaKKK8A+nCvR/gr/yON5/2D3/APRkdecV6P8ABX/kcbz/ALB7/wDoyOtaP8RHDmf+6VPQ92ooor1T4E+WvFn/ACOOuf8AYQn/APRjVj1seLP+Rx1z/sIT/wDoxqx68eXxM/R6H8KPogoooqTUKKKns7SfUL63srVPMuLiRYokyBuZjgDJ4HJ70m0ldgfY9naQafY29lap5dvbxrFEmSdqqMAZPJ4Hep6KK/M223dkhRRRQAUUUUAFFFFABXFfFv8A5JhrH/bH/wBHR12tcV8W/wDkmGsf9sf/AEdHXTgv95p/4l+YHzhoF/FpXiPS9RnV2htbuKd1QAsVVwxAzjnAr3T/AIXv4X/58NY/78xf/HK+eqK+2xeX0cU1Kr0KPoX/AIXv4X/58NY/78xf/HKP+F7+F/8Anw1j/vzF/wDHK+eqK5f7CwnZ/eKx9Jaf8afB975nnzXlhsxt+025O/Oenl7umO+Over3/C2/A/8A0G//ACUn/wDiK+X6KzlkGFbunJfNf5BY+oP+Ft+B/wDoN/8AkpP/APEUf8Lb8D/9Bv8A8lJ//iK+X6KX+r+G/ml96/yCx9Qf8Lb8D/8AQb/8lJ//AIiprT4o+C726S3i12FXfODNFJEnAzyzqFHTua+WaKT4ew1tJS/D/ILH1bqHxG8H6Z5fn+ILN/Mzt+zMZ8Yx18sNjr3xn8Kzpvi94JigkkTVnmZVLCNLWUM5A6DcoGT05IHvXzHRRHh/Dpe9J/h/kFj6F/4Xv4X/AOfDWP8AvzF/8crOv/j7YRzqNO0G5uIduS1xOsLBsnjCh+MY5z+FeF0VvHJMGndxb+bCx7Jd/H66e1dbLw9DDcHGySa6MiDnnKhVJ4z3H9Ky/wDhe/ij/nw0f/vzL/8AHK8voreOVYOKsqa/FjO8u/jF40ubp5otRhtEbGIYbaMovGON4ZuevJPWs6/+JfjLUYFhn1+5RVbcDbhYGzgj70YUkc9M4/KuUoreOCw0bWpr7kBt/wDCY+KP+hk1j/wOl/8Aiqx5ppbmeSeeR5ZpGLvI7FmZickknqSe9MoraNOEfhVgCiiirAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACvorQ7281DSYLi+tVt7hlGRHKsiOMZDIVJ+U/5z1PzrXf3PijV/Dfhvw1b29ztuDC880UqiQPEX/dAk9BtBGAQQPTivOzCg6yjGO9/wBP+AelluIVBzlK9rfr/wAH8z1yivHv+Fr67/z6ad/37f8A+LrT/wCFvf8AUD/8m/8A7CvKlluJWyv80evHNMM93b5M9OorkdD+Imka1eQWRSa1uZVGPO27C/8AdDA8n0yBn6kCm+IfiHY6DfXOn/Y7me8h28ZVYzkBvvZJ6H+71/OsPqlbn5OXU3+t0OT2nNob8mgaNNK8sukWEkjsWZ2tkJYnqSccmoLjwvo0kDLFpVjFKMNG8cKoVYHIOVGeoFeaan8Udau2IsY4bGPcCCFEj4xyCWGCM8/dHb8b3gvxb4i1nxTa2l1eNPalXaZRAgAAU4JIXIG7b+YHeuv6liqcPaOVra7s5IY/CyqKEY3bfZG9Yf8AIQtv+uq/zFdvXCRzNHcLOAu5XDgYwM5z0FNg+KFgl/NaajZTQeXL5QmiPmKcEgsRwQO+BuP9dsfh6tVqUFex7uZVYUnF1Ha53tFZ2ma9pWsqDp9/DOxUt5YbDgA4JKn5gM+o7j1p2razYaHarc6jP5MLOIw2xmyxBOMKD2BryPZz5uW2vY4/aQ5ee6t3L9Fcx/wsLwt/0FP/ACXl/wDiaP8AhYXhb/oKf+S8v/xNafVq/wDI/uZn9aofzr70dPXm3xF8jTfFPh7WJPMbY48xVwfkjdW4Hr87d/TpXWad4y0DVb6Oysr/AM24kzsTyZFzgEnkqB0Brjvi9/zBv+2//tOunBU5RxMYzTV7/kzlx1SEsNKcGna35o9MkkjhieWV1jjRSzOxwFA6knsKdXH+J9Ylb4Zm+aSOG4vbWEYGMMZNu9VBz/CW98DPbNePWepX2n7/ALFe3Ntvxv8AJlZN2OmcHnqfzqsNl8q0HK9rOwsTmMaE1HlvdXPavHV3JbaRZRIFK3OoQRPkcgBt/HvlB+tZKffFT215fa18Nba7uWa4upJkZ2VACQtyOcKMYCr+magT74r1smjyVIwe6nb8UdNeXPlmKmtnTb/8lZLXL+N7XzNNt7kBy0Mm046BWHJP4gD8a6iszxDB9p0C9TdtxHvzjP3fmx+mK+5xUOejKPkfjODqezxEJef/AADy+iiivlD7YK9H+Cv/ACON5/2D3/8ARkdecV6P8Ff+RxvP+we//oyOtaP8RHDmf+6VPQ92ooor1T4E+WvFn/I465/2EJ//AEY1Y9bHiz/kcdc/7CE//oxqx68eXxM/R6H8KPogoooqTUK2/B3/ACPGgf8AYSt//Ri1iV2Xwphin+JmjJNGkihpXCuoIDLE7Kee4IBB7ECsMVLloTl2T/ID6jooor85JCiiigAooooAKKKKACuK+Lf/ACTDWP8Atj/6Ojrta4r4t/8AJMNY/wC2P/o6OunBf7zT/wAS/MD5fooor9EKCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAnsrSS/v7aziKiS4lWJSx4BYgDPtzW343uIpfE81tbPm0sUS0gXB+RUGCuTycNu5OfrimeCLSO98Z6ZFIWCrKZRtPOUUuPwyorHvbuS/v7m8lCiS4laVgo4BYknHtzWG9f0X5v/gG+1D1f5L/gkFFFFbmAV0/xC/5HnUf+2X/opKydAjjm8R6XFKiyRvdxKyM
MhgXGQR3Fa3xC/wCR51H/ALZf+ikrnb/2iK8n+aOmMf8AZ5P+8vyZzFd38Lo5Y9W1K/Ee6GCzKsdwHzFgQPxCN+VcJXoPgbz9P8K6zqKeWyz3EFqFbJI+YKx/KUY9x+c4zWk497L72a5bFPExvstTbrzfxHEkPiC7WMYBYNjPcqCf1Jr0ivOvE/8AyMV1/wAA/wDQFrWnufUcSpfVYv8AvfozKjkkhlSWJ2jkRgyupwVI6EHsa9Q8a+fdfDHRZ28yZh9nklkOWPMRG5j7kjk9zXltew69/wAkdi/68rT+cdcmMdqlJ+Z81glelVX908eooor0Dzjp/h7/AMjzp3/bX/0U9dd8W7SR7DTLwFfLileJhnnLgEY9vkP6Vw/gy8+w+MdLl8vfumEWM4++Cmfw3Z/CvUPiFp8mqaTp1om5Vl1KFHkCbhGGDLuPtlh+YFeRipcmNpze1v8AM9jCx58DUgt7/wCRk/Fq82aXp1l5efOmaXfnpsXGMe+/9K8orp/Hev8A9u+I5fKk3WdrmGHByrY+8w5IOT3HUBa5iuzA0nSoRi9zjx9VVcRKS22PXPhlGZvBl/EpAZ7qRRnpkxpTk++Kf8KP+RWuf+v1/wD0BKdNGIb2WJSSqOyjPXANRl0v9vcf7y/M99P/AIRK6/6dy/8ASWFRXMC3VrLbuSElQoxXrgjHFS0V+gNXVmfiybTujxyirOoxJBqd3DEu2OOZ1UZzgBiBVavjpKzaPvIy5kpLqFej/BX/AJHG8/7B7/8AoyOvOK0tF17U/Dt493pVz9nneMxM/lq+VJBxhgR1AqqclGSbMMZRlWoSpx3aPquivlr/AISzxH/0MGq/+Bkn+NMl8Ta/cQvDNrmpSRSKUdHu5CrKeCCCeRXX9bXY+e/1eqfzr7h/iz/kcdc/7CE//oxqx6KK4m7u59RCPLFR7BRRRSKCvQ/gtp/234hwz+bs+xW0s+3bnfkCPHXj/WZzz0x3rzyiscRSdalKmna6sB9q0V8fWniXXtPtUtbLW9StrdM7Iobp0RcnJwAcDkk/jWjYfEHxdp07TQeIb92ZdpFxL564yD92TcAeOuM/nXzMuHauvLNfj/wRWPrCivmOH4veNop45H1ZJlVgxje1iCuAeh2qDg9OCD71r/8AC9/FH/Pho/8A35l/+OVhLIcWtrP5/wDACx9C0V4jD8f5VgjE/htHmCgOyXhVWbHJAKHAz2yfqa1LD496NJAx1HR7+3m3YC27JMpXA5yxTnOeMfjXPLKMbHVw/Ff5hY9aorzmw+NnhG8naOc39ioXcJLi3ypORx+7LHPOemOOtakPxW8EzzxwpriBpGCgvbyooJOOWZQAPckAVhLAYqO9N/cxHZVxXxb/AOSYax/2x/8AR0dbX/CY+F/+hk0f/wADov8A4qofE15a6h8PtcurK5hubd9NudksLh0bEbA4I4PII/Clh4zpV4TlF6NfmB8l0UUV+hlBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAdP4S/cWPiO9l+W3TTJLdn64kkICDHXkg89B3xXMV09j/ofw61W4+/9vvYbTb08vywZN3vnOMcetcxWFLWc5edvuS/W5vV0hCPlf72/0sFFFFbmBp+G/wDkadI/6/Yf/QxWp8Qv+R51H/tl/wCikqLwJbxXXjXTI5k3KHaQDJHzKjMp49CAag8X3cl74v1WWQKGW4aIbRxhPkH44UVy74r0j+b/AOAde2E9Zfkv+CYleg6V5+nfDmx/1bJqWsR+pKoCPyO6L34P5efV6TNbta6H4L05pfMglEt2w24+fbvX3+XzCPf0oxOrhHz/ACTZtlkW6unkvvaRp1514n/5GK6/4B/6Atei1514n/5GK6/4B/6AtaU9z6XiT/dI/wCJfkzIr2HXv+SOxf8AXlafzjrx6vdP+aXf9wX/ANo1x5hLllTf94+dy6PNGqu8TwuiiivSPLLmk3cdhrNjeShjHb3EcrBRyQrAnHvxXtfj6SSHwXfyxO0ciNEyupwVIlTBB7GvCK9tS7j1j4VyXEoaUnTXDGcbi0kakFu/O5cg9eh615WYRtUp1OzPWy6d6dWl3R4lRRRXqnknrnwnuIm0G9tg+Zo7rzGXB4VlUA5+qt+VSeL7k6ZNcXQUoqvEx2qPmUsobGfX5vxrI+FVzBaQa3Pczxwwr5G6SVwqjJcDJPvUXj3xHp+raep025jmSQrG2chsAls7TgjkAcj+leVRU4ZhKUVp3+5n02Hqw/s6cJveElb70af9r6b/ANBG0/7/AC/41OtzA9v9oSaNocE+YGBXA6nPSvIaK+sWbS6x/E/PHkcOk/wL2sT21zq91PaAiF33DPc9zz6nJ/HtVGiivKlLmk5Pqe3CKhFRXQKKKKkoKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA1n1NR4Rh0pSpY373Mg2nIAjRVwemDl/yHTvk0UVMYqOxUpOW4UUUVRJ3Hwrjjk8VzM6KzR2jshIyVO5BkehwSPxNcnq13Hf6zfXkQYR3FxJKoYcgMxIz781t+EPF/wDwiv2z/QftX2nZ/wAtdm3bu/2Tn736VzFc1OnL285taWSX6nVUqR+rwhF63bf6BXo+uPBF420XTrS78+3sbLyAokDbHUOpBxwGwq56dBXnFanh2WODXraSaRI0G7LOwAHynua0nT5pKd9r/ia5fVUK8IvrKOvoz0ivOvE//IxXX/AP/QFr0WvOvE//ACMV1/wD/wBAWlT3Pp+JP90j/iX5MyK90/5pd/3Bf/aNeF179phs/wDhBbBdQkjjs30+KOVpZNi7WQLgtkYznH41wZo7KD8z5/Kld1F5HgNFFFeqeSFek+DL/wA/4d+I7FmkZraGVxuOVVHjOAvPHKsSPf3NebVc0/U7nTGuTblcXNvJbShlyCjjB+h6H8PTIrDE0faw5Vvozow1b2M+Z7Wa+8p0UUVuc4UUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABWnoHh/UfEup/wBn6ZD5tx5byYPAAVSeT0GThQTgZYAkZrMr6C+DHhb+yvDz63dRbbvUf9VuXBSAdOoBG4/NwSCAhqZOyNaNP2krHgd3Z3VhdPa3ltNbXCY3xTIUdcjIyDyOCD+NQ19Z+KvDWh+I9LZNciQQ26vItyX8trf5eWDdgByQcrwMg4r5W1JLOLVLuPT5XmslndbeRxhnjDHaTwOSMdh9KIyuOtRdN7lWuh8GWOnalrv2TUo/MSSFvKTcwy4weq/7IbrXPVt+ELlLTxXp0kgYhpDGNvq4Kj9WFKpfkdjkr39lK29jrbn4ZWzyA2upTRR45WWISHPrkFfbtXMa/wCELzw/bC5muIJYGm8pChO48EgkEYHA9TXsdct8QbX7R4
VeXft+zzJLjGd2Tsx7ffz+FcFHETc0m9DyMNjarqRjJ3TPJKKKK9I9wKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAp0kkkrBpHZ2ChQWOTgDAH0AAH4U2igAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAN3wd4cl8VeKLPS0DiF233Ei5/dxDljnBwccDIxuIHevrCGGK3gjggjSKGNQiRooVVUDAAA6ACvNPgx4W/srw8+t3UW271H/AFW5cFIB06gEbj83BIICGvTqxm7s9LDU+WF3uzxf45eJLqOS08NRLst5I1u5pA5zJ8zKqEdNoK7uc5O3pjnxeu3+Lk0svxK1NJJHdYlhSNWYkIvlK2B6DLE/Un1riK0irI4q8nKowqxYXX2HUbW72b/ImSXbnG7aQcZ7dKr0VTV9DFq6sz6ErG8V2v2zwrqMW/ZthMucZ+4d+Px24/GrukTyXWi2FxM26WW3jd2wBlioJPFSX9r9u066tN+zz4Xi3Yzt3AjOO/WvFj7svQ+Xg+Son2Z4HRRRXtH1IUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFbvg7w5L4q8UWeloHELtvuJFz+7iHLHODg44GRjcQO9YVfQXwY8Lf2V4efW7qLbd6j/qty4KQDp1AI3H5uCQQENTJ2RrRp887HpcMMVvBHBBGkUMahEjRQqqoGAAB0AFPoorA9Y+c/jVYRWnj8zxs5a8tI55AxGAw3R8e2Ix+Oa87r3L49WEsml6NqAZPJgnkgZSTuLSKGGPbEbZ+orw2t4vQ8qvG1RhRRRVGJ7L4KkeTwhYNI7OQHUFjngOwA/AACt+vO/AniDStM0mSzvbtYZpLosoZWxgqoBLYwOQeprvra7tr2MyWtxDPGDtLROGAPpkfUV5FaDjN6HzeKpyhVk2tLng93bPZXs9rIVMkEjRsV6Eg4OPyqGtfxRbPaeKNSjkKktO0g2+j/ADD9GFZFerF3imfQwlzRUu4UUUVRYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBu+DvDkvirxRZ6WgcQu2+4kXP7uIcsc4ODjgZGNxA719YQwxW8EcEEaRQxqESNFCqqgYAAHQAV80/CTUW0/4iWC/aEhhulkt5d+AHBUlVyehLqmMck4HfFfTNZVNz0MIlythRRRWZ1nHfFLTm1L4d6qsduk00CrcJuxlAjAuwJ6EJv6ckEjvivmCvsq8tIL+xuLO6TzLe4jaKVMkblYYIyORwe1fHE0MtvPJBPG8U0bFHjdSrKwOCCD0INa03ocGMj7yYyiiitDjCrllq2o6dgWd7PAoffsSQhS3HJXoeg61TopNJ7iaTVmWL6+udSvJLu7k8yeTG59oGcAAcDjoBVeiimlbRAkkrIKKKKBhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAF3R9Q/snW7DUvK837Jcxz+Xu279jBsZwcZx1xX2HXxfX2hWVQ7sG9wooorM7Qr5J8Yf8AI7a//wBhG4/9GNX1tXyT4w/5HbX/APsI3H/oxq0p7nHjPhRi0UUVqcAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAV9beD/wDkSdA/7B1v/wCi1r5Jr0fwX8XNR8OWsGmalB9v02L5UYNiaJcjgE8MoGcKcdQNwAAqJptaHRh6kYSfMfRNFc3p3j7wtqWnwXi65Y24lXd5VzcJFIh7hlJ4IP4HqCRg1a/4TDwx/wBDHpH/AIHRf/FVlZno88e5tV8heJLuC/8AFOr3lq/mW9xezSxPgjcrOSDg8jg9692+JXjjSY/A97b6Vq9nc3d5i2C2s8cpVW++WGThSgZc44LDp1HztWlNdTixc02ooKKKK0OMKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAP/Z", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAoAAAAGqCAIAAAAUcGK5AAAc/klEQVR4Ae3drbIkR3oG4DMbsoAHeOhIETYwn9gwEhAasESLTXwDFvIVrMD6Cox0BUsW7xKBQQOMHI7lBnaER1QCMpCBXXbPVPT0T3VVVmbll5mPQhHq012V+X1PluI92X/n6ck/BAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQGEzg2WD9apcAAQIFBf7pL/9wGv0f/uPXBacxdBcCn3TRhSYIECBQWWCO3sp1mL4dgV+0U6pKCRAgEFTgOn2v7wlaurLqCXgKup69mQkQaF9gOWg9Ed3+ChfswA64IK6hCRAYXGA5ngfH0b4Adg0QIECAAIEKAgK4AropCRAYR8AmeJy13tqpAN4q5ngCBAi8F1gZrisPwzqagAAebcX1S4AAAQIhBARwiGVQBAECBAiMJiCAR1tx/RIgQIBACAEBHGIZFEGAQMcCPg3c8eLuaU0A79FzLgECBAgQSBQQwIlwTiNAgAABAnsEBPAePecSIECAAIFEAQGcCOc0AgQIECCwR0AA79FzLgECQwt4d9XQy7+7eQG8m9AABAgMLCCDB178va0L4L2CzidAgMCCgIRewBn8IQE8+AWgfQIE9gosROzCQ3tndX77As/ab0EHBAgQCCFw8UcXpG+IVVEEAQIECAwicBHDg3StTQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAnwL/8+8v+2xMVwQINCLgm7AaWShlEiBAgEBfAgK4r/XUDQECBAg0IiCAG1koZRIgQIBAXwICuK/11A0BAgQINCIggBtZKGUSIECAQF8CAriv9dTNOgFvgV7n5CgCBAoKCOCCuIYmQIAAAQL3BATwPRn3EyBAgACBggKfFBzb0AQIEGhU4Lf/fFn4N19c3uNnAvsEnu073dkEmhQ4vQb87K++b7J6RRcSuA7di4lk8AWIH/cJeAp6n5+zCRDoQ+Bh+k5trjmmDw1dHCIggA9hNgkBApEF1ifr+iMj96u2GAICOMY6qOJAAc8/H4jdwlRbM3Xr8S0YqLGKgACuwm5SAgRiCKSladpZMTpWRRwBARxnLVRCgMCxAnL0WG+zXQgI4AsQPxIgQGCFgPBegeSQZQEBvOzjUQIE+hXY+bEiGdzvpXFMZwL4GGezECBAgACBjwQefBPWn3768qPDV//w6vnb1cc6kAABAm0KTJvgndvoNvtWdRaBBwGcPEdyci/PKNeXfTxKgMA2gSk+PZO8jczR2QRKBXC2Aj8eqESuC/WPjf1EgAABAkcILH0XdIm0O6KnAHMI9QCLcLuE+S8B+yLo20Bj3rtnE+wp6DGvmRxdN7YDztHyEWOU+N1FqB+xcuaIIHARhxIuwqKooYCAAC6AWmbI9aEuqsusgFFLClyE7vlU80OS+JzF7fYFBHD7a6gDAq0LzBFbq5H234r1qx9+uMD77sWLi3v8GE1AAEdbEfUQGEygevp26i2S4y+sL+KIv0YqJEAgsEA7v0BMkXz6N7DmWKUJ4LHWW7cECBAQw0GuAQEcZCGUkV9g/sRR/qGNSOBcoJ1N8HnV189Rnz/q9gECAvgAZFNUE5DB1ehN3IKADK67SgK4rr/ZCQwvEOTDRTvLaHMTPPzFVxlAAFdeANOXEzh915VNcDnho0cWckeLm6+sgAAu62v0CAIyOMIqLNWwc/e5NPSBj/n94EDsPqYSwH2soy4IDCBQNKfF5wBXULQWBXC0FVFPEYGLTfDNv8RwcUyROgzat4AU73t9c3fnm7ByixovqsByvi4/GrWnkeoquv0dCVKvcQTsgOOshUryC9zc6V5Pc0rflQdfn+6e4gINpa9NcPGroZ8J7ID7WUudJAjY+Cag5T9lObQaSt/8NEbsWUAA97y64/SWlqNpZ42jqlMCBIoKeAq6KK/B4wpcpK/nn6stVX/b3+WOqkGbOJyAAA63JAo6QOAifQ+Y0RSJAsIsEc5pDQgI4AYWSYkECDQm4PeGxhasTrkCuI67WQkQWCsgzNZKOa4xAQHc2IIp96aAV3BvsrizpoDfG2rqtzG3AG5jnVT5UEAGPyRq+ABh1vDiKf2ugAC+S+OBjgX++99+N/3bcYNaI0AgvoAAjr9GKlwr8HATfMrdm9HrfdFrlWsd1+ImuMWaa63vkPP6Io4hl32wpm8m7mAG2l0UkJSLPB
4sJCCAC8Eatr6A3K2/BiogQOC+gAC+b+ORNgXkbkvrZuvZ0mqpNbOAAM4MariGBKbXjL30e1qvn9+8Od349PXrhlZQqQSaFhDATS+f4gmkC8yhmz7E/jOnv3RkE7yf0QhtCngXdJvrdr/qV8/f3n9wiEf+7K//bn2fD984vX6oto4Mkb5ByPwGEGQhxitDAI+35jr+WGDOYE9HfwzjJwIEygoI4LK+Rj9YYE7TTfOmnbVpCgcHFbD9DbowQ5QlgIdYZk1eC1y820gGXxP1f4/07X+NQ3foTVihl0dxRwrI4CO1P5preivW+T83c/HimPPjI9+eeglf+Q9/826Z8MW/fLZ8gEfTBO4G8J9++jJtRGcRIBBc4N47sC6eFTioi+t8mu85JfH8Y96CbsZ83ilaGO1h+k5NTMfI4BKL6SnoEqrGrCyw6Y3QlWs1/YLAFL3Sd8HnwIfW5PSB5XQy1d0dcCf9aYMAgX0C19vlOhvlfV04uy2B7p+CPX1e1A64rctStQQI7Ba4+eRzoa327mIN0LGAHXDHiztoa6f3Uv38/qsVbyPYw9126f7ehei9+VD3IBqsKiCAq/KbvJjAQsReP6darAoDhxG4l682vmGWaMBCBPCAiz56ywvZPALNWL9/3MvdaaVF7wiXe+wevQYce31UR4BAmsAUvUHSd6GMtNac1YuAHXAvK6kPAtsF0p4MmPbQaSduLzD1jIXMu7nxXTg+tQTnEXgoIIAfEjmAQFcC0bOzEPbN3C00l2EJrBMQwOucHEWAQEMC4rahxRq4VAE88OJrvZ7AT7/95Tz582/+db7tRoLAOebp9G2kAz///PvX/5UA7pRcAgI4l6RxCCQKXOfHaaCFFJlPWTgmsZpgp82d7qxrHqd7sZ1QEU7v/muwZmQBPFO4QSCWwJwZ12VNKXJ6dOGY67Pme/aHUOn3YaX1NTc43dgwwvx89cBb4XM6tw8TEMCHUZuIQDaBDelya87p9JUZXOVDwzu7u9Xx+/sedD0n8TyESJ4p3CggIIALoBqSAIGqAtkifIrkLBk8DXKd7lWJTpP/7Zs//+7FiwCFDFqCAB504bVNIKZAtuy81d714A/2xLcGcR+BXAICOJekcQgcKjAlx3WcHFpBF5M9NHzeRZuaiCngqyhjrouqCNQXWH4BePnR+tWrgEB4AQEcfokUSOCWwMOt262Tot/XZVPR0dVXT0AA17M3MwECBQS8rFsA1ZBFBARwEVaDEuheIPt3Stv+Jl8zv/rhh+RznVhRQABXxDc1gV0Cdbd6YV8DFuS7rionHyjgXdAHYpuKwCECuYJ52uMupGzeHXBjqZnr88GHXA8mCSsggMMujcIIPBCYQmvK2ncvv7o47t3Lzy/uWfnjZ9//8eLIvCl7MXihH+ffPzKH+p5v0thzbiGmqMOO80XQ0woI4KiXoboIHC5wneXHlPAXX/9nxonOusgw7E9Pf//jt///C83FLzrf/uNnX/8mY9mGGlDAa8ADLrqWuxK43rZ21Z5mCPQrYAfc1dq+ev62q340M4BA3u1vCDBPOIdYhgaKsANuYJGUSIDAVoEOc30rgePDCwjg8EukwB4F5jcK7Wwu8/uMdlbjdAIEtggI4C1ajiVAgAABApkEBHAmSMMQILBdoM8nirP8CeHtmM5oTkAAN7dkCiZAgACBHgQEcA+rqAcCBAgQaE5AADe3ZAomcCngo8CzyPsvzZh/doNAYAEBHHhxlEaAQACBlBeqvQyctHBDfQ/lJHQ7gEdTSLpUnESAQFCBlMgM2oqyeha4HcA9d6w3An0J+ChwX+upm4EEfBXlQIut1Y4Frl8GPvubBAf1fV3Dw4l/evrlw2McQKBXAQHc68rqa3SBhDicydaH955Z5uncIDCmgKegx1x3XRNYEphitelkzfte6MTRvA9r6RLz2P8JCGDXAQECBAgQqCDgKegK6KYkkFdgeh9Wrr/ukLew40ebN+5eXT4e34xbBeyAt4o5ngCB9wJz2hEhQCBBQAAnoDmFAIHRBT77+jePCbwM/Nho6CME8NDLr/laAj68W0vevATiCAjgOGuhEgKdCPT064UX1zu5KEO24U1YIZdFUQQ2Chz5Pqz5ix4X3uh0ncGSbOOSDnf4gF+BLICHu8o13KvAKfPC5tzBkTw5XM9YYemnl4G/+aLCvKZsQUAAt7BK62p89fztugMd1bPAw9SJk9APS+15nfRG4OlJALdxFQjXNtaphSpXxl6cnG4BVY0EUgQEcIpa9nPka3ZSA+4UmHL6x293juF0AgSWBATwkk6Wx4RrFsbOBonyCmVnrE9P0+8NGffueYbyMnB3l1muhgTwXkn5uldw1PNl8Kgrr28C7wVuB3ChUGnuXeaFHFx9BE4CwTN4+rhR4h8CirHAwXljIKmipsDtAC5UUd08u47/uvUUQjZsEwIr3wnVRC+KJEAgTeDQAE4rMddZ4jaXpHH2C9ic7TdMHqECvpeBk1er6xMHCuCu11FzBAhsEJgyeD564dmIvG/pmmd0g8BJQAC7EgjUEaiwD9veaOsvA6/peGUYrxnKMckC1y8RJg/V0IkCuKHFUmqfAnMALGzFCnU+T309/qmYpt+Edd3UdM9Cy/Ojxy/EzVLd2b2AAO5+iTXYjMBCNuSNhIWJ1mCdUnn+kwxrTql1zNRpXrpajZi3SwEB3OWyaqo3gYXIXBkwCyOkYc0DriwgbZZ+zvI+rH7WMlsnAjgbpYEIVBGYg3Ca/SILzx8qV9s8y8Xs5WY0MoE+BARwH+uoi/YETnE1p1eWBqbRKqbg3MvDGuYj93f9cK79UxiBQCGBXxQa17AECFQRyJJtO1NtquH0bxWBhUkTcHZSLBTjIQIC2DVAoDeBhJgpRFA9htPiMw5goXUxbBABARxkIZRBoFuB6jHcrazGGhcQwI0voPLbFDjtzJreab17+VWb9qomEEVAAEdZCXUQINC5wPRJJP8QOBMQwGcYbhIgcCbQ2ddgNf18w9mydHhzzO+hnBZSAHd4NWspuEDaO4OCNxWnvOy8TXznVxx/lawXEMDrrRxJIKdA2A1Z9gDLqWYsAh0JCOCOFlMrBBoRkPGNLJQyywr4JqyyvkYnQIBATIHvXrwIUtir52/PKxnhJeGpx6lrAXy+7m4T6EHA/vJ6FcM+4X9d6p574mTqni4u8vh6qG4SWgBfL657CBAgUEagzN9E6iN314svJ3RD8SyA1y+6IwlkEDhtT+NvyKYK3738PEPDAYbYrz29EbqzD2UFWJZSJTQUzwK41EVgXAIEDhCYwvXeU+737j+gKlOEFQgVzwI47HWiMAIVBPoLrf3b3wrLYMpKAgfHswCutM6mJUCAAIGmBLLHswBuav0VS4DACoFpH3/a+Nr+rtBySB6B5Xie5rh+d5gAzkNvFAJrBE5P8EqFNVaOIdCZwHVC+yaszpZYOwQIvBfI+4uOb4R2YWUXEMDZSQ1IgMAAAt98kdikP0qYCNfhaQK4w0XVEoGdAnn3jjuLcTqBXgUEcK8rqy8CBAgQCC0ggEMvj+IIHCnQ6IeAq+3X056FT
jvryOvAXEcJCOCjpM0zvIC3QDd0CZQKdenb0EVQvlQfQypvbAYCBLoQuPGN0MuBevF+q+WDuyDSxCYBAbyJy8EECOQRmL8rI89wMUeRuDHXJUxVnoIOsxQKIUAgVeD6GeNGX89OBXBekwICuMllUzSBewL7g+cUZu9efnVvCvcTIJBFQABnYTQIgXAC15vCcCU2W9Bn3/+x2doVHkhAAAdaDKV0LBD/LdD7t84dL5/WCJQQEMAlVI1JgMDRAgfs+H/89vOjuzJf1wLeBd318mqOAIEcAh9eFxfAOTSN8UFAAH+Q8F8CBAhcCZxvrL30e8Xjjl0CAngXn5MJEOhV4Dx6e+1RX3UFvAZc19/sBHIK7H8jVdOp03TxOa8DYzUiYAfcyEIpkwABAgQOEfjyD+8/BP/212U/byaAD1lPkxB4eoq8P9u/dbbCBPoTKJ3EAri/a0ZHBAgQIJBTYE7iedAsm2MBPHu6QaAfgci77aLKU+N280WFgwz+85s3n75+XbGY80hODmNvwqq4gqYeSGDYRBxojbV6rMCUwYUmTA7UrfXYAW8VczyBPgX8itDnuvbY1Ry9pxt1t8J7gO2A9+g5l0AggT1PvfaUvj31EujyClzKFMNzJAcu80ZpAvgGirsIECBAIKbAvay9d3/MLk5VCeDIq6M2Aj0L7NmyL7vYBC/79Ppoc1thAdzrpagvAkMLyOAul3/NNrehGPYmrC6vUk2NKCByRlx1PQcQmD6SlPbGaTvgAKunBAIECBB4JLBm+/tojLWPpwXq2tE/HCeAP0j4LwECBAh0IdDKB5M8Bd3F5aYJArkFTn/79t3L999Kn3v40N+Mnb1ZA+4XWL/9bSV9JxMBvP/CMAKBbgXy/gl6r1J3e6GEaayh9J3MPAUd5sJRCIGuBaRv18tbtrmV29+20nciE8BlrxujEyBAgACBmwIC+CaLOwkQIEAghECv298JVwCHuMIUQYAAAQLJAiWefN70SaTzv064vgsBvN7KkQQIECBwqMDK7e+hNeWbTADnszQSAQIECBBYLSCAV1M5kAABAgQOFOh7+ztBCuADryZTESBAgACBDwIC+IOE/xIgQIBAGIH1298S78A6hkEAH+NsFgIECBBoTGDTG6ETehPACWhOIUCAAIGCAuu3vwWLKD+0AC5vbAYCBAgQ6F0g4aPAArj3i0J/BAgQaEpgkO3vtCYCuKkLU7EECBAg0IuAAO5lJfVBgACB9gXG2f5OayWA279gdUCAAAECDQoI4AYXTckECBDoUSDg9rfoJ5EEcI9XsZ4IECBAILyAAA6/RAokQIDAAALJ29/kE6ujCuDqS6AAAgQIEBhRQACPuOp6JkCAQCiBdnex54xbv4tDAJ/ruU2AAAECBA4SEMAHQZuGAAECBG4K9LH9vdna8p0CeNnHowQIECAQXaBohJf7JJIAjn5hqY8AAQIdCxTNzuBuAjj4AimPAAECBPoUEMB9rquuCBAgEF9g5O3vtDoCOP4lqkICBAgQ6FBAAHe4qFoiQIBAfIG829+8oyXrbfoo8CfJ0ziRAAECBAiMIHD+RuhNEbuMI4CXfTxKgAABAvkFgmxYExrLGMaegk7wdwoBAgQIENgr8GzvAM4nQIAAAQJbBMptfz99/XpLITmPnZ+aPt8iL0/gKehlH48SIECAAIHHAutzdx7LU9AzhRsECBAgUFyg3Pa3eOm5JxDAuUWNR4AAAQIEVggI4BVIDiFAgACBHAK2v+eKAvhcw20CBAgQaFigrYAXwA1fakonQIBAQwJtpeMBsAL4AGRTECBAgACBSwEBfCniZwIECBAgcICAAD4A2RQECBAg8HTMt2Q09ES3APZ/BQECBAgQqCAggCugm5IAAQIECAhg1wABAgQIEKggIIAroJuSAAECBAgIYNcAAQIECBwk4H1Y59AC+FzDbQIECBAgcJCAAD4I2jQECBAgQOBcQACfa7hNgAABAgQOEhDAB0GbhgABAgQInAsI4HMNtwkQIECgrMAx78Mq20Om0QVwJkjDECBAgACBLQICeIuWYwkQIECAQCYBAZwJ0jAECBAgQGCLgADeouVYAgQIECCQSUAAZ4I0DAECBAgQ2CIggLdoOZYAAQIECGQSEMCZIA1DgAABAgS2CAjgLVqOJUCAAAECmQQEcCZIwxAgQIAAgS0CAniLlmMJECBAgEAmAQGcCdIwBAgQIEBgi4AA3qLlWAIECBDYLeDroE+EAnj3pWQAAgQIECCwXUAAbzdzBgECBAgQ2C0ggHcTGoAAAQIECGwXEMDbzZxBgAABAgR2Cwjg3YQGIECAAAEC2wUE8HYzZxAgQIBAYIFW3mUtgANfREojQIAAgX4FBHC/a6szAgQIjCfQyvZ3WhkBPN7lqWMCBAjUFmgoJstRCeBytkYmQIAAgbsCJTK4xJh3G9j9gADeTWgAAgQIEEgSmPKyrchM6vLuSQL4Lo0HCBAgQOAAgWFj+NkBuKYgQIAAAQJrBH5+82bNYTePaW4zbQd8cx3dSYAAAQIVBJoL0QpGpiRAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQI9Cjwv4nKxMHetKhHAAAAAElFTkSuQmCC", "text/plain": [ "" ] @@ -279,7 +280,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "person\n" + "dining table\n" ] }, { @@ -297,7 +298,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "refrigerator\n" + "book\n" ] }, { @@ -315,7 +316,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "book\n" + "potted plant\n" ] }, { @@ -333,7 +334,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "potted plant\n" + "person\n" ] }, { @@ -351,7 +352,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "tv\n" + "chair\n" ] }, { @@ -369,7 +370,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "vase\n" + "clock\n" ] }, { @@ -387,7 +388,7 @@ 
"name": "stdout", "output_type": "stream", "text": [ - "chair\n" + "refrigerator\n" ] }, { @@ -405,7 +406,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "dining table\n" + "microwave\n" ] }, { @@ -423,7 +424,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "microwave\n" + "tv\n" ] }, { @@ -441,7 +442,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "clock\n" + "vase\n" ] }, { @@ -489,44 +490,60 @@ "text/plain": [ "[{'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", - " 'value': 0.25742574257425743,\n", - " 'label': {'key': 'name', 'value': 'vase'}},\n", + " 'value': 0.504950495049505,\n", + " 'label': {'key': 'name', 'value': 'bed'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", - " 'value': 0.25742574257425743,\n", - " 'label': {'key': 'name', 'value': 'vase'}},\n", + " 'value': 0.504950495049505,\n", + " 'label': {'key': 'name', 'value': 'bed'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", - " 'value': 0.6633663366336634,\n", - " 'label': {'key': 'name', 'value': 'potted plant'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'book'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", - " 'value': 0.6633663366336634,\n", - " 'label': {'key': 'name', 'value': 'potted plant'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'book'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", - " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'chair'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'truck'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", - " 'value': 0.33663366336633666,\n", - " 'label': {'key': 'name', 'value': 'chair'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'truck'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'bed'}},\n", + " 'label': {'key': 'name', 'value': 'stop sign'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'bed'}},\n", + " 'label': {'key': 'name', 'value': 'stop sign'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.5},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'dining table'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.75},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'dining table'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.5},\n", + " 'value': 0.504950495049505,\n", + " 'label': {'key': 'name', 'value': 'chair'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.75},\n", + " 'value': 0.33663366336633666,\n", + " 'label': {'key': 'name', 'value': 'chair'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", " 'value': 1.0,\n", - " 'label': {'key': 'name', 'value': 'tv'}},\n", + " 'label': {'key': 'name', 'value': 'bear'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", " 'value': 1.0,\n", - " 'label': {'key': 'name', 'value': 'tv'}},\n", + " 'label': {'key': 'name', 'value': 'bear'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", " 'value': 0.6633663366336634,\n", @@ -537,14 +554,6 @@ " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'microwave'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 
'value': 'microwave'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", " 'value': 1.0,\n", " 'label': {'key': 'name', 'value': 'clock'}},\n", " {'type': 'AP',\n", @@ -553,68 +562,60 @@ " 'label': {'key': 'name', 'value': 'clock'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'book'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'book'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", - " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'stop sign'}},\n", + " 'value': 0.25742574257425743,\n", + " 'label': {'key': 'name', 'value': 'vase'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", - " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'stop sign'}},\n", + " 'value': 0.25742574257425743,\n", + " 'label': {'key': 'name', 'value': 'vase'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", + " 'label': {'key': 'name', 'value': 'microwave'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", + " 'label': {'key': 'name', 'value': 'microwave'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'car'}},\n", + " 'value': 1.0,\n", + " 'label': {'key': 'name', 'value': 'tv'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'car'}},\n", + " 'value': 1.0,\n", + " 'label': {'key': 'name', 'value': 'tv'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'dining table'}},\n", + " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'dining table'}},\n", + " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'truck'}},\n", + " 'value': 0.504950495049505,\n", + " 'label': {'key': 'name', 'value': 'person'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'truck'}},\n", + " 'value': 0.504950495049505,\n", + " 'label': {'key': 'name', 'value': 'person'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", - " 'value': 1.0,\n", - " 'label': {'key': 'name', 'value': 'bear'}},\n", + " 'value': 0.6633663366336634,\n", + " 'label': {'key': 'name', 'value': 'potted plant'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", - " 'value': 1.0,\n", - " 'label': {'key': 'name', 'value': 'bear'}},\n", + " 'value': 0.6633663366336634,\n", + " 'label': {'key': 'name', 'value': 'potted plant'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.5},\n", - " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'person'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'car'}},\n", " {'type': 'AP',\n", " 'parameters': {'iou': 0.75},\n", - " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'person'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'car'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': 
[0.5,\n", " 0.55,\n", @@ -626,8 +627,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.225,\n", - " 'label': {'key': 'name', 'value': 'vase'}},\n", + " 'value': 0.4,\n", + " 'label': {'key': 'name', 'value': 'bed'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -639,8 +640,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.4999999999999999,\n", - " 'label': {'key': 'name', 'value': 'potted plant'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'book'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -652,8 +653,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.35,\n", - " 'label': {'key': 'name', 'value': 'chair'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'truck'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -665,8 +666,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.4,\n", - " 'label': {'key': 'name', 'value': 'bed'}},\n", + " 'value': 0.5,\n", + " 'label': {'key': 'name', 'value': 'stop sign'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -678,8 +679,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.9,\n", - " 'label': {'key': 'name', 'value': 'tv'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'dining table'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -691,8 +692,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.6,\n", - " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", + " 'value': -1.0,\n", + " 'label': {'key': 'name', 'value': 'bottle'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -704,8 +705,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'microwave'}},\n", + " 'value': 0.35,\n", + " 'label': {'key': 'name', 'value': 'chair'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -717,8 +718,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.6,\n", - " 'label': {'key': 'name', 'value': 'clock'}},\n", + " 'value': 0.9,\n", + " 'label': {'key': 'name', 'value': 'bear'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -730,8 +731,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'book'}},\n", + " 'value': 0.6,\n", + " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -743,8 +744,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.5,\n", - " 'label': {'key': 'name', 'value': 'stop sign'}},\n", + " 'value': 0.6,\n", + " 'label': {'key': 'name', 'value': 'clock'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -756,8 +757,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", + " 'value': 0.225,\n", + " 'label': {'key': 'name', 'value': 'vase'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -770,7 +771,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'car'}},\n", + " 'label': {'key': 'name', 'value': 'microwave'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -782,8 +783,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'dining table'}},\n", + " 'value': 0.9,\n", + " 'label': {'key': 'name', 'value': 'tv'}},\n", " 
{'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -796,7 +797,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'truck'}},\n", + " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -808,8 +809,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': -1.0,\n", - " 'label': {'key': 'name', 'value': 'bottle'}},\n", + " 'value': 0.45,\n", + " 'label': {'key': 'name', 'value': 'person'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -821,8 +822,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.9,\n", - " 'label': {'key': 'name', 'value': 'bear'}},\n", + " 'value': 0.4999999999999999,\n", + " 'label': {'key': 'name', 'value': 'potted plant'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -834,14 +835,14 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.45,\n", - " 'label': {'key': 'name', 'value': 'person'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'car'}},\n", " {'type': 'mAP',\n", " 'parameters': {'iou': 0.5, 'label_key': 'name'},\n", " 'value': 0.4127475247524752},\n", " {'type': 'mAP',\n", " 'parameters': {'iou': 0.75, 'label_key': 'name'},\n", - " 'value': 0.40222772277227714},\n", + " 'value': 0.40222772277227725},\n", " {'type': 'mAR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -854,7 +855,7 @@ " 0.9,\n", " 0.95],\n", " 'label_key': 'name'},\n", - " 'value': 0.3390625},\n", + " 'value': 0.33906250000000004},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -866,8 +867,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.23168316831683172,\n", - " 'label': {'key': 'name', 'value': 'vase'}},\n", + " 'value': 0.403960396039604,\n", + " 'label': {'key': 'name', 'value': 'bed'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -879,8 +880,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.49801980198019813,\n", - " 'label': {'key': 'name', 'value': 'potted plant'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'book'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -892,8 +893,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.403960396039604,\n", - " 'label': {'key': 'name', 'value': 'bed'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'truck'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -905,8 +906,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.597029702970297,\n", - " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", + " 'value': 0.5049504950495048,\n", + " 'label': {'key': 'name', 'value': 'stop sign'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -918,8 +919,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.6,\n", - " 'label': {'key': 'name', 'value': 'clock'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'dining table'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -931,8 +932,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.5049504950495048,\n", - " 'label': {'key': 'name', 'value': 'stop sign'}},\n", + " 'value': 0.9,\n", + " 'label': {'key': 'name', 'value': 'bear'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -944,8 +945,8 @@ " 
0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'car'}},\n", + " 'value': 0.6,\n", + " 'label': {'key': 'name', 'value': 'clock'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -958,7 +959,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'truck'}},\n", + " 'label': {'key': 'name', 'value': 'microwave'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -970,8 +971,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.4544554455445544,\n", - " 'label': {'key': 'name', 'value': 'person'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -983,21 +984,21 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.35346534653465345,\n", - " 'label': {'key': 'name', 'value': 'chair'}},\n", - " {'type': 'APAveragedOverIOUs',\n", + " 'value': 0.49801980198019813,\n", + " 'label': {'key': 'name', 'value': 'potted plant'}},\n", + " {'type': 'mAPAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", " 0.6,\n", - " 0.65,\n", " 0.7,\n", + " 0.65,\n", " 0.75,\n", " 0.8,\n", " 0.85,\n", " 0.9,\n", - " 0.95]},\n", - " 'value': 0.9009900990099009,\n", - " 'label': {'key': 'name', 'value': 'tv'}},\n", + " 0.95],\n", + " 'label_key': 'name'},\n", + " 'value': 0.34028465346534653},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1009,8 +1010,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'microwave'}},\n", + " 'value': 0.35346534653465345,\n", + " 'label': {'key': 'name', 'value': 'chair'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1022,8 +1023,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'book'}},\n", + " 'value': 0.597029702970297,\n", + " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1035,8 +1036,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", + " 'value': 0.23168316831683172,\n", + " 'label': {'key': 'name', 'value': 'vase'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1048,8 +1049,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'dining table'}},\n", + " 'value': 0.9009900990099009,\n", + " 'label': {'key': 'name', 'value': 'tv'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1061,21 +1062,21 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.9,\n", - " 'label': {'key': 'name', 'value': 'bear'}},\n", - " {'type': 'mAPAveragedOverIOUs',\n", + " 'value': 0.4544554455445544,\n", + " 'label': {'key': 'name', 'value': 'person'}},\n", + " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", " 0.6,\n", - " 0.7,\n", " 0.65,\n", + " 0.7,\n", " 0.75,\n", " 0.8,\n", " 0.85,\n", " 0.9,\n", - " 0.95],\n", - " 'label_key': 'name'},\n", - " 'value': 0.34028465346534653}]" + " 0.95]},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'car'}}]" ] }, "execution_count": 8, @@ -1087,9 +1088,11 @@ "# bounding box evaluation\n", "eval_bbox = 
valor_model_bbox.evaluate_detection(\n", " valor_dataset,\n", - " filter_by=[\n", - " Label.key == \"name\",\n", - " ],\n", + " filters=Filter(\n", + " labels=(\n", + " Label.key == \"name\"\n", + " )\n", + " )\n", ")\n", "eval_bbox.wait_for_completion()\n", "eval_bbox.metrics" @@ -1105,81 +1108,68 @@ "data": { "text/plain": [ "[{'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", - " 'value': 0.25742574257425743,\n", - " 'label': {'key': 'name', 'value': 'vase'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", - " 'value': 0.6633663366336634,\n", - " 'label': {'key': 'name', 'value': 'potted plant'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", - " 'value': 0.22442244224422447,\n", - " 'label': {'key': 'name', 'value': 'chair'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", - " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'bed'}},\n", + " 'parameters': {'iou': 0.75},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'microwave'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", + " 'parameters': {'iou': 0.75},\n", " 'value': 1.0,\n", " 'label': {'key': 'name', 'value': 'tv'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", - " 'value': 1.0,\n", - " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", + " 'parameters': {'iou': 0.75},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'microwave'}},\n", + " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", - " 'value': 1.0,\n", - " 'label': {'key': 'name', 'value': 'clock'}},\n", + " 'parameters': {'iou': 0.75},\n", + " 'value': 0.504950495049505,\n", + " 'label': {'key': 'name', 'value': 'person'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", + " 'parameters': {'iou': 0.75},\n", + " 'value': 0.33663366336633666,\n", + " 'label': {'key': 'name', 'value': 'potted plant'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.75},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'book'}},\n", + " 'label': {'key': 'name', 'value': 'car'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", + " 'parameters': {'iou': 0.75},\n", " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'stop sign'}},\n", + " 'label': {'key': 'name', 'value': 'bed'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", + " 'parameters': {'iou': 0.75},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", + " 'label': {'key': 'name', 'value': 'book'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", + " 'parameters': {'iou': 0.75},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'car'}},\n", + " 'label': {'key': 'name', 'value': 'truck'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", + " 'parameters': {'iou': 0.75},\n", + " 'value': 0.504950495049505,\n", + " 'label': {'key': 'name', 'value': 'stop sign'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.75},\n", " 'value': 0.0,\n", " 'label': {'key': 'name', 'value': 'dining table'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", + " 'parameters': {'iou': 0.75},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'truck'}},\n", + " 'label': {'key': 'name', 'value': 'chair'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", + " 'parameters': {'iou': 0.75},\n", " 'value': 1.0,\n", " 
'label': {'key': 'name', 'value': 'bear'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.5},\n", - " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'person'}},\n", - " {'type': 'AR',\n", - " 'parameters': {'ious': [0.5,\n", - " 0.55,\n", - " 0.6,\n", - " 0.65,\n", - " 0.7,\n", - " 0.75,\n", - " 0.8,\n", - " 0.85,\n", - " 0.9,\n", - " 0.95]},\n", - " 'value': 0.125,\n", + " 'parameters': {'iou': 0.75},\n", + " 'value': 0.33663366336633666,\n", + " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.75},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'clock'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.75},\n", + " 'value': 0.0,\n", " 'label': {'key': 'name', 'value': 'vase'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", @@ -1192,8 +1182,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.06666666666666667,\n", - " 'label': {'key': 'name', 'value': 'chair'}},\n", + " 'value': 0.9,\n", + " 'label': {'key': 'name', 'value': 'tv'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1205,8 +1195,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.9,\n", - " 'label': {'key': 'name', 'value': 'tv'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1218,8 +1208,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'microwave'}},\n", + " 'value': 0.4,\n", + " 'label': {'key': 'name', 'value': 'potted plant'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1231,8 +1221,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'book'}},\n", + " 'value': 0.4,\n", + " 'label': {'key': 'name', 'value': 'bed'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1245,7 +1235,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", + " 'label': {'key': 'name', 'value': 'truck'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1270,8 +1260,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': -1.0,\n", - " 'label': {'key': 'name', 'value': 'bottle'}},\n", + " 'value': 0.9,\n", + " 'label': {'key': 'name', 'value': 'bear'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1283,24 +1273,24 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.3,\n", - " 'label': {'key': 'name', 'value': 'person'}},\n", + " 'value': 0.1,\n", + " 'label': {'key': 'name', 'value': 'clock'}},\n", " {'type': 'mAP',\n", - " 'parameters': {'iou': 0.75, 'label_key': 'name'},\n", - " 'value': 0.26175742574257427},\n", - " {'type': 'APAveragedOverIOUs',\n", + " 'parameters': {'iou': 0.5, 'label_key': 'name'},\n", + " 'value': 0.41625412541254125},\n", + " {'type': 'mAR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", " 0.6,\n", - " 0.65,\n", " 0.7,\n", + " 0.65,\n", " 0.75,\n", " 0.8,\n", " 0.85,\n", " 0.9,\n", - " 0.95]},\n", - " 'value': 0.12871287128712872,\n", - " 'label': {'key': 'name', 'value': 'vase'}},\n", + " 0.95],\n", + " 'label_key': 'name'},\n", + " 'value': 0.26197916666666665},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1312,8 +1302,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.04488448844884489,\n", - " 'label': {'key': 'name', 
'value': 'chair'}},\n", + " 'value': 0.9009900990099009,\n", + " 'label': {'key': 'name', 'value': 'tv'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1325,8 +1315,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.9009900990099009,\n", - " 'label': {'key': 'name', 'value': 'tv'}},\n", + " 'value': 0.30297029702970296,\n", + " 'label': {'key': 'name', 'value': 'person'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1339,7 +1329,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'microwave'}},\n", + " 'label': {'key': 'name', 'value': 'car'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1364,8 +1354,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", + " 'value': 0.5049504950495048,\n", + " 'label': {'key': 'name', 'value': 'stop sign'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1377,8 +1367,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'dining table'}},\n", + " 'value': 0.04488448844884489,\n", + " 'label': {'key': 'name', 'value': 'chair'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1390,85 +1380,85 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.9,\n", - " 'label': {'key': 'name', 'value': 'bear'}},\n", - " {'type': 'mAPAveragedOverIOUs',\n", + " 'value': 0.5009900990099011,\n", + " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", + " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", " 0.6,\n", - " 0.7,\n", " 0.65,\n", + " 0.7,\n", " 0.75,\n", " 0.8,\n", " 0.85,\n", " 0.9,\n", - " 0.95],\n", - " 'label_key': 'name'},\n", - " 'value': 0.2616542904290429},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", - " 'value': 0.0,\n", + " 0.95]},\n", + " 'value': 0.12871287128712872,\n", " 'label': {'key': 'name', 'value': 'vase'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", - " 'value': 0.33663366336633666,\n", - " 'label': {'key': 'name', 'value': 'potted plant'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", + " 'parameters': {'iou': 0.5},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'chair'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", - " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'bed'}},\n", + " 'label': {'key': 'name', 'value': 'microwave'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", + " 'parameters': {'iou': 0.5},\n", " 'value': 1.0,\n", " 'label': {'key': 'name', 'value': 'tv'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", - " 'value': 0.33663366336633666,\n", - " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", - " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", + " 'parameters': {'iou': 0.5},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'microwave'}},\n", + " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'clock'}},\n", + " 'parameters': {'iou': 0.5},\n", + " 'value': 0.504950495049505,\n", + " 'label': {'key': 'name', 'value': 'person'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", + " 'parameters': {'iou': 
0.5},\n", + " 'value': 0.6633663366336634,\n", + " 'label': {'key': 'name', 'value': 'potted plant'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.5},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'book'}},\n", + " 'label': {'key': 'name', 'value': 'car'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", + " 'parameters': {'iou': 0.5},\n", " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'stop sign'}},\n", + " 'label': {'key': 'name', 'value': 'bed'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", + " 'parameters': {'iou': 0.5},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", + " 'label': {'key': 'name', 'value': 'book'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", + " 'parameters': {'iou': 0.5},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'car'}},\n", + " 'label': {'key': 'name', 'value': 'truck'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", + " 'parameters': {'iou': 0.5},\n", + " 'value': 0.504950495049505,\n", + " 'label': {'key': 'name', 'value': 'stop sign'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.5},\n", " 'value': 0.0,\n", " 'label': {'key': 'name', 'value': 'dining table'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'truck'}},\n", + " 'parameters': {'iou': 0.5},\n", + " 'value': 0.22442244224422447,\n", + " 'label': {'key': 'name', 'value': 'chair'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", + " 'parameters': {'iou': 0.5},\n", " 'value': 1.0,\n", " 'label': {'key': 'name', 'value': 'bear'}},\n", " {'type': 'AP',\n", - " 'parameters': {'iou': 0.75},\n", - " 'value': 0.504950495049505,\n", - " 'label': {'key': 'name', 'value': 'person'}},\n", + " 'parameters': {'iou': 0.5},\n", + " 'value': 1.0,\n", + " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.5},\n", + " 'value': 1.0,\n", + " 'label': {'key': 'name', 'value': 'clock'}},\n", + " {'type': 'AP',\n", + " 'parameters': {'iou': 0.5},\n", + " 'value': 0.25742574257425743,\n", + " 'label': {'key': 'name', 'value': 'vase'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1480,8 +1470,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.4,\n", - " 'label': {'key': 'name', 'value': 'potted plant'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'microwave'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1493,8 +1483,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.4,\n", - " 'label': {'key': 'name', 'value': 'bed'}},\n", + " 'value': -1.0,\n", + " 'label': {'key': 'name', 'value': 'bottle'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1506,8 +1496,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.4999999999999999,\n", - " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", + " 'value': 0.3,\n", + " 'label': {'key': 'name', 'value': 'person'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1519,8 +1509,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.1,\n", - " 'label': {'key': 'name', 'value': 'clock'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'car'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1532,8 +1522,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.5,\n", - " 
'label': {'key': 'name', 'value': 'stop sign'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'book'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1545,8 +1535,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'car'}},\n", + " 'value': 0.5,\n", + " 'label': {'key': 'name', 'value': 'stop sign'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1558,8 +1548,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'truck'}},\n", + " 'value': 0.06666666666666667,\n", + " 'label': {'key': 'name', 'value': 'chair'}},\n", " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1571,24 +1561,24 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.9,\n", - " 'label': {'key': 'name', 'value': 'bear'}},\n", - " {'type': 'mAP',\n", - " 'parameters': {'iou': 0.5, 'label_key': 'name'},\n", - " 'value': 0.41625412541254125},\n", - " {'type': 'mAR',\n", + " 'value': 0.4999999999999999,\n", + " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", + " {'type': 'AR',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", " 0.6,\n", - " 0.7,\n", " 0.65,\n", + " 0.7,\n", " 0.75,\n", " 0.8,\n", " 0.85,\n", " 0.9,\n", - " 0.95],\n", - " 'label_key': 'name'},\n", - " 'value': 0.26197916666666665},\n", + " 0.95]},\n", + " 'value': 0.125,\n", + " 'label': {'key': 'name', 'value': 'vase'}},\n", + " {'type': 'mAP',\n", + " 'parameters': {'iou': 0.75, 'label_key': 'name'},\n", + " 'value': 0.26175742574257427},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1600,8 +1590,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.39900990099009903,\n", - " 'label': {'key': 'name', 'value': 'potted plant'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'microwave'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1613,8 +1603,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.403960396039604,\n", - " 'label': {'key': 'name', 'value': 'bed'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'refrigerator'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1626,8 +1616,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.5009900990099011,\n", - " 'label': {'key': 'name', 'value': 'teddy bear'}},\n", + " 'value': 0.39900990099009903,\n", + " 'label': {'key': 'name', 'value': 'potted plant'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1639,8 +1629,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.1,\n", - " 'label': {'key': 'name', 'value': 'clock'}},\n", + " 'value': 0.403960396039604,\n", + " 'label': {'key': 'name', 'value': 'bed'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1652,8 +1642,8 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.5049504950495048,\n", - " 'label': {'key': 'name', 'value': 'stop sign'}},\n", + " 'value': 0.0,\n", + " 'label': {'key': 'name', 'value': 'truck'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1666,7 +1656,7 @@ " 0.9,\n", " 0.95]},\n", " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'car'}},\n", + " 'label': {'key': 'name', 'value': 'dining table'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1678,8 +1668,8 
@@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.0,\n", - " 'label': {'key': 'name', 'value': 'truck'}},\n", + " 'value': 0.9,\n", + " 'label': {'key': 'name', 'value': 'bear'}},\n", " {'type': 'APAveragedOverIOUs',\n", " 'parameters': {'ious': [0.5,\n", " 0.55,\n", @@ -1691,8 +1681,21 @@ " 0.85,\n", " 0.9,\n", " 0.95]},\n", - " 'value': 0.30297029702970296,\n", - " 'label': {'key': 'name', 'value': 'person'}}]" + " 'value': 0.1,\n", + " 'label': {'key': 'name', 'value': 'clock'}},\n", + " {'type': 'mAPAveragedOverIOUs',\n", + " 'parameters': {'ious': [0.5,\n", + " 0.55,\n", + " 0.6,\n", + " 0.7,\n", + " 0.65,\n", + " 0.75,\n", + " 0.8,\n", + " 0.85,\n", + " 0.9,\n", + " 0.95],\n", + " 'label_key': 'name'},\n", + " 'value': 0.2616542904290429}]" ] }, "execution_count": 9, @@ -1704,9 +1707,11 @@ "# raster evaluation\n", "eval_raster = valor_model_seg.evaluate_detection(\n", " valor_dataset,\n", - " filter_by=[\n", - " Label.key == \"name\",\n", - " ]\n", + " filters=Filter(\n", + " labels=(\n", + " Label.key == \"name\"\n", + " )\n", + " )\n", ")\n", "eval_raster.wait_for_completion()\n", "eval_raster.metrics" @@ -1837,7 +1842,7 @@ " mAR\n", " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"name\"}\n", " n/a\n", - " 0.339062\n", + " 0.339063\n", " 0.261979\n", " \n", " \n", @@ -1859,7 +1864,7 @@ "mAP {\"iou\": 0.5, \"label_key\": \"name\"} n/a 0.412748 \n", " {\"iou\": 0.75, \"label_key\": \"name\"} n/a 0.402228 \n", "mAPAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.340285 \n", - "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.339062 \n", + "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.339063 \n", "\n", " \n", "annotation type raster \n", @@ -1943,10 +1948,12 @@ "# bounding box evaluation\n", "eval_bbox_small = valor_model_bbox.evaluate_detection(\n", " valor_dataset,\n", - " filter_by=[\n", - " Label.key == \"name\",\n", - " Annotation.bounding_box.area < lower_bound,\n", - " ],\n", + " filters=Filter(\n", + " annotations=And(\n", + " Label.key == \"name\",\n", + " Annotation.bounding_box.area < lower_bound,\n", + " )\n", + " ),\n", ")\n", "eval_bbox_small.wait_for_completion()" ] @@ -1972,10 +1979,12 @@ "# raster evaluation\n", "eval_raster_small = valor_model_seg.evaluate_detection(\n", " valor_dataset,\n", - " filter_by=[\n", - " Label.key == \"name\",\n", - " Annotation.raster.area < lower_bound,\n", - " ]\n", + " filters=Filter(\n", + " annotations=And(\n", + " Label.key == \"name\",\n", + " Annotation.raster.area < lower_bound,\n", + " )\n", + " )\n", ")\n", "eval_raster_small.wait_for_completion()" ] @@ -2032,110 +2041,105 @@ " \n", " \n", " \n", - " AP\n", + " AP\n", " {\"iou\": 0.5}\n", - " name: book\n", + " iscrowd: 0\n", + " 0.00000\n", " 0.000000\n", + " \n", + " \n", + " name: book\n", + " 0.00000\n", " 0.000000\n", " \n", " \n", " name: car\n", - " 0.000000\n", + " 0.00000\n", " 0.000000\n", " \n", " \n", " name: chair\n", - " 0.504950\n", + " 0.50495\n", " 0.224422\n", " \n", " \n", " name: clock\n", + " 1.00000\n", " 1.000000\n", - " 1.000000\n", - " \n", - " \n", - " name: dining table\n", - " 0.000000\n", - " 0.000000\n", " \n", " \n", " ...\n", " ...\n", - " ...\n", " ...\n", " ...\n", " \n", " \n", - " mAR\n", - " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"name\"}\n", - " n/a\n", - " 0.271154\n", - " 0.170833\n", + " {\"iou\": 0.75}\n", + " name: bed\n", + " NaN\n", + " 0.000000\n", " \n", " \n", - " AP\n", - " {\"iou\": 
0.5}\n", - " name: bed\n", + " APAveragedOverIOUs\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", + " iscrowd: 1\n", " NaN\n", " 0.000000\n", " \n", " \n", - " {\"iou\": 0.75}\n", " name: bed\n", " NaN\n", " 0.000000\n", " \n", " \n", - " APAveragedOverIOUs\n", - " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", - " name: bed\n", + " AR\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", + " iscrowd: 1\n", " NaN\n", " 0.000000\n", " \n", " \n", - " AR\n", - " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", " name: bed\n", " NaN\n", " 0.000000\n", " \n", " \n", "\n", - "

61 rows × 2 columns\n", + "105 rows × 2 columns

\n", "" ], "text/plain": [ - " value \\\n", - "annotation type bbox \n", - "type parameters label \n", - "AP {\"iou\": 0.5} name: book 0.000000 \n", - " name: car 0.000000 \n", - " name: chair 0.504950 \n", - " name: clock 1.000000 \n", - " name: dining table 0.000000 \n", - "... ... \n", - "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.271154 \n", - "AP {\"iou\": 0.5} name: bed NaN \n", - " {\"iou\": 0.75} name: bed NaN \n", - "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bed NaN \n", - "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bed NaN \n", + " value \\\n", + "annotation type bbox \n", + "type parameters label \n", + "AP {\"iou\": 0.5} iscrowd: 0 0.00000 \n", + " name: book 0.00000 \n", + " name: car 0.00000 \n", + " name: chair 0.50495 \n", + " name: clock 1.00000 \n", + "... ... \n", + " {\"iou\": 0.75} name: bed NaN \n", + "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... iscrowd: 1 NaN \n", + " name: bed NaN \n", + "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... iscrowd: 1 NaN \n", + " name: bed NaN \n", "\n", - " \n", - "annotation type raster \n", - "type parameters label \n", - "AP {\"iou\": 0.5} name: book 0.000000 \n", - " name: car 0.000000 \n", - " name: chair 0.224422 \n", - " name: clock 1.000000 \n", - " name: dining table 0.000000 \n", - "... ... \n", - "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.170833 \n", - "AP {\"iou\": 0.5} name: bed 0.000000 \n", - " {\"iou\": 0.75} name: bed 0.000000 \n", - "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bed 0.000000 \n", - "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bed 0.000000 \n", + " \n", + "annotation type raster \n", + "type parameters label \n", + "AP {\"iou\": 0.5} iscrowd: 0 0.000000 \n", + " name: book 0.000000 \n", + " name: car 0.000000 \n", + " name: chair 0.224422 \n", + " name: clock 1.000000 \n", + "... ... \n", + " {\"iou\": 0.75} name: bed 0.000000 \n", + "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... iscrowd: 1 0.000000 \n", + " name: bed 0.000000 \n", + "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... 
iscrowd: 1 0.000000 \n", + " name: bed 0.000000 \n", "\n", - "[61 rows x 2 columns]" + "[105 rows x 2 columns]" ] }, "execution_count": 14, @@ -2179,11 +2183,13 @@ "# bounding box evaluation\n", "eval_bbox_mid = valor_model_bbox.evaluate_detection(\n", " valor_dataset,\n", - " filter_by=[\n", - " Label.key == \"name\",\n", - " Annotation.bounding_box.area >= lower_bound,\n", - " Annotation.bounding_box.area <= upper_bound,\n", - " ],\n", + " filters=Filter(\n", + " annotations=And(\n", + " Label.key == \"name\",\n", + " Annotation.bounding_box.area >= lower_bound,\n", + " Annotation.bounding_box.area <= upper_bound,\n", + " )\n", + " ),\n", ")\n", "eval_bbox_mid.wait_for_completion()" ] @@ -2209,11 +2215,13 @@ "# raster evaluation\n", "eval_raster_mid = valor_model_seg.evaluate_detection(\n", " valor_dataset,\n", - " filter_by=[\n", - " Label.key == \"name\",\n", - " Annotation.raster.area >= lower_bound,\n", - " Annotation.raster.area <= upper_bound,\n", - " ]\n", + " filters=Filter(\n", + " annotations=And(\n", + " Label.key == \"name\",\n", + " Annotation.raster.area >= lower_bound,\n", + " Annotation.raster.area <= upper_bound,\n", + " )\n", + " )\n", ")\n", "eval_raster_mid.wait_for_completion()" ] @@ -2270,8 +2278,18 @@ " \n", " \n", " \n", - " AP\n", - " {\"iou\": 0.5}\n", + " AP\n", + " {\"iou\": 0.5}\n", + " iscrowd: 0\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " iscrowd: 1\n", + " 0.0\n", + " NaN\n", + " \n", + " \n", " name: bed\n", " 1.0\n", " 1.000000\n", @@ -2282,7 +2300,27 @@ " NaN\n", " \n", " \n", - " {\"iou\": 0.75}\n", + " supercategory: furniture\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " supercategory: indoor\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " {\"iou\": 0.75}\n", + " iscrowd: 0\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " iscrowd: 1\n", + " 0.0\n", + " NaN\n", + " \n", + " \n", " name: bed\n", " 1.0\n", " 1.000000\n", @@ -2293,8 +2331,28 @@ " NaN\n", " \n", " \n", - " APAveragedOverIOUs\n", - " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", + " supercategory: furniture\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " supercategory: indoor\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " APAveragedOverIOUs\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", + " iscrowd: 0\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " iscrowd: 1\n", + " 0.0\n", + " NaN\n", + " \n", + " \n", " name: bed\n", " 0.8\n", " 0.800000\n", @@ -2305,8 +2363,28 @@ " NaN\n", " \n", " \n", - " AR\n", - " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", + " supercategory: furniture\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " supercategory: indoor\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " AR\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", + " iscrowd: 0\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " iscrowd: 1\n", + " 0.0\n", + " NaN\n", + " \n", + " \n", " name: bed\n", " 0.8\n", " 0.800000\n", @@ -2322,33 +2400,91 @@ " 0.466667\n", " \n", " \n", - " mAP\n", + " supercategory: furniture\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " supercategory: indoor\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " mAP\n", + " {\"iou\": 0.5, \"label_key\": \"iscrowd\"}\n", + " n/a\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", " {\"iou\": 0.5, \"label_key\": \"name\"}\n", " n/a\n", " 0.5\n", " 1.000000\n", " \n", " \n", + " {\"iou\": 0.5, \"label_key\": \"supercategory\"}\n", + " n/a\n", 
+ " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " {\"iou\": 0.75, \"label_key\": \"iscrowd\"}\n", + " n/a\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", " {\"iou\": 0.75, \"label_key\": \"name\"}\n", " n/a\n", " 0.5\n", " 0.668317\n", " \n", " \n", - " mAPAveragedOverIOUs\n", + " {\"iou\": 0.75, \"label_key\": \"supercategory\"}\n", + " n/a\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " mAPAveragedOverIOUs\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"iscrowd\"}\n", + " n/a\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"name\"}\n", " n/a\n", " 0.4\n", " 0.628713\n", " \n", " \n", - " mAR\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"supercategory\"}\n", + " n/a\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", + " mAR\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"iscrowd\"}\n", + " n/a\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"name\"}\n", " n/a\n", " 0.4\n", " 0.633333\n", " \n", " \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"supercategory\"}\n", + " n/a\n", + " 0.0\n", + " 0.000000\n", + " \n", + " \n", " AP\n", " {\"iou\": 0.5}\n", " name: teddy bear\n", @@ -2373,45 +2509,93 @@ "" ], "text/plain": [ - " value \\\n", - "annotation type bbox \n", - "type parameters label \n", - "AP {\"iou\": 0.5} name: bed 1.0 \n", - " name: book 0.0 \n", - " {\"iou\": 0.75} name: bed 1.0 \n", - " name: book 0.0 \n", - "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bed 0.8 \n", - " name: book 0.0 \n", - "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bed 0.8 \n", - " name: book 0.0 \n", - " name: teddy bear -1.0 \n", - "mAP {\"iou\": 0.5, \"label_key\": \"name\"} n/a 0.5 \n", - " {\"iou\": 0.75, \"label_key\": \"name\"} n/a 0.5 \n", - "mAPAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.4 \n", - "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.4 \n", - "AP {\"iou\": 0.5} name: teddy bear NaN \n", - " {\"iou\": 0.75} name: teddy bear NaN \n", - "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: teddy bear NaN \n", + " value \\\n", + "annotation type bbox \n", + "type parameters label \n", + "AP {\"iou\": 0.5} iscrowd: 0 0.0 \n", + " iscrowd: 1 0.0 \n", + " name: bed 1.0 \n", + " name: book 0.0 \n", + " supercategory: furniture 0.0 \n", + " supercategory: indoor 0.0 \n", + " {\"iou\": 0.75} iscrowd: 0 0.0 \n", + " iscrowd: 1 0.0 \n", + " name: bed 1.0 \n", + " name: book 0.0 \n", + " supercategory: furniture 0.0 \n", + " supercategory: indoor 0.0 \n", + "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... iscrowd: 0 0.0 \n", + " iscrowd: 1 0.0 \n", + " name: bed 0.8 \n", + " name: book 0.0 \n", + " supercategory: furniture 0.0 \n", + " supercategory: indoor 0.0 \n", + "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... 
iscrowd: 0 0.0 \n", + " iscrowd: 1 0.0 \n", + " name: bed 0.8 \n", + " name: book 0.0 \n", + " name: teddy bear -1.0 \n", + " supercategory: furniture 0.0 \n", + " supercategory: indoor 0.0 \n", + "mAP {\"iou\": 0.5, \"label_key\": \"iscrowd\"} n/a 0.0 \n", + " {\"iou\": 0.5, \"label_key\": \"name\"} n/a 0.5 \n", + " {\"iou\": 0.5, \"label_key\": \"supercategory\"} n/a 0.0 \n", + " {\"iou\": 0.75, \"label_key\": \"iscrowd\"} n/a 0.0 \n", + " {\"iou\": 0.75, \"label_key\": \"name\"} n/a 0.5 \n", + " {\"iou\": 0.75, \"label_key\": \"supercategory\"} n/a 0.0 \n", + "mAPAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.0 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.4 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.0 \n", + "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.0 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.4 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.0 \n", + "AP {\"iou\": 0.5} name: teddy bear NaN \n", + " {\"iou\": 0.75} name: teddy bear NaN \n", + "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: teddy bear NaN \n", "\n", - " \n", - "annotation type raster \n", - "type parameters label \n", - "AP {\"iou\": 0.5} name: bed 1.000000 \n", - " name: book NaN \n", - " {\"iou\": 0.75} name: bed 1.000000 \n", - " name: book NaN \n", - "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bed 0.800000 \n", - " name: book NaN \n", - "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bed 0.800000 \n", - " name: book NaN \n", - " name: teddy bear 0.466667 \n", - "mAP {\"iou\": 0.5, \"label_key\": \"name\"} n/a 1.000000 \n", - " {\"iou\": 0.75, \"label_key\": \"name\"} n/a 0.668317 \n", - "mAPAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.628713 \n", - "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.633333 \n", - "AP {\"iou\": 0.5} name: teddy bear 1.000000 \n", - " {\"iou\": 0.75} name: teddy bear 0.336634 \n", - "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: teddy bear 0.457426 " + " \n", + "annotation type raster \n", + "type parameters label \n", + "AP {\"iou\": 0.5} iscrowd: 0 0.000000 \n", + " iscrowd: 1 NaN \n", + " name: bed 1.000000 \n", + " name: book NaN \n", + " supercategory: furniture 0.000000 \n", + " supercategory: indoor 0.000000 \n", + " {\"iou\": 0.75} iscrowd: 0 0.000000 \n", + " iscrowd: 1 NaN \n", + " name: bed 1.000000 \n", + " name: book NaN \n", + " supercategory: furniture 0.000000 \n", + " supercategory: indoor 0.000000 \n", + "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... iscrowd: 0 0.000000 \n", + " iscrowd: 1 NaN \n", + " name: bed 0.800000 \n", + " name: book NaN \n", + " supercategory: furniture 0.000000 \n", + " supercategory: indoor 0.000000 \n", + "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... 
iscrowd: 0 0.000000 \n", + " iscrowd: 1 NaN \n", + " name: bed 0.800000 \n", + " name: book NaN \n", + " name: teddy bear 0.466667 \n", + " supercategory: furniture 0.000000 \n", + " supercategory: indoor 0.000000 \n", + "mAP {\"iou\": 0.5, \"label_key\": \"iscrowd\"} n/a 0.000000 \n", + " {\"iou\": 0.5, \"label_key\": \"name\"} n/a 1.000000 \n", + " {\"iou\": 0.5, \"label_key\": \"supercategory\"} n/a 0.000000 \n", + " {\"iou\": 0.75, \"label_key\": \"iscrowd\"} n/a 0.000000 \n", + " {\"iou\": 0.75, \"label_key\": \"name\"} n/a 0.668317 \n", + " {\"iou\": 0.75, \"label_key\": \"supercategory\"} n/a 0.000000 \n", + "mAPAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.000000 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.628713 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.000000 \n", + "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.000000 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.633333 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.000000 \n", + "AP {\"iou\": 0.5} name: teddy bear 1.000000 \n", + " {\"iou\": 0.75} name: teddy bear 0.336634 \n", + "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: teddy bear 0.457426 " ] }, "execution_count": 17, @@ -2455,10 +2639,12 @@ "# bounding box evaluation\n", "eval_bbox_large = valor_model_bbox.evaluate_detection(\n", " valor_dataset,\n", - " filter_by=[\n", - " Label.key == \"name\",\n", - " Annotation.bounding_box.area > upper_bound,\n", - " ],\n", + " filters=Filter(\n", + " annotations=And(\n", + " Label.key == \"name\",\n", + " Annotation.bounding_box.area > upper_bound,\n", + " )\n", + " )\n", ")\n", "eval_bbox_large.wait_for_completion()" ] @@ -2484,10 +2670,12 @@ "# raster evaluation\n", "eval_raster_large = valor_model_seg.evaluate_detection(\n", " valor_dataset,\n", - " filter_by=[\n", - " Label.key == \"name\",\n", - " Annotation.raster.area > upper_bound,\n", - " ]\n", + " filters=Filter(\n", + " annotations=And(\n", + " Label.key == \"name\",\n", + " Annotation.raster.area > upper_bound,\n", + " )\n", + " )\n", ")\n", "eval_raster_large.wait_for_completion()" ] @@ -2544,8 +2732,13 @@ " \n", " \n", " \n", - " AP\n", - " {\"iou\": 0.5}\n", + " AP\n", + " {\"iou\": 0.5}\n", + " iscrowd: 0\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", " name: bear\n", " 1.000000\n", " 1.0\n", @@ -2561,7 +2754,27 @@ " NaN\n", " \n", " \n", - " {\"iou\": 0.75}\n", + " supercategory: animal\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", + " supercategory: furniture\n", + " 0.000000\n", + " NaN\n", + " \n", + " \n", + " supercategory: indoor\n", + " 0.000000\n", + " NaN\n", + " \n", + " \n", + " {\"iou\": 0.75}\n", + " iscrowd: 0\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", " name: bear\n", " 1.000000\n", " 1.0\n", @@ -2577,8 +2790,28 @@ " NaN\n", " \n", " \n", - " APAveragedOverIOUs\n", - " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", + " supercategory: animal\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", + " supercategory: furniture\n", + " 0.000000\n", + " NaN\n", + " \n", + " \n", + " supercategory: indoor\n", + " 0.000000\n", + " NaN\n", + " \n", + " \n", + " APAveragedOverIOUs\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", + " iscrowd: 0\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", " name: bear\n", " 0.900000\n", " 0.9\n", @@ -2594,8 +2827,28 @@ " NaN\n", " \n", " \n", - " AR\n", - " {\"ious\": [0.5, 0.55, 0.6, 
0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", + " supercategory: animal\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", + " supercategory: furniture\n", + " 0.000000\n", + " NaN\n", + " \n", + " \n", + " supercategory: indoor\n", + " 0.000000\n", + " NaN\n", + " \n", + " \n", + " AR\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]}\n", + " iscrowd: 0\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", " name: bear\n", " 0.900000\n", " 0.9\n", @@ -2611,76 +2864,187 @@ " -1.0\n", " \n", " \n", - " mAP\n", + " supercategory: animal\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", + " supercategory: furniture\n", + " 0.000000\n", + " NaN\n", + " \n", + " \n", + " supercategory: indoor\n", + " 0.000000\n", + " NaN\n", + " \n", + " \n", + " mAP\n", + " {\"iou\": 0.5, \"label_key\": \"iscrowd\"}\n", + " n/a\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", " {\"iou\": 0.5, \"label_key\": \"name\"}\n", " n/a\n", " 0.554455\n", " 1.0\n", " \n", " \n", + " {\"iou\": 0.5, \"label_key\": \"supercategory\"}\n", + " n/a\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", + " {\"iou\": 0.75, \"label_key\": \"iscrowd\"}\n", + " n/a\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", " {\"iou\": 0.75, \"label_key\": \"name\"}\n", " n/a\n", " 0.554455\n", " 1.0\n", " \n", " \n", - " mAPAveragedOverIOUs\n", + " {\"iou\": 0.75, \"label_key\": \"supercategory\"}\n", + " n/a\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", + " mAPAveragedOverIOUs\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"iscrowd\"}\n", + " n/a\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"name\"}\n", " n/a\n", " 0.499010\n", " 0.9\n", " \n", " \n", - " mAR\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"supercategory\"}\n", + " n/a\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", + " mAR\n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"iscrowd\"}\n", + " n/a\n", + " 0.000000\n", + " 0.0\n", + " \n", + " \n", " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"name\"}\n", " n/a\n", " 0.500000\n", " 0.9\n", " \n", + " \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8, 0.85, 0.9, 0.95], \"label_key\": \"supercategory\"}\n", + " n/a\n", + " 0.000000\n", + " 0.0\n", + " \n", " \n", "\n", "" ], "text/plain": [ - " value \\\n", - "annotation type bbox \n", - "type parameters label \n", - "AP {\"iou\": 0.5} name: bear 1.000000 \n", - " name: bed 0.000000 \n", - " name: teddy bear 0.663366 \n", - " {\"iou\": 0.75} name: bear 1.000000 \n", - " name: bed 0.000000 \n", - " name: teddy bear 0.663366 \n", - "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bear 0.900000 \n", - " name: bed 0.000000 \n", - " name: teddy bear 0.597030 \n", - "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bear 0.900000 \n", - " name: bed 0.000000 \n", - " name: teddy bear 0.600000 \n", - "mAP {\"iou\": 0.5, \"label_key\": \"name\"} n/a 0.554455 \n", - " {\"iou\": 0.75, \"label_key\": \"name\"} n/a 0.554455 \n", - "mAPAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.499010 \n", - "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... 
n/a 0.500000 \n", + " value \\\n", + "annotation type bbox \n", + "type parameters label \n", + "AP {\"iou\": 0.5} iscrowd: 0 0.000000 \n", + " name: bear 1.000000 \n", + " name: bed 0.000000 \n", + " name: teddy bear 0.663366 \n", + " supercategory: animal 0.000000 \n", + " supercategory: furniture 0.000000 \n", + " supercategory: indoor 0.000000 \n", + " {\"iou\": 0.75} iscrowd: 0 0.000000 \n", + " name: bear 1.000000 \n", + " name: bed 0.000000 \n", + " name: teddy bear 0.663366 \n", + " supercategory: animal 0.000000 \n", + " supercategory: furniture 0.000000 \n", + " supercategory: indoor 0.000000 \n", + "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... iscrowd: 0 0.000000 \n", + " name: bear 0.900000 \n", + " name: bed 0.000000 \n", + " name: teddy bear 0.597030 \n", + " supercategory: animal 0.000000 \n", + " supercategory: furniture 0.000000 \n", + " supercategory: indoor 0.000000 \n", + "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... iscrowd: 0 0.000000 \n", + " name: bear 0.900000 \n", + " name: bed 0.000000 \n", + " name: teddy bear 0.600000 \n", + " supercategory: animal 0.000000 \n", + " supercategory: furniture 0.000000 \n", + " supercategory: indoor 0.000000 \n", + "mAP {\"iou\": 0.5, \"label_key\": \"iscrowd\"} n/a 0.000000 \n", + " {\"iou\": 0.5, \"label_key\": \"name\"} n/a 0.554455 \n", + " {\"iou\": 0.5, \"label_key\": \"supercategory\"} n/a 0.000000 \n", + " {\"iou\": 0.75, \"label_key\": \"iscrowd\"} n/a 0.000000 \n", + " {\"iou\": 0.75, \"label_key\": \"name\"} n/a 0.554455 \n", + " {\"iou\": 0.75, \"label_key\": \"supercategory\"} n/a 0.000000 \n", + "mAPAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.000000 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.499010 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.000000 \n", + "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.000000 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.500000 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.000000 \n", "\n", - " \n", - "annotation type raster \n", - "type parameters label \n", - "AP {\"iou\": 0.5} name: bear 1.0 \n", - " name: bed NaN \n", - " name: teddy bear NaN \n", - " {\"iou\": 0.75} name: bear 1.0 \n", - " name: bed NaN \n", - " name: teddy bear NaN \n", - "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bear 0.9 \n", - " name: bed NaN \n", - " name: teddy bear NaN \n", - "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... name: bear 0.9 \n", - " name: bed NaN \n", - " name: teddy bear -1.0 \n", - "mAP {\"iou\": 0.5, \"label_key\": \"name\"} n/a 1.0 \n", - " {\"iou\": 0.75, \"label_key\": \"name\"} n/a 1.0 \n", - "mAPAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.9 \n", - "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.9 " + " \n", + "annotation type raster \n", + "type parameters label \n", + "AP {\"iou\": 0.5} iscrowd: 0 0.0 \n", + " name: bear 1.0 \n", + " name: bed NaN \n", + " name: teddy bear NaN \n", + " supercategory: animal 0.0 \n", + " supercategory: furniture NaN \n", + " supercategory: indoor NaN \n", + " {\"iou\": 0.75} iscrowd: 0 0.0 \n", + " name: bear 1.0 \n", + " name: bed NaN \n", + " name: teddy bear NaN \n", + " supercategory: animal 0.0 \n", + " supercategory: furniture NaN \n", + " supercategory: indoor NaN \n", + "APAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... 
iscrowd: 0 0.0 \n", + " name: bear 0.9 \n", + " name: bed NaN \n", + " name: teddy bear NaN \n", + " supercategory: animal 0.0 \n", + " supercategory: furniture NaN \n", + " supercategory: indoor NaN \n", + "AR {\"ious\": [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,... iscrowd: 0 0.0 \n", + " name: bear 0.9 \n", + " name: bed NaN \n", + " name: teddy bear -1.0 \n", + " supercategory: animal 0.0 \n", + " supercategory: furniture NaN \n", + " supercategory: indoor NaN \n", + "mAP {\"iou\": 0.5, \"label_key\": \"iscrowd\"} n/a 0.0 \n", + " {\"iou\": 0.5, \"label_key\": \"name\"} n/a 1.0 \n", + " {\"iou\": 0.5, \"label_key\": \"supercategory\"} n/a 0.0 \n", + " {\"iou\": 0.75, \"label_key\": \"iscrowd\"} n/a 0.0 \n", + " {\"iou\": 0.75, \"label_key\": \"name\"} n/a 1.0 \n", + " {\"iou\": 0.75, \"label_key\": \"supercategory\"} n/a 0.0 \n", + "mAPAveragedOverIOUs {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.0 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.9 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.0 \n", + "mAR {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.0 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.9 \n", + " {\"ious\": [0.5, 0.55, 0.6, 0.7, 0.65, 0.75, 0.8,... n/a 0.0 " ] }, "execution_count": 20, diff --git a/examples/pedestrian_detection.ipynb b/examples/pedestrian_detection.ipynb index 3fd877180..b67354ada 100644 --- a/examples/pedestrian_detection.ipynb +++ b/examples/pedestrian_detection.ipynb @@ -32,7 +32,7 @@ "from tqdm.notebook import tqdm\n", "\n", "from valor.enums import TaskType\n", - "from valor import Annotation, Datum, Dataset, Model, GroundTruth, Label, Client, Prediction, viz, connect\n", + "from valor import Annotation, Datum, Dataset, Model, GroundTruth, Label, Client, Prediction, Filter, viz, connect\n", "from valor.schemas import Box, Raster" ] }, @@ -528,7 +528,7 @@ " reg_eval.wait_for_completion()\n", " \n", " # evaluate on just people for which the `\"in_road\"` metadata field is `True`\n", - " in_road_eval = valor_model.evaluate_detection(dset, filter_by=[Annotation.metadata[\"in_road\"] == True])\n", + " in_road_eval = valor_model.evaluate_detection(dset, filters=Filter(annotations=(Annotation.metadata[\"in_road\"] == True)))\n", " in_road_eval.wait_for_completion()\n", " \n", " \n", diff --git a/examples/tabular_classification.ipynb b/examples/tabular_classification.ipynb index 1ba490989..17bde3321 100644 --- a/examples/tabular_classification.ipynb +++ b/examples/tabular_classification.ipynb @@ -32,6 +32,17 @@ "id": "f9daebe8-0bb4-41eb-8359-9cadaa4a7779", "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:The Valor client version (0.27.2.dev37+g6c9eaddf.d20240614) is newer than the Valor API version 0.27.2.dev37+g6c9eaddf\t==========================================================================================\n", + "\t== Running with a mismatched client != API version may have unexpected results.\n", + "\t== Please update your client to \u001b[1;0.27.2.dev37+g6c9eaddf\u001b[0;31m to avoid aberrant behavior.\n", + "\t==========================================================================================\n", + "\u001b[0m\n" + ] + }, { "name": "stdout", "output_type": "stream", @@ -99,7 +110,7 @@ { "data": { "text/plain": [ - "((426, 30), array([0, 1, 1, 0]), array(['malignant', 'benign'], dtype='F1\n", " \"n/a\"\n", " class: benign\n", - " 0.990253\n", + " 0.990689\n", " \n", " \n", " class: malignant\n", - " 
0.985251\n", + " 0.984127\n", " \n", " \n", " Precision\n", " \"n/a\"\n", " class: benign\n", - " 0.984496\n", + " 0.981550\n", " \n", " \n", " class: malignant\n", - " 0.994048\n", + " 1.000000\n", " \n", " \n", " ROCAUC\n", " {\"label_key\": \"class\"}\n", " n/a\n", - " 0.997615\n", + " 0.997086\n", " \n", " \n", " Recall\n", " \"n/a\"\n", " class: benign\n", - " 0.996078\n", + " 1.000000\n", " \n", " \n", " class: malignant\n", - " 0.976608\n", + " 0.968750\n", " \n", " \n", "\n", @@ -449,16 +460,16 @@ "evaluation 1\n", "type parameters label \n", "Accuracy {\"label_key\": \"class\"} n/a 0.988263\n", - "F1 \"n/a\" class: benign 0.990253\n", - " class: malignant 0.985251\n", - "Precision \"n/a\" class: benign 0.984496\n", - " class: malignant 0.994048\n", - "ROCAUC {\"label_key\": \"class\"} n/a 0.997615\n", - "Recall \"n/a\" class: benign 0.996078\n", - " class: malignant 0.976608" + "F1 \"n/a\" class: benign 0.990689\n", + " class: malignant 0.984127\n", + "Precision \"n/a\" class: benign 0.981550\n", + " class: malignant 1.000000\n", + "ROCAUC {\"label_key\": \"class\"} n/a 0.997086\n", + "Recall \"n/a\" class: benign 1.000000\n", + " class: malignant 0.968750" ] }, - "execution_count": 10, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -471,7 +482,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, "id": "73626229", "metadata": {}, "outputs": [ @@ -479,13 +490,12 @@ "data": { "text/plain": [ "[{'label_key': 'class',\n", - " 'entries': [{'prediction': 'benign', 'groundtruth': 'benign', 'count': 254},\n", - " {'prediction': 'benign', 'groundtruth': 'malignant', 'count': 4},\n", - " {'prediction': 'malignant', 'groundtruth': 'benign', 'count': 1},\n", - " {'prediction': 'malignant', 'groundtruth': 'malignant', 'count': 167}]}]" + " 'entries': [{'prediction': 'benign', 'groundtruth': 'benign', 'count': 266},\n", + " {'prediction': 'benign', 'groundtruth': 'malignant', 'count': 5},\n", + " {'prediction': 'malignant', 'groundtruth': 'malignant', 'count': 155}]}]" ] }, - "execution_count": 11, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -504,7 +514,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 14, "id": "347c180e-9913-4aa4-994e-de507da32d72", "metadata": {}, "outputs": [ @@ -514,12 +524,12 @@ "text": [ " precision recall f1-score support\n", "\n", - " malignant 0.994048 0.976608 0.985251 171\n", - " benign 0.984496 0.996078 0.990253 255\n", + " malignant 1.000000 0.968750 0.984127 160\n", + " benign 0.981550 1.000000 0.990689 266\n", "\n", " accuracy 0.988263 426\n", - " macro avg 0.989272 0.986343 0.987752 426\n", - "weighted avg 0.988330 0.988263 0.988245 426\n", + " macro avg 0.990775 0.984375 0.987408 426\n", + "weighted avg 0.988479 0.988263 0.988224 426\n", "\n" ] } diff --git a/integration_tests/client/datasets/test_datum.py b/integration_tests/client/datasets/test_datum.py index cbeba8544..e6f862167 100644 --- a/integration_tests/client/datasets/test_datum.py +++ b/integration_tests/client/datasets/test_datum.py @@ -8,7 +8,15 @@ from sqlalchemy import select from sqlalchemy.orm import Session -from valor import Annotation, Client, Dataset, Datum, GroundTruth, Label +from valor import ( + Annotation, + Client, + Dataset, + Datum, + Filter, + GroundTruth, + Label, +) from valor.metatypes import ImageMetadata from valor.schemas import Box from valor_api.backend import models @@ -87,12 +95,15 @@ def test_get_datums( db: Session, dataset_with_metadata: Dataset, 
metadata: dict ): assert len(dataset_with_metadata.get_datums()) == 2 + assert ( len( dataset_with_metadata.get_datums( - filter_by=[ - Datum.metadata["metadatum1"] == metadata["metadatum1"] - ] + filters=Filter( + datums=( + Datum.metadata["metadatum1"] == metadata["metadatum1"] + ) + ) ) ) == 1 @@ -100,17 +111,12 @@ def test_get_datums( assert ( len( dataset_with_metadata.get_datums( - filter_by=[Datum.metadata["metadatum1"] == "nonexistent value"] + filters=Filter( + datums=( + Datum.metadata["metadatum1"] == "nonexistent value" + ) + ) ) ) == 0 ) - - with pytest.raises(ValueError) as exc_info: - dataset_with_metadata.get_datums( - filter_by=[Dataset.name == "dataset name"] - ) - assert ( - "Cannot filter by dataset_names when calling `Dataset.get_datums`" - in str(exc_info) - ) diff --git a/integration_tests/client/datatype/test_data_generation.py b/integration_tests/client/datatype/test_data_generation.py index 40f5329c6..1b8686921 100644 --- a/integration_tests/client/datatype/test_data_generation.py +++ b/integration_tests/client/datatype/test_data_generation.py @@ -1,6 +1,5 @@ import io import random -from dataclasses import asdict from typing import cast import numpy as np @@ -313,7 +312,7 @@ def test_generate_segmentation_data( for image in dataset.get_datums(): uid = image.uid - sample_gt = dataset.get_groundtruth(uid) + sample_gt = dataset.get_groundtruth(uid) # type: ignore - issue #604 assert sample_gt sample_annotations = sample_gt.annotations @@ -364,7 +363,7 @@ def test_generate_prediction_data(client: Client): dataset, iou_thresholds_to_compute=[0.1, 0.9], iou_thresholds_to_return=[0.1, 0.9], - filter_by=[Label.key == "k1"], + filters=Filter(labels=(Label.key == "k1")), convert_annotations_to_type=AnnotationType.BOX, ) assert eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE @@ -392,10 +391,16 @@ def test_generate_prediction_data(client: Client): "dataset_names": [dataset_name], "model_name": model_name, "filters": { - **asdict( - Filter() - ), # default filter properties with overrides below - "label_keys": ["k1"], + "labels": { + "lhs": { + "name": "label.key", + }, + "op": "eq", + "rhs": { + "type": "string", + "value": "k1", + }, + }, }, "parameters": { "task_type": TaskType.OBJECT_DETECTION.value, diff --git a/integration_tests/client/evaluations/test_evaluation_crud.py b/integration_tests/client/evaluations/test_evaluation_crud.py index fae632b72..b9e0a10b6 100644 --- a/integration_tests/client/evaluations/test_evaluation_crud.py +++ b/integration_tests/client/evaluations/test_evaluation_crud.py @@ -4,7 +4,15 @@ from sqlalchemy.orm import Session # client -from valor import Client, Dataset, GroundTruth, Label, Model, Prediction +from valor import ( + Client, + Dataset, + Filter, + GroundTruth, + Label, + Model, + Prediction, +) from valor.enums import EvaluationStatus from valor.exceptions import ClientException @@ -122,7 +130,7 @@ def test_delete_evaluation_scope( eval1 = model.evaluate_classification(dataset) assert eval1.wait_for_completion(timeout=30) == EvaluationStatus.DONE eval2 = model.evaluate_classification( - dataset, filter_by=[Label.key == "k4"] + dataset, filters=Filter(labels=Label.key == "k4") ) assert eval2.wait_for_completion(timeout=30) diff --git a/integration_tests/client/filtering/test_filtering_examples.py b/integration_tests/client/filtering/test_filtering_examples.py new file mode 100644 index 000000000..9589e7ee1 --- /dev/null +++ b/integration_tests/client/filtering/test_filtering_examples.py @@ -0,0 +1,202 @@ +from valor 
import ( + Annotation, + Client, + Dataset, + Datum, + Filter, + GroundTruth, + Label, +) +from valor.schemas import And, Box + + +def test_example_boats_and_swimmers(client: Client): + + contains_boat_swimmer = ( + ("uid1", False, False), + ("uid2", True, False), + ("uid3", False, True), + ("uid4", True, True), + ) + + box = Box.from_extrema(0, 10, 0, 10) + swimmer = Label(key="class", value="swimmer") + boat = Label(key="class", value="boat") + fish = Label(key="class", value="fish") + + dataset = Dataset.create("ocean_images") + for uid, is_boat, is_swimmer in contains_boat_swimmer: + dataset.add_groundtruth( + GroundTruth( + datum=Datum(uid=uid), + annotations=[ + Annotation( + labels=[boat if is_boat else fish], + bounding_box=box, + is_instance=True, + ), + Annotation( + labels=[swimmer if is_swimmer else fish], + bounding_box=box, + is_instance=True, + ), + ], + ) + ) + + # Just fish + just_fish = client.get_datums( + Filter( + datums=And( + Label.key == "class", + Label.value != "boat", + Label.value != "swimmer", + ), + ) + ) + assert len(just_fish) == 1 + assert just_fish[0].uid == "uid1" + + # No swimmers + no_swimmers = client.get_datums( + Filter( + datums=And( + Label.key == "class", + Label.value == "boat", + Label.value != "swimmer", + ), + ) + ) + assert len(no_swimmers) == 1 + assert no_swimmers[0].uid == "uid2" + + # No boats + no_boats = client.get_datums( + Filter( + datums=And( + Label.key == "class", + Label.value != "boat", + Label.value == "swimmer", + ), + ) + ) + assert len(no_boats) == 1 + assert no_boats[0].uid == "uid3" + + # Both swimmers and boats + swimmers_and_boats = client.get_datums( + Filter( + datums=And( + Label.key == "class", + Label.value == "boat", + Label.value == "swimmer", + ), + ) + ) + assert len(swimmers_and_boats) == 1 + assert swimmers_and_boats[0].uid == "uid4" + + +def test_example_boats_of_different_sizes(client: Client): + + contains_boat_swimmer = ( + ("uid1", False, False), + ("uid2", True, False), + ("uid3", False, True), + ("uid4", True, True), + ) + + small_box = Box.from_extrema(0, 5, 0, 5) + large_box = Box.from_extrema(0, 10, 0, 10) + + swimmer = Label(key="class", value="swimmer") + boat = Label(key="class", value="boat") + fish = Label(key="class", value="fish") + + dataset = Dataset.create("ocean_images") + for uid, is_large_boat, is_swimmer in contains_boat_swimmer: + dataset.add_groundtruth( + GroundTruth( + datum=Datum(uid=uid), + annotations=[ + Annotation( + labels=[boat], + bounding_box=large_box if is_large_boat else small_box, + is_instance=True, + ), + Annotation( + labels=[swimmer if is_swimmer else fish], + bounding_box=small_box, + is_instance=True, + ), + ], + ) + ) + + # No swimmer, small boats + no_swimmer_small_boats = client.get_datums( + Filter( + datums=And( + Label.key == "class", + Label.value != "swimmer", + ), + annotations=And( + Label.key == "class", + Label.value == "boat", + Annotation.bounding_box.area < 50, + ), + ) + ) + assert len(no_swimmer_small_boats) == 1 + assert no_swimmer_small_boats[0].uid == "uid1" + + # No swimmer, large boats + no_swimmer_large_boats = client.get_datums( + Filter( + datums=And( + Label.key == "class", + Label.value != "swimmer", + ), + annotations=And( + Label.key == "class", + Label.value == "boat", + Annotation.bounding_box.area > 50, + ), + ) + ) + assert len(no_swimmer_large_boats) == 1 + assert no_swimmer_large_boats[0].uid == "uid2" + + # Swimmer with small boat + swimmer_with_small_boats = client.get_datums( + Filter( + datums=And( + Label.key == "class", + 
Label.value == "swimmer", + ), + annotations=And( + Label.key == "class", + Label.value == "boat", + Annotation.bounding_box.area < 50, + ), + ) + ) + assert len(swimmer_with_small_boats) == 1 + assert swimmer_with_small_boats[0].uid == "uid3" + + # Swimmer with large boat + swimmers_and_boats = client.get_datums( + Filter( + datums=And( + Label.key == "class", + Label.value == "swimmer", + ), + annotations=And( + Label.key == "class", + Label.value == "boat", + Annotation.bounding_box.area > 50, + ), + ) + ) + assert len(swimmers_and_boats) == 1 + assert swimmers_and_boats[0].uid == "uid4" diff --git a/integration_tests/client/filtering/test_geometric_filtering.py b/integration_tests/client/filtering/test_geometric_filtering.py index d946cfc23..f984da627 100644 --- a/integration_tests/client/filtering/test_geometric_filtering.py +++ b/integration_tests/client/filtering/test_geometric_filtering.py @@ -133,12 +133,12 @@ def test_filter_by_bounding_box(client, groundtruths_with_areas, areas): label_key = "box" all_labels = client.get_labels( - Filter.create( - [ - Annotation.bounding_box.is_not_none(), - Annotation.polygon.is_none(), - Annotation.raster.is_none(), - ] + Filter( + labels=( + Annotation.bounding_box.is_not_none() + & Annotation.polygon.is_none() + & Annotation.raster.is_none() + ) ) ) assert set(all_labels) == { @@ -151,13 +151,13 @@ def test_filter_by_bounding_box(client, groundtruths_with_areas, areas): # threshold area for idx, area in enumerate(areas): thresholded_labels = client.get_labels( - Filter.create( - [ - Annotation.bounding_box.is_not_none(), - Annotation.polygon.is_none(), - Annotation.raster.is_none(), - Annotation.bounding_box.area > area, - ] + Filter( + labels=( + Annotation.bounding_box.is_not_none() + & Annotation.polygon.is_none() + & Annotation.raster.is_none() + & (Annotation.bounding_box.area > area) + ) ) ) assert len(thresholded_labels) == len(areas) - idx - 1 @@ -181,12 +181,12 @@ def test_filter_by_polygon(client, groundtruths_with_areas, areas): label_key = "polygon" all_labels = client.get_labels( - Filter.create( - [ - Annotation.bounding_box.is_none(), - Annotation.polygon.is_not_none(), - Annotation.raster.is_none(), - ] + Filter( + labels=( + Annotation.bounding_box.is_none() + & Annotation.polygon.is_not_none() + & Annotation.raster.is_none() + ) ) ) assert set(all_labels) == { @@ -199,13 +199,13 @@ def test_filter_by_polygon(client, groundtruths_with_areas, areas): # threshold area for idx, area in enumerate(areas): thresholded_labels = client.get_labels( - Filter.create( - [ - Annotation.bounding_box.is_none(), - Annotation.polygon.is_not_none(), - Annotation.raster.is_none(), - Annotation.polygon.area > area, - ] + Filter( + labels=( + Annotation.bounding_box.is_none() + & Annotation.polygon.is_not_none() + & Annotation.raster.is_none() + & (Annotation.polygon.area > area) + ) ) ) assert len(thresholded_labels) == len(areas) - idx - 1 @@ -230,13 +230,13 @@ def test_filter_by_multipolygon(client, groundtruths_with_areas, areas): label_key = "multipolygon" all_labels = client.get_labels( - Filter.create( - [ - Label.key == label_key, - Annotation.bounding_box.is_none(), - Annotation.polygon.is_none(), - Annotation.raster.is_not_none(), - ] + Filter( + labels=( + (Label.key == label_key) + & Annotation.bounding_box.is_none() + & Annotation.polygon.is_none() + & Annotation.raster.is_not_none() + ) ) ) assert set(all_labels) == { @@ -249,14 +249,14 @@ def test_filter_by_multipolygon(client, groundtruths_with_areas, areas): # threshold area for 
idx, area in enumerate(areas): thresholded_labels = client.get_labels( - Filter.create( - [ - Label.key == label_key, - Annotation.bounding_box.is_none(), - Annotation.polygon.is_none(), - Annotation.raster.is_not_none(), - Annotation.raster.area > area, - ] + Filter( + labels=( + (Label.key == label_key) + & Annotation.bounding_box.is_none() + & Annotation.polygon.is_none() + & Annotation.raster.is_not_none() + & (Annotation.raster.area > area) + ) ) ) assert len(thresholded_labels) == len(areas) - idx - 1 @@ -280,13 +280,13 @@ def test_filter_by_raster(client, groundtruths_with_areas, areas): label_key = "raster" all_labels = client.get_labels( - Filter.create( - [ - Label.key == label_key, - Annotation.bounding_box.is_none(), - Annotation.polygon.is_none(), - Annotation.raster.is_not_none(), - ] + Filter( + labels=( + (Label.key == label_key) + & Annotation.bounding_box.is_none() + & Annotation.polygon.is_none() + & Annotation.raster.is_not_none() + ) ) ) assert set(all_labels) == { @@ -299,14 +299,14 @@ def test_filter_by_raster(client, groundtruths_with_areas, areas): # threshold area for idx, area in enumerate(areas): thresholded_labels = client.get_labels( - Filter.create( - [ - Label.key == label_key, - Annotation.bounding_box.is_none(), - Annotation.polygon.is_none(), - Annotation.raster.is_not_none(), - Annotation.raster.area > area, - ] + Filter( + labels=( + (Label.key == label_key) + & Annotation.bounding_box.is_none() + & Annotation.polygon.is_none() + & Annotation.raster.is_not_none() + & (Annotation.raster.area > area) + ) ) ) assert len(thresholded_labels) == len(areas) - idx - 1 diff --git a/integration_tests/client/filtering/test_geospatial_filtering.py b/integration_tests/client/filtering/test_geospatial_filtering.py index d989ad9d1..2903bbcc0 100644 --- a/integration_tests/client/filtering/test_geospatial_filtering.py +++ b/integration_tests/client/filtering/test_geospatial_filtering.py @@ -4,9 +4,17 @@ import pytest -from valor import Client, Dataset, Datum, GroundTruth, Model, Prediction +from valor import ( + Client, + Dataset, + Datum, + Filter, + GroundTruth, + Model, + Prediction, +) from valor.enums import EvaluationStatus -from valor.schemas import Constraint, Polygon +from valor.schemas import Polygon def test_set_and_get_geospatial( @@ -94,12 +102,6 @@ def test_geospatial_filter( (125.2750725, 38.760525), ] ] - geodict = { - "type": "Polygon", - "coordinates": [ - [list(point) for point in subpoly] for subpoly in coordinates - ], - } dataset = Dataset.create( name=dataset_name, metadata={"geospatial": Polygon(coordinates)} @@ -122,16 +124,11 @@ def test_geospatial_filter( datasets=dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by={ - "dataset_metadata": { - "geospatial": [ - { - "operator": "intersect", - "value": geodict, - } - ], - } - }, + filters=Filter( + datasets=Dataset.metadata["geospatial"].intersects( + Polygon(coordinates) + ) + ), ) assert eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE assert len(eval_job.metrics) == 16 @@ -143,7 +140,7 @@ def test_geospatial_filter( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[geospatial_metadatum.inside({1234: {}})], + filters=Filter(datums=geospatial_metadatum.inside({1234: {}})), ) # test datums @@ -151,14 +148,11 @@ def test_geospatial_filter( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[geospatial_metadatum.intersects(Polygon(coordinates))], 
+ filters=Filter( + datums=geospatial_metadatum.intersects(Polygon(coordinates)) + ), ) assert eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE - - assert eval_job.filters.datum_metadata - assert eval_job.filters.datum_metadata["geospatial"] == [ - Constraint(value=geodict, operator="intersect") - ] assert len(eval_job.metrics) == 16 # filtering by model is allowed, this is the equivalent of requesting.. @@ -167,25 +161,20 @@ def test_geospatial_filter( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by={ - "model_metadata": { - "geospatial": [ - { - "operator": "inside", - "value": { - "type": "polygon", - "coordinates": [ - [ - [124.0, 37.0], - [128.0, 37.0], - [128.0, 40.0], - [124.0, 40.0], - ] - ], - }, - } - ], - } - }, + filters=Filter( + models=Model.metadata["geospatial"].inside( + Polygon( + [ + [ + (124.0, 37.0), + (128.0, 37.0), + (128.0, 40.0), + (124.0, 40.0), + (124.0, 37.0), + ] + ] + ) + ) + ), ) assert eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE diff --git a/integration_tests/client/metrics/test_classification.py b/integration_tests/client/metrics/test_classification.py index d645eacbb..f4f2e176a 100644 --- a/integration_tests/client/metrics/test_classification.py +++ b/integration_tests/client/metrics/test_classification.py @@ -12,6 +12,7 @@ Client, Dataset, Datum, + Filter, GroundTruth, Label, Model, @@ -511,9 +512,7 @@ def test_stratify_clf_metrics( eval_results_val2 = model.evaluate_classification( dataset, - filter_by=[ - Datum.metadata["md1"] == "md1-val2", - ], + filters=Filter(datums=(Datum.metadata["md1"] == "md1-val2")), # type: ignore - issue #605 ) assert ( eval_results_val2.wait_for_completion(timeout=30) @@ -524,10 +523,7 @@ def test_stratify_clf_metrics( # should get the same thing if we use the boolean filter eval_results_bool = model.evaluate_classification( dataset, - filter_by=[ - Datum.metadata["md3"] - == True # noqa: E712 - 'is' keyword is not overloadable, so we have to use 'symbol == True' - ], + filters=Filter(datums=(Datum.metadata["md3"] == True)), # type: ignore - issue #605 # noqa: E712 ) assert ( eval_results_bool.wait_for_completion(timeout=30) @@ -660,9 +656,7 @@ def test_stratify_clf_metrics_by_time( eval_results_val2 = model.evaluate_classification( dataset, - filter_by=[ - Datum.metadata["md1"] == date.fromisoformat("2002-01-01"), - ], + filters=Filter(datums=(Datum.metadata["md1"] == date.fromisoformat("2002-01-01"))), # type: ignore - issue #605 ) assert ( eval_results_val2.wait_for_completion(timeout=30) diff --git a/integration_tests/client/metrics/test_detection.py b/integration_tests/client/metrics/test_detection.py index 5bfb5299c..580e194ae 100644 --- a/integration_tests/client/metrics/test_detection.py +++ b/integration_tests/client/metrics/test_detection.py @@ -3,7 +3,6 @@ """ import random -from dataclasses import asdict import pytest import requests @@ -27,8 +26,6 @@ from valor.schemas import Box from valor_api.backend import models -default_filter_properties = asdict(Filter()) - def test_evaluate_detection( db: Session, @@ -116,9 +113,9 @@ def test_evaluate_detection( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Label.key == "k1", - ], + filters=Filter( + labels=(Label.key == "k1"), + ), convert_annotations_to_type=AnnotationType.BOX, ) assert eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE @@ -142,8 +139,16 @@ def test_evaluate_detection( "dataset_names": 
["test_dataset"], "model_name": model_name, "filters": { - **default_filter_properties, - "label_keys": ["k1"], + "labels": { + "lhs": { + "name": "label.key", + }, + "op": "eq", + "rhs": { + "type": "string", + "value": "k1", + }, + }, }, "parameters": { "task_type": TaskType.OBJECT_DETECTION.value, @@ -182,10 +187,10 @@ def test_evaluate_detection( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Annotation.labels == [Label(key="k1", value="v1")], - Annotation.bounding_box.is_not_none(), - ], + filters=Filter( + annotations=Annotation.bounding_box.is_not_none(), + labels=((Label.key == "k1") & (Label.value == "v1")), + ), ) assert ( eval_job_value_filter_using_in_.wait_for_completion(timeout=30) @@ -202,9 +207,9 @@ def test_evaluate_detection( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Annotation.labels == [Label(key="k1", value="v1")], - ], + filters=Filter( + labels=((Label.key == "k1") & (Label.value == "v1")), + ), convert_annotations_to_type=AnnotationType.BOX, ) assert ( @@ -224,9 +229,9 @@ def test_evaluate_detection( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Annotation.labels == [Label(key="k1", value="v2")], - ], + filters=Filter( + labels=((Label.key == "k1") & (Label.value == "v2")), + ), convert_annotations_to_type=AnnotationType.BOX, ) assert "EvaluationRequestError" in str(e) @@ -244,11 +249,13 @@ def test_evaluate_detection( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Label.key == "k1", - Annotation.bounding_box.area >= 10, - Annotation.bounding_box.area <= 2000, - ], + filters=Filter( + labels=(Label.key == "k1"), + annotations=( + (Annotation.bounding_box.area >= 10.0) + & (Annotation.bounding_box.area <= 2000.0) + ), + ), convert_annotations_to_type=AnnotationType.BOX, ) @@ -266,18 +273,41 @@ def test_evaluate_detection( "dataset_names": ["test_dataset"], "model_name": model_name, "filters": { - **default_filter_properties, - "bounding_box_area": [ - { - "operator": ">=", - "value": 10.0, + "annotations": { + "args": [ + { + "lhs": { + "name": "annotation.bounding_box.area", + }, + "op": "gte", + "rhs": { + "type": "float", + "value": 10.0, + }, + }, + { + "lhs": { + "name": "annotation.bounding_box.area", + }, + "op": "lte", + "rhs": { + "type": "float", + "value": 2000.0, + }, + }, + ], + "op": "and", + }, + "labels": { + "lhs": { + "name": "label.key", }, - { - "operator": "<=", - "value": 2000.0, + "op": "eq", + "rhs": { + "type": "string", + "value": "k1", }, - ], - "label_keys": ["k1"], + }, }, "parameters": { "task_type": TaskType.OBJECT_DETECTION.value, @@ -318,10 +348,10 @@ def test_evaluate_detection( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Label.key == "k1", - Annotation.bounding_box.area >= 1200, - ], + filters=Filter( + labels=(Label.key == "k1"), + annotations=(Annotation.bounding_box.area >= 1200.0), + ), convert_annotations_to_type=AnnotationType.BOX, ) assert ( @@ -336,14 +366,26 @@ def test_evaluate_detection( "dataset_names": ["test_dataset"], "model_name": model_name, "filters": { - **default_filter_properties, - "bounding_box_area": [ - { - "operator": ">=", + "annotations": { + "lhs": { + "name": "annotation.bounding_box.area", + }, + "op": "gte", + "rhs": { + "type": "float", "value": 1200.0, }, - ], - "label_keys": ["k1"], + }, + "labels": { + "lhs": { + "name": 
"label.key", + }, + "op": "eq", + "rhs": { + "type": "string", + "value": "k1", + }, + }, }, "parameters": { "task_type": TaskType.OBJECT_DETECTION.value, @@ -377,10 +419,10 @@ def test_evaluate_detection( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Label.key == "k1", - Annotation.bounding_box.area <= 1200, - ], + filters=Filter( + labels=(Label.key == "k1"), + annotations=(Annotation.bounding_box.area <= 1200.0), + ), convert_annotations_to_type=AnnotationType.BOX, ) assert ( @@ -398,11 +440,13 @@ def test_evaluate_detection( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Label.key == "k1", - Annotation.bounding_box.area >= 1200, - Annotation.bounding_box.area <= 1800, - ], + filters=Filter( + labels=(Label.key == "k1"), + annotations=( + (Annotation.bounding_box.area >= 1200.0) + & (Annotation.bounding_box.area <= 1800.0) + ), + ), convert_annotations_to_type=AnnotationType.BOX, ) assert ( @@ -417,18 +461,41 @@ def test_evaluate_detection( "dataset_names": ["test_dataset"], "model_name": model_name, "filters": { - **default_filter_properties, - "bounding_box_area": [ - { - "operator": ">=", - "value": 1200.0, + "annotations": { + "args": [ + { + "lhs": { + "name": "annotation.bounding_box.area", + }, + "op": "gte", + "rhs": { + "type": "float", + "value": 1200.0, + }, + }, + { + "lhs": { + "name": "annotation.bounding_box.area", + }, + "op": "lte", + "rhs": { + "type": "float", + "value": 1800.0, + }, + }, + ], + "op": "and", + }, + "labels": { + "lhs": { + "name": "label.key", }, - { - "operator": "<=", - "value": 1800.0, + "op": "eq", + "rhs": { + "type": "string", + "value": "k1", }, - ], - "label_keys": ["k1"], + }, }, "parameters": { "task_type": TaskType.OBJECT_DETECTION.value, @@ -481,11 +548,13 @@ def test_evaluate_detection( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Label.key == "k1", - Annotation.bounding_box.area >= 1200, - Annotation.bounding_box.area <= 1800, - ], + filters=Filter( + labels=(Label.key == "k1"), + annotations=( + (Annotation.bounding_box.area >= 1200.0) + & (Annotation.bounding_box.area <= 1800.0) + ), + ), convert_annotations_to_type=AnnotationType.BOX, metrics_to_return=selected_metrics, ) @@ -518,10 +587,10 @@ def test_evaluate_detection_with_json_filters( # test default iou arguments eval_results = model.evaluate_detection( dataset, - filter_by=[ - Label.key == "k1", - Annotation.bounding_box.is_not_none(), - ], + filters=Filter( + labels=(Label.key == "k1"), + annotations=Annotation.bounding_box.is_not_none(), + ), ) assert ( eval_results.wait_for_completion(timeout=30) == EvaluationStatus.DONE @@ -578,10 +647,10 @@ def test_evaluate_detection_with_json_filters( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Label.key == "k1", - Annotation.bounding_box.area >= 1200, - ], + filters=Filter( + labels=(Label.key == "k1"), + annotations=(Annotation.bounding_box.area >= 1200.0), + ), convert_annotations_to_type=AnnotationType.BOX, ) assert ( @@ -594,20 +663,13 @@ def test_evaluate_detection_with_json_filters( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by={ - **default_filter_properties, - "bounding_box_area": [ - { - "operator": ">=", - "value": 1200.0, - }, - { - "operator": "<=", - "value": 1800.0, - }, - ], - "label_keys": ["k1"], - }, + filters=Filter( + labels=(Label.key == "k1"), + 
annotations=( + (Annotation.bounding_box.area >= 1200.0) + & (Annotation.bounding_box.area <= 1800.0) + ), + ), convert_annotations_to_type=AnnotationType.BOX, ) @@ -623,18 +685,41 @@ def test_evaluate_detection_with_json_filters( "dataset_names": ["test_dataset"], "model_name": model_name, "filters": { - **default_filter_properties, - "bounding_box_area": [ - { - "operator": ">=", - "value": 1200.0, + "annotations": { + "args": [ + { + "lhs": { + "name": "annotation.bounding_box.area", + }, + "op": "gte", + "rhs": { + "type": "float", + "value": 1200.0, + }, + }, + { + "lhs": { + "name": "annotation.bounding_box.area", + }, + "op": "lte", + "rhs": { + "type": "float", + "value": 1800.0, + }, + }, + ], + "op": "and", + }, + "labels": { + "lhs": { + "name": "label.key", }, - { - "operator": "<=", - "value": 1800.0, + "op": "eq", + "rhs": { + "type": "string", + "value": "k1", }, - ], - "label_keys": ["k1"], + }, }, "parameters": { "task_type": TaskType.OBJECT_DETECTION.value, @@ -692,10 +777,10 @@ def test_get_evaluations( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Label.key == "k1", - Annotation.bounding_box.is_not_none(), - ], + filters=Filter( + labels=(Label.key == "k1"), + annotations=(Annotation.bounding_box.is_not_none()), + ), ) eval_job.wait_for_completion(timeout=30) @@ -834,10 +919,10 @@ def test_get_evaluations( dataset, iou_thresholds_to_compute=[0.1, 0.6], iou_thresholds_to_return=[0.1, 0.6], - filter_by=[ - Label.key == "k1", - Annotation.bounding_box.is_not_none(), - ], + filters=Filter( + labels=(Label.key == "k1"), + annotations=(Annotation.bounding_box.is_not_none()), + ), ) eval_job2.wait_for_completion(timeout=30) diff --git a/integration_tests/client/metrics/test_evaluations.py b/integration_tests/client/metrics/test_evaluations.py index e3b81a163..587de1ba0 100644 --- a/integration_tests/client/metrics/test_evaluations.py +++ b/integration_tests/client/metrics/test_evaluations.py @@ -6,6 +6,7 @@ Client, Dataset, Datum, + Filter, GroundTruth, Label, Model, @@ -140,10 +141,7 @@ def test_get_sorted_evaluations( clf_eval_1 = model.evaluate_classification( dataset, - filter_by=[ - Datum.metadata["md3"] - == True # noqa: E712 - 'is' keyword is not overloadable, so we have to use 'symbol == True' - ], + filters=Filter(datums=(Datum.metadata["md3"] == True)), # noqa: E712 ) assert clf_eval_1.wait_for_completion(timeout=30) == EvaluationStatus.DONE @@ -154,9 +152,7 @@ def test_get_sorted_evaluations( clf_eval_3 = model.evaluate_classification( dataset, - filter_by=[ - Datum.metadata["md1"] == "md1-val2", - ], + filters=Filter(datums=(Datum.metadata["md1"] == "md1-val2")), ) assert clf_eval_3.wait_for_completion(timeout=30) == EvaluationStatus.DONE diff --git a/integration_tests/client/metrics/test_segmentation.py b/integration_tests/client/metrics/test_segmentation.py index 4b431ed5a..bda6c5b38 100644 --- a/integration_tests/client/metrics/test_segmentation.py +++ b/integration_tests/client/metrics/test_segmentation.py @@ -4,7 +4,16 @@ import random -from valor import Client, Dataset, Datum, GroundTruth, Label, Model, Prediction +from valor import ( + Client, + Dataset, + Datum, + Filter, + GroundTruth, + Label, + Model, + Prediction, +) from valor.enums import EvaluationStatus, MetricType @@ -107,9 +116,7 @@ def test_evaluate_segmentation_with_filter( color = Datum.metadata["color"] eval_job = model.evaluate_segmentation( dataset, - filter_by=[ - color == "red", - ], + filters=Filter(datums=(color == "red")), ) assert 
eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE diff --git a/integration_tests/client/test_client.py b/integration_tests/client/test_client.py index e5e418111..ed20fcb1a 100644 --- a/integration_tests/client/test_client.py +++ b/integration_tests/client/test_client.py @@ -20,7 +20,7 @@ ) from valor.client import connect from valor.exceptions import ClientException -from valor.schemas import Constraint, Filter +from valor.schemas import And, Filter @pytest.fixture @@ -223,14 +223,18 @@ def test_get_labels( assert len(all_labels) == 10 high_score_labels = client.get_labels( - Filter(label_scores=[Constraint(value=0.5, operator=">")]) + Filter( + predictions=(Label.score > 0.5), + ) ) assert len(high_score_labels) == 5 for label in high_score_labels: assert int(label.value) % 2 == 1 low_score_labels = client.get_labels( - Filter(label_scores=[Constraint(value=0.5, operator="<")]) + Filter( + predictions=(Label.score < 0.5), + ) ) assert len(low_score_labels) == 5 for label in low_score_labels: @@ -253,11 +257,15 @@ def test_get_datasets( assert len(all_datasets) == 1 assert all_datasets[0].name == created_dataset.name - pos_query = client.get_datasets(Filter(labels=[{"class0": "1"}])) + pos_query = client.get_datasets( + Filter(labels=And(Label.key == "class0", Label.value == "1")) + ) assert len(pos_query) == 1 assert pos_query[0].name == created_dataset.name - neg_query = client.get_datasets(Filter(labels=[{"some_other_class": "1"}])) + neg_query = client.get_datasets( + Filter(labels=And(Label.key == "some_other_class", Label.value == "1")) + ) assert len(neg_query) == 0 # check that the content-range header exists on the raw response @@ -277,11 +285,15 @@ def test_get_models( assert len(all_models) == 1 assert all_models[0].name == created_model.name - pos_query = client.get_models(Filter(labels=[{"class0": "1"}])) + pos_query = client.get_models( + Filter(labels=And(Label.key == "class0", Label.value == "1")) + ) assert len(pos_query) == 1 assert pos_query[0].name == created_model.name - neg_query = client.get_models(Filter(labels=[{"some_other_class": "1"}])) + neg_query = client.get_models( + Filter(labels=And(Label.key == "some_other_class", Label.value == "1")) + ) assert len(neg_query) == 0 # check that the content-range header exists on the raw response @@ -301,11 +313,15 @@ def test_get_datums( assert len(all_datums) == 1 assert all_datums[0].uid == "1" - pos_query = client.get_datums(Filter(labels=[{"class0": "1"}])) + pos_query = client.get_datums( + Filter(labels=And(Label.key == "class0", Label.value == "1")) + ) assert len(pos_query) == 1 assert pos_query[0].uid == "1" - neg_query = client.get_datums(Filter(labels=[{"some_other_class": "1"}])) + neg_query = client.get_datums( + Filter(labels=And(Label.key == "some_other_class", Label.value == "1")) + ) assert len(neg_query) == 0 # check that the content-range header exists on the raw response diff --git a/integration_tests/client/test_migrations.py b/integration_tests/client/test_migrations.py new file mode 100644 index 000000000..b3a05e9ae --- /dev/null +++ b/integration_tests/client/test_migrations.py @@ -0,0 +1,97 @@ +import pytest +from sqlalchemy.exc import IntegrityError +from sqlalchemy.orm import Session + +from valor import Client +from valor_api.backend.models import Evaluation +from valor_api.enums import EvaluationStatus, TaskType +from valor_api.schemas import EvaluationParameters +from valor_api.schemas.migrations import DeprecatedFilter + + +@pytest.fixture +def deprecated_filter() -> 
DeprecatedFilter: + return DeprecatedFilter( + model_names=["1", "2"], + model_metadata={ + "geospatial": [ + { + "operator": "inside", + "value": { + "type": "polygon", + "coordinates": [ + [ + [124.0, 37.0], + [128.0, 37.0], + [128.0, 40.0], + [124.0, 40.0], + ] + ], + }, + } + ], + }, + bounding_box_area=[ + { + "operator": ">=", + "value": 10.0, + }, + { + "operator": "<=", + "value": 2000.0, + }, + ], + label_keys=["k1"], + ) + + +@pytest.fixture +def evaluation_with_deprecated_filter( + db: Session, deprecated_filter: DeprecatedFilter +): + + # manually add to database + row_id = 0 + try: + row = Evaluation( + id=row_id, + dataset_names=["1", "2"], + model_name="3", + filters=deprecated_filter.model_dump(), + parameters=EvaluationParameters( + task_type=TaskType.CLASSIFICATION + ).model_dump(), + status=EvaluationStatus.DONE, + meta=dict(), + ) + db.add(row) + db.commit() + except IntegrityError as e: + db.rollback() + raise e + + yield row_id + + # delete evaluation + try: + db.delete(row) + db.commit() + except IntegrityError as e: + db.rollback() + raise e + + +def test_filter_migration( + client: Client, + evaluation_with_deprecated_filter: Evaluation, + deprecated_filter: DeprecatedFilter, +): + # get row id + row_id = evaluation_with_deprecated_filter + + # verify deprecated format is accessible to client + evaluations = client.get_evaluations(evaluation_ids=[row_id]) + assert len(evaluations) == 1 + assert evaluations[0].filters == deprecated_filter.model_dump( + exclude_none=True + ) diff --git a/ts-client/src/ValorClient.ts b/ts-client/src/ValorClient.ts index 0dc3c90c3..82e1ec531 100644 --- a/ts-client/src/ValorClient.ts +++ b/ts-client/src/ValorClient.ts @@ -157,17 +157,23 @@ export type Evaluation = { created_at: Date; }; -const metadataDictToString = (input: { [key: string]: string | number }): string => { - const result: { [key: string]: Array<{ value: string | number; operator: string }> } = - {}; - - Object.entries(input).forEach(([key, value]) => { - result[key] = [{ value: value, operator: '==' }]; - }); +const metadataDictToFilter = (name: string, input: { [key: string]: string | number }): object => { + const args = Object.entries(input).map(([key, value]) => ({ + op: "eq", + lhs: { + name: name, + key: key + }, + rhs: { + type: typeof value === 'string' ? 'string' : 'number', + value: value + } + })); - return JSON.stringify(result); + return args.length === 1 ? args[0] : { op: "and", args: args }; }; + export class ValorClient { private client: AxiosInstance; @@ -188,13 +194,13 @@ export class ValorClient { * Fetches datasets matching the filters defined by queryParams. This is private * because we define higher-level methods that use this. * - * @param queryParams An object containing query parameters to filter datasets by. + * @param filters An object containing a filter. 
 *
 * @returns {Promise<Dataset[]>}
 *
 */
- private async getDatasets(queryParams: object): Promise<Dataset[]> {
- const response = await this.client.get('/datasets', { params: queryParams });
+ private async getDatasets(filters: object): Promise<Dataset[]> {
+ const response = await this.client.post('/datasets/filter', filters);
 var datasets: Dataset[] = response.data;
 for (let index = 0, length = datasets.length; index < length; ++index) {
 datasets[index].metadata = decodeMetadata(datasets[index].metadata);
 }
 return datasets;
 }
@@ -226,7 +232,7 @@ export class ValorClient {
 public async getDatasetsByMetadata(metadata: {
 [key: string]: string | number;
 }): Promise<Dataset[]> {
- return this.getDatasets({ dataset_metadata: metadataDictToString(metadata) });
+ return this.getDatasets({ datasets: metadataDictToFilter("dataset.metadata", metadata) });
 }
 /**
@@ -281,12 +287,12 @@
 * Fetches models matching the filters defined by queryParams. This is
 * private because we define higher-level methods that use this.
 *
- * @param queryParams An object containing query parameters to filter models by.
+ * @param filters An object containing a filter.
 *
 * @returns {Promise<Model[]>}
 */
- private async getModels(queryParams: object): Promise<Model[]> {
- const response = await this.client.get('/models', { params: queryParams });
+ private async getModels(filters: object): Promise<Model[]> {
+ const response = await this.client.post('/models/filter', filters);
 var models: Model[] = response.data;
 for (let index = 0, length = models.length; index < length; ++index) {
 models[index].metadata = decodeMetadata(models[index].metadata);
 }
 return models;
 }
@@ -317,7 +323,7 @@
 public async getModelsByMetadata(metadata: {
 [key: string]: string | number;
 }): Promise<Model[]> {
- return this.getModels({ model_metadata: metadataDictToString(metadata) });
+ return this.getModels({ models: metadataDictToFilter("model.metadata", metadata) });
 }

 /**
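
Taken together, the client-side hunks above implement one migration: the list-valued `filter_by=[...]` keyword is replaced everywhere by a single `filters=Filter(...)` object whose fields (`datasets`, `models`, `datums`, `annotations`, `labels`, `predictions`) each hold a symbolic condition, combined with `valor.schemas.And` or the overloaded `&` operator. Below is a minimal before/after sketch of that pattern, assembled only from calls that appear in this diff; `dataset` and `model` are illustrative placeholders for objects created elsewhere, not names from the changeset.

# Sketch of the filter migration these hunks implement.
# Assumes `dataset` and `model` were created beforehand
# (e.g. via Dataset.create(...) / Model.create(...)).
from valor import Annotation, Filter, Label
from valor.schemas import And

# Old API (removed in this diff): a flat list of constraints.
# eval_job = model.evaluate_detection(
#     dataset,
#     filter_by=[
#         Label.key == "k1",
#         Annotation.bounding_box.area >= 10,
#         Annotation.bounding_box.area <= 2000,
#     ],
# )

# New API: one Filter object; conditions are grouped per target
# and combined with And(...) or the `&` operator.
eval_job = model.evaluate_detection(
    dataset,
    filters=Filter(
        labels=(Label.key == "k1"),
        annotations=And(
            Annotation.bounding_box.area >= 10.0,
            Annotation.bounding_box.area <= 2000.0,
        ),
    ),
)
eval_job.wait_for_completion(timeout=30)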
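
For reference, the request bodies asserted in test_detection.py show how such a Filter serializes on the wire: each condition becomes a nested lhs/op/rhs dictionary, and conjunctions become an {"op": "and", "args": [...]} node. The dictionary below is a sketch of what the filter in the previous example would produce, with key names and values copied verbatim from the assertions in this diff; the variable name is hypothetical.

# Hypothetical: the serialized "filters" payload for the sketch above,
# mirroring the structure asserted in test_detection.py.
expected_filters = {
    "labels": {
        "lhs": {"name": "label.key"},
        "op": "eq",
        "rhs": {"type": "string", "value": "k1"},
    },
    "annotations": {
        "op": "and",
        "args": [
            {
                "lhs": {"name": "annotation.bounding_box.area"},
                "op": "gte",
                "rhs": {"type": "float", "value": 10.0},
            },
            {
                "lhs": {"name": "annotation.bounding_box.area"},
                "op": "lte",
                "rhs": {"type": "float", "value": 2000.0},
            },
        ],
    },
}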