diff --git a/integration_tests/benchmarks/data/load_coco.py b/integration_tests/benchmarks/data/load_coco.py
index 22edfd4a9..ddcd0d446 100644
--- a/integration_tests/benchmarks/data/load_coco.py
+++ b/integration_tests/benchmarks/data/load_coco.py
@@ -231,6 +231,7 @@ def create_bounding_boxes(
             "is_instance"
         ]  # type: ignore - dict typing
         is True
+        and bitmask_to_bbox(mask_ids == segmentation["id"]) is not None
     ]
 
 
@@ -272,6 +273,7 @@ def create_bounding_polygons(
             "is_instance"
         ]  # type: ignore - dict typing
         is True
+        and bitmask_to_polygon(mask_ids == segmentation["id"]) is not None
     ]
 
 
diff --git a/integration_tests/benchmarks/data/run_yolo.py b/integration_tests/benchmarks/data/run_yolo.py
index 9e30a603a..1d844c6fc 100644
--- a/integration_tests/benchmarks/data/run_yolo.py
+++ b/integration_tests/benchmarks/data/run_yolo.py
@@ -291,9 +291,14 @@ def parse_bitmask_into_multipolygon_raster_detection(
     prediction = parse_bitmask_detection(
         result=result, datum=datum, label_key=label_key, resample=resample
     )
+    annotations = []
     for annotation in prediction.annotations:
         array = annotation.raster.array
-        annotation.raster = bitmask_to_multipolygon_raster(array)
+        multipolygon = bitmask_to_multipolygon_raster(array)
+        if multipolygon is not None:
+            annotation.raster = multipolygon
+            annotations.append(annotation)
+    prediction.annotations = annotations
     return prediction
 
 
@@ -306,10 +311,15 @@ def parse_bitmask_into_bounding_polygon_detection(
     prediction = parse_bitmask_detection(
         result=result, datum=datum, label_key=label_key, resample=resample
     )
+    annotations = []
     for annotation in prediction.annotations:
         array = annotation.raster.array
-        annotation.polygon = bitmask_to_polygon(array)
-        annotation.raster = None
+        polygon = bitmask_to_polygon(array)
+        if polygon is not None:
+            annotation.polygon = polygon
+            annotation.raster = None
+            annotations.append(annotation)
+    prediction.annotations = annotations
     return prediction
 
 
diff --git a/integration_tests/benchmarks/object-detection/benchmark_script.py b/integration_tests/benchmarks/object-detection/benchmark_script.py
index c8bef39a4..b73d68903 100644
--- a/integration_tests/benchmarks/object-detection/benchmark_script.py
+++ b/integration_tests/benchmarks/object-detection/benchmark_script.py
@@ -89,10 +89,10 @@ def ingest_groundtruths(
         elif len(chunks) < chunk_size:
             continue
-        dataset.add_groundtruths(chunks)
+        dataset.add_groundtruths(chunks, timeout=30)
         chunks = []
 
     if chunks:
-        dataset.add_groundtruths(chunks)
+        dataset.add_groundtruths(chunks, timeout=30)
 
 
 def ingest_predictions(
@@ -122,10 +122,10 @@ def ingest_predictions(
         elif len(chunks) < chunk_size:
             continue
-        model.add_predictions(dataset, chunks)
+        model.add_predictions(dataset, chunks, timeout=30)
         chunks = []
 
     if chunks:
-        model.add_predictions(dataset, chunks)
+        model.add_predictions(dataset, chunks, timeout=30)
 
 
 def run_base_evaluation(dset: Dataset, model: Model):
@@ -292,6 +292,8 @@ def run_benchmarking_analysis(
 
     for gt_type, gt_filename in groundtruths.items():
         for pd_type, pd_filename in predictions.items():
+            print(gt_type, pd_type)
+
             try:
                 dataset = Dataset.create(name="coco")
                 model = Model.create(name="yolo")