diff --git a/scripts/train_torchreid.py b/scripts/train_torchreid.py
index 344d3f9..8b305a6 100644
--- a/scripts/train_torchreid.py
+++ b/scripts/train_torchreid.py
@@ -38,7 +38,7 @@
 # Specify ReID model
 model = build_model(
     name="osnet_x1_0",
-    num_classes=6878,  # max person id of PT21
+    num_classes=6879,  # max person id of PT21
     # use_gpu=True,
     # pretrained=True,
     # loss="softmax",
diff --git a/tests/utils/test__image.py b/tests/utils/test__image.py
index d52663d..4b248ac 100644
--- a/tests/utils/test__image.py
+++ b/tests/utils/test__image.py
@@ -22,7 +22,7 @@
 )
 from dgs.utils.types import ImgShape, TVImage
 from dgs.utils.validation import validate_bboxes, validate_images, validate_key_points
-from helper import load_test_image
+from helper import load_test_image, load_test_images
 
 # Map image name to shape.
 # Shape is torch shape and therefore [C x H x W].
@@ -654,6 +654,23 @@ def test_batched_input(self):
         self.assertTrue(torch.allclose(new_bboxes[0], new_bboxes[1]))
         self.assertTrue(torch.allclose(new_coords[0], new_coords[1]))
 
+    def test_exceptions(self):
+        for images, bboxes, coords, exception in [
+            (load_test_image("866-200x300.jpg"), create_bbox(10, 10), torch.zeros((10, 21, 2)), ValueError),
+            (
+                load_test_images(["866-200x300.jpg", "866-200x300.jpg"]),
+                tv_tensors.BoundingBoxes(torch.zeros((10, 4)), canvas_size=(10, 10), format="xywh"),
+                torch.zeros((10, 21, 2)),
+                ValueError,
+            ),
+        ]:
+            with self.subTest(msg=f"bboxes: {bboxes}, coords: {coords.shape}, exception: {exception}"):
+                data = create_structured_data(
+                    image=images, out_shape=(100, 100), mode="zero-pad", bbox=bboxes, key_points=coords
+                )
+                with self.assertRaises(exception):
+                    CustomCropResize()(data)
+
 
 if __name__ == "__main__":
     unittest.main()